From 415f616e4b8f5bae4e9ca8f2c1faa00945c77d38 Mon Sep 17 00:00:00 2001 From: Clara Draper <33430543+ClaraDraper-NOAA@users.noreply.github.com> Date: Mon, 26 Feb 2024 06:13:14 -0700 Subject: [PATCH 01/25] global_cycle - Removed checks preventing land increments for fractional grids. (#903) Removed two checks that caused global_cycle to report an error and exit in the case that both land increments were being added and fractional grids are used. There are no issues adding soil moisture and soil temperature increments in fractional grid mode, so the code can be allowed to proceed. Fixes #895. --- sorc/global_cycle.fd/cycle.f90 | 9 --------- 1 file changed, 9 deletions(-) diff --git a/sorc/global_cycle.fd/cycle.f90 b/sorc/global_cycle.fd/cycle.f90 index ab983bc04..859fbd0c0 100644 --- a/sorc/global_cycle.fd/cycle.f90 +++ b/sorc/global_cycle.fd/cycle.f90 @@ -454,10 +454,6 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & ENDIF IF (DO_LNDINC) THEN - IF (FRAC_GRID) THEN - print *, 'FATAL ERROR: land increment update does not work with fractional grids.' - call MPI_ABORT(MPI_COMM_WORLD, 17, IERR) - ENDIF ! identify variables to be updated, and allocate arrays. IF (TRIM(LND_SOI_FILE) .NE. "NULL") THEN DO_SOI_INC = .TRUE. @@ -505,11 +501,6 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & call MPI_ABORT(MPI_COMM_WORLD, 18, IERR) ENDIF - IF (FRAC_GRID .AND. DO_SNO_INC) THEN - print *, 'FATAL ERROR: Snow increment update does not work with fractional grids.' - call MPI_ABORT(MPI_COMM_WORLD, 19, IERR) - ENDIF - IF (IS_NOAHMP .AND. DO_SNO_INC) THEN print *, 'FATAL ERROR: Snow increment update does not work with NOAH_MP.' call MPI_ABORT(MPI_COMM_WORLD, 29, IERR) From 1dac85544dd4134de7cb899214c7db67bbe8c0b4 Mon Sep 17 00:00:00 2001 From: Larissa Reames <52886575+LarissaReames-NOAA@users.noreply.github.com> Date: Tue, 27 Feb 2024 10:28:59 -0600 Subject: [PATCH 02/25] chgres_cube: Implement WMO grib2 template 1 (rotated lat-lon) read capability (#902) Add ability to read GRIB2 data that uses the WMO-standard rotated lat-lon grid template (GRIB2 grid template 1). Update 'readthedocs'. Fixes #901. --- docs/source/chgres_cube.rst | 2 +- sorc/chgres_cube.fd/atm_input_data.F90 | 10 ++- sorc/chgres_cube.fd/model_grid.F90 | 114 ++++++++++++++++++++++++- sorc/chgres_cube.fd/program_setup.F90 | 6 +- 4 files changed, 125 insertions(+), 7 deletions(-) diff --git a/docs/source/chgres_cube.rst b/docs/source/chgres_cube.rst index 71f1274c3..9af27656e 100644 --- a/docs/source/chgres_cube.rst +++ b/docs/source/chgres_cube.rst @@ -369,7 +369,7 @@ Namelist variables with “input” in their name refer to data input to chgres_ * Set to 2 to create a boundary condition file. Use this option for all but the initialization time. * halo_blend - Integer number of row/columns to apply halo blending into the domain, where model and lateral boundary tendencies are applied. * halo_bndy - Integer number of rows/columns that exist within the halo, where pure lateral boundary conditions are applied. - * external_model - Name of source model for input data. Valid options: 'GFS', 'NAM', 'RAP', 'HRRR'. (Default: 'GFS') + * external_model - Name of source model for input data. Valid options: 'GFS', 'NAM', 'RAP', 'HRRR', 'RRFS'. 
(Default: 'GFS') **Optional Entries** diff --git a/sorc/chgres_cube.fd/atm_input_data.F90 b/sorc/chgres_cube.fd/atm_input_data.F90 index 7d995ee0a..fd7d254c1 100644 --- a/sorc/chgres_cube.fd/atm_input_data.F90 +++ b/sorc/chgres_cube.fd/atm_input_data.F90 @@ -3018,6 +3018,14 @@ subroutine read_winds(u,v,localpet,octet_23,rlevs,lugb,pdt_num) print*, "- CALL CALCALPHA_ROTLATLON with center lat,lon = ",latin1,lov call calcalpha_rotlatlon(lat,lon,latin1,lov,alpha) + elseif (gfld%igdtnum == 1) then ! grid definition template number - non-E stagger rotated lat/lon grid + + latin1 = real(float(gfld%igdtmpl(20))/1.0E6, kind=esmf_kind_r4) + 90.0_esmf_kind_r4 + lov = real(float(gfld%igdtmpl(21))/1.0E6, kind=esmf_kind_r4) + + print*, "- CALL CALCALPHA_ROTLATLON with center lat,lon = ",latin1,lov + call calcalpha_rotlatlon(lat,lon,latin1,lov,alpha) + elseif (gfld%igdtnum == 30) then ! grid definition template number - lambert conformal grid. lov = real(float(gfld%igdtmpl(14))/1.0E6, kind=esmf_kind_r4) @@ -3087,7 +3095,7 @@ subroutine read_winds(u,v,localpet,octet_23,rlevs,lugb,pdt_num) u(:,:,vlev) = u_tmp v(:,:,vlev) = v_tmp endif - else if (gfld%igdtnum == 32769) then ! grid definition template number - rotated lat/lon grid + else if (gfld%igdtnum == 32769 .or. gfld%igdtnum == 1) then ! grid definition template number - rotated lat/lon grid ws = sqrt(u_tmp**2 + v_tmp**2) wd = real((atan2(-u_tmp,-v_tmp) / d2r), kind=esmf_kind_r4) ! calculate grid-relative wind direction wd = real((wd + alpha + 180.0), kind=esmf_kind_r4) ! Rotate from grid- to earth-relative direction diff --git a/sorc/chgres_cube.fd/model_grid.F90 b/sorc/chgres_cube.fd/model_grid.F90 index 44431f59c..af3d2952f 100644 --- a/sorc/chgres_cube.fd/model_grid.F90 +++ b/sorc/chgres_cube.fd/model_grid.F90 @@ -675,7 +675,7 @@ subroutine define_input_grid_grib2(npets) elseif (gfld%igdtnum == 30) then print*,"- INPUT DATA ON LAMBERT CONFORMAL GRID." input_grid_type = 'lambert' - elseif (gfld%igdtnum == 32769) then + elseif (gfld%igdtnum == 32769 .or. gfld%igdtnum == 1) then print*,"- INPUT DATA ON ROTATED LAT/LON GRID." input_grid_type = 'rotated_latlon' else @@ -1477,10 +1477,14 @@ subroutine gdt_to_gds(igdtnum, igdstmpl, igdtlen, kgds, ni, nj, res) integer, intent(in ) :: igdtnum, igdtlen, igdstmpl(igdtlen) integer, intent( out) :: kgds(200), ni, nj - integer :: iscale + integer :: iscale, i real, intent( out) :: res + real :: clatr, slatr, clonr, dpr, slat + real :: slat0, clat0, clat, clon, rlat + real :: rlon0, rlon, hs + kgds=0 if (igdtnum.eq.32769) then ! rot lat/lon b grid @@ -1528,6 +1532,112 @@ subroutine gdt_to_gds(igdtnum, igdstmpl, igdtlen, kgds, ni, nj, res) res = ((float(kgds(9)) / 1000.0) + (float(kgds(10)) / 1000.0)) & * 0.5 * 111.0 + elseif (igdtnum.eq.1) then ! rot lat/lon b grid using standard wmo + ! template. + + iscale=igdstmpl(10)*igdstmpl(11) + if (iscale == 0) iscale = 1e6 + kgds(1)=205 ! oct 6, rotated lat/lon for Non-E + ! Stagger grid + kgds(2)=igdstmpl(8) ! octs 7-8, Ni + ni = kgds(2) + kgds(3)=igdstmpl(9) ! octs 9-10, Nj + nj = kgds(3) + + kgds(4)=nint(float(igdstmpl(12))/float(iscale)*1000.) ! octs 11-13, Lat of + ! 1st grid point + kgds(5)=nint(float(igdstmpl(13))/float(iscale)*1000.) ! octs 14-16, Lon of + ! 1st grid point + + kgds(6)=0 ! oct 17, resolution and component flags + if (igdstmpl(1)==2 ) kgds(6)=64 + if ( btest(igdstmpl(14),4).OR.btest(igdstmpl(14),5) ) kgds(6)=kgds(6)+128 + if ( btest(igdstmpl(14),3) ) kgds(6)=kgds(6)+8 + + kgds(7)=nint(float(igdstmpl(20))/float(iscale)*1000.) ! octs 18-20, + ! 
Lat of cent of rotation + kgds(8)=nint(float(igdstmpl(21))/float(iscale)*1000.) ! octs 21-23, + ! Lon of cent of rotation + kgds(7) = kgds(7) + 90000.0 + print*, "INPUT LAT, LON CENTER ", kgds(7), kgds(8) + + DPR = 180.0/3.1415926 + CLATR=COS((float(kgds(4))/1000.0)/DPR) + SLATR=SIN((float(kgds(4))/1000.0)/DPR) + CLONR=COS((float(kgds(5))/1000.0)/DPR) + SLAT0=SIN((float(kgds(7))/1000.0)/DPR) + CLAT0=COS((float(kgds(7))/1000.0)/DPR) + + SLAT=CLAT0*SLATR+SLAT0*CLATR*CLONR + CLAT=SQRT(1-SLAT**2) + CLON=(CLAT0*CLATR*CLONR-SLAT0*SLATR)/CLAT + CLON=MIN(MAX(CLON,-1.0),1.0) + + RLAT=DPR*ASIN(SLAT) + + RLON0=float(kgds(8))/1000.0 + + if ((kgds(5)-kgds(8)) > 0) then + HS = -1.0 + else + HS = 1.0 + endif + + RLON = MOD(RLON0+HS*DPR*ACOS(CLON)+3600,360.0) + + kgds(4)=nint(rlat*1000.) ! octs 11-13, Lat of + kgds(5)=nint(rlon*1000.) ! octs 14-16, Lon of + + kgds(12)=nint(float(igdstmpl(15))/float(iscale)*1000.) ! octs 29-31, Lat of + ! last grid point + kgds(13)=nint(float(igdstmpl(16))/float(iscale)*1000.) ! octs 32-34, Lon of + ! last grid point + + CLATR=COS((float(kgds(12))/1000.0)/DPR) + SLATR=SIN((float(kgds(12))/1000.0)/DPR) + CLONR=COS((float(kgds(13))/1000.0)/DPR) + + SLAT=CLAT0*SLATR+SLAT0*CLATR*CLONR + RLAT=DPR*ASIN(SLAT) + + CLAT=SQRT(1-SLAT**2) + CLON=(CLAT0*CLATR*CLONR-SLAT0*SLATR)/CLAT + CLON=MIN(MAX(CLON,-1.0),1.0) + + if ((kgds(13)-kgds(8)) > 0) then + HS = -1.0 + else + HS = 1.0 + endif + + RLON = MOD(RLON0+HS*DPR*ACOS(CLON)+3600,360.0) + + print*,'got here last point ',kgds(12), kgds(13) + print*,'got here last point rotated ', rlat, rlon + + kgds(12)=nint(rlat*1000.) ! octs 11-13, Lat of + kgds(13)=nint(rlon*1000.) ! octs 14-16, Lon of + + kgds(9)=igdstmpl(17) + kgds(10)=igdstmpl(18) + + kgds(11) = 0 ! oct 28, scan mode + if (btest(igdstmpl(19),7)) kgds(11) = 128 + if (btest(igdstmpl(19),6)) kgds(11) = kgds(11) + 64 + if (btest(igdstmpl(19),5)) kgds(11) = kgds(11) + 32 + + kgds(19)=0 ! oct 4, # vert coordinate parameters + kgds(20)=255 ! oct 5, used for thinned grids, set to 255 + + res = ((float(kgds(9)) / 1.e6) + (float(kgds(10)) / 1.e6)) & + * 0.5 * 111.0 + + + do i = 1, 25 + print*,'final kgds ',i,kgds(i) + enddo + + elseif(igdtnum==30) then kgds(1)=3 ! oct 6, lambert conformal diff --git a/sorc/chgres_cube.fd/program_setup.F90 b/sorc/chgres_cube.fd/program_setup.F90 index efec5e3a4..895f71c28 100644 --- a/sorc/chgres_cube.fd/program_setup.F90 +++ b/sorc/chgres_cube.fd/program_setup.F90 @@ -54,7 +54,7 @@ module program_setup !! gaussian nemsio files !! - "gfs_sigio" for spectral gfs !! gfs sigio/sfcio files. - character(len=20), public :: external_model="GFS" !< The model that the input data is derived from. Current supported options are: "GFS", "HRRR", "NAM", "RAP". Default: "GFS" + character(len=20), public :: external_model="GFS" !< The model that the input data is derived from. Current supported options are: "GFS", "HRRR", "NAM", "RAP", "RRFS". Default: "GFS" integer, parameter, public :: max_tracers=100 !< Maximum number of atmospheric tracers processed. integer, public :: num_tracers !< Number of atmospheric tracers to be processed. @@ -318,8 +318,8 @@ subroutine read_setup_namelist(filename) !------------------------------------------------------------------------- if (trim(input_type) == "grib2") then - if (.not. any((/character(4)::"GFS","NAM","RAP","HRRR"/)==trim(external_model))) then - call error_handler( "KNOWN SUPPORTED external_model INPUTS ARE GFS, NAM, RAP, AND HRRR. " // & + if (.not. 
any((/character(4)::"GFS","NAM","RAP","HRRR","RRFS"/)==trim(external_model))) then + call error_handler( "KNOWN SUPPORTED external_model INPUTS ARE GFS, NAM, RAP, HRRR, AND RRFS. " // & "IF YOU WISH TO PROCESS GRIB2 DATA FROM ANOTHER MODEL, YOU MAY ATTEMPT TO DO SO AT YOUR OWN RISK. " // & "ONE WAY TO DO THIS IS PROVIDE NAM FOR external_model AS IT IS A RELATIVELY STRAIGHT-" // & "FORWARD REGIONAL GRIB2 FILE. YOU MAY ALSO COMMENT OUT THIS ERROR MESSAGE IN " // & From 33c36775d30daaa0688ff600eaa21facae1e780f Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Tue, 5 Mar 2024 14:57:11 -0500 Subject: [PATCH 03/25] Fix broken links in chgres_cube section of ReadTheDocs (#907) Fixes #905. --- docs/source/chgres_cube.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/chgres_cube.rst b/docs/source/chgres_cube.rst index 9af27656e..b976e500a 100644 --- a/docs/source/chgres_cube.rst +++ b/docs/source/chgres_cube.rst @@ -55,7 +55,7 @@ Program inputs and outputs for global applications **Inputs** -Users may create their own global grids, or use the pre-defined files are located `here `_. +Users may create their own global grids, or use the pre-defined files located `here `_. * FV3 mosaic file - (NetCDF format) * CRES_mosaic.nc @@ -76,7 +76,7 @@ Users may create their own global grids, or use the pre-defined files are locate * CRES_oro_data.tile5.nc * CRES_oro_data.tile6.nc - * FV3 surface climatological files - Located under the `./fix_sfc `_ sub-directory. One file for each tile. NetCDF format. + * FV3 surface climatological files - Located under the `./fix_sfc `_ sub-directory. One file for each tile. NetCDF format. * CRES.facsf.tileX.nc (fractional coverage for strong/weak zenith angle dependent albedo) * CRES.maximum_snow_albedo.tileX.nc (maximum snow albedo) * CRES.slope_type.tileX.nc (slope type) From 658c351047498cce6cfb0e3caeecf7274aaf2b8c Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Thu, 7 Mar 2024 10:33:46 -0500 Subject: [PATCH 04/25] Update orog code to read high-resolution mask and terrain data in NetCDF (#900) Update orog program to read NetCDF versions of the UMD mask, GMTED2010 terrain and the RAMP terrain. Also, remove some unused variables and logic. Baseline the programs that were used to convert these datasets from simple binary to NetCDF. Fixes #786. 
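For reference, each of the converted inputs uses the same simple layout: a regular 30-arc-second latitude/longitude grid with the data stored in a single record ('land_mask' in landcover.umd.30s.nc; 'topo' in topography.gmted2010.30s.nc and topography.antarctica.ramp.30s.nc). The sketch below is illustrative only and is not part of this change; it reads one of the new files with the netCDF Fortran 90 interface, whereas the orog program itself uses the F77-style nf_ calls shown in the diff.

      ! Minimal sketch (not part of this change): read the converted UMD land
      ! cover file with the netCDF Fortran 90 interface.
      program read_landcover_sketch
        use netcdf
        implicit none
        integer, parameter :: idim=43200, jdim=21600  ! 30-arc-second global grid
        integer :: ncid, varid, status
        integer(kind=1), allocatable :: land_mask(:,:)

        allocate(land_mask(idim,jdim))

        status = nf90_open("landcover.umd.30s.nc", NF90_NOWRITE, ncid)
        if (status /= nf90_noerr) stop 'open failed'
        status = nf90_inq_varid(ncid, "land_mask", varid)
        if (status /= nf90_noerr) stop 'inquire failed'
        status = nf90_get_var(ncid, varid, land_mask)
        if (status /= nf90_noerr) stop 'read failed'
        status = nf90_close(ncid)

        ! 0 = non-land, 1 = land (see the 'land_mask' attributes written by mask.f90).
        print*, 'land_mask range: ', minval(land_mask), maxval(land_mask)
      end program read_landcover_sketch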
--- CMakeLists.txt | 2 + docs/source/ufs_utils.rst | 6 +- sorc/orog_mask_tools.fd/CMakeLists.txt | 5 + .../orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F | 287 +++++------------- sorc/orog_mask_tools.fd/orog.fd/netcdf_io.F90 | 4 +- .../orog_netcdf_tools.fd/README | 8 + .../mask.fd/CMakeLists.txt | 17 ++ .../orog_netcdf_tools.fd/mask.fd/mask.f90 | 164 ++++++++++ .../orog_netcdf_tools.fd/mask.fd/runit.sh | 20 ++ .../ramp.fd/CMakeLists.txt | 17 ++ .../orog_netcdf_tools.fd/ramp.fd/ramp.f90 | 164 ++++++++++ .../orog_netcdf_tools.fd/ramp.fd/runit.sh | 20 ++ .../topo.fd/CMakeLists.txt | 17 ++ .../orog_netcdf_tools.fd/topo.fd/runit.sh | 20 ++ .../orog_netcdf_tools.fd/topo.fd/topo.f90 | 157 ++++++++++ ush/fv3gfs_make_orog.sh | 9 +- 16 files changed, 702 insertions(+), 215 deletions(-) create mode 100644 sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/README create mode 100644 sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/mask.fd/CMakeLists.txt create mode 100644 sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/mask.fd/mask.f90 create mode 100755 sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/mask.fd/runit.sh create mode 100644 sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/ramp.fd/CMakeLists.txt create mode 100644 sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/ramp.fd/ramp.f90 create mode 100755 sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/ramp.fd/runit.sh create mode 100644 sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/topo.fd/CMakeLists.txt create mode 100755 sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/topo.fd/runit.sh create mode 100644 sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/topo.fd/topo.f90 diff --git a/CMakeLists.txt b/CMakeLists.txt index ac452ab4f..24dea3c93 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -24,6 +24,8 @@ option(FRENCTOOLS "Enable building fre-nctools.fd" ON) option(GRIDTOOLS "Enable building grid_tools.fd" ON) option(CHGRES "Enable building chgres_cube.fd" ON) option(OROG_MASK_TOOLS "Enable building orog_mask_tools.fd" ON) +# OROG_MASK_TOOLS must be ON for OROG_NETCDF_TOOLS to build. +option(OROG_NETCDF_TOOLS "Enable building orog_netcdf_tools.fd" OFF) option(SFC_CLIMO_GEN "Enable building sfc_climo_gen.fd" ON) option(VCOORD_GEN "Enable building vcoord_gen.fd" ON) option(FVCOMTOOLS "Enable building fvcom_tools.fd" ON) diff --git a/docs/source/ufs_utils.rst b/docs/source/ufs_utils.rst index 7810f3817..388ec4cf6 100644 --- a/docs/source/ufs_utils.rst +++ b/docs/source/ufs_utils.rst @@ -240,11 +240,11 @@ Program inputs and outputs * The "grid" files (CRES_grid.tile#.nc) containing the geo-reference records for the grid - (NetCDF). Created by the make_hgrid or regional_esg_grid programs. * Global 30-arc-second University of Maryland land cover data. Used to create the land-sea mask. - * landcover30.fixed (unformatted binary). Located here `./fix/fix_orog `_. + * landcover.umd.30s.nc (NetCDF). Located here `./fix/fix_orog `_. * Global 30-arc-second USGS GMTED2010 orography data. - * gmted2010.30sec.int (unformatted binary). Located here `./fix/fix_orog `_. + * topography.gmted2010.30s.nc (NetCDF). Located here `./fix/fix_orog `_. * 30-arc-second RAMP Antarctic terrain data (Radarsat Antarctic Mapping Project) - * thirty.second.antarctic.new.bin (unformatted binary). Located here `./fix/fix_orog `_. + * topography.antarctica.ramp.30s.nc (NetCDF). Located here `./fix/fix_orog `_. 
**Output data:** diff --git a/sorc/orog_mask_tools.fd/CMakeLists.txt b/sorc/orog_mask_tools.fd/CMakeLists.txt index 59f9635dd..75c483aa7 100644 --- a/sorc/orog_mask_tools.fd/CMakeLists.txt +++ b/sorc/orog_mask_tools.fd/CMakeLists.txt @@ -7,6 +7,11 @@ add_subdirectory(orog.fd) add_subdirectory(orog_gsl.fd) add_subdirectory(lake.fd) add_subdirectory(inland.fd) +if(OROG_NETCDF_TOOLS) + add_subdirectory(orog_netcdf_tools.fd/mask.fd) + add_subdirectory(orog_netcdf_tools.fd/topo.fd) + add_subdirectory(orog_netcdf_tools.fd/ramp.fd) +endif() # If doxygen documentation we enabled, build it. if(ENABLE_DOCS) diff --git a/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F b/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F index 96e3b38d8..58b5ecb86 100644 --- a/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F +++ b/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F @@ -2,8 +2,8 @@ C> Terrain maker for global spectral model. C> @author Mark Iredell @date 92-04-16 -C> This program creates 7 terrain-related files computed from the navy -C> 10-minute terrain dataset. The model physics grid parameters and +C> This program creates 7 terrain-related files computed from the +C> GMTED2010 terrain dataset. The model physics grid parameters and C> spectral truncation and filter parameters are read by this program as C> input. C> @@ -49,13 +49,9 @@ C> SPECTRAL TRUNCATION (NM), RHOMBOIDAL FLAG (NR), C> AND FIRST AND SECOND FILTER PARAMETERS (NF0,NF1). C> RESPECTIVELY READ IN FREE FORMAT. -C> - UNIT235 - GTOPO 30" AVR for ZAVG elevation -C> - UNIT10 - 30" UMD land (lake) cover mask see MSKSRC switch -C> - XUNIT11 - GTOPO AVR -C> - XUNIT12 - GTOPO STD DEV -C> - XUNIT13 - GTOPO MAX -C> - UNIT14 - GTOPO SLM (10' NAVY if switched to get lakes -C> - UNIT15 - GICE Grumbine 30" RAMP Antarctica orog IMNx3616 +C> - NCID - GMTED2010 USGS orography (NetCDF) +C> - NCID - 30" UMD land cover mask. (NetCDF) +C> - NCID - GICE Grumbine 30" RAMP Antarctica orog IMNx3601. (NetCDF) C> - UNIT25 - Ocean land-sea mask on gaussian grid C> C> OUTPUT FILES: @@ -203,9 +199,9 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, real, PARAMETER :: PI=3.1415926535897931 integer, PARAMETER :: NMT=14 - integer :: efac,blat,zsave1,zsave2,itopo,kount - integer :: kount2,islmx,jslmx,oldslm,msksrc,mskocn,notocn - integer :: i,j,nx,ny,ncid,js,jn,iw,ie,k,it,jt,i1,error,id_dim + integer :: efac,blat,zsave1,zsave2 + integer :: mskocn,notocn + integer :: i,j,nx,ny,ncid,js,jn,iw,ie,k,it,jt,error,id_dim integer :: id_var,nx_in,ny_in,fsize,wgta,IN,INW,INE,IS,ISW,ISE integer :: M,N,IMT,IRET,ios,iosg,latg2,istat,itest,jtest integer :: i_south_pole,j_south_pole,i_north_pole,j_north_pole @@ -278,17 +274,14 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, DEGRAD = 180./PI SPECTR = NM .GT. 0 ! if NM <=0 grid is assumed lat/lon FILTER = .TRUE. ! Spectr Filter defaults true and set by NF1 & NF0 -! MSKSRC = 0 ! MSKSRC=0 navy 10 lake msk, 1 UMD 30, -1 no lakes - MSKSRC = 1 ! MSKSRC=0 navy 10 lake msk, 1 UMD 30, -1 no lakes REVLAT = BLAT .LT. 0 ! Reverse latitude/longitude for output - ITOPO = 1 ! topo 30" read, otherwise tiles (opt offline) MSKOCN = 1 ! Ocean land sea mask =1, =0 if not present NOTOCN = 1 ! =1 Ocean lsm input reverse: Ocean=1, land=0 ! --- The LSM Gaussian file from the ocean model sometimes arrives with ! --- 0=Ocean and 1=Land or it arrives with 1=Ocean and 0=land without ! --- metadata to distinguish its disposition. The AI below mitigates this. - print *,' In TERSUB, ITOPO=',itopo + print *,' In TERSUB' if (mskocn .eq. 
1)then print *,' Ocean Model LSM Present and ' print *, ' Overrides OCEAN POINTS in LSM: mskocn=',mskocn @@ -296,71 +289,23 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, print *,' Ocean LSM Reversed: NOTOCN=',notocn endif endif -C -C --- old S-Y. files -C- OPEN(UNIT=11,FORM='FORMATTED',ERR=900) ! average -C- OPEN(UNIT=12,FORM='FORMATTED',ERR=900) ! Std Dev -C- OPEN(UNIT=13,FORM='FORMATTED',ERR=900) ! maximum -C- OPEN(UNIT=14,FORM='FORMATTED',ERR=900) ! sea-land-lake-mask -C -! --- READ(11,11) ZAVG -! --- READ(12,11) ZVAR -! --- READ(13,11) ZMAX -! --- 11 FORMAT(20I4) -! -! --- MSKSRC 0 navy 10' lake mask, =1 for 30" UMD lake mask, -! --- MSKSRC internally set if above fails at -1 for no lakes -! --- - IF (MSKSRC .eq. 0 ) then - READ(14,12,iostat=ios) ZSLMX - 12 FORMAT(80I1) - if (ios.ne.0) then - MSKSRC=-1 - print *,' navy10 lake mask rd fail -- ios,MSKSRC:',ios,MSKSRC - endif - ELSE - print *,' Attempt to open/read UMD 30" slmsk MSKSRC=',MSKSRC -! --- not 0 so MSKSRC=1 and attempt to open/read UMD 30" slmsk -! open(10,file= -! &"/scratch2/portfolios/NCEPDEV/global/noscrub/Jordan.Alpert/wx23ja -! &/terrain30/landcover30.fixed", -! & recl=43200*21600, access='direct',iostat=istat) - open(10,file="landcover30.fixed", - & recl=43200*21600, access='direct',iostat=istat) - - IF (istat.ne.0) then - MSKSRC=-1 - print *,' UMD lake mask open failed -- ios,MSKSRC:',istat,MSKSRC - ELSE -! - read(10, rec=1,iostat=istat) UMD - print *,' UMD lake mask opened OK -- ios,MSKSRC:',istat,MSKSRC -! - ENDIF -! -------------- - IF (istat.ne.0) then -! --- When UMD read fails attempt to read navy 10' - print *,' UMD lake mask rd err -- trying navy 10',istat - MSKSRC=0 - print *,' ***** MSKSRC set to 0 MSKSRC=',MSKSRC - if (MSKSRC .eq. 0 ) then - READ(14,12,iostat=ios) ZSLMX - if (ios.ne.0) then - MSKSRC=-1 - print *,' navy10 lake mask rd fail - ios,MSKSRC:',ios,MSKSRC - endif - endif - ELSE - print *,' UMD lake, UMD(50,50)=',UMD(50,50),MSKSRC - ENDIF -! -------------- -! --- good UMD land cover read and MSKSRC=1 - ENDIF + + print *,' Attempt to open/read UMD 30sec slmsk.' + + error=NF__OPEN("./landcover.umd.30s.nc",NF_NOWRITE,fsize,ncid) + call netcdf_err(error, 'Open file landcover.umd.30s.nc' ) + error=nf_inq_varid(ncid, 'land_mask', id_var) + call netcdf_err(error, 'Inquire varid of land_mask') + error=nf_get_var_int1(ncid, id_var, UMD) + call netcdf_err(error, 'Inquire data of land_mask') + error = nf_close(ncid) + + print *,' UMD lake, UMD(50,50)=',UMD(50,50) C C- READ_G for global 30" terrain C - print *,' About to call read_g, ITOPO=',ITOPO - if ( ITOPO .ne. 0 ) call read_g(glob,ITOPO) + print *,' Call read_g to read global topography' + call read_g(glob) ! --- transpose even though glob 30" is from S to N and NCEP std is N to S do j=1,jmn/2 do I=1,imn @@ -400,82 +345,30 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, ! --- ZAVG initialize from glob ZAVG=glob - SELECTCASE(MSKSRC) -C---- 30" sea land mask. 0 are water (lake or ocean) - CASE(1) -! --- transpose even though glob 30" is from S to N and NCEP std is N to S - do j=1,jmn/2 - do I=1,imn +! --- transpose mask even though glob 30" is from N to S and NCEP std is S to N + do j=1,jmn/2 + do I=1,imn jt=jmn - j + 1 i3save = UMD(I,j) UMD(I,j)=UMD(I,jt) UMD(I,jt) = i3save - enddo - enddo + enddo + enddo ! --- transpose UMD as USGS 30" is from dateline and NCEP std is 0 - do j=1,jmn - do i=1,imn/2 + do j=1,jmn + do i=1,imn/2 it=imn/2 + i i3save = UMD(i,J) UMD(i,J)=UMD(it,J) UMD(it,J) = i3save - enddo - enddo -! 
--- UMD slmsk with 30" lakes - do j=1,jmn - do i=1,imn - if ( UMD(i,j) .eq. 0 ) ZSLM(i,j) = 0 - enddo - enddo -! --- Global land in slm plus lakes on 30" grid and elev set over globe -! --- -! --- When navy 10' mask is set MSKSRC=0 - CASE(0) -! --- MSKSRC 0 navy 10' lake mask, =1 for 30" UMD lake mask, -1 no lakes - print *,' NAVY 10 (8) slmsk for lakes, MSKSRC=',MSKSRC - kount = 0 - kount2 = 0 - do j=1,jmn - oldslm = ZSLM(IMN,j) - do i=1,imn - i1 = i + 1 -! --- slmsk with 10' lakes - if ( glob(i,j) .eq. -9999 ) then - ZSLM(i,j) = 0 - kount = kount + 1 - endif - islmx=(i-1)/16 + 1 - jslmx=(j-1)/16 + 1 - if ( ZSLMX(islmx,jslmx) .eq. 0 ) then - if ( j .gt. 8 .and. j .lt. JMN-8 ) then - if (i1 .gt. IMN ) i1 = i1 - IMN -! ----- - if(ZSLM(i,j).eq.1 .and. oldslm .eq. 1 .and. ZSLM(i1,j).eq.1)then - if (i .ne. 1) oldslm = ZSLM(i,j) - ZSLM(i,j) = 0 - kount2 = kount2 + 1 - endif -! ----- - endif - endif - enddo - enddo -! --- - CASE(-1) - print *,' ***** set slm from 30" glob, MSKSRC=',MSKSRC - kount = 0 - kount2 = 0 - do j=1,jmn - do i=1,imn - i1 = i + 1 -! --- UMD slmsk with 10' lakes and set ZAVG from 30" glob - if ( glob(i,j) .eq. -9999 ) then - ZSLM(i,j) = 0 - kount = kount + 1 - endif - enddo - enddo - END SELECT + enddo + enddo +! --- Non-land is 0. + do j=1,jmn + do i=1,imn + if ( UMD(i,j) .eq. 0 ) ZSLM(i,j) = 0 + enddo + enddo deallocate (ZSLMX,UMD,glob) ! --- @@ -562,16 +455,27 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, & ' Before GICE ZAVG(1,52)=',ZAVG(1,52),ZSLM(1,52) print *, & ' Before GICE ZAVG(1,112)=',ZAVG(1,JMN-112),ZSLM(1,112) -! GICE: Grumbine 30" Antarctica orog IMNx3616 from S to N & wraped E-W. -! NB: Zfields are S to N and W-E! - iosg = 0 - READ(15,iostat=iosg) GICE - if(iosg .ne. 0 ) then + +! Read 30-sec Antarctica RAMP data. Points scan from South +! to North, and from Greenwich to Greenwich. + +! The error handling here needs to be cleaned up. + iosg = 0 + error=NF__OPEN("./topography.antarctica.ramp.30s.nc", + & NF_NOWRITE,fsize,ncid) + call netcdf_err(error, 'Opening RAMP topo file' ) + error=nf_inq_varid(ncid, 'topo', id_var) + call netcdf_err(error, 'Inquire varid of RAMP topo') + error=nf_get_var_real(ncid, id_var, GICE) + iosg=error + call netcdf_err(error, 'Inquire data of RAMP topo') + error = nf_close(ncid) + + if(iosg .ne. 0 ) then print *,' *** Err on reading GICE record, iosg=',iosg print *,' exec continues but NO GICE correction done ' -! stop - else - print *,' GICE 30" Antarctica RAMP orog 43200x3616 read OK' + else + print *,' GICE 30" Antarctica RAMP orog 43201x3601 read OK' print *,' Processing! ' print *,' Processing! ' print *,' Processing! ' @@ -591,7 +495,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, &' orig:',i5,i4,' Lat=',f7.3,f8.2,'E',' GICE=',f8.1) enddo enddo - endif + endif deallocate (GICE) @@ -1666,14 +1570,11 @@ SUBROUTINE MAKEMT(ZAVG,ZSLM,ORO,SLM,VAR,VAR4, INTEGER ZAVG(IMN,JMN),ZSLM(IMN,JMN) DIMENSION ORO(IM,JM),SLM(IM,JM),VAR(IM,JM),VAR4(IM,JM) DIMENSION IST(IM,jm),IEN(IM,jm),JST(JM),JEN(JM),numi(jm) - INTEGER mskocn,isave LOGICAL FLAG, DEBUG C==== DATA DEBUG/.TRUE./ DATA DEBUG/.FALSE./ C ! ---- OCLSM holds the ocean (im,jm) grid -! --- mskocn=1 Use ocean model sea land mask, OK and present, -! 
--- mskocn=0 dont use Ocean model sea land mask, not OK, not present print *,' _____ SUBROUTINE MAKEMT ' C---- GLOBAL XLAT AND XLON ( DEGREE ) C @@ -2578,7 +2479,7 @@ SUBROUTINE MAKEPC2(ZAVG,ZSLM,THETA,GAMMA,SIGMA, integer i,j,i1,j1,i2,jst,jen,numx,i0,ip1,ijax integer ilist(IMN) logical inside_a_polygon - LOGICAL FLAG, DEBUG + LOGICAL DEBUG C=== DATA DEBUG/.TRUE./ DATA DEBUG/.FALSE./ C @@ -3238,22 +3139,20 @@ SUBROUTINE MAKEOA2(ZAVG,zslm,VAR,GLAT,OA4,OL,IOA4,ELVMAX, real XNSUM(IM,JM),XNSUM1(IM,JM),XNSUM2(IM,JM) real XNSUM3(IM,JM),XNSUM4(IM,JM) real VAR(IM,JM),OL(IM,JM,4) - LOGICAL FLAG integer i,j,ilist(IMN),numx,i1,j1,ii1 - integer KWD,II,npts + integer KWD real LONO(4),LATO(4),LONI,LATI real DELXN,HC,HEIGHT,XNPU,XNPD,T integer NS0,NS1,NS2,NS3,NS4,NS5,NS6 logical inside_a_polygon real lon,lat,dlon,dlat,dlat_old real lon1,lat1,lon2,lat2 - real xnsum11,xnsum12,xnsum21,xnsum22,xnsumx + real xnsum11,xnsum12,xnsum21,xnsum22 real HC_11, HC_12, HC_21, HC_22 real xnsum1_11,xnsum1_12,xnsum1_21,xnsum1_22 real xnsum2_11,xnsum2_12,xnsum2_21,xnsum2_22 real get_lon_angle, get_lat_angle, get_xnsum - integer ist, ien, jst, jen - real xland,xwatr,xl1,xs1,oroavg,slm + integer jst, jen C C---- GLOBAL XLAT AND XLON ( DEGREE ) C @@ -4421,51 +4320,32 @@ SUBROUTINE SPFFT1(IMAX,INCW,INCG,KMAX,W,G,IDIR) !> Read input global 30-arc second orography data. !! !! @param[out] glob The orography data. -!! @param[in] itopo Not used. !! @author Jordan Alpert NOAA/EMC - subroutine read_g(glob,ITOPO) + subroutine read_g(glob) implicit none -cc - integer*2 glob(360*120,180*120) -cc - integer ix,jx - integer ia,ja -cc - parameter (ix=40*120,jx=50*120) - parameter (ia=60*120,ja=30*120) -cc - integer*2 idat(ix,jx) - integer itopo -cc - integer i,j,inttyp -cc - real(kind=8) dloin,dlain,rlon,rlat -cc - open(235, file="./fort.235", access='direct', recl=43200*21600*2) - read(235,rec=1)glob - close(235) -cc + + include 'netcdf.inc' + + integer*2, intent(out) :: glob(360*120,180*120) + + integer :: ncid, error, id_var, fsize + + fsize=65536 + + error=NF__OPEN("./topography.gmted2010.30s.nc", + & NF_NOWRITE,fsize,ncid) + call netcdf_err(error, 'Open file topography.gmted2010.30s.nc' ) + error=nf_inq_varid(ncid, 'topo', id_var) + call netcdf_err(error, 'Inquire varid of topo') + error=nf_get_var_int2(ncid, id_var, glob) + call netcdf_err(error, 'Read topo') + error = nf_close(ncid) + print*,' ' call maxmin (glob,360*120*180*120,'global0') -cc -cc - dloin=1.d0/120.d0 - dlain=1.d0/120.d0 -cc - rlon= -179.995833333333333333333333d0 - rlat= 89.995833333333333333333333d0 -cc - inttyp=-1 ! average rectangular subset -ccmr inttyp= 1 ! take closest grid point value -ccmr inttyp= 0 ! interpolate from four closest grid point values -cc -! call la2ga_gtopo30(glob,360*120,180*120, -! & dloin,dlain,rlon,rlat,inttyp, -! & .true.,glob, -! & 0,lonf,latg) -cc + return - end + end subroutine read_g !> Print the maximum, mininum, mean and !! standard deviation of an array. 
@@ -5036,7 +4916,6 @@ subroutine nanc(a,l,c) data inaq4/x'FFFFFFFF'/ c real(kind=8)a(l),rtc,t1,t2 - character*24 cn character*(*) c c t1=rtc() cgwv print *, ' nanc call ',c diff --git a/sorc/orog_mask_tools.fd/orog.fd/netcdf_io.F90 b/sorc/orog_mask_tools.fd/orog.fd/netcdf_io.F90 index 09d994b0b..7ff8ce725 100644 --- a/sorc/orog_mask_tools.fd/orog.fd/netcdf_io.F90 +++ b/sorc/orog_mask_tools.fd/orog.fd/netcdf_io.F90 @@ -25,7 +25,7 @@ subroutine write_netcdf(im, jm, slm, land_frac, oro, orf, hprime, ntiles, tile, real, intent(in), dimension(im,jm) :: slm, oro, orf, geolon, geolat, land_frac real, intent(in), dimension(im,jm,14):: hprime character(len=128) :: outfile - integer :: error, ncid, i + integer :: error, ncid integer :: header_buffer_val = 16384 integer :: fsize=65536, inital = 0 integer :: dim1, dim2 @@ -245,7 +245,7 @@ subroutine write_mask_netcdf(im, jm, slm, land_frac, ntiles, tile, geolon, geola integer, intent(in):: im, jm, ntiles, tile real, intent(in), dimension(im,jm) :: slm, geolon, geolat, land_frac character(len=128) :: outfile - integer :: error, ncid, i + integer :: error, ncid integer :: header_buffer_val = 16384 integer :: fsize=65536, inital = 0 integer :: dim1, dim2 diff --git a/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/README b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/README new file mode 100644 index 000000000..14dd17579 --- /dev/null +++ b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/README @@ -0,0 +1,8 @@ +These utilities were used to convert input data to the +orography code from binary to netcdf. + +ramp.fd - Convert Antarctic RAMP terrain data. + +orog.fd - Convert the GMTED2010 terrain data. + +mask.fd - Convert the UMD land mask data. diff --git a/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/mask.fd/CMakeLists.txt b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/mask.fd/CMakeLists.txt new file mode 100644 index 000000000..c9d6b34f2 --- /dev/null +++ b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/mask.fd/CMakeLists.txt @@ -0,0 +1,17 @@ +list(APPEND fortran_src + mask.f90 +) + +if(CMAKE_Fortran_COMPILER_ID MATCHES "^(Intel)$") + set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -r8 -i4 -convert big_endian") +elseif(CMAKE_Fortran_COMPILER_ID MATCHES "^(GNU)$") + set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fdefault-real-8") +endif() + +set(exe_name mask.exe) +add_executable(${exe_name} ${fortran_src}) +target_link_libraries( + ${exe_name} + NetCDF::NetCDF_Fortran) + +install(TARGETS ${exe_name}) diff --git a/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/mask.fd/mask.f90 b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/mask.fd/mask.f90 new file mode 100644 index 000000000..e83bc6932 --- /dev/null +++ b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/mask.fd/mask.f90 @@ -0,0 +1,164 @@ + program mask_netcdf + +! Convert the UMD land use data to netcdf. 
+ + use netcdf + + implicit none + + integer*4, parameter :: idim=43200 + integer*4, parameter :: jdim=21600 + integer*4, parameter :: idim_p1=43201 + integer*4, parameter :: jdim_p1=21601 + + character(len=150) :: filenetcdf, fileraw + + integer :: i, istat, ncid, status, dim_i, dim_j + integer :: dim_ip1, dim_jp1 + integer :: id_lon, id_lat, id_data + integer :: id_lat_corner, id_lon_corner + + integer(kind=1), allocatable :: mask(:,:) + + real(kind=8), allocatable :: lats(:), lons(:) + real(kind=8), allocatable :: lats_corner(:), lons_corner(:) + real(kind=8) :: lat11, lon11, dx, dy + + dx = 1.0_8/120.0_8 + dy = -(1.0_8/120.0_8) + + lat11 = 90.0_8 + dy*0.5_8 + lon11 = -180.0_8 + dx*0.5_8 + + allocate(lons(idim),lats(jdim),mask(idim,jdim)) + allocate(lons_corner(idim_p1),lats_corner(jdim_p1)) + + do i = 1, idim + lons(i) = real((i-1),8) * dx + lon11 + print*,'lon ',i,lons(i) + enddo + + do i = 1, jdim + lats(i) = real((i-1),8) * dy + lat11 + print*,'lat ',i,lats(i) + enddo + + lat11 = 90.0_8 + lon11 = -180.0_8 + + do i = 1, idim_p1 + lons_corner(i) = real((i-1),8) * dx + lon11 + print*,'lon_corner ',i,lons_corner(i) + enddo + + do i = 1, jdim_p1 + lats_corner(i) = real((i-1),8) * dy + lat11 + print*,'lat_corner ',i,lats_corner(i) + enddo + + fileraw="/scratch1/NCEPDEV/global/glopara/fix/raw/orog/landcover30.fixed" + + open(11, file=trim(fileraw), access='direct', recl=idim*jdim) + read(11, rec=1, iostat=istat) mask + if (istat /= 0) stop 99 + close(11) + + print*,'mask ', maxval(mask),minval(mask) + where(mask > 0) mask = 1 + + filenetcdf="./landcover.umd.30s.nc" + + print*,"- CREATE FILE: ", trim(filenetcdf) + status=nf90_create(filenetcdf, IOR(NF90_NETCDF4,NF90_CLASSIC_MODEL), ncid) + if (status /= nf90_noerr) stop 1 + + status=nf90_def_dim(ncid, 'idim', idim, dim_i) + if (status /= nf90_noerr) stop 3 + + status=nf90_def_dim(ncid, 'jdim', jdim, dim_j) + if (status /= nf90_noerr) stop 2 + + status=nf90_def_dim(ncid, 'idim_p1', (idim+1), dim_ip1) + if (status /= nf90_noerr) stop 4 + + status=nf90_def_dim(ncid, 'jdim_p1', (jdim+1), dim_jp1) + if (status /= nf90_noerr) stop 5 + + status=nf90_put_att(ncid, nf90_global, 'source', 'Univ. 
of Maryland land use data') + if (status /= nf90_noerr) stop 6 + + status=nf90_put_att(ncid, nf90_global, 'reference', 'http://glcf.umiacs.umd.edu/data/landcover/data.shtml') + if (status /= nf90_noerr) stop 66 + + status=nf90_put_att(ncid, nf90_global, 'projection', 'regular lat/lon') + if (status /= nf90_noerr) stop 67 + + status=nf90_def_var(ncid, 'lat', nf90_double, dim_j, id_lat) + if (status /= nf90_noerr) stop 17 + + status=nf90_put_att(ncid, id_lat, 'long_name', 'grid cell center latitude') + if (status /= nf90_noerr) stop 10 + + status=nf90_put_att(ncid, id_lat, 'units', 'degrees') + if (status /= nf90_noerr) stop 65 + + status=nf90_def_var(ncid, 'lat_corner', nf90_double, dim_jp1, id_lat_corner) + if (status /= nf90_noerr) stop 37 + + status=nf90_put_att(ncid, id_lat_corner, 'long_name', 'grid cell corner latitude') + if (status /= nf90_noerr) stop 38 + + status=nf90_put_att(ncid, id_lat_corner, 'units', 'degrees') + if (status /= nf90_noerr) stop 68 + + status=nf90_def_var(ncid, 'lon', nf90_double, dim_i, id_lon) + if (status /= nf90_noerr) stop 16 + + status=nf90_put_att(ncid, id_lon, 'long_name', 'grid cell center longitude') + if (status /= nf90_noerr) stop 10 + + status=nf90_put_att(ncid, id_lon, 'units', 'degrees') + if (status /= nf90_noerr) stop 69 + + status=nf90_def_var(ncid, 'lon_corner', nf90_double, dim_ip1, id_lon_corner) + if (status /= nf90_noerr) stop 16 + + status=nf90_put_att(ncid, id_lon_corner, 'long_name', 'grid cell corner longitude') + if (status /= nf90_noerr) stop 40 + + status=nf90_put_att(ncid, id_lon_corner, 'units', 'degrees') + if (status /= nf90_noerr) stop 70 + + status=nf90_def_var(ncid, 'land_mask', nf90_byte, (/dim_i,dim_j/), id_data) + if (status /= nf90_noerr) stop 20 + + status=nf90_put_att(ncid, id_data, 'units', 'category') + if (status /= nf90_noerr) stop 75 + + status=nf90_put_att(ncid, id_data, 'Non-land', int((/0/))) + if (status /= nf90_noerr) stop 55 + + status=nf90_put_att(ncid, id_data, 'Land', int((/1/))) + if (status /= nf90_noerr) stop 59 + + status=nf90_enddef(ncid) + if (status /= nf90_noerr) stop 22 + + status=nf90_put_var(ncid, id_lon, lons) + if (status /= nf90_noerr) stop 19 + + status=nf90_put_var(ncid, id_lon_corner, lons_corner) + if (status /= nf90_noerr) stop 59 + + status=nf90_put_var(ncid, id_lat, lats) + if (status /= nf90_noerr) stop 20 + + status=nf90_put_var(ncid, id_lat_corner, lats_corner) + if (status /= nf90_noerr) stop 57 + + status=nf90_put_var(ncid, id_data, mask) + if (status /= nf90_noerr) stop 24 + + status=nf90_close(ncid) + + end program mask_netcdf diff --git a/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/mask.fd/runit.sh b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/mask.fd/runit.sh new file mode 100755 index 000000000..8cc802868 --- /dev/null +++ b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/mask.fd/runit.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +# Run on Hera. 
+ +#SBATCH --ntasks=1 --nodes=1 +#SBATCH -t 0:03:00 +#SBATCH -A fv3-cpu +#SBATCH -q debug +#SBATCH -J fv3 +#SBATCH -o ./log +#SBATCH -e ./log + +set -x + +source ../../../machine-setup.sh > /dev/null 2>&1 +module use ../../../../modulefiles +module load build.$target.intel +module list + +../../../../exec/mask.exe diff --git a/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/ramp.fd/CMakeLists.txt b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/ramp.fd/CMakeLists.txt new file mode 100644 index 000000000..384176eff --- /dev/null +++ b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/ramp.fd/CMakeLists.txt @@ -0,0 +1,17 @@ +list(APPEND fortran_src + ramp.f90 +) + +if(CMAKE_Fortran_COMPILER_ID MATCHES "^(Intel)$") + set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -r8 -i4 -convert big_endian") +elseif(CMAKE_Fortran_COMPILER_ID MATCHES "^(GNU)$") + set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fdefault-real-8") +endif() + +set(exe_name ramp.exe) +add_executable(${exe_name} ${fortran_src}) +target_link_libraries( + ${exe_name} + NetCDF::NetCDF_Fortran) + +install(TARGETS ${exe_name}) diff --git a/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/ramp.fd/ramp.f90 b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/ramp.fd/ramp.f90 new file mode 100644 index 000000000..b897ddd16 --- /dev/null +++ b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/ramp.fd/ramp.f90 @@ -0,0 +1,164 @@ + program ramp_netcdf + +! Convert the antarctica RAMP data to netcdf. + + use netcdf + + implicit none + + integer*4, parameter :: idim=43201 + integer*4, parameter :: jdim=3601 + integer*4, parameter :: idim_p1=43202 + integer*4, parameter :: jdim_p1=3602 + + character(len=150) :: filenetcdf, fileraw + + integer :: i, istat, ncid, status, dim_i, dim_j + integer :: dim_ip1, dim_jp1 + integer :: id_lon, id_lat, id_data + integer :: id_lat_corner, id_lon_corner + + real(kind=4), allocatable :: topo(:,:) + + real(kind=8), allocatable :: lats(:), lons(:) + real(kind=8), allocatable :: lats_corner(:), lons_corner(:) + real(kind=8) :: lat11, lon11, dx, dy + + dx = 1.0_8/120.0_8 + dy = 1.0_8/120.0_8 + + lat11 = -(90.0_8) + dy*0.5_8 + lon11 = 0.0_8 + dx*0.5_8 + + allocate(lons(idim),lats(jdim),topo(idim,jdim)) + allocate(lons_corner(idim_p1),lats_corner(jdim_p1)) + + do i = 1, idim + lons(i) = real((i-1),8) * dx + lon11 + if (lons(i) > 360.0_8) lons(i) = 360.0_8 - lons(i) + print*,'lon ',i,lons(i) + enddo + + do i = 1, jdim + lats(i) = real((i-1),8) * dy + lat11 + print*,'lat ',i,lats(i) + enddo + + lat11 = -90.0_8 + lon11 = 0.0_8 + + do i = 1, idim_p1 + lons_corner(i) = real((i-1),8) * dx + lon11 + if (lons_corner(i) > 360.0_8) lons_corner(i) = 360.0_8 - lons_corner(i) + print*,'lon_corner ',i,lons_corner(i) + enddo + + do i = 1, jdim_p1 + lats_corner(i) = real((i-1),8) * dy + lat11 + print*,'lat_corner ',i,lats_corner(i) + enddo + + fileraw="/scratch1/NCEPDEV/global/glopara/fix/raw/orog/thirty.second.antarctic.new.bin" + + open(11, file=trim(fileraw), form='unformatted', access='sequential', iostat=istat) + print*,'iostat on open ',istat + read(11, iostat=istat) topo + print*,'iostat on read ',istat + if (istat /= 0) stop 99 + close(11) + + print*,'topo ', maxval(topo),minval(topo) + + print*,'point 1/1 ',topo(1,1) + print*,'point idim/jdim ',topo(idim,jdim) + + filenetcdf="./topography.antarctica.ramp.30s.nc" + + print*,"- CREATE FILE: ", trim(filenetcdf) + status=nf90_create(filenetcdf, IOR(NF90_NETCDF4,NF90_CLASSIC_MODEL), ncid) + if (status /= nf90_noerr) stop 1 + + status=nf90_def_dim(ncid, 'idim', idim, dim_i) + if (status /= nf90_noerr) 
stop 3 + + status=nf90_def_dim(ncid, 'jdim', jdim, dim_j) + if (status /= nf90_noerr) stop 2 + + status=nf90_def_dim(ncid, 'idim_p1', (idim+1), dim_ip1) + if (status /= nf90_noerr) stop 4 + + status=nf90_def_dim(ncid, 'jdim_p1', (jdim+1), dim_jp1) + if (status /= nf90_noerr) stop 5 + + status=nf90_put_att(ncid, nf90_global, 'source', 'RADARSAT ANTARCTIC MAPPING PROJECT (RAMP) TOPOGRAPHY DATA') + if (status /= nf90_noerr) stop 6 + + status=nf90_put_att(ncid, nf90_global, 'projection', 'regular lat/lon') + if (status /= nf90_noerr) stop 67 + + status=nf90_def_var(ncid, 'lat', nf90_double, dim_j, id_lat) + if (status /= nf90_noerr) stop 17 + + status=nf90_put_att(ncid, id_lat, 'long_name', 'grid cell center latitude') + if (status /= nf90_noerr) stop 10 + + status=nf90_put_att(ncid, id_lat, 'units', 'degrees') + if (status /= nf90_noerr) stop 85 + + status=nf90_def_var(ncid, 'lat_corner', nf90_double, dim_jp1, id_lat_corner) + if (status /= nf90_noerr) stop 37 + + status=nf90_put_att(ncid, id_lat_corner, 'long_name', 'grid cell corner latitude') + if (status /= nf90_noerr) stop 38 + + status=nf90_put_att(ncid, id_lat_corner, 'units', 'degrees') + if (status /= nf90_noerr) stop 86 + + status=nf90_def_var(ncid, 'lon', nf90_double, dim_i, id_lon) + if (status /= nf90_noerr) stop 16 + + status=nf90_put_att(ncid, id_lon, 'long_name', 'grid cell center longitude') + if (status /= nf90_noerr) stop 10 + + status=nf90_put_att(ncid, id_lon, 'units', 'degrees') + if (status /= nf90_noerr) stop 87 + + status=nf90_def_var(ncid, 'lon_corner', nf90_double, dim_ip1, id_lon_corner) + if (status /= nf90_noerr) stop 16 + + status=nf90_put_att(ncid, id_lon_corner, 'long_name', 'grid cell corner longitude') + if (status /= nf90_noerr) stop 40 + + status=nf90_put_att(ncid, id_lon_corner, 'units', 'degrees') + if (status /= nf90_noerr) stop 88 + + status=nf90_def_var(ncid, 'topo', nf90_float, (/dim_i,dim_j/), id_data) + if (status /= nf90_noerr) stop 20 + + status=nf90_put_att(ncid, id_data, 'long_name', 'topography') + if (status /= nf90_noerr) stop 65 + + status=nf90_put_att(ncid, id_data, 'units', 'meters') + if (status /= nf90_noerr) stop 55 + + status=nf90_enddef(ncid) + if (status /= nf90_noerr) stop 22 + + status=nf90_put_var(ncid, id_lon, lons) + if (status /= nf90_noerr) stop 19 + + status=nf90_put_var(ncid, id_lon_corner, lons_corner) + if (status /= nf90_noerr) stop 59 + + status=nf90_put_var(ncid, id_lat, lats) + if (status /= nf90_noerr) stop 20 + + status=nf90_put_var(ncid, id_lat_corner, lats_corner) + if (status /= nf90_noerr) stop 57 + + status=nf90_put_var(ncid, id_data, topo) + if (status /= nf90_noerr) stop 24 + + status=nf90_close(ncid) + + end program ramp_netcdf diff --git a/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/ramp.fd/runit.sh b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/ramp.fd/runit.sh new file mode 100755 index 000000000..f4739a1ff --- /dev/null +++ b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/ramp.fd/runit.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +# Run on Hera. 
+ +#SBATCH --ntasks=1 --nodes=1 +#SBATCH -t 0:03:00 +#SBATCH -A fv3-cpu +#SBATCH -q debug +#SBATCH -J fv3 +#SBATCH -o ./log +#SBATCH -e ./log + +set -x + +source ../../../machine-setup.sh > /dev/null 2>&1 +module use ../../../../modulefiles +module load build.$target.intel +module list + +../../../../exec/ramp.exe diff --git a/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/topo.fd/CMakeLists.txt b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/topo.fd/CMakeLists.txt new file mode 100644 index 000000000..e34d448ee --- /dev/null +++ b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/topo.fd/CMakeLists.txt @@ -0,0 +1,17 @@ +list(APPEND fortran_src + topo.f90 +) + +if(CMAKE_Fortran_COMPILER_ID MATCHES "^(Intel)$") + set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -r8 -i4 -convert big_endian") +elseif(CMAKE_Fortran_COMPILER_ID MATCHES "^(GNU)$") + set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fdefault-real-8") +endif() + +set(exe_name topo.exe) +add_executable(${exe_name} ${fortran_src}) +target_link_libraries( + ${exe_name} + NetCDF::NetCDF_Fortran) + +install(TARGETS ${exe_name}) diff --git a/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/topo.fd/runit.sh b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/topo.fd/runit.sh new file mode 100755 index 000000000..5e038c83d --- /dev/null +++ b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/topo.fd/runit.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +# Run on Hera. + +#SBATCH --ntasks=1 --nodes=1 +#SBATCH -t 0:03:00 +#SBATCH -A fv3-cpu +#SBATCH -q debug +#SBATCH -J fv3 +#SBATCH -o ./log +#SBATCH -e ./log + +set -x + +source ../../../machine-setup.sh > /dev/null 2>&1 +module use ../../../../modulefiles +module load build.$target.intel +module list + +../../../../exec/topo.exe diff --git a/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/topo.fd/topo.f90 b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/topo.fd/topo.f90 new file mode 100644 index 000000000..2674ae063 --- /dev/null +++ b/sorc/orog_mask_tools.fd/orog_netcdf_tools.fd/topo.fd/topo.f90 @@ -0,0 +1,157 @@ + program topo_netcdf + +! Convert the gmted20120 data to netcdf. 
+ + use netcdf + + implicit none + + integer*4, parameter :: idim=43200 + integer*4, parameter :: jdim=21600 + integer*4, parameter :: idim_p1=43201 + integer*4, parameter :: jdim_p1=21601 + + character(len=150) :: filenetcdf, fileraw + + integer :: i, istat, ncid, status, dim_i, dim_j + integer :: dim_ip1, dim_jp1 + integer :: id_lon, id_lat, id_data + integer :: id_lat_corner, id_lon_corner + + integer(kind=2), allocatable :: topo(:,:) + + real(kind=8), allocatable :: lats(:), lons(:) + real(kind=8), allocatable :: lats_corner(:), lons_corner(:) + real(kind=8) :: lat11, lon11, dx, dy + + dx = 1.0_8/120.0_8 + dy = -(1.0_8/120.0_8) + + lat11 = 90.0_8 + dy*0.5_8 + lon11 = -180.0_8 + dx*0.5_8 + + allocate(lons(idim),lats(jdim),topo(idim,jdim)) + allocate(lons_corner(idim_p1),lats_corner(jdim_p1)) + + do i = 1, idim + lons(i) = real((i-1),8) * dx + lon11 + print*,'lon ',i,lons(i) + enddo + + do i = 1, jdim + lats(i) = real((i-1),8) * dy + lat11 + print*,'lat ',i,lats(i) + enddo + + lat11 = 90.0_8 + lon11 = -180.0_8 + + do i = 1, idim_p1 + lons_corner(i) = real((i-1),8) * dx + lon11 + print*,'lon_corner ',i,lons_corner(i) + enddo + + do i = 1, jdim_p1 + lats_corner(i) = real((i-1),8) * dy + lat11 + print*,'lat_corner ',i,lats_corner(i) + enddo + + fileraw="/scratch1/NCEPDEV/global/glopara/fix/raw/orog/gmted2010.30sec.int" + + open(11, file=trim(fileraw), access='direct', recl=idim*jdim*2) + read(11, rec=1, iostat=istat) topo + if (istat /= 0) stop 99 + close(11) + + print*,'topo ', maxval(topo),minval(topo) + + filenetcdf="./topography.gmted2010.30s.nc" + + print*,"- CREATE FILE: ", trim(filenetcdf) + status=nf90_create(filenetcdf, IOR(NF90_NETCDF4,NF90_CLASSIC_MODEL), ncid) + if (status /= nf90_noerr) stop 1 + + status=nf90_def_dim(ncid, 'idim', idim, dim_i) + if (status /= nf90_noerr) stop 3 + + status=nf90_def_dim(ncid, 'jdim', jdim, dim_j) + if (status /= nf90_noerr) stop 2 + + status=nf90_def_dim(ncid, 'idim_p1', (idim+1), dim_ip1) + if (status /= nf90_noerr) stop 4 + + status=nf90_def_dim(ncid, 'jdim_p1', (jdim+1), dim_jp1) + if (status /= nf90_noerr) stop 5 + + status=nf90_put_att(ncid, nf90_global, 'source', 'USGS GMTED2010 TOPOGRAPHY DATA') + if (status /= nf90_noerr) stop 6 + + status=nf90_put_att(ncid, nf90_global, 'projection', 'regular lat/lon') + if (status /= nf90_noerr) stop 67 + + status=nf90_def_var(ncid, 'lat', nf90_double, dim_j, id_lat) + if (status /= nf90_noerr) stop 17 + + status=nf90_put_att(ncid, id_lat, 'long_name', 'grid cell center latitude') + if (status /= nf90_noerr) stop 10 + + status=nf90_put_att(ncid, id_lat, 'units', 'degrees') + if (status /= nf90_noerr) stop 85 + + status=nf90_def_var(ncid, 'lat_corner', nf90_double, dim_jp1, id_lat_corner) + if (status /= nf90_noerr) stop 37 + + status=nf90_put_att(ncid, id_lat_corner, 'long_name', 'grid cell corner latitude') + if (status /= nf90_noerr) stop 38 + + status=nf90_put_att(ncid, id_lat_corner, 'units', 'degrees') + if (status /= nf90_noerr) stop 86 + + status=nf90_def_var(ncid, 'lon', nf90_double, dim_i, id_lon) + if (status /= nf90_noerr) stop 16 + + status=nf90_put_att(ncid, id_lon, 'long_name', 'grid cell center longitude') + if (status /= nf90_noerr) stop 10 + + status=nf90_put_att(ncid, id_lon, 'units', 'degrees') + if (status /= nf90_noerr) stop 87 + + status=nf90_def_var(ncid, 'lon_corner', nf90_double, dim_ip1, id_lon_corner) + if (status /= nf90_noerr) stop 16 + + status=nf90_put_att(ncid, id_lon_corner, 'long_name', 'grid cell corner longitude') + if (status /= nf90_noerr) stop 40 + + 
status=nf90_put_att(ncid, id_lon_corner, 'units', 'degrees') + if (status /= nf90_noerr) stop 88 + + status=nf90_def_var(ncid, 'topo', nf90_short, (/dim_i,dim_j/), id_data) + if (status /= nf90_noerr) stop 20 + + status=nf90_put_att(ncid, id_data, 'long_name', 'topography') + if (status /= nf90_noerr) stop 65 + + status=nf90_put_att(ncid, id_data, 'units', 'meters') + if (status /= nf90_noerr) stop 55 + + status=nf90_enddef(ncid) + if (status /= nf90_noerr) stop 22 + + status=nf90_put_var(ncid, id_lon, lons) + if (status /= nf90_noerr) stop 19 + + status=nf90_put_var(ncid, id_lon_corner, lons_corner) + if (status /= nf90_noerr) stop 59 + + status=nf90_put_var(ncid, id_lat, lats) + if (status /= nf90_noerr) stop 20 + + status=nf90_put_var(ncid, id_lat_corner, lats_corner) + if (status /= nf90_noerr) stop 57 + + status=nf90_put_var(ncid, id_data, topo) + if (status /= nf90_noerr) stop 24 + + status=nf90_close(ncid) + + end program topo_netcdf diff --git a/ush/fv3gfs_make_orog.sh b/ush/fv3gfs_make_orog.sh index a684cf6e4..6fcff6673 100755 --- a/ush/fv3gfs_make_orog.sh +++ b/ush/fv3gfs_make_orog.sh @@ -81,12 +81,9 @@ echo "indir = $indir" cd $workdir -cp ${indir}/thirty.second.antarctic.new.bin fort.15 -cp ${indir}/landcover30.fixed . -# uncomment next line to use the old gtopo30 data. -# cp ${indir}/gtopo30_gg.fine.nh fort.235 -# use gmted2020 data. -cp ${indir}/gmted2010.30sec.int fort.235 +cp ${indir}/topography.antarctica.ramp.30s.nc . +cp ${indir}/landcover.umd.30s.nc . +cp ${indir}/topography.gmted2010.30s.nc . if [ $inorogexist -eq 1 ]; then cp $inputorog . fi From 5e39d0a932d30f063765e5cd445a6dbd03bb7ff0 Mon Sep 17 00:00:00 2001 From: Alex Richert Date: Mon, 11 Mar 2024 07:47:35 -0700 Subject: [PATCH 05/25] Add IP library v5 support (#910) IP v5 subsumes the SP library. These updates are backwards compatible for older versions of the IP/SP combo. 
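Since the SP routines are carried into IP v5, application source that calls them needs no change; only the link-time dependency becomes conditional (link sp::sp_d when SP is found, otherwise rely on ip::ip_d), as the diffs below show. The following hypothetical caller, not taken from this repository, is a minimal illustration: it builds against either combination, assuming the double-precision (_d) flavor of the library.

      ! Hypothetical example (not from UFS_UTILS): a caller of the former SP
      ! routine splat is unchanged whether the symbol comes from SP or IP v5.
      program splat_example
        implicit none
        integer, parameter :: idrt = 4          ! 4 = Gaussian latitudes
        integer, parameter :: jmax = 94         ! number of latitudes
        real(kind=8) :: slat(jmax), wlat(jmax)  ! sines of latitude and weights

        call splat(idrt, jmax, slat, wlat)

        print*, 'first/last sin(lat):', slat(1), slat(jmax)
        print*, 'sum of weights     :', sum(wlat)  ! Gaussian weights sum to ~2
      end program splat_example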
--- CMakeLists.txt | 4 +++- sorc/chgres_cube.fd/CMakeLists.txt | 5 ++++- sorc/emcsfc_snow2mdl.fd/CMakeLists.txt | 5 ++++- sorc/global_cycle.fd/CMakeLists.txt | 5 ++++- sorc/orog_mask_tools.fd/orog.fd/CMakeLists.txt | 5 ++++- sorc/weight_gen.fd/CMakeLists.txt | 7 ++++++- 6 files changed, 25 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 24dea3c93..60a0cf105 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -104,8 +104,10 @@ find_package(w3emc 2.9.0 REQUIRED) find_package(bacio 2.4.0 REQUIRED) find_package(nemsio 2.5.0 REQUIRED) find_package(sigio 2.3.0 REQUIRED) -find_package(sp 2.3.3 REQUIRED) find_package(ip 3.3.3 REQUIRED) +if(ip_VERSION LESS 5.0) + find_package(sp 2.3.3 REQUIRED) +endif() find_package(g2 3.4.3 REQUIRED) find_package(sigio 2.3.0 REQUIRED) diff --git a/sorc/chgres_cube.fd/CMakeLists.txt b/sorc/chgres_cube.fd/CMakeLists.txt index 53fe3644d..3a1803569 100644 --- a/sorc/chgres_cube.fd/CMakeLists.txt +++ b/sorc/chgres_cube.fd/CMakeLists.txt @@ -49,13 +49,16 @@ target_link_libraries( sigio::sigio bacio::bacio_4 ip::ip_d - sp::sp_d w3emc::w3emc_d esmf msis2 MPI::MPI_Fortran NetCDF::NetCDF_Fortran) +if(sp_FOUND) + target_link_libraries(chgres_cube_lib PUBLIC sp::sp_d) +endif() + if(OpenMP_Fortran_FOUND) target_link_libraries(${exe_name} PUBLIC OpenMP::OpenMP_Fortran) endif() diff --git a/sorc/emcsfc_snow2mdl.fd/CMakeLists.txt b/sorc/emcsfc_snow2mdl.fd/CMakeLists.txt index 081fa6912..08997374c 100644 --- a/sorc/emcsfc_snow2mdl.fd/CMakeLists.txt +++ b/sorc/emcsfc_snow2mdl.fd/CMakeLists.txt @@ -34,10 +34,13 @@ target_link_libraries(snow2mdl_lib PUBLIC g2::g2_d ip::ip_d - sp::sp_d bacio::bacio_4 w3emc::w3emc_d) +if(sp_FOUND) + target_link_libraries(snow2mdl_lib PUBLIC sp::sp_d) +endif() + if(OpenMP_Fortran_FOUND) target_link_libraries(${exe_name} PUBLIC OpenMP::OpenMP_Fortran) endif() diff --git a/sorc/global_cycle.fd/CMakeLists.txt b/sorc/global_cycle.fd/CMakeLists.txt index 0f73d4ab6..7adfae13f 100644 --- a/sorc/global_cycle.fd/CMakeLists.txt +++ b/sorc/global_cycle.fd/CMakeLists.txt @@ -39,11 +39,14 @@ target_link_libraries( w3emc::w3emc_d bacio::bacio_4 ip::ip_d - sp::sp_d MPI::MPI_Fortran NetCDF::NetCDF_Fortran noah_lsm_routines) +if(sp_FOUND) + target_link_libraries(global_cycle_lib PUBLIC sp::sp_d) +endif() + if(OpenMP_Fortran_FOUND) target_link_libraries(global_cycle_lib PUBLIC OpenMP::OpenMP_Fortran) endif() diff --git a/sorc/orog_mask_tools.fd/orog.fd/CMakeLists.txt b/sorc/orog_mask_tools.fd/orog.fd/CMakeLists.txt index 04ab86742..6fbed0573 100644 --- a/sorc/orog_mask_tools.fd/orog.fd/CMakeLists.txt +++ b/sorc/orog_mask_tools.fd/orog.fd/CMakeLists.txt @@ -28,9 +28,12 @@ target_link_libraries( bacio::bacio_4 w3emc::w3emc_d ip::ip_d - sp::sp_d NetCDF::NetCDF_Fortran) +if(sp_FOUND) + target_link_libraries(orog_lib PUBLIC sp::sp_d) +endif() + if(OpenMP_Fortran_FOUND) target_link_libraries(orog_lib PUBLIC OpenMP::OpenMP_Fortran) endif() diff --git a/sorc/weight_gen.fd/CMakeLists.txt b/sorc/weight_gen.fd/CMakeLists.txt index 6871c37cc..ce0190650 100644 --- a/sorc/weight_gen.fd/CMakeLists.txt +++ b/sorc/weight_gen.fd/CMakeLists.txt @@ -12,9 +12,14 @@ set(exe_name weight_gen) add_executable(${exe_name} ${fortran_src}) target_link_libraries( ${exe_name} - sp::sp_d NetCDF::NetCDF_Fortran) +if(sp_FOUND) + target_link_libraries(${exe_name} sp::sp_d) +else() + target_link_libraries(${exe_name} ip::ip_d) +endif() + install(TARGETS ${exe_name}) # If doxygen documentation we enabled, build it. 
From 216152f2ad5a835a0996fddde096d265e3ee70df Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Wed, 13 Mar 2024 08:19:15 -0400 Subject: [PATCH 06/25] gdas_init utility bug fix (#912) Point to the correct orography directory when using v15/GDAS and v16 retro/GDAS input data to the chgres step. Fixes #908. --- util/gdas_init/driver.hera.sh | 2 ++ util/gdas_init/driver.jet.sh | 4 +++- util/gdas_init/driver.s4.sh | 4 +++- util/gdas_init/driver.wcoss2.sh | 2 ++ util/gdas_init/run_v15.chgres.sh | 4 ++-- util/gdas_init/run_v16retro.chgres.sh | 4 ++-- util/gdas_init/set_fixed_files.sh | 23 ++++++++++++++++++++++- 7 files changed, 36 insertions(+), 7 deletions(-) diff --git a/util/gdas_init/driver.hera.sh b/util/gdas_init/driver.hera.sh index 2694b7b18..58a0adf80 100755 --- a/util/gdas_init/driver.hera.sh +++ b/util/gdas_init/driver.hera.sh @@ -21,6 +21,8 @@ module load prod_util/1.1.0 PROJECT_CODE=fv3-cpu QUEUE=batch +export machine=hera + source config if [ $EXTRACT_DATA == yes ]; then diff --git a/util/gdas_init/driver.jet.sh b/util/gdas_init/driver.jet.sh index cc123c154..7d587ba3e 100755 --- a/util/gdas_init/driver.jet.sh +++ b/util/gdas_init/driver.jet.sh @@ -20,8 +20,10 @@ PROJECT_CODE=hfv3gfs QUEUE=batch PARTITION=xjet +export machine=jet + # Needed for NDATE utility -module load prod_util/1.2.2 +module load prod_util/2.1.1 source config diff --git a/util/gdas_init/driver.s4.sh b/util/gdas_init/driver.s4.sh index b0a78fbc0..34a015929 100755 --- a/util/gdas_init/driver.s4.sh +++ b/util/gdas_init/driver.s4.sh @@ -15,13 +15,15 @@ module load build.$target.$compiler module list # Needed for NDATE utility -module load prod_util/1.2.2 +module load prod_util/2.1.1 PROJECT_CODE=star QUEUE=s4 source config +export machine=s4 + if [ $EXTRACT_DATA == yes ]; then echo "HPSS is not accessible from S4! If you wish to run just the chgres portion, set EXTRACT_DATA=NO in the config file and try again." 
diff --git a/util/gdas_init/driver.wcoss2.sh b/util/gdas_init/driver.wcoss2.sh index 8d9f6ced5..b765c8d78 100755 --- a/util/gdas_init/driver.wcoss2.sh +++ b/util/gdas_init/driver.wcoss2.sh @@ -21,6 +21,8 @@ PROJECT_CODE=GFS-DEV source config +export machine=wcoss2 + this_dir=$PWD if [ $EXTRACT_DATA == yes ]; then diff --git a/util/gdas_init/run_v15.chgres.sh b/util/gdas_init/run_v15.chgres.sh index 8b9cbdd1c..585b60e18 100755 --- a/util/gdas_init/run_v15.chgres.sh +++ b/util/gdas_init/run_v15.chgres.sh @@ -47,8 +47,8 @@ cat << EOF > fort.41 mosaic_file_target_grid="${FIX_ORO}/${ORO_DIR}/${CTAR}_mosaic.nc" orog_dir_target_grid="${FIX_ORO}/${ORO_DIR}" orog_files_target_grid="${ORO_NAME}.tile1.nc","${ORO_NAME}.tile2.nc","${ORO_NAME}.tile3.nc","${ORO_NAME}.tile4.nc","${ORO_NAME}.tile5.nc","${ORO_NAME}.tile6.nc" - mosaic_file_input_grid="${FIX_ORO}/${CINP}/${CINP}_mosaic.nc" - orog_dir_input_grid="${FIX_ORO}/${CINP}" + mosaic_file_input_grid="${FIX_ORO_INPUT}/${CINP}/${CINP}_mosaic.nc" + orog_dir_input_grid="${FIX_ORO_INPUT}/${CINP}" orog_files_input_grid="${CINP}_oro_data.tile1.nc","${CINP}_oro_data.tile2.nc","${CINP}_oro_data.tile3.nc","${CINP}_oro_data.tile4.nc","${CINP}_oro_data.tile5.nc","${CINP}_oro_data.tile6.nc" data_dir_input_grid="${INPUT_DATA_DIR}" atm_core_files_input_grid="${YMDH}.fv_core.res.tile1.nc","${YMDH}.fv_core.res.tile2.nc","${YMDH}.fv_core.res.tile3.nc","${YMDH}.fv_core.res.tile4.nc","${YMDH}.fv_core.res.tile5.nc","${YMDH}.fv_core.res.tile6.nc","${YMDH}.fv_core.res.nc" diff --git a/util/gdas_init/run_v16retro.chgres.sh b/util/gdas_init/run_v16retro.chgres.sh index 2a5e59376..68aa0917f 100755 --- a/util/gdas_init/run_v16retro.chgres.sh +++ b/util/gdas_init/run_v16retro.chgres.sh @@ -65,8 +65,8 @@ cat << EOF > fort.41 mosaic_file_target_grid="${FIX_ORO}/${ORO_DIR}/${CTAR}_mosaic.nc" orog_dir_target_grid="${FIX_ORO}/${ORO_DIR}" orog_files_target_grid="${ORO_NAME}.tile1.nc","${ORO_NAME}.tile2.nc","${ORO_NAME}.tile3.nc","${ORO_NAME}.tile4.nc","${ORO_NAME}.tile5.nc","${ORO_NAME}.tile6.nc" - mosaic_file_input_grid="${FIX_ORO}/${CINP}/${CINP}_mosaic.nc" - orog_dir_input_grid="${FIX_ORO}/${CINP}" + mosaic_file_input_grid="${FIX_ORO_INPUT}/${CINP}/${CINP}_mosaic.nc" + orog_dir_input_grid="${FIX_ORO_INPUT}/${CINP}" orog_files_input_grid="${CINP}_oro_data.tile1.nc","${CINP}_oro_data.tile2.nc","${CINP}_oro_data.tile3.nc","${CINP}_oro_data.tile4.nc","${CINP}_oro_data.tile5.nc","${CINP}_oro_data.tile6.nc" data_dir_input_grid="${INPUT_DATA_DIR}" atm_core_files_input_grid="${YMDH}.fv_core.res.tile1.nc","${YMDH}.fv_core.res.tile2.nc","${YMDH}.fv_core.res.tile3.nc","${YMDH}.fv_core.res.tile4.nc","${YMDH}.fv_core.res.tile5.nc","${YMDH}.fv_core.res.tile6.nc","${YMDH}.fv_core.res.nc" diff --git a/util/gdas_init/set_fixed_files.sh b/util/gdas_init/set_fixed_files.sh index 7cb63d1bf..210b63a60 100755 --- a/util/gdas_init/set_fixed_files.sh +++ b/util/gdas_init/set_fixed_files.sh @@ -1,7 +1,7 @@ #!/bin/bash #--------------------------------------------------------------------------- -# Set directory names and file names for orog data. +# Set directory names and file names for the target grid orog data. 
#--------------------------------------------------------------------------- if [ ${CTAR} == 'C48' ] ; then @@ -22,3 +22,24 @@ fi ORO_DIR="${CTAR}" ORO_NAME="${CTAR}.mx${OCNRES}_oro_data" + +#--------------------------------------------------------------------------- +# When using the v15/v16 tiled warm restart data as input to the chgres +# step, the input grid orography is needed (there is no orography record +# in the restart files). Since the restart data was created before the +# latest orog version (20231027), need to use a previous version. +#--------------------------------------------------------------------------- + +if [ "$machine" = 'hera' ] ; then + FIX_ORO_INPUT=/scratch1/NCEPDEV/global/glopara/fix/orog/20230615 +elif [ "$machine" = 'wcoss2' ] ; then + FIX_ORO_INPUT=/lfs/h2/emc/global/noscrub/emc.global/FIX/fix/orog/20230615 +elif [ "$machine" = 'jet' ] ; then + FIX_ORO_INPUT=/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix/orog/20230615 +elif [ "$machine" = 's4' ] ; then + FIX_ORO_INPUT=/data/prod/glopara/fix/orog/20230615 +else + set +x + echo ERROR machine $machine not supported. + exit 3 +fi From cadff2ba1a4ec048700b7d7bdf4602ad87186545 Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Wed, 13 Mar 2024 16:00:21 -0400 Subject: [PATCH 07/25] Release v1.13.0 (#917) Update VERSION and ./docs/main.md files. Fixes #916. --- VERSION | 2 +- docs/main.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 0eed1a29e..feaae22ba 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.12.0 +1.13.0 diff --git a/docs/main.md b/docs/main.md index 57a5e43a3..95b69d3b8 100644 --- a/docs/main.md +++ b/docs/main.md @@ -9,6 +9,7 @@ https://github.com/ufs-community/UFS_UTILS. ## Documentation for Previous Versions of UFS_UTILS +* [UFS_UTILS Version 1.13.0](ver-1.13.0/index.html) * [UFS_UTILS Version 1.12.0](ver-1.12.0/index.html) * [UFS_UTILS Version 1.11.0](ver-1.11.0/index.html) * [UFS_UTILS Version 1.10.0](ver-1.10.0/index.html) From 1ffaf1f9a598213fbb0106cffdf8ca0d3fd32ac7 Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Thu, 14 Mar 2024 15:02:31 -0400 Subject: [PATCH 08/25] Updates for Rocky8 on Jet (#919) Update the Jet build module to use the Rocky8 version of spack-stack. Update the ice blend regression test script to use Rocky8 versions of copygb, copygb2 and cnvgrib. Fixes #918. 
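Since the Rocky8 copygb, copygb2 and cnvgrib locations are hard-coded in the ice_blend driver (see the hunks below), a small guard along these lines can flag a stale spack-stack path before the test is submitted. This is an illustrative sketch only, using the variables exported by that driver; it is not part of the patch:

    # Illustrative guard: confirm the hard-coded grib utilities are present.
    for exe in "$WGRIB" "$WGRIB2" "$COPYGB" "$COPYGB2" "$CNVGRIB"; do
      if [ ! -x "$exe" ]; then
        echo "FATAL ERROR: grib utility not found or not executable: $exe"
        exit 1
      fi
    done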
--- modulefiles/build.jet.intel.lua | 2 +- reg_tests/grid_gen/driver.jet.sh | 2 +- reg_tests/ice_blend/driver.jet.sh | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/modulefiles/build.jet.intel.lua b/modulefiles/build.jet.intel.lua index 2a7011957..ce9e9bdaa 100644 --- a/modulefiles/build.jet.intel.lua +++ b/modulefiles/build.jet.intel.lua @@ -5,7 +5,7 @@ Load environment to compile UFS_UTILS on Jet using Intel hpss_ver=os.getenv("hpss_ver") or "" load(pathJoin("hpss", hpss_ver)) -prepend_path("MODULEPATH", "/mnt/lfs4/HFIP/hfv3gfs/role.epic/spack-stack/spack-stack-1.6.0/envs/unified-env/install/modulefiles/Core") +prepend_path("MODULEPATH", "/mnt/lfs4/HFIP/hfv3gfs/role.epic/spack-stack/spack-stack-1.6.0/envs/unified-env-rocky8/install/modulefiles/Core") stack_intel_ver=os.getenv("stack_intel_ver") or "2021.5.0" load(pathJoin("stack-intel", stack_intel_ver)) diff --git a/reg_tests/grid_gen/driver.jet.sh b/reg_tests/grid_gen/driver.jet.sh index db387c9d4..2a4c76a1f 100755 --- a/reg_tests/grid_gen/driver.jet.sh +++ b/reg_tests/grid_gen/driver.jet.sh @@ -72,7 +72,7 @@ TEST1=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:20:00 -A $PROJECT_ #----------------------------------------------------------------------------- LOG_FILE2=${LOG_FILE}02 -TEST2=$(sbatch --parsable --ntasks-per-node=10 --nodes=3 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.viirs.bnu \ +TEST2=$(sbatch --parsable --ntasks-per-node=12 --nodes=4 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.viirs.bnu \ --partition=xjet -o $LOG_FILE2 -e $LOG_FILE2 ./c96.viirs.bnu.sh) #----------------------------------------------------------------------------- diff --git a/reg_tests/ice_blend/driver.jet.sh b/reg_tests/ice_blend/driver.jet.sh index 6c5498aa9..02a0599f0 100755 --- a/reg_tests/ice_blend/driver.jet.sh +++ b/reg_tests/ice_blend/driver.jet.sh @@ -53,9 +53,9 @@ fi export WGRIB=/apps/wgrib/1.8.1.0b/bin/wgrib export WGRIB2=${WGRIB2_ROOT}/bin/wgrib2 -export COPYGB=/lfs4/HFIP/hfv3gfs/emc.nemspara/role.ufsutils/ufs_utils/grib_util/NCEPLIBS-grib_util/exec/bin/copygb -export COPYGB2=/lfs4/HFIP/hfv3gfs/emc.nemspara/role.ufsutils/ufs_utils/grib_util/NCEPLIBS-grib_util/exec/bin/copygb2 -export CNVGRIB=/apps/cnvgrib/1.4.0/bin/cnvgrib +export COPYGB=/lfs4/HFIP/hfv3gfs/role.epic/spack-stack/spack-stack-1.6.0/envs/unified-env-rocky8/install/intel/2021.5.0/grib-util-1.3.0-hrqavdi/bin/copygb +export COPYGB2=/lfs4/HFIP/hfv3gfs/role.epic/spack-stack/spack-stack-1.6.0/envs/unified-env-rocky8/install/intel/2021.5.0/grib-util-1.3.0-hrqavdi/bin/copygb2 +export CNVGRIB=/mnt/lfs4/HFIP/hfv3gfs/role.epic/spack-stack/spack-stack-1.6.0/envs/unified-env-rocky8/install/intel/2021.5.0/grib-util-1.3.0-hrqavdi/bin/cnvgrib export HOMEreg=/lfs4/HFIP/hfv3gfs/emc.nemspara/role.ufsutils/ufs_utils/reg_tests/ice_blend From 72fb94fc6bb8c917a457ff8105c1552eb8c02f7a Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Mon, 18 Mar 2024 09:43:52 -0400 Subject: [PATCH 09/25] Add instructions for the gdas_init utility to readthedocs (#920) This information was previously documented in the global workflow's readthedocs. Fixes #915. 
--- docs/source/ufs_utils.rst | 70 +++++++++++++++++++++++++++++++ util/gdas_init/set_fixed_files.sh | 1 + 2 files changed, 71 insertions(+) diff --git a/docs/source/ufs_utils.rst b/docs/source/ufs_utils.rst index 388ec4cf6..739110194 100644 --- a/docs/source/ufs_utils.rst +++ b/docs/source/ufs_utils.rst @@ -665,3 +665,73 @@ Run script ---------- To run, use the machine-dependent script under ./util/weight_gen + +*************************************************** +UFS_UTILS utilities +*************************************************** + +gdas_init +========= + +Introduction +------------ + +The gdas_init utility is used to create coldstart initial conditions for global cycled and forecast-only experiments using the chgres_cube program. It has two components: one that pulls the input data required by chgres_cube from HPSS, and one that runs chgres_cube. The utility is only supported on machines with access to HPSS: + + * Hera + * Jet + * WCOSS2 + * S4 (Only the chgres_cube step is supported, not the data pull step.) + +Location +-------- + +Find it here: ./util/gdas_init + +Build UFS_UTILS and set 'fixed' directories +------------------------------------------- + +Invoke the build script from the root directory: + +:: + + ./build_all.sh + +Set the 'fixed' directories using the script in the './fix' subdirectory (where $MACHINE is 'hera', 'jet', 'wcoss2', or 's4'): + +:: + + ./link_fixdirs.sh emc $MACHINE + +Configure for your experiment +----------------------------- + +Edit the variables in the 'config' file for your experiment: + + * **EXTRACT_DIR** - Directory where data extracted from HPSS is stored. + * **EXTRACT_DATA** - Set to 'yes' to extract data from HPSS. If data has been extracted and is located in EXTRACT_DIR, set to 'no'. On 's4' this step can't be run. Instead, the data must be pulled from another machine. + * **RUN_CHGRES** - To run chgres, set to 'yes'. To extract data only, set to 'no'. + * **yy/mm/dd/hh** - The year/month/day/hour of your desired experiment. Currently, does not support pre-ENKF GFS data, prior to 2012 May 21 00z. Use two digits. + * **LEVS** - Number of hybrid levels plus 1. To run with 127 levels, set LEVS to 128. + * **CRES_HIRES** - Resolution of the hires component of your experiment. Example: C768. + * **CRES_ENKF** - Resolution of the enkf component of the experiments. + * **UFS_DIR** - Location of your cloned UFS_UTILS repository. + * **OUTDIR** - Directory where the coldstart data output from chgres is stored. + * **CDUMP** - When 'gdas', will process gdas and enkf members. When 'gfs', will process gfs member for running free forecast only. + * **use_v16retro** - When 'yes', use v16 retrospective parallel data. The retrospective parallel tarballs can be missing or incomplete. So this option may not always work. Contact a UFS_UTILS repository manager if you encounter problems. + +Note: This utility selects the ocean resolution in the set_fixed_files.sh script using a default based on the user-selected CRES value. For example, for a cycled experiment with a CRES_HIRES/CRES_ENKF of C384/C192, the ocean resolution defaults to 0.25/0.50-degree. To choose another ocean resolution, the user will need to manually modify the set_fixed_files.sh script. + + +Kick off the utility +-------------------- + +Submit the driver script (where $MACHINE is 'hera', 'jet', 'wcoss2', or 's4') + +:: + + ./driver.$MACHINE.sh + +The standard output will be placed in log files in the current directory. 
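As a concrete illustration of the entries described above, a config for a hypothetical low-resolution C96/C48 cycled test might look as follows (the values, including the paths, are examples only):

::

    EXTRACT_DIR=/path/to/your/extract_dir
    EXTRACT_DATA=yes
    RUN_CHGRES=yes
    yy=21            # two digits, per the list above
    mm=03
    dd=22
    hh=06
    LEVS=128         # 127 hybrid levels plus 1
    CRES_HIRES=C96
    CRES_ENKF=C48
    UFS_DIR=/path/to/your/UFS_UTILS
    OUTDIR=/path/to/your/outdir
    CDUMP=gdas
    use_v16retro=no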
+ +The converted output will be found in $OUTDIR, including the needed abias and radstat initial condition files (if CDUMP=gdas). The files will be in the needed directory structure for the global-workflow system, therefore a user can move the contents of their $OUTDIR directly into their $ROTDIR. diff --git a/util/gdas_init/set_fixed_files.sh b/util/gdas_init/set_fixed_files.sh index 210b63a60..6604c834a 100755 --- a/util/gdas_init/set_fixed_files.sh +++ b/util/gdas_init/set_fixed_files.sh @@ -2,6 +2,7 @@ #--------------------------------------------------------------------------- # Set directory names and file names for the target grid orog data. +# A default ocean resolution (OCNRES) based on CTAR is used. #--------------------------------------------------------------------------- if [ ${CTAR} == 'C48' ] ; then From b87e6b2fd0c995f80b6b7be1d05807c084aafb1f Mon Sep 17 00:00:00 2001 From: Larissa Reames <52886575+LarissaReames-NOAA@users.noreply.github.com> Date: Wed, 20 Mar 2024 07:43:18 -0500 Subject: [PATCH 10/25] chgres_cube: Add a check for incompatible surface static data (#911) Add logic to ensure that the orogoraphy data is consistent with the static climatological surface data. The check looks for any points where land fraction > 0 but the static data point is missing (-999 flag). This will happen with older version of the orography data. The program now fails if incompatible static data is detected. Fixes #906. --- sorc/chgres_cube.fd/static_data.F90 | 83 ++++++++++++++++++++++------- 1 file changed, 64 insertions(+), 19 deletions(-) diff --git a/sorc/chgres_cube.fd/static_data.F90 b/sorc/chgres_cube.fd/static_data.F90 index e95756c6c..0de2b2af9 100644 --- a/sorc/chgres_cube.fd/static_data.F90 +++ b/sorc/chgres_cube.fd/static_data.F90 @@ -15,8 +15,8 @@ module static_data use esmf use utilities, only : error_handler, netcdf_err - - implicit none + +implicit none private @@ -49,6 +49,7 @@ module static_data subroutine get_static_fields(localpet) use model_grid, only : num_tiles_target_grid, & + land_frac_target_grid, & i_target, j_target implicit none @@ -60,11 +61,13 @@ subroutine get_static_fields(localpet) real(esmf_kind_r8), allocatable :: data_one_tile(:,:) real(esmf_kind_r8), allocatable :: max_data_one_tile(:,:) real(esmf_kind_r8), allocatable :: min_data_one_tile(:,:) - + real(esmf_kind_r8), allocatable :: land_frac_target_tile(:,:) if (localpet==0) then allocate(data_one_tile(i_target,j_target)) + allocate(land_frac_target_tile(i_target,j_target)) else allocate(data_one_tile(0,0)) + allocate(land_frac_target_tile(0,0)) endif call create_static_fields @@ -74,8 +77,11 @@ subroutine get_static_fields(localpet) !------------------------------------------------------------------------------ do tile = 1, num_tiles_target_grid + call ESMF_FieldGather(land_frac_target_grid, land_frac_target_tile, rootPet=0,tile=tile, rc=error) + if(ESMF_logFoundError(rcToCheck=error,msg=ESMF_LOGERR_PASSTHRU,line=__LINE__,file=__FILE__))& + call error_handler("IN FieldGather", error) if (localpet == 0) then - call read_static_file('slope_type', i_target, j_target, tile, data_one_tile) + call read_static_file('slope_type', i_target, j_target, tile, data_one_tile, land_frac_target_tile) endif print*,"- CALL FieldScatter FOR TARGET GRID SLOPE TYPE." 
call ESMF_FieldScatter(slope_type_target_grid, data_one_tile, rootpet=0, tile=tile, rc=error) @@ -88,8 +94,11 @@ subroutine get_static_fields(localpet) !------------------------------------------------------------------------------ do tile = 1, num_tiles_target_grid + call ESMF_FieldGather(land_frac_target_grid, land_frac_target_tile, rootPet=0,tile=tile, rc=error) + if(ESMF_logFoundError(rcToCheck=error,msg=ESMF_LOGERR_PASSTHRU,line=__LINE__,file=__FILE__))& + call error_handler("IN FieldGather", error) if (localpet == 0) then - call read_static_file('maximum_snow_albedo', i_target, j_target, tile, data_one_tile) + call read_static_file('maximum_snow_albedo', i_target, j_target, tile, data_one_tile, land_frac_target_tile) endif print*,"- CALL FieldScatter FOR TARGET GRID MAXIMUM SNOW ALBEDO." call ESMF_FieldScatter(mxsno_albedo_target_grid, data_one_tile, rootpet=0, tile=tile, rc=error) @@ -102,8 +111,11 @@ subroutine get_static_fields(localpet) !------------------------------------------------------------------------------ do tile = 1, num_tiles_target_grid + call ESMF_FieldGather(land_frac_target_grid, land_frac_target_tile, rootPet=0,tile=tile, rc=error) + if(ESMF_logFoundError(rcToCheck=error,msg=ESMF_LOGERR_PASSTHRU,line=__LINE__,file=__FILE__))& + call error_handler("IN FieldGather", error) if (localpet == 0) then - call read_static_file('soil_type', i_target, j_target, tile, data_one_tile) + call read_static_file('soil_type', i_target, j_target, tile, data_one_tile, land_frac_target_tile) endif print*,"- CALL FieldScatter FOR TARGET GRID SOIL TYPE." call ESMF_FieldScatter(soil_type_target_grid, data_one_tile, rootpet=0, tile=tile, rc=error) @@ -116,8 +128,11 @@ subroutine get_static_fields(localpet) !------------------------------------------------------------------------------ do tile = 1, num_tiles_target_grid + call ESMF_FieldGather(land_frac_target_grid, land_frac_target_tile, rootPet=0,tile=tile, rc=error) + if(ESMF_logFoundError(rcToCheck=error,msg=ESMF_LOGERR_PASSTHRU,line=__LINE__,file=__FILE__))& + call error_handler("IN FieldGather", error) if (localpet == 0) then - call read_static_file('vegetation_type', i_target, j_target, tile, data_one_tile) + call read_static_file('vegetation_type', i_target, j_target, tile, data_one_tile, land_frac_target_tile) endif print*,"- CALL FieldScatter FOR TARGET GRID VEGETATION TYPE." call ESMF_FieldScatter(veg_type_target_grid, data_one_tile, rootpet=0, tile=tile, rc=error) @@ -138,9 +153,12 @@ subroutine get_static_fields(localpet) endif do tile = 1, num_tiles_target_grid + call ESMF_FieldGather(land_frac_target_grid, land_frac_target_tile, rootPet=0,tile=tile, rc=error) + if(ESMF_logFoundError(rcToCheck=error,msg=ESMF_LOGERR_PASSTHRU,line=__LINE__,file=__FILE__))& + call error_handler("IN FieldGather", error) if (localpet == 0) then call read_static_file('vegetation_greenness', i_target, j_target, tile, data_one_tile, & - max_data_one_tile, min_data_one_tile) + land_frac_target_tile, max_data_one_tile, min_data_one_tile) endif print*,"- CALL FieldScatter FOR TARGET GRID VEGETATION GREENNESS." 
call ESMF_FieldScatter(veg_greenness_target_grid, data_one_tile, rootpet=0, tile=tile, rc=error) @@ -163,8 +181,11 @@ subroutine get_static_fields(localpet) !------------------------------------------------------------------------------ do tile = 1, num_tiles_target_grid + call ESMF_FieldGather(land_frac_target_grid, land_frac_target_tile, rootPet=0,tile=tile, rc=error) + if(ESMF_logFoundError(rcToCheck=error,msg=ESMF_LOGERR_PASSTHRU,line=__LINE__,file=__FILE__))& + call error_handler("IN FieldGather", error) if (localpet == 0) then - call read_static_file('substrate_temperature', i_target, j_target, tile, data_one_tile) + call read_static_file('substrate_temperature', i_target, j_target, tile, data_one_tile, land_frac_target_tile) endif print*,"- CALL FieldScatter FOR TARGET GRID SUBSTRATE TEMPERATURE." call ESMF_FieldScatter(substrate_temp_target_grid, data_one_tile, rootpet=0, tile=tile, rc=error) @@ -177,8 +198,11 @@ subroutine get_static_fields(localpet) !------------------------------------------------------------------------------ do tile = 1, num_tiles_target_grid + call ESMF_FieldGather(land_frac_target_grid, land_frac_target_tile, rootPet=0,tile=tile, rc=error) + if(ESMF_logFoundError(rcToCheck=error,msg=ESMF_LOGERR_PASSTHRU,line=__LINE__,file=__FILE__))& + call error_handler("IN FieldGather", error) if (localpet == 0) then - call read_static_file('visible_black_sky_albedo', i_target, j_target, tile, data_one_tile) + call read_static_file('visible_black_sky_albedo', i_target, j_target, tile, data_one_tile, land_frac_target_tile) endif print*,"- CALL FieldScatter FOR TARGET GRID ALVSF." call ESMF_FieldScatter(alvsf_target_grid, data_one_tile, rootpet=0, tile=tile, rc=error) @@ -187,8 +211,11 @@ subroutine get_static_fields(localpet) enddo do tile = 1, num_tiles_target_grid + call ESMF_FieldGather(land_frac_target_grid, land_frac_target_tile, rootPet=0,tile=tile, rc=error) + if(ESMF_logFoundError(rcToCheck=error,msg=ESMF_LOGERR_PASSTHRU,line=__LINE__,file=__FILE__))& + call error_handler("IN FieldGather", error) if (localpet == 0) then - call read_static_file('visible_white_sky_albedo', i_target, j_target, tile, data_one_tile) + call read_static_file('visible_white_sky_albedo', i_target, j_target, tile, data_one_tile, land_frac_target_tile) endif print*,"- CALL FieldScatter FOR TARGET GRID ALVWF." call ESMF_FieldScatter(alvwf_target_grid, data_one_tile, rootpet=0, tile=tile, rc=error) @@ -197,8 +224,11 @@ subroutine get_static_fields(localpet) enddo do tile = 1, num_tiles_target_grid + call ESMF_FieldGather(land_frac_target_grid, land_frac_target_tile, rootPet=0,tile=tile, rc=error) + if(ESMF_logFoundError(rcToCheck=error,msg=ESMF_LOGERR_PASSTHRU,line=__LINE__,file=__FILE__))& + call error_handler("IN FieldGather", error) if (localpet == 0) then - call read_static_file('near_IR_black_sky_albedo', i_target, j_target, tile, data_one_tile) + call read_static_file('near_IR_black_sky_albedo', i_target, j_target, tile, data_one_tile, land_frac_target_tile) endif print*,"- CALL FieldScatter FOR TARGET GRID ALNSF." 
call ESMF_FieldScatter(alnsf_target_grid, data_one_tile, rootpet=0, tile=tile, rc=error) @@ -207,8 +237,11 @@ subroutine get_static_fields(localpet) enddo do tile = 1, num_tiles_target_grid + call ESMF_FieldGather(land_frac_target_grid, land_frac_target_tile, rootPet=0,tile=tile, rc=error) + if(ESMF_logFoundError(rcToCheck=error,msg=ESMF_LOGERR_PASSTHRU,line=__LINE__,file=__FILE__))& + call error_handler("IN FieldGather", error) if (localpet == 0) then - call read_static_file('near_IR_white_sky_albedo', i_target, j_target, tile, data_one_tile) + call read_static_file('near_IR_white_sky_albedo', i_target, j_target, tile, data_one_tile, land_frac_target_tile) endif print*,"- CALL FieldScatter FOR TARGET GRID ALNWF." call ESMF_FieldScatter(alnwf_target_grid, data_one_tile, rootpet=0, tile=tile, rc=error) @@ -221,8 +254,11 @@ subroutine get_static_fields(localpet) !------------------------------------------------------------------------------ do tile = 1, num_tiles_target_grid + call ESMF_FieldGather(land_frac_target_grid, land_frac_target_tile, rootPet=0,tile=tile, rc=error) + if(ESMF_logFoundError(rcToCheck=error,msg=ESMF_LOGERR_PASSTHRU,line=__LINE__,file=__FILE__))& + call error_handler("IN FieldGather", error) if (localpet == 0) then - call read_static_file('facsf', i_target, j_target, tile, data_one_tile) + call read_static_file('facsf', i_target, j_target, tile, data_one_tile, land_frac_target_tile) endif print*,"- CALL FieldScatter FOR TARGET GRID FACSF." call ESMF_FieldScatter(facsf_target_grid, data_one_tile, rootpet=0, tile=tile, rc=error) @@ -252,6 +288,7 @@ end subroutine get_static_fields !! @param[in] i_target the "i" dimension of the target model tile !! @param[in] j_target the "j" dimension of the target model tile !! @param[in] tile the tile number of be processed +!! @param[in] land_frac landcover fraction on the target model tile !! @param[out] data_one_tile the processed surface data on the tile !! @param[out] max_data_one_tile for fields with multiple time periods, the max !! yearly value on the tile @@ -259,7 +296,7 @@ end subroutine get_static_fields !! yearly value on the tile !! @author George Gayno NCEP/EMC subroutine read_static_file(field, i_target, j_target, tile, & - data_one_tile, max_data_one_tile, & + data_one_tile, land_frac, max_data_one_tile, & min_data_one_tile) use netcdf @@ -271,10 +308,11 @@ subroutine read_static_file(field, i_target, j_target, tile, & character(len=*), intent(in) :: field character(len=100) :: filename - character(len=500) :: the_file + character(len=500) :: the_file, err_msg integer, intent(in) :: i_target, j_target, tile + real(esmf_kind_r8), intent(in) :: land_frac(i_target,j_target) real(esmf_kind_r8), intent(out) :: data_one_tile(i_target,j_target) real(esmf_kind_r8), intent(out), optional :: max_data_one_tile(i_target,j_target) real(esmf_kind_r8), intent(out), optional :: min_data_one_tile(i_target,j_target) @@ -370,9 +408,16 @@ subroutine read_static_file(field, i_target, j_target, tile, & deallocate(days_since) else ! file contains only one time record - - data_one_tile = dummy(:,:,1) - + + do j = 1, j_target + do i = 1, i_target + if(land_frac(i,j) > 0.0 .and. dummy(i,j,1) == -999) then + err_msg = "Detected missing data point in " // trim(filename) // ". Static data may be outdated. 
Please use static data created with the latest UFS_UTILS release that supports fractional land coverage" + call error_handler(err_msg,-1) + endif + data_one_tile(i,j) = dummy(i,j,1) + enddo + enddo endif if (trim(field) == 'vegetation_greenness') then From b93817c2eaf67c3990ddd45aa84f6395dff87c0a Mon Sep 17 00:00:00 2001 From: "Henry R. Winterbottom" <49202169+HenryWinterbottom-NOAA@users.noreply.github.com> Date: Thu, 21 Mar 2024 13:04:30 -0600 Subject: [PATCH 11/25] RDHPCS Hera Rocky-8 support (#924) Update Hera build modules to use Rocky8 versions of spack-stack. Update two regression test driver scripts to use Rocky8 versions of WGRIB2 and grib_utils. Fixes #913. --- modulefiles/build.hera.gnu.lua | 4 +--- modulefiles/build.hera.intel.lua | 2 +- reg_tests/ice_blend/driver.hera.sh | 10 ++++------ reg_tests/snow2mdl/driver.hera.sh | 11 +++++++++-- 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/modulefiles/build.hera.gnu.lua b/modulefiles/build.hera.gnu.lua index 95173c330..7f4d8ac57 100644 --- a/modulefiles/build.hera.gnu.lua +++ b/modulefiles/build.hera.gnu.lua @@ -5,9 +5,7 @@ Load environment to compile UFS_UTILS on Hera using Gnu hpss_ver=os.getenv("hpss_ver") or "" load(pathJoin("hpss", hpss_ver)) -prepend_path("MODULEPATH", "/scratch1/NCEPDEV/nems/role.epic/spack-stack/spack-stack-1.6.0/envs/unified-env/install/modulefiles/Core") --- For openmpi: -prepend_path("MODULEPATH", "/scratch1/NCEPDEV/jcsda/jedipara/spack-stack/modulefiles") +prepend_path("MODULEPATH", "/scratch1/NCEPDEV/nems/role.epic/spack-stack/spack-stack-1.6.0/envs/unified-env-rocky8/install/modulefiles/Core") stack_gcc_ver=os.getenv("stack_gcc_ver") or "9.2" load(pathJoin("stack-gcc", gnu_ver)) diff --git a/modulefiles/build.hera.intel.lua b/modulefiles/build.hera.intel.lua index 01490d7f0..f3280393a 100644 --- a/modulefiles/build.hera.intel.lua +++ b/modulefiles/build.hera.intel.lua @@ -5,7 +5,7 @@ Load environment to compile UFS_UTILS on Hera using Intel hpss_ver=os.getenv("hpss_ver") or "" load(pathJoin("hpss", hpss_ver)) -prepend_path("MODULEPATH", "/scratch1/NCEPDEV/nems/role.epic/spack-stack/spack-stack-1.6.0/envs/unified-env/install/modulefiles/Core") +prepend_path("MODULEPATH", "/scratch1/NCEPDEV/nems/role.epic/spack-stack/spack-stack-1.6.0/envs/unified-env-rocky8/install/modulefiles/Core") stack_intel_ver=os.getenv("stack_intel_ver") or "2021.5.0" load(pathJoin("stack-intel", stack_intel_ver)) diff --git a/reg_tests/ice_blend/driver.hera.sh b/reg_tests/ice_blend/driver.hera.sh index b86f370d1..ec31d773f 100755 --- a/reg_tests/ice_blend/driver.hera.sh +++ b/reg_tests/ice_blend/driver.hera.sh @@ -34,6 +34,10 @@ compiler=${compiler:-"intel"} source ../../sorc/machine-setup.sh > /dev/null 2>&1 module use ../../modulefiles module load build.$target.$compiler +module load grib-util +# Because of a bug in the grib-util module, the wgrib2 module +# must be loaded last. 
+module load wgrib2/2.0.8 module list export DATA="${WORK_DIR:-/scratch2/NCEPDEV/stmp1/$LOGNAME}" @@ -50,12 +54,6 @@ if [ "$UPDATE_BASELINE" = "TRUE" ]; then source ../get_hash.sh fi -export WGRIB=/scratch2/NCEPDEV/nwprod/NCEPLIBS/utils/grib_util.v1.1.1/exec/wgrib -export WGRIB2=/scratch2/NCEPDEV/nwprod/NCEPLIBS/utils/grib_util.v1.1.1/exec/wgrib2 -export COPYGB=/scratch2/NCEPDEV/nwprod/NCEPLIBS/utils/grib_util.v1.1.1/exec/copygb -export COPYGB2=/scratch2/NCEPDEV/nwprod/NCEPLIBS/utils/grib_util.v1.1.1/exec/copygb2 -export CNVGRIB=/scratch2/NCEPDEV/nwprod/NCEPLIBS/utils/grib_util.v1.1.1/exec/cnvgrib - export HOMEreg=/scratch1/NCEPDEV/nems/role.ufsutils/ufs_utils/reg_tests/ice_blend export HOMEgfs=$PWD/../.. diff --git a/reg_tests/snow2mdl/driver.hera.sh b/reg_tests/snow2mdl/driver.hera.sh index bc80942d8..f2c07ba5a 100755 --- a/reg_tests/snow2mdl/driver.hera.sh +++ b/reg_tests/snow2mdl/driver.hera.sh @@ -25,8 +25,17 @@ compiler=${compiler:-"intel"} source ../../sorc/machine-setup.sh > /dev/null 2>&1 module use ../../modulefiles module load build.$target.$compiler +module load grib-util +module load wgrib2/2.0.8 module list +# Because of a bug in the grib-util module, need to construct this +# variable. +WGRIB=${grib_util_ROOT}/bin/wgrib + +export WGRIB +export WGRIB2 + DATA_ROOT="${WORK_DIR:-/scratch2/NCEPDEV/stmp1/$LOGNAME}" DATA_ROOT="${DATA_ROOT}/reg-tests/snow2mdl" @@ -48,8 +57,6 @@ fi export HOMEreg=/scratch1/NCEPDEV/nems/role.ufsutils/ufs_utils/reg_tests/snow2mdl export HOMEgfs=$PWD/../.. -export WGRIB=/scratch2/NCEPDEV/nwprod/NCEPLIBS/utils/grib_util.v1.1.1/exec/wgrib -export WGRIB2=/scratch2/NCEPDEV/nwprod/NCEPLIBS/utils/grib_util.v1.1.1/exec/wgrib2 # The first test mimics GFS OPS. From f42fae239d0824f7b9a83c9afdc3d980894c7df8 Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Fri, 22 Mar 2024 09:55:41 -0400 Subject: [PATCH 12/25] Fix error handling in the orog code (#914) Update the netcdf_error routine to stop processing if an error occurs. Update error messages to comply with NCO requirements. Cleanup/fix the error handling for the read of the RAMP data. If read of RAMP data fails, stop program execution. Fixes #909. --- .../orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F | 96 ++++++++----------- sorc/orog_mask_tools.fd/orog.fd/netcdf_io.F90 | 3 +- 2 files changed, 40 insertions(+), 59 deletions(-) diff --git a/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F b/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F index 58b5ecb86..9e65956d5 100644 --- a/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F +++ b/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F @@ -116,7 +116,8 @@ if( trim(OUTGRID) .NE. "none" ) then inquire(file=trim(OUTGRID), exist=fexist) if(.not. fexist) then - print*, "file "//trim(OUTGRID)//" does not exist" + print*, "FATAL ERROR: file "//trim(OUTGRID) + print*, " does not exist." CALL ERREXIT(4) endif do ncid = 103, 512 @@ -203,7 +204,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, integer :: mskocn,notocn integer :: i,j,nx,ny,ncid,js,jn,iw,ie,k,it,jt,error,id_dim integer :: id_var,nx_in,ny_in,fsize,wgta,IN,INW,INE,IS,ISW,ISE - integer :: M,N,IMT,IRET,ios,iosg,latg2,istat,itest,jtest + integer :: M,N,IMT,IRET,ios,latg2,istat,itest,jtest integer :: i_south_pole,j_south_pole,i_north_pole,j_north_pole integer :: maxc3,maxc4,maxc5,maxc6,maxc7,maxc8 integer(1) :: i3save @@ -459,43 +460,35 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, ! Read 30-sec Antarctica RAMP data. Points scan from South ! 
to North, and from Greenwich to Greenwich. -! The error handling here needs to be cleaned up. - iosg = 0 error=NF__OPEN("./topography.antarctica.ramp.30s.nc", & NF_NOWRITE,fsize,ncid) call netcdf_err(error, 'Opening RAMP topo file' ) error=nf_inq_varid(ncid, 'topo', id_var) call netcdf_err(error, 'Inquire varid of RAMP topo') error=nf_get_var_real(ncid, id_var, GICE) - iosg=error call netcdf_err(error, 'Inquire data of RAMP topo') error = nf_close(ncid) - if(iosg .ne. 0 ) then - print *,' *** Err on reading GICE record, iosg=',iosg - print *,' exec continues but NO GICE correction done ' - else - print *,' GICE 30" Antarctica RAMP orog 43201x3601 read OK' - print *,' Processing! ' - print *,' Processing! ' - print *,' Processing! ' - do j = 1, 3601 - do i = 1, IMN - zsave1 = ZAVG(i,j) - zsave2 = ZSLM(i,j) - if( GICE(i,j) .ne. -99. .and. GICE(i,j) .ne. -1.0 ) then - if ( GICE(i,j) .gt. 0.) then - ZAVG(i,j) = int( GICE(i,j) + 0.5 ) + print *,' GICE 30" Antarctica RAMP orog 43201x3601 read OK' + print *,' Processing! ' + print *,' Processing! ' + print *,' Processing! ' + do j = 1, 3601 + do i = 1, IMN + zsave1 = ZAVG(i,j) + zsave2 = ZSLM(i,j) + if( GICE(i,j) .ne. -99. .and. GICE(i,j) .ne. -1.0 ) then + if ( GICE(i,j) .gt. 0.) then + ZAVG(i,j) = int( GICE(i,j) + 0.5 ) !! --- for GICE values less than or equal to 0 (0, -1, or -99) then !! --- radar-sat (RAMP) values are not valid and revert back to old orog ZSLM(i,j) = 1 - endif endif + endif 152 format(1x,' ZAVG(i=',i4,' j=',i4,')=',i5,i3, &' orig:',i5,i4,' Lat=',f7.3,f8.2,'E',' GICE=',f8.1) - enddo - enddo - endif + enddo + enddo deallocate (GICE) @@ -581,7 +574,8 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, grid_from_file = .true. inquire(file=trim(OUTGRID), exist=fexist) if(.not. fexist) then - print*, "file "//trim(OUTGRID)//" does not exist" + print*, "FATAL ERROR: file "//trim(OUTGRID) + print*, "does not exist." CALL ERREXIT(4) endif do ncid = 103, 512 @@ -597,25 +591,6 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, & trim(OUTGRID) ) nx = 2*IM ny = 2*JM -! error=nf_inq_dimlen(ncid,id_dim,nx) -! print*, "nx = ", nx, id_dim -! call netcdf_err(error, 'inquire dimension nx length '// -! & 'from file '//trim(OUTGRID) ) -! error=nf_inq_dimid(ncid, 'ny', id_dim) -! call netcdf_err(error, 'inquire dimension ny from file '// -! & trim(OUTGRID) ) -! error=nf_inq_dimlen(ncid,id_dim,ny) -! call netcdf_err(error, 'inquire dimension ny length '// -! & 'from file '//trim(OUTGRID) ) -! IM should equal nx/2 and JM should equal ny/2 -! if(IM .ne. nx/2) then -! print*, "IM=",IM, " /= grid file nx/2=",nx/2 -! CALL ERREXIT(4) -! endif -! if(JM .ne. ny/2) then -! print*, "JM=",JM, " /= grid file ny/2=",ny/2 -! CALL ERREXIT(4) -! endif print*, "Read the grid from file "//trim(OUTGRID) allocate(tmpvar(nx+1,ny+1)) @@ -770,12 +745,12 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, & IM,JM,IMN,JMN,geolon_c,geolat_c) lake_frac=9999.9 ELSE - print*,'got here - read in external mask ',merge_file + print*,'Read in external mask ',merge_file CALL READ_MASK(MERGE_FILE,SLM,land_frac,lake_frac,im,jm) ENDIF IF (MASK_ONLY) THEN - print*,'got here computing mask only.' + print*,'Computing mask only.' CALL WRITE_MASK_NETCDF(IM,JM,SLM,land_frac, 1 1,1,GEOLON,GEOLAT) @@ -1775,11 +1750,11 @@ SUBROUTINE get_index(IMN,JMN,npts,lonO,latO,DELXN, endif enddo if(ist<1 .OR. ist>IMN) then - print*, "ist<1 .or. ist>IMN" + print*, "FATAL ERROR: ist<1 .or. ist>IMN" call ABORT() endif if(ien<1 .OR. ien>IMN) then - print*, "iend<1 .or. 
iend>IMN" + print*, "FATAL ERROR: iend<1 .or. iend>IMN" call ABORT() endif @@ -1888,7 +1863,8 @@ SUBROUTINE MAKE_MASK(zslm,SLM,land_frac, XNSUM_ALL = XNSUM_ALL + 1. nsum_all = nsum_all+1 if(nsum_all > MAXSUM) then - print*, "nsum_all is greater than MAXSUM, increase MAXSUM" + print*, "FATAL ERROR: nsum_all is greater than MAXSUM," + print*, "increase MAXSUM." call ABORT() endif @@ -1900,7 +1876,8 @@ SUBROUTINE MAKE_MASK(zslm,SLM,land_frac, XNSUM = XNSUM + 1. nsum = nsum+1 if(nsum > MAXSUM) then - print*, "nsum is greater than MAXSUM, increase MAXSUM" + print*, "FATAL ERROR: nsum is greater than MAXSUM," + print*, "increase MAXSUM." call ABORT() endif endif @@ -1917,7 +1894,7 @@ SUBROUTINE MAKE_MASK(zslm,SLM,land_frac, land_frac(i,j) = XLAND_ALL/XNSUM _ALL SLM(I,J) = FLOAT(NINT(XLAND_ALL/XNSUM_ALL)) ELSE - print*, "no source points in MAKE_MASK" + print*, "FATAL ERROR: no source points in MAKE_MASK." call ABORT() ENDIF ENDDO @@ -2045,7 +2022,8 @@ SUBROUTINE MAKEMT2(ZAVG,ZSLM,ORO,SLM,VAR,VAR4, HEIGHT_ALL = FLOAT(ZAVG(ii,jj)) nsum_all = nsum_all+1 if(nsum_all > MAXSUM) then - print*, "nsum_all is greater than MAXSUM, increase MAXSUM" + print*, "FATAL ERROR: nsum_all is greater than MAXSUM," + print*, "increase MAXSUM." call ABORT() endif hgt_1d_all(nsum_all) = HEIGHT_ALL @@ -2064,7 +2042,8 @@ SUBROUTINE MAKEMT2(ZAVG,ZSLM,ORO,SLM,VAR,VAR4, HEIGHT = FLOAT(ZAVG(ii,jj)) nsum = nsum+1 if(nsum > MAXSUM) then - print*, "nsum is greater than MAXSUM, increase MAXSUM" + print*, "FATAL ERROR: nsum is greater than MAXSUM," + print*, "increase MAXSUM." call ABORT() endif hgt_1d(nsum) = HEIGHT @@ -2133,7 +2112,7 @@ SUBROUTINE MAKEMT2(ZAVG,ZSLM,ORO,SLM,VAR,VAR4, VAR4(I,J) = MIN(XW4_ALL/XNSUM_ALL/VAR(I,J) **4,10.) ENDIF ELSE - print*, "no source points in MAKEMT2" + print*, "FATAL ERROR: no source points in MAKEMT2." call ABORT() ENDIF @@ -3278,7 +3257,7 @@ SUBROUTINE MAKEOA2(ZAVG,zslm,VAR,GLAT,OA4,OL,IOA4,ELVMAX, !--- adjust dlat if the points are close to pole. if( lat-dlat*0.5<-90.) then print*, "at i,j =", i,j, lat, dlat, lat-dlat*0.5 - print*, "ERROR: lat-dlat*0.5<-90." + print*, "FATAL ERROR: lat-dlat*0.5<-90." call ERREXIT(4) endif if( lat+dlat*2 > 90.) then @@ -3607,7 +3586,8 @@ subroutine get_mismatch_index(im_in, jm_in, geolon_in,geolat_in, if(iindx(n) ==0) then print*, "lon,lat=", lon,lat print*, "jstart, jend=", jstart, jend, dist - print*, "ERROR in get mismatch_index: not find nearest points" + print*, "FATAL ERROR in get mismatch_index: " + print*, "did not find nearest points." 
call ERREXIT(4) endif enddo @@ -3911,7 +3891,7 @@ SUBROUTINE MAKEOA3(ZAVG,zslm,VAR,GLAT,OA4,OL,IOA4,ELVMAX, & lons_land_output, ibo, & bitmap_output, output_data_land, iret) if (iret /= 0) then - print*,'- ERROR IN IPOLATES ',iret + print*,'- FATAL ERROR IN IPOLATES ',iret call ERREXIT(4) endif @@ -3983,7 +3963,7 @@ SUBROUTINE MAKEOA3(ZAVG,zslm,VAR,GLAT,OA4,OL,IOA4,ELVMAX, & lons_land_output, ibo, & bitmap_output, output_data_land, iret) if (iret /= 0) then - print*,'- ERROR IN IPOLATES ',iret + print*,'- FATAL ERROR IN IPOLATES ',iret call ERREXIT(4) endif diff --git a/sorc/orog_mask_tools.fd/orog.fd/netcdf_io.F90 b/sorc/orog_mask_tools.fd/orog.fd/netcdf_io.F90 index 7ff8ce725..4e13fc8ef 100644 --- a/sorc/orog_mask_tools.fd/orog.fd/netcdf_io.F90 +++ b/sorc/orog_mask_tools.fd/orog.fd/netcdf_io.F90 @@ -223,7 +223,8 @@ subroutine netcdf_err( err, string ) if( err.EQ.NF_NOERR )return errmsg = NF_STRERROR(err) - print*, trim(string), ': ', trim(errmsg) + print*, 'FATAL ERROR: ', trim(string), ': ', trim(errmsg) + call abort return end subroutine netcdf_err From 8781eb80e42db90ad40e26457b4cd8c0625add2f Mon Sep 17 00:00:00 2001 From: Denise Worthen Date: Wed, 10 Apr 2024 15:30:12 -0400 Subject: [PATCH 13/25] cpld_gridgen: Fix angles on Bu for use by CICE (#922) Remove find_anq routine and instead calculate Bu angles using the Ct angle on the opposite side of the seam. Add angchk to verify CICE internally calculated Ct angle against both MOM6 and CICE model output. Remove resetting of longitudes to 0:360. Add tripole:tripole bilinear mapping for creation of CICE IC downscaling and add required Bu<->Ct mapping on tripole grid. Clean up log messages, remove duplicate weight file and update documentation. Fixes #921. --- .../RegressionTests_hera.intel.log | 29 +- .../RegressionTests_hercules.intel.log | 150 ++++++ .../RegressionTests_jet.intel.log | 45 +- .../RegressionTests_orion.intel.log | 29 +- .../RegressionTests_wcoss2.intel.log | 29 +- reg_tests/cpld_gridgen/parm/grid.nml.IN | 6 +- reg_tests/cpld_gridgen/rt.sh | 5 +- sorc/cpld_gridgen.fd/angles.F90 | 193 ++++---- sorc/cpld_gridgen.fd/docs/cpld_gridgen.md | 53 ++- sorc/cpld_gridgen.fd/gen_fixgrid.F90 | 158 ++++--- sorc/cpld_gridgen.fd/grdvars.F90 | 20 +- sorc/cpld_gridgen.fd/inputnml.F90 | 20 +- sorc/cpld_gridgen.fd/topoedits.F90 | 4 +- sorc/cpld_gridgen.fd/tripolegrid.F90 | 14 +- tests/cpld_gridgen/ftst_find_angq.F90 | 439 +++++++++++++----- 15 files changed, 828 insertions(+), 366 deletions(-) create mode 100644 reg_tests/cpld_gridgen/RegressionTests_hercules.intel.log diff --git a/reg_tests/cpld_gridgen/RegressionTests_hera.intel.log b/reg_tests/cpld_gridgen/RegressionTests_hera.intel.log index 494bc2d38..316fa4295 100644 --- a/reg_tests/cpld_gridgen/RegressionTests_hera.intel.log +++ b/reg_tests/cpld_gridgen/RegressionTests_hera.intel.log @@ -1,8 +1,8 @@ -Fri Dec 15 20:03:43 UTC 2023 +Mon Apr 8 19:09:11 UTC 2024 Start Regression test -Working dir = /scratch1/NCEPDEV/stmp4/Denise.Worthen/CPLD_GRIDGEN/rt_148527/025 -Baseline dir = /scratch1/NCEPDEV/stmp4/Denise.Worthen/CPLD_GRIDGEN/BASELINE/025 +Working dir = /scratch1/NCEPDEV/stmp4/Denise.Worthen/CPLD_GRIDGEN/rt_1965110/025 +Baseline dir = /scratch1/NCEPDEV/nems/role.ufsutils/ufs_utils/reg_tests/cpld_gridgen/baseline_data/025 Checking test 025 results .... 
Comparing Bu.mx025_SCRIP.nc........OK @@ -25,6 +25,7 @@ Comparing rect.0p50_SCRIP.nc........OK Comparing rect.1p00_SCRIP.nc........OK Comparing rect.5p00_SCRIP.nc........OK Comparing tripole.mx025.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx025.Ct.to.rect.0p25.bilinear.nc........OK Comparing tripole.mx025.Ct.to.rect.0p25.conserve.nc........OK Comparing tripole.mx025.Ct.to.rect.0p50.bilinear.nc........OK @@ -38,8 +39,8 @@ Comparing tripole.mx025.Cv.to.Ct.bilinear.nc........OK Comparing tripole.mx025.nc........OK -Working dir = /scratch1/NCEPDEV/stmp4/Denise.Worthen/CPLD_GRIDGEN/rt_148527/050 -Baseline dir = /scratch1/NCEPDEV/stmp4/Denise.Worthen/CPLD_GRIDGEN/BASELINE/050 +Working dir = /scratch1/NCEPDEV/stmp4/Denise.Worthen/CPLD_GRIDGEN/rt_1965110/050 +Baseline dir = /scratch1/NCEPDEV/nems/role.ufsutils/ufs_utils/reg_tests/cpld_gridgen/baseline_data/050 Checking test 050 results .... Comparing Bu.mx050_SCRIP.nc........OK @@ -61,8 +62,10 @@ Comparing mesh.mx050.nc........OK Comparing rect.0p50_SCRIP.nc........OK Comparing rect.1p00_SCRIP.nc........OK Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx050.Ct.bilinear.nc........OK Comparing tripole.mx025.Ct.to.mx050.Ct.neareststod.nc........OK Comparing tripole.mx050.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx050.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx050.Ct.to.rect.0p50.bilinear.nc........OK Comparing tripole.mx050.Ct.to.rect.0p50.conserve.nc........OK Comparing tripole.mx050.Ct.to.rect.1p00.bilinear.nc........OK @@ -74,8 +77,8 @@ Comparing tripole.mx050.Cv.to.Ct.bilinear.nc........OK Comparing tripole.mx050.nc........OK -Working dir = /scratch1/NCEPDEV/stmp4/Denise.Worthen/CPLD_GRIDGEN/rt_148527/100 -Baseline dir = /scratch1/NCEPDEV/stmp4/Denise.Worthen/CPLD_GRIDGEN/BASELINE/100 +Working dir = /scratch1/NCEPDEV/stmp4/Denise.Worthen/CPLD_GRIDGEN/rt_1965110/100 +Baseline dir = /scratch1/NCEPDEV/nems/role.ufsutils/ufs_utils/reg_tests/cpld_gridgen/baseline_data/100 Checking test 100 results .... Comparing Bu.mx100_SCRIP.nc........OK @@ -96,8 +99,10 @@ Comparing kmtu_cice_NEMS_mx100.nc........OK Comparing mesh.mx100.nc........OK Comparing rect.1p00_SCRIP.nc........OK Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx100.Ct.bilinear.nc........OK Comparing tripole.mx025.Ct.to.mx100.Ct.neareststod.nc........OK Comparing tripole.mx100.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx100.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx100.Ct.to.rect.1p00.bilinear.nc........OK Comparing tripole.mx100.Ct.to.rect.1p00.conserve.nc........OK Comparing tripole.mx100.Ct.to.rect.5p00.bilinear.nc........OK @@ -108,8 +113,8 @@ Comparing tripole.mx100.nc........OK Comparing ufs.topo_edits_011818.nc........OK -Working dir = /scratch1/NCEPDEV/stmp4/Denise.Worthen/CPLD_GRIDGEN/rt_148527/500 -Baseline dir = /scratch1/NCEPDEV/stmp4/Denise.Worthen/CPLD_GRIDGEN/BASELINE/500 +Working dir = /scratch1/NCEPDEV/stmp4/Denise.Worthen/CPLD_GRIDGEN/rt_1965110/500 +Baseline dir = /scratch1/NCEPDEV/nems/role.ufsutils/ufs_utils/reg_tests/cpld_gridgen/baseline_data/500 Checking test 500 results .... 
Comparing Bu.mx500_SCRIP.nc........OK @@ -129,8 +134,10 @@ Comparing grid_cice_NEMS_mx500.nc........OK Comparing kmtu_cice_NEMS_mx500.nc........OK Comparing mesh.mx500.nc........OK Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx500.Ct.bilinear.nc........OK Comparing tripole.mx025.Ct.to.mx500.Ct.neareststod.nc........OK Comparing tripole.mx500.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx500.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx500.Ct.to.rect.5p00.bilinear.nc........OK Comparing tripole.mx500.Ct.to.rect.5p00.conserve.nc........OK Comparing tripole.mx500.Cu.to.Ct.bilinear.nc........OK @@ -139,5 +146,5 @@ Comparing tripole.mx500.nc........OK REGRESSION TEST WAS SUCCESSFUL -Fri Dec 15 20:29:09 UTC 2023 -Elapsed time: 00h:25m:28s. Have a nice day! +Mon Apr 8 19:31:41 UTC 2024 +Elapsed time: 00h:23m:55s. Have a nice day! diff --git a/reg_tests/cpld_gridgen/RegressionTests_hercules.intel.log b/reg_tests/cpld_gridgen/RegressionTests_hercules.intel.log new file mode 100644 index 000000000..d92854007 --- /dev/null +++ b/reg_tests/cpld_gridgen/RegressionTests_hercules.intel.log @@ -0,0 +1,150 @@ +Tue Apr 9 06:44:10 CDT 2024 +Start Regression test + +Working dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/rt_1693683/025 +Baseline dir = /work/noaa/nems/role-nems/ufs_utils.hercules/reg_tests/cpld_gridgen/baseline_data/025 + +Checking test 025 results .... +Comparing Bu.mx025_SCRIP.nc........OK +Comparing C384.mx025.tile1.nc........OK +Comparing C384.mx025.tile2.nc........OK +Comparing C384.mx025.tile3.nc........OK +Comparing C384.mx025.tile4.nc........OK +Comparing C384.mx025.tile5.nc........OK +Comparing C384.mx025.tile6.nc........OK +Comparing Ct.mx025.to.C384.nc........OK +Comparing Ct.mx025_SCRIP.nc........OK +Comparing Ct.mx025_SCRIP_land.nc........OK +Comparing Cu.mx025_SCRIP.nc........OK +Comparing Cv.mx025_SCRIP.nc........OK +Comparing grid_cice_NEMS_mx025.nc........OK +Comparing kmtu_cice_NEMS_mx025.nc........OK +Comparing mesh.mx025.nc........OK +Comparing rect.0p25_SCRIP.nc........OK +Comparing rect.0p50_SCRIP.nc........OK +Comparing rect.1p00_SCRIP.nc........OK +Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.Bu.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.rect.0p25.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.rect.0p25.conserve.nc........OK +Comparing tripole.mx025.Ct.to.rect.0p50.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.rect.0p50.conserve.nc........OK +Comparing tripole.mx025.Ct.to.rect.1p00.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.rect.1p00.conserve.nc........OK +Comparing tripole.mx025.Ct.to.rect.5p00.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.rect.5p00.conserve.nc........OK +Comparing tripole.mx025.Cu.to.Ct.bilinear.nc........OK +Comparing tripole.mx025.Cv.to.Ct.bilinear.nc........OK +Comparing tripole.mx025.nc........OK + + +Working dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/rt_1693683/050 +Baseline dir = /work/noaa/nems/role-nems/ufs_utils.hercules/reg_tests/cpld_gridgen/baseline_data/050 + +Checking test 050 results .... 
+Comparing Bu.mx050_SCRIP.nc........OK +Comparing C192.mx050.tile1.nc........OK +Comparing C192.mx050.tile2.nc........OK +Comparing C192.mx050.tile3.nc........OK +Comparing C192.mx050.tile4.nc........OK +Comparing C192.mx050.tile5.nc........OK +Comparing C192.mx050.tile6.nc........OK +Comparing Ct.mx025_SCRIP.nc........OK +Comparing Ct.mx050.to.C192.nc........OK +Comparing Ct.mx050_SCRIP.nc........OK +Comparing Ct.mx050_SCRIP_land.nc........OK +Comparing Cu.mx050_SCRIP.nc........OK +Comparing Cv.mx050_SCRIP.nc........OK +Comparing grid_cice_NEMS_mx050.nc........OK +Comparing kmtu_cice_NEMS_mx050.nc........OK +Comparing mesh.mx050.nc........OK +Comparing rect.0p50_SCRIP.nc........OK +Comparing rect.1p00_SCRIP.nc........OK +Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx050.Ct.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.mx050.Ct.neareststod.nc........OK +Comparing tripole.mx050.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx050.Ct.to.Bu.bilinear.nc........OK +Comparing tripole.mx050.Ct.to.rect.0p50.bilinear.nc........OK +Comparing tripole.mx050.Ct.to.rect.0p50.conserve.nc........OK +Comparing tripole.mx050.Ct.to.rect.1p00.bilinear.nc........OK +Comparing tripole.mx050.Ct.to.rect.1p00.conserve.nc........OK +Comparing tripole.mx050.Ct.to.rect.5p00.bilinear.nc........OK +Comparing tripole.mx050.Ct.to.rect.5p00.conserve.nc........OK +Comparing tripole.mx050.Cu.to.Ct.bilinear.nc........OK +Comparing tripole.mx050.Cv.to.Ct.bilinear.nc........OK +Comparing tripole.mx050.nc........OK + + +Working dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/rt_1693683/100 +Baseline dir = /work/noaa/nems/role-nems/ufs_utils.hercules/reg_tests/cpld_gridgen/baseline_data/100 + +Checking test 100 results .... +Comparing Bu.mx100_SCRIP.nc........OK +Comparing C96.mx100.tile1.nc........OK +Comparing C96.mx100.tile2.nc........OK +Comparing C96.mx100.tile3.nc........OK +Comparing C96.mx100.tile4.nc........OK +Comparing C96.mx100.tile5.nc........OK +Comparing C96.mx100.tile6.nc........OK +Comparing Ct.mx025_SCRIP.nc........OK +Comparing Ct.mx100.to.C96.nc........OK +Comparing Ct.mx100_SCRIP.nc........OK +Comparing Ct.mx100_SCRIP_land.nc........OK +Comparing Cu.mx100_SCRIP.nc........OK +Comparing Cv.mx100_SCRIP.nc........OK +Comparing grid_cice_NEMS_mx100.nc........OK +Comparing kmtu_cice_NEMS_mx100.nc........OK +Comparing mesh.mx100.nc........OK +Comparing rect.1p00_SCRIP.nc........OK +Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx100.Ct.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.mx100.Ct.neareststod.nc........OK +Comparing tripole.mx100.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx100.Ct.to.Bu.bilinear.nc........OK +Comparing tripole.mx100.Ct.to.rect.1p00.bilinear.nc........OK +Comparing tripole.mx100.Ct.to.rect.1p00.conserve.nc........OK +Comparing tripole.mx100.Ct.to.rect.5p00.bilinear.nc........OK +Comparing tripole.mx100.Ct.to.rect.5p00.conserve.nc........OK +Comparing tripole.mx100.Cu.to.Ct.bilinear.nc........OK +Comparing tripole.mx100.Cv.to.Ct.bilinear.nc........OK +Comparing tripole.mx100.nc........OK +Comparing ufs.topo_edits_011818.nc........OK + + +Working dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/rt_1693683/500 +Baseline dir = /work/noaa/nems/role-nems/ufs_utils.hercules/reg_tests/cpld_gridgen/baseline_data/500 + +Checking test 500 results .... 
+Comparing Bu.mx500_SCRIP.nc........OK +Comparing C48.mx500.tile1.nc........OK +Comparing C48.mx500.tile2.nc........OK +Comparing C48.mx500.tile3.nc........OK +Comparing C48.mx500.tile4.nc........OK +Comparing C48.mx500.tile5.nc........OK +Comparing C48.mx500.tile6.nc........OK +Comparing Ct.mx025_SCRIP.nc........OK +Comparing Ct.mx500.to.C48.nc........OK +Comparing Ct.mx500_SCRIP.nc........OK +Comparing Ct.mx500_SCRIP_land.nc........OK +Comparing Cu.mx500_SCRIP.nc........OK +Comparing Cv.mx500_SCRIP.nc........OK +Comparing grid_cice_NEMS_mx500.nc........OK +Comparing kmtu_cice_NEMS_mx500.nc........OK +Comparing mesh.mx500.nc........OK +Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx500.Ct.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.mx500.Ct.neareststod.nc........OK +Comparing tripole.mx500.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx500.Ct.to.Bu.bilinear.nc........OK +Comparing tripole.mx500.Ct.to.rect.5p00.bilinear.nc........OK +Comparing tripole.mx500.Ct.to.rect.5p00.conserve.nc........OK +Comparing tripole.mx500.Cu.to.Ct.bilinear.nc........OK +Comparing tripole.mx500.Cv.to.Ct.bilinear.nc........OK +Comparing tripole.mx500.nc........OK + + +REGRESSION TEST WAS SUCCESSFUL +Tue Apr 9 07:04:50 CDT 2024 +Elapsed time: 00h:21m:53s. Have a nice day! diff --git a/reg_tests/cpld_gridgen/RegressionTests_jet.intel.log b/reg_tests/cpld_gridgen/RegressionTests_jet.intel.log index dab42d567..350d3e3c8 100644 --- a/reg_tests/cpld_gridgen/RegressionTests_jet.intel.log +++ b/reg_tests/cpld_gridgen/RegressionTests_jet.intel.log @@ -1,8 +1,8 @@ -Sat Dec 16 18:41:27 UTC 2023 +Mon Apr 8 19:07:01 UTC 2024 Start Regression test -Working dir = /lfs4/HFIP/h-nems/Denise.Worthen/CPLD_GRIDGEN/rt_97163/025 -Baseline dir = /lfs4/HFIP/h-nems/Denise.Worthen/CPLD_GRIDGEN/BASELINE/025 +Working dir = /lfs4/HFIP/h-nems/Denise.Worthen/CPLD_GRIDGEN/rt_2966831/025 +Baseline dir = /lfs4/HFIP/hfv3gfs/emc.nemspara/role.ufsutils/ufs_utils/reg_tests/cpld_gridgen/baseline_data/025 Checking test 025 results .... Comparing Bu.mx025_SCRIP.nc........OK @@ -12,9 +12,9 @@ Comparing C384.mx025.tile3.nc........OK Comparing C384.mx025.tile4.nc........OK Comparing C384.mx025.tile5.nc........OK Comparing C384.mx025.tile6.nc........OK -Comparing Ct.mx025.to.C384.nc........OK -Comparing Ct.mx025_SCRIP.nc........OK Comparing Ct.mx025_SCRIP_land.nc........OK +Comparing Ct.mx025_SCRIP.nc........OK +Comparing Ct.mx025.to.C384.nc........OK Comparing Cu.mx025_SCRIP.nc........OK Comparing Cv.mx025_SCRIP.nc........OK Comparing grid_cice_NEMS_mx025.nc........OK @@ -25,6 +25,7 @@ Comparing rect.0p50_SCRIP.nc........OK Comparing rect.1p00_SCRIP.nc........OK Comparing rect.5p00_SCRIP.nc........OK Comparing tripole.mx025.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx025.Ct.to.rect.0p25.bilinear.nc........OK Comparing tripole.mx025.Ct.to.rect.0p25.conserve.nc........OK Comparing tripole.mx025.Ct.to.rect.0p50.bilinear.nc........OK @@ -38,8 +39,8 @@ Comparing tripole.mx025.Cv.to.Ct.bilinear.nc........OK Comparing tripole.mx025.nc........OK -Working dir = /lfs4/HFIP/h-nems/Denise.Worthen/CPLD_GRIDGEN/rt_97163/050 -Baseline dir = /lfs4/HFIP/h-nems/Denise.Worthen/CPLD_GRIDGEN/BASELINE/050 +Working dir = /lfs4/HFIP/h-nems/Denise.Worthen/CPLD_GRIDGEN/rt_2966831/050 +Baseline dir = /lfs4/HFIP/hfv3gfs/emc.nemspara/role.ufsutils/ufs_utils/reg_tests/cpld_gridgen/baseline_data/050 Checking test 050 results .... 
Comparing Bu.mx050_SCRIP.nc........OK @@ -50,9 +51,9 @@ Comparing C192.mx050.tile4.nc........OK Comparing C192.mx050.tile5.nc........OK Comparing C192.mx050.tile6.nc........OK Comparing Ct.mx025_SCRIP.nc........OK -Comparing Ct.mx050.to.C192.nc........OK -Comparing Ct.mx050_SCRIP.nc........OK Comparing Ct.mx050_SCRIP_land.nc........OK +Comparing Ct.mx050_SCRIP.nc........OK +Comparing Ct.mx050.to.C192.nc........OK Comparing Cu.mx050_SCRIP.nc........OK Comparing Cv.mx050_SCRIP.nc........OK Comparing grid_cice_NEMS_mx050.nc........OK @@ -61,8 +62,10 @@ Comparing mesh.mx050.nc........OK Comparing rect.0p50_SCRIP.nc........OK Comparing rect.1p00_SCRIP.nc........OK Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx050.Ct.bilinear.nc........OK Comparing tripole.mx025.Ct.to.mx050.Ct.neareststod.nc........OK Comparing tripole.mx050.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx050.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx050.Ct.to.rect.0p50.bilinear.nc........OK Comparing tripole.mx050.Ct.to.rect.0p50.conserve.nc........OK Comparing tripole.mx050.Ct.to.rect.1p00.bilinear.nc........OK @@ -74,8 +77,8 @@ Comparing tripole.mx050.Cv.to.Ct.bilinear.nc........OK Comparing tripole.mx050.nc........OK -Working dir = /lfs4/HFIP/h-nems/Denise.Worthen/CPLD_GRIDGEN/rt_97163/100 -Baseline dir = /lfs4/HFIP/h-nems/Denise.Worthen/CPLD_GRIDGEN/BASELINE/100 +Working dir = /lfs4/HFIP/h-nems/Denise.Worthen/CPLD_GRIDGEN/rt_2966831/100 +Baseline dir = /lfs4/HFIP/hfv3gfs/emc.nemspara/role.ufsutils/ufs_utils/reg_tests/cpld_gridgen/baseline_data/100 Checking test 100 results .... Comparing Bu.mx100_SCRIP.nc........OK @@ -86,9 +89,9 @@ Comparing C96.mx100.tile4.nc........OK Comparing C96.mx100.tile5.nc........OK Comparing C96.mx100.tile6.nc........OK Comparing Ct.mx025_SCRIP.nc........OK -Comparing Ct.mx100.to.C96.nc........OK -Comparing Ct.mx100_SCRIP.nc........OK Comparing Ct.mx100_SCRIP_land.nc........OK +Comparing Ct.mx100_SCRIP.nc........OK +Comparing Ct.mx100.to.C96.nc........OK Comparing Cu.mx100_SCRIP.nc........OK Comparing Cv.mx100_SCRIP.nc........OK Comparing grid_cice_NEMS_mx100.nc........OK @@ -96,8 +99,10 @@ Comparing kmtu_cice_NEMS_mx100.nc........OK Comparing mesh.mx100.nc........OK Comparing rect.1p00_SCRIP.nc........OK Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx100.Ct.bilinear.nc........OK Comparing tripole.mx025.Ct.to.mx100.Ct.neareststod.nc........OK Comparing tripole.mx100.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx100.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx100.Ct.to.rect.1p00.bilinear.nc........OK Comparing tripole.mx100.Ct.to.rect.1p00.conserve.nc........OK Comparing tripole.mx100.Ct.to.rect.5p00.bilinear.nc........OK @@ -108,8 +113,8 @@ Comparing tripole.mx100.nc........OK Comparing ufs.topo_edits_011818.nc........OK -Working dir = /lfs4/HFIP/h-nems/Denise.Worthen/CPLD_GRIDGEN/rt_97163/500 -Baseline dir = /lfs4/HFIP/h-nems/Denise.Worthen/CPLD_GRIDGEN/BASELINE/500 +Working dir = /lfs4/HFIP/h-nems/Denise.Worthen/CPLD_GRIDGEN/rt_2966831/500 +Baseline dir = /lfs4/HFIP/hfv3gfs/emc.nemspara/role.ufsutils/ufs_utils/reg_tests/cpld_gridgen/baseline_data/500 Checking test 500 results .... 
Comparing Bu.mx500_SCRIP.nc........OK @@ -120,17 +125,19 @@ Comparing C48.mx500.tile4.nc........OK Comparing C48.mx500.tile5.nc........OK Comparing C48.mx500.tile6.nc........OK Comparing Ct.mx025_SCRIP.nc........OK -Comparing Ct.mx500.to.C48.nc........OK -Comparing Ct.mx500_SCRIP.nc........OK Comparing Ct.mx500_SCRIP_land.nc........OK +Comparing Ct.mx500_SCRIP.nc........OK +Comparing Ct.mx500.to.C48.nc........OK Comparing Cu.mx500_SCRIP.nc........OK Comparing Cv.mx500_SCRIP.nc........OK Comparing grid_cice_NEMS_mx500.nc........OK Comparing kmtu_cice_NEMS_mx500.nc........OK Comparing mesh.mx500.nc........OK Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx500.Ct.bilinear.nc........OK Comparing tripole.mx025.Ct.to.mx500.Ct.neareststod.nc........OK Comparing tripole.mx500.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx500.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx500.Ct.to.rect.5p00.bilinear.nc........OK Comparing tripole.mx500.Ct.to.rect.5p00.conserve.nc........OK Comparing tripole.mx500.Cu.to.Ct.bilinear.nc........OK @@ -139,5 +146,5 @@ Comparing tripole.mx500.nc........OK REGRESSION TEST WAS SUCCESSFUL -Sat Dec 16 19:13:08 UTC 2023 -Elapsed time: 00h:31m:44s. Have a nice day! +Mon Apr 8 19:38:29 UTC 2024 +Elapsed time: 00h:33m:11s. Have a nice day! diff --git a/reg_tests/cpld_gridgen/RegressionTests_orion.intel.log b/reg_tests/cpld_gridgen/RegressionTests_orion.intel.log index 34f40612f..4dde38fb8 100644 --- a/reg_tests/cpld_gridgen/RegressionTests_orion.intel.log +++ b/reg_tests/cpld_gridgen/RegressionTests_orion.intel.log @@ -1,8 +1,8 @@ -Fri Dec 15 11:10:53 CST 2023 +Mon Apr 8 18:04:38 CDT 2024 Start Regression test -Working dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/rt_408740/025 -Baseline dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/BASELINE/025 +Working dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/rt_325586/025 +Baseline dir = /work/noaa/nems/role-nems/ufs_utils/reg_tests/cpld_gridgen/baseline_data/025 Checking test 025 results .... Comparing Bu.mx025_SCRIP.nc........OK @@ -25,6 +25,7 @@ Comparing rect.0p50_SCRIP.nc........OK Comparing rect.1p00_SCRIP.nc........OK Comparing rect.5p00_SCRIP.nc........OK Comparing tripole.mx025.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx025.Ct.to.rect.0p25.bilinear.nc........OK Comparing tripole.mx025.Ct.to.rect.0p25.conserve.nc........OK Comparing tripole.mx025.Ct.to.rect.0p50.bilinear.nc........OK @@ -38,8 +39,8 @@ Comparing tripole.mx025.Cv.to.Ct.bilinear.nc........OK Comparing tripole.mx025.nc........OK -Working dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/rt_408740/050 -Baseline dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/BASELINE/050 +Working dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/rt_325586/050 +Baseline dir = /work/noaa/nems/role-nems/ufs_utils/reg_tests/cpld_gridgen/baseline_data/050 Checking test 050 results .... 
Comparing Bu.mx050_SCRIP.nc........OK @@ -61,8 +62,10 @@ Comparing mesh.mx050.nc........OK Comparing rect.0p50_SCRIP.nc........OK Comparing rect.1p00_SCRIP.nc........OK Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx050.Ct.bilinear.nc........OK Comparing tripole.mx025.Ct.to.mx050.Ct.neareststod.nc........OK Comparing tripole.mx050.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx050.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx050.Ct.to.rect.0p50.bilinear.nc........OK Comparing tripole.mx050.Ct.to.rect.0p50.conserve.nc........OK Comparing tripole.mx050.Ct.to.rect.1p00.bilinear.nc........OK @@ -74,8 +77,8 @@ Comparing tripole.mx050.Cv.to.Ct.bilinear.nc........OK Comparing tripole.mx050.nc........OK -Working dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/rt_408740/100 -Baseline dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/BASELINE/100 +Working dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/rt_325586/100 +Baseline dir = /work/noaa/nems/role-nems/ufs_utils/reg_tests/cpld_gridgen/baseline_data/100 Checking test 100 results .... Comparing Bu.mx100_SCRIP.nc........OK @@ -96,8 +99,10 @@ Comparing kmtu_cice_NEMS_mx100.nc........OK Comparing mesh.mx100.nc........OK Comparing rect.1p00_SCRIP.nc........OK Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx100.Ct.bilinear.nc........OK Comparing tripole.mx025.Ct.to.mx100.Ct.neareststod.nc........OK Comparing tripole.mx100.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx100.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx100.Ct.to.rect.1p00.bilinear.nc........OK Comparing tripole.mx100.Ct.to.rect.1p00.conserve.nc........OK Comparing tripole.mx100.Ct.to.rect.5p00.bilinear.nc........OK @@ -108,8 +113,8 @@ Comparing tripole.mx100.nc........OK Comparing ufs.topo_edits_011818.nc........OK -Working dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/rt_408740/500 -Baseline dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/BASELINE/500 +Working dir = /work/noaa/stmp/dworthen/CPLD_GRIDGEN/rt_325586/500 +Baseline dir = /work/noaa/nems/role-nems/ufs_utils/reg_tests/cpld_gridgen/baseline_data/500 Checking test 500 results .... Comparing Bu.mx500_SCRIP.nc........OK @@ -129,8 +134,10 @@ Comparing grid_cice_NEMS_mx500.nc........OK Comparing kmtu_cice_NEMS_mx500.nc........OK Comparing mesh.mx500.nc........OK Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx500.Ct.bilinear.nc........OK Comparing tripole.mx025.Ct.to.mx500.Ct.neareststod.nc........OK Comparing tripole.mx500.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx500.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx500.Ct.to.rect.5p00.bilinear.nc........OK Comparing tripole.mx500.Ct.to.rect.5p00.conserve.nc........OK Comparing tripole.mx500.Cu.to.Ct.bilinear.nc........OK @@ -139,5 +146,5 @@ Comparing tripole.mx500.nc........OK REGRESSION TEST WAS SUCCESSFUL -Fri Dec 15 11:33:32 CST 2023 -Elapsed time: 00h:22m:41s. Have a nice day! +Mon Apr 8 18:29:13 CDT 2024 +Elapsed time: 00h:26m:29s. Have a nice day! 
diff --git a/reg_tests/cpld_gridgen/RegressionTests_wcoss2.intel.log b/reg_tests/cpld_gridgen/RegressionTests_wcoss2.intel.log index a7fa76fd6..6d17bf427 100644 --- a/reg_tests/cpld_gridgen/RegressionTests_wcoss2.intel.log +++ b/reg_tests/cpld_gridgen/RegressionTests_wcoss2.intel.log @@ -1,8 +1,8 @@ -Sat Dec 16 15:19:48 UTC 2023 +Mon Apr 8 20:57:57 UTC 2024 Start Regression test -Working dir = /lfs/h2/emc/stmp/denise.worthen/CPLD_GRIDGEN/rt_194650/025 -Baseline dir = /lfs/h2/emc/stmp/denise.worthen/CPLD_GRIDGEN/BASELINE/025 +Working dir = /lfs/h2/emc/stmp/denise.worthen/CPLD_GRIDGEN/rt_15449/025 +Baseline dir = /lfs/h2/emc/nems/noscrub/emc.nems/UFS_UTILS/reg_tests/cpld_gridgen/baseline_data/025 Checking test 025 results .... Comparing Bu.mx025_SCRIP.nc........OK @@ -25,6 +25,7 @@ Comparing rect.0p50_SCRIP.nc........OK Comparing rect.1p00_SCRIP.nc........OK Comparing rect.5p00_SCRIP.nc........OK Comparing tripole.mx025.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx025.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx025.Ct.to.rect.0p25.bilinear.nc........OK Comparing tripole.mx025.Ct.to.rect.0p25.conserve.nc........OK Comparing tripole.mx025.Ct.to.rect.0p50.bilinear.nc........OK @@ -38,8 +39,8 @@ Comparing tripole.mx025.Cv.to.Ct.bilinear.nc........OK Comparing tripole.mx025.nc........OK -Working dir = /lfs/h2/emc/stmp/denise.worthen/CPLD_GRIDGEN/rt_194650/050 -Baseline dir = /lfs/h2/emc/stmp/denise.worthen/CPLD_GRIDGEN/BASELINE/050 +Working dir = /lfs/h2/emc/stmp/denise.worthen/CPLD_GRIDGEN/rt_15449/050 +Baseline dir = /lfs/h2/emc/nems/noscrub/emc.nems/UFS_UTILS/reg_tests/cpld_gridgen/baseline_data/050 Checking test 050 results .... Comparing Bu.mx050_SCRIP.nc........OK @@ -61,8 +62,10 @@ Comparing mesh.mx050.nc........OK Comparing rect.0p50_SCRIP.nc........OK Comparing rect.1p00_SCRIP.nc........OK Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx050.Ct.bilinear.nc........OK Comparing tripole.mx025.Ct.to.mx050.Ct.neareststod.nc........OK Comparing tripole.mx050.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx050.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx050.Ct.to.rect.0p50.bilinear.nc........OK Comparing tripole.mx050.Ct.to.rect.0p50.conserve.nc........OK Comparing tripole.mx050.Ct.to.rect.1p00.bilinear.nc........OK @@ -74,8 +77,8 @@ Comparing tripole.mx050.Cv.to.Ct.bilinear.nc........OK Comparing tripole.mx050.nc........OK -Working dir = /lfs/h2/emc/stmp/denise.worthen/CPLD_GRIDGEN/rt_194650/100 -Baseline dir = /lfs/h2/emc/stmp/denise.worthen/CPLD_GRIDGEN/BASELINE/100 +Working dir = /lfs/h2/emc/stmp/denise.worthen/CPLD_GRIDGEN/rt_15449/100 +Baseline dir = /lfs/h2/emc/nems/noscrub/emc.nems/UFS_UTILS/reg_tests/cpld_gridgen/baseline_data/100 Checking test 100 results .... 
Comparing Bu.mx100_SCRIP.nc........OK @@ -96,8 +99,10 @@ Comparing kmtu_cice_NEMS_mx100.nc........OK Comparing mesh.mx100.nc........OK Comparing rect.1p00_SCRIP.nc........OK Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx100.Ct.bilinear.nc........OK Comparing tripole.mx025.Ct.to.mx100.Ct.neareststod.nc........OK Comparing tripole.mx100.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx100.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx100.Ct.to.rect.1p00.bilinear.nc........OK Comparing tripole.mx100.Ct.to.rect.1p00.conserve.nc........OK Comparing tripole.mx100.Ct.to.rect.5p00.bilinear.nc........OK @@ -108,8 +113,8 @@ Comparing tripole.mx100.nc........OK Comparing ufs.topo_edits_011818.nc........OK -Working dir = /lfs/h2/emc/stmp/denise.worthen/CPLD_GRIDGEN/rt_194650/500 -Baseline dir = /lfs/h2/emc/stmp/denise.worthen/CPLD_GRIDGEN/BASELINE/500 +Working dir = /lfs/h2/emc/stmp/denise.worthen/CPLD_GRIDGEN/rt_15449/500 +Baseline dir = /lfs/h2/emc/nems/noscrub/emc.nems/UFS_UTILS/reg_tests/cpld_gridgen/baseline_data/500 Checking test 500 results .... Comparing Bu.mx500_SCRIP.nc........OK @@ -129,8 +134,10 @@ Comparing grid_cice_NEMS_mx500.nc........OK Comparing kmtu_cice_NEMS_mx500.nc........OK Comparing mesh.mx500.nc........OK Comparing rect.5p00_SCRIP.nc........OK +Comparing tripole.mx025.Ct.to.mx500.Ct.bilinear.nc........OK Comparing tripole.mx025.Ct.to.mx500.Ct.neareststod.nc........OK Comparing tripole.mx500.Bu.to.Ct.bilinear.nc........OK +Comparing tripole.mx500.Ct.to.Bu.bilinear.nc........OK Comparing tripole.mx500.Ct.to.rect.5p00.bilinear.nc........OK Comparing tripole.mx500.Ct.to.rect.5p00.conserve.nc........OK Comparing tripole.mx500.Cu.to.Ct.bilinear.nc........OK @@ -139,5 +146,5 @@ Comparing tripole.mx500.nc........OK REGRESSION TEST WAS SUCCESSFUL -Sat Dec 16 15:39:10 UTC 2023 -Elapsed time: 00h:19m:23s. Have a nice day! +Mon Apr 8 21:22:18 UTC 2024 +Elapsed time: 00h:25m:33s. Have a nice day! 
diff --git a/reg_tests/cpld_gridgen/parm/grid.nml.IN b/reg_tests/cpld_gridgen/parm/grid.nml.IN index 8776b58c8..05f1ac04f 100644 --- a/reg_tests/cpld_gridgen/parm/grid.nml.IN +++ b/reg_tests/cpld_gridgen/parm/grid.nml.IN @@ -4,10 +4,10 @@ nj=NJ_GLB dirsrc='FIXDIR' dirout='OUTDIR' fv3dir='MOSAICDIR' -topofile=TOPOGFILE -editsfile=EDITSFILE +topofile='TOPOGFILE' +editsfile='EDITSFILE' res=RESNAME -atmres=MOSAICRES +atmres='MOSAICRES' npx=NPX editmask=DO_MASKEDIT debug=DO_DEBUG diff --git a/reg_tests/cpld_gridgen/rt.sh b/reg_tests/cpld_gridgen/rt.sh index 70d11c048..bfdf4795a 100755 --- a/reg_tests/cpld_gridgen/rt.sh +++ b/reg_tests/cpld_gridgen/rt.sh @@ -108,9 +108,9 @@ TESTS_FILE="$PATHRT/rt.conf" export TEST_NAME= # for C3072 on hera, use WLCLK=60 and MEM="--exclusive" -WLCLK_dflt=20 +WLCLK_dflt=35 export WLCLK=$WLCLK_dflt -MEM_dflt="--mem=12g" +MEM_dflt="--mem=16g" export MEM=$MEM_dflt cd $PATHRT @@ -170,7 +170,6 @@ elif [[ $target = jet ]]; then NCCMP=nccmp PARTITION=xjet ulimit -s unlimited - WLCLK=25 SBATCH_COMMAND="./cpld_gridgen.sh" fi NEW_BASELINE_ROOT=$STMP/CPLD_GRIDGEN/BASELINE diff --git a/sorc/cpld_gridgen.fd/angles.F90 b/sorc/cpld_gridgen.fd/angles.F90 index 31ac0bbcb..54af32bbb 100644 --- a/sorc/cpld_gridgen.fd/angles.F90 +++ b/sorc/cpld_gridgen.fd/angles.F90 @@ -10,125 +10,130 @@ module angles use gengrid_kinds, only : dbl_kind, int_kind - use grdvars, only : ni,nj,nx,ny - use grdvars, only : x,y,xsgp1,ysgp1,sg_maxlat - use grdvars, only : latBu,lonBu,lonCt - use grdvars, only : angq,anglet use grdvars, only : debug implicit none contains - !> Find the rotation angle on corner grid (Bu) points using the full MOM6 supergrid + !> Find the rotation angle on center (Bu) grid points !! + !! @param[in] iind the start/end index in the i-dimension of the grid + !! @param[in] jind the start/end index in the j-dimension of the grid + !! @param[in] xangCt the angle across the tripole seam + !! @param[in] anglet the rotation angle on Ct points + !! @param[out] angle the rotation angle on Bu points !! @author Denise.Worthen@noaa.gov - subroutine find_angq - ! local variables - integer :: i,j,i1,i2,m,n + subroutine find_angq(iind,jind,xangCt,anglet,angle) - ! pole locations on SG - integer(int_kind) :: ipolesg(2) + integer , intent(in) :: iind(2),jind(2) + real(dbl_kind), intent(in) :: xangCt(:) + real(dbl_kind), intent(in) :: anglet(:,:) + real(dbl_kind), intent(out) :: angle(:,:) - ! from geolonB fix in MOM6 - real(dbl_kind) :: len_lon ! The periodic range of longitudes, usually 360 degrees. - real(dbl_kind) :: pi_720deg ! One quarter the conversion factor from degrees to radians. - real(dbl_kind) :: lonB(2,2) ! The longitude of a point, shifted to have about the same value. - real(dbl_kind) :: lon_scale = 0.0 + ! local variables + integer :: i,j + + real(dbl_kind) :: angle_0, angle_w, angle_s, angle_sw + real(dbl_kind) :: p25 = 0.25 !--------------------------------------------------------------------- - ! to find angleq on seam, replicate supergrid values across seam + ! find the angle on corners using the same relationship CICE uses + ! internally to calculate angles on Ct using angles on Bu + ! + ! w-----------------0 Ct(i+1,j+1) + ! | | + ! ----------Bu(i,j)---------- Bu lies on seam at j=nj + ! | | + ! Ct(i,j) sw----------------s + ! !--------------------------------------------------------------------- - angq = 0.0 - xsgp1 = 0.0; ysgp1 = 0.0 - !pole on supergrid - ipolesg = -1 - j = ny - do i = 1,nx/2 - if(y(i,j) .eq. 
sg_maxlat)ipolesg(1) = i - enddo - do i = nx/2+1,nx - if(y(i,j) .eq. sg_maxlat)ipolesg(2) = i + angle = 0.0 + do j = jind(1)+1,jind(2) + do i = iind(1),iind(2)-1 + if (j .lt. jind(2)) then + angle_0 = anglet(i+1,j+1) + angle_w = anglet(i, j+1) + angle_s = anglet(i+1,j ) + angle_sw = anglet(i ,j ) + else + angle_0 = xangCt(i+1 ) + angle_w = xangCt(i ) + angle_s = anglet(i+1,j) + angle_sw = anglet(i, j) + end if + angle(i,j) = atan2(p25*(sin(angle_0) + sin(angle_w) + sin(angle_s) + sin(angle_sw)), & + p25*(cos(angle_0) + cos(angle_w) + cos(angle_s) + cos(angle_sw))) + + if (abs(angle(i,j)) .le. 1.0e-10)angle(i,j) = 0.0 + enddo enddo - if(debug)print *,'poles found at ',ipolesg - xsgp1(:,0:ny) = x(:,0:ny) - ysgp1(:,0:ny) = y(:,0:ny) + end subroutine find_angq - !check - do i = ipolesg(1)-5,ipolesg(1)+5 - i2 = ipolesg(2)+(ipolesg(1)-i)+1 - if(debug)print *,i,i2 - enddo - print * - do i = ipolesg(2)-5,ipolesg(2)+5 - i2 = ipolesg(2)+(ipolesg(1)-i)+1 - if(debug)print *,i,i2 - enddo + !> Verify the rotation angle on center (Ct) grid points using angle on corner + !! (Bu) grid points + !! + !! @param[in] iind the start/end index in the i-dimension of the grid + !! @param[in] jind the start/end index in the j-dimension of the grid + !! @param[in] angle the rotation angle on Bu points + !! @param[out] angchk the rotation angle on Ct points + !! @author Denise.Worthen@noaa.gov + subroutine find_angchk(iind,jind,angle,angchk) - !replicate supergrid across pole - do i = 1,nx - i2 = ipolesg(2)+(ipolesg(1)-i) - xsgp1(i,ny+1) = xsgp1(i2,ny) - ysgp1(i,ny+1) = ysgp1(i2,ny) - enddo + integer , intent(in) :: iind(2),jind(2) + real(dbl_kind), intent(in) :: angle(:,:) + real(dbl_kind), intent(out) :: angchk(:,:) - !check - j = ny+1 - i1 = ipolesg(1); i2 = ipolesg(2)-(ipolesg(1)-i1) - print *,'replicate X across seam on SG' - print *,xsgp1(i1-2,j),xsgp1(i2+2,j) - print *,xsgp1(i1-1,j),xsgp1(i2+1,j) - print *,xsgp1(i1, j),xsgp1(i2, j) - print *,xsgp1(i1+1,j),xsgp1(i2-1,j) - print *,xsgp1(i1+2,j),xsgp1(i2-2,j) - - print *,'replicate Y across seam on SG' - print *,ysgp1(i1-2,j),ysgp1(i2+2,j) - print *,ysgp1(i1-1,j),ysgp1(i2+1,j) - print *,ysgp1(i1, j),ysgp1(i2, j) - print *,ysgp1(i1+1,j),ysgp1(i2-1,j) - print *,ysgp1(i1+2,j),ysgp1(i2-2,j) + ! local variables + integer :: i,j + real(dbl_kind) :: angle_0, angle_w, angle_s, angle_sw + real(dbl_kind) :: p25 = 0.25 !--------------------------------------------------------------------- - ! rotation angle on supergrid vertices - ! lonB: x(i-1,j-1) has same relationship to x(i,j) on SG as - ! geolonT(i,j) has to geolonBu(i,j) on the reduced grid + ! check: calculate anglet from angle on corners as CICE does internally. + ! since angle changes sign between CICE and MOM6, (-1)*angchk ~ anglet + ! + ! w-----------------0 Bu(i,j) + ! | | + ! | Ct(i,j) | + ! | | + ! Bu(i-1,j-1) sw----------------s + ! !--------------------------------------------------------------------- - ! 
constants as defined in MOM - pi_720deg = atan(1.0) / 180.0 - len_lon = 360.0 - do j=1,ny ; do i=1,nx-1 - do n=1,2 ; do m=1,2 - lonB(m,n) = modulo_around_point(xsgp1(I+m-2,J+n-2), xsgp1(i-1,j-1), len_lon) - enddo; enddo - lon_scale = cos(pi_720deg*(ysgp1(i-1,j-1) + ysgp1(i+1,j-1) + & - ysgp1(i-1,j+1) + ysgp1(i+1,j+1)) ) - angq(i,j) = atan2(lon_scale*((lonB(1,2) - lonB(2,1)) + (lonB(2,2) - lonB(1,1))), & - ysgp1(i-1,j+1) + ysgp1(i+1,j+1) - & - ysgp1(i-1,j-1) - ysgp1(i+1,j-1) ) - enddo; enddo - - !check - if(debug) then - j = ny - i1 = ipolesg(1); i2 = ipolesg(2)-(ipolesg(1)-i1) - print *,'angq along seam on SG' - print *,angq(i1-2,j),angq(i2+2,j) - print *,angq(i1-1,j),angq(i2+1,j) - print *,angq(i1, j),angq(i2, j) - print *,angq(i1+1,j),angq(i2-1,j) - print *,angq(i1+2,j),angq(i2-2,j) - end if + angchk = 0.0 + do j = jind(1)+1,jind(2) + do i = iind(1)+1,iind(2) + angle_0 = angle(i ,j ) + angle_w = angle(i-1,j ) + angle_s = angle(i, j-1) + angle_sw = angle(i-1,j-1) + angchk(i,j) = atan2(p25*(sin(angle_0) + sin(angle_w) + sin(angle_s) + sin(angle_sw)), & + p25*(cos(angle_0) + cos(angle_w) + cos(angle_s) + cos(angle_sw))) + enddo + enddo - end subroutine find_angq + end subroutine find_angchk !> Find the rotation angle on center (Ct) grid points !! + !! @param[in] iind the start/end index in the i-dimension of the grid + !! @param[in] jind the start/end index in the j-dimension of the grid + !! @param[in] lonBu the longitudes of the corner grid points + !! @param[in] latBu the latitudes of the corner grid points + !! @param[in] lonCt the longitudes of the center grid points + !! @param[out] anglet the rotation angle on Ct points !! @author Denise.Worthen@noaa.gov - subroutine find_ang + + subroutine find_ang(iind,jind,lonBu,latBu,lonCt,anglet) + + integer , intent(in) :: iind(2),jind(2) + real(dbl_kind), intent(in) :: lonBu(:,:) + real(dbl_kind), intent(in) :: latBu(:,:) + real(dbl_kind), intent(in) :: lonCt(:,:) + real(dbl_kind), intent(out) :: anglet(:,:) ! local variables integer :: i,j,m,n @@ -151,17 +156,17 @@ subroutine find_ang anglet = 0.0 pi_720deg = atan(1.0) / 180.0 len_lon = 360.0 - do j=1,nj; do i = 1,ni + do j=jind(1),jind(2); do i = iind(1),iind(2) do n=1,2 ; do m=1,2 jj = J+n-2; ii = I+m-2 if(jj .eq. 0)jj = 1 - if(ii .eq. 0)ii = ni + if(ii .eq. 0)ii = iind(2) lonB(m,n) = modulo_around_point(LonBu(ii,jj), LonCt(i,j), len_lon) ! lonB(m,n) = modulo_around_point(LonBu(I+m-2,J+n-2), LonCt(i,j), len_lon) enddo; enddo jj = j-1; ii = i-1 if(jj .eq. 0)jj = 1 - if(ii .eq. 0)ii = ni + if(ii .eq. 0)ii = iind(2) lon_scale = cos(pi_720deg*((LatBu(ii,jj) + LatBu(I,J)) + & (LatBu(I,jj) + LatBu(ii,J)) ) ) anglet(i,j) = atan2(lon_scale*((lonB(1,2) - lonB(2,1)) + (lonB(2,2) - lonB(1,1))), & diff --git a/sorc/cpld_gridgen.fd/docs/cpld_gridgen.md b/sorc/cpld_gridgen.fd/docs/cpld_gridgen.md index 9e34d1ccf..e0572adee 100644 --- a/sorc/cpld_gridgen.fd/docs/cpld_gridgen.md +++ b/sorc/cpld_gridgen.fd/docs/cpld_gridgen.md @@ -20,7 +20,10 @@ For the UFS coupled model application S2S or S2SW, the following fix files are r - The mapped ocean mask on the FV3 tiles -- The ESMF regridding weights required to create the CICE6 IC from CPC (SIS2) reanalysis. +- The ESMF regridding weights required to create the CICE6 IC from CPC (SIS2) reanalysis or to map a 1/4 deg MOM6 or CICE6 tripole restart file +to a lower tripole resolution. + +- The latitude,longitude,depth and mask arrays required by WW3 to create a mod_def file. 
- The ESMF regridding weights required to remap the CICE6 or MOM6 output from tripole grid to a rectilinear grid (optional). @@ -30,7 +33,7 @@ Since MOM6 creates the model grid at runtime (including adjusting the land mask, ### MOM6 grids -The MOM6 supergrid contains a MOM6 grid at twice the desired resolution. The indexing of the supergrid vs the reduced grid is: +The MOM6 supergrid contains a MOM6 grid at twice the desired resolution. The indexing of the supergrid vs the reduced grid is: Super Grid Reduced Grid @@ -62,7 +65,7 @@ MOM6 uses an Arakawa C grid. Within cpld_gridgen, these are referred to as "stag ### Rotation angles -For the tripole grid, a rotation angle is defined to translate vectors to/from the grid (i-j) orientation from/to true E-W. The rotation angle is calculated at run-time in MOM6 (src/initialization/MOM_shared_initialization.F90). However, CICE6 requires a rotation at the corner (``Bu``) grid points, which for points along the tripole seam requires values on the other side of the tripole fold. In cpld_gridgen, these values are found by "flipping over" the values on the last row of the MOM6 super-grid. If ``ipL`` and ``ipR`` are the i-indices of the poles along the last j-row: +For the tripole grid, a rotation angle is defined to translate vectors to/from the grid (i-j) orientation from/to true E-W. The rotation angle on ``Ct`` grid points is calculated at run-time in MOM6 (src/initialization/MOM_shared_initialization.F90). However, CICE6 requires a rotation at the corner (``Bu``) grid points. To find these angles, the rotation angle on ``Ct`` points on the opposite side of the tripole fold are used. In cpld_gridgen, these values are found by "flipping over" and changing the sign of the values on the last row of the MOM6 grid. If ``ipL`` and ``ipR`` are the i-indices of the poles along the last j-row: ipL-1 ipL ipL+1 ipR-1 ipR ipR+1 @@ -77,14 +80,14 @@ then after folding along the tripole seam, ``ipL`` and ``ipR`` must align: x-------x-------x -Using the folded seam, the values required for calculating the rotation angle on the ``Bu`` grid points are available and can be calculated in the same way as MOM6 calculates rotation angles for the ``Ct`` grid points. +Using the folded seam, the values of the rotation on ``Ct`` points across the seam are known. The same procedure that CICE uses internally to calculate the ``Ct`` angles from the ``Bu`` angles can be used to instead calculate the ``Bu`` angles knowing the ``Ct`` angles. ### SCRIP format files -For calculating interpolation weights using ESMF, a SCRIP file needs to be provided. A SCIP file contains the both the grid locations of any stagger grid location (e.g. ``Ct``) and the associated grid vertices for that point. As seen from the above diagram, for the ``Ct`` points, those grid vertices are given by the ``Bu`` grid locations. +For calculating interpolation weights using ESMF, a SCRIP file needs to be provided. A SCIP file contains the both the grid locations of any stagger grid location (e.g. ``Ct``) and the associated grid vertices for that point. As seen from the above diagram, for the ``Ct`` points, those grid vertices are given by the ``Bu`` grid locations. SCRIP requires that the vertices be ordered counter-clockwise so that the center grid point is always to the left of the vertex. In cpld_gridgen, vertices are defined counter-clockwise from upper right. 
``Ct`` vertices are located on the ``Bu`` grid (as shown above), ``Cu`` vertices on the ``Cv`` grid, ``Cv`` vertices on the ``Cu`` grid and ``Bu`` vertices on the ``Ct`` grid. For example, for the ``Ct`` grid, the vertices are: - + Vertex #2 Vertex #1 Bu(i-1,j) Bu(i,j) Ct(i,j) @@ -96,7 +99,7 @@ so that the vertices for the ``Ct`` grid are found as off-sets of the i,j index iVertCt(4) = (/0, -1, -1, 0/) jVertCt(4) = (/0, 0, -1, -1/) - + Careful examination of the remaining stagger locations lead to similar definitions for the i,j offsets required to extract the vertices, all of which can be defined in terms of the ``iVertCt`` and ``jVertCt`` values. Special treatment is require at the bottom of the grid, where the vertices of the ``Ct`` and ``Cu`` grid must be set manually (note, these points are on land.) The top of the grid also requires special treatment because the required vertices are located across the tripole seam. This is accomplished by creating 1-d arrays which hold the ``Ct`` and ``Cu`` grid point locations across the matched seam. @@ -111,7 +114,7 @@ The cpld_gridgen program and associated script related functions perform the fol 3. create the CICE6 grid variables and writes the required CICE6 grid file 4. create a SCRIP file for the center stagger (``Ct``) grid points and a second SCRIP file also containing the land mask 5. create the ESMF conservative regridding weights to map the ocean mask to the FV3 tiles and write the mapped mask to 6 tile files -6. create the ESMF regridding weights to map the 1/4 CICE6 restart file to a lower resolution tripole grid +6. create the ESMF regridding weights to map a 1/4 deg ice or ocean restart file to a lower resolution tripole grid 7. optionally call a routine to generate ESMF regridding weights to map the tripole grid to a set of rectilinear grids 8. use the command line command *ESMF_Scrip2Unstruct* to generate the ocean mesh from the SCRIP file containing the land mask (item 4) 9. use an NCO command line command to generate the CICE6 land mask file from the CICE6 grid file @@ -134,11 +137,11 @@ The exact list of files produced by the *cpld_gridgen.sh* script will vary depen mesh.mx025.nc the ocean and ice mesh file used at runtime by CICE6, MOM6, and CMEPS C384.mx025.tile[1-6].nc the mapped ocean mask on the ATM tiles used to create ATM ICs consistent with the
fractional grid - +
- -* If the optional post-weights are generated, the following files will be produced in the output location: - + +* If the optional post-weights are generated, the following files will be produced in the output location: + @@ -147,27 +150,29 @@ The exact list of files produced by the *cpld_gridgen.sh* script will vary depen same tripole grid using bilinear mapping
Optional post-weights files for 1/4deg
tripole.mx025.Ct.to.rect.[destination resolution].[bilinear][conserve].nc the ESMF weights for mapping variables
on the center (Ct) stagger location on
the tripole grid to a rectilinear grid
with [destination resolution] using
either bilinear or conservative mapping
- +
-* If a resolution other than 1/4 degree is used in *cpld_gridgen.sh*, the following file will be produced in the output location: - -
+* The following file will be produced in the output location to map 1/4 degree tripole values to a tripole grid of lower resolution. + - +
-Output files for CICE6 IC creation at tripole destination resolution
+Output files for down-sampled IC creation at tripole destination resolution
File name Function -
tripole.mx025.Ct.to.mx[destination resolution].Ct.neareststod.nc the ESMF weights for mapping the 1/4 CICE ICs to
- a tripole [destination resolution] using nearest
source-to-destination mapping +
tripole.mx025.Ct.to.mx[destination resolution].Ct.neareststod.nc the ESMF weights for mapping 1/4 deg tripole ICs to
+ a lower tripole destination resolution using nearest
source-to-destination mapping +
tripole.mx025.Ct.to.mx[destination resolution].Ct.bilinear.nc the ESMF weights for mapping 1/4 deg tripole ICs to
+ a lower tripole destination resolution using
bilinear mapping +
tripole.[destination resolution].Ct.to.[Cu][Cv][Bu].bilinear.nc the ESMF weights for mapping downscaled IC values on a
+ tripole grid from Ct locations to the native stagger locations
- +
- + * If run-time land mask changes for MOM6 are requested, the following file will be produced in the output location: - - + +
Output files for run-time modification of MOM6 land mask
File name Function
ufs.[Default filename].nc Topo-edits required for UFS application. These are appended to the existing default topo
edits file and implemented at run time with the parameter flag
``ALLOW_LANDMASK_CHANGES=true``. All files produced by the *cpld_gridgen.sh* will be
consistent with this run-time land mask.
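For illustration only, a minimal sketch of the corresponding MOM6 run-time parameter entries is shown below. Only ``ALLOW_LANDMASK_CHANGES`` is taken from the text above; the ``TOPO_EDITS_FILE`` parameter name is an assumption made for this sketch, and the file name simply follows the ``ufs.`` naming pattern seen in the regression test logs earlier in this series.

    ALLOW_LANDMASK_CHANGES = True                  ! accept the land mask changes implied by the appended topo edits
    TOPO_EDITS_FILE = "ufs.topo_edits_011818.nc"   ! assumed parameter name pointing at the modified topo-edits file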
- diff --git a/sorc/cpld_gridgen.fd/gen_fixgrid.F90 b/sorc/cpld_gridgen.fd/gen_fixgrid.F90 index 36f118eed..f2fe0dc88 100644 --- a/sorc/cpld_gridgen.fd/gen_fixgrid.F90 +++ b/sorc/cpld_gridgen.fd/gen_fixgrid.F90 @@ -20,7 +20,7 @@ program gen_fixgrid use grdvars use inputnml use gengrid_kinds, only: CL, CS, dbl_kind, real_kind, int_kind - use angles, only: find_angq, find_ang + use angles, only: find_ang, find_angq, find_angchk use vertices, only: fill_vertices, fill_bottom, fill_top use mapped_mask, only: make_frac_land use postwgts, only: make_postwgts @@ -49,7 +49,7 @@ program gen_fixgrid integer :: rc,ncid,id,xtype integer :: i,j,k,i2,j2 - integer :: ii,jj + integer :: ii integer :: localPet, nPet logical :: fexist = .false. @@ -147,9 +147,6 @@ program gen_fixgrid if(xtype.eq. 6)wet4 = real(wet8,4) - !print *,minval(wet8),maxval(wet8) - !print *,minval(wet4),maxval(wet4) - !--------------------------------------------------------------------- ! read the MOM6 depth file !--------------------------------------------------------------------- @@ -201,17 +198,17 @@ program gen_fixgrid ! this modified topoedits file !--------------------------------------------------------------------- - fsrc = trim(dirsrc)//'/'//trim(editsfile) - if(editmask)fsrc = trim(dirout)//'/'//'ufs.'//trim(editsfile) - - if (trim(editsfile) /= 'none') then - inquire(file=trim(fsrc),exist=fexist) - if (.not. fexist) then - print '(a)', 'Required topoedits file '//trim(fsrc)//' is missing ' - call abort() - end if + fsrc = trim(dirsrc)//'/'//trim(editsfile) + if(editmask)fsrc = trim(dirout)//'/'//'ufs.'//trim(editsfile) + + if (trim(editsfile) /= 'none') then + inquire(file=trim(fsrc),exist=fexist) + if (.not. fexist) then + print '(a)', 'Required topoedits file '//trim(fsrc)//' is missing ' + call abort() end if - call apply_topoedits(fsrc) + end if + call apply_topoedits(fsrc) !--------------------------------------------------------------------- ! read MOM6 supergrid file @@ -236,15 +233,9 @@ program gen_fixgrid rc = nf90_get_var(ncid, id, dy) rc = nf90_close(ncid) - !print *,'super grid size ',size(y,1),size(y,2) - !print *,'max lat in super grid ',maxval(y) sg_maxlat = maxval(y) - - !--------------------------------------------------------------------- - ! find the angle on corners---this requires the supergrid - !--------------------------------------------------------------------- - - call find_angq + write(logmsg,'(a,f12.2)')'max lat in super grid ',maxval(y) + print '(a)',trim(logmsg) !--------------------------------------------------------------------- ! fill grid variables @@ -256,8 +247,6 @@ program gen_fixgrid !deg->rad ulon(i,j) = x(i2,j2)*deg2rad ulat(i,j) = y(i2,j2)*deg2rad - !in rad already - angle(i,j) = -angq(i2,j2) !m->cm htn(i,j) = (dx(i2-1,j2) + dx(i2,j2))*100._dbl_kind hte(i,j) = (dy(i2,j2-1) + dy(i2,j2))*100._dbl_kind @@ -280,12 +269,63 @@ program gen_fixgrid enddo !--------------------------------------------------------------------- - ! find the angle on centers---this does not requires the supergrid + ! locate the ith index of the two poles on j=nj + ! the corner points must lie on the pole + !--------------------------------------------------------------------- + + ipole = -1 + j = nj + do i = 1,ni/2 + if(latBu(i,j) .eq. sg_maxlat)ipole(1) = i + enddo + do i = ni/2+1,ni + if(latBu(i,j) .eq. 
sg_maxlat)ipole(2) = i + enddo + write(logmsg,'(a,2i6,2f12.2)')'poles found at i = ',ipole, latBu(ipole(1),nj), & + latBu(ipole(2),nj) + print '(a)',trim(logmsg) + + !--------------------------------------------------------------------- + ! find the angle on centers using the same procedure as MOM6 !--------------------------------------------------------------------- - call find_ang - print *,'ANGLET ',minval(anglet),maxval(anglet) - print *,'ANGLE ',minval(angle),maxval(angle) + call find_ang((/1,ni/),(/1,nj/),lonBu,latBu,lonCt,anglet) + write(logmsg,'(a,2f12.2)')'ANGLET min,max: ',minval(anglet),maxval(anglet) + print '(a)',trim(logmsg) + write(logmsg,'(a,2f12.2)')'ANGLET edges i=1,i=ni: ',anglet(1,nj),anglet(ni,nj) + print '(a)',trim(logmsg) + + xangCt(:) = 0.0 + do i = 1,ni + i2 = ipole(2)+(ipole(1)-i)+1 + xangCt(i) = -anglet(i2,nj) ! angle changes sign across seam + end do + + !--------------------------------------------------------------------- + ! find the angle on corners using the same procedure as CICE6 + !--------------------------------------------------------------------- + + call find_angq((/1,ni/),(/1,nj/),xangCt,anglet,angle) + angle(ni,:) = -angle(1,:) + ! reverse angle for CICE + angle = -angle + write(logmsg,'(a,2f12.2)')'ANGLE min,max: ',minval(angle),maxval(angle) + print '(a)',trim(logmsg) + write(logmsg,'(a,2f12.2)')'ANGLE edges i=1,i=ni: ',angle(1,nj),angle(ni,nj) + print '(a)',trim(logmsg) + + !--------------------------------------------------------------------- + ! check the Bu angle + !--------------------------------------------------------------------- + + call find_angchk((/1,ni/),(/1,nj/),angle,angchk) + angchk(1,:) = -angchk(ni,:) + ! reverse angle for MOM6 + angchk = -angchk + write(logmsg,'(a,2f12.2)')'ANGCHK min,max: ',minval(angchk),maxval(angchk) + print '(a)',trim(logmsg) + write(logmsg,'(a,2f12.2)')'ANGCHK edges i=1,i=ni: ',angchk(1,nj),angchk(ni,nj) + print '(a)',trim(logmsg) !--------------------------------------------------------------------- ! For the 1/4deg grid, hte at j=720 and j = 1440 is identically=0.0 for @@ -295,8 +335,7 @@ program gen_fixgrid ! hte < 1.0 !--------------------------------------------------------------------- - write(logmsg,'(a,2e12.5)')'min vals of hte at folds ', & - minval(hte(ni/2,:)),minval(hte(ni,:)) + write(logmsg,'(a,2e12.5)')'min vals of hte at folds ', minval(hte(ni/2,:)),minval(hte(ni,:)) print '(a)',trim(logmsg) do j = 1,nj ii = ni/2 @@ -304,37 +343,13 @@ program gen_fixgrid ii = ni if(hte(ii,j) .le. 1.0)hte(ii,j) = 0.5*(hte(ii-1,j) + hte( 1,j)) enddo - write(logmsg,'(a,2e12.5)')'min vals of hte at folds ', & - minval(hte(ni/2,:)),minval(hte(ni,:)) + write(logmsg,'(a,2e12.5)')'min vals of hte at folds ', minval(hte(ni/2,:)),minval(hte(ni,:)) print '(a)',trim(logmsg) !--------------------------------------------------------------------- - ! + ! find required extended values for setting all vertices !--------------------------------------------------------------------- - where(lonCt .lt. 0.0)lonCt = lonCt + 360._dbl_kind - where(lonCu .lt. 0.0)lonCu = lonCu + 360._dbl_kind - where(lonCv .lt. 0.0)lonCv = lonCv + 360._dbl_kind - where(lonBu .lt. 0.0)lonBu = lonBu + 360._dbl_kind - - !--------------------------------------------------------------------- - ! some basic error checking - ! find the i-th index of the poles at j= nj - ! the corner points must lie on the pole - !--------------------------------------------------------------------- - - ipole = -1 - j = nj - do i = 1,ni/2 - if(latBu(i,j) .eq. 
sg_maxlat)ipole(1) = i - enddo - do i = ni/2+1,ni - if(latBu(i,j) .eq. sg_maxlat)ipole(2) = i - enddo - write(logmsg,'(a,2i6,2f12.2)')'poles found at i = ',ipole,latBu(ipole(1),nj), & - latBu(ipole(2),nj) - print '(a)',trim(logmsg) - if(debug)call checkseam do i = 1,ni @@ -435,7 +450,7 @@ program gen_fixgrid write(form1,'(a)')'('//trim(cnx)//'f14.8)' write(form2,'(a)')'('//trim(cnx)//'i2)' - allocate(ww3mask(1:ni,1:nj)); ww3mask = wet4 + allocate(ww3mask(1:ni,1:nj)); ww3mask = int(wet4) allocate(ww3dpth(1:ni,1:nj)); ww3dpth = dp4 where(latCt .ge. maximum_lat)ww3mask = 3 @@ -504,7 +519,17 @@ program gen_fixgrid fwgt = trim(dirout)//'/'//'tripole.mx025.Ct.to.mx'//trim(res)//'.Ct.neareststod.nc' logmsg = 'creating weight file '//trim(fwgt) print '(a)',trim(logmsg) + call ESMF_RegridWeightGen(srcFile=trim(fsrc),dstFile=trim(fdst), & + weightFile=trim(fwgt), regridmethod=method, & + ignoreDegenerate=.true., unmappedaction=ESMF_UNMAPPEDACTION_IGNORE, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT) + method=ESMF_REGRIDMETHOD_BILINEAR + fdst = trim(dirout)//'/'//'Ct.mx'//trim(res)//'_SCRIP.nc' + fwgt = trim(dirout)//'/'//'tripole.mx025.Ct.to.mx'//trim(res)//'.Ct.bilinear.nc' + logmsg = 'creating weight file '//trim(fwgt) + print '(a)',trim(logmsg) call ESMF_RegridWeightGen(srcFile=trim(fsrc),dstFile=trim(fdst), & weightFile=trim(fwgt), regridmethod=method, & ignoreDegenerate=.true., unmappedaction=ESMF_UNMAPPEDACTION_IGNORE, rc=rc) @@ -517,6 +542,19 @@ program gen_fixgrid end if end if + ! tripole Ct->tripole Bu for CICE are only for CICE IC creation + fsrc = trim(dirout)//'/'//'Ct.mx'//trim(res)//'_SCRIP.nc' + fdst = trim(dirout)//'/'//'Bu.mx'//trim(res)//'_SCRIP.nc' + fwgt = trim(dirout)//'/'//'tripole.mx'//trim(res)//'.Ct.to.Bu.bilinear.nc' + logmsg = 'creating weight file '//trim(fwgt) + print '(a)',trim(logmsg) + + call ESMF_RegridWeightGen(srcFile=trim(fsrc),dstFile=trim(fdst), & + weightFile=trim(fwgt), regridmethod=method, & + ignoreDegenerate=.true., unmappedaction=ESMF_UNMAPPEDACTION_IGNORE, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & + line=__LINE__, file=__FILE__)) call ESMF_Finalize(endflag=ESMF_END_ABORT) + !--------------------------------------------------------------------- ! !--------------------------------------------------------------------- @@ -527,8 +565,8 @@ program gen_fixgrid ! clean up !--------------------------------------------------------------------- - deallocate(x,y, angq, dx, dy, xsgp1, ysgp1) - deallocate(areaCt, anglet, angle) + deallocate(x, y, dx, dy) + deallocate(areaCt, anglet, angle, angchk) deallocate(latCt, lonCt) deallocate(latCv, lonCv) deallocate(latCu, lonCu) diff --git a/sorc/cpld_gridgen.fd/grdvars.F90 b/sorc/cpld_gridgen.fd/grdvars.F90 index 578ac269a..9c549a494 100644 --- a/sorc/cpld_gridgen.fd/grdvars.F90 +++ b/sorc/cpld_gridgen.fd/grdvars.F90 @@ -68,17 +68,10 @@ module grdvars ! Super-grid source grid variables real(dbl_kind), allocatable, dimension(:,:) :: x !< The longitudes of the MOM6 supergrid real(dbl_kind), allocatable, dimension(:,:) :: y !< The latitudes of the MOM6 supergrid - real(dbl_kind), allocatable, dimension(:,:) :: angq !< The grid rotation angle at the Bu (or corner) - !! grid points of the super grid - real(dbl_kind), allocatable, dimension(:,:) :: dx !< The grid cell width in meters of the supergrid !! 
in the x-direction (i-dimension) real(dbl_kind), allocatable, dimension(:,:) :: dy !< The grid cell width in meters of the supergrid !! in the y-direction (j-dimension) - real(dbl_kind), allocatable, dimension(:,:) :: xsgp1 !< The longitudes of the super-grid replicated - !! across the tripole seam - real(dbl_kind), allocatable, dimension(:,:) :: ysgp1 !< The latitudes of the super-grid replicated - !! across the tripole seam ! Output grid variables real(dbl_kind), allocatable, dimension(:,:) :: latCt !< The latitude of the center (tracer) grid points @@ -103,6 +96,8 @@ module grdvars real(dbl_kind), allocatable, dimension(:,:) :: anglet !< The rotation angle on Ct points (opposite sense !! from angle) real(dbl_kind), allocatable, dimension(:,:) :: angle !< The rotation angle on Bu points + real(dbl_kind), allocatable, dimension(:,:) :: angchk !< The rotation angle on Ct points, as calculated by + !! CICE internally using angle on Bu real(dbl_kind), allocatable, dimension(:,:,:) :: latCt_vert !< The latitudes of the 4 vertices of each Ct grid !! point @@ -129,6 +124,9 @@ module grdvars !! opposite side of the tripole seam real(dbl_kind), allocatable, dimension(:) :: xlatCt !< The latitude of the Ct grid points on the !! opposite side of the tripole seam + real(dbl_kind), allocatable, dimension(:) :: xangCt !< The rotation angle on the Ct grid points on the + !! opposite side of the tripole seam + real(dbl_kind), allocatable, dimension(:) :: xlonCu !< The longitude of the Cu grid points on the !! opposite side of the tripole seam real(dbl_kind), allocatable, dimension(:) :: xlatCu !< The latitude of the Cu grid points on the @@ -172,24 +170,22 @@ module grdvars subroutine allocate_all - allocate( x(0:nx,0:ny), y(0:nx,0:ny), angq(0:nx,0:ny) ) + allocate( x(0:nx,0:ny), y(0:nx,0:ny) ) allocate( dx(nx,0:ny), dy(0:nx,ny) ) - allocate( xsgp1(0:nx,0:ny+1), ysgp1(0:nx,0:ny+1) ) - allocate( latCt(ni,nj), lonCt(ni,nj) ) allocate( latCv(ni,nj), lonCv(ni,nj) ) allocate( latCu(ni,nj), lonCu(ni,nj) ) allocate( latBu(ni,nj), lonBu(ni,nj) ) - allocate( areaCt(ni,nj), anglet(ni,nj), angle(ni,nj) ) + allocate( areaCt(ni,nj), anglet(ni,nj), angle(ni,nj), angchk(ni,nj)) allocate( latCt_vert(ni,nj,nv), lonCt_vert(ni,nj,nv) ) allocate( latCv_vert(ni,nj,nv), lonCv_vert(ni,nj,nv) ) allocate( latCu_vert(ni,nj,nv), lonCu_vert(ni,nj,nv) ) allocate( latBu_vert(ni,nj,nv), lonBu_vert(ni,nj,nv) ) - allocate( xlonCt(ni), xlatCt(ni) ) + allocate( xlonCt(ni), xlatCt(ni), xangCt(ni) ) allocate( xlonCu(ni), xlatCu(ni) ) allocate( dlatBu(ni), dlatCv(ni) ) diff --git a/sorc/cpld_gridgen.fd/inputnml.F90 b/sorc/cpld_gridgen.fd/inputnml.F90 index 6e363657b..96d86f4c3 100644 --- a/sorc/cpld_gridgen.fd/inputnml.F90 +++ b/sorc/cpld_gridgen.fd/inputnml.F90 @@ -26,7 +26,8 @@ subroutine read_inputnml(fname) character(len=*), intent(in) :: fname ! local variables - integer :: stderr, iounit, rc + integer :: iounit, rc + character(len=200) :: tmpstr namelist /grid_nml/ ni, nj, dirsrc, dirout, fv3dir, topofile, editsfile, & res, atmres, npx, editmask, debug, & @@ -34,24 +35,25 @@ subroutine read_inputnml(fname) ! Check whether file exists. inquire (file=trim(fname), iostat=rc) - if (rc /= 0) then - write (stderr, '(3a)') 'Error: input file "', trim(fname), '" does not exist.' - return + write (0, '(3a)') 'Error: input file "', trim(fname), '" does not exist.' + stop 1 end if ! Open and read Namelist file. 
open (action='read', file=trim(fname), iostat=rc, newunit=iounit) read (nml=grid_nml, iostat=rc, unit=iounit) + if (rc /= 0) then + backspace(iounit) + read(iounit,'(a)')tmpstr + write (6, '(a)') 'Error: invalid Namelist format '//trim(tmpstr) + stop 1 + end if + close(iounit) ! set supergrid dimensions nx = ni*2 ny = nj*2 - if (rc /= 0) then - write (stderr, '(a)') 'Error: invalid Namelist format.' - end if - - close (iounit) end subroutine read_inputnml end module inputnml diff --git a/sorc/cpld_gridgen.fd/topoedits.F90 b/sorc/cpld_gridgen.fd/topoedits.F90 index 6e1889474..e0b617b01 100644 --- a/sorc/cpld_gridgen.fd/topoedits.F90 +++ b/sorc/cpld_gridgen.fd/topoedits.F90 @@ -52,8 +52,8 @@ subroutine add_topoedits(fsrc,fdst) ! return the existing values allocate(ieds1(cnt1)); ieds1 = 0 - allocate(jeds1(cnt1)); jeds2 = 0 - allocate(zeds1(cnt1)); zeds2 = 0.0 + allocate(jeds1(cnt1)); jeds1 = 0 + allocate(zeds1(cnt1)); zeds1 = 0.0 rc = nf90_open(fsrc, nf90_nowrite, ncid) rc = nf90_inq_varid(ncid, 'iEdit', id) diff --git a/sorc/cpld_gridgen.fd/tripolegrid.F90 b/sorc/cpld_gridgen.fd/tripolegrid.F90 index feb287101..c707833ca 100644 --- a/sorc/cpld_gridgen.fd/tripolegrid.F90 +++ b/sorc/cpld_gridgen.fd/tripolegrid.F90 @@ -13,7 +13,7 @@ module tripolegrid use grdvars, only: lonCu,latCu,lonCu_vert,latCu_vert use grdvars, only: lonCv,latCv,lonCv_vert,latCv_vert use grdvars, only: lonBu,latBu,lonBu_vert,latBu_vert - use grdvars, only: wet4,areaCt,angleT,dp4 + use grdvars, only: wet4,areaCt,angleT,dp4,angle,angchk use charstrings, only: logmsg,history use vartypedefs, only: maxvars, fixvars, fixvars_typedefine use netcdf @@ -67,6 +67,12 @@ subroutine write_tripolegrid(fname) !angleT rc = nf90_def_var(ncid, 'anglet', nf90_double, dim2, id) rc = nf90_put_att(ncid, id, 'units', 'radians') + !angle (angBu) + rc = nf90_def_var(ncid, 'angle', nf90_double, dim2, id) + rc = nf90_put_att(ncid, id, 'units', 'radians') + !angchk + rc = nf90_def_var(ncid, 'angchk', nf90_double, dim2, id) + rc = nf90_put_att(ncid, id, 'units', 'radians') !bathymetry rc = nf90_def_var(ncid, 'depth', nf90_float, dim2, id) rc = nf90_put_att(ncid, id, 'units', 'm') @@ -102,6 +108,12 @@ subroutine write_tripolegrid(fname) rc = nf90_inq_varid(ncid,'anglet', id) rc = nf90_put_var(ncid, id, anglet) + rc = nf90_inq_varid(ncid, 'angle', id) + rc = nf90_put_var(ncid, id, angle) + + rc = nf90_inq_varid(ncid,'angchk', id) + rc = nf90_put_var(ncid, id, angchk) + rc = nf90_inq_varid(ncid, 'depth', id) rc = nf90_put_var(ncid, id, dp4) diff --git a/tests/cpld_gridgen/ftst_find_angq.F90 b/tests/cpld_gridgen/ftst_find_angq.F90 index bcddfb8eb..b24c6965e 100644 --- a/tests/cpld_gridgen/ftst_find_angq.F90 +++ b/tests/cpld_gridgen/ftst_find_angq.F90 @@ -1,108 +1,335 @@ -! Unit test for cpld_gridgen routine "find_angq". -! -! Reads a sample MOM6 supergrid and calculates the -! rotation angle on corner points -! -! Author Denise Worthen 2/08/2022 +!> @file +!! @brief unit test for angles +!! @author Denise.Worthen@noaa.gov +!! +!! Given a 5x5 block of coords, calculate the angle, anglet and angchk +!! and compare against values from MOM6 and CICE6 history output. The +!! 3 5x5 blocks are for the 1deg tripole grid. The first two blocks contain +!! the two polar points. The final block is located in the lower right quad +!! of the tripole region (i=300:304,j=306:310). Because the angle calculations +!! involve points outside of the 5x5 blocks (eg, i+1), not all 5x5 points are +!! checked. +!! +!! 
@author Denise.Worthen@noaa.gov program ftst_find_angq - - use netcdf - use grdvars, only : ni,nj,nx,ny - use grdvars, only : x,y,xsgp1,ysgp1,sg_maxlat - use grdvars, only : angq - use angles, only : find_angq - - implicit none - - integer :: i,j,i1, i2 - integer :: rc, ncid, id - - logical :: mastertask = .false. - logical :: debug = .false. - - ! pole locations on SG - integer :: ipolesg(2) - ! unit test values - real(kind=8) :: puny = 1.0e-12 - real(kind=8) :: delta(15) - real(kind=8) :: sumdelta - - print *,"Starting test of cpld_gridgen routine find_angq" - - ! 1deg MOM6 dimensions - ni = 360 - nj = 320 - ! super grid dimensions - nx = 2*ni - ny = 2*nj - - ! supergrid x,y and angles on corners - allocate (x(0:nx,0:ny), y(0:nx,0:ny), angq(0:nx,0:ny)) - ! supergrid "plus 1" arrays - allocate (xsgp1(0:nx,0:ny+1), ysgp1(0:nx,0:ny+1)) - - !open the supergrid file and read the x,y coords - rc = nf90_open('./data/ocean_hgrid.nc', nf90_nowrite, ncid) - rc = nf90_inq_varid(ncid, 'x', id) !lon - rc = nf90_get_var(ncid, id, x) - - rc = nf90_inq_varid(ncid, 'y', id) !lat - rc = nf90_get_var(ncid, id, y) - rc = nf90_close(ncid) - - ! max lat on supergrid - sg_maxlat = maxval(y) - - !pole index on supergrid - ipolesg = -1 - j = ny - do i = 1,nx/2 - if(y(i,j) .eq. sg_maxlat)ipolesg(1) = i - enddo - do i = nx/2+1,nx - if(y(i,j) .eq. sg_maxlat)ipolesg(2) = i - enddo - - ! test angleq calculation - call find_angq - - ! required for checking longitudes across seam - where(xsgp1 .lt. 0.0)xsgp1 = xsgp1 + 360.0 - - j = ny+1 - i1 = ipolesg(1); i2 = ipolesg(2)-(ipolesg(1)-i1) - delta = 0.0 - ! check lons match across seam - delta( 1) = xsgp1(i1-2,j)-xsgp1(i2+2,j) - delta( 2) = xsgp1(i1-1,j)-xsgp1(i2+1,j) - delta( 3) = xsgp1(i1, j)-xsgp1(i2, j) - delta( 4) = xsgp1(i1+1,j)-xsgp1(i2-1,j) - delta( 5) = xsgp1(i1+2,j)-xsgp1(i2-2,j) - ! check lats match across seam - delta( 6) = ysgp1(i1-2,j)-ysgp1(i2+2,j) - delta( 7) = ysgp1(i1-1,j)-ysgp1(i2+1,j) - delta( 8) = ysgp1(i1, j)-ysgp1(i2, j) - delta( 9) = ysgp1(i1+1,j)-ysgp1(i2-1,j) - delta(10) = ysgp1(i1+2,j)-ysgp1(i2-2,j) - ! check angq match across seam - j = ny - delta(11)=angq(i1-2,j)-angq(i2-2,j) - delta(12)=angq(i1-1,j)-angq(i2-1,j) - delta(13)=angq(i1, j)-angq(i2, j) - delta(14)=angq(i1+1,j)-angq(i2+1,j) - delta(15)=angq(i1+2,j)-angq(i2+2,j) - - sumdelta = 0.0 - sumdelta = sum(delta) - - if (sumdelta >= puny) then - print *,'OK' - print *,'SUCCESS!' - deallocate(x,y,xsgp1,ysgp1,angq) - else - print *,'ftst_find_angq failed' - stop 1 - endif - - end program ftst_find_angq + + use gengrid_kinds, only : dbl_kind, real_kind + use angles , only : find_angq, find_ang, find_angchk + + implicit none + + integer , parameter :: ni = 5, nj = 5, nblocks=3 + + ! verification values from model history files, mx100 + ! blocks 1 and 2 center on the left and right tripole + ! block 3 is in lower right quad of the arctic at (300:304,306:310) + real(dbl_kind), dimension(ni,nj,nblocks) :: cice6_angle + real(dbl_kind), dimension(ni,nj,nblocks) :: cice6_anglet + real(dbl_kind), dimension(ni,nj,nblocks) :: mom6_sinrot + + ! block grid point values + real(dbl_kind), dimension(ni,nj,nblocks) :: lonct, lonbu, latbu + + ! test values + real(dbl_kind), dimension(ni,nj,nblocks) :: anglet, angle, angchk + real(dbl_kind), dimension(ni,nblocks) :: xangCt + + real(dbl_kind) :: dmax, diff + character(len=1) :: ck + + integer :: i,i2,j,k + integer :: ipole(nblocks) + logical :: debug = .false. 
+ + !CICE6 + data ((cice6_angle(i,j,1), i=1,ni), j=1,nj) / & + 0.55923, 0.29966, 0.00000, -0.29966, -0.55923, & + 0.70064, 0.38859, 0.00000, -0.38859, -0.70064, & + 0.90943, 0.53371, 0.00000, -0.53371, -0.90943, & + 1.20822, 0.75580, 0.00000, -0.75580, -1.20822, & + 1.38117, 0.89328, 0.00000, -0.89328, -1.38117/ + data ((cice6_angle(i,j,2), i=1,ni), j=1,nj) / & + 0.55923, 0.29966, 0.00000, -0.29966, -0.55923, & + 0.70064, 0.38859, 0.00000, -0.38859, -0.70064, & + 0.90943, 0.53371, 0.00000, -0.53371, -0.90943, & + 1.20822, 0.75580, 0.00000, -0.75580, -1.20822, & + 1.38117, 0.89328, 0.00000, -0.89328, -1.38117/ + data ((cice6_angle(i,j,3), i=1,ni), j=1,nj) / & + -1.14949, -1.16044, -1.17076, -1.18049, -1.18968, & + -1.18188, -1.19217, -1.20185, -1.21097, -1.21957, & + -1.21425, -1.22383, -1.23284, -1.24131, -1.24930, & + -1.24649, -1.25534, -1.26364, -1.27145, -1.27880, & + -1.27854, -1.28661, -1.29419, -1.30130, -1.30799 / + + data ((cice6_anglet(i,j,1), i=1,ni), j=1,nj) / & + 0.60483, 0.38996, 0.13521, -0.13521, -0.38996, & + 0.73204, 0.48692, 0.17198, -0.17198, -0.48692, & + 0.90492, 0.63285, 0.23026, -0.23026, -0.63285, & + 1.13597, 0.85126, 0.32134, -0.32134, -0.85126, & + 1.34026, 1.05945, 0.41175, -0.41175, -1.05945/ + data ((cice6_anglet(i,j,2), i=1,ni), j=1,nj) / & + 0.60483, 0.38996, 0.13521, -0.13521, -0.38996, & + 0.73204, 0.48692, 0.17198, -0.17198, -0.48692, & + 0.90492, 0.63285, 0.23026, -0.23026, -0.63285, & + 1.13597, 0.85126, 0.32134, -0.32134, -0.85126, & + 1.34026, 1.05945, 0.41175, -0.41175, -1.05945/ + data ((cice6_anglet(i,j,3), i=1,ni), j=1,nj) / & + -1.12735, -1.13895, -1.14989, -1.16020, -1.16995, & + -1.16004, -1.17100, -1.18130, -1.19102, -1.20018, & + -1.19278, -1.20303, -1.21267, -1.22174, -1.23029, & + -1.22546, -1.23498, -1.24391, -1.25231, -1.26021, & + -1.25800, -1.26675, -1.27495, -1.28264, -1.28988 / + + ! MOM6 sin_rot + data ((mom6_sinrot(i,j,1), i=1,ni), j=1,nj) / & + -0.57231, -0.38355, -0.13608, 0.13608, 0.38355, & + -0.67404, -0.47460, -0.17342, 0.17342, 0.47460, & + -0.79354, -0.60678, -0.23239, 0.23239, 0.60678, & + -0.91250, -0.79512, -0.32692, 0.32692, 0.79512, & + -0.98949, -0.97272, -0.43486, 0.43486, 0.97272/ + data ((mom6_sinrot(i,j,2), i=1,ni), j=1,nj) / & + -0.57231, -0.38355, -0.13608, 0.13608, 0.38355, & + -0.67404, -0.47460, -0.17342, 0.17342, 0.47460, & + -0.79354, -0.60678, -0.23239, 0.23239, 0.60678, & + -0.91250, -0.79512, -0.32692, 0.32692, 0.79512, & + -0.98949, -0.97272, -0.43486, 0.43486, 0.97272/ + data ((mom6_sinrot(i,j,3), i=1,ni), j=1,nj) / & + 0.90334, 0.90826, 0.91278, 0.91694, 0.92079, & + 0.91689, 0.92120, 0.92516, 0.92881, 0.93216, & + 0.92946, 0.93320, 0.93662, 0.93976, 0.94264, & + 0.94103, 0.94420, 0.94711, 0.94977, 0.95221, & + 0.95154, 0.95419, 0.95661, 0.95882, 0.96085 / + + ! 
lon Ct,Bu and latBu + data ((lonct(i,j,1), i=1,ni), j=1,nj) / & + -245.26937, -232.98519, -218.04609, -201.95391, -187.01481, & + -252.74561, -239.00064, -220.46665, -199.53335, -180.99936, & + -262.76769, -248.28139, -224.73626, -195.26374, -171.71861, & + -275.86250, -263.23478, -234.03925, -185.96075, -156.76522, & + -291.65329, -286.25383, -263.72015, -156.27985, -133.74617 / + data ((lonbu(i,j,1), i=1,ni), j=1,nj) / & + -242.68153, -227.77943, -210.00000, -192.22057, -177.31847, & + -251.01234, -233.49458, -210.00000, -186.50542, -168.98766, & + -262.99894, -243.55611, -210.00000, -176.44389, -157.00106, & + -279.68531, -263.47519, -210.00000, -156.52481, -140.31469, & + -300.00000, -300.00000, -210.00000, -120.00000, -120.00000 / + data ((latbu(i,j,1), i=1,ni), j=1,nj) / & + 88.99001, 89.10706, 89.14964, 89.10706, 88.99001, & + 89.16921, 89.31628, 89.37292, 89.31628, 89.16921, & + 89.31750, 89.50699, 89.58912, 89.50699, 89.31750, & + 89.41886, 89.66093, 89.79818, 89.66093, 89.41886, & + 89.45503, 89.72754, 90.00000, 89.72754, 89.45503 / + + data ((lonct(i,j,2), i=1,ni), j=1,nj) / & + -65.26937, -52.98519, -38.04609, -21.95391, -7.01481, & + -72.74561, -59.00064, -40.46665, -19.53335, -0.99936, & + -82.76769, -68.28139, -44.73626, -15.26374, 8.28139, & + -95.86250, -83.23478, -54.03925, -5.96075, 23.23478, & + -111.65329, -106.25383, -83.72015, 23.72015, 46.25383 / + data ((lonbu(i,j,2), i=1,ni), j=1,nj) / & + -62.68153, -47.77943, -30.00000, -12.22057, 2.68153, & + -71.01234, -53.49458, -30.00000, -6.50542, 11.01234, & + -82.99894, -63.55611, -30.00000, 3.55611, 22.99894, & + -99.68531, -83.47519, -30.00000, 23.47519, 39.68531, & + -120.00000, -120.00000, -30.00000, 60.00000, 60.00000 / + data ((latbu(i,j,2), i=1,ni), j=1,nj) / & + 88.99001, 89.10706, 89.14964, 89.10706, 88.99001, & + 89.16921, 89.31628, 89.37292, 89.31628, 89.16921, & + 89.31750, 89.50699, 89.58912, 89.50699, 89.31750, & + 89.41886, 89.66093, 89.79818, 89.66093, 89.41886, & + 89.45503, 89.72754, 90.00000, 89.72754, 89.45503 / + + data ((lonct(i,j,3), i=1,ni), j=1,nj) / & + 38.08144, 38.86965, 39.62025, 40.33605, 41.01961, & + 39.67340, 40.41426, 41.11877, 41.78975, 42.42974, & + 41.27339, 41.96446, 42.62075, 43.24505, 43.83985, & + 42.87655, 43.51566, 44.12188, 44.69789, 45.24611, & + 44.47789, 45.06321, 45.61778, 46.14417, 46.64470 / + data ((lonbu(i,j,3), i=1,ni), j=1,nj) / & + 39.26336, 40.00933, 40.71974, 41.39725, 42.04431, & + 40.83545, 41.53354, 42.19746, 42.82988, 43.43321, & + 42.41197, 43.06002, 43.67560, 44.26131, 44.81949, & + 43.98813, 44.58428, 45.14993, 45.68757, 46.19946, & + 45.55910, 46.10182, 46.61624, 47.10473, 47.56942 / + data ((latbu(i,j,3), i=1,ni), j=1,nj) / & + 80.97195, 80.70286, 80.43134, 80.15742, 79.88114, & + 81.07710, 80.80479, 80.53019, 80.25334, 79.97425, & + 81.17218, 80.89690, 80.61947, 80.33992, 80.05826, & + 81.25741, 80.97942, 80.69942, 80.41741, 80.13341, & + 81.33309, 81.05265, 80.77032, 80.48610, 80.19999 / + + print *,"Starting test of cpld_gridgen routine find_angq" + + angle = 0.0 + anglet = 0.0 + angchk = 0.0 + ipole = 0 + + j = nj + do i = 1,ni + if(latBu(i,j,1) .eq. 90.00)ipole(1) = i + if(latBu(i,j,2) .eq. 90.00)ipole(2) = i + enddo + + !------------------------------------------ + ! 
find anglet and test against mom6 values + !------------------------------------------ + + call find_ang((/2,5/),(/2,5/),lonBu(:,:,1),latBu(:,:,1),lonCt(:,:,1),anglet(:,:,1)) + call find_ang((/2,5/),(/2,5/),lonBu(:,:,2),latBu(:,:,2),lonCt(:,:,2),anglet(:,:,2)) + call find_ang((/2,5/),(/2,5/),lonBu(:,:,3),latBu(:,:,3),lonCt(:,:,3),anglet(:,:,3)) + + do k = 1,nblocks + write(ck,'(i1.1)')k + dmax = 1.0e-30 + do j = 1,nj + do i = 1,ni + if (abs(anglet(i,j,k)) .gt. 0.0)then + diff = abs(anglet(i,j,k) - asin(mom6_sinrot(i,j,k))) + dmax = max(diff,dmax) + end if + end do + end do + call passfail(dmax, 1.0e-4, 'MOM6 anglet, block '//trim(ck)) + end do + + if (debug) then + print *,'anglet ' + print *,'left' + do j = 1,nj + print '(5f15.6)',(anglet(i,j,1), i = 1,ni) + end do + print *,'right' + do j = 1,nj + print '(5f15.6)',(anglet(i,j,2), i = 1,ni) + end do + print *,'quad' + do j = 1,nj + print '(5f15.6)',(anglet(i,j,3), i = 1,ni) + end do + end if + + !-------------------------------------------------------- + ! + !--------------------------------------------------------- + + xangCt = 0.0 + do i = 2,ni + i2 = ipole(2)+(ipole(1)-i)+1 + xangCt(i,1) = -anglet(i2,nj,1) ! angle changes sign across seam + xangCt(i,2) = -anglet(i2,nj,2) + xangCt(i,3) = anglet(i,nj,3) + end do + + !------------------------------------------- + ! find angle and test against cice6 values + !------------------------------------------- + + call find_angq((/2,5/),(/2,5/),xangCt(:,1),anglet(:,:,1),angle(:,:,1)) + call find_angq((/2,5/),(/2,5/),xangCt(:,2),anglet(:,:,2),angle(:,:,2)) + call find_angq((/2,5/),(/2,4/),xangCt(:,3),anglet(:,:,3),angle(:,:,3)) + ! reverse angle for CICE + angle = -angle + + do k = 1,nblocks + write(ck,'(i1.1)')k + dmax = 1.0e-30 + do j = 1,nj + do i = 1,ni + if (abs(angle(i,j,k)) .gt. 0.0) then + diff = abs(angle(i,j,k) - cice6_angle(i,j,k)) + dmax = max(diff,dmax) + end if + end do + end do + call passfail(dmax, 1.0e-4, 'CICE6 angle, block '//trim(ck)) + end do + + if (debug) then + print *,'angle' + print *,'left' + do j = 1,nj + print '(5f15.6)',(angle(i,j,1), i = 1,ni) + end do + print *,'right' + do j = 1,nj + print '(5f15.6)',(angle(i,j,2), i = 1,ni) + end do + print *,'quad' + do j = 1,nj + print '(5f15.6)',(angle(i,j,3), i = 1,ni) + end do + end if + + !----------------------------------------------------------------------- + ! find anglet calculated by CICE and test against cice6 and mom6 values + !----------------------------------------------------------------------- + call find_angchk((/2,4/),(/3,4/),angle(:,:,1),angchk(:,:,1)) + call find_angchk((/2,4/),(/3,4/),angle(:,:,2),angchk(:,:,2)) + call find_angchk((/2,4/),(/3,4/),angle(:,:,3),angchk(:,:,3)) + ! reverse angle for MOM6 + angchk = -angchk + + do k = 1,nblocks + write(ck,'(i1.1)')k + dmax = 1.0e-30 + do j = 1,nj + do i = 1,ni + if (abs(angchk(i,j,k)) .gt. 0.0) then + diff = abs(-angchk(i,j,k) - cice6_anglet(i,j,k)) + dmax = max(diff,dmax) + end if + end do + end do + call passfail(dmax, 1.0e-4, 'angchk vs CICE6 anglet, block '//trim(ck)) + end do + + do k = 1,nblocks + write(ck,'(i1.1)')k + dmax = 1.0e-30 + do j = 1,nj + do i = 1,ni + if (abs(angchk(i,j,k)) .gt. 
0.0) then + diff = abs(angchk(i,j,k) - asin(mom6_sinrot(i,j,k))) + dmax = max(diff,dmax) + end if + end do + end do + call passfail(dmax, 1.2e-2, 'angchk vs MOM6 anglet, block '//trim(ck)) + end do + + if (debug) then + print *,'angchk' + print *,'left' + do j = 1,nj + print '(5f15.6)',(angchk(i,j,1), i = 1,ni) + end do + print *,'right' + do j = 1,nj + print '(5f15.6)',(angchk(i,j,2), i = 1,ni) + end do + print *,'quad' + do j = 1,nj + print '(5f15.6)',(angchk(i,j,3), i = 1,ni) + end do + end if + end program + + subroutine passfail(dmax,tolerance,msg) + + use gengrid_kinds, only : dbl_kind + + implicit none + + real(dbl_kind), intent(in) :: dmax + real(dbl_kind), intent(in) :: tolerance + character(len=*), intent(in) :: msg + + if (dmax .le. tolerance) then + print '(a,2f12.8)','SUCCESS! '//trim(msg)//' ',dmax,tolerance + else + print '(a,2f12.8)','FAIL! '//trim(msg)//' ',dmax,tolerance + stop 1 + endif + end subroutine passfail From 46b500f25654323609c25cfb875ed8c5f17391d3 Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Fri, 12 Apr 2024 07:56:30 -0400 Subject: [PATCH 14/25] Update wgrib2 version on Jet (#931) The version of wgrib2 used by the ice_blend and snow2mdl consistency tests on Jet was removed. Point to another version. Fixes #930. --- reg_tests/ice_blend/driver.jet.sh | 4 +--- reg_tests/snow2mdl/driver.jet.sh | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/reg_tests/ice_blend/driver.jet.sh b/reg_tests/ice_blend/driver.jet.sh index 02a0599f0..a844a381b 100755 --- a/reg_tests/ice_blend/driver.jet.sh +++ b/reg_tests/ice_blend/driver.jet.sh @@ -31,8 +31,7 @@ set -x source ../../sorc/machine-setup.sh > /dev/null 2>&1 module use ../../modulefiles module load build.$target.intel -module load gnu/9.2.0 -module load wgrib2/3.1.1_ncep +module load wgrib2/2.0.8 set +x module list set -x @@ -52,7 +51,6 @@ if [ "$UPDATE_BASELINE" = "TRUE" ]; then fi export WGRIB=/apps/wgrib/1.8.1.0b/bin/wgrib -export WGRIB2=${WGRIB2_ROOT}/bin/wgrib2 export COPYGB=/lfs4/HFIP/hfv3gfs/role.epic/spack-stack/spack-stack-1.6.0/envs/unified-env-rocky8/install/intel/2021.5.0/grib-util-1.3.0-hrqavdi/bin/copygb export COPYGB2=/lfs4/HFIP/hfv3gfs/role.epic/spack-stack/spack-stack-1.6.0/envs/unified-env-rocky8/install/intel/2021.5.0/grib-util-1.3.0-hrqavdi/bin/copygb2 export CNVGRIB=/mnt/lfs4/HFIP/hfv3gfs/role.epic/spack-stack/spack-stack-1.6.0/envs/unified-env-rocky8/install/intel/2021.5.0/grib-util-1.3.0-hrqavdi/bin/cnvgrib diff --git a/reg_tests/snow2mdl/driver.jet.sh b/reg_tests/snow2mdl/driver.jet.sh index 62bd63664..3df21ee0d 100755 --- a/reg_tests/snow2mdl/driver.jet.sh +++ b/reg_tests/snow2mdl/driver.jet.sh @@ -24,8 +24,7 @@ set -x source ../../sorc/machine-setup.sh > /dev/null 2>&1 module use ../../modulefiles module load build.$target.intel -module load gnu/9.2.0 -module load wgrib2/3.1.1_ncep +module load wgrib2/2.0.8 set +x module list set -x @@ -50,7 +49,6 @@ fi export HOMEreg=/lfs4/HFIP/hfv3gfs/emc.nemspara/role.ufsutils/ufs_utils/reg_tests/snow2mdl export HOMEgfs=$PWD/../.. 
export WGRIB=/apps/wgrib/1.8.1.0b/bin/wgrib -export WGRIB2=${WGRIB2_ROOT}/bin/wgrib2 rm -fr $DATA_ROOT From 25ef8afc6087bf58eb96f3cdbf32ba1098255113 Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Fri, 12 Apr 2024 11:31:42 -0400 Subject: [PATCH 15/25] Update 'readthedocs' for new AWS fixed files host (#927) Update the 'readthedocs' chgres_cube.rst file as follows: Link to the global workflow group's AWS bucket for the global model 'fixed' files ('grid', mosaic, 'oro' and surface). Update the text for the new 'oro' and surface file naming convention, which uses the ocean resolution in the file name. Fixes #926. --- docs/source/chgres_cube.rst | 44 ++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/docs/source/chgres_cube.rst b/docs/source/chgres_cube.rst index b976e500a..1f5ede13c 100644 --- a/docs/source/chgres_cube.rst +++ b/docs/source/chgres_cube.rst @@ -3,7 +3,7 @@ Introduction ------------ -The chgres_cube program creates initial condition files to “coldstart” the forecast model. The initial conditions are created from either Finite-Volume Sphere (FV3) Global Forecast System (GFS), North American Mesoscale Forecast System (NAM), Rapid Refresh (RAP), or High Resolution Rapid Refresh (HRRR) gridded binary version 2 (GRIB2) data. +The chgres_cube program creates initial condition files to coldstart the forecast model. The initial conditions are created from either Finite-Volume Sphere (FV3) Global Forecast System (GFS), North American Mesoscale Forecast System (NAM), Rapid Refresh (RAP), or High Resolution Rapid Refresh (HRRR) gridded binary version 2 (GRIB2) data. Code structure -------------- @@ -55,7 +55,7 @@ Program inputs and outputs for global applications **Inputs** -Users may create their own global grids, or use the pre-defined files located `here `_. +Users may create their own global grids, or use the pre-defined files located in the `./CRES directories `_. (where CRES is the atmospheric resolution and mxRES is the ocean resolution). * FV3 mosaic file - (NetCDF format) * CRES_mosaic.nc @@ -69,22 +69,22 @@ Users may create their own global grids, or use the pre-defined files located `h * CRES_grid.tile6.nc * FV3 orography files - (NetCDF format) - * CRES_oro_data.tile1.nc - * CRES_oro_data.tile2.nc - * CRES_oro_data.tile3.nc - * CRES_oro_data.tile4.nc - * CRES_oro_data.tile5.nc - * CRES_oro_data.tile6.nc - - * FV3 surface climatological files - Located under the `./fix_sfc `_ sub-directory. One file for each tile. NetCDF format. - * CRES.facsf.tileX.nc (fractional coverage for strong/weak zenith angle dependent albedo) - * CRES.maximum_snow_albedo.tileX.nc (maximum snow albedo) - * CRES.slope_type.tileX.nc (slope type) - * CRES.snowfree_albedo.tileX.nc (snow-free albedo) - * CRES.soil_type.tileX.nc (soil type) - * CRES.subtrate_temperature.tileX.nc (soil substrate temperature) - * CRES.vegetation_greenness.tileX.nc (vegetation greenness) - * CRES.vegetation_type.tileX.nc (vegetation type) + * CRES.mxRES_oro_data.tile1.nc + * CRES.mxRES_oro_data.tile2.nc + * CRES.mxRES_oro_data.tile3.nc + * CRES.mxRES_oro_data.tile4.nc + * CRES.mxRES_oro_data.tile5.nc + * CRES.mxRES_oro_data.tile6.nc + + * FV3 surface climatological files - Located under the `./CRES/sfc `_ subdirectories. One file for each tile. NetCDF format. 
+ * CRES.mxRES.facsf.tileX.nc (fractional coverage for strong/weak zenith angle dependent albedo) + * CRES.mxRES.maximum_snow_albedo.tileX.nc (maximum snow albedo) + * CRES.mxRES.slope_type.tileX.nc (slope type) + * CRES.mxRES.snowfree_albedo.tileX.nc (snow-free albedo) + * CRES.mxRES.soil_type.tileX.nc (soil type) + * CRES.mxRES.subtrate_temperature.tileX.nc (soil substrate temperature) + * CRES.mxRES.vegetation_greenness.tileX.nc (vegetation greenness) + * CRES.mxRES.vegetation_type.tileX.nc (vegetation type) * FV3 vertical coordinate file. Text file. `Located here `_. * global_hyblev.l$LEVS.txt @@ -93,7 +93,7 @@ Users may create their own global grids, or use the pre-defined files located `h **Outputs** - * Atmospheric “coldstart” files. NetCDF. + * Atmospheric coldstart files. NetCDF. * out.atm.tile1.nc * out.atm.tile2.nc * out.atm.tile3.nc @@ -101,7 +101,7 @@ Users may create their own global grids, or use the pre-defined files located `h * out.atm.tile5.nc * out.atm.tile6.nc - * Surface/Near Sea Surface Temperature (NSST) “coldstart” files. NetCDF + * Surface/Near Sea Surface Temperature (NSST) coldstart files. NetCDF * out.sfc.tile1.nc * out.sfc.tile1.nc * out.sfc.tile1.nc @@ -264,10 +264,10 @@ The following four sets of files/directories should all be located in the same d **Outputs** - * Atmospheric “coldstart” file. NetCDF. + * Atmospheric coldstart file. NetCDF. * out.atm.tile7.nc - * Surface “coldstart” file. NetCDF. + * Surface coldstart file. NetCDF. * out.sfc.tile7.nc Where to find FV3GFS, NAM, HRRR, and RAP GRIB2 data for regional applications From ef3a1d2c39d0620692ec1bb8026d587cb40dca4f Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Thu, 18 Apr 2024 14:39:12 -0400 Subject: [PATCH 16/25] chgres_cube: option to build without nemsio, sigio and sfcio libraries (#928) Add logic to build chgres_cube with or without the nemsio, sigio and sfcio libraries. The default will be to build chgres_cube without these libraries, which means it will only support GRIB2 or netcdf data as input. Fixes #925. 
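For example, assuming a sufficiently recent CMake and a standard out-of-source build (the build directory names below are only illustrative), the two configurations would be selected roughly as follows:

    # Default: chgres_cube is built without nemsio, sigio and sfcio,
    # so it accepts only GRIB2 or netcdf input data.
    cmake -S . -B build
    cmake --build build

    # Optional full build: also compile the legacy nemsio and
    # sigio/sfcio input paths into chgres_cube.
    cmake -S . -B build_all -DCHGRES_ALL=ON
    cmake --build build_all
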
--- .github/PULL_REQUEST_TEMPLATE | 6 +- CMakeLists.txt | 12 +- docs/source/ufs_utils.rst | 2 +- modulefiles/build.hera.gnu.lua | 5 +- modulefiles/build.hera.intel.lua | 5 +- modulefiles/build.hercules.intel.lua | 5 +- modulefiles/build.jet.intel.lua | 5 +- modulefiles/build.noaacloud.intel.lua | 5 +- modulefiles/build.orion.intel.lua | 5 +- modulefiles/build.s4.intel.lua | 5 +- modulefiles/build.wcoss2.intel.lua | 5 +- reg_tests/chgres_cube/driver.hera.sh | 134 ++++++++++++++--------- reg_tests/chgres_cube/driver.hercules.sh | 113 ++++++++----------- reg_tests/chgres_cube/driver.jet.sh | 81 +++++--------- reg_tests/chgres_cube/driver.orion.sh | 113 ++++++++----------- reg_tests/chgres_cube/driver.s4.sh | 87 +++++---------- reg_tests/chgres_cube/driver.wcoss2.sh | 80 +++++--------- sorc/chgres_cube.fd/CMakeLists.txt | 16 ++- sorc/chgres_cube.fd/atm_input_data.F90 | 12 ++ sorc/chgres_cube.fd/model_grid.F90 | 56 ++++++---- sorc/chgres_cube.fd/nst_input_data.F90 | 12 ++ sorc/chgres_cube.fd/program_setup.F90 | 2 + sorc/chgres_cube.fd/sfc_input_data.F90 | 10 ++ tests/chgres_cube/CMakeLists.txt | 67 +++++++----- tests/chgres_cube/ftst_program_setup.F90 | 46 ++++---- util/gdas_init/config | 42 ++++--- util/gdas_init/driver.hera.sh | 15 +-- util/gdas_init/driver.jet.sh | 15 +-- util/gdas_init/driver.s4.sh | 9 +- util/gdas_init/driver.wcoss2.sh | 15 +-- util/gdas_init/get_v15.data.sh | 34 ++---- util/gdas_init/run_v15.chgres.gfs.sh | 64 ----------- util/gdas_init/run_v15.chgres.sh | 2 +- 33 files changed, 484 insertions(+), 601 deletions(-) delete mode 100755 util/gdas_init/run_v15.chgres.gfs.sh diff --git a/.github/PULL_REQUEST_TEMPLATE b/.github/PULL_REQUEST_TEMPLATE index 8e59f465c..1aee3fe56 100644 --- a/.github/PULL_REQUEST_TEMPLATE +++ b/.github/PULL_REQUEST_TEMPLATE @@ -20,7 +20,11 @@ If there are changes to the build or source code, the tests below must be conduc - [ ] Compile branch on Hera using GNU. - [ ] Compile branch in 'Debug' mode on WCOSS2. - [ ] Run unit tests locally on any Tier 1 machine. -- [ ] Run relevant consistency tests locally on all Tier 1 machine. +- [ ] Run relevant consistency tests locally on all Tier 1 machines. + +Optional test. + +- [ ] Run full set of chgres_cube consistency tests on Hera. Describe any additional tests performed. diff --git a/CMakeLists.txt b/CMakeLists.txt index 60a0cf105..23470e23b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -23,6 +23,7 @@ option(GCYCLE "Enable building global_cycle.fd" ON) option(FRENCTOOLS "Enable building fre-nctools.fd" ON) option(GRIDTOOLS "Enable building grid_tools.fd" ON) option(CHGRES "Enable building chgres_cube.fd" ON) +option(CHGRES_ALL "Build chgres with all input data options." OFF) option(OROG_MASK_TOOLS "Enable building orog_mask_tools.fd" ON) # OROG_MASK_TOOLS must be ON for OROG_NETCDF_TOOLS to build. 
option(OROG_NETCDF_TOOLS "Enable building orog_netcdf_tools.fd" OFF) @@ -99,17 +100,20 @@ if(OPENMP) find_package(OpenMP REQUIRED COMPONENTS Fortran) endif() -find_package(sfcio 1.4.0 REQUIRED) +if(CHGRES_ALL) + find_package(sfcio 1.4.0 REQUIRED) +endif() find_package(w3emc 2.9.0 REQUIRED) find_package(bacio 2.4.0 REQUIRED) -find_package(nemsio 2.5.0 REQUIRED) -find_package(sigio 2.3.0 REQUIRED) +if(CHGRES_ALL OR GBLEVENTS) + find_package(nemsio 2.5.0 REQUIRED) + find_package(sigio 2.3.0 REQUIRED) +endif() find_package(ip 3.3.3 REQUIRED) if(ip_VERSION LESS 5.0) find_package(sp 2.3.3 REQUIRED) endif() find_package(g2 3.4.3 REQUIRED) -find_package(sigio 2.3.0 REQUIRED) # If doxygen documentation we enabled, build it. This must come before # adding the source code directories; the main documentation build diff --git a/docs/source/ufs_utils.rst b/docs/source/ufs_utils.rst index 739110194..ed400585e 100644 --- a/docs/source/ufs_utils.rst +++ b/docs/source/ufs_utils.rst @@ -711,7 +711,7 @@ Edit the variables in the 'config' file for your experiment: * **EXTRACT_DIR** - Directory where data extracted from HPSS is stored. * **EXTRACT_DATA** - Set to 'yes' to extract data from HPSS. If data has been extracted and is located in EXTRACT_DIR, set to 'no'. On 's4' this step can't be run. Instead, the data must be pulled from another machine. * **RUN_CHGRES** - To run chgres, set to 'yes'. To extract data only, set to 'no'. - * **yy/mm/dd/hh** - The year/month/day/hour of your desired experiment. Currently, does not support pre-ENKF GFS data, prior to 2012 May 21 00z. Use two digits. + * **yy/mm/dd/hh** - The year/month/day/hour of your desired experiment. Use a four digit year and two digits for month/day/hour. **NOTE:** *The standard build of chgres_cube does NOT support experiments prior to June 12, 2019. To coldstart an experiment prior to these dates, contact a repository manager for assistance.* * **LEVS** - Number of hybrid levels plus 1. To run with 127 levels, set LEVS to 128. * **CRES_HIRES** - Resolution of the hires component of your experiment. Example: C768. * **CRES_ENKF** - Resolution of the enkf component of the experiments. 
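As a concrete illustration of the updated date convention, a 'config' excerpt for a hypothetical 128-level C768/C384 experiment initialized at 00z on 15 June 2021 might look like the following (all values are examples only, and the extraction path is made up):

    EXTRACT_DIR=/path/to/extract_dir  # made-up location for the HPSS data
    EXTRACT_DATA=yes
    RUN_CHGRES=yes
    yy=2021                           # four-digit year
    mm=06
    dd=15
    hh=00
    LEVS=128                          # 127 hybrid levels plus 1
    CRES_HIRES=C768
    CRES_ENKF=C384
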
diff --git a/modulefiles/build.hera.gnu.lua b/modulefiles/build.hera.gnu.lua index 7f4d8ac57..82be39c6f 100644 --- a/modulefiles/build.hera.gnu.lua +++ b/modulefiles/build.hera.gnu.lua @@ -34,8 +34,9 @@ load(pathJoin("sp", sp_ver)) w3emc_ver=os.getenv("w3emc_ver") or "2.10.0" load(pathJoin("w3emc", w3emc_ver)) -sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" -load(pathJoin("sfcio", sfcio_ver)) +-- Uncomment when CHGRES_ALL is ON +--sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +--load(pathJoin("sfcio", sfcio_ver)) sigio_ver=os.getenv("sigio_ver") or "2.3.2" load(pathJoin("sigio", sigio_ver)) diff --git a/modulefiles/build.hera.intel.lua b/modulefiles/build.hera.intel.lua index f3280393a..377b0dda6 100644 --- a/modulefiles/build.hera.intel.lua +++ b/modulefiles/build.hera.intel.lua @@ -34,8 +34,9 @@ load(pathJoin("sp", sp_ver)) w3emc_ver=os.getenv("w3emc_ver") or "2.10.0" load(pathJoin("w3emc", w3emc_ver)) -sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" -load(pathJoin("sfcio", sfcio_ver)) +-- Uncomment when CHGRES_ALL is ON +--sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +--load(pathJoin("sfcio", sfcio_ver)) sigio_ver=os.getenv("sigio_ver") or "2.3.2" load(pathJoin("sigio", sigio_ver)) diff --git a/modulefiles/build.hercules.intel.lua b/modulefiles/build.hercules.intel.lua index 4e2ef73e4..0cdb92e13 100644 --- a/modulefiles/build.hercules.intel.lua +++ b/modulefiles/build.hercules.intel.lua @@ -31,8 +31,9 @@ load(pathJoin("sp", sp_ver)) w3emc_ver=os.getenv("w3emc_ver") or "2.10.0" load(pathJoin("w3emc", w3emc_ver)) -sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" -load(pathJoin("sfcio", sfcio_ver)) +-- Uncomment when CHGRES_ALL is ON +--sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +--load(pathJoin("sfcio", sfcio_ver)) sigio_ver=os.getenv("sigio_ver") or "2.3.2" load(pathJoin("sigio", sigio_ver)) diff --git a/modulefiles/build.jet.intel.lua b/modulefiles/build.jet.intel.lua index ce9e9bdaa..1626b2ca2 100644 --- a/modulefiles/build.jet.intel.lua +++ b/modulefiles/build.jet.intel.lua @@ -34,8 +34,9 @@ load(pathJoin("sp", sp_ver)) w3emc_ver=os.getenv("w3emc_ver") or "2.10.0" load(pathJoin("w3emc", w3emc_ver)) -sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" -load(pathJoin("sfcio", sfcio_ver)) +-- Uncomment when CHGRES_ALL is ON. 
+--sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +--load(pathJoin("sfcio", sfcio_ver)) sigio_ver=os.getenv("sigio_ver") or "2.3.2" load(pathJoin("sigio", sigio_ver)) diff --git a/modulefiles/build.noaacloud.intel.lua b/modulefiles/build.noaacloud.intel.lua index 296c0b9b5..a10eeda27 100644 --- a/modulefiles/build.noaacloud.intel.lua +++ b/modulefiles/build.noaacloud.intel.lua @@ -29,8 +29,9 @@ load(pathJoin("sp", sp_ver)) w3emc_ver=os.getenv("w3emc_ver") or "2.9.2" load(pathJoin("w3emc", w3emc_ver)) -sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" -load(pathJoin("sfcio", sfcio_ver)) +-- Uncomment when CHGRES_ALL is ON +--sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +--load(pathJoin("sfcio", sfcio_ver)) sigio_ver=os.getenv("sigio_ver") or "2.3.2" load(pathJoin("sigio", sigio_ver)) diff --git a/modulefiles/build.orion.intel.lua b/modulefiles/build.orion.intel.lua index fa0455b2e..a59456328 100644 --- a/modulefiles/build.orion.intel.lua +++ b/modulefiles/build.orion.intel.lua @@ -31,8 +31,9 @@ load(pathJoin("sp", sp_ver)) w3emc_ver=os.getenv("w3emc_ver") or "2.10.0" load(pathJoin("w3emc", w3emc_ver)) -sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" -load(pathJoin("sfcio", sfcio_ver)) +-- Uncomment when CHGRES_ALL is ON +--sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +--load(pathJoin("sfcio", sfcio_ver)) sigio_ver=os.getenv("sigio_ver") or "2.3.2" load(pathJoin("sigio", sigio_ver)) diff --git a/modulefiles/build.s4.intel.lua b/modulefiles/build.s4.intel.lua index a107f2398..e2ab40feb 100644 --- a/modulefiles/build.s4.intel.lua +++ b/modulefiles/build.s4.intel.lua @@ -31,8 +31,9 @@ load(pathJoin("sp", sp_ver)) w3emc_ver=os.getenv("w3emc_ver") or "2.10.0" load(pathJoin("w3emc", w3emc_ver)) -sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" -load(pathJoin("sfcio", sfcio_ver)) +-- Uncomment when CHGRES_ALL is ON +--sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +--load(pathJoin("sfcio", sfcio_ver)) sigio_ver=os.getenv("sigio_ver") or "2.3.2" load(pathJoin("sigio", sigio_ver)) diff --git a/modulefiles/build.wcoss2.intel.lua b/modulefiles/build.wcoss2.intel.lua index 967e41b48..585107fc3 100644 --- a/modulefiles/build.wcoss2.intel.lua +++ b/modulefiles/build.wcoss2.intel.lua @@ -35,8 +35,9 @@ load(pathJoin("netcdf", netcdf_ver)) bacio_ver=os.getenv("bacio_ver") or "2.4.1" load(pathJoin("bacio", bacio_ver)) -sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" -load(pathJoin("sfcio", sfcio_ver)) +-- Uncomment when CHGRES_ALL is ON. +--sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +--load(pathJoin("sfcio", sfcio_ver)) w3emc_ver=os.getenv("w3emc_ver") or "2.9.2" load(pathJoin("w3emc", w3emc_ver)) diff --git a/reg_tests/chgres_cube/driver.hera.sh b/reg_tests/chgres_cube/driver.hera.sh index 406e4bb22..cd817ad43 100755 --- a/reg_tests/chgres_cube/driver.hera.sh +++ b/reg_tests/chgres_cube/driver.hera.sh @@ -21,6 +21,10 @@ # determined by the "nccmp" utility. The baseline files are stored in # HOMEreg. # +# The nemsio and sigio/sfcio tests are not run by default because +# the standard build of chgres does not support these data types. +# To run the full set of tests, set $XTRA_TESTS to 'TRUE'. +# #----------------------------------------------------------------------------- set -x @@ -38,6 +42,9 @@ export OUTDIR="${OUTDIR}/reg-tests/chgres-cube" PROJECT_CODE="${PROJECT_CODE:-fv3-cpu}" QUEUE="${QUEUE:-batch}" +# When TRUE, run the nemsio and sigio tests. +XTRA_TESTS="${XTRA_TESTS:-FALSE}" + #----------------------------------------------------------------------------- # Should not have to change anything below here. 
HOMEufs is the root # directory of your UFS_UTILS clone. HOMEreg contains the input data @@ -55,9 +62,7 @@ export HOMEufs=$PWD/../.. export HOMEreg=/scratch1/NCEPDEV/nems/role.ufsutils/ufs_utils/reg_tests/chgres_cube -LOG_FILE=consistency.log -SUM_FILE=summary.log -rm -f $LOG_FILE* $SUM_FILE +rm -f consistency.log* summary*log export OMP_STACKSIZE=1024M @@ -83,141 +88,162 @@ export OMP_NUM_THREADS=1 # should match cpus-per-task TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c192.fv3.history \ -o $LOG_FILE -e $LOG_FILE ./c192.fv3.history.sh) -#----------------------------------------------------------------------------- -# Initialize C96 using FV3 gaussian nemsio files. -#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log03 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST3=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.nemsio \ - -o $LOG_FILE -e $LOG_FILE ./c96.fv3.nemsio.sh) - -#----------------------------------------------------------------------------- -# Initialize C96 using spectral GFS sigio/sfcio files. -#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log04 -export OMP_NUM_THREADS=6 # should match cpus-per-task -TEST4=$(sbatch --parsable --ntasks-per-node=3 --cpus-per-task=6 --nodes=2 -t 0:25:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.sigio \ - -o $LOG_FILE -e $LOG_FILE ./c96.gfs.sigio.sh) - -#----------------------------------------------------------------------------- -# Initialize C96 using spectral GFS gaussian nemsio files. -#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log05 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.nemsio \ - -o $LOG_FILE -e $LOG_FILE ./c96.gfs.nemsio.sh) - #----------------------------------------------------------------------------- # Initialize C96 using FV3 gaussian netcdf files. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log06 +LOG_FILE=consistency.log03 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST6=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \ +TEST3=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \ -o $LOG_FILE -e $LOG_FILE ./c96.fv3.netcdf.sh) #----------------------------------------------------------------------------- # Initialize global C192 using GFS GRIB2 files. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log07 +LOG_FILE=consistency.log04 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST7=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \ +TEST4=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \ -o $LOG_FILE -e $LOG_FILE ./c192.gfs.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 25-KM USING GFS GRIB2 files. 
#----------------------------------------------------------------------------- -LOG_FILE=consistency.log08 +LOG_FILE=consistency.log05 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST8=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.grib2.conus \ +TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.grib2.conus \ -o $LOG_FILE -e $LOG_FILE ./25km.conus.gfs.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GFS PHYSICS. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log09 +LOG_FILE=consistency.log06 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST9=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.gfssdf.grib2.conus \ +TEST6=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.gfssdf.grib2.conus \ -o $LOG_FILE -e $LOG_FILE ./3km.conus.hrrr.gfssdf.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GSD PHYSICS AND SFC VARS FROM FILE. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log10 +LOG_FILE=consistency.log07 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST10=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.newsfc.grib2.conus \ +TEST7=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.newsfc.grib2.conus \ -o $LOG_FILE -e $LOG_FILE ./3km.conus.hrrr.newsfc.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 13-KM USING NAM GRIB2 file WITH GFS PHYSICS . #----------------------------------------------------------------------------- -LOG_FILE=consistency.log11 +LOG_FILE=consistency.log08 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST11=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.nam.grib2.conus \ +TEST8=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.nam.grib2.conus \ -o $LOG_FILE -e $LOG_FILE ./13km.conus.nam.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 13-KM USING RAP GRIB2 file WITH GSD PHYSICS . #----------------------------------------------------------------------------- -LOG_FILE=consistency.log12 +LOG_FILE=consistency.log09 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST12=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.rap.grib2.conus \ +TEST9=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.rap.grib2.conus \ -o $LOG_FILE -e $LOG_FILE ./13km.conus.rap.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 13-KM NA USING NCEI GFS GRIB2 file WITH GFS PHYSICS . 
#----------------------------------------------------------------------------- -LOG_FILE=consistency.log13 +LOG_FILE=consistency.log10 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST13=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.na.gfs.ncei.grib2.conus \ +TEST10=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.na.gfs.ncei.grib2.conus \ -o $LOG_FILE -e $LOG_FILE ./13km.na.gfs.ncei.grib2.sh) #----------------------------------------------------------------------------- # Initialize C96 WAM IC using FV3 gaussian netcdf files. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log14 +LOG_FILE=consistency.log11 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST14=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf2wam \ +TEST11=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf2wam \ -o $LOG_FILE -e $LOG_FILE ./c96.fv3.netcdf2wam.sh) #----------------------------------------------------------------------------- # Initialize CONUS 25-KM USING GFS PGRIB2+BGRIB2 files. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log15 +LOG_FILE=consistency.log12 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST15=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.pbgrib2.conus \ +TEST12=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.pbgrib2.conus \ -o $LOG_FILE -e $LOG_FILE ./25km.conus.gfs.pbgrib2.sh) #----------------------------------------------------------------------------- # Initialize global C96 using GEFS GRIB2 files. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log16 +LOG_FILE=consistency.log13 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST16=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c96.gefs.grib2 \ +TEST13=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c96.gefs.grib2 \ -o $LOG_FILE -e $LOG_FILE ./c96.gefs.grib2.sh) #----------------------------------------------------------------------------- # Create summary log. #----------------------------------------------------------------------------- + LOG_FILE=consistency.log sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summary -o $LOG_FILE -e $LOG_FILE \ --open-mode=append -q $QUEUE -d\ - afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13:$TEST14:$TEST15:$TEST16 << EOF + afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13 << EOF +#!/bin/bash +grep -a '<<<' ${LOG_FILE}?? > summary.log +EOF + +#----------------------------------------------------------------------------- +# These extra tests may be run using the full build of chgres_cube. +#----------------------------------------------------------------------------- + +if [ "$XTRA_TESTS" = "TRUE" ]; then + +#----------------------------------------------------------------------------- +# Initialize C96 using FV3 gaussian nemsio files. 
+#----------------------------------------------------------------------------- + + LOG_FILE=consistency.xtra.log01 + export OMP_NUM_THREADS=1 # should match cpus-per-task + TEST1X=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.nemsio \ + -o $LOG_FILE -e $LOG_FILE ./c96.fv3.nemsio.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using spectral GFS sigio/sfcio files. +#----------------------------------------------------------------------------- + + LOG_FILE=consistency.xtra.log02 + export OMP_NUM_THREADS=6 # should match cpus-per-task + TEST2X=$(sbatch --parsable --ntasks-per-node=3 --cpus-per-task=6 --nodes=2 -t 0:25:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.sigio \ + -o $LOG_FILE -e $LOG_FILE ./c96.gfs.sigio.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using spectral GFS gaussian nemsio files. +#----------------------------------------------------------------------------- + + LOG_FILE=consistency.xtra.log03 + export OMP_NUM_THREADS=1 # should match cpus-per-task + TEST3X=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.nemsio \ + -o $LOG_FILE -e $LOG_FILE ./c96.gfs.nemsio.sh) + +#----------------------------------------------------------------------------- +# Create summary log. +#----------------------------------------------------------------------------- + + LOG_FILE=consistency.xtra.log + sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summaryx -o $LOG_FILE -e $LOG_FILE \ + --open-mode=append -q $QUEUE -d\ + afterok:$TEST1X:$TEST2X:$TEST3X << EOF #!/bin/bash -grep -a '<<<' $LOG_FILE* > $SUM_FILE +grep -a '<<<' ${LOG_FILE}?? > summary.xtra.log EOF +fi + exit 0 diff --git a/reg_tests/chgres_cube/driver.hercules.sh b/reg_tests/chgres_cube/driver.hercules.sh index d9a6a8358..2ef2800b3 100755 --- a/reg_tests/chgres_cube/driver.hercules.sh +++ b/reg_tests/chgres_cube/driver.hercules.sh @@ -15,9 +15,9 @@ # consistency tests will be submitted. To check the queue, type: # "squeue -u $LOGNAME". # -# The run output will be stored in OUTDIR. Log output from the suite -# will be in LOG_FILE. Once the suite has completed, a summary is -# placed in SUM_FILE. +# The run output will be stored in OUTDIR. Standard output from +# each test will be placed in its own log file. Once the suite +# has completed, a summary of results is placed in SUM_FILE. # # A test fails when its output does not match the baseline files as # determined by the "nccmp" utility. The baseline files are stored in @@ -90,130 +90,103 @@ TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:15:00 -A --open-mode=append -o $LOG_FILE2 -e $LOG_FILE2 ./c192.fv3.history.sh) #----------------------------------------------------------------------------- -# Initialize C96 using FV3 gaussian nemsio files. +# Initialize global C96 using FV3 gaussian netcdf files. 
#----------------------------------------------------------------------------- LOG_FILE3=${LOG_FILE}03 export OMP_NUM_THREADS=1 # needs to match cpus-per-task -TEST3=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.nemsio \ - --open-mode=append -o $LOG_FILE3 -e $LOG_FILE3 ./c96.fv3.nemsio.sh) +TEST3=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \ + --open-mode=append -o $LOG_FILE3 -e $LOG_FILE3 ./c96.fv3.netcdf.sh) #----------------------------------------------------------------------------- -# Initialize C96 using spectral GFS sigio/sfcio files. +# Initialize global C192 using GFS GRIB2 files. #----------------------------------------------------------------------------- LOG_FILE4=${LOG_FILE}04 -export OMP_NUM_THREADS=6 # needs to match cpus-per-task -TEST4=$(sbatch --parsable --ntasks-per-node=3 --cpus-per-task=6 --nodes=2 --mem=75G -t 0:25:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.sigio \ - --open-mode=append -o $LOG_FILE4 -e $LOG_FILE4 ./c96.gfs.sigio.sh) +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST4=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \ + --open-mode=append -o $LOG_FILE4 -e $LOG_FILE4 ./c192.gfs.grib2.sh) #----------------------------------------------------------------------------- -# Initialize C96 using spectral GFS gaussian nemsio files. +# Initialize CONUS 25-KM USING GFS GRIB2 files. #----------------------------------------------------------------------------- LOG_FILE5=${LOG_FILE}05 -export OMP_NUM_THREADS=1 # needs to match cpus-per-task -TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.nemsio \ - --open-mode=append -o $LOG_FILE5 -e $LOG_FILE5 ./c96.gfs.nemsio.sh) +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST5=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.grib2 \ + --open-mode=append -o $LOG_FILE5 -e $LOG_FILE5 ./25km.conus.gfs.grib2.sh) #----------------------------------------------------------------------------- -# Initialize C96 using GEFS GRIB2 file. +# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GFS PHYSICS. #----------------------------------------------------------------------------- LOG_FILE6=${LOG_FILE}06 -export OMP_NUM_THREADS=1 # needs to match cpus-per-task -TEST6=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c96.gefs.grib2 \ - --open-mode=append -o $LOG_FILE6 -e $LOG_FILE6 ./c96.gefs.grib2.sh) +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST6=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.gfssdf.grib2 \ + --open-mode=append -o $LOG_FILE6 -e $LOG_FILE6 ./3km.conus.hrrr.gfssdf.grib2.sh) #----------------------------------------------------------------------------- -# Initialize global C192 using GFS GRIB2 files. +# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GSD PHYSICS AND SFC VARS FROM FILE. 
#----------------------------------------------------------------------------- LOG_FILE7=${LOG_FILE}07 -export OMP_NUM_THREADS=1 # needs to match cpus-per-task -TEST7=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \ - --open-mode=append -o $LOG_FILE7 -e $LOG_FILE7 ./c192.gfs.grib2.sh) +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST7=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.newsfc.grib2 \ + --open-mode=append -o $LOG_FILE7 -e $LOG_FILE7 ./3km.conus.hrrr.newsfc.grib2.sh) #----------------------------------------------------------------------------- -# Initialize global C96 using FV3 gaussian netcdf files. +# Initialize CONUS 13-KM USING NAM GRIB2 file WITH GFS PHYSICS . #----------------------------------------------------------------------------- LOG_FILE8=${LOG_FILE}08 -export OMP_NUM_THREADS=1 # needs to match cpus-per-task -TEST8=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \ - --open-mode=append -o $LOG_FILE8 -e $LOG_FILE8 ./c96.fv3.netcdf.sh) +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST8=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.nam.grib2 \ + --open-mode=append -o $LOG_FILE8 -e $LOG_FILE8 ./13km.conus.nam.grib2.sh) #----------------------------------------------------------------------------- -# Initialize C96 WAM IC using FV3 gaussian netcdf files. +# Initialize CONUS 13-KM USING RAP GRIB2 file WITH GSD PHYSICS . #----------------------------------------------------------------------------- LOG_FILE9=${LOG_FILE}09 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST9=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=100G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf2wam \ - --open-mode=append -o $LOG_FILE9 -e $LOG_FILE9 ./c96.fv3.netcdf2wam.sh) +TEST9=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.rap.grib2 \ + --open-mode=append -o $LOG_FILE9 -e $LOG_FILE9 ./13km.conus.rap.grib2.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 25-KM USING GFS GRIB2 files. +# Initialize CONUS 13-KM NA USING NCEI GFS GRIB2 file WITH GFS PHYSICS . #----------------------------------------------------------------------------- LOG_FILE10=${LOG_FILE}10 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST10=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.grib2 \ - --open-mode=append -o $LOG_FILE10 -e $LOG_FILE10 ./25km.conus.gfs.grib2.sh) +TEST10=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.na.gfs.ncei.grib2 \ + --open-mode=append -o $LOG_FILE10 -e $LOG_FILE10 ./13km.na.gfs.ncei.grib2.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GFS PHYSICS. +# Initialize C96 WAM IC using FV3 gaussian netcdf files. 
#----------------------------------------------------------------------------- LOG_FILE11=${LOG_FILE}11 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST11=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.gfssdf.grib2 \ - --open-mode=append -o $LOG_FILE11 -e $LOG_FILE11 ./3km.conus.hrrr.gfssdf.grib2.sh) +TEST11=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=100G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf2wam \ + --open-mode=append -o $LOG_FILE11 -e $LOG_FILE11 ./c96.fv3.netcdf2wam.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GSD PHYSICS AND SFC VARS FROM FILE. +# Initialize CONUS 25-KM USING GFS PGRIB2+BGRIB2 files. #----------------------------------------------------------------------------- LOG_FILE12=${LOG_FILE}12 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST12=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.newsfc.grib2 \ - --open-mode=append -o $LOG_FILE12 -e $LOG_FILE12 ./3km.conus.hrrr.newsfc.grib2.sh) +TEST12=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.pbgrib2 \ + --open-mode=append -o $LOG_FILE12 -e $LOG_FILE12 ./25km.conus.gfs.pbgrib2.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 13-KM USING NAM GRIB2 file WITH GFS PHYSICS . +# Initialize C96 using GEFS GRIB2 file. #----------------------------------------------------------------------------- LOG_FILE13=${LOG_FILE}13 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST13=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.nam.grib2 \ - --open-mode=append -o $LOG_FILE13 -e $LOG_FILE13 ./13km.conus.nam.grib2.sh) - -#----------------------------------------------------------------------------- -# Initialize CONUS 13-KM USING RAP GRIB2 file WITH GSD PHYSICS . -#----------------------------------------------------------------------------- - -LOG_FILE14=${LOG_FILE}14 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST14=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.rap.grib2 \ - --open-mode=append -o $LOG_FILE14 -e $LOG_FILE14 ./13km.conus.rap.grib2.sh) - -#----------------------------------------------------------------------------- -# Initialize CONUS 13-KM NA USING NCEI GFS GRIB2 file WITH GFS PHYSICS . -#----------------------------------------------------------------------------- - -LOG_FILE15=${LOG_FILE}15 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST15=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.na.gfs.ncei.grib2 \ - --open-mode=append -o $LOG_FILE15 -e $LOG_FILE15 ./13km.na.gfs.ncei.grib2.sh) - -#----------------------------------------------------------------------------- -# Initialize CONUS 25-KM USING GFS PGRIB2+BGRIB2 files. 
-#----------------------------------------------------------------------------- - -LOG_FILE16=${LOG_FILE}16 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST16=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.pbgrib2 \ - --open-mode=append -o $LOG_FILE16 -e $LOG_FILE16 ./25km.conus.gfs.pbgrib2.sh) +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST13=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c96.gefs.grib2 \ + --open-mode=append -o $LOG_FILE13 -e $LOG_FILE13 ./c96.gefs.grib2.sh) #----------------------------------------------------------------------------- # Create summary log. @@ -221,7 +194,7 @@ TEST16=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 - sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summary -o $LOG_FILE -e $LOG_FILE \ --open-mode=append -q $QUEUE \ - -d afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13:$TEST14:$TEST15:$TEST16 << EOF + -d afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13 << EOF #!/bin/bash grep -a '<<<' ${LOG_FILE}* > $SUM_FILE EOF diff --git a/reg_tests/chgres_cube/driver.jet.sh b/reg_tests/chgres_cube/driver.jet.sh index 2143f2b4d..6f5f0e923 100755 --- a/reg_tests/chgres_cube/driver.jet.sh +++ b/reg_tests/chgres_cube/driver.jet.sh @@ -7,15 +7,15 @@ # Set WORK_DIR to a general working location outside the UFS_UTILS directory. # The exact working directory (OUTDIR) will be WORK_DIR/reg_tests/chgres-cube. # Set the PROJECT_CODE and QUEUE as appropriate. To see which projects you -#are authorized to use, type "account_params". +# are authorized to use, type "account_params". # # Invoke the script with no arguments. A series of daisy- # chained jobs will be submitted. To check the queue, type: # "squeue -u USERNAME". # -# The run output will be stored in OUTDIR. Log output from the suite -# will be in LOG_FILE. Once the suite has completed, a summary is -# placed in SUM_FILE. +# The run output will be stored in OUTDIR. Standard output from +# each test will be placed in its own log file. Once the suite +# has completed, a summary of results is placed in SUM_FILE. # # A test fails when its output does not match the baseline files as # determined by the "nccmp" utility. The baseline files are stored in @@ -84,130 +84,103 @@ export OMP_NUM_THREADS=1 TEST2=$(sbatch --parsable --partition=xjet --nodes=2 --ntasks-per-node=6 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J c192.fv3.history \ --exclusive -o $LOG_FILE -e $LOG_FILE ./c192.fv3.history.sh) -#----------------------------------------------------------------------------- -# Initialize C96 using FV3 gaussian nemsio files. -#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log03 -export OMP_NUM_THREADS=1 -TEST3=$(sbatch --parsable --partition=xjet --nodes=2 --ntasks-per-node=6 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.nemsio \ - --exclusive -o $LOG_FILE -e $LOG_FILE ./c96.fv3.nemsio.sh) - -#----------------------------------------------------------------------------- -# Initialize C96 using spectral GFS sigio/sfcio files. 
-#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log04 -export OMP_NUM_THREADS=6 # should match cpus-per-task -TEST4=$(sbatch --parsable --partition=xjet --nodes=4 --ntasks-per-node=3 --cpus-per-task=6 -t 0:25:00 \ - --exclusive -A $PROJECT_CODE -q $QUEUE -J c96.gfs.sigio -o $LOG_FILE -e $LOG_FILE ./c96.gfs.sigio.sh) - -#----------------------------------------------------------------------------- -# Initialize C96 using spectral GFS gaussian nemsio files. -#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log05 -export OMP_NUM_THREADS=1 -TEST5=$(sbatch --parsable --partition=xjet --nodes=1 --ntasks-per-node=6 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.nemsio \ - --exclusive -o $LOG_FILE -e $LOG_FILE ./c96.gfs.nemsio.sh) - #----------------------------------------------------------------------------- # Initialize C96 using FV3 gaussian netcdf files. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log06 +LOG_FILE=consistency.log03 export OMP_NUM_THREADS=1 -TEST6=$(sbatch --parsable --partition=xjet --nodes=2 --ntasks-per-node=6 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \ +TEST3=$(sbatch --parsable --partition=xjet --nodes=2 --ntasks-per-node=6 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \ --exclusive -o $LOG_FILE -e $LOG_FILE ./c96.fv3.netcdf.sh) #----------------------------------------------------------------------------- # Initialize C192 using GFS GRIB2 data. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log07 +LOG_FILE=consistency.log04 export OMP_NUM_THREADS=1 -TEST7=$(sbatch --parsable --partition=xjet --nodes=1 --ntasks-per-node=6 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \ +TEST4=$(sbatch --parsable --partition=xjet --nodes=1 --ntasks-per-node=6 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \ --exclusive -o $LOG_FILE -e $LOG_FILE ./c192.gfs.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 25-KM USING GFS GRIB2 files. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log08 +LOG_FILE=consistency.log05 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST8=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.grib2.conus \ +TEST5=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.grib2.conus \ --exclusive -o $LOG_FILE -e $LOG_FILE ./25km.conus.gfs.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GFS PHYSICS. 
#----------------------------------------------------------------------------- -LOG_FILE=consistency.log09 +LOG_FILE=consistency.log06 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST9=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=2 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.gfssdf.grib2.conus \ +TEST6=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=2 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.gfssdf.grib2.conus \ --exclusive -o $LOG_FILE -e $LOG_FILE ./3km.conus.hrrr.gfssdf.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GSD PHYSICS AND SFC VARS FROM FILE. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log10 +LOG_FILE=consistency.log07 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST10=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=3 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.newsfc.grib2.conus \ +TEST7=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=3 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.newsfc.grib2.conus \ --exclusive -o $LOG_FILE -e $LOG_FILE ./3km.conus.hrrr.newsfc.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 13-KM USING NAM GRIB2 file WITH GFS PHYSICS . #----------------------------------------------------------------------------- -LOG_FILE=consistency.log11 +LOG_FILE=consistency.log08 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST11=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.nam.grib2.conus \ +TEST8=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.nam.grib2.conus \ --exclusive -o $LOG_FILE -e $LOG_FILE ./13km.conus.nam.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 13-KM USING RAP GRIB2 file WITH GSD PHYSICS . #----------------------------------------------------------------------------- -LOG_FILE=consistency.log12 +LOG_FILE=consistency.log09 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST12=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.rap.grib2.conus \ +TEST9=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.rap.grib2.conus \ --exclusive -o $LOG_FILE -e $LOG_FILE ./13km.conus.rap.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 13-KM NA USING NCEI GFS GRIB2 file WITH GFS PHYSICS . #----------------------------------------------------------------------------- -LOG_FILE=consistency.log13 +LOG_FILE=consistency.log10 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST13=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=2 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.na.gfs.ncei.grib2.conus \ +TEST10=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=2 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.na.gfs.ncei.grib2.conus \ --exclusive -o $LOG_FILE -e $LOG_FILE ./13km.na.gfs.ncei.grib2.sh) #----------------------------------------------------------------------------- # Initialize C96 WAM IC using FV3 gaussian netcdf files. 
#----------------------------------------------------------------------------- -LOG_FILE=consistency.log14 +LOG_FILE=consistency.log11 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST14=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=2 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf2wam \ +TEST11=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=2 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf2wam \ --exclusive -o $LOG_FILE -e $LOG_FILE ./c96.fv3.netcdf2wam.sh) #----------------------------------------------------------------------------- # Initialize CONUS 25-KM USING GFS PGRIB2+BGRIB2 files. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log15 +LOG_FILE=consistency.log12 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST15=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.pbgrib2.conus \ +TEST12=$(sbatch --parsable --partition=xjet --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.pbgrib2.conus \ --exclusive -o $LOG_FILE -e $LOG_FILE ./25km.conus.gfs.pbgrib2.sh) #----------------------------------------------------------------------------- # Initialize C96 using GEFS GRIB2 data. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log16 +LOG_FILE=consistency.log13 export OMP_NUM_THREADS=1 -TEST16=$(sbatch --parsable --partition=xjet --nodes=1 --ntasks-per-node=6 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c96.gefs.grib2 \ +TEST13=$(sbatch --parsable --partition=xjet --nodes=1 --ntasks-per-node=6 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c96.gefs.grib2 \ --exclusive -o $LOG_FILE -e $LOG_FILE ./c96.gefs.grib2.sh) #----------------------------------------------------------------------------- @@ -217,7 +190,7 @@ TEST16=$(sbatch --parsable --partition=xjet --nodes=1 --ntasks-per-node=6 -t 0:0 LOG_FILE=consistency.log sbatch --partition=xjet --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summary -o $LOG_FILE -e $LOG_FILE \ --open-mode=append -q $QUEUE -d\ - afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13:$TEST14:$TEST15:$TEST16 << EOF + afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13 << EOF #!/bin/bash grep -a '<<<' $LOG_FILE* > $SUM_FILE EOF diff --git a/reg_tests/chgres_cube/driver.orion.sh b/reg_tests/chgres_cube/driver.orion.sh index 15660b068..ae1ebf8ce 100755 --- a/reg_tests/chgres_cube/driver.orion.sh +++ b/reg_tests/chgres_cube/driver.orion.sh @@ -13,9 +13,9 @@ # consistency tests will be submitted. To check the queue, type: # "squeue -u $LOGNAME". # -# The run output will be stored in OUTDIR. Log output from the suite -# will be in LOG_FILE. Once the suite has completed, a summary is -# placed in SUM_FILE. +# The run output will be stored in OUTDIR. Standard output from +# each test will be placed in its own log file. Once the suite +# has completed, a summary of results is placed in SUM_FILE. # # A test fails when its output does not match the baseline files as # determined by the "nccmp" utility. 
The baseline files are stored in @@ -88,130 +88,103 @@ TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:15:00 -A --open-mode=append -o $LOG_FILE2 -e $LOG_FILE2 ./c192.fv3.history.sh) #----------------------------------------------------------------------------- -# Initialize C96 using FV3 gaussian nemsio files. +# Initialize global C96 using FV3 gaussian netcdf files. #----------------------------------------------------------------------------- LOG_FILE3=${LOG_FILE}03 export OMP_NUM_THREADS=1 # needs to match cpus-per-task -TEST3=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.nemsio \ - --open-mode=append -o $LOG_FILE3 -e $LOG_FILE3 ./c96.fv3.nemsio.sh) +TEST3=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \ + --open-mode=append -o $LOG_FILE3 -e $LOG_FILE3 ./c96.fv3.netcdf.sh) #----------------------------------------------------------------------------- -# Initialize C96 using spectral GFS sigio/sfcio files. +# Initialize global C192 using GFS GRIB2 files. #----------------------------------------------------------------------------- LOG_FILE4=${LOG_FILE}04 -export OMP_NUM_THREADS=6 # needs to match cpus-per-task -TEST4=$(sbatch --parsable --ntasks-per-node=3 --cpus-per-task=6 --nodes=2 --mem=75G -t 0:25:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.sigio \ - --open-mode=append -o $LOG_FILE4 -e $LOG_FILE4 ./c96.gfs.sigio.sh) +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST4=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \ + --open-mode=append -o $LOG_FILE4 -e $LOG_FILE4 ./c192.gfs.grib2.sh) #----------------------------------------------------------------------------- -# Initialize C96 using spectral GFS gaussian nemsio files. +# Initialize CONUS 25-KM USING GFS GRIB2 files. #----------------------------------------------------------------------------- LOG_FILE5=${LOG_FILE}05 -export OMP_NUM_THREADS=1 # needs to match cpus-per-task -TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.nemsio \ - --open-mode=append -o $LOG_FILE5 -e $LOG_FILE5 ./c96.gfs.nemsio.sh) +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST5=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.grib2 \ + --open-mode=append -o $LOG_FILE5 -e $LOG_FILE5 ./25km.conus.gfs.grib2.sh) #----------------------------------------------------------------------------- -# Initialize C96 using GEFS GRIB2 file. +# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GFS PHYSICS. #----------------------------------------------------------------------------- LOG_FILE6=${LOG_FILE}06 -export OMP_NUM_THREADS=1 # needs to match cpus-per-task -TEST6=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c96.gefs.grib2 \ - --open-mode=append -o $LOG_FILE6 -e $LOG_FILE6 ./c96.gefs.grib2.sh) +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST6=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.gfssdf.grib2 \ + --open-mode=append -o $LOG_FILE6 -e $LOG_FILE6 ./3km.conus.hrrr.gfssdf.grib2.sh) #----------------------------------------------------------------------------- -# Initialize global C192 using GFS GRIB2 files. 
+# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GSD PHYSICS AND SFC VARS FROM FILE. #----------------------------------------------------------------------------- LOG_FILE7=${LOG_FILE}07 -export OMP_NUM_THREADS=1 # needs to match cpus-per-task -TEST7=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \ - --open-mode=append -o $LOG_FILE7 -e $LOG_FILE7 ./c192.gfs.grib2.sh) +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST7=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.newsfc.grib2 \ + --open-mode=append -o $LOG_FILE7 -e $LOG_FILE7 ./3km.conus.hrrr.newsfc.grib2.sh) #----------------------------------------------------------------------------- -# Initialize global C96 using FV3 gaussian netcdf files. +# Initialize CONUS 13-KM USING NAM GRIB2 file WITH GFS PHYSICS . #----------------------------------------------------------------------------- LOG_FILE8=${LOG_FILE}08 -export OMP_NUM_THREADS=1 # needs to match cpus-per-task -TEST8=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \ - --open-mode=append -o $LOG_FILE8 -e $LOG_FILE8 ./c96.fv3.netcdf.sh) +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST8=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.nam.grib2 \ + --open-mode=append -o $LOG_FILE8 -e $LOG_FILE8 ./13km.conus.nam.grib2.sh) #----------------------------------------------------------------------------- -# Initialize C96 WAM IC using FV3 gaussian netcdf files. +# Initialize CONUS 13-KM USING RAP GRIB2 file WITH GSD PHYSICS . #----------------------------------------------------------------------------- LOG_FILE9=${LOG_FILE}09 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST9=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=100G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf2wam \ - --open-mode=append -o $LOG_FILE9 -e $LOG_FILE9 ./c96.fv3.netcdf2wam.sh) +TEST9=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.rap.grib2 \ + --open-mode=append -o $LOG_FILE9 -e $LOG_FILE9 ./13km.conus.rap.grib2.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 25-KM USING GFS GRIB2 files. +# Initialize CONUS 13-KM NA USING NCEI GFS GRIB2 file WITH GFS PHYSICS . #----------------------------------------------------------------------------- LOG_FILE10=${LOG_FILE}10 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST10=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.grib2 \ - --open-mode=append -o $LOG_FILE10 -e $LOG_FILE10 ./25km.conus.gfs.grib2.sh) +TEST10=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.na.gfs.ncei.grib2 \ + --open-mode=append -o $LOG_FILE10 -e $LOG_FILE10 ./13km.na.gfs.ncei.grib2.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GFS PHYSICS. +# Initialize C96 WAM IC using FV3 gaussian netcdf files. 
#----------------------------------------------------------------------------- LOG_FILE11=${LOG_FILE}11 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST11=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.gfssdf.grib2 \ - --open-mode=append -o $LOG_FILE11 -e $LOG_FILE11 ./3km.conus.hrrr.gfssdf.grib2.sh) +TEST11=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=100G -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf2wam \ + --open-mode=append -o $LOG_FILE11 -e $LOG_FILE11 ./c96.fv3.netcdf2wam.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GSD PHYSICS AND SFC VARS FROM FILE. +# Initialize CONUS 25-KM USING GFS PGRIB2+BGRIB2 files. #----------------------------------------------------------------------------- LOG_FILE12=${LOG_FILE}12 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST12=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.newsfc.grib2 \ - --open-mode=append -o $LOG_FILE12 -e $LOG_FILE12 ./3km.conus.hrrr.newsfc.grib2.sh) +TEST12=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.pbgrib2 \ + --open-mode=append -o $LOG_FILE12 -e $LOG_FILE12 ./25km.conus.gfs.pbgrib2.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 13-KM USING NAM GRIB2 file WITH GFS PHYSICS . +# Initialize C96 using GEFS GRIB2 file. #----------------------------------------------------------------------------- LOG_FILE13=${LOG_FILE}13 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST13=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.nam.grib2 \ - --open-mode=append -o $LOG_FILE13 -e $LOG_FILE13 ./13km.conus.nam.grib2.sh) - -#----------------------------------------------------------------------------- -# Initialize CONUS 13-KM USING RAP GRIB2 file WITH GSD PHYSICS . -#----------------------------------------------------------------------------- - -LOG_FILE14=${LOG_FILE}14 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST14=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.rap.grib2 \ - --open-mode=append -o $LOG_FILE14 -e $LOG_FILE14 ./13km.conus.rap.grib2.sh) - -#----------------------------------------------------------------------------- -# Initialize CONUS 13-KM NA USING NCEI GFS GRIB2 file WITH GFS PHYSICS . -#----------------------------------------------------------------------------- - -LOG_FILE15=${LOG_FILE}15 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST15=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 13km.na.gfs.ncei.grib2 \ - --open-mode=append -o $LOG_FILE15 -e $LOG_FILE15 ./13km.na.gfs.ncei.grib2.sh) - -#----------------------------------------------------------------------------- -# Initialize CONUS 25-KM USING GFS PGRIB2+BGRIB2 files. 
-#----------------------------------------------------------------------------- - -LOG_FILE16=${LOG_FILE}16 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST16=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.pbgrib2 \ - --open-mode=append -o $LOG_FILE16 -e $LOG_FILE16 ./25km.conus.gfs.pbgrib2.sh) +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST13=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 --mem=75G -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c96.gefs.grib2 \ + --open-mode=append -o $LOG_FILE13 -e $LOG_FILE13 ./c96.gefs.grib2.sh) #----------------------------------------------------------------------------- # Create summary log. @@ -219,7 +192,7 @@ TEST16=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 --mem=75G -t 0:10:00 - sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summary -o $LOG_FILE -e $LOG_FILE \ --open-mode=append -q $QUEUE \ - -d afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13:$TEST14:$TEST15:$TEST16 << EOF + -d afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13 << EOF #!/bin/bash grep -a '<<<' ${LOG_FILE}* > $SUM_FILE EOF diff --git a/reg_tests/chgres_cube/driver.s4.sh b/reg_tests/chgres_cube/driver.s4.sh index 4e6e60119..86a68b77b 100755 --- a/reg_tests/chgres_cube/driver.s4.sh +++ b/reg_tests/chgres_cube/driver.s4.sh @@ -84,139 +84,112 @@ export OMP_NUM_THREADS=1 # should match cpus-per-task TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c192.fv3.history \ -o $LOG_FILE -e $LOG_FILE ./c192.fv3.history.sh) -#----------------------------------------------------------------------------- -# Initialize C96 using FV3 gaussian nemsio files. -#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log03 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST3=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.nemsio \ - -o $LOG_FILE -e $LOG_FILE ./c96.fv3.nemsio.sh) - -#----------------------------------------------------------------------------- -# Initialize C96 using spectral GFS sigio/sfcio files. -#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log04 -export OMP_NUM_THREADS=6 # should match cpus-per-task -TEST4=$(sbatch --parsable --ntasks-per-node=3 --cpus-per-task=6 --nodes=2 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.sigio \ - -o $LOG_FILE -e $LOG_FILE ./c96.gfs.sigio.sh) - -#----------------------------------------------------------------------------- -# Initialize C96 using spectral GFS gaussian nemsio files. -#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log05 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.nemsio \ - -o $LOG_FILE -e $LOG_FILE ./c96.gfs.nemsio.sh) - -#----------------------------------------------------------------------------- -# Initialize regional C96 using FV3 gaussian nemsio files. 
-#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log06 -export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST6=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.regional \ - -o $LOG_FILE -e $LOG_FILE ./c96.regional.sh) - #----------------------------------------------------------------------------- # Initialize C96 using FV3 gaussian netcdf files. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log07 +LOG_FILE=consistency.log03 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST7=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \ +TEST3=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \ -o $LOG_FILE -e $LOG_FILE ./c96.fv3.netcdf.sh) #----------------------------------------------------------------------------- # Initialize global C192 using GFS GRIB2 files. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log08 +LOG_FILE=consistency.log04 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST8=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \ +TEST4=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \ -o $LOG_FILE -e $LOG_FILE ./c192.gfs.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 25-KM USING GFS GRIB2 files. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log09 +LOG_FILE=consistency.log05 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST9=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.grib2.conus \ +TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.grib2.conus \ -o $LOG_FILE -e $LOG_FILE ./25km.conus.gfs.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GFS PHYSICS. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log10 +LOG_FILE=consistency.log06 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST10=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.gfssdf.grib2.conus \ +TEST6=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.gfssdf.grib2.conus \ -o $LOG_FILE -e $LOG_FILE ./3km.conus.hrrr.gfssdf.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GSD PHYSICS AND SFC VARS FROM FILE. 
#----------------------------------------------------------------------------- -LOG_FILE=consistency.log11 +LOG_FILE=consistency.log07 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST11=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.newsfc.grib2.conus \ +TEST7=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J 3km.conus.hrrr.newsfc.grib2.conus \ -o $LOG_FILE -e $LOG_FILE ./3km.conus.hrrr.newsfc.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 13-KM USING NAM GRIB2 file WITH GFS PHYSICS . #----------------------------------------------------------------------------- -LOG_FILE=consistency.log12 +LOG_FILE=consistency.log08 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST12=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.nam.grib2.conus \ +TEST8=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.nam.grib2.conus \ -o $LOG_FILE -e $LOG_FILE ./13km.conus.nam.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 13-KM USING RAP GRIB2 file WITH GSD PHYSICS . #----------------------------------------------------------------------------- -LOG_FILE=consistency.log13 +LOG_FILE=consistency.log09 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST13=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.rap.grib2.conus \ +TEST9=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.conus.rap.grib2.conus \ -o $LOG_FILE -e $LOG_FILE ./13km.conus.rap.grib2.sh) #----------------------------------------------------------------------------- # Initialize CONUS 13-KM NA USING NCEI GFS GRIB2 file WITH GFS PHYSICS . #----------------------------------------------------------------------------- -LOG_FILE=consistency.log14 +LOG_FILE=consistency.log10 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST14=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.na.gfs.ncei.grib2.conus \ +TEST10=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 13km.na.gfs.ncei.grib2.conus \ -o $LOG_FILE -e $LOG_FILE ./13km.na.gfs.ncei.grib2.sh) #----------------------------------------------------------------------------- # Initialize C96 WAM IC using FV3 gaussian netcdf files. #----------------------------------------------------------------------------- -LOG_FILE=consistency.log15 +LOG_FILE=consistency.log11 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST15=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf2wam \ +TEST11=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf2wam \ -o $LOG_FILE -e $LOG_FILE ./c96.fv3.netcdf2wam.sh) #----------------------------------------------------------------------------- # Initialize CONUS 25-KM USING GFS PGRIB2+BGRIB2 files. 
#----------------------------------------------------------------------------- -LOG_FILE=consistency.log16 +LOG_FILE=consistency.log12 export OMP_NUM_THREADS=1 # should match cpus-per-task -TEST16=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.pbgrib2.conus \ +TEST12=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J 25km.conus.gfs.pbgrib2.conus \ -o $LOG_FILE -e $LOG_FILE ./25km.conus.gfs.pbgrib2.sh) +#----------------------------------------------------------------------------- +# Initialize C96 using GEFS GRIB2 file. +#----------------------------------------------------------------------------- + +LOG_FILE=consistency.log13 +export OMP_NUM_THREADS=1 # should match cpus-per-task +TEST13=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c96.gefs.grib2 \ + -o $LOG_FILE -e $LOG_FILE ./c96.gefs.grib2.sh) + #----------------------------------------------------------------------------- # Create summary log. #----------------------------------------------------------------------------- LOG_FILE=consistency.log sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summary -o $LOG_FILE -e $LOG_FILE \ --open-mode=append -q $QUEUE -d\ - afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13:$TEST14:$TEST15:$TEST16 << EOF + afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13 << EOF #!/bin/bash grep -a '<<<' $LOG_FILE* > $SUM_FILE EOF diff --git a/reg_tests/chgres_cube/driver.wcoss2.sh b/reg_tests/chgres_cube/driver.wcoss2.sh index 2638b3f6d..4fd7f7750 100755 --- a/reg_tests/chgres_cube/driver.wcoss2.sh +++ b/reg_tests/chgres_cube/driver.wcoss2.sh @@ -85,131 +85,103 @@ TEST2=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime -N c192.fv3.history -l select=1:ncpus=6:ompthreads=1:mem=10GB $PWD/c192.fv3.history.sh) #----------------------------------------------------------------------------- -# Initialize C96 using FV3 gaussian nemsio files. +# Initialize C96 using FV3 gaussian netcdf files. #----------------------------------------------------------------------------- LOG_FILE=consistency.log03 -export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +export APRUN="mpiexec -n 12 -ppn 12 --cpu-bind core" TEST3=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N c96.fv3.nemsio -l select=1:ncpus=6:ompthreads=1:mem=45GB $PWD/c96.fv3.nemsio.sh) + -N c96.fv3.netcdf -l select=1:ncpus=12:ompthreads=1:mem=80GB $PWD/c96.fv3.netcdf.sh) #----------------------------------------------------------------------------- -# Initialize C96 using spectral GFS sigio/sfcio files. +# Initialize global C192 using GFS GRIB2 files. 
#----------------------------------------------------------------------------- LOG_FILE=consistency.log04 -export OMP_PLACES=cores -export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core --depth 4" -TEST4=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:10:00 \ - -N c96.gfs.sigio -l select=1:ncpus=24:ompthreads=4:mem=45GB $PWD/c96.gfs.sigio.sh) +export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" +TEST4=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c192.gfs.grib2 -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/c192.gfs.grib2.sh) #----------------------------------------------------------------------------- -# Initialize C96 using spectral GFS gaussian nemsio files. +# Initialize CONUS 25-KM USING GFS GRIB2 files. #----------------------------------------------------------------------------- LOG_FILE=consistency.log05 export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" TEST5=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N c96.gfs.nemsio -l select=1:ncpus=6:ompthreads=1:mem=35GB $PWD/c96.gfs.nemsio.sh) + -N 25km.conus.gfs.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/25km.conus.gfs.grib2.sh) #----------------------------------------------------------------------------- -# Initialize global C96 using GEFS GRIB2 files. +# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GFS PHYSICS. #----------------------------------------------------------------------------- LOG_FILE=consistency.log06 export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" TEST6=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N c96.gefs.grib2 -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/c96.gefs.grib2.sh) + -N 3km.conus.hrrr.gfssdf.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=75GB $PWD/3km.conus.hrrr.gfssdf.grib2.sh) #----------------------------------------------------------------------------- -# Initialize C96 using FV3 gaussian netcdf files. +# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GSD PHYSICS AND SFC VARS FROM FILE. #----------------------------------------------------------------------------- LOG_FILE=consistency.log07 export APRUN="mpiexec -n 12 -ppn 12 --cpu-bind core" TEST7=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N c96.fv3.netcdf -l select=1:ncpus=12:ompthreads=1:mem=80GB $PWD/c96.fv3.netcdf.sh) + -N 3km.conus.hrrr.newsfc.grib2.conus -l select=1:ncpus=12:ompthreads=1:mem=75GB $PWD/3km.conus.hrrr.newsfc.grib2.sh) #----------------------------------------------------------------------------- -# Initialize global C192 using GFS GRIB2 files. +# Initialize CONUS 13-KM USING NAM GRIB2 file WITH GFS PHYSICS . #----------------------------------------------------------------------------- LOG_FILE=consistency.log08 export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" TEST8=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N c192.gfs.grib2 -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/c192.gfs.grib2.sh) + -N 13km.conus.nam.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/13km.conus.nam.grib2.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 25-KM USING GFS GRIB2 files. +# Initialize CONUS 13-KM USING RAP GRIB2 file WITH GSD PHYSICS . 
#----------------------------------------------------------------------------- LOG_FILE=consistency.log09 export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" TEST9=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N 25km.conus.gfs.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/25km.conus.gfs.grib2.sh) + -N 13km.conus.rap.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/13km.conus.rap.grib2.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GFS PHYSICS. +# Initialize CONUS 13-KM NA USING NCEI GFS GRIB2 file WITH GFS PHYSICS . #----------------------------------------------------------------------------- LOG_FILE=consistency.log10 export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" TEST10=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N 3km.conus.hrrr.gfssdf.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=75GB $PWD/3km.conus.hrrr.gfssdf.grib2.sh) + -N 13km.na.gfs.ncei.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=25GB $PWD/13km.na.gfs.ncei.grib2.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 3-KM USING HRRR GRIB2 file WITH GSD PHYSICS AND SFC VARS FROM FILE. +# Initialize C96 WAM IC using FV3 gaussian netcdf files. #----------------------------------------------------------------------------- LOG_FILE=consistency.log11 export APRUN="mpiexec -n 12 -ppn 12 --cpu-bind core" TEST11=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N 3km.conus.hrrr.newsfc.grib2.conus -l select=1:ncpus=12:ompthreads=1:mem=75GB $PWD/3km.conus.hrrr.newsfc.grib2.sh) + -N c96.fv3.netcdf2wam -l select=1:ncpus=12:ompthreads=1:mem=75GB $PWD/c96.fv3.netcdf2wam.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 13-KM USING NAM GRIB2 file WITH GFS PHYSICS . +# Initialize CONUS 25-KM USING GFS PGRIB2+BGRIB2 files. #----------------------------------------------------------------------------- LOG_FILE=consistency.log12 export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" TEST12=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N 13km.conus.nam.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/13km.conus.nam.grib2.sh) + -N 25km.conus.gfs.pbgrib2.conus -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/25km.conus.gfs.pbgrib2.sh) #----------------------------------------------------------------------------- -# Initialize CONUS 13-KM USING RAP GRIB2 file WITH GSD PHYSICS . +# Initialize global C96 using GEFS GRIB2 files. #----------------------------------------------------------------------------- LOG_FILE=consistency.log13 export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" TEST13=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N 13km.conus.rap.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/13km.conus.rap.grib2.sh) - -#----------------------------------------------------------------------------- -# Initialize CONUS 13-KM NA USING NCEI GFS GRIB2 file WITH GFS PHYSICS . 
-#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log14 -export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" -TEST14=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N 13km.na.gfs.ncei.grib2.conus -l select=1:ncpus=6:ompthreads=1:mem=25GB $PWD/13km.na.gfs.ncei.grib2.sh) - -#----------------------------------------------------------------------------- -# Initialize C96 WAM IC using FV3 gaussian netcdf files. -#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log15 -export APRUN="mpiexec -n 12 -ppn 12 --cpu-bind core" -TEST15=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N c96.fv3.netcdf2wam -l select=1:ncpus=12:ompthreads=1:mem=75GB $PWD/c96.fv3.netcdf2wam.sh) - -#----------------------------------------------------------------------------- -# Initialize CONUS 25-KM USING GFS PGRIB2+BGRIB2 files. -#----------------------------------------------------------------------------- - -LOG_FILE=consistency.log16 -export APRUN="mpiexec -n 6 -ppn 6 --cpu-bind core" -TEST16=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N 25km.conus.gfs.pbgrib2.conus -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/25km.conus.gfs.pbgrib2.sh) + -N c96.gefs.grib2 -l select=1:ncpus=6:ompthreads=1:mem=15GB $PWD/c96.gefs.grib2.sh) #----------------------------------------------------------------------------- # Create summary log. @@ -218,7 +190,7 @@ TEST16=$(qsub -V -o $LOG_FILE -e $LOG_FILE -q $QUEUE -A $PROJECT_CODE -l walltim LOG_FILE=consistency.log qsub -V -o ${LOG_FILE} -e ${LOG_FILE} -q $QUEUE -A $PROJECT_CODE -l walltime=00:01:00 \ -N chgres_summary -l select=1:ncpus=1:mem=100MB \ - -W depend=afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13:$TEST14:$TEST15:$TEST16 << EOF + -W depend=afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7:$TEST8:$TEST9:$TEST10:$TEST11:$TEST12:$TEST13 << EOF #!/bin/bash cd ${this_dir} grep -a '<<<' ${LOG_FILE}?? 
| grep -v echo > $SUM_FILE diff --git a/sorc/chgres_cube.fd/CMakeLists.txt b/sorc/chgres_cube.fd/CMakeLists.txt index 3a1803569..94f0906bc 100644 --- a/sorc/chgres_cube.fd/CMakeLists.txt +++ b/sorc/chgres_cube.fd/CMakeLists.txt @@ -31,6 +31,10 @@ elseif(CMAKE_Fortran_COMPILER_ID MATCHES "^(GNU)$") set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -ffree-line-length-0 -fdefault-real-8") endif() +if(CHGRES_ALL) + set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -DCHGRES_ALL") +endif() + set(exe_name chgres_cube) add_library(chgres_cube_lib STATIC ${lib_src}) @@ -40,13 +44,19 @@ set(mod_dir "${CMAKE_CURRENT_BINARY_DIR}/mod") set_target_properties(chgres_cube_lib PROPERTIES Fortran_MODULE_DIRECTORY ${mod_dir}) target_include_directories(chgres_cube_lib INTERFACE ${mod_dir}) +if(CHGRES_ALL) + target_link_libraries( + chgres_cube_lib + PUBLIC + nemsio::nemsio + sfcio::sfcio + sigio::sigio) +endif() + target_link_libraries( chgres_cube_lib PUBLIC g2::g2_d - nemsio::nemsio - sfcio::sfcio - sigio::sigio bacio::bacio_4 ip::ip_d w3emc::w3emc_d diff --git a/sorc/chgres_cube.fd/atm_input_data.F90 b/sorc/chgres_cube.fd/atm_input_data.F90 index fd7d254c1..14b475529 100644 --- a/sorc/chgres_cube.fd/atm_input_data.F90 +++ b/sorc/chgres_cube.fd/atm_input_data.F90 @@ -15,7 +15,9 @@ module atm_input_data use esmf use netcdf +#ifdef CHGRES_ALL use nemsio_module +#endif use program_setup, only : data_dir_input_grid, & atm_files_input_grid, & @@ -108,6 +110,7 @@ subroutine read_input_atm_data(localpet) ! Read the gaussian history files in nemsio format. !------------------------------------------------------------------------------- +#ifdef CHGRES_ALL elseif (trim(input_type) == "gaussian_nemsio") then ! fv3gfs gaussian nemsio call read_input_atm_gaussian_nemsio_file(localpet) @@ -128,6 +131,7 @@ subroutine read_input_atm_data(localpet) call read_input_atm_gfs_sigio_file(localpet) +#endif !------------------------------------------------------------------------------- ! Read fv3gfs data in grib2 format. !------------------------------------------------------------------------------- @@ -253,6 +257,7 @@ subroutine init_atm_esmf_fields end subroutine init_atm_esmf_fields +#ifdef CHGRES_ALL !> Read input atmospheric data from spectral gfs (old sigio format). !! !! @note Format used prior to July 19, 2017. @@ -491,6 +496,9 @@ subroutine read_input_atm_gfs_sigio_file(localpet) endif end subroutine read_input_atm_gfs_sigio_file +#endif + +#ifdef CHGRES_ALL !> Read input atmospheric data from spectral gfs (global gaussian in !! nemsio format. Starting July 19, 2017). @@ -745,6 +753,9 @@ subroutine read_input_atm_gfs_gaussian_nemsio_file(localpet) deallocate(pi) end subroutine read_input_atm_gfs_gaussian_nemsio_file +#endif + +#ifdef CHGRES_ALL !> Read input grid atmospheric fv3 gaussian nemsio files. !! @@ -1024,6 +1035,7 @@ subroutine read_input_atm_gaussian_nemsio_file(localpet) call ESMF_FieldDestroy(dpres_input_grid, rc=rc) end subroutine read_input_atm_gaussian_nemsio_file +#endif !> Read input grid fv3 atmospheric data 'warm' restart files. !! diff --git a/sorc/chgres_cube.fd/model_grid.F90 b/sorc/chgres_cube.fd/model_grid.F90 index af3d2952f..74f4c341d 100644 --- a/sorc/chgres_cube.fd/model_grid.F90 +++ b/sorc/chgres_cube.fd/model_grid.F90 @@ -147,7 +147,9 @@ end subroutine define_input_grid !! 
@author George Gayno NCEP/EMC subroutine define_input_grid_gaussian(npets) +#ifdef CHGRES_ALL use nemsio_module +#endif use program_setup, only : data_dir_input_grid, & atm_files_input_grid, & @@ -155,8 +157,10 @@ subroutine define_input_grid_gaussian(npets) input_type, & convert_atm, convert_sfc +#ifdef CHGRES_ALL use sfcio_module use sigio_module +#endif use netcdf implicit none @@ -166,8 +170,10 @@ subroutine define_input_grid_gaussian(npets) character(len=250) :: the_file integer :: i, j, rc, clb(2), cub(2), ncid, id_grid +#ifdef CHGRES_ALL integer(sfcio_intkind) :: rc2 integer(sigio_intkind) :: rc3 +#endif real(esmf_kind_r8), allocatable :: latitude(:,:) real(esmf_kind_r8), allocatable :: longitude(:,:) @@ -178,10 +184,14 @@ subroutine define_input_grid_gaussian(npets) real(esmf_kind_r8) :: deltalon real(esmf_kind_r8), allocatable :: slat(:), wlat(:) +#ifdef CHGRES_ALL type(nemsio_gfile) :: gfile +#endif type(esmf_polekind_flag) :: polekindflag(2) +#ifdef CHGRES_ALL type(sfcio_head) :: sfchead type(sigio_head) :: sighead +#endif print*,"- DEFINE INPUT GRID OBJECT FOR GAUSSIAN DATA." @@ -193,8 +203,28 @@ subroutine define_input_grid_gaussian(npets) the_file=trim(data_dir_input_grid) // "/" // trim(atm_files_input_grid(1)) endif - if (trim(input_type) == "gfs_sigio") then ! sigio/sfcio format, used by - ! spectral gfs prior to 7/19/2017. + if (trim(input_type) == "gaussian_netcdf") then + + print*,'- OPEN AND READ: ',trim(the_file) + rc=nf90_open(trim(the_file),nf90_nowrite,ncid) + call netcdf_err(rc, 'opening file') + + print*,"- READ grid_xt" + rc=nf90_inq_dimid(ncid, 'grid_xt', id_grid) + call netcdf_err(rc, 'reading grid_xt id') + rc=nf90_inquire_dimension(ncid,id_grid,len=i_input) + call netcdf_err(rc, 'reading grid_xt') + + print*,"- READ grid_yt" + rc=nf90_inq_dimid(ncid, 'grid_yt', id_grid) + call netcdf_err(rc, 'reading grid_yt id') + rc=nf90_inquire_dimension(ncid,id_grid,len=j_input) + call netcdf_err(rc, 'reading grid_yt') + + rc = nf90_close(ncid) + +#ifdef CHGRES_ALL + elseif (trim(input_type) == "gfs_sigio") then ! sigio/sfcio format, used by if (convert_sfc) then ! sfcio format print*,"- OPEN AND READ ", trim(the_file) @@ -216,26 +246,6 @@ subroutine define_input_grid_gaussian(npets) j_input = sighead%latb endif - elseif (trim(input_type) == "gaussian_netcdf") then - - print*,'- OPEN AND READ: ',trim(the_file) - rc=nf90_open(trim(the_file),nf90_nowrite,ncid) - call netcdf_err(rc, 'opening file') - - print*,"- READ grid_xt" - rc=nf90_inq_dimid(ncid, 'grid_xt', id_grid) - call netcdf_err(rc, 'reading grid_xt id') - rc=nf90_inquire_dimension(ncid,id_grid,len=i_input) - call netcdf_err(rc, 'reading grid_xt') - - print*,"- READ grid_yt" - rc=nf90_inq_dimid(ncid, 'grid_yt', id_grid) - call netcdf_err(rc, 'reading grid_yt id') - rc=nf90_inquire_dimension(ncid,id_grid,len=j_input) - call netcdf_err(rc, 'reading grid_yt') - - rc = nf90_close(ncid) - else ! nemsio format call nemsio_init(iret=rc) @@ -248,6 +258,8 @@ subroutine define_input_grid_gaussian(npets) if (rc /= 0) call error_handler("READING FILE", rc) call nemsio_close(gfile) + +#endif endif diff --git a/sorc/chgres_cube.fd/nst_input_data.F90 b/sorc/chgres_cube.fd/nst_input_data.F90 index 121e14360..9decac2b2 100644 --- a/sorc/chgres_cube.fd/nst_input_data.F90 +++ b/sorc/chgres_cube.fd/nst_input_data.F90 @@ -14,7 +14,9 @@ module nst_input_data !! 
@author George Gayno NCEP/EMC use esmf use netcdf +#ifdef CHGRES_ALL use nemsio_module +#endif use program_setup, only : data_dir_input_grid, & sfc_files_input_grid, & @@ -205,6 +207,7 @@ subroutine read_input_nst_data(localpet) ! spectral GFS nemsio file. !-------------------------------------------------------------------------- +#ifdef CHGRES_ALL if (trim(input_type) == "gaussian_nemsio" .or. trim(input_type) == "gfs_gaussian_nemsio") then call read_input_nst_nemsio_file(localpet) @@ -219,6 +222,11 @@ subroutine read_input_nst_data(localpet) call read_input_nst_netcdf_file(localpet) endif +#else + + call read_input_nst_netcdf_file(localpet) + +#endif end subroutine read_input_nst_data @@ -502,6 +510,8 @@ subroutine read_input_nst_netcdf_file(localpet) end subroutine read_input_nst_netcdf_file +#ifdef CHGRES_ALL + !> Read input grid nst data from fv3 gaussian nemsio history file or !! spectral GFS nemsio file. !! @@ -775,6 +785,8 @@ subroutine read_input_nst_nemsio_file(localpet) if (localpet == 0) call nemsio_close(gfile) end subroutine read_input_nst_nemsio_file + +#endif !> Free up memory associated with nst data. !! diff --git a/sorc/chgres_cube.fd/program_setup.F90 b/sorc/chgres_cube.fd/program_setup.F90 index 895f71c28..9c3190e80 100644 --- a/sorc/chgres_cube.fd/program_setup.F90 +++ b/sorc/chgres_cube.fd/program_setup.F90 @@ -289,12 +289,14 @@ subroutine read_setup_namelist(filename) print*,'- INPUT DATA FROM FV3 TILED RESTART FILES.' case ("history") print*,'- INPUT DATA FROM FV3 TILED HISTORY FILES.' +#ifdef CHGRES_ALL case ("gaussian_nemsio") print*,'- INPUT DATA FROM FV3 GAUSSIAN NEMSIO FILE.' case ("gfs_gaussian_nemsio") print*,'- INPUT DATA FROM SPECTRAL GFS GAUSSIAN NEMSIO FILE.' case ("gfs_sigio") print*,'- INPUT DATA FROM SPECTRAL GFS SIGIO/SFCIO FILE.' +#endif() case ("gaussian_netcdf") print*,'- INPUT DATA FROM FV3 GAUSSIAN NETCDF FILE.' case ("grib2") diff --git a/sorc/chgres_cube.fd/sfc_input_data.F90 b/sorc/chgres_cube.fd/sfc_input_data.F90 index c5d5d7c0f..3f8e05633 100644 --- a/sorc/chgres_cube.fd/sfc_input_data.F90 +++ b/sorc/chgres_cube.fd/sfc_input_data.F90 @@ -14,7 +14,9 @@ module sfc_input_data !! @author George Gayno NCEP/EMC use esmf use netcdf +#ifdef CHGRES_ALL use nemsio_module +#endif use program_setup, only : data_dir_input_grid, & sfc_files_input_grid, & @@ -121,6 +123,7 @@ subroutine read_input_sfc_data(localpet) ! Read the gaussian history files in nemsio format. !------------------------------------------------------------------------------- +#ifdef CHGRES_ALL elseif (trim(input_type) == "gaussian_nemsio") then call read_input_sfc_gaussian_nemsio_file(localpet) @@ -141,6 +144,7 @@ subroutine read_input_sfc_data(localpet) call read_input_sfc_gfs_sfcio_file(localpet) +#endif !------------------------------------------------------------------------------- ! Read fv3gfs surface data in grib2 format. !------------------------------------------------------------------------------- @@ -153,6 +157,7 @@ subroutine read_input_sfc_data(localpet) end subroutine read_input_sfc_data +#ifdef CHGRES_ALL !> Read input grid surface data from a spectral gfs gaussian sfcio !! file. !! @@ -376,7 +381,9 @@ subroutine read_input_sfc_gfs_sfcio_file(localpet) call sfcio_sclose(23, iret) end subroutine read_input_sfc_gfs_sfcio_file +#endif +#ifdef CHGRES_ALL !> Read input grid surface data from a spectral gfs gaussian nemsio !! file. !! 
@@ -728,7 +735,9 @@ subroutine read_input_sfc_gfs_gaussian_nemsio_file(localpet) if (localpet == 0) call nemsio_close(gfile) end subroutine read_input_sfc_gfs_gaussian_nemsio_file +#endif +#ifdef CHGRES_ALL !> Read input grid surface data from an fv3 gaussian nemsio file. !! !! @param[in] localpet ESMF local persistent execution thread @@ -1077,6 +1086,7 @@ subroutine read_input_sfc_gaussian_nemsio_file(localpet) if (localpet == 0) call nemsio_close(gfile) end subroutine read_input_sfc_gaussian_nemsio_file +#endif !> Read input grid surface data from fv3 tiled warm 'restart' files. !! diff --git a/tests/chgres_cube/CMakeLists.txt b/tests/chgres_cube/CMakeLists.txt index 47c7d7e9e..a5bb4ffff 100644 --- a/tests/chgres_cube/CMakeLists.txt +++ b/tests/chgres_cube/CMakeLists.txt @@ -8,8 +8,10 @@ set(CHGRES_URL "https://ftp.emc.ncep.noaa.gov/static_files/public/UFS/ufs_utils/ set(V16SFC_FILE "gfs.v16.sfc.history.nc") set(V16SFC_FILE2 "gfs.v16.sfc2.history.nc") set(V16ATM_FILE "gfs.v16.atm.history.nc") -set(V14SFC_FILE "gfs.v14.sfc.history.nemsio") -set(V15SFC_FILE "gfs.v15.sfc.history.nemsio") +if(CHGRES_ALL) + set(V14SFC_FILE "gfs.v14.sfc.history.nemsio") + set(V15SFC_FILE "gfs.v15.sfc.history.nemsio") +endif() set(GFS_GRIB_FILE "gfs.t00z.pgrb2.0p50.f000") foreach(THE_FILE IN LISTS V16SFC_FILE V16SFC_FILE2 V16ATM_FILE V14SFC_FILE V15SFC_FILE GFS_GRIB_FILE) @@ -23,6 +25,9 @@ if(CMAKE_Fortran_COMPILER_ID MATCHES "^(Intel)$") elseif(CMAKE_Fortran_COMPILER_ID MATCHES "^(GNU)$") set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -ffree-line-length-0 -fdefault-real-8") endif() +if(CHGRES_ALL) + set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -DCHGRES_ALL") +endif() include_directories(${PROJECT_SOURCE_DIR}) @@ -32,14 +37,16 @@ execute_process( COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/data/config_fv3_tiled.nml ${CMAKE_CURRENT_BINARY_DIR}/data/config_fv3_tiled.nml) execute_process( COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/data/config_fv3_tiled_warm_restart.nml ${CMAKE_CURRENT_BINARY_DIR}/data/config_fv3_tiled_warm_restart.nml) -execute_process( COMMAND ${CMAKE_COMMAND} -E copy - ${CMAKE_CURRENT_SOURCE_DIR}/data/config_gaussian_nemsio.nml ${CMAKE_CURRENT_BINARY_DIR}/data/config_gaussian_nemsio.nml) +if(CHGRES_ALL) + execute_process( COMMAND ${CMAKE_COMMAND} -E copy + ${CMAKE_CURRENT_SOURCE_DIR}/data/config_gaussian_nemsio.nml ${CMAKE_CURRENT_BINARY_DIR}/data/config_gaussian_nemsio.nml) + execute_process( COMMAND ${CMAKE_COMMAND} -E copy + ${CMAKE_CURRENT_SOURCE_DIR}/data/config_spectral_sigio.nml ${CMAKE_CURRENT_BINARY_DIR}/data/config_spectral_sigio.nml) +endif() execute_process( COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/data/config_gfs_grib2.nml ${CMAKE_CURRENT_BINARY_DIR}/data/config_gfs_grib2.nml) execute_process( COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/data/global_hyblev.l28.txt ${CMAKE_CURRENT_BINARY_DIR}/data/global_hyblev.l28.txt) -execute_process( COMMAND ${CMAKE_COMMAND} -E copy - ${CMAKE_CURRENT_SOURCE_DIR}/data/config_spectral_sigio.nml ${CMAKE_CURRENT_BINARY_DIR}/data/config_spectral_sigio.nml) execute_process( COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/data/GFSphys_varmap.txt ${CMAKE_CURRENT_BINARY_DIR}/data/GFSphys_varmap.txt) execute_process( COMMAND ${CMAKE_COMMAND} -E copy @@ -48,7 +55,7 @@ execute_process( COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/data/msis2.1_test_ref_dp.txt ${CMAKE_CURRENT_BINARY_DIR}/data/msis2.1_test_ref_dp.txt) # This one does not end up in the data 
directory. execute_process( COMMAND ${CMAKE_COMMAND} -E copy - ${CMAKE_CURRENT_SOURCE_DIR}/data/config_gaussian_nemsio.nml ${CMAKE_CURRENT_BINARY_DIR}/fort.41) + ${CMAKE_CURRENT_SOURCE_DIR}/data/config_fv3_tiled.nml ${CMAKE_CURRENT_BINARY_DIR}/fort.41) execute_process( COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/LSanSuppress.supp ${CMAKE_CURRENT_BINARY_DIR}/LSanSuppress.supp) @@ -131,15 +138,6 @@ add_mpi_test(chgres_cube-ftst_convert_winds NUMPROCS 3 TIMEOUT 60) -add_executable(ftst_read_sfc_gfs_nemsio ftst_read_sfc_gfs_nemsio.F90) -target_link_libraries(ftst_read_sfc_gfs_nemsio chgres_cube_lib) - -# Cause test to be run with MPI. -add_mpi_test(chgres_cube-ftst_read_sfc_gfs_nemsio - EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/ftst_read_sfc_gfs_nemsio - NUMPROCS 1 - TIMEOUT 60) - ## Comment out this unit test until ESMF memory leaks are solved ## add_executable(ftst_surface_interp ftst_surface_interp.F90) ## target_link_libraries(ftst_surface_interp chgres_cube_lib) @@ -150,14 +148,25 @@ add_mpi_test(chgres_cube-ftst_read_sfc_gfs_nemsio ## NUMPROCS 1 ## TIMEOUT 60) -add_executable(ftst_read_sfc_nemsio ftst_read_sfc_nemsio.F90) -target_link_libraries(ftst_read_sfc_nemsio chgres_cube_lib) +if(CHGRES_ALL) + add_executable(ftst_read_sfc_gfs_nemsio ftst_read_sfc_gfs_nemsio.F90) + target_link_libraries(ftst_read_sfc_gfs_nemsio chgres_cube_lib) # Cause test to be run with MPI. -add_mpi_test(chgres_cube-ftst_read_sfc_nemsio - EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/ftst_read_sfc_nemsio - NUMPROCS 1 - TIMEOUT 60) + add_mpi_test(chgres_cube-ftst_read_sfc_gfs_nemsio + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/ftst_read_sfc_gfs_nemsio + NUMPROCS 1 + TIMEOUT 60) + + add_executable(ftst_read_sfc_nemsio ftst_read_sfc_nemsio.F90) + target_link_libraries(ftst_read_sfc_nemsio chgres_cube_lib) + +# Cause test to be run with MPI. + add_mpi_test(chgres_cube-ftst_read_sfc_nemsio + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/ftst_read_sfc_nemsio + NUMPROCS 1 + TIMEOUT 60) +endif() add_executable(ftst_read_sfc_netcdf ftst_read_sfc_netcdf.F90) target_link_libraries(ftst_read_sfc_netcdf chgres_cube_lib) @@ -177,14 +186,16 @@ add_mpi_test(chgres_cube-ftst_read_nst_netcdf NUMPROCS 1 TIMEOUT 60) -add_executable(ftst_read_nst_nemsio ftst_read_nst_nemsio.F90) -target_link_libraries(ftst_read_nst_nemsio chgres_cube_lib) +if(CHGRES_ALL) + add_executable(ftst_read_nst_nemsio ftst_read_nst_nemsio.F90) + target_link_libraries(ftst_read_nst_nemsio chgres_cube_lib) # Cause test to be run with MPI. -add_mpi_test(chgres_cube-ftst_read_nst_nemsio - EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/ftst_read_nst_nemsio - NUMPROCS 1 - TIMEOUT 60) + add_mpi_test(chgres_cube-ftst_read_nst_nemsio + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/ftst_read_nst_nemsio + NUMPROCS 1 + TIMEOUT 60) +endif() add_executable(ftst_read_atm_gaussian_netcdf ftst_read_atm_gaussian_netcdf.F90) target_link_libraries(ftst_read_atm_gaussian_netcdf chgres_cube_lib) diff --git a/tests/chgres_cube/ftst_program_setup.F90 b/tests/chgres_cube/ftst_program_setup.F90 index 45210e687..30359fcdc 100644 --- a/tests/chgres_cube/ftst_program_setup.F90 +++ b/tests/chgres_cube/ftst_program_setup.F90 @@ -19,30 +19,27 @@ program ftst_program_setup if (my_rank .eq. 0) print*, "Starting test of program_setup." if (my_rank .eq. 0) print*, "testing read_setup_namelist with file fort.41..." call read_setup_namelist() - if (cycle_mon .ne. 7 .or. cycle_day .ne. 4 .or. cycle_hour .ne. 12) stop 4 - if (.not. convert_atm .or. .not. convert_sfc .or. .not. convert_nst) stop 5 - if (regional .ne. 
0 .or. halo_bndy .ne. 0 .or. halo_blend .ne. 0) stop 6 - if (trim(mosaic_file_target_grid) .ne. "/scratch1/NCEPDEV/da/George.Gayno/noscrub/reg_tests/chgres_cube/fix/C96/C96_mosaic.nc") stop 7 - if (trim(fix_dir_target_grid) .ne. "/scratch1/NCEPDEV/da/George.Gayno/noscrub/reg_tests/chgres_cube/fix/C96/fix_sfc") stop 8 - if (trim(orog_dir_target_grid) .ne. "/scratch1/NCEPDEV/da/George.Gayno/noscrub/reg_tests/chgres_cube/fix/C96/") stop 9 - if (trim(vcoord_file_target_grid) .ne. "/scratch1/NCEPDEV/da/George.Gayno/ufs_utils.git/UFS_UTILS/reg_tests/chgres_cube/../../fix/fix_am/global_hyblev.l64.txt") stop 10 - if (trim(data_dir_input_grid) .ne. "/scratch1/NCEPDEV/da/George.Gayno/noscrub/reg_tests/chgres_cube/input_data/fv3.nemsio") stop 11 - if (trim(atm_files_input_grid(1)) .ne. 'gfs.t12z.atmf000.nemsio') stop 12 - if (trim(sfc_files_input_grid(1)) .ne. 'gfs.t12z.sfcf000.nemsio') stop 13 - if (varmap_file .ne. "NULL") stop 14 - if (thomp_mp_climo_file .ne. "NULL") stop 16 - if (trim(cres_target_grid) .ne. "C96") stop 17 - if (atm_weight_file .ne. "NULL") stop 18 - if (trim(input_type) .ne. "gaussian_nemsio") stop 19 - if (trim(external_model) .ne. "GFS") stop 20 - if (num_tracers .ne. 7) stop 21 - if (tracers(1) .ne. "sphum" .or. tracers(2) .ne. "liq_wat" .or. tracers(3) .ne. "o3mr" .or. & - tracers(4) .ne. "ice_wat" .or. tracers(5) .ne. "rainwat" .or. tracers(6) .ne. "snowwat" .or. & - tracers(7) .ne. "graupel") stop 22 + if (cycle_mon .ne. 10 .or. cycle_day .ne. 3 .or. cycle_hour .ne. 0) stop 3 + if (.not. convert_atm .or. .not. convert_sfc .or. .not. convert_nst) stop 4 + if (regional .ne. 0 .or. halo_bndy .ne. 0 .or. halo_blend .ne. 0) stop 5 + if (trim(mosaic_file_target_grid) .ne. "/scratch1/NCEPDEV/da/George.Gayno/noscrub/reg_tests/chgres_cube/fix/C192/C192_mosaic.nc") stop 6 + if (trim(fix_dir_target_grid) .ne. "/scratch1/NCEPDEV/da/George.Gayno/noscrub/reg_tests/chgres_cube/fix/C192/fix_sfc") stop 7 + if (trim(orog_dir_target_grid) .ne. "/scratch1/NCEPDEV/da/George.Gayno/noscrub/reg_tests/chgres_cube/fix/C192/") stop 8 + if (trim(vcoord_file_target_grid) .ne. "/scratch1/NCEPDEV/da/George.Gayno/ufs_utils.git/UFS_UTILS/reg_tests/chgres_cube/../../fix/fix_am/global_hyblev.l64.txt") stop 9 + if (trim(data_dir_input_grid) .ne. "/scratch1/NCEPDEV/da/George.Gayno/noscrub/reg_tests/chgres_cube/input_data/fv3.history") stop 10 + if (trim(atm_files_input_grid(1)) .ne. 'dynf000.tile1.nc') stop 11 + if (trim(sfc_files_input_grid(1)) .ne. 'phyf000.tile1.nc') stop 12 + if (varmap_file .ne. "NULL") stop 13 + if (thomp_mp_climo_file .ne. "NULL") stop 14 + if (trim(cres_target_grid) .ne. "C192") stop 15 + if (atm_weight_file .ne. "NULL") stop 16 + if (trim(input_type) .ne. "history") stop 17 + if (trim(external_model) .ne. "GFS") stop 18 + if (num_tracers .ne. 3) stop 19 + if (tracers(1) .ne. "sphum" .or. tracers(2) .ne. "liq_wat" .or. tracers(3) .ne. "o3mr") stop 20 if (tracers_input(1) .ne. "spfh" .or. tracers_input(2) .ne. "clwmr" .or. & - tracers_input(3) .ne. "o3mr" .or. tracers_input(4) .ne. "icmr" .or. & - tracers_input(5) .ne. "rwmr" .or. tracers_input(6) .ne. "snmr" .or. & - tracers_input(7) .ne. "grle") stop 23 + tracers_input(3) .ne. "o3mr") stop 200 + ! Reset the tracers array. do is = 1, max_tracers @@ -140,6 +137,7 @@ program ftst_program_setup enddo print*, "OK" +#ifdef CHGRES_ALL if (my_rank .eq. 0) print*, "testing read_setup_namelist with config_gaussian_nemsio..." call read_setup_namelist("data/config_gaussian_nemsio.nml") if (cycle_mon .ne. 7 .or. cycle_day .ne. 4 .or. 
cycle_hour .ne. 12) stop 74 @@ -203,6 +201,7 @@ program ftst_program_setup tracers_input(is) = "NULL" enddo if (my_rank .eq. 0) print*, "OK" +#endif if (my_rank .eq. 0) print*, "testing read_setup_namelist with config_gfs_grib2..." call read_setup_namelist("data/config_gfs_grib2.nml") @@ -215,7 +214,6 @@ program ftst_program_setup if (trim(vcoord_file_target_grid) .ne. "/scratch1/NCEPDEV/da/George.Gayno/ufs_utils.git/UFS_UTILS/reg_tests/chgres_cube/../../fix/fix_am/global_hyblev.l65.txt") stop 100 if (trim(data_dir_input_grid) .ne. "/scratch1/NCEPDEV/da/George.Gayno/noscrub/reg_tests/chgres_cube/input_data/gfs.grib2") stop 101 if (trim(grib2_file_input_grid) .ne. 'gfs.t00z.pgrb2.0p50.f000') stop 102 - if (trim(sfc_files_input_grid(1)) .ne. 'gdas.t00z.sfcanl') stop 103 if (varmap_file .ne. "/scratch1/NCEPDEV/da/George.Gayno/ufs_utils.git/UFS_UTILS/reg_tests/chgres_cube/../../parm/varmap_tables/GFSphys_var_map.txt") stop 1010 if (thomp_mp_climo_file .ne. "NULL") stop 106 if (trim(cres_target_grid) .ne. "C192") stop 107 diff --git a/util/gdas_init/config b/util/gdas_init/config index 7a0a74571..01a98bbf7 100644 --- a/util/gdas_init/config +++ b/util/gdas_init/config @@ -23,9 +23,13 @@ # RUN_CHGRES - To run chgres, set to 'yes'. To extract # data only, set to 'no'. # yy/mm/dd/hh - The year/month/day/hour of your desired -# experiment. Currently, does not support -# pre-ENKF GFS data, prior to -# 2012 May 21 00z. Use two digits. +# experiment. Use a four digit year and +# two digits for mon/day/hour. NOTE: The +# standard build of chgres_cube does NOT +# support experiments prior to June 12, +# 2019. To coldstart an experiment +# prior to these dates, contact a repository +# manager for assistance. # LEVS - Number of hybrid levels plus 1. To # run with 64 levels, set LEVS to 65. # CRES_HIRES - Resolution of the hires component of @@ -48,20 +52,20 @@ #----------------------------------------------------------- EXTRACT_DIR=/lfs/h2/emc/stmp/$USER/gdas.init/input -EXTRACT_DATA=no +EXTRACT_DATA=yes RUN_CHGRES=yes -yy=2022 -mm=05 -dd=06 +yy=2020 +mm=01 +dd=01 hh=06 use_v16retro=no LEVS=65 -CDUMP=gdas +CDUMP=gfs CRES_HIRES=C192 CRES_ENKF=C96 @@ -88,14 +92,26 @@ else # No ENKF data prior to 2012/05/21/00z if [ $yy$mm$dd$hh -lt 2012052100 ]; then set +x - echo FATAL ERROR: SCRIPTS DO NOT SUPPORT OLD GFS DATA + echo "FATAL ERROR: SCRIPTS DO NOT SUPPORT OLD GFS DATA." exit 2 elif [ $yy$mm$dd$hh -lt 2016051000 ]; then - gfs_ver=v12 +# gfs_ver=v12 + set +x + echo "FATAL ERROR: V12 DATA NOT SUPPORTED." + echo "CONTACT A REPOSITORY MANAGER FOR ASSISTANCE." + exit 2 elif [ $yy$mm$dd$hh -lt 2017072000 ]; then - gfs_ver=v13 +# gfs_ver=v13 + set +x + echo "FATAL ERROR: V13 DATA NOT SUPPORTED." + echo "CONTACT A REPOSITORY MANAGER FOR ASSISTANCE." + exit 2 elif [ $yy$mm$dd$hh -lt 2019061200 ]; then - gfs_ver=v14 +# gfs_ver=v14 + set +x + echo "FATAL ERROR: V14 DATA NOT SUPPORTED." + echo "CONTACT A REPOSITORY MANAGER FOR ASSISTANCE." + exit 2 elif [ $yy$mm$dd$hh -lt 2021032100 ]; then gfs_ver=v15 # The way the v16 switch over was done, there is no complete @@ -104,7 +120,7 @@ else # tarballs were archived starting 2021032106. elif [ $yy$mm$dd$hh -lt 2021032106 ]; then set +x - echo FATAL ERROR: NO V15 OR V16 DATA FOR 2021032100 + echo "FATAL ERROR: NO V15 OR V16 DATA FOR 2021032100." 
exit 1 fi diff --git a/util/gdas_init/driver.hera.sh b/util/gdas_init/driver.hera.sh index 58a0adf80..9f2387835 100755 --- a/util/gdas_init/driver.hera.sh +++ b/util/gdas_init/driver.hera.sh @@ -55,13 +55,11 @@ if [ $EXTRACT_DATA == yes ]; then fi ;; v15) + DATAH=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_${CDUMP} \ + -o log.data.${CDUMP} -e log.data.${CDUMP} ./get_v15.data.sh ${CDUMP}) if [ "$CDUMP" = "gfs" ] ; then - DATAH=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_${CDUMP} \ - -o log.data.${CDUMP} -e log.data.${CDUMP} ./get_v15.data.sh ${CDUMP}) DEPEND="-d afterok:$DATAH" else - DATAH=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_${CDUMP} \ - -o log.data.${CDUMP} -e log.data.${CDUMP} ./get_v15.data.sh ${CDUMP}) DATA1=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_grp1 \ -o log.data.grp1 -e log.data.grp1 ./get_v15.data.sh grp1) DATA2=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_grp2 \ @@ -143,13 +141,8 @@ if [ $RUN_CHGRES == yes ]; then -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} run_v14.chgres.sh ${CDUMP} ;; v15) - if [ "$CDUMP" = "gdas" ]; then - sbatch --parsable --ntasks-per-node=6 --nodes=${NODES} -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${CDUMP} \ - -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} run_v15.chgres.sh ${CDUMP} - else - sbatch --parsable --ntasks-per-node=6 --nodes=${NODES} -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${CDUMP} \ - -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} run_v15.chgres.gfs.sh - fi + sbatch --parsable --ntasks-per-node=6 --nodes=${NODES} -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${CDUMP} \ + -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} run_v15.chgres.sh ${CDUMP} ;; v16retro) if [ "$CDUMP" = "gdas" ] ; then diff --git a/util/gdas_init/driver.jet.sh b/util/gdas_init/driver.jet.sh index 7d587ba3e..fb240844f 100755 --- a/util/gdas_init/driver.jet.sh +++ b/util/gdas_init/driver.jet.sh @@ -57,13 +57,11 @@ if [ $EXTRACT_DATA == yes ]; then fi ;; v15) + DATAH=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_${CDUMP} \ + -o log.data.${CDUMP} -e log.data.${CDUMP} ./get_v15.data.sh ${CDUMP}) if [ "$CDUMP" = "gfs" ] ; then - DATAH=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_${CDUMP} \ - -o log.data.${CDUMP} -e log.data.${CDUMP} ./get_v15.data.sh ${CDUMP}) DEPEND="-d afterok:$DATAH" else - DATAH=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_${CDUMP} \ - -o log.data.${CDUMP} -e log.data.${CDUMP} ./get_v15.data.sh ${CDUMP}) DATA1=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_grp1 \ -o log.data.grp1 -e log.data.grp1 ./get_v15.data.sh grp1) DATA2=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_grp2 \ @@ -145,13 +143,8 @@ if [ $RUN_CHGRES == yes ]; then -o log.${CDUMP} -e log.${CDUMP} --partition=${PARTITION} ${DEPEND} run_v14.chgres.sh ${CDUMP} ;; v15) - if [ "$CDUMP" = "gdas" ]; then - sbatch --parsable --ntasks-per-node=6 --nodes=${NODES} -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${CDUMP} \ - -o log.${CDUMP} -e log.${CDUMP} --partition=${PARTITION} ${DEPEND} 
run_v15.chgres.sh ${CDUMP} - else - sbatch --parsable --ntasks-per-node=6 --nodes=${NODES} -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${CDUMP} \ - -o log.${CDUMP} -e log.${CDUMP} --partition=${PARTITION} ${DEPEND} run_v15.chgres.gfs.sh - fi + sbatch --parsable --ntasks-per-node=6 --nodes=${NODES} -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${CDUMP} \ + -o log.${CDUMP} -e log.${CDUMP} --partition=${PARTITION} ${DEPEND} run_v15.chgres.sh ${CDUMP} ;; v16retro) if [ "$CDUMP" = "gdas" ] ; then diff --git a/util/gdas_init/driver.s4.sh b/util/gdas_init/driver.s4.sh index 34a015929..5a1d7b9ab 100755 --- a/util/gdas_init/driver.s4.sh +++ b/util/gdas_init/driver.s4.sh @@ -60,13 +60,8 @@ if [ $RUN_CHGRES == yes ]; then -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} run_v14.chgres.sh ${CDUMP} ;; v15) - if [ "$CDUMP" = "gdas" ]; then - sbatch --parsable --ntasks-per-node=6 --nodes=${NODES} -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${CDUMP} \ - -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} run_v15.chgres.sh ${CDUMP} - else - sbatch --parsable --ntasks-per-node=6 --nodes=${NODES} -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${CDUMP} \ - -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} run_v15.chgres.gfs.sh - fi + sbatch --parsable --ntasks-per-node=6 --nodes=${NODES} -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${CDUMP} \ + -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} run_v15.chgres.sh ${CDUMP} ;; v16retro) if [ "$CDUMP" = "gdas" ] ; then diff --git a/util/gdas_init/driver.wcoss2.sh b/util/gdas_init/driver.wcoss2.sh index b765c8d78..a4d4d55c1 100755 --- a/util/gdas_init/driver.wcoss2.sh +++ b/util/gdas_init/driver.wcoss2.sh @@ -56,13 +56,11 @@ if [ $EXTRACT_DATA == yes ]; then fi ;; v15) + DATAH=$(qsub -V -o log.data.${CDUMP} -e log.data.${CDUMP} -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ + -N get_${CDUMP} -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh ${CDUMP}) if [ "$CDUMP" = "gfs" ] ; then - DATAH=$(qsub -V -o log.data.${CDUMP} -e log.data.${CDUMP} -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ - -N get_${CDUMP} -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh ${CDUMP}) DEPEND="-W depend=afterok:$DATAH" else - DATAH=$(qsub -V -o log.data.${CDUMP} -e log.data.${CDUMP} -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ - -N get_${CDUMP} -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh ${CDUMP}) DATA1=$(qsub -V -o log.data.grp1 -e log.data.grp1 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ -N get_grp1 -l select=1:ncpus=1:mem=$MEM -- ${this_dir}/get_v15.data.sh grp1) DATA2=$(qsub -V -o log.data.grp2 -e log.data.grp2 -q $QUEUE -A $PROJECT_CODE -l walltime=$WALLT \ @@ -157,13 +155,8 @@ if [ $RUN_CHGRES == yes ]; then -N chgres_${CDUMP} -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} -- ${this_dir}/run_v14.chgres.sh ${CDUMP} ;; v15) - if [ "$CDUMP" = "gdas" ]; then - qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \ - -N chgres_${CDUMP} -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} -- ${this_dir}/run_v15.chgres.sh ${CDUMP} - else - qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \ - -N chgres_${CDUMP} -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} ${this_dir}/run_v15.chgres.gfs.sh - fi + qsub -V -l select=${NODES}:ncpus=${NCPUS}:ompthreads=1:mem=${MEM} -l walltime=$WALLT -A $PROJECT_CODE -q $QUEUE \ + -N chgres_${CDUMP} -o log.${CDUMP} -e log.${CDUMP} ${DEPEND} -- ${this_dir}/run_v15.chgres.sh ${CDUMP} ;; v16retro) if [ "$CDUMP" = "gdas" ] ; then diff --git 
a/util/gdas_init/get_v15.data.sh b/util/gdas_init/get_v15.data.sh index 55c31f1da..1f5a5b7bd 100755 --- a/util/gdas_init/get_v15.data.sh +++ b/util/gdas_init/get_v15.data.sh @@ -21,33 +21,15 @@ dd_m6=$(echo $date10_m6 | cut -c7-8) hh_m6=$(echo $date10_m6 | cut -c9-10) #---------------------------------------------------------------------- -# Read the nemsio analysis files from the gfs bundle. -#---------------------------------------------------------------------- - -if [ $bundle = 'gfs' ]; then - - directory=/NCEPPROD/hpssprod/runhistory/rh${yy}/${yy}${mm}/${yy}${mm}${dd} - if [ $yy$mm$dd$hh -lt 2020022600 ]; then - file=gpfs_dell1_nco_ops_com_gfs_prod_gfs.${yy}${mm}${dd}_${hh}.gfs_nemsioa.tar - else - file=com_gfs_prod_gfs.${yy}${mm}${dd}_${hh}.gfs_nemsioa.tar - fi - - htar -xvf $directory/$file ./gfs.${yy}${mm}${dd}/${hh}/gfs.t${hh}z.atmanl.nemsio - rc=$? - [ $rc != 0 ] && exit $rc - - htar -xvf $directory/$file ./gfs.${yy}${mm}${dd}/${hh}/gfs.t${hh}z.sfcanl.nemsio - rc=$? - [ $rc != 0 ] && exit $rc - -#---------------------------------------------------------------------- -# For GDAS, use the tiled restart files. Need to use the 6-hour -# forecast files from the previous cycle as they are not saved -# at the current cycle. +# Because the use of nemsio data is being phased out from chgres_cube, +# use the GDAS tiled restart files (which are netcdf) for both +# the GDAS high-res and the GFS free forecast runs. +# +# Note: Need to use the 6-hour forecast files from the previous +# cycle as they are not saved at the current cycle. #---------------------------------------------------------------------- -elif [ $bundle = 'gdas' ]; then +if [ "$bundle" == "gdas" ] || [ "$bundle" == "gfs" ] ; then directory=/NCEPPROD/hpssprod/runhistory/rh${yy_m6}/${yy_m6}${mm_m6}/${yy_m6}${mm_m6}${dd_m6} if [ $date10_m6 -lt 2020022600 ]; then @@ -71,6 +53,8 @@ elif [ $bundle = 'gdas' ]; then rm -f ./list.hires* + [ "$bundle" == "gfs" ] && exit 0 + #---------------------------------------------------------------------- # Get the 'abias' and 'radstat' files from current cycle #---------------------------------------------------------------------- diff --git a/util/gdas_init/run_v15.chgres.gfs.sh b/util/gdas_init/run_v15.chgres.gfs.sh deleted file mode 100755 index b24920922..000000000 --- a/util/gdas_init/run_v15.chgres.gfs.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash - -#---------------------------------------------------------------- -# Run chgres using v15 nemsio data as input. This is used -# for initializing GFS free forecasts. 
-#---------------------------------------------------------------- - -set -x - -FIX_FV3=$UFS_DIR/fix -FIX_ORO=${FIX_FV3}/orog -FIX_AM=${FIX_FV3}/am - -WORKDIR=${WORKDIR:-$OUTDIR/work.gfs} - -CTAR=${CRES_HIRES} -INPUT_DATA_DIR="${EXTRACT_DIR}/gfs.${yy}${mm}${dd}/${hh}" -ATMFILE="gfs.t${hh}z.atmanl.nemsio" -SFCFILE="gfs.t${hh}z.sfcanl.nemsio" - -rm -fr $WORKDIR -mkdir -p $WORKDIR -cd $WORKDIR - -source $GDAS_INIT_DIR/set_fixed_files.sh - -cat << EOF > fort.41 - -&config - fix_dir_target_grid="${FIX_ORO}/${ORO_DIR}/sfc" - mosaic_file_target_grid="${FIX_ORO}/${ORO_DIR}/${CTAR}_mosaic.nc" - orog_dir_target_grid="${FIX_ORO}/${ORO_DIR}" - orog_files_target_grid="${ORO_NAME}.tile1.nc","${ORO_NAME}.tile2.nc","${ORO_NAME}.tile3.nc","${ORO_NAME}.tile4.nc","${ORO_NAME}.tile5.nc","${ORO_NAME}.tile6.nc" - data_dir_input_grid="${INPUT_DATA_DIR}" - atm_files_input_grid="$ATMFILE" - sfc_files_input_grid="$SFCFILE" - vcoord_file_target_grid="${FIX_AM}/global_hyblev.l${LEVS}.txt" - cycle_mon=$mm - cycle_day=$dd - cycle_hour=$hh - convert_atm=.true. - convert_sfc=.true. - convert_nst=.true. - input_type="gaussian_nemsio" - tracers="sphum","liq_wat","o3mr","ice_wat","rainwat","snowwat","graupel" - tracers_input="spfh","clwmr","o3mr","icmr","rwmr","snmr","grle" -/ -EOF - -$APRUN $EXEC_DIR/chgres_cube -rc=$? - -if [ $rc != 0 ]; then - exit $rc -fi - -$GDAS_INIT_DIR/copy_coldstart_files.sh gfs $OUTDIR $yy $mm $dd $hh $INPUT_DATA_DIR - -rm -fr $WORKDIR - -set +x -echo CHGRES COMPLETED FOR MEMBER gfs - -exit 0 diff --git a/util/gdas_init/run_v15.chgres.sh b/util/gdas_init/run_v15.chgres.sh index 585b60e18..47f02f073 100755 --- a/util/gdas_init/run_v15.chgres.sh +++ b/util/gdas_init/run_v15.chgres.sh @@ -22,7 +22,7 @@ YMDH=${yy}${mm}${dd}.${hh}0000 WORKDIR=${WORKDIR:-$OUTDIR/work.${MEMBER}} -if [ ${MEMBER} == 'gdas' ]; then +if [ "${MEMBER}" == "gdas" ] || [ "${MEMBER}" == "gfs" ] ; then CINP=${CINP:-"C768"} CTAR=${CRES_HIRES} INPUT_DATA_DIR="${EXTRACT_DIR}/gdas.${yy_d}${mm_d}${dd_d}/${hh_d}/RESTART" From d902d3959a9d57386d7790ded8cf81d48b5381e8 Mon Sep 17 00:00:00 2001 From: Wei Huang Date: Mon, 29 Apr 2024 08:29:57 -0600 Subject: [PATCH 17/25] Add and modify build module files for the cloud (#937) Supports compilation on AWS, Azure and Google CSP. Fixes #936. --- build_all.sh | 14 ++---- modulefiles/build.noaacloud.intel.lua | 62 ++++++--------------------- modulefiles/common4noaacloud.lua | 60 ++++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 60 deletions(-) create mode 100644 modulefiles/common4noaacloud.lua diff --git a/build_all.sh b/build_all.sh index 3d4bb0805..eb57c7b2d 100755 --- a/build_all.sh +++ b/build_all.sh @@ -15,10 +15,11 @@ else readonly DIR_ROOT=$(cd "$(dirname "$(readlink -f -n "${BASH_SOURCE[0]}" )" )" && pwd -P) fi +source "${DIR_ROOT}/sorc/machine-setup.sh" + # User Options target=${target:-"NULL"} compiler=${compiler:-"intel"} -PW_CSP=${PW_CSP:-} # TODO: This is an implementation from EPIC and consistent with the UFS WM build system. if [[ "$target" == "linux.*" || "$target" == "macosx.*" ]]; then unset -f module @@ -27,17 +28,10 @@ if [[ "$target" == "linux.*" || "$target" == "macosx.*" ]]; then set -x else set +x - source "${DIR_ROOT}/sorc/machine-setup.sh" - if [[ "${target}" == "noaacloud" ]]; then - #TODO: This will need to be revisited once the EPIC supported-stacks come online. - #TODO: This is a hack due to how the spack-stack module files are generated; there may be a better way to do this. 
- source /contrib/global-workflow/spack-stack/envs/spack_2021.0.3.env - else - module use "${DIR_ROOT}/modulefiles" - fi + module use "${DIR_ROOT}/modulefiles" module load "build.$target.$compiler" > /dev/null module list - set -x + set -x fi # Ensure the submodules have been initialized. diff --git a/modulefiles/build.noaacloud.intel.lua b/modulefiles/build.noaacloud.intel.lua index a10eeda27..eb6b7a6e0 100644 --- a/modulefiles/build.noaacloud.intel.lua +++ b/modulefiles/build.noaacloud.intel.lua @@ -2,59 +2,21 @@ help([[ Load environment to compile UFS_UTILS on NOAA CSPs using Intel ]]) -cmake_ver=os.getenv("cmake_ver") or "3.16.1" -load(pathJoin("cmake", cmake_ver)) - -hpc_intel_ver=os.getenv("hpc_intel_ver") or "2021.3.0" -load(pathJoin("intel", hpc_intel_ver)) - -impi_ver=os.getenv("impi_ver") or "2021.3.0" -load(pathJoin("impi", impi_ver)) - -bacio_ver=os.getenv("bacio_ver") or "2.4.1" -load(pathJoin("bacio", bacio_ver)) - -g2_ver=os.getenv("g2_ver") or "3.4.5" -load(pathJoin("g2", g2_ver)) - -ip_ver=os.getenv("ip_ver") or "4.0.0" -load(pathJoin("ip", ip_ver)) - -nemsio_ver=os.getenv("nemsio_ver") or "2.5.4" -load(pathJoin("nemsio", nemsio_ver)) +prepend_path("MODULEPATH", "/contrib/spack-stack/spack-stack-1.6.0/envs/unified-env/install/modulefiles/Core") -sp_ver=os.getenv("sp_ver") or "2.5.0" -load(pathJoin("sp", sp_ver)) +stack_intel_ver=os.getenv("stack_intel_ver") or "2021.3.0" +load(pathJoin("stack-intel", stack_intel_ver)) -w3emc_ver=os.getenv("w3emc_ver") or "2.9.2" -load(pathJoin("w3emc", w3emc_ver)) +stack_impi_ver=os.getenv("stack_impi_ver") or "2021.3.0" +load(pathJoin("stack-intel-oneapi-mpi", stack_impi_ver)) --- Uncomment when CHGRES_ALL is ON ---sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" ---load(pathJoin("sfcio", sfcio_ver)) - -sigio_ver=os.getenv("sigio_ver") or "2.3.2" -load(pathJoin("sigio", sigio_ver)) - -zlib_ver=os.getenv("zlib_ver") or "1.2.11" -load(pathJoin("zlib", zlib_ver)) - -png_ver=os.getenv("png_ver") or "1.6.35" -load(pathJoin("libpng", png_ver)) - -hdf5_ver=os.getenv("hdf5_ver") or "1.10.6" -load(pathJoin("hdf5", hdf5_ver)) - -netcdf_ver=os.getenv("netcdf_ver") or "4.6.1" -load(pathJoin("netcdf", netcdf_ver)) - -nccmp_ver=os.getenv("nccmp_ver") or "1.8.9.0" -load(pathJoin("nccmp", nccmp_ver)) +cmake_ver=os.getenv("cmake_ver") or "3.23.1" +load(pathJoin("cmake", cmake_ver)) -esmf_ver=os.getenv("esmf_ver") or "8.4.0b08" -load(pathJoin("esmf", esmf_ver)) +load("common4noaacloud") -nco_ver=os.getenv("nco_ver") or "4.9.1" -load(pathJoin("nco", nco_ver)) +setenv("CC", "mpiicc") +setenv("CXX", "mpiicpc") +setenv("FC", "mpiifort") -whatis("Description: UFS_UTILS build environment") +whatis("Description: UFS_UTILS build environment on NOAA Cloud") diff --git a/modulefiles/common4noaacloud.lua b/modulefiles/common4noaacloud.lua new file mode 100644 index 000000000..a7028dd9d --- /dev/null +++ b/modulefiles/common4noaacloud.lua @@ -0,0 +1,60 @@ +help([[ +Load environment to compile UFS_UTILS on NOAA CSPs using Intel +]]) + +cmake_ver=os.getenv("cmake_ver") or "3.16.1" +load(pathJoin("cmake", cmake_ver)) + +hpc_intel_ver=os.getenv("hpc_intel_ver") or "2021.3.0" +load(pathJoin("intel", hpc_intel_ver)) + +impi_ver=os.getenv("impi_ver") or "2021.3.0" +load(pathJoin("impi", impi_ver)) + +bacio_ver=os.getenv("bacio_ver") or "2.4.1" +load(pathJoin("bacio", bacio_ver)) + +g2_ver=os.getenv("g2_ver") or "3.4.5" +load(pathJoin("g2", g2_ver)) + +ip_ver=os.getenv("ip_ver") or "4.3.0" +load(pathJoin("ip", ip_ver)) + +nemsio_ver=os.getenv("nemsio_ver") or "2.5.4" 
+load(pathJoin("nemsio", nemsio_ver)) + +sp_ver=os.getenv("sp_ver") or "2.5.0" +load(pathJoin("sp", sp_ver)) + +w3emc_ver=os.getenv("w3emc_ver") or "2.10.0" +load(pathJoin("w3emc", w3emc_ver)) + +-- Uncomment when CHGRES_ALL is ON +--sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +--load(pathJoin("sfcio", sfcio_ver)) + +sigio_ver=os.getenv("sigio_ver") or "2.3.2" +load(pathJoin("sigio", sigio_ver)) + +zlib_ver=os.getenv("zlib_ver") or "1.2.13" +load(pathJoin("zlib", zlib_ver)) + +png_ver=os.getenv("png_ver") or "1.6.37" +load(pathJoin("libpng", png_ver)) + +hdf5_ver=os.getenv("hdf5_ver") or "1.10.6" +load(pathJoin("hdf5", hdf5_ver)) + +netcdf_ver=os.getenv("netcdf_ver") or "4.6.1" +load(pathJoin("netcdf", netcdf_ver)) + +nccmp_ver=os.getenv("nccmp_ver") or "1.9.0.1" +load(pathJoin("nccmp", nccmp_ver)) + +esmf_ver=os.getenv("esmf_ver") or "8.6.0" +load(pathJoin("esmf", esmf_ver)) + +nco_ver=os.getenv("nco_ver") or "4.9.1" +load(pathJoin("nco", nco_ver)) + +whatis("Description: UFS_UTILS build environment") From 65a386476c1832af567e4fd72e700d5d80ed37a2 Mon Sep 17 00:00:00 2001 From: DavidBurrows-NCO <82525974+DavidBurrows-NCO@users.noreply.github.com> Date: Wed, 1 May 2024 11:47:00 -0400 Subject: [PATCH 18/25] Build ufs_utils on Gaea-C5 (#935) Update/create the following: the build module files. the link_fixdirs.sh script, and the README.md. Fixes #934 --- README.md | 2 +- fix/link_fixdirs.sh | 10 +++-- modulefiles/build.gaea.intel | 22 ----------- modulefiles/build.gaea.intel.lua | 66 ++++++++++++++++++++++++++++++++ sorc/machine-setup.sh | 40 ++++--------------- 5 files changed, 80 insertions(+), 60 deletions(-) delete mode 100644 modulefiles/build.gaea.intel create mode 100644 modulefiles/build.gaea.intel.lua diff --git a/README.md b/README.md index fa857f1f8..ce4bbbe07 100644 --- a/README.md +++ b/README.md @@ -59,7 +59,7 @@ It also uses the following repositories: ## Installing -On Orion, Hercules, Jet, Hera and WCOSS2 do the following: +On Orion, Hercules, Jet, Hera, S4, Gaea and WCOSS2 do the following: 1) Set the 'fixed' directories using the `link_fixdirs.sh` script in `./fix`. Usage: `./link_fixdirs.sh $RUN_ENVIR $machine`, diff --git a/fix/link_fixdirs.sh b/fix/link_fixdirs.sh index 00c17a10b..3fb91a1b2 100755 --- a/fix/link_fixdirs.sh +++ b/fix/link_fixdirs.sh @@ -9,7 +9,7 @@ set -ex # 'nco' (copies data). # # $machine - is the machine. 
Choices are: -# 'wcoss2', 'hera', 'jet', 'orion', 'hercules', 's4' +# 'wcoss2', 'hera', 'jet', 'orion', 'hercules', 's4', 'gaea' RUN_ENVIR=${1} machine=${2} @@ -17,7 +17,7 @@ machine=${2} if [ $# -lt 2 ]; then set +x echo '***ERROR*** must specify two arguements: (1) RUN_ENVIR, (2) machine' - echo ' Syntax: link_fv3gfs.sh ( nco | emc ) ( wcoss2 | hera | jet | orion | hercules | s4 )' + echo ' Syntax: link_fv3gfs.sh ( nco | emc ) ( wcoss2 | hera | jet | orion | hercules | s4 | gaea )' exit 1 fi @@ -28,10 +28,10 @@ if [ $RUN_ENVIR != emc -a $RUN_ENVIR != nco ]; then exit 1 fi -if [ $machine != wcoss2 -a $machine != hera -a $machine != jet -a $machine != orion -a $machine != s4 -a $machine != hercules ]; then +if [ $machine != wcoss2 -a $machine != hera -a $machine != jet -a $machine != orion -a $machine != s4 -a $machine != hercules -a $machine != gaea ]; then set +x echo '***ERROR*** unsupported machine' - echo 'Syntax: link_fv3gfs.sh ( nco | emc ) ( wcoss2 | hera | jet | orion | hercules | s4 )' + echo 'Syntax: link_fv3gfs.sh ( nco | emc ) ( wcoss2 | hera | jet | orion | hercules | s4 | gaea )' exit 1 fi @@ -54,6 +54,8 @@ elif [ $machine = "wcoss2" ]; then FIX_DIR="/lfs/h2/emc/global/noscrub/emc.global/FIX/fix" elif [ $machine = "s4" ]; then FIX_DIR="/data/prod/glopara/fix" +elif [ $machine = "gaea" ]; then + FIX_DIR="/gpfs/f5/epic/proj-shared/global/glopara/data/fix" fi am_ver=${am_ver:-20220805} diff --git a/modulefiles/build.gaea.intel b/modulefiles/build.gaea.intel deleted file mode 100644 index d1e278e36..000000000 --- a/modulefiles/build.gaea.intel +++ /dev/null @@ -1,22 +0,0 @@ -#%Module##################################################### -## Build module for Gaea -############################################################# - -module switch intel/18.0.6.288 - -module load git/2.26.0 -module load cmake/3.17.0 - -# hpc-stack installed as a flat install at: -# /ncrc/home2/Rahul.Mahajan/dev/opt - -setenv ESMFMKFILE /ncrc/home2/Rahul.Mahajan/dev/opt/lib/esmf.mk - -prepend-path PATH /ncrc/home2/Rahul.Mahajan/dev/opt/bin -prepend-path CMAKE_PREFIX_PATH /ncrc/home2/Rahul.Mahajan/dev/opt -prepend-path LD_LIBRARY_PATH /ncrc/home2/Rahul.Mahajan/dev/opt/lib -prepend-path LD_LIBRARY_PATH /ncrc/home2/Rahul.Mahajan/dev/opt/lib64 - -setenv CMAKE_C_COMPILER cc -setenv CMAKE_CXX_COMPILER CC -setenv CMAKE_Fortran_COMPILER ftn diff --git a/modulefiles/build.gaea.intel.lua b/modulefiles/build.gaea.intel.lua new file mode 100644 index 000000000..2898c3a29 --- /dev/null +++ b/modulefiles/build.gaea.intel.lua @@ -0,0 +1,66 @@ +help([[ +Load environment to compile UFS_UTILS on Gaea using Intel +]]) + +prepend_path("MODULEPATH", "/sw/rdtn/modulefiles") +load("hsi") + +prepend_path("MODULEPATH", "/ncrc/proj/epic/spack-stack/spack-stack-1.6.0/envs/unified-env/install/modulefiles/Core") + +stack_intel_ver=os.getenv("stack_intel_ver") or "2023.1.0" +load(pathJoin("stack-intel", stack_intel_ver)) + +stack_cray_mpich_ver=os.getenv("stack_cray_mpich_ver") or "8.1.25" +load(pathJoin("stack-cray-mpich", stack_cray_mpich_ver)) + +cmake_ver=os.getenv("cmake_ver") or "3.23.1" +load(pathJoin("cmake", cmake_ver)) + +bacio_ver=os.getenv("bacio_ver") or "2.4.1" +load(pathJoin("bacio", bacio_ver)) + +g2_ver=os.getenv("g2_ver") or "3.4.5" +load(pathJoin("g2", g2_ver)) + +ip_ver=os.getenv("ip_ver") or "4.3.0" +load(pathJoin("ip", ip_ver)) + +nemsio_ver=os.getenv("nemsio_ver") or "2.5.4" +load(pathJoin("nemsio", nemsio_ver)) + +sp_ver=os.getenv("sp_ver") or "2.5.0" +load(pathJoin("sp", sp_ver)) + 
+w3emc_ver=os.getenv("w3emc_ver") or "2.10.0" +load(pathJoin("w3emc", w3emc_ver)) + +-- Uncomment when CHGRES_ALL is ON +--sfcio_ver=os.getenv("sfcio_ver") or "1.4.1" +--load(pathJoin("sfcio", sfcio_ver)) + +sigio_ver=os.getenv("sigio_ver") or "2.3.2" +load(pathJoin("sigio", sigio_ver)) + +zlib_ver=os.getenv("zlib_ver") or "1.2.13" +load(pathJoin("zlib", zlib_ver)) + +png_ver=os.getenv("png_ver") or "1.6.37" +load(pathJoin("libpng", png_ver)) + +netcdf_c_ver=os.getenv("netcdf_c_ver") or "4.9.2" +load(pathJoin("netcdf-c", netcdf_c_ver)) + +netcdf_fortran_ver=os.getenv("netcdf_fortran_ver") or "4.6.1" +load(pathJoin("netcdf-fortran", netcdf_fortran_ver)) + +nccmp_ver=os.getenv("nccmp_ver") or "1.9.0.1" +load(pathJoin("nccmp", nccmp_ver)) + +esmf_ver=os.getenv("esmf_ver") or "8.6.0" +load(pathJoin("esmf", esmf_ver)) + +nco_ver=os.getenv("nco_ver") or "5.0.6" +load(pathJoin("nco", nco_ver)) + +whatis("Description: UFS_UTILS build environment") + diff --git a/sorc/machine-setup.sh b/sorc/machine-setup.sh index 9d7267697..18388c66f 100644 --- a/sorc/machine-setup.sh +++ b/sorc/machine-setup.sh @@ -38,43 +38,17 @@ elif [[ -d /scratch1 ]] ; then fi target=hera module purge -elif [[ -d /lustre && -d /ncrc ]] ; then +elif [[ -d /gpfs && -d /ncrc ]] ; then # We are on GAEA. if ( ! eval module help > /dev/null 2>&1 ) ; then - # We cannot simply load the module command. The GAEA - # /etc/profile modifies a number of module-related variables - # before loading the module command. Without those variables, - # the module command fails. Hence we actually have to source - # /etc/profile here. - source /etc/profile - __ms_source_etc_profile=yes - else - __ms_source_etc_profile=no - fi - module purge > /dev/null 2>&1 - module purge -# clean up after purge - unset _LMFILES_ - unset _LMFILES_000 - unset _LMFILES_001 - unset LOADEDMODULES - module load modules - if [[ -d /opt/cray/ari/modulefiles ]] ; then - module use -a /opt/cray/ari/modulefiles - fi - if [[ -d /opt/cray/pe/ari/modulefiles ]] ; then - module use -a /opt/cray/pe/ari/modulefiles - fi - if [[ -d /opt/cray/pe/craype/default/modulefiles ]] ; then - module use -a /opt/cray/pe/craype/default/modulefiles - fi - if [[ -s /etc/opt/cray/pe/admin-pe/site-config ]] ; then - source /etc/opt/cray/pe/admin-pe/site-config - fi - if [[ "$__ms_source_etc_profile" == yes ]] ; then + # We cannot simply load the module command. The GAEA + # /etc/profile modifies a number of module-related variables + # before loading the module command. Without those variables, + # the module command fails. Hence we actually have to source + # /etc/profile here. source /etc/profile - unset __ms_source_etc_profile fi + module reset target=gaea elif [[ "$(hostname)" =~ "Orion" ]]; then target="orion" From e0cfc4ba4d9985134b7612bfa15936eda3ce12d3 Mon Sep 17 00:00:00 2001 From: yuanxue2870 <136842224+yuanxue2870@users.noreply.github.com> Date: Tue, 7 May 2024 15:04:30 -0400 Subject: [PATCH 19/25] global_cycle: Update soil adjustment to use JEDI data (#894) Add read of JEDI-based soil increments on the cubed-sphere grid. Add write of GSI-based soil increments on the cubed-sphere grid. Revise current soil ice adjustment routine based on synthetic DA experiments. Combine JEDI and GSI-based soil increment ingest and adjustment capability. New unit test - ftst_read_increments - to read JEDI-based soil increments on the cubed-sphere grid. New regression test - C192.jedi_lndincsoilnoahmp.sh - to ingest JEDI-based soil increments and apply adjustment. See #894 and #872 for full list changes. 
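For reference, a minimal sketch of the NAMSFCD namelist settings for the JEDI-based soil update introduced here (the group and variable names follow the cycle.f90 changes below; the values shown are illustrative only, and LSOIL_INCR defaults to 3 in the code):

 &NAMSFCD
  NST_FILE="NULL",
  LSOIL_INCR=3,
  DO_SNO_INC_JEDI=.false.,
  DO_SOI_INC_JEDI=.true.,
  DO_SOI_INC_GSI=.false.,
 /

Only one of DO_SOI_INC_GSI and DO_SOI_INC_JEDI may be set to true (cycle.f90 aborts if both are requested), and the per-rank cubed-sphere soil increment files (soil_xainc.$NNN) must be present in the working directory when the JEDI path is selected.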
--- ...noahmp.sh => C192.gsi_lndincsoilnoahmp.sh} | 23 +- .../C192.jedi_lndincsoilnoahmp.sh | 88 +++++ reg_tests/global_cycle/C768.lndincsnow.sh | 3 +- reg_tests/global_cycle/driver.hera.sh | 12 +- reg_tests/global_cycle/driver.hercules.sh | 12 +- reg_tests/global_cycle/driver.jet.sh | 12 +- reg_tests/global_cycle/driver.orion.sh | 12 +- reg_tests/global_cycle/driver.wcoss2.sh | 9 +- sorc/global_cycle.fd/cycle.f90 | 174 ++++++--- sorc/global_cycle.fd/land_increments.f90 | 334 +++++++++++------- sorc/global_cycle.fd/read_write_data.f90 | 109 +++++- .../noah.fd/set_soilveg_snippet.f90 | 59 +++- tests/global_cycle/CMakeLists.txt | 21 +- tests/global_cycle/ftst_land_increments.F90 | 78 ++-- tests/global_cycle/ftst_read_increments.F90 | 168 +++++++++ ush/global_cycle.sh | 17 +- ush/global_cycle_driver.sh | 17 +- 17 files changed, 894 insertions(+), 254 deletions(-) rename reg_tests/global_cycle/{C192.lndincsoilnoahmp.sh => C192.gsi_lndincsoilnoahmp.sh} (73%) create mode 100755 reg_tests/global_cycle/C192.jedi_lndincsoilnoahmp.sh create mode 100644 tests/global_cycle/ftst_read_increments.F90 diff --git a/reg_tests/global_cycle/C192.lndincsoilnoahmp.sh b/reg_tests/global_cycle/C192.gsi_lndincsoilnoahmp.sh similarity index 73% rename from reg_tests/global_cycle/C192.lndincsoilnoahmp.sh rename to reg_tests/global_cycle/C192.gsi_lndincsoilnoahmp.sh index 58b25d26f..bd16365a3 100755 --- a/reg_tests/global_cycle/C192.lndincsoilnoahmp.sh +++ b/reg_tests/global_cycle/C192.gsi_lndincsoilnoahmp.sh @@ -29,8 +29,6 @@ export OCNRES=99999 export COMIN=$HOMEreg/input_data_noahmp -export LND_SOI_FILE=$COMIN/sfcincr_gsi - export JCAP=1534 export LONB=3072 export LATB=1536 @@ -40,6 +38,7 @@ export use_ufo=.true. export DO_SFCCYCLE=".FALSE." export DO_LNDINC=".TRUE." +export DO_SOI_INC_GSI=".true." export VERBOSE=YES export CYCLVARS=FSNOL=-2.,FSNOS=99999., @@ -49,7 +48,7 @@ $HOMEgfs/ush/global_cycle_driver.sh iret=$? if [ $iret -ne 0 ]; then set +x - echo "<<< C192 LANDINC SOIL NOAHMP CYCLE TEST FAILED. >>>" + echo "<<< C192 GSI based LANDINC SOIL NOAHMP CYCLE TEST FAILED. >>>" exit $iret fi @@ -60,7 +59,19 @@ for files in *tile*.nc do if [ -f $files ]; then echo CHECK $files - $NCCMP -dmfqS $files $HOMEreg/baseline_data/c192.lndincsoilnoahmp/$files + $NCCMP -dmfqS $files $HOMEreg/baseline_data/c192.gsi_lndincsoilnoahmp/$files + iret=$? + if [ $iret -ne 0 ]; then + test_failed=1 + fi + fi +done + +for files in *gaussian_interp* +do + if [ -f $files ]; then + echo CHECK $files + $NCCMP -dmfqS $files $HOMEreg/baseline_data/c192.gsi_lndincsoilnoahmp/$files iret=$? if [ $iret -ne 0 ]; then test_failed=1 @@ -72,7 +83,7 @@ set +x if [ $test_failed -ne 0 ]; then echo echo "**********************************************" - echo "<<< C192 LANDINC SOIL-NOAHMP CYCLE TEST FAILED. >>>" + echo "<<< C192 GSI based LANDINC SOIL-NOAHMP CYCLE TEST FAILED. >>>" echo "**********************************************" if [ "$UPDATE_BASELINE" = "TRUE" ]; then $HOMEgfs/reg_tests/update_baseline.sh $HOMEreg "c192.lndincsoilnoahmp" $commit_num @@ -80,7 +91,7 @@ if [ $test_failed -ne 0 ]; then else echo echo "*****************************************" - echo "<<< C192 LANDINC SOIL-NOAHMP CYCLE TEST PASSED. >>>" + echo "<<< C192 GSI based LANDINC SOIL-NOAHMP CYCLE TEST PASSED. 
>>>" echo "*****************************************" fi diff --git a/reg_tests/global_cycle/C192.jedi_lndincsoilnoahmp.sh b/reg_tests/global_cycle/C192.jedi_lndincsoilnoahmp.sh new file mode 100755 index 000000000..ebbc868e7 --- /dev/null +++ b/reg_tests/global_cycle/C192.jedi_lndincsoilnoahmp.sh @@ -0,0 +1,88 @@ +#!/bin/bash + +#------------------------------------------------------------------ +# Run global_cycle for a C192 case to test the ingest and +# application of soil moisture and temperature increments +# on the cubed-sphere grid into Noah-MP restarts, which +# yields (almost) identical results as compared with the GSI case +# given the same day of increments on two different grids. +# Compare output to a baseline set of files using the 'nccmp' +# utility. +#------------------------------------------------------------------ + +set -x + +NCCMP=${NCCMP:-$(which nccmp)} + +export MAX_TASKS_CY=6 + +export HOMEgfs=$NWPROD + +export FIXgfs=$HOMEreg/fix + +export CYCLEXEC=$HOMEgfs/exec/global_cycle + +export CDATE=2019073000 +export FHOUR=00 +export DELTSFC=6 + +export CASE=C192 +export OCNRES=99999 + +export COMIN=$HOMEreg/input_data_noahmp + +export JCAP=1534 +export LONB=3072 +export LATB=1536 + +export DONST="NO" +export use_ufo=.true. + +export DO_SFCCYCLE=".FALSE." +export DO_LNDINC=".TRUE." +export DO_SOI_INC_JEDI=".true." + +export VERBOSE=YES +export CYCLVARS=FSNOL=-2.,FSNOS=99999., + +$HOMEgfs/ush/global_cycle_driver.sh + +iret=$? +if [ $iret -ne 0 ]; then + set +x + echo "<<< C192 JEDI based LANDINC SOIL NOAHMP CYCLE TEST FAILED. >>>" + exit $iret +fi + +test_failed=0 + +cd $DATA +for files in *tile*.nc +do + if [ -f $files ]; then + echo CHECK $files + $NCCMP -dmfqS $files $HOMEreg/baseline_data/c192.jedi_lndincsoilnoahmp/$files + iret=$? + if [ $iret -ne 0 ]; then + test_failed=1 + fi + fi +done + +set +x +if [ $test_failed -ne 0 ]; then + echo + echo "**********************************************" + echo "<<< C192 JEDI based LANDINC SOIL-NOAHMP CYCLE TEST FAILED. >>>" + echo "**********************************************" + if [ "$UPDATE_BASELINE" = "TRUE" ]; then + $HOMEgfs/reg_tests/update_baseline.sh $HOMEreg "c192.jedi_lndincsoilnoahmp" $commit_num + fi +else + echo + echo "*****************************************" + echo "<<< C192 JEDI based LANDINC SOIL-NOAHMP CYCLE TEST PASSED. >>>" + echo "*****************************************" +fi + +exit diff --git a/reg_tests/global_cycle/C768.lndincsnow.sh b/reg_tests/global_cycle/C768.lndincsnow.sh index b6455ebd7..a32239d5e 100755 --- a/reg_tests/global_cycle/C768.lndincsnow.sh +++ b/reg_tests/global_cycle/C768.lndincsnow.sh @@ -31,7 +31,8 @@ export FNSNOA=$COMIN/gdas.t00z.snogrb_t1534.3072.1536 export FNACNA=$COMIN/gdas.t00z.seaice.5min.blend.grb export NST_FILE=$COMIN/gdas.t00z.dtfanl.nc -export DO_SNO_INC=.true. # must be lower-case. +export DO_SNO_INC_JEDI=.true. # must be lower-case. +export DO_SOI_INC_JEDI=.false. 
export JCAP=1534 export LONB=3072 export LATB=1536 diff --git a/reg_tests/global_cycle/driver.hera.sh b/reg_tests/global_cycle/driver.hera.sh index db23cc6f1..b0f4f938c 100755 --- a/reg_tests/global_cycle/driver.hera.sh +++ b/reg_tests/global_cycle/driver.hera.sh @@ -64,8 +64,8 @@ TEST1=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_C LOG_FILE=consistency.log02 export DATA="${DATA_DIR}/test2" export COMOUT=$DATA -TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.lndincsoilnoahmp \ - -o $LOG_FILE -e $LOG_FILE ./C192.lndincsoilnoahmp.sh) +TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gsi_lndincsoilnoahmp \ + -o $LOG_FILE -e $LOG_FILE ./C192.gsi_lndincsoilnoahmp.sh) LOG_FILE=consistency.log03 export DATA="${DATA_DIR}/test3" @@ -79,10 +79,16 @@ export COMOUT=$DATA TEST4=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c48.noahmp.frac \ -o $LOG_FILE -e $LOG_FILE ./C48.noahmp.fracgrid.sh) +LOG_FILE=consistency.log05 +export DATA="${DATA_DIR}/test5" +export COMOUT=$DATA +TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.jedi_lndincsoilnoahmp \ + -o $LOG_FILE -e $LOG_FILE ./C192.jedi_lndincsoilnoahmp.sh) + LOG_FILE=consistency.log sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summary -o $LOG_FILE -e $LOG_FILE \ --open-mode=append -q $QUEUE -d\ - afterok:$TEST1:$TEST2:$TEST3:$TEST4 << EOF + afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5 << EOF #!/bin/bash grep -a '<<<' ${LOG_FILE}* > summary.log EOF diff --git a/reg_tests/global_cycle/driver.hercules.sh b/reg_tests/global_cycle/driver.hercules.sh index b522ad1bc..db1cf8431 100755 --- a/reg_tests/global_cycle/driver.hercules.sh +++ b/reg_tests/global_cycle/driver.hercules.sh @@ -64,8 +64,8 @@ TEST1=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_C LOG_FILE=consistency.log02 export DATA="${DATA_DIR}/test2" export COMOUT=$DATA -TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.lndincsoilnoahmp \ - -o $LOG_FILE -e $LOG_FILE ./C192.lndincsoilnoahmp.sh) +TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gsi_lndincsoilnoahmp \ + -o $LOG_FILE -e $LOG_FILE ./C192.gsi_lndincsoilnoahmp.sh) LOG_FILE=consistency.log03 export DATA="${DATA_DIR}/test3" @@ -79,10 +79,16 @@ export COMOUT=$DATA TEST4=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c48.noahmp.frac \ -o $LOG_FILE -e $LOG_FILE ./C48.noahmp.fracgrid.sh) +LOG_FILE=consistency.log05 +export DATA="${DATA_DIR}/test5" +export COMOUT=$DATA +TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.jedi_lndincsoilnoahmp \ + -o $LOG_FILE -e $LOG_FILE ./C192.jedi_lndincsoilnoahmp.sh) + LOG_FILE=consistency.log sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summary -o $LOG_FILE -e $LOG_FILE \ --open-mode=append -q $QUEUE -d\ - afterok:$TEST1:$TEST2:$TEST3:$TEST4 << EOF + afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5 << EOF #!/bin/bash grep -a '<<<' ${LOG_FILE}* > summary.log EOF diff --git a/reg_tests/global_cycle/driver.jet.sh b/reg_tests/global_cycle/driver.jet.sh index d680dc023..e88d8b21b 100755 --- a/reg_tests/global_cycle/driver.jet.sh +++ b/reg_tests/global_cycle/driver.jet.sh @@ -62,8 +62,8 @@ TEST1=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 
-A $PROJECT_C LOG_FILE=consistency.log02 export DATA="${DATA_DIR}/test2" export COMOUT=$DATA -TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.lndincsoilnoahmp \ - --partition=xjet -o $LOG_FILE -e $LOG_FILE ./C192.lndincsoilnoahmp.sh) +TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gsi_lndincsoilnoahmp \ + --partition=xjet -o $LOG_FILE -e $LOG_FILE ./C192.gsi_lndincsoilnoahmp.sh) LOG_FILE=consistency.log03 export DATA="${DATA_DIR}/test3" @@ -77,10 +77,16 @@ export COMOUT=$DATA TEST4=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c48.noahmp.frac \ --partition=xjet -o $LOG_FILE -e $LOG_FILE ./C48.noahmp.fracgrid.sh) +LOG_FILE=consistency.log05 +export DATA="${DATA_DIR}/test5" +export COMOUT=$DATA +TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.jedi_lndincsoilnoahmp \ + -o $LOG_FILE -e $LOG_FILE ./C192.jedi_lndincsoilnoahmp.sh) + LOG_FILE=consistency.log sbatch --partition=xjet --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J summary -o $LOG_FILE -e $LOG_FILE \ --open-mode=append -q $QUEUE -d\ - afterok:$TEST1:$TEST2:$TEST3:$TEST4 << EOF + afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5 << EOF #!/bin/bash grep -a '<<<' ${LOG_FILE}* > ./summary.log EOF diff --git a/reg_tests/global_cycle/driver.orion.sh b/reg_tests/global_cycle/driver.orion.sh index d3f4f6415..472608dcf 100755 --- a/reg_tests/global_cycle/driver.orion.sh +++ b/reg_tests/global_cycle/driver.orion.sh @@ -62,8 +62,8 @@ TEST1=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_C LOG_FILE=consistency.log02 export DATA="${DATA_DIR}/test2" export COMOUT=$DATA -TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.lndincsoilnoahmp \ - -o $LOG_FILE -e $LOG_FILE ./C192.lndincsoilnoahmp.sh) +TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gsi_lndincsoilnoahmp \ + -o $LOG_FILE -e $LOG_FILE ./C192.gsi_lndincsoilnoahmp.sh) LOG_FILE=consistency.log03 export DATA="${DATA_DIR}/test3" @@ -77,10 +77,16 @@ export COMOUT=$DATA TEST4=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c48.noahmp.frac \ -o $LOG_FILE -e $LOG_FILE ./C48.noahmp.fracgrid.sh) +LOG_FILE=consistency.log05 +export DATA="${DATA_DIR}/test5" +export COMOUT=$DATA +TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.jedi_lndincsoilnoahmp \ + -o $LOG_FILE -e $LOG_FILE ./C192.jedi_lndincsoilnoahmp.sh) + LOG_FILE=consistency.log sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summary -o $LOG_FILE -e $LOG_FILE \ --open-mode=append -q $QUEUE -d\ - afterok:$TEST1:$TEST2:$TEST3:$TEST4 << EOF + afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5 << EOF #!/bin/bash grep -a '<<<' ${LOG_FILE}* > summary.log EOF diff --git a/reg_tests/global_cycle/driver.wcoss2.sh b/reg_tests/global_cycle/driver.wcoss2.sh index 077896cd3..1655a4bb4 100755 --- a/reg_tests/global_cycle/driver.wcoss2.sh +++ b/reg_tests/global_cycle/driver.wcoss2.sh @@ -69,7 +69,7 @@ TEST1=$(qsub -V -o ${LOG_FILE}01 -e ${LOG_FILE}01 -q $QUEUE -A $PROJECT_CODE -l export DATA="${DATA_DIR}/test2" export COMOUT=$DATA TEST2=$(qsub -V -o ${LOG_FILE}02 -e ${LOG_FILE}02 -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ - -N c192.lndincsoilnoahmp -l select=1:ncpus=12:mem=8GB $PWD/C192.lndincsoilnoahmp.sh) + -N c192.gsi_lndincsoilnoahmp -l 
select=1:ncpus=12:mem=8GB $PWD/C192.gsi_lndincsoilnoahmp.sh) export DATA="${DATA_DIR}/test3" export COMOUT=$DATA @@ -81,8 +81,13 @@ export COMOUT=$DATA TEST4=$(qsub -V -o ${LOG_FILE}04 -e ${LOG_FILE}04 -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ -N c48.noahmp.frac -l select=1:ncpus=12:mem=8GB $PWD/C48.noahmp.fracgrid.sh) +export DATA="${DATA_DIR}/test5" +export COMOUT=$DATA +TEST5=$(qsub -V -o ${LOG_FILE}05 -e ${LOG_FILE}05 -q $QUEUE -A $PROJECT_CODE -l walltime=00:05:00 \ + -N c192.jedi_lndincsoilnoahmp -l select=1:ncpus=12:mem=8GB $PWD/C192.jedi_lndincsoilnoahmp.sh) + qsub -V -o ${LOG_FILE} -e ${LOG_FILE} -q $QUEUE -A $PROJECT_CODE -l walltime=00:01:00 \ - -N cycle_summary -l select=1:ncpus=1:mem=100MB -W depend=afterok:$TEST1:$TEST2:$TEST3:$TEST4 << EOF + -N cycle_summary -l select=1:ncpus=1:mem=100MB -W depend=afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5 << EOF #!/bin/bash cd $reg_dir grep -a '<<<' ${LOG_FILE}?? | grep -v echo > summary.log diff --git a/sorc/global_cycle.fd/cycle.f90 b/sorc/global_cycle.fd/cycle.f90 index 859fbd0c0..160c65e20 100644 --- a/sorc/global_cycle.fd/cycle.f90 +++ b/sorc/global_cycle.fd/cycle.f90 @@ -42,11 +42,14 @@ !! file. !! - $NST_FILE Gaussian GSI file which contains NSST !! TREF increments -!! - $LND_SOI_FILE.$NNN Gaussian GSI file which contains soil state +!! - $sfcincr_gsi.$NNN Gaussian GSI file which contains soil state !! increments -!! - xainc.$NNN The cubed-sphere increment file (contains +!! - snow_xainc.$NNN The cubed-sphere snow increment file (contains !! increments calculated by JEDI on the native !! model grid). +!! - soil_xainc.$NNN The cubed-sphere soil increment file (contains +!! soil temperature and soil moisture increments +!! calculated by JEDI on the native model grid). !! !! OUTPUT FILES: !! - fnbgso.$NNN The updated sfc/nsst restart file. @@ -70,8 +73,17 @@ !! -DO_SFCCYCLE Call sfccycle routine to update surface fields !! -DO_LNDINC Read in land increment files, and add increments to !! relevant states. -!! -DO_SOI_INC Do land increments to soil states. -!! -DO_SNO_INC Do land increments to snow states. +!! NOTE: We do not have a GSI snow analysis +!! -DO_SOI_INC_GSI Do land increments to soil states on Gaussian grids. +!! -DO_SOI_INC_JEDI Do land increments to soil states on cubed-sphere tiles. +!! -DO_SNO_INC_JEDI Do land increments to snow states on cubed-sphere tiles +!! (Noah land model only). +!! -LSOIL_INCR Number of soil layers (from top) to apply soil increments to. +!! LSOIL_INCR is currently set to 3 by default. +!! Extra cautions are needed on layer#3 across permafrost regions due to +!! over sensitivity of moisture change when temperature approaches tfreez. +!! Please feel free to contact Yuan Xue (yuan.xue@noaa.gov) for further +!! concerns regarding this issue. !! - ISOT Use statsgo soil type when '1'. Use zobler when '0'. !! - IVEGSRC Use igbp veg type when '1'. Use sib when '2'. !! - ZSEA1/2_MM When running with NSST model, this is the lower/ @@ -85,8 +97,6 @@ !! (max_tasks-1). !! -NST_FILE path/name of the gaussian GSI file which contains NSST !! TREF increments. -!! -LND_SOI_FILE path/name of the gaussian GSI file which contains soil -!! state increments. !! !! -2005-02-03: Iredell for global_analysis !! 
-2014-11-30: xuli add nst_anl @@ -304,8 +314,9 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & USE READ_WRITE_DATA use machine USE MPI - USE LAND_INCREMENTS, ONLY: ADD_INCREMENT_SOIL, & - ADD_INCREMENT_SNOW, & + USE LAND_INCREMENTS, ONLY: GAUSSIAN_TO_FV3_INTERP, & + ADD_INCREMENT_SOIL, & + ADD_JEDI_INCREMENT_SNOW, & CALCULATE_LANDINC_MASK, & APPLY_LAND_DA_ADJUSTMENTS_SOIL, & APPLY_LAND_DA_ADJUSTMENTS_SND, & @@ -327,7 +338,7 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & CHARACTER(LEN=5) :: TILE_NUM CHARACTER(LEN=500) :: NST_FILE - CHARACTER(LEN=500) :: LND_SOI_FILE + CHARACTER(LEN=500) :: GSI_SOI_FILE,JEDI_SOI_FILE,JEDI_SNO_FILE CHARACTER(LEN=4) :: INPUT_NML_FILE(SZ_NML) INTEGER :: I, IERR @@ -375,22 +386,26 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & real, dimension(lensfc) :: tf_clm_tile,tf_trd_tile,sal_clm_tile INTEGER :: veg_type_landice INTEGER, DIMENSION(LENSFC) :: STC_UPDATED, SLC_UPDATED + REAL, DIMENSION(LENSFC,LSOIL) :: STCINC, SLCINC - LOGICAL :: FILE_EXISTS, DO_SOI_INC, DO_SNO_INC + LOGICAL :: FILE_EXISTS, DO_SOI_INC_GSI, DO_SOI_INC_JEDI, DO_SNO_INC_JEDI CHARACTER(LEN=3) :: RANKCH + INTEGER :: lsoil_incr !-------------------------------------------------------------------------------- ! NST_FILE is the path/name of the gaussian GSI file which contains NSST ! increments. !-------------------------------------------------------------------------------- - NAMELIST/NAMSFCD/ NST_FILE, LND_SOI_FILE, DO_SNO_INC + NAMELIST/NAMSFCD/ NST_FILE, lsoil_incr, DO_SNO_INC_JEDI, DO_SOI_INC_JEDI, DO_SOI_INC_GSI DATA NST_FILE/'NULL'/ - DATA LND_SOI_FILE/'NULL'/ - DO_SNO_INC = .FALSE. - DO_SOI_INC = .FALSE. + DO_SNO_INC_JEDI = .FALSE. + DO_SOI_INC_GSI = .FALSE. + DO_SOI_INC_JEDI = .FALSE. + lsoil_incr = 3 !default + SIG1T = 0.0 ! Not a dead start! @@ -455,15 +470,19 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & IF (DO_LNDINC) THEN ! identify variables to be updated, and allocate arrays. - IF (TRIM(LND_SOI_FILE) .NE. "NULL") THEN - DO_SOI_INC = .TRUE. + IF (DO_SOI_INC_GSI .and. DO_SOI_INC_JEDI) THEN + PRINT* + PRINT*, 'FATAL ERROR: Can not do gsi and jedi soil updates at the same time, choose one!' + CALL MPI_ABORT(MPI_COMM_WORLD, 15, IERR) + ENDIF + IF (DO_SOI_INC_GSI .or. DO_SOI_INC_JEDI) THEN PRINT* - PRINT*," APPLYING SOIL INCREMENTS FROM THE GSI" + PRINT*," APPLYING SOIL INCREMENTS FROM GSI OR JEDI" ALLOCATE(STC_BCK(LENSFC, LSOIL), SMC_BCK(LENSFC, LSOIL), SLC_BCK(LENSFC,LSOIL)) ALLOCATE(LANDINC_MASK_FG(LENSFC)) ENDIF ! FOR NOW, CODE SO CAN DO BOTH, BUT MIGHT NEED TO THINK ABOUT THIS SOME MORE. - IF (DO_SNO_INC) THEN + IF (DO_SNO_INC_JEDI) THEN ! ideally, would check here that sfcsub snow DA update is not also requested ! but latter is controlled by fnsol, which is read in within that routine. ! should be done at script level. @@ -484,7 +503,7 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & ! READ THE INPUT SURFACE DATA ON THE CUBED-SPHERE TILE. !-------------------------------------------------------------------------------- - CALL READ_DATA(LSOIL,LENSFC,DO_NSST,.false.,IS_NOAHMP=IS_NOAHMP, & + CALL READ_DATA(LSOIL,LENSFC,DO_NSST,DO_SNO_INC_JEDI,DO_SOI_INC_JEDI,.false.,IS_NOAHMP=IS_NOAHMP, & TSFFCS=TSFFCS,SMCFCS=SMCFCS, & SWEFCS=SWEFCS,STCFCS=STCFCS,TG3FCS=TG3FCS,ZORFCS=ZORFCS, & CVFCS=CVFCS, CVBFCS=CVBFCS,CVTFCS=CVTFCS,ALBFCS=ALBFCS, & @@ -501,7 +520,7 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & call MPI_ABORT(MPI_COMM_WORLD, 18, IERR) ENDIF - IF (IS_NOAHMP .AND. DO_SNO_INC) THEN + IF (IS_NOAHMP .AND. 
DO_SNO_INC_JEDI) THEN print *, 'FATAL ERROR: Snow increment update does not work with NOAH_MP.' call MPI_ABORT(MPI_COMM_WORLD, 29, IERR) ENDIF @@ -540,7 +559,7 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & ! CALCULATE MASK FOR LAND INCREMENTS IF (DO_LNDINC) & - CALL CALCULATE_LANDINC_MASK(SMCFCS(:,1),SWEFCS, VETFCS, & + CALL CALCULATE_LANDINC_MASK(SWEFCS, VETFCS, SOTFCS, & LENSFC,VEG_TYPE_LANDICE, LANDINC_MASK) !-------------------------------------------------------------------------------- @@ -660,24 +679,35 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & ENDIF !-------------------------------------------------------------------------------- -! READ IN AND APPLY LAND INCREMENTS FROM THE GSI +! READ IN AND APPLY LAND INCREMENTS FROM THE GSI/JEDI !-------------------------------------------------------------------------------- IF (DO_LNDINC) THEN ! SNOW INCREMENTS ! do snow first, as temperature updates will use snow analaysis - IF (DO_SNO_INC) THEN + IF (DO_SNO_INC_JEDI) THEN ! updates are made to snow depth only over land (and not-land ice). ! SWE is then updated from the snow depth analysis, using the model ! forecast density + ! make sure incr. files exist + WRITE(RANKCH, '(I3.3)') (MYRANK+1) + JEDI_SNO_FILE = "snow_xainc." // RANKCH + + INQUIRE(FILE=trim(JEDI_SNO_FILE), EXIST=file_exists) + IF (.not. file_exists) then + print *, 'FATAL ERROR: snow increment (fv3 grid) update requested, & + but file does not exist : ', trim(jedi_sno_file) + call MPI_ABORT(MPI_COMM_WORLD, 10, IERR) + ENDIF + !-------------------------------------------------------------------------------- ! read increments in !-------------------------------------------------------------------------------- ! Only coded for DA on native model grid (would always be the case for cycling DA) - CALL READ_DATA(LSOIL,LENSFC,.false.,.true.,SNDFCS=SND_INC) + CALL READ_DATA(LSOIL,LENSFC,.false.,.true.,.false.,.true.,SNDFCS=SND_INC) !-------------------------------------------------------------------------------- ! add increments to state vars @@ -687,7 +717,7 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & SND_BCK = SNDFCS SWE_BCK = SWEFCS - CALL ADD_INCREMENT_SNOW(SND_INC,LANDINC_MASK,LENSFC,SNDFCS) + CALL ADD_JEDI_INCREMENT_SNOW(SND_INC,LANDINC_MASK,LENSFC,SNDFCS) !-------------------------------------------------------------------------------- ! make any necessary adjustments to dependent variables @@ -696,72 +726,102 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & CALL APPLY_LAND_DA_ADJUSTMENTS_SND(LSM, LENSFC, LANDINC_MASK, SWE_BCK, SND_BCK, & SNDFCS, SWEFCS) + ENDIF ! jedi snow increments + + !re-calculate soilsnow mask if snow has been updated. + LANDINC_MASK_FG = LANDINC_MASK + + IF (DO_SFCCYCLE .OR. DO_SNO_INC_JEDI) THEN + CALL CALCULATE_LANDINC_MASK(SWEFCS, VETFCS, SOTFCS, LENSFC, & + VEG_TYPE_LANDICE, LANDINC_MASK) ENDIF + ! store background states + STC_BCK = STCFCS + SMC_BCK = SMCFCS + SLC_BCK = SLCFCS + ! SOIL INCREMENTS - IF (DO_SOI_INC) THEN + IF (DO_SOI_INC_GSI) THEN !-------------------------------------------------------------------------------- - ! re-calculate soilsnow mask if snow has been updated. + ! read increments in !-------------------------------------------------------------------------------- + ! make sure incr. files exist + WRITE(RANKCH, '(I3.3)') (MYRANK+1) + GSI_SOI_FILE = "sfcincr_gsi." // RANKCH + + INQUIRE(FILE=trim(GSI_SOI_FILE), EXIST=file_exists) + IF (.not. 
file_exists) then + print *, 'FATAL ERROR: gsi soil increment (gaussian grid) update requested, & + but file does not exist : ', trim(gsi_soi_file) + call MPI_ABORT(MPI_COMM_WORLD, 10, IERR) + ENDIF - LANDINC_MASK_FG = LANDINC_MASK + CALL READ_GSI_DATA(GSI_SOI_FILE, 'LND', LSOIL=LSOIL) - IF (DO_SFCCYCLE .OR. DO_SNO_INC) THEN - CALL CALCULATE_LANDINC_MASK(SMCFCS(:,1),SWEFCS, VETFCS, LENSFC, & - VEG_TYPE_LANDICE, LANDINC_MASK ) - ENDIF + !-------------------------------------------------------------------------------- + ! interpolate increments to cubed sphere tiles + !-------------------------------------------------------------------------------- + + CALL GAUSSIAN_TO_FV3_INTERP(LSOIL_INCR,RLA,RLO,& + STCINC,SLCINC,LANDINC_MASK,LENSFC,LSOIL,IDIM,JDIM,LSM,MYRANK) + + !-------------------------------------------------------------------------------- + ! save interpolated increments + !-------------------------------------------------------------------------------- + CALL WRITE_DATA(LENSFC,IDIM,JDIM,LSOIL,DO_NSST,.true.,NSST, & + STCINC=STCINC,SLCINC=SLCINC) + + ENDIF ! end reading and interpolating gsi soil increments + + IF (DO_SOI_INC_JEDI) THEN !-------------------------------------------------------------------------------- ! read increments in !-------------------------------------------------------------------------------- - + ! make sure incr. files exist WRITE(RANKCH, '(I3.3)') (MYRANK+1) + JEDI_SOI_FILE = "soil_xainc." // RANKCH - LND_SOI_FILE = trim(LND_SOI_FILE) // "." // RANKCH - - INQUIRE(FILE=trim(LND_SOI_FILE), EXIST=file_exists) + INQUIRE(FILE=trim(JEDI_SOI_FILE), EXIST=file_exists) IF (.not. file_exists) then - print *, 'FATAL ERROR: land increment update requested, but file does not exist: ', & - trim(lnd_soi_file) + print *, 'FATAL ERROR: soil increment (fv3 grid) update requested, but file & + does not exist: ', trim(jedi_soi_file) call MPI_ABORT(MPI_COMM_WORLD, 10, IERR) ENDIF - CALL READ_GSI_DATA(LND_SOI_FILE, 'LND', LSOIL=LSOIL) + CALL READ_DATA(LSOIL,LENSFC,.false.,.false.,.true., & + .true.,IS_NOAHMP=IS_NOAHMP, & + STCINC=STCINC,SLCINC=SLCINC) + + ENDIF ! end reading jedi soil increments + + IF (DO_SOI_INC_GSI .or. DO_SOI_INC_JEDI) THEN !-------------------------------------------------------------------------------- ! add increments to state vars !-------------------------------------------------------------------------------- - ! when applying increments, will often need to adjust other land states in response - ! to the changes made. Need to store bacground, apply the increments, then make - ! secondart adjustments. When updating more than one state, be careful about the - ! order if increments and secondary adjustments. - - ! store background states - STC_BCK = STCFCS - SMC_BCK = SMCFCS - SLC_BCK = SLCFCS ! below updates [STC/SMC/STC]FCS to hold the analysis - CALL ADD_INCREMENT_SOIL(RLA,RLO,STCFCS,SMCFCS,SLCFCS,STC_UPDATED, SLC_UPDATED, & - LANDINC_MASK,LANDINC_MASK_FG,LENSFC,LSOIL,IDIM,JDIM,LSM,MYRANK) + CALL ADD_INCREMENT_SOIL(LSOIL_INCR,STCINC,SLCINC,STCFCS,SMCFCS,SLCFCS,STC_UPDATED, & + SLC_UPDATED,LANDINC_MASK,LANDINC_MASK_FG,LENSFC,LSOIL,LSM,MYRANK) !-------------------------------------------------------------------------------- ! 
make any necessary adjustments to dependent variables !-------------------------------------------------------------------------------- + CALL APPLY_LAND_DA_ADJUSTMENTS_SOIL(LSOIL_INCR, LSM, ISOT, IVEGSRC,LENSFC, LSOIL, & + SOTFCS, LANDINC_MASK_FG, STC_BCK, STCFCS, SMCFCS, SLCFCS, STC_UPDATED, & + SLC_UPDATED,ZSOIL) - CALL APPLY_LAND_DA_ADJUSTMENTS_SOIL(LSM, ISOT, IVEGSRC,LENSFC, LSOIL, & - SOTFCS, LANDINC_MASK_FG, STC_BCK, STCFCS, SMCFCS, SLCFCS, STC_UPDATED, & - SLC_UPDATED,ZSOIL) - ENDIF ! soil increments + ENDIF ! end applying soil increments and making adjustments !-------------------------------------------------------------------------------- ! clean up !-------------------------------------------------------------------------------- - ! to do - save and write out STC_INC? (soil temperature increments) IF(ALLOCATED(LANDINC_MASK_FG)) DEALLOCATE(LANDINC_MASK_FG) IF(ALLOCATED(LANDINC_MASK)) DEALLOCATE(LANDINC_MASK) IF(ALLOCATED(STC_BCK)) DEALLOCATE(STC_BCK) @@ -778,14 +838,14 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & IF (LSM==LSM_NOAHMP) THEN - CALL WRITE_DATA(LENSFC,IDIM,JDIM,LSOIL,DO_NSST,NSST,VEGFCS=VEGFCS, & + CALL WRITE_DATA(LENSFC,IDIM,JDIM,LSOIL,DO_NSST,.false.,NSST,VEGFCS=VEGFCS, & SLCFCS=SLCFCS,SMCFCS=SMCFCS,STCFCS=STCFCS,& SICFCS=SICFCS,SIHFCS=SIHFCS) ELSEIF (LSM==LSM_NOAH) THEN CALL WRITE_DATA(LENSFC,IDIM,JDIM,LSOIL, & - DO_NSST,NSST,SLIFCS=SLIFCS,TSFFCS=TSFFCS,VEGFCS=VEGFCS, & + DO_NSST,.false.,NSST,SLIFCS=SLIFCS,TSFFCS=TSFFCS,VEGFCS=VEGFCS, & SWEFCS=SWEFCS,TG3FCS=TG3FCS,ZORFCS=ZORFCS, & ALBFCS=ALBFCS,ALFFCS=ALFFCS,CNPFCS=CNPFCS, & F10M=F10M,T2M=T2M,Q2M=Q2M,VETFCS=VETFCS, & diff --git a/sorc/global_cycle.fd/land_increments.f90 b/sorc/global_cycle.fd/land_increments.f90 index b9738250c..3cff0c163 100644 --- a/sorc/global_cycle.fd/land_increments.f90 +++ b/sorc/global_cycle.fd/land_increments.f90 @@ -6,8 +6,9 @@ module land_increments private + public gaussian_to_fv3_interp public add_increment_soil - public add_increment_snow + public add_jedi_increment_snow public calculate_landinc_mask public apply_land_da_adjustments_soil public apply_land_da_adjustments_snd @@ -18,42 +19,32 @@ module land_increments !! copied from GFS_typedefs.F90 ! control state for soil analysis: - integer, parameter :: lsoil_incr=3 !< number of layers to add incrments to real, parameter :: tfreez=273.16 !< con_t0c in physcons contains !> Read in gsi file with soil state increments (on the gaussian - !! grid), interpolate increments to the cubed-sphere tile, and - !! add to the soil states. Adapted from adjust_nsst. - !! Currently only coded for soil temperature. Soil moisture will - !! need the model soil moisture paramaters for regridding. - !! - !! Does not make a temperature update if snow differ - !! between fg and anal (allow correction of snow to - !! address temperature error first), or if snow is present - !! (will eventually updating of snow temperature in this case) + !! grid), interpolate increments to the cubed-sphere tile + !! Adapted from adjust_nsst. !! !! @param[inout] RLA Latitude on the cubed-sphere tile !! @param[inout] RLO Longitude on the cubed-sphere tile - !! @param[inout] STC_STATE Soil temperature state vector - !! @param[inout] SMC_STATE Soil moisture (liquid plus solid) state vector - !! @param[inout] SLC_STATE Liquid soil moisture state vector - !! @param[out] stc_updated Integer to record whether STC in each grid cell was updated - !! @param[out] slc_updated Integer to record whether SMC in each grid cell was updated !! 
@param[in] SOILSNOW_TILE Land mask for increments on the cubed-sphere tile - !! @param[in] SOILSNOW_FG_TILE First guess land mask for increments on the cubed-sphere tile !! @param[in] LENSFC Number of land points on a tile !! @param[in] LSOIL Number of soil layers + !! @param[in] LSOIL_INCR Number of soil layers (from top) to apply soil increments to !! @param[in] IDIM 'I' dimension of a tile !! @param[in] JDIM 'J' dimension of a tile !! @param[in] lsm Integer flag indicating which land model is used (1-Noah, 2-Noah-MP) !! @param[in] MYRANK MPI rank number + !! @param[out] stcinc Soil temperature increments on the cubed-sphere tile + !! @param[out] slcinc Liquid soil moisture increments on the cubed-sphere tile !! !! @author Clara Draper. @date March 2021 + !! @author Yuan Xue. @date Mar 2024 -subroutine add_increment_soil(rla,rlo,stc_state,smc_state,slc_state,stc_updated, slc_updated, & - soilsnow_tile,soilsnow_fg_tile,lensfc,lsoil,idim,jdim,lsm, myrank) +subroutine gaussian_to_fv3_interp(lsoil_incr,rla,rlo, & + stcinc,slcinc,soilsnow_tile,lensfc,lsoil,idim,jdim,lsm, myrank) use utils use gdswzd_mod @@ -63,18 +54,17 @@ subroutine add_increment_soil(rla,rlo,stc_state,smc_state,slc_state,stc_updated, implicit none - integer, intent(in) :: lensfc, lsoil, idim, jdim, myrank, lsm + integer, intent(in) :: lsoil_incr, lensfc, lsoil, idim, jdim, myrank, lsm - integer, intent(in) :: soilsnow_tile(lensfc), soilsnow_fg_tile(lensfc) + integer, intent(in) :: soilsnow_tile(lensfc) real, intent(inout) :: rla(lensfc), rlo(lensfc) - real, intent(inout) :: stc_state(lensfc, lsoil) - real, intent(inout) :: slc_state(lensfc, lsoil) - real, intent(inout) :: smc_state(lensfc, lsoil) - integer, intent(out) :: stc_updated(lensfc), slc_updated(lensfc) + + real, intent(out) :: stcinc(lensfc,lsoil) + real, intent(out) :: slcinc(lensfc,lsoil) integer :: iopt, nret, kgds_gaus(200) integer :: igaus, jgaus, ij - integer :: mask_tile, mask_fg_tile + integer :: mask_tile integer :: itile, jtile integer :: j, ierr integer :: igausp1, jgausp1 @@ -90,12 +80,9 @@ subroutine add_increment_soil(rla,rlo,stc_state,smc_state,slc_state,stc_updated, real, allocatable :: dum2d(:,:), lats_rad(:), lons_rad(:) real, allocatable :: agrid(:,:,:), s2c(:,:,:) - integer :: k, nother, nsnowupd, nnosoilnear - integer :: nstcupd, nslcupd, nfrozen, nfrozen_upd - logical :: gaus_has_soil, soil_freeze, soil_ice + integer :: k + logical :: gaus_has_soil - stc_updated=0 - slc_updated=0 ! this produces the same lat/lon as can be read in from the file kgds_gaus = 0 @@ -122,12 +109,8 @@ subroutine add_increment_soil(rla,rlo,stc_state,smc_state,slc_state,stc_updated, upd_slc=.true. endif - print* - print*,'adjust soil using gsi increments on gaussian grid' - print*,'updating soil temps', upd_stc - print*,'updating soil moisture', upd_slc - print*,'adjusting first ', lsoil_incr, ' surface layers only' - + print*,' Start interpolating first ', lsoil_incr, ' surface layers only' + !---------------------------------------------------------------------- ! call gdswzd to compute the lat/lon of each gsi gaussian grid point. !---------------------------------------------------------------------- @@ -189,35 +172,14 @@ subroutine add_increment_soil(rla,rlo,stc_state,smc_state,slc_state,stc_updated, lons_rad, lats_rad, id1, id2, jdc, s2c, agrid ) deallocate(lons_rad, lats_rad, agrid) - ! - ! initialize variables for counts statitics to be zeros - ! - - ! - nother = 0 ! grid cells not land - nsnowupd = 0 ! 
grid cells with snow (temperature not yet updated) - nnosoilnear = 0 ! grid cells where model has soil, but 4 closest gaus grids don't - ! (no update made here) - nslcupd = 0 ! grid cells that are updated - nstcupd = 0 ! grid cells that are updated - nfrozen = 0 ! not update as frozen soil - nfrozen_upd = 0 ! not update as frozen soil + ! initialize matrix + stcinc = 0.0 + slcinc = 0.0 ij_loop : do ij = 1, lensfc mask_tile = soilsnow_tile(ij) - mask_fg_tile = soilsnow_fg_tile(ij) - - !---------------------------------------------------------------------- - ! mask: 1 - soil, 2 - snow, 0 - land-ice, -1 - not land - !---------------------------------------------------------------------- - - if (mask_tile <= 0) then ! skip if neither soil nor snow - nother = nother + 1 - cycle ij_loop - endif - ! get i,j index on array of (idim,jdim) from known ij @@ -225,15 +187,6 @@ subroutine add_increment_soil(rla,rlo,stc_state,smc_state,slc_state,stc_updated, itile = mod(ij,idim) if (itile==0) itile = idim - !---------------------------------------------------------------------- - ! if snow is present before or after snow update, skip soil analysis - !---------------------------------------------------------------------- - - if (mask_fg_tile == 2 .or. mask_tile == 2) then - nsnowupd = nsnowupd + 1 - cycle ij_loop - endif - !---------------------------------------------------------------------- ! do update to soil temperature grid cells, using bilinear interp !---------------------------------------------------------------------- @@ -253,7 +206,6 @@ subroutine add_increment_soil(rla,rlo,stc_state,smc_state,slc_state,stc_updated, soilsnow_gaus(igaus,jgausp1) == 1) gaus_has_soil = .true. if (.not. gaus_has_soil) then - nnosoilnear = nnosoilnear + 1 cycle ij_loop endif @@ -304,8 +256,123 @@ subroutine add_increment_soil(rla,rlo,stc_state,smc_state,slc_state,stc_updated, ! normalize increments do k = 1, lsoil_incr stc_inc(k) = stc_inc(k) / wsum + stcinc(ij,k) = stc_inc(k) slc_inc(k) = slc_inc(k) / wsum + slcinc(ij,k) = slc_inc(k) enddo + + endif ! if soil/snow point + + enddo ij_loop + + write(*,'(a,i2)') ' Finish soil increments interpolation for rank : ', myrank + + deallocate(id1, id2, jdc, s2c) + +end subroutine gaussian_to_fv3_interp + + !> Read in soil state increments (on the cubed-sphere + !! grid),and add to the soil states. Adapted from original add_gsi_increment_soil routine. + !! + !! @param[in] SLCINC Liquid soil moisture increments on the cubed-sphere tile + !! @param[in] STCINC Soil temperature increments on the cubed-sphere tile + !! @param[inout] STC_STATE Soil temperature state vector + !! @param[inout] SMC_STATE Soil moisture (liquid plus solid) state vector + !! @param[inout] SLC_STATE Liquid soil moisture state vector + !! @param[out] stc_updated Integer to record whether STC in each grid cell was updated + !! @param[out] slc_updated Integer to record whether SMC in each grid cell was updated + !! @param[in] SOILSNOW_TILE Land mask for increments on the cubed-sphere tile + !! @param[in] SOILSNOW_FG_TILE First guess land mask for increments on the + !! cubed-sphere tile + !! @param[in] LENSFC Number of land points on a tile + !! @param[in] LSOIL Number of soil layers + !! @param[in] LSOIL_INCR Number of soil layers (from top) to apply soil increments to + !! @param[in] lsm Integer flag indicating which land model is used + !! @param[in] MYRANK MPI rank number + !! + !! @author Yuan Xue. 
11/2023 + +subroutine add_increment_soil(lsoil_incr,stcinc,slcinc,stc_state,smc_state,slc_state,stc_updated,& + slc_updated,soilsnow_tile,soilsnow_fg_tile,lensfc,lsoil,lsm,myrank) + + use mpi + + implicit none + + integer, intent(in) :: lsoil_incr, lensfc, lsoil, myrank, lsm + + integer, intent(in) :: soilsnow_tile(lensfc), soilsnow_fg_tile(lensfc) + real, intent(inout) :: stc_state(lensfc, lsoil) + real, intent(inout) :: slc_state(lensfc, lsoil) + real, intent(inout) :: smc_state(lensfc, lsoil) + integer, intent(out) :: stc_updated(lensfc), slc_updated(lensfc) + + + integer :: ij + integer :: mask_tile, mask_fg_tile + logical :: upd_slc, upd_stc + + real :: stcinc(lensfc,lsoil) + real :: slcinc(lensfc,lsoil) + + integer :: k, nother, nsnowupd + integer :: nstcupd, nslcupd, nfrozen, nfrozen_upd + logical :: soil_freeze, soil_ice + + stc_updated=0 + slc_updated=0 + + if (lsm==lsm_noah) then + upd_stc=.true. + upd_slc=.false. ! not coded + elseif (lsm==lsm_noahmp) then + upd_stc=.true. + upd_slc=.true. + endif + + print* + print*,'adjust soil using increments on cubed-sphere tiles' + print*,'updating soil temps', upd_stc + print*,'updating soil moisture', upd_slc + print*,'adjusting first ', lsoil_incr, ' surface layers only' + + ! initialize variables for counts statitics to be zeros + nother = 0 ! grid cells not land + nsnowupd = 0 ! grid cells with snow (temperature not yet updated) + nslcupd = 0 ! grid cells that are updated + nstcupd = 0 ! grid cells that are updated + nfrozen = 0 ! not update as frozen soil + nfrozen_upd = 0 ! not update as frozen soil + + ij_loop : do ij = 1, lensfc + + mask_tile = soilsnow_tile(ij) + mask_fg_tile = soilsnow_fg_tile(ij) + + !---------------------------------------------------------------------- + ! mask: 1 - soil, 2 - snow, 0 - land-ice, -1 - not land + !---------------------------------------------------------------------- + + if (mask_tile <= 0) then ! skip if neither soil nor snow + nother = nother + 1 + cycle ij_loop + endif + + !---------------------------------------------------------------------- + ! if snow is present before or after snow update, skip soil analysis + !---------------------------------------------------------------------- + + if (mask_fg_tile == 2 .or. mask_tile == 2) then + nsnowupd = nsnowupd + 1 + cycle ij_loop + endif + + !---------------------------------------------------------------------- + ! do update to soil temperature grid cells + !---------------------------------------------------------------------- + + if (mask_tile == 1) then + !---------------------------------------------------------------------- ! add the interpolated increment to the background !---------------------------------------------------------------------- @@ -318,26 +385,27 @@ subroutine add_increment_soil(rla,rlo,stc_state,smc_state,slc_state,stc_updated, if ( smc_state(ij,k) - slc_state(ij,k) > 0.001 ) soil_ice=.true. if (upd_stc) then - stc_state(ij,k) = stc_state(ij,k) + stc_inc(k) - if (k==1) then + stc_state(ij,k) = stc_state(ij,k) + stcinc(ij,k) + if (k==1) then stc_updated(ij) = 1 nstcupd = nstcupd + 1 endif endif - if ( (stc_state(ij,k) < tfreez) .and. (.not. soil_freeze) .and. (k==1) ) & - nfrozen_upd = nfrozen_upd + 1 + if ( (stc_state(ij,k) < tfreez) .and. (.not. soil_freeze) .and. (k==1) )& + nfrozen_upd = nfrozen_upd + 1 ! do not do updates if this layer or any above is frozen - if ( (.not. soil_freeze ) .and. (.not. soil_ice ) ) then - if (upd_slc) then - if (k==1) then + if ( (.not. soil_freeze ) .and. (.not. 
soil_ice ) ) then + if (upd_slc) then + if (k==1) then nslcupd = nslcupd + 1 slc_updated(ij) = 1 endif - ! apply zero limit here (higher, model-specific limits are later) - slc_state(ij,k) = max(slc_state(ij,k) + slc_inc(k), 0.0) - smc_state(ij,k) = max(smc_state(ij,k) + slc_inc(k), 0.0) + ! apply zero limit here (higher, model-specific limits are + ! later) + slc_state(ij,k) = max(slc_state(ij,k) + slcinc(ij,k), 0.0) + smc_state(ij,k) = max(smc_state(ij,k) + slcinc(ij,k), 0.0) endif else if (k==1) nfrozen = nfrozen+1 @@ -347,22 +415,20 @@ subroutine add_increment_soil(rla,rlo,stc_state,smc_state,slc_state,stc_updated, endif ! if soil/snow point - enddo ij_loop - - write(*,'(a,i2)') 'statistics of grids number processed for rank : ', myrank - write(*,'(a,i8)') ' soil grid total', lensfc - write(*,'(a,i8)') ' soil grid cells slc updated = ',nslcupd - write(*,'(a,i8)') ' soil grid cells stc updated = ',nstcupd - write(*,'(a,i8)') ' soil grid cells not updated, frozen = ',nfrozen - write(*,'(a,i8)') ' soil grid cells update, became frozen = ',nfrozen_upd - write(*,'(a,i8)') ' (not updated) soil grid cells, no soil nearby on gsi grid = ',nnosoilnear - write(*,'(a,i8)') ' (not updated yet) snow grid cells = ', nsnowupd - write(*,'(a,i8)') ' grid cells, without soil or snow = ', nother + enddo ij_loop - deallocate(id1, id2, jdc, s2c) + write(*,'(a,i2)') ' statistics of grids number processed for rank : ', myrank + write(*,'(a,i8)') ' soil grid total', lensfc + write(*,'(a,i8)') ' soil grid cells slc updated = ',nslcupd + write(*,'(a,i8)') ' soil grid cells stc updated = ',nstcupd + write(*,'(a,i8)') ' soil grid cells not updated, frozen = ',nfrozen + write(*,'(a,i8)') ' soil grid cells update, became frozen = ',nfrozen_upd + write(*,'(a,i8)') ' (not updated yet) snow grid cells = ', nsnowupd + write(*,'(a,i8)') ' grid cells, without soil or snow = ', nother end subroutine add_increment_soil + !> Add snow depth increment to model snow depth state, !! and limit output to be non-negative. JEDI increments are !! calculated globally, so must be screened to land-only locations @@ -375,7 +441,7 @@ end subroutine add_increment_soil !! !! @author Clara Draper. @date August 2021 -subroutine add_increment_snow(snd_inc,mask,lensfc,snd) +subroutine add_jedi_increment_snow(snd_inc,mask,lensfc,snd) implicit none @@ -393,25 +459,26 @@ subroutine add_increment_snow(snd_inc,mask,lensfc,snd) endif enddo -end subroutine add_increment_snow +end subroutine add_jedi_increment_snow !> Calculate soil mask for land on model grid. !! Output is 1 - soil, 2 - snow-covered, 0 - land ice, -1 not land. !! !! @param[in] lensfc Number of land points for this tile !! @param[in] veg_type_landice Value of vegetion class that indicates land-ice -!! @param[in] smc Model soil moisture. +!! @param[in] stype Soil type !! @param[in] swe Model snow water equivalent !! @param[in] vtype Model vegetation type !! @param[out] mask Land mask for increments !! @author Clara Draper @date March 2021 -subroutine calculate_landinc_mask(smc,swe,vtype,lensfc,veg_type_landice,mask) +!! 
@author Yuan Xue: introduce stype to make the mask calculation more generic +subroutine calculate_landinc_mask(swe,vtype,stype,lensfc,veg_type_landice,mask) implicit none integer, intent(in) :: lensfc, veg_type_landice - real, intent(in) :: smc(lensfc), swe(lensfc) - real, intent(in) :: vtype(lensfc) + real, intent(in) :: swe(lensfc) + real, intent(in) :: vtype(lensfc),stype(lensfc) integer, intent(out) :: mask(lensfc) integer :: i @@ -420,7 +487,7 @@ subroutine calculate_landinc_mask(smc,swe,vtype,lensfc,veg_type_landice,mask) ! land (but not land-ice) do i=1,lensfc - if (smc(i) .LT. 0.99) then + if (nint(stype(i)) .GT. 0) then if (swe(i) .GT. 0.001) then ! snow covered land mask(i) = 2 else ! non-snow covered land @@ -439,18 +506,19 @@ end subroutine calculate_landinc_mask !! if full for Noah LSM. !! For Noah LSM, copy relevent code blocks from model code (same as has !! been done in sfc_sub). -!! For Noah-MP, have inserted place-holders to simply reset the model -!! variables back to the analysis if adjustments are needed. Later, will replace -!! this with appropriate adjustmenets (in summary, for now we do not -!! make STC updates if soils are frozen, and are also not applying the -!! appropriate max. values for SMC). -!! Here: adjust (frozen) soil moisture to be consistent with changes in -!! soil temperature from DA +!! For Noah-MP, the adjustment scheme shown below as of 11/09/2023: +!! Case 1: frozen ==> frozen, recalculate slc following opt_frz=1, smc remains +!! Case 2: unfrozen ==> frozen, recalculate slc following opt_frz=1, smc remains +!! Case 3: frozen ==> unfrozen, melt all soil ice (if any) +!! Case 4: unfrozen ==> unfrozen along with other cases, (e.g., soil temp=tfrz),do nothing +!! Note: For Case 3, Yuan Xue thoroughly evaluated a total of four options and +!! current option is found to be the best as of 11/09/2023 !! @param[in] lsm Integer code for the LSM !! @param[in] isot Integer code for the soil type data set !! @param[in] ivegsrc Integer code for the vegetation type data set !! @param[in] lensfc Number of land points for this tile !! @param[in] lsoil Number of soil layers +!! @param[in] lsoil_incr Number of soil layers (from top) to apply soil increments to !! @param[in] rsoiltype Array of input soil types !! @param[in] mask Mask indicating surface type !! @param[in] stc_bck Background soil temperature states @@ -462,17 +530,17 @@ end subroutine calculate_landinc_mask !! @param[in] zsoil Depth of bottom of each soil layer !! @author Clara Draper @date April 2021 -subroutine apply_land_da_adjustments_soil(lsm, isot, ivegsrc,lensfc, & +subroutine apply_land_da_adjustments_soil(lsoil_incr, lsm, isot, ivegsrc,lensfc, & lsoil, rsoiltype, mask, stc_bck, stc_adj, smc_adj, slc_adj, & stc_updated, slc_updated, zsoil) use mpi - use set_soilveg_snippet_mod, only: set_soilveg + use set_soilveg_snippet_mod, only: set_soilveg_noah,set_soilveg_noahmp use sflx_snippet, only: frh2o implicit none - integer, intent(in) :: lsm, lensfc, lsoil, isot, ivegsrc + integer, intent(in) :: lsoil_incr, lsm, lensfc, lsoil, isot, ivegsrc real, intent(in) :: rsoiltype(lensfc) ! 
soil types, as real integer, intent(in) :: mask(lensfc) real, intent(in) :: stc_bck(lensfc, lsoil) @@ -485,7 +553,7 @@ subroutine apply_land_da_adjustments_soil(lsm, isot, ivegsrc,lensfc, & logical :: frzn_bck, frzn_anl logical :: soil_freeze, soil_ice - integer :: i, l, n_freeze, n_thaw, ierr, n_revert + integer :: i, l, n_freeze, n_thaw, ierr integer :: myrank, soiltype, iret, n_stc, n_slc logical :: upd_slc, upd_stc @@ -495,6 +563,10 @@ subroutine apply_land_da_adjustments_soil(lsm, isot, ivegsrc,lensfc, & real, dimension(30) :: maxsmc, bb, satpsi real, dimension(4) :: dz ! layer thickness + real, parameter :: hfus=0.3336e06 !< latent heat of fusion(j/kg) + real, parameter :: grav=9.80616 !< gravity accel.(m/s2) + real :: smp !< for computing supercooled water + call mpi_comm_rank(mpi_comm_world, myrank, ierr) if (lsm==lsm_noah) then @@ -508,9 +580,9 @@ subroutine apply_land_da_adjustments_soil(lsm, isot, ivegsrc,lensfc, & select case (lsm ) case(lsm_noah) ! initialise soil properties - call set_soilveg(isot, ivegsrc, maxsmc, bb, satpsi, iret) + call set_soilveg_noah(isot, ivegsrc, maxsmc, bb, satpsi, iret) if (iret < 0) then - print *, 'FATAL ERROR: problem in set_soilveg' + print *, 'FATAL ERROR: problem in set_soilveg_noah' call mpi_abort(mpi_comm_world, 10, ierr) endif @@ -549,30 +621,39 @@ subroutine apply_land_da_adjustments_soil(lsm, isot, ivegsrc,lensfc, & if (upd_stc) then - print *, 'Reverting frozen noah-mp model stc back to background' - n_revert=0 + call set_soilveg_noahmp(isot, ivegsrc, maxsmc, bb, satpsi, iret) + if (iret < 0) then + print *, 'FATAL ERROR: problem in set_soilveg_noahmp' + call mpi_abort(mpi_comm_world, 10, ierr) + endif + n_stc = 0 n_slc = 0 do i=1,lensfc - if (stc_updated(i) == 1 ) then + if (stc_updated(i) == 1 ) then ! soil-only location n_stc = n_stc+1 - ! remove soil temperature increments if frozen - soil_freeze=.false. - soil_ice=.false. + soiltype = nint(rsoiltype(i)) do l = 1, lsoil_incr - if ( min(stc_bck(i,l),stc_adj(i,l)) < tfreez) soil_freeze=.true. - if ( smc_adj(i,l) - slc_adj(i,l) > 0.001 ) soil_ice=.true. - if ( soil_freeze .or. soil_ice ) then - ! for now, revert update. Later, adjust SMC/SLC for update. - if (l==1) n_revert = n_revert+1 - stc_adj(i,l)=stc_bck(i,l) + !case 1: frz ==> frz, recalculate slc, smc remains + !case 2: unfrz ==> frz, recalculate slc, smc remains + !both cases are considered in the following if case + if (stc_adj(i,l) .LT. tfreez )then + !recompute supercool liquid water,smc_anl remain unchanged + smp = hfus*(tfreez-stc_adj(i,l))/(grav*stc_adj(i,l)) !(m) + slc_new=maxsmc(soiltype)*(smp/satpsi(soiltype))**(-1./bb(soiltype)) + slc_adj(i,l) = max( min( slc_new, smc_adj(i,l)), 0.0 ) + endif + !case 3: frz ==> unfrz, melt all soil ice (if any) + if (stc_adj(i,l) .GT. 
tfreez )then !do not rely on stc_bck + slc_adj(i,l)=smc_adj(i,l) endif enddo - endif + endif enddo endif + if (upd_slc) then dz(1) = -zsoil(1) @@ -604,7 +685,6 @@ subroutine apply_land_da_adjustments_soil(lsm, isot, ivegsrc,lensfc, & write(*,'(a,i8)') ' soil grid total', lensfc write(*,'(a,i8)') ' soil grid cells with slc update', n_slc write(*,'(a,i8)') ' soil grid cells with stc update', n_stc - write(*,'(a,i8)') ' soil grid cells reverted', n_revert end subroutine apply_land_da_adjustments_soil diff --git a/sorc/global_cycle.fd/read_write_data.f90 b/sorc/global_cycle.fd/read_write_data.f90 index 71dabb465..5221f840e 100644 --- a/sorc/global_cycle.fd/read_write_data.f90 +++ b/sorc/global_cycle.fd/read_write_data.f90 @@ -79,6 +79,7 @@ MODULE READ_WRITE_DATA !! @param[in] lensfc Total number of points on a tile. !! @param[in] lsoil Number of soil layers. !! @param[in] do_nsst When true, nsst fields were processed. + !! @param[in] inc_file When true, write out increments to files !! @param[in] nsst Data structure containing nsst fields. !! @param[in] slifcs Land-sea mask. !! @param[in] tsffcs Skin temperature. @@ -114,17 +115,20 @@ MODULE READ_WRITE_DATA !! @param[in] slcfcs Liquid portion of volumetric soil moisture. !! @param[in] smcfcs Total volumetric soil moisture. !! @param[in] stcfcs Soil temperature. + !! @param[in] stcinc Soil temperature increments on the cubed-sphere tiles + !! @param[in] slcinc Liquid soil moisture increments on the cubed-sphere tiles !! !! @author George Gayno NOAA/EMC subroutine write_data(lensfc,idim,jdim,lsoil, & - do_nsst,nsst,slifcs,tsffcs,vegfcs,swefcs, & + do_nsst,inc_file,nsst,slifcs,tsffcs,vegfcs,swefcs, & tg3fcs,zorfcs,albfcs,alffcs, & cnpfcs,f10m,t2m,q2m,vetfcs, & sotfcs,ustar,fmm,fhh,sicfcs, & sihfcs,sitfcs,tprcp,srflag, & swdfcs,vmnfcs,vmxfcs,slpfcs, & - absfcs,slcfcs,smcfcs,stcfcs) + absfcs,slcfcs,smcfcs,stcfcs, & + stcinc, slcinc) use mpi @@ -134,6 +138,7 @@ subroutine write_data(lensfc,idim,jdim,lsoil, & integer, intent(in) :: idim, jdim logical, intent(in) :: do_nsst + logical, intent(in) :: inc_file real, intent(in), optional :: slifcs(lensfc),tsffcs(lensfc) real, intent(in), optional :: swefcs(lensfc),tg3fcs(lensfc) @@ -150,10 +155,12 @@ subroutine write_data(lensfc,idim,jdim,lsoil, & real, intent(in), optional :: vmxfcs(lensfc), slpfcs(lensfc) real, intent(in), optional :: absfcs(lensfc), slcfcs(lensfc,lsoil) real, intent(in), optional :: smcfcs(lensfc,lsoil), stcfcs(lensfc,lsoil) + real, intent(in), optional :: stcinc(lensfc,lsoil) + real, intent(in), optional :: slcinc(lensfc,lsoil) type(nsst_data), intent(in) :: nsst - integer :: dim_x, dim_y, dim_time, dims_3d(3) + integer :: dim_x, dim_y, dim_soil, dim_time, dims_3d(3) real :: dum2d(idim,jdim), dum3d(idim,jdim,lsoil) @@ -161,11 +168,14 @@ subroutine write_data(lensfc,idim,jdim,lsoil, & character(len=3) :: rankch integer :: myrank, error, ncid, id_var + integer :: varid_stc, varid_slc call mpi_comm_rank(mpi_comm_world, myrank, error) write(rankch, '(i3.3)') (myrank+1) + if (.NOT.(inc_file)) then + fnbgso = "./fnbgso." // rankch print* @@ -472,6 +482,47 @@ subroutine write_data(lensfc,idim,jdim,lsoil, & call remove_checksum(ncid, id_var) endif + else + + fnbgso = "./gaussian_interp." // rankch + print* + print*,"Write increments onto cubed sphere tiles to: ", trim(fnbgso) + + error=nf90_create(trim(fnbgso),NF90_64BIT_OFFSET,ncid) + CALL netcdf_err(error, 'OPENING FILE: '//trim(fnbgso) ) + + ! Define dimensions in the file. 
+ error = nf90_def_dim(ncid, "xaxis_1", idim, dim_x) + call netcdf_err(error, 'defining xaxis_1') + + error = nf90_def_dim(ncid, "yaxis_1", jdim, dim_y) + call netcdf_err(error, 'defining yaxis_1') + + error = nf90_def_dim(ncid, "soil_levels",lsoil, dim_soil) + call netcdf_err(error, 'defining soil_levels') + + ! Define variables in the file. + error=nf90_def_var(ncid, "slc_inc", NF90_DOUBLE, & + (/dim_x,dim_y,dim_soil/),varid_slc) + call netcdf_err(error, 'defining slc_inc'); + + error=nf90_def_var(ncid, "stc_inc", NF90_DOUBLE, & + (/dim_x,dim_y,dim_soil/),varid_stc) + call netcdf_err(error, 'defining stc_inc'); + + error = nf90_enddef(ncid) + + ! Put variables in the file. + dum3d = reshape(stcinc, (/idim,jdim,lsoil/)) + error = nf90_put_var( ncid, varid_stc, dum3d) + call netcdf_err(error, 'writing stc_inc record' ) + + dum3d = reshape(slcinc, (/idim,jdim,lsoil/)) + error = nf90_put_var( ncid, varid_slc, dum3d) + call netcdf_err(error, 'writing slc_inc record' ) + + endif + if(do_nsst) then error=nf90_inq_varid(ncid, "tref", id_var) @@ -984,8 +1035,11 @@ END SUBROUTINE READ_GSI_DATA !! @param[in] LSOIL Number of soil layers. !! @param[in] LENSFC Total number of points on a tile. !! @param[in] DO_NSST When true, nsst fields are read. + !! @param[in] DO_SNO_INC_JEDI When true, read in snow increment file + !! @param[in] DO_SOI_INC_JEDI When true, read in soil increment file !! @param[in] INC_FILE When true, read from an increment file. !! False reads from a restart file. + !! increments are on the cubed-sphere tiles !! @param[out] IS_NOAHMP When true, process for the Noah-MP LSM. !! @param[out] TSFFCS Skin Temperature. !! @param[out] SMCFCS Total volumetric soil moisture. @@ -1025,8 +1079,13 @@ END SUBROUTINE READ_GSI_DATA !! @param[out] SLMASK Land-sea mask without ice flag. !! @param[out] ZSOIL Soil layer thickness. !! @param[out] NSST Data structure containing nsst fields. + !! @param[in] SLCINC Liquid soil moisture increments on the cubed-sphere tiles + !! @param[in] STCINC Soil temperature increments on the cubed-sphere tiles !! @author George Gayno NOAA/EMC - SUBROUTINE READ_DATA(LSOIL,LENSFC,DO_NSST,INC_FILE,IS_NOAHMP, & + !! @author Yuan Xue: add capability to read soil related increments on the + !! cubed-sphere tiles directly + SUBROUTINE READ_DATA(LSOIL,LENSFC,DO_NSST,DO_SNO_INC_JEDI,& + DO_SOI_INC_JEDI,INC_FILE,IS_NOAHMP, & TSFFCS,SMCFCS,SWEFCS,STCFCS, & TG3FCS,ZORFCS, & CVFCS,CVBFCS,CVTFCS,ALBFCS, & @@ -1036,6 +1095,7 @@ SUBROUTINE READ_DATA(LSOIL,LENSFC,DO_NSST,INC_FILE,IS_NOAHMP, & SIHFCS,SICFCS,SITFCS, & TPRCP,SRFLAG,SNDFCS, & VMNFCS,VMXFCS,SLCFCS, & + STCINC,SLCINC, & SLPFCS,ABSFCS,T2M,Q2M,SLMASK, & ZSOIL,NSST) USE MPI @@ -1044,6 +1104,7 @@ SUBROUTINE READ_DATA(LSOIL,LENSFC,DO_NSST,INC_FILE,IS_NOAHMP, & INTEGER, INTENT(IN) :: LSOIL, LENSFC LOGICAL, INTENT(IN) :: DO_NSST, INC_FILE + LOGICAL, INTENT(IN) :: DO_SNO_INC_JEDI, DO_SOI_INC_JEDI LOGICAL, OPTIONAL, INTENT(OUT) :: IS_NOAHMP @@ -1065,6 +1126,8 @@ SUBROUTINE READ_DATA(LSOIL,LENSFC,DO_NSST,INC_FILE,IS_NOAHMP, & REAL, OPTIONAL, INTENT(OUT) :: SLCFCS(LENSFC,LSOIL) REAL, OPTIONAL, INTENT(OUT) :: SMCFCS(LENSFC,LSOIL) REAL, OPTIONAL, INTENT(OUT) :: STCFCS(LENSFC,LSOIL) + REAL, OPTIONAL, INTENT(OUT) :: STCINC(LENSFC,LSOIL) + REAL, OPTIONAL, INTENT(OUT) :: SLCINC(LENSFC,LSOIL) REAL(KIND=4), OPTIONAL, INTENT(OUT) :: ZSOIL(LSOIL) TYPE(NSST_DATA), OPTIONAL :: NSST ! 
intent(out) will crash @@ -1083,8 +1146,10 @@ SUBROUTINE READ_DATA(LSOIL,LENSFC,DO_NSST,INC_FILE,IS_NOAHMP, & WRITE(RANKCH, '(I3.3)') (MYRANK+1) - IF (INC_FILE) THEN - FNBGSI = "./xainc." // RANKCH + IF ((INC_FILE) .and. (DO_SNO_INC_JEDI)) THEN + FNBGSI = "./snow_xainc." // RANKCH + ELSEIF ((INC_FILE) .and. (DO_SOI_INC_JEDI)) THEN + FNBGSI = "./soil_xainc." // RANKCH ELSE FNBGSI = "./fnbgsi." // RANKCH ENDIF @@ -1095,6 +1160,20 @@ SUBROUTINE READ_DATA(LSOIL,LENSFC,DO_NSST,INC_FILE,IS_NOAHMP, & ERROR=NF90_OPEN(TRIM(FNBGSI),NF90_NOWRITE,NCID) CALL NETCDF_ERR(ERROR, 'OPENING FILE: '//TRIM(FNBGSI) ) + IF ((INC_FILE) .and. (DO_SOI_INC_JEDI)) THEN + + ERROR=NF90_INQ_DIMID(NCID, 'grid_xt', ID_DIM) + CALL NETCDF_ERR(ERROR, 'READING grid_xt' ) + ERROR=NF90_INQUIRE_DIMENSION(NCID,ID_DIM,LEN=IDIM) + CALL NETCDF_ERR(ERROR, 'READING grid_xt' ) + + ERROR=NF90_INQ_DIMID(NCID, 'grid_yt', ID_DIM) + CALL NETCDF_ERR(ERROR, 'READING grid_yt' ) + ERROR=NF90_INQUIRE_DIMENSION(NCID,ID_DIM,LEN=JDIM) + CALL NETCDF_ERR(ERROR, 'READING grid_yt' ) + + ELSE + ERROR=NF90_INQ_DIMID(NCID, 'xaxis_1', ID_DIM) CALL NETCDF_ERR(ERROR, 'READING xaxis_1' ) ERROR=NF90_INQUIRE_DIMENSION(NCID,ID_DIM,LEN=IDIM) @@ -1105,6 +1184,8 @@ SUBROUTINE READ_DATA(LSOIL,LENSFC,DO_NSST,INC_FILE,IS_NOAHMP, & ERROR=NF90_INQUIRE_DIMENSION(NCID,ID_DIM,LEN=JDIM) CALL NETCDF_ERR(ERROR, 'READING yaxis_1' ) + ENDIF + IF ((IDIM*JDIM) /= LENSFC) THEN PRINT*,'FATAL ERROR: DIMENSIONS WRONG.' CALL MPI_ABORT(MPI_COMM_WORLD, 88, IERR) @@ -1512,6 +1593,22 @@ SUBROUTINE READ_DATA(LSOIL,LENSFC,DO_NSST,INC_FILE,IS_NOAHMP, & STCFCS = RESHAPE(DUMMY3D, (/LENSFC,LSOIL/)) ENDIF + IF (PRESENT(SLCINC)) THEN + ERROR=NF90_INQ_VARID(NCID, "soill", ID_VAR) + CALL NETCDF_ERR(ERROR, 'READING soill ID' ) + ERROR=NF90_GET_VAR(NCID, ID_VAR, dummy3d) + CALL NETCDF_ERR(ERROR, 'READING slc increments' ) + SLCINC = RESHAPE(DUMMY3D, (/LENSFC,LSOIL/)) + ENDIF + + IF (PRESENT(STCINC)) THEN + ERROR=NF90_INQ_VARID(NCID, "soilt", ID_VAR) + CALL NETCDF_ERR(ERROR, 'READING soilt ID' ) + ERROR=NF90_GET_VAR(NCID, ID_VAR, dummy3d) + CALL NETCDF_ERR(ERROR, 'READING stc increments' ) + STCINC = RESHAPE(DUMMY3D, (/LENSFC,LSOIL/)) + ENDIF + DEALLOCATE(DUMMY3D) ! cloud fields not in warm restart files. set to zero? diff --git a/sorc/lsm_routines.fd/noah.fd/set_soilveg_snippet.f90 b/sorc/lsm_routines.fd/noah.fd/set_soilveg_snippet.f90 index 0ba432ba1..ec2200d92 100644 --- a/sorc/lsm_routines.fd/noah.fd/set_soilveg_snippet.f90 +++ b/sorc/lsm_routines.fd/noah.fd/set_soilveg_snippet.f90 @@ -5,13 +5,21 @@ !> Below was extracted from namelist_soilveg.f and set_soilveg.f !! (couldn't get above to compile for doxygen) +!> Add Noah-MP LSM soil and veg params needed for global_cycle +!> Noah-MP related parameters were extracted from noahmp_table.f +!> isot (soil type) = 1: STATSGO must be selected if NoahMP is used +!> ivet (vegetation type) = 1: IBGP is used by UFS offline Land DA for Noah-MP +!> as of 07/13/2023 +!> @author Yuan Xue + module set_soilveg_snippet_mod implicit none private - public set_soilveg + public set_soilveg_noah + public set_soilveg_noahmp contains @@ -23,7 +31,7 @@ module set_soilveg_snippet_mod !! @param[out] bb B exponent for each soil type !! @param[out] satpsi Saturated matric potential for each soil type !! 
@param[out] iret Return integer -subroutine set_soilveg(isot,ivet, maxsmc, bb, satpsi, iret) +subroutine set_soilveg_noah(isot,ivet, maxsmc, bb, satpsi, iret) implicit none integer, intent(in) :: isot,ivet @@ -85,6 +93,51 @@ subroutine set_soilveg(isot,ivet, maxsmc, bb, satpsi, iret) iret = 0 -end subroutine set_soilveg +end subroutine set_soilveg_noah + +!> This subroutine initializes soil and vegetation +!! parameters needed in global_cycle/land_increment.f90 for noah-mp +!! @param[in] isot Soil type +!! @param[in] ivet Vegetation type +!! @param[out] maxsmc Maximum soil moisture for each soil type +!! @param[out] bb B exponent for each soil type +!! @param[out] satpsi Saturated matric potential for each soil type +!! @param[out] iret Return integer +subroutine set_soilveg_noahmp(isot,ivet, maxsmc, bb, satpsi,iret) + + implicit none + + integer, intent(in) :: isot,ivet !ivet is *not* used for now + real, dimension(30), intent(out) :: maxsmc, bb, satpsi + integer, intent(out) :: iret + + if (isot .eq. 1) then + +! set soil-dependent params (STATSGO is the only option for UFS, 07/13/2023) + maxsmc= (/0.339, 0.421, 0.434, 0.476, 0.484,& + & 0.439, 0.404, 0.464, 0.465, 0.406, 0.468, 0.468, & + & 0.439, 1.000, 0.200, 0.421, 0.468, 0.200, & + & 0.339, 0.339, 0.000, 0.000, 0.000, 0.000, & + & 0.000, 0.000, 0.000, 0.000, 0.000, 0.000/) + bb= (/2.79, 4.26, 4.74, 5.33, 3.86, 5.25,& + & 6.77, 8.72, 8.17, 10.73, 10.39, 11.55, & + & 5.25, 0.0, 2.79, 4.26, 11.55, 2.79, & + & 2.79, 0.00, 0.00, 0.00, 0.00, 0.00, & + & 0.00, 0.00, 0.00, 0.00, 0.00, 0.00/) + satpsi= (/0.069, 0.036, 0.141, 0.759, 0.955, & + & 0.355, 0.135, 0.617, 0.263, 0.098, 0.324, 0.468, & + & 0.355, 0.00, 0.069, 0.036, 0.468, 0.069, & + & 0.069, 0.00, 0.00, 0.00, 0.00, 0.00, & + & 0.00, 0.00, 0.00, 0.00, 0.00, 0.00/) + + else + print*, 'For Noah-MP, set_soilveg is not supported for soil type ', isot + iret = -1 + return + + endif + + iret = 0 +end subroutine set_soilveg_noahmp end module set_soilveg_snippet_mod diff --git a/tests/global_cycle/CMakeLists.txt b/tests/global_cycle/CMakeLists.txt index 2427551f2..7937d229c 100644 --- a/tests/global_cycle/CMakeLists.txt +++ b/tests/global_cycle/CMakeLists.txt @@ -3,6 +3,19 @@ # # George Gayno, Lin Gan, Ed Hartnett, Larissa Reames +set(CYCLE_URL "https://ftp.emc.ncep.noaa.gov/static_files/public/UFS/ufs_utils/unit_tests/global_cycle") + +set(FILE1 "soil_sfcincr_jedi.001") +set(FILE2 "soil_sfcincr_jedi.002") +set(FILE3 "soil_sfcincr_jedi.003") +set(FILE4 "soil_sfcincr_jedi.004") +set(FILE5 "soil_sfcincr_jedi.005") +set(FILE6 "soil_sfcincr_jedi.006") + +foreach(THE_FILE IN LISTS FILE1 FILE2 FILE3 FILE4 FILE5 FILE6) + PULL_DATA(${CYCLE_URL} ${THE_FILE}) +endforeach() + # Include cmake to allow parallel I/O tests. 
include (LibMPI) @@ -14,12 +27,14 @@ endif() include_directories(${PROJECT_SOURCE_DIR}) -execute_process( COMMAND ${CMAKE_COMMAND} -E copy - ${CMAKE_CURRENT_SOURCE_DIR}/LSanSuppress.supp ${CMAKE_CURRENT_BINARY_DIR}/LSanSuppress.supp) - add_executable(ftst_land_increments ftst_land_increments.F90) target_link_libraries(ftst_land_increments global_cycle_lib) +add_executable(ftst_read_increments ftst_read_increments.F90) +target_link_libraries(ftst_read_increments global_cycle_lib) add_mpi_test(global_cycle-ftst_land_increments EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/ftst_land_increments NUMPROCS 1 TIMEOUT 60) +add_mpi_test(global_cycle-ftst_read_increments + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/ftst_read_increments + NUMPROCS 6 TIMEOUT 60) diff --git a/tests/global_cycle/ftst_land_increments.F90 b/tests/global_cycle/ftst_land_increments.F90 index 8519e178b..e8fd59e01 100644 --- a/tests/global_cycle/ftst_land_increments.F90 +++ b/tests/global_cycle/ftst_land_increments.F90 @@ -3,6 +3,10 @@ program ftst_land_increments ! Test "apply_land_da_adjustments" using sample points. ! ! author: George Gayno (george.gayno@noaa.gov) +! author: Yuan Xue: add a total of four testing points to test out +! the newly added frozen soil ice fraction calculations +! under different scenarios after ingesting +! soil temp increments (08/2023) use mpi use land_increments @@ -10,7 +14,7 @@ program ftst_land_increments implicit none integer :: my_rank, ierr, lsm, isot, ivegsrc - integer :: lensfc, lsoil, l + integer :: lensfc, lsoil, l, lsoil_incr real, parameter :: EPSILON=0.001 @@ -31,8 +35,9 @@ program ftst_land_increments isot = 1 ! STATSGO soil type. ivegsrc = 1 ! IGBP vegetation type. lsoil = 4 ! Number of soil layers. + lsoil_incr = 3 ! Number of soil layers (from top) to apply increments to. - lensfc= 3 ! Number of test points. + lensfc= 4 ! Number of test points. allocate(zsoil(lsoil)) allocate(rsoiltype(lensfc)) ! Soil type. @@ -53,7 +58,8 @@ program ftst_land_increments ! Point 1 is above freezing before the adjustment ! and above freezing after the adjustment. Therefore, -! the increments to STC and SLC will be retained. +! the increments to STC will be retained. +! temp: unfrozen ==> unfrozen rsoiltype(1) = 5. mask(1) = 1 @@ -61,53 +67,71 @@ program ftst_land_increments smc_anl(1,:) = .25 slc_anl(1,:) = .25 - stc_anl(1,1:3) = 281.0 ! DA only updates 3 layers + stc_anl(1,1:lsoil_incr) = 281.0 ! Point 2 is below freezing before the adjustment ! and above freezing after the adjustment. Therefore, -! the increment to STC will be removed, and SMC / SLC -! are unchanged. +! all soil ice will be melted +! temp: frozen ==> unfrozen rsoiltype(2) = 5. mask(2) = 1 - stc_bck(2,:) = 270.0 + stc_bck(2,:) = 271.0 smc_anl(2,:) = .25 slc_anl(2,:) = .20 - stc_anl(2,1:3) = 274.0 + stc_anl(2,1:lsoil_incr) = 275.0 -! Point 3 freezes. Therefore, -! the increment to STC will be removed, and SMC / SLC -! are unchanged. +! Point 3 freezes before and after the adjustment. Therefore, +! SLC will be recomputed, SMC remained unchanged +! temp: frozen ==> frozen rsoiltype(3) = 5. mask(3) = 1 - stc_bck(3,:) = 274.0 + stc_bck(3,:) = 272.0 smc_anl(3,:) = .25 - slc_anl(3,:) = .25 - stc_anl(3,1:3) = 271.0 + slc_anl(3,:) = .20 + stc_anl(3,1:lsoil_incr) = 271.0 - call apply_land_da_adjustments_soil(lsm, isot, ivegsrc,lensfc, & +! Point 4 is above freezing before and below freezing after the adjustment +! Therfore, SLC will be recomputed, SMC remained unchanged +! temp: unfrozen ==> frozen + + rsoiltype(4) = 5. 
+ mask(4) = 1 + stc_bck(4,:) = 280.0 + + smc_anl(4,:) = .25 + slc_anl(4,:) = .25 + stc_anl(4,1:lsoil_incr) = 271.0 + + call apply_land_da_adjustments_soil(lsoil_incr, lsm, isot, ivegsrc,lensfc, & lsoil, rsoiltype, mask, stc_bck, stc_anl, smc_anl, slc_anl, & stc_updated, slc_updated,zsoil) - do l = 1, 3 - if (abs(smc_anl(1,l) - 0.25) > EPSILON) stop 2 - if (abs(slc_anl(1,l) - 0.25) > EPSILON) stop 4 - if (abs(stc_anl(1,l) - 281.) > EPSILON) stop 3 + do l = 1,lsoil_incr + if (abs(smc_anl(1,l) - 0.25) > EPSILON) stop 2 + if (abs(slc_anl(1,l) - 0.25) > EPSILON) stop 3 + if (abs(stc_anl(1,l) - 281.) > EPSILON) stop 4 + enddo + + do l = 1,lsoil_incr + if (abs(smc_anl(2,l) - 0.25) > EPSILON) stop 5 + if (abs(slc_anl(2,l) - 0.25) > EPSILON) stop 6 + if (abs(stc_anl(2,l) - 275.) > EPSILON) stop 7 enddo - do l = 1, 3 - if (abs(smc_anl(2,l) - 0.25) > EPSILON) stop 6 - if (abs(slc_anl(2,l) - 0.20) > EPSILON) stop 8 - if (abs(stc_anl(2,l) - 270.) > EPSILON) stop 5 + do l = 1,lsoil_incr + if (abs(smc_anl(3,l) - 0.25) > EPSILON) stop 8 + if (abs(slc_anl(3,l) - 0.112) > EPSILON) stop 9 + if (abs(stc_anl(3,l) - 271.) > EPSILON) stop 10 enddo - do l = 1, 3 - if (abs(smc_anl(3,l) - 0.25) > EPSILON) stop 10 - if (abs(slc_anl(3,l) - 0.25) > EPSILON) stop 12 - if (abs(stc_anl(3,l) - 274.) > EPSILON) stop 11 + do l = 1,lsoil_incr + if (abs(smc_anl(4,l) - 0.25) > EPSILON) stop 11 + if (abs(slc_anl(4,l) - 0.112) > EPSILON) stop 12 + if (abs(stc_anl(4,l) - 271.) > EPSILON) stop 13 enddo call mpi_finalize(ierr) diff --git a/tests/global_cycle/ftst_read_increments.F90 b/tests/global_cycle/ftst_read_increments.F90 new file mode 100644 index 000000000..151673508 --- /dev/null +++ b/tests/global_cycle/ftst_read_increments.F90 @@ -0,0 +1,168 @@ +! Unit test for global_cycle routine "read_data". +! +! Reads a sample soil increment file from file soil_xainc.$NNN +! NOTE: $NNN corresponds to (mpi rank + 1) +! Total number of processes= 6 (corresponds to 6 tiles) +! Each process is assigned one increment file on one tile to read +! If any portion of the variable does not match expected values, +! the test fails. +! +! 
Author: Yuan Xue, 07/17/2023 + +module chdir_mod + implicit none + interface + integer function c_chdir(path) bind(C,name="chdir") + use iso_c_binding + character(kind=c_char) :: path(*) + end function + end interface +contains + subroutine chdir(path, err) + use iso_c_binding + character(*) :: path + integer, optional, intent(out) :: err + integer :: loc_err + + loc_err = c_chdir(path//c_null_char) + if (present(err)) err = loc_err + end subroutine +end module chdir_mod + + program read_increments + + use read_write_data, only : read_data + use mpi + use chdir_mod + + implicit none + + integer:: ierr,my_rank + integer:: lsoil, l + integer:: lensfc, ij_input + + integer, parameter:: NUM_VALUES=4 + real, parameter :: EPSILON=0.0001 + + real, allocatable:: SLCINC(:,:),STCINC(:,:) + real:: stc_inc_expected_values_tile1(NUM_VALUES) + real:: stc_inc_expected_values_tile2(NUM_VALUES) + real:: stc_inc_expected_values_tile3(NUM_VALUES) + real:: stc_inc_expected_values_tile4(NUM_VALUES) + real:: stc_inc_expected_values_tile5(NUM_VALUES) + real:: stc_inc_expected_values_tile6(NUM_VALUES) + real:: slc_inc_expected_values_tile1(NUM_VALUES) + real:: slc_inc_expected_values_tile2(NUM_VALUES) + real:: slc_inc_expected_values_tile3(NUM_VALUES) + real:: slc_inc_expected_values_tile4(NUM_VALUES) + real:: slc_inc_expected_values_tile5(NUM_VALUES) + real:: slc_inc_expected_values_tile6(NUM_VALUES) + + !expected values were extracted from MATLAB, which directly reads in xainc file + !each tile is examined separately here + data stc_inc_expected_values_tile1 / 3.1428, 2.9983, 2.9786, 2.9634 / + data stc_inc_expected_values_tile2 / 2.9330, 2.9121, 2.9103, 2.9069 / + data stc_inc_expected_values_tile3 / 2.7236, 2.7308, 2.7315, 2.7295 / + data stc_inc_expected_values_tile4 / 3.0229, 3.0229, 3.0229, 3.0229 / + data stc_inc_expected_values_tile5 / 2.8595, 2.8825, 2.8878, 2.8948 / + data stc_inc_expected_values_tile6 / 2.7238, 2.7238, 2.7238, 2.7238 / + data slc_inc_expected_values_tile1 / 0.0007, 0.0018, 0.0018, 0.0018 / + data slc_inc_expected_values_tile2 / 0.0034, 0.0031, 0.0029, 0.0029 / + data slc_inc_expected_values_tile3 / 0.0003, 0.0005, 0.0011, 0.0008 / + data slc_inc_expected_values_tile4 / 0.01, 0.01, 0.01, 0.01 / + data slc_inc_expected_values_tile5 / 0.0019, 0.0019, 0.0020, 0.0024 / + data slc_inc_expected_values_tile6 / 0.01, 0.01, 0.01, 0.01 / + + call mpi_init(ierr) + call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, ierr) + + lsoil=4 + lensfc=36864 + ij_input = 28541 + + allocate(SLCINC(lensfc,lsoil)) + allocate(STCINC(lensfc,lsoil)) + + if (my_rank .eq. 0) print*,"Starting test of global_cycle routine read_data." + if (my_rank .eq. 0) print*,"Call routine read_data" + + call chdir("./data") + call rename("soil_sfcincr_jedi.001", "soil_xainc.001") + call rename("soil_sfcincr_jedi.002", "soil_xainc.002") + call rename("soil_sfcincr_jedi.003", "soil_xainc.003") + call rename("soil_sfcincr_jedi.004", "soil_xainc.004") + call rename("soil_sfcincr_jedi.005", "soil_xainc.005") + call rename("soil_sfcincr_jedi.006", "soil_xainc.006") + + call read_data(lsoil,lensfc,.false.,.false.,.true.,.true.,STCINC=STCINC,SLCINC=SLCINC) + + if (my_rank .eq. 0) then + do l = 1,4 + if (abs(STCINC(ij_input,l) - stc_inc_expected_values_tile1(l))& + > EPSILON) stop 20 + if (abs(SLCINC(ij_input,l) - slc_inc_expected_values_tile1(l))& + > EPSILON) stop 40 + enddo + print*, "tile#", my_rank+1, "reads OK" + endif + + if (my_rank .eq. 
1) then + do l = 1,4 + if (abs(STCINC(ij_input,l) - stc_inc_expected_values_tile2(l))& + > EPSILON) stop 21 + if (abs(SLCINC(ij_input,l) - slc_inc_expected_values_tile2(l))& + > EPSILON) stop 41 + enddo + print*, "tile#", my_rank+1, "reads OK" + endif + + if (my_rank .eq. 2) then + do l = 1,4 + if (abs(STCINC(ij_input,l) - stc_inc_expected_values_tile3(l))& + > EPSILON) stop 22 + if (abs(SLCINC(ij_input,l) - slc_inc_expected_values_tile3(l))& + > EPSILON) stop 42 + enddo + print*, "tile#", my_rank+1, "reads OK" + endif + + if (my_rank .eq. 3) then + do l = 1,4 + if (abs(STCINC(ij_input,l) - stc_inc_expected_values_tile4(l))& + > EPSILON) stop 23 + if (abs(SLCINC(ij_input,l) - slc_inc_expected_values_tile4(l))& + > EPSILON) stop 43 + enddo + print*, "tile#", my_rank+1, "reads OK" + endif + + if (my_rank .eq. 4) then + do l = 1,4 + if (abs(STCINC(ij_input,l) - stc_inc_expected_values_tile5(l))& + > EPSILON) stop 24 + if (abs(SLCINC(ij_input,l) - slc_inc_expected_values_tile5(l))& + > EPSILON) stop 44 + enddo + print*, "tile#", my_rank+1, "reads OK" + endif + + if (my_rank .eq. 5) then + do l = 1,4 + if (abs(STCINC(ij_input,l) - stc_inc_expected_values_tile6(l))& + > EPSILON) stop 25 + if (abs(SLCINC(ij_input,l) - slc_inc_expected_values_tile6(l))& + > EPSILON) stop 45 + enddo + print*, "tile#", my_rank+1, "reads OK" + endif + + call MPI_Barrier(MPI_COMM_WORLD, ierr) + + if (my_rank .eq. 0) print*, "ALL is OK" + if (my_rank .eq. 0) print*, "SUCCESS!" + + deallocate(SLCINC,STCINC) + call mpi_finalize(ierr) + + end program read_increments + diff --git a/ush/global_cycle.sh b/ush/global_cycle.sh index 5c69fd0df..4ce053751 100755 --- a/ush/global_cycle.sh +++ b/ush/global_cycle.sh @@ -139,8 +139,10 @@ # between the filtered and unfiltered terrain. Default is true. # DONST Process NST records when using NST model. Default is 'no'. # DO_SFCCYCLE Call sfcsub routine -# DO_LNDINC Call routine to update soil states with increment files -# DO_SNO_INC Call routine to update snow states with increment files +# DO_LNDINC Call routine to update snow/soil states with increment files +# DO_SOI_INC_GSI Call routine to update soil states with gsi(gaussian) increment files +# DO_SNO_INC_JEDI Call routine to update snow states with jedi increment files +# DO_SOI_INC_JEDI Call routine to update soil states with jedi increment files # zsea1/zsea2 When running with NST model, this is the lower/upper bound # of depth of sea temperature. In whole mm. 
# MAX_TASKS_CY Normally, program should be run with a number of mpi tasks @@ -263,7 +265,9 @@ use_ufo=${use_ufo:-.true.} DONST=${DONST:-"NO"} DO_SFCCYCLE=${DO_SFCCYCLE:-.true.} DO_LNDINC=${DO_LNDINC:-.false.} -DO_SNO_INC=${DO_SNO_INC:-.false.} +DO_SOI_INC_GSI=${DO_SOI_INC_GSI:-.false.} +DO_SNO_INC_JEDI=${DO_SNO_INC_JEDI:-.false.} +DO_SOI_INC_JEDI=${DO_SOI_INC_JEDI:-.false.} zsea1=${zsea1:-0} zsea2=${zsea2:-0} MAX_TASKS_CY=${MAX_TASKS_CY:-99999} @@ -289,7 +293,6 @@ FNVMXC=${FNVMXC:-${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_green FNSLPC=${FNSLPC:-${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.slope_type.tileX.nc} FNMSKH=${FNMSKH:-${FIXgfs}/am/global_slmask.t1534.3072.1536.grb} NST_FILE=${NST_FILE:-"NULL"} -LND_SOI_FILE=${LND_SOI_FILE:-"NULL"} FNTSFA=${FNTSFA:-${COMIN}/${PREINP}sstgrb${SUFINP}} FNACNA=${FNACNA:-${COMIN}/${PREINP}engicegrb${SUFINP}} FNSNOA=${FNSNOA:-${COMIN}/${PREINP}snogrb${SUFINP}} @@ -384,8 +387,10 @@ EOF cat << EOF > fort.37 &NAMSFCD NST_FILE="$NST_FILE", - LND_SOI_FILE="$LND_SOI_FILE", - DO_SNO_INC=$DO_SNO_INC + DO_SOI_INC_GSI=$DO_SOI_INC_GSI, + DO_SNO_INC_JEDI=$DO_SNO_INC_JEDI, + DO_SOI_INC_JEDI=$DO_SOI_INC_JEDI, + lsoil_incr=3, / EOF diff --git a/ush/global_cycle_driver.sh b/ush/global_cycle_driver.sh index 745f8caf9..3f8b4d2e6 100755 --- a/ush/global_cycle_driver.sh +++ b/ush/global_cycle_driver.sh @@ -52,8 +52,9 @@ fi export DO_SFCCYLE=${DO_SFCCYCLE:-".true."} export DO_LNDINC=${DO_LNDINC:-".false."} -export LND_SOI_FILE=${LND_SOI_FILE:-"NULL"} -export DO_SNO_INC=${DO_SNO_INC:-".false."} +export DO_SOI_INC_GSI=${DO_SOI_INC_GSI:-".false."} +export DO_SNO_INC_JEDI=${DO_SNO_INC_JEDI:-".false."} +export DO_SOI_INC_JEDI=${DO_SOI_INC_JEDI:-".false."} export FRAC_GRID=${FRAC_GRID:-".false."} CRES=$(echo $CASE | cut -c 2-) @@ -93,8 +94,16 @@ for n in $(seq 1 $ntiles); do ln -fs $FIXgfs/orog/${CASE}/C${CRES}.mx${OCNRES}_oro_data.tile${n}.nc $DATA/fnorog.00$n fi - if [[ "$DO_SNO_INC" == ".true." ]] ; then - ln -fs $COMIN/$PDY.${cyc}0000.xainc.tile${n}.nc $DATA/xainc.00$n + if [[ "$DO_SNO_INC_JEDI" == ".true." ]] ; then + ln -fs $COMIN/$PDY.${cyc}0000.xainc.tile${n}.nc $DATA/snow_xainc.00$n + fi + + if [[ "$DO_SOI_INC_JEDI" == ".true." ]] ; then + ln -fs $COMIN/soil_sfcincr_jedi.00$n $DATA/soil_xainc.00$n + fi + + if [[ "$DO_SOI_INC_GSI" == ".true." ]] ; then + ln -fs $COMIN/sfcincr_gsi.00$n $DATA/sfcincr_gsi.00$n fi done From a915a2305e55d4a26588d736a7457bd6c9833605 Mon Sep 17 00:00:00 2001 From: Cory Martin Date: Thu, 9 May 2024 18:02:56 +0000 Subject: [PATCH 20/25] Include fre-nctools.fd in GFS builds (#941) Build the fregrid executable when CMake option -DGFS is set to ON. 
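A minimal sketch of how a GFS-only configuration might be invoked once this change is in place (the source/build directory layout shown is an assumption for illustration, not part of this patch):

    # configure an out-of-source build with the GFS-only utility set, then compile;
    # with this change, fre-nctools.fd (and its fregrid executable) is no longer
    # forced off when GFS=ON
    cmake -DGFS=ON /path/to/UFS_UTILS
    make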
--- CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 23470e23b..82500be16 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -41,7 +41,6 @@ option(GFS "Enable building GFS-only utilities" OFF) # When building the GFS, the following need not be built if(GFS) message(STATUS "Building utilities specific to the GFS") - set(FRENCTOOLS OFF CACHE BOOL "Disable building fre-nctools.fd" FORCE) set(GRIDTOOLS OFF CACHE BOOL "Disable building grid_tools.fd" FORCE) set(OROG_MASK_TOOLS OFF CACHE BOOL "Disable building orog_mask_tools.fd" FORCE) set(SFC_CLIMO_GEN OFF CACHE BOOL "Disable building sfc_climo_gen.fd" FORCE) From 7034628893ab392105df8e81c451eced960d8959 Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Mon, 13 May 2024 16:19:22 -0400 Subject: [PATCH 21/25] Minor cleanup of 'orog' program and its run script (#938) Remove unused namelist variables and functions from orog.fd. Adjust all scripts that call orog.fd accordingly. Add prolog, improve error handling and remove old options from ./ush/fv3gfs_make_orog.sh. Fixes #932. --- driver_scripts/driver_grid.hercules.sh | 4 +- .../orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F | 200 +++++------------- ush/fv3gfs_driver_grid.sh | 4 +- ush/fv3gfs_make_orog.sh | 170 ++++++++------- ush/fv3gfs_ocean_merge.sh | 13 +- 5 files changed, 157 insertions(+), 234 deletions(-) diff --git a/driver_scripts/driver_grid.hercules.sh b/driver_scripts/driver_grid.hercules.sh index cd4afee51..c76366005 100644 --- a/driver_scripts/driver_grid.hercules.sh +++ b/driver_scripts/driver_grid.hercules.sh @@ -154,8 +154,8 @@ fi #----------------------------------------------------------------------- export home_dir=$SLURM_SUBMIT_DIR/.. -export TEMP_DIR=/work/noaa/stmp/$LOGNAME/fv3_grid.$gtype -export out_dir=/work/noaa/stmp/$LOGNAME/my_grids +export TEMP_DIR=/work2/noaa/stmp/$LOGNAME/fv3_grid.$gtype +export out_dir=/work2/noaa/stmp/$LOGNAME/my_grids #----------------------------------------------------------------------- # Should not need to change anything below here. diff --git a/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F b/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F index 9e65956d5..f05c39aa5 100644 --- a/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F +++ b/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F @@ -77,28 +77,19 @@ character(len=256) :: INPUTOROG = "none" character(len=256) :: merge_file = "none" logical :: mask_only = .false. - integer :: MTNRES,IM,JM,NM,NR,NF0,NF1,EFAC,BLAT,NW + integer :: MTNRES,IM,JM,NM,NR,NF0,NF1,EFAC,NW fsize=65536 - READ(5,*) MTNRES,IM,JM,NM,NR,NF0,NF1,EFAC,BLAT READ(5,*) OUTGRID - READ(5,*) INPUTOROG READ(5,*) mask_only READ(5,*) merge_file -! MTNRES=1 -! IM=48 -! JM=48 -! NM=46 -! NF0=0 -! NF1=0 -! efac=0 -! blat=0 -! NR=0 -! OUTGRID = "C48_grid.tile1.nc" -! INPUTOROG = "oro.288x144.nc" - print*, "INPUTOROG=", trim(INPUTOROG) - print*, "IM,JM=", IM, JM + NM=0 + NF0=0 + NF1=0 + EFAC=0 + NR=0 + print*, "INPUTOROG= ", trim(INPUTOROG) print*, "MASK_ONLY", mask_only - print*, "MERGE_FILE", trim(merge_file) + print*, "MERGE_FILE ", trim(merge_file) ! --- MTNRES defines the input (highest) elev resolution ! --- =1 is topo30 30" in units of 1/2 minute. ! so MTNRES for old values must be *2. @@ -106,58 +97,47 @@ ! --- other possibilities are =8 for 4' and =4 for 2' see ! HJ for T1000 test. Must set to 1 for now. 
MTNRES=1 - print*, MTNRES,IM,JM,NM,NR,NF0,NF1,EFAC,BLAT + print*, MTNRES,NM,NR,NF0,NF1,EFAC NW=(NM+1)*((NR+1)*NM+2) IMN = 360*120/MTNRES JMN = 180*120/MTNRES print *, ' Starting terr12 mtnlm7_slm30.f IMN,JMN:',IMN,JMN -! --- read the grid resolution if the OUTGRID exists. - if( trim(OUTGRID) .NE. "none" ) then - inquire(file=trim(OUTGRID), exist=fexist) - if(.not. fexist) then - print*, "FATAL ERROR: file "//trim(OUTGRID) - print*, " does not exist." - CALL ERREXIT(4) - endif - do ncid = 103, 512 - inquire( ncid,OPENED=opened ) - if( .NOT.opened )exit - end do +! --- read the grid resolution from OUTGRID. + inquire(file=trim(OUTGRID), exist=fexist) + if(.not. fexist) then + print*, "FATAL ERROR: file "//trim(OUTGRID) + print*, " does not exist." + CALL ERREXIT(4) + endif + do ncid = 103, 512 + inquire( ncid,OPENED=opened ) + if( .NOT.opened )exit + end do - print*, "outgrid=", trim(outgrid) - error=NF__OPEN(trim(OUTGRID),NF_NOWRITE,fsize,ncid) - call netcdf_err(error, 'Open file '//trim(OUTGRID) ) - error=nf_inq_dimid(ncid, 'nx', id_dim) - call netcdf_err(error, 'inquire dimension nx from file '// + print*, "READ outgrid=", trim(outgrid) + error=NF__OPEN(trim(OUTGRID),NF_NOWRITE,fsize,ncid) + call netcdf_err(error, 'Open file '//trim(OUTGRID) ) + error=nf_inq_dimid(ncid, 'nx', id_dim) + call netcdf_err(error, 'inquire dimension nx from file '// & trim(OUTGRID) ) - error=nf_inq_dimlen(ncid,id_dim,nx) - call netcdf_err(error, 'inquire dimension nx length '// + error=nf_inq_dimlen(ncid,id_dim,nx) + call netcdf_err(error, 'inquire dimension nx length '// & 'from file '//trim(OUTGRID) ) - error=nf_inq_dimid(ncid, 'ny', id_dim) - call netcdf_err(error, 'inquire dimension ny from file '// + error=nf_inq_dimid(ncid, 'ny', id_dim) + call netcdf_err(error, 'inquire dimension ny from file '// & trim(OUTGRID) ) - error=nf_inq_dimlen(ncid,id_dim,ny) - call netcdf_err(error, 'inquire dimension ny length '// + error=nf_inq_dimlen(ncid,id_dim,ny) + call netcdf_err(error, 'inquire dimension ny length '// & 'from file '//trim(OUTGRID) ) - print*, "nx = ", nx - if(IM .ne. nx/2) then - print*, "IM=",IM, " /= grid file nx/2=",nx/2 - print*, "Set IM = ", nx/2 - IM = nx/2 - endif - if(JM .ne. ny/2) then - print*, "JM=",JM, " /= grid file ny/2=",ny/2 - print*, "Set JM = ", ny/2 - JM = ny/2 - endif - error=nf_close(ncid) - call netcdf_err(error, 'close file '//trim(OUTGRID) ) - - endif + IM = nx/2 + JM = ny/2 + print*, "nx, ny, im, jm = ", nx, ny, im, jm + error=nf_close(ncid) + call netcdf_err(error, 'close file '//trim(OUTGRID) ) - CALL TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, + CALL TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, & OUTGRID,INPUTOROG,MASK_ONLY,MERGE_FILE) STOP END @@ -174,8 +154,6 @@ !! @param[in] NF1 Second order spectral filter parameters. !! @param[in] NW Number of waves. !! @param[in] EFAC Factor to adjust orography by its variance. -!! @param[in] BLAT When less than zero, reverse latitude/ -!! longitude for output. !! @param[in] OUTGRID The 'grid' file for the model tile. !! @param[in] INPUTOROG Input orography/GWD file on gaussian !! grid. When specified, will be interpolated to model tile. @@ -184,7 +162,7 @@ !! @param[in] MASK_ONLY Flag to generate the Land Mask only !! @param[in] MERGE_FILE Ocean merge file !! 
@author Jordan Alpert NOAA/EMC - SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, + SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, & OUTGRID,INPUTOROG,MASK_ONLY,MERGE_FILE) implicit none include 'netcdf.inc' @@ -200,7 +178,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, real, PARAMETER :: PI=3.1415926535897931 integer, PARAMETER :: NMT=14 - integer :: efac,blat,zsave1,zsave2 + integer :: efac,zsave1,zsave2 integer :: mskocn,notocn integer :: i,j,nx,ny,ncid,js,jn,iw,ie,k,it,jt,error,id_dim integer :: id_var,nx_in,ny_in,fsize,wgta,IN,INW,INE,IS,ISW,ISE @@ -246,7 +224,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, complex :: ffj(im/2+1) logical :: grid_from_file,output_binary,fexist,opened - logical :: SPECTR, REVLAT, FILTER + logical :: SPECTR, FILTER logical :: is_south_pole(IM,JM), is_north_pole(IM,JM) logical :: LB(IM*JM) @@ -275,7 +253,6 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, DEGRAD = 180./PI SPECTR = NM .GT. 0 ! if NM <=0 grid is assumed lat/lon FILTER = .TRUE. ! Spectr Filter defaults true and set by NF1 & NF0 - REVLAT = BLAT .LT. 0 ! Reverse latitude/longitude for output MSKOCN = 1 ! Ocean land sea mask =1, =0 if not present NOTOCN = 1 ! =1 Ocean lsm input reverse: Ocean=1, land=0 ! --- The LSM Gaussian file from the ocean model sometimes arrives with @@ -329,8 +306,8 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, ! ! --- IMN,JMN - print*, ' IM, JM, NM, NR, NF0, NF1, EFAC, BLAT' - print*, IM,JM,NM,NR,NF0,NF1,EFAC,BLAT + print*, ' IM, JM, NM, NR, NF0, NF1, EFAC' + print*, IM,JM,NM,NR,NF0,NF1,EFAC print *,' imn,jmn,glob(imn,jmn)=',imn,jmn,glob(imn,jmn) print *,' UBOUND ZAVG=',UBOUND(ZAVG) print *,' UBOUND glob=',UBOUND(glob) @@ -409,7 +386,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, ! This code assumes that lat runs from north to south for gg! ! - print *,' SPECTR=',SPECTR,' REVLAT=',REVLAT,' ** with GICE-07 **' + print *,' SPECTR=',SPECTR,' ** with GICE-07 **' IF (SPECTR) THEN CALL SPLAT(4,JM,COSCLT,WGTCLT) DO J=1,JM/2 @@ -936,10 +913,10 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, & //trim(INPUTOROG) ) print*, "calling MAKEOA3 to compute OA, OL" - CALL MAKEOA3(ZAVG,zslm,VAR,GLAT,OA,OL,IWORK,ELVMAX,ORO,SLM, + CALL MAKEOA3(ZAVG,VAR,GLAT,OA,OL,IWORK,ELVMAX,ORO,SLM, 1 WORK1,WORK2,WORK3,WORK4,WORK5,WORK6, 2 IM,JM,IMN,JMN,geolon_c,geolat_c, - 3 geolon,geolat,is_south_pole,is_north_pole,nx_in,ny_in, + 3 geolon,geolat,nx_in,ny_in, 4 oa_in,ol_in,slm_in,lon_in,lat_in) deallocate(oa_in,ol_in,slm_in,lon_in,lat_in) @@ -1290,13 +1267,6 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, enddo ELSE - IF (REVLAT) THEN - CALL REVERS(IM, JM, numi, SLM, WORK1) - CALL REVERS(IM, JM, numi, ORO, WORK1) - DO IMT=1,NMT - CALL REVERS(IM, JM, numi, HPRIME(1,1,IMT), WORK1) - ENDDO - ENDIF ORS=0. ORF=ORO ENDIF @@ -1507,7 +1477,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC,BLAT, tend=timef() write(6,*)' Total runtime time= ',tend-tbeg1 RETURN - END + END SUBROUTINE TERSUB !> Create the orography, land-mask, standard deviation of !! orography and the convexity on a model gaussian grid. @@ -1545,9 +1515,7 @@ SUBROUTINE MAKEMT(ZAVG,ZSLM,ORO,SLM,VAR,VAR4, INTEGER ZAVG(IMN,JMN),ZSLM(IMN,JMN) DIMENSION ORO(IM,JM),SLM(IM,JM),VAR(IM,JM),VAR4(IM,JM) DIMENSION IST(IM,jm),IEN(IM,jm),JST(JM),JEN(JM),numi(jm) - LOGICAL FLAG, DEBUG -C==== DATA DEBUG/.TRUE./ - DATA DEBUG/.FALSE./ + LOGICAL FLAG C ! 
---- OCLSM holds the ocean (im,jm) grid print *,' _____ SUBROUTINE MAKEMT ' @@ -3627,7 +3595,6 @@ end subroutine interpolate_mismatch !! is computed from the high-resolution orography data. !! !! @param[in] zavg High-resolution orography data. -!! @param[in] zslm High-resolution land-mask data. Not used. !! @param[in] var Standard deviation of orography on the model grid. !! @param[out] glat Latitude of each row of input terrain dataset. !! @param[out] oa4 Orographic asymmetry on the model grid. Four @@ -3654,8 +3621,6 @@ end subroutine interpolate_mismatch !! @param[in] lat_c Corner point latitudes of the model grid points. !! @param[in] lon_t Center point longitudes of the model grid points. !! @param[in] lat_t Center point latitudes of the model grid points. -!! @param[in] is_south_pole Not used. -!! @param[in] is_north_pole Not used. !! @param[in] imi 'i' dimension of input gfs orography data. !! @param[in] jmi 'j' dimension of input gfs orography data. !! @param[in] oa_in Asymmetry on the input gfs orography data. @@ -3664,10 +3629,10 @@ end subroutine interpolate_mismatch !! @param[in] lon_in Longitude on the input gfs orography data. !! @param[in] lat_in Latitude on the input gfs orography data. !! @author Jordan Alpert NOAA/EMC - SUBROUTINE MAKEOA3(ZAVG,zslm,VAR,GLAT,OA4,OL,IOA4,ELVMAX, + SUBROUTINE MAKEOA3(ZAVG,VAR,GLAT,OA4,OL,IOA4,ELVMAX, 1 ORO,SLM,oro1,XNSUM,XNSUM1,XNSUM2,XNSUM3,XNSUM4, 2 IM,JM,IMN,JMN,lon_c,lat_c,lon_t,lat_t, - 3 is_south_pole,is_north_pole,IMI,JMI,OA_IN,OL_IN, + 3 IMI,JMI,OA_IN,OL_IN, 4 slm_in,lon_in,lat_in) ! Required when using iplib v4.0 or higher. @@ -3681,7 +3646,7 @@ SUBROUTINE MAKEOA3(ZAVG,zslm,VAR,GLAT,OA4,OL,IOA4,ELVMAX, real, PARAMETER :: R2D=180./3.14159265358979 integer IM,JM,IMN,JMN,IMI,JMI real GLAT(JMN) - INTEGER ZAVG(IMN,JMN),ZSLM(IMN,JMN) + INTEGER ZAVG(IMN,JMN) real SLM(IM,JM) real ORO(IM,JM),ORO1(IM,JM),ELVMAX(IM,JM),ZMAX(IM,JM) real OA4(IM,JM,4) @@ -3691,26 +3656,16 @@ SUBROUTINE MAKEOA3(ZAVG,zslm,VAR,GLAT,OA4,OL,IOA4,ELVMAX, real lon_in(IMI,JMI), lat_in(IMI,JMI) real lon_c(IM+1,JM+1), lat_c(IM+1,JM+1) real lon_t(IM,JM), lat_t(IM,JM) - logical is_south_pole(IM,JM), is_north_pole(IM,JM) real XNSUM(IM,JM),XNSUM1(IM,JM),XNSUM2(IM,JM) real XNSUM3(IM,JM),XNSUM4(IM,JM) real VAR(IM,JM),OL(IM,JM,4) - LOGICAL FLAG integer i,j,ilist(IMN),numx,i1,j1,ii1 - integer KWD,II,npts + integer KWD real LONO(4),LATO(4),LONI,LATI - real DELXN,HC,HEIGHT,XNPU,XNPD,T + real DELXN,HC,HEIGHT,T integer NS0,NS1,NS2,NS3,NS4,NS5,NS6 logical inside_a_polygon - real lon,lat,dlon,dlat,dlat_old - real lon1,lat1,lon2,lat2 - real xnsum11,xnsum12,xnsum21,xnsum22,xnsumx - real HC_11, HC_12, HC_21, HC_22 - real xnsum1_11,xnsum1_12,xnsum1_21,xnsum1_22 - real xnsum2_11,xnsum2_12,xnsum2_21,xnsum2_22 - real get_lon_angle, get_lat_angle, get_xnsum - integer ist, ien, jst, jen - real xland,xwatr,xl1,xs1,oroavg + integer jst, jen integer int_opt, ipopt(20), kgds_input(200), kgds_output(200) integer count_land_output integer ij, ijmdl_output, iret, num_mismatch_land, num @@ -4055,45 +4010,6 @@ SUBROUTINE MAKEOA3(ZAVG,zslm,VAR,GLAT,OA4,OL,IOA4,ELVMAX, C RETURN END SUBROUTINE MAKEOA3 - -!> Reverse the east-west and north-south axes -!! in a two-dimensional array. -!! -!! @param [in] im 'i' dimension of the 2-d array. -!! @param [in] jm 'j' dimension of the 2-d array. -!! @param [in] numi Not used. -!! @param [inout] f The two-dimensional array to -!! be processed. -!! @param [out] wrk Two-dimensional work array. -!! 
@author Jordan Alpert NOAA/EMC - SUBROUTINE REVERS(IM, JM, numi, F, WRK) -! - REAL F(IM,JM), WRK(IM,JM) - integer numi(jm) - imb2 = im / 2 - do i=1,im*jm - WRK(i,1) = F(i,1) - enddo - do j=1,jm - jr = jm - j + 1 - do i=1,im - ir = i + imb2 - if (ir .gt. im) ir = ir - im - f(ir,jr) = WRK(i,j) - enddo - enddo -! - tem = 0.0 - do i=1,im - tem= tem + F(I,1) - enddo - tem = tem / im - do i=1,im - F(I,1) = tem - enddo -! - RETURN - END !> Convert from a reduced grid to a full grid. !! @@ -4340,7 +4256,7 @@ subroutine maxmin(ia,len,tile) ccmr integer*2 ia(len) character*7 tile - integer iaamax, iaamin, len, j, m, ja, kount + integer iaamax, iaamin, len, m, ja, kount integer(8) sum2,std,mean,isum integer i_count_notset,kount_9 ! --- missing is -9999 @@ -4723,13 +4639,13 @@ subroutine get_xnsum2(lon1,lat1,lon2,lat2,IMN,JMN, real, intent(out) :: xnsum1,xnsum2,HC logical verbose - real lon1,lat1,lon2,lat2,oro,delxn + real lon1,lat1,lon2,lat2,delxn integer IMN,JMN real glat(JMN) integer zavg(IMN,JMN) integer i, j, ist, ien, jst, jen, i1 real HEIGHT, var - real XW1,XW2,slm,xnsum + real XW1,XW2,xnsum !---figure out ist,ien,jst,jen do j = 1, JMN if( GLAT(J) .GT. lat1 ) then @@ -4817,13 +4733,12 @@ subroutine get_xnsum3(lon1,lat1,lon2,lat2,IMN,JMN, implicit none real, intent(out) :: xnsum1,xnsum2 - real lon1,lat1,lon2,lat2,oro,delxn + real lon1,lat1,lon2,lat2,delxn integer IMN,JMN real glat(JMN) integer zavg(IMN,JMN) integer i, j, ist, ien, jst, jen, i1 real HEIGHT, HC - real XW1,XW2,slm,xnsum !---figure out ist,ien,jst,jen ! if lat1 or lat 2 is 90 degree. set jst = JMN jst = JMN @@ -4895,7 +4810,6 @@ subroutine nanc(a,l,c) data inaq3/x'FFC00000'/ data inaq4/x'FFFFFFFF'/ c - real(kind=8)a(l),rtc,t1,t2 character*(*) c c t1=rtc() cgwv print *, ' nanc call ',c diff --git a/ush/fv3gfs_driver_grid.sh b/ush/fv3gfs_driver_grid.sh index 710e9fbfb..a0b48f420 100755 --- a/ush/fv3gfs_driver_grid.sh +++ b/ush/fv3gfs_driver_grid.sh @@ -197,7 +197,7 @@ if [ $gtype = uniform ] || [ $gtype = stretch ] || [ $gtype = nest ]; then echo "............ Execute fv3gfs_make_orog.sh for tile $tile .................." echo set -x - $script_dir/fv3gfs_make_orog.sh $res $tile $grid_dir $orog_dir $script_dir $topo + $script_dir/fv3gfs_make_orog.sh $res $tile $grid_dir $orog_dir $topo err=$? if [ $err != 0 ]; then exit $err @@ -399,7 +399,7 @@ elif [ $gtype = regional_gfdl ] || [ $gtype = regional_esg ]; then echo "............ Execute fv3gfs_make_orog.sh for tile $tile .................." echo set -x - $script_dir/fv3gfs_make_orog.sh $res $tile $grid_dir $orog_dir $script_dir $topo + $script_dir/fv3gfs_make_orog.sh $res $tile $grid_dir $orog_dir $topo err=$? if [ $err != 0 ]; then exit $err diff --git a/ush/fv3gfs_make_orog.sh b/ush/fv3gfs_make_orog.sh index 6fcff6673..56f824041 100755 --- a/ush/fv3gfs_make_orog.sh +++ b/ush/fv3gfs_make_orog.sh @@ -1,101 +1,109 @@ #!/bin/bash +#------------------------------------------------------------------- +# Program Name: fv3gfs_make_orog +# +# Run the orography ('orog') program to create mask, terrain and +# GWD fields on the model tile. +# +# Author: GFDL Programmer +# +# History Log: +# 01/2018: Initial version. +# 04/2024: Some clean up. +# +# Usage: +# Arguments: +# res - "C" Resolution of model grid - 48, 96, 768, etc. +# tile - Tile number. +# griddir - Location of model 'grid' file. +# outdir - Location of the model orography file output by +# the 'orog' program. +# indir - Location of input land mask and terrain data. 
+# +# Input Files: +# $GRIDFILE - The model 'grid' file +# containing georeference info. +# topography.antarctica.ramp.30s.nc - RAMP terrain data. +# landcover.umd.30s.nc - Global land mask data. +# topography.gmted2010.30s.nc - Global USGS GMTED 2010 +# terrain data. +# +# Output Files: +# out.oro.nc - The model orography file (single tile). +# +# Condition codes: +# 0 - Normal termination. +# 1 - Incorrect number of script arguments. +# 2 - Program executable does not exits. +# 3 - Error running program. +#------------------------------------------------------------------- + set -eux nargv=$# -inorogexist=0 - -if [ $nargv -eq 5 ]; then # lat-lon grid - lonb=$1 - latb=$2 - outdir=$3 - script_dir=$4 - is_latlon=1 - orogfile="none" - hist_dir=$5 - workdir=$TEMP_DIR/latlon/orog/latlon_${lonb}x${latb} -elif [ $nargv -eq 6 ]; then # cubed-sphere grid - res=$1 - lonb=$1 - latb=$1 - tile=$2 - griddir=$3 - outdir=$4 - script_dir=$5 - is_latlon=0 - orogfile="none" - hist_dir=$6 - workdir=$TEMP_DIR/C${res}/orog/tile$tile -elif [ $nargv -eq 8 ]; then # input your own orography files +if [ $nargv -eq 5 ]; then res=$1 - lonb=$1 - latb=$1 tile=$2 griddir=$3 outdir=$4 - is_latlon=0 - inputorog=$5 - script_dir=$6 - orogfile=$inputorog:t - inorogexist=1 - hist_dir=$7 - workdir=$TEMP_DIR/C${res}/orog/tile$tile + indir=$5 else - echo "Number of arguments must be 6 for cubic sphere grid" - echo "Usage for cubic sphere grid: $0 resolution tile griddir outdir script_dir hist_dir" + set +x + echo "FATAL ERROR: Number of arguments must be 5." + echo "Usage: $0 resolution tile griddir outdir indir." + set -x exit 1 fi -indir=$hist_dir executable=$exec_dir/orog if [ ! -s $executable ]; then - echo "executable does not exist" - exit 1 + set +x + echo "FATAL ERROR, ${executable} does not exist." + set -x + exit 2 fi +workdir=$TEMP_DIR/C${res}/orog/tile$tile + if [ ! -s $workdir ]; then mkdir -p $workdir ;fi if [ ! -s $outdir ]; then mkdir -p $outdir ;fi -#jcap is for Gaussian grid -#jcap=`expr $latb - 2 ` -jcap=0 -NF1=0 -NF2=0 -mtnres=1 -efac=0 -blat=0 -NR=0 - -if [ $is_latlon -eq 1 ]; then - OUTGRID="none" -else - OUTGRID="C${res}_grid.tile${tile}.nc" -fi +GRIDFILE="C${res}_grid.tile${tile}.nc" # Make Orograraphy -echo "OUTGRID = $OUTGRID" +set +x +echo "GRIDFILE = $GRIDFILE" echo "workdir = $workdir" echo "outdir = $outdir" echo "indir = $indir" +set -x cd $workdir -cp ${indir}/topography.antarctica.ramp.30s.nc . -cp ${indir}/landcover.umd.30s.nc . -cp ${indir}/topography.gmted2010.30s.nc . -if [ $inorogexist -eq 1 ]; then - cp $inputorog . -fi - -if [ $is_latlon -eq 0 ]; then - cp ${griddir}/$OUTGRID . -fi -cp $executable . - -echo $mtnres $lonb $latb $jcap $NR $NF1 $NF2 $efac $blat > INPS -echo $OUTGRID >> INPS -echo $orogfile >> INPS +ln -fs ${indir}/topography.antarctica.ramp.30s.nc . +ln -fs ${indir}/landcover.umd.30s.nc . +ln -fs ${indir}/topography.gmted2010.30s.nc . +ln -fs ${griddir}/$GRIDFILE . +ln -fs $executable . + +#------------------------------------------------------------------- +# Set up program namelist. The entries are: +# +# 1 - GRIDFILE - model 'grid' file. +# 2 - Logical to output land mask only. When creating a grid +# for the coupled model ("ocn" resolution is specified) +# this is true. The mask is then tweaked during the +# ocean merge step before the 'orog' program is run again +# (in fv3gfs_ocean_merge.sh) to create the full 'orog' +# file. When false, the 'orog' program outputs the +# full orography file. +# 3 - The input file from the ocean merge step. 
Defaults +# to 'none' for this script. +#------------------------------------------------------------------- + +echo $GRIDFILE > INPS if [ -z ${ocn+x} ]; then echo ".false." >> INPS else @@ -105,21 +113,19 @@ echo "none" >> INPS cat INPS time $executable < INPS +rc=$? -if [ $? -ne 0 ]; then - echo "ERROR in running $executable " - exit 1 +if [ $rc -ne 0 ]; then + set +x + echo "FATAL ERROR running $executable." + set -x + exit 3 else - if [ $is_latlon -eq 1 ]; then - outfile=oro.${lonb}x${latb}.nc - else - outfile=oro.C${res}.tile${tile}.nc - fi - + outfile=oro.C${res}.tile${tile}.nc mv ./out.oro.nc $outdir/$outfile - echo "file $outdir/$outfile is created" - echo "Successfully running $executable " + set +x + echo "Successfully ran ${executable}." + echo "File $outdir/$outfile is created." + set -x exit 0 fi - -exit diff --git a/ush/fv3gfs_ocean_merge.sh b/ush/fv3gfs_ocean_merge.sh index 500ccbd0a..519a2e9da 100755 --- a/ush/fv3gfs_ocean_merge.sh +++ b/ush/fv3gfs_ocean_merge.sh @@ -45,19 +45,22 @@ EOF for tnum in '1' '2' '3' '4' '5' '6' do cd ${TEMP_DIR}/C${res}/orog/tile$tnum - echo $tnum $res $res 0 0 0 0 0 0 > INPS - echo C${res}_grid.tile${tnum}.nc >> INPS - echo none >> INPS + echo C${res}_grid.tile${tnum}.nc > INPS echo ".false." >> INPS echo '"'${TEMP_DIR}/ocean_merged/C${res}.mx${ocn}/C${res}.mx${ocn}.tile${tnum}.nc'"' >> INPS cat INPS time ${exec_dir}/orog < INPS + rc=$? + + if [[ $rc -ne 0 ]] ; then + echo "FATAL ERROR running orog." + exit $rc + fi + ncks -4 -O ${TEMP_DIR}/ocean_merged/C${res}.mx${ocn}/C${res}.mx${ocn}.tile${tnum}.nc ${TEMP_DIR}/ocean_merged/C${res}.mx${ocn}/C${res}.mx${ocn}.tile${tnum}.nc ncks -A -v lake_frac,lake_depth ${TEMP_DIR}/ocean_merged/C${res}.mx${ocn}/C${res}.mx${ocn}.tile${tnum}.nc out.oro.nc - #cp out.oro.nc $out_dir/oro_C${res}.mx${ocn}.tile${tnum}.nc cp out.oro.nc $orog_dir/oro.C${res}.tile${tnum}.nc - #cp C${res}_grid.tile${tnum}.nc $out_dir/C${res}_grid.tile${tnum}.nc done From c6efbbda8111f3b36ec2ca4c9c93ae3871548dc9 Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Mon, 20 May 2024 11:28:11 -0400 Subject: [PATCH 22/25] Add threading to the filter_topo and orog_gsl codes (#948) Include threading in both programs. Update the "regional.gsl.gwd" regression test - which runs both programs - to run twice with varying thread counts. Fixes #939. --- reg_tests/grid_gen/driver.hera.sh | 13 ++++-- reg_tests/grid_gen/driver.hercules.sh | 14 ++++-- reg_tests/grid_gen/driver.jet.sh | 11 ++++- reg_tests/grid_gen/driver.orion.sh | 13 ++++-- reg_tests/grid_gen/driver.wcoss2.sh | 13 ++++-- reg_tests/grid_gen/regional.gsl.gwd.sh | 13 +++--- .../filter_topo.fd/CMakeLists.txt | 4 ++ .../filter_topo.fd/filter_topo.F90 | 44 ++++++++++++++++++- .../orog_gsl.fd/CMakeLists.txt | 4 ++ .../orog_gsl.fd/gsl_oro_data.f90 | 12 ++++- .../module_gsl_oro_data_lg_scale.f90 | 16 +++---- .../module_gsl_oro_data_sm_scale.f90 | 20 +++------ 12 files changed, 132 insertions(+), 45 deletions(-) diff --git a/reg_tests/grid_gen/driver.hera.sh b/reg_tests/grid_gen/driver.hera.sh index 66673ab34..00dbc1e79 100755 --- a/reg_tests/grid_gen/driver.hera.sh +++ b/reg_tests/grid_gen/driver.hera.sh @@ -103,19 +103,26 @@ TEST5=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:07:00 -A $PROJECT_ -o $LOG_FILE5 -e $LOG_FILE5 ./esg.regional.pct.cat.sh) #----------------------------------------------------------------------------- -# Regional GSL gravity wave drag test. +# Regional GSL gravity wave drag test. 
This test is run with varying +# thread counts. #----------------------------------------------------------------------------- +export nthreads=12 LOG_FILE6=${LOG_FILE}06 -TEST6=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:07:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd \ +TEST6=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:07:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd.12 \ -o $LOG_FILE6 -e $LOG_FILE6 ./regional.gsl.gwd.sh) +export nthreads=24 +LOG_FILE7=${LOG_FILE}07 +TEST7=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:07:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd.24 \ + -o $LOG_FILE7 -e $LOG_FILE7 ./regional.gsl.gwd.sh) + #----------------------------------------------------------------------------- # Create summary log. #----------------------------------------------------------------------------- sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J grid_summary -o $LOG_FILE -e $LOG_FILE \ - --open-mode=append -q $QUEUE -d afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6 << EOF + --open-mode=append -q $QUEUE -d afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7 << EOF #!/bin/bash grep -a '<<<' ${LOG_FILE}* > $SUM_FILE EOF diff --git a/reg_tests/grid_gen/driver.hercules.sh b/reg_tests/grid_gen/driver.hercules.sh index 5f4a81478..b5da4c807 100755 --- a/reg_tests/grid_gen/driver.hercules.sh +++ b/reg_tests/grid_gen/driver.hercules.sh @@ -100,19 +100,27 @@ TEST5=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_ -o $LOG_FILE5 -e $LOG_FILE5 ./esg.regional.pct.cat.sh) #----------------------------------------------------------------------------- -# Regional grid with GSL gravity wave drag fields. +# Regional grid with GSL gravity wave drag fields. Run with varying +# thread counts. #----------------------------------------------------------------------------- +export nthreads=12 LOG_FILE6=${LOG_FILE}06 -TEST6=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd \ +TEST6=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd.12 \ -o $LOG_FILE6 -e $LOG_FILE6 ./regional.gsl.gwd.sh) + +export nthreads=24 +LOG_FILE7=${LOG_FILE}07 +TEST7=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd.24 \ + -o $LOG_FILE7 -e $LOG_FILE7 ./regional.gsl.gwd.sh) + #----------------------------------------------------------------------------- # Create summary log. #----------------------------------------------------------------------------- sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J grid_summary -o $LOG_FILE -e $LOG_FILE \ - -q $QUEUE -d afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6 << EOF + -q $QUEUE -d afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7 << EOF #!/bin/bash grep -a '<<<' ${LOG_FILE}* > $SUM_FILE EOF diff --git a/reg_tests/grid_gen/driver.jet.sh b/reg_tests/grid_gen/driver.jet.sh index 2a4c76a1f..e8d024fba 100755 --- a/reg_tests/grid_gen/driver.jet.sh +++ b/reg_tests/grid_gen/driver.jet.sh @@ -101,13 +101,20 @@ TEST5=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:07:00 -A $PROJECT_ --partition=xjet -o $LOG_FILE5 -e $LOG_FILE5 ./esg.regional.pct.cat.sh) #----------------------------------------------------------------------------- -# Regional GSL gravity wave drag. +# Regional GSL gravity wave drag. Run with varying +# thread counts. 
#----------------------------------------------------------------------------- +export nthreads=12 LOG_FILE6=${LOG_FILE}06 -TEST6=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:07:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd \ +TEST6=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:07:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd.12 \ --partition=xjet -o $LOG_FILE6 -e $LOG_FILE6 ./regional.gsl.gwd.sh) +export nthreads=24 +LOG_FILE7=${LOG_FILE}07 +TEST7=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:07:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd.24 \ + --partition=xjet -o $LOG_FILE7 -e $LOG_FILE7 ./regional.gsl.gwd.sh) + #----------------------------------------------------------------------------- # Create summary log. #----------------------------------------------------------------------------- diff --git a/reg_tests/grid_gen/driver.orion.sh b/reg_tests/grid_gen/driver.orion.sh index 03ff3ed6c..18fda2e69 100755 --- a/reg_tests/grid_gen/driver.orion.sh +++ b/reg_tests/grid_gen/driver.orion.sh @@ -99,19 +99,26 @@ TEST5=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_ -o $LOG_FILE5 -e $LOG_FILE5 ./esg.regional.pct.cat.sh) #----------------------------------------------------------------------------- -# Regional grid with GSL gravity wave drag fields. +# Regional grid with GSL gravity wave drag fields. Run with varying +# thread counts. #----------------------------------------------------------------------------- +export nthreads=12 LOG_FILE6=${LOG_FILE}06 -TEST6=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd \ +TEST6=$(sbatch --parsable --ntasks-per-node=12 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd.12 \ -o $LOG_FILE6 -e $LOG_FILE6 ./regional.gsl.gwd.sh) +export nthreads=24 +LOG_FILE7=${LOG_FILE}07 +TEST7=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J reg.gsl.gwd.24 \ + -o $LOG_FILE7 -e $LOG_FILE7 ./regional.gsl.gwd.sh) + #----------------------------------------------------------------------------- # Create summary log. #----------------------------------------------------------------------------- sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J grid_summary -o $LOG_FILE -e $LOG_FILE \ - -q $QUEUE -d afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6 << EOF + -q $QUEUE -d afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7 << EOF #!/bin/bash grep -a '<<<' ${LOG_FILE}* > $SUM_FILE EOF diff --git a/reg_tests/grid_gen/driver.wcoss2.sh b/reg_tests/grid_gen/driver.wcoss2.sh index 35c077f34..4e381c797 100755 --- a/reg_tests/grid_gen/driver.wcoss2.sh +++ b/reg_tests/grid_gen/driver.wcoss2.sh @@ -104,19 +104,26 @@ TEST5=$(qsub -V -o $LOG_FILE5 -e $LOG_FILE5 -q $QUEUE -A $PROJECT_CODE -l wallti -N esg.regional.pct.cat -l select=1:ncpus=30:mem=40GB $PWD/esg.regional.pct.cat.sh) #----------------------------------------------------------------------------- -# Regional GSL gravity wave drag test. +# Regional GSL gravity wave drag test. Run with varying +# thread counts. 
#----------------------------------------------------------------------------- +export nthreads=15 LOG_FILE6=${LOG_FILE}06 TEST6=$(qsub -V -o $LOG_FILE6 -e $LOG_FILE6 -q $QUEUE -A $PROJECT_CODE -l walltime=00:07:00 \ - -N rsg.gsl.gwd -l select=1:ncpus=30:mem=40GB $PWD/regional.gsl.gwd.sh) + -N reg.gsl.gwd.15 -l select=1:ncpus=15:mem=40GB $PWD/regional.gsl.gwd.sh) + +export nthreads=30 +LOG_FILE7=${LOG_FILE}07 +TEST7=$(qsub -V -o $LOG_FILE7 -e $LOG_FILE7 -q $QUEUE -A $PROJECT_CODE -l walltime=00:07:00 \ + -N reg.gsl.gwd -l select=1:ncpus=30:mem=40GB $PWD/regional.gsl.gwd.sh) #----------------------------------------------------------------------------- # Create summary log. #----------------------------------------------------------------------------- qsub -V -o ${LOG_FILE} -e ${LOG_FILE} -q $QUEUE -A $PROJECT_CODE -l walltime=00:02:00 \ - -N grid_summary -l select=1:ncpus=1:mem=100MB -W depend=afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6 << EOF + -N grid_summary -l select=1:ncpus=1:mem=100MB -W depend=afterok:$TEST1:$TEST2:$TEST3:$TEST4:$TEST5:$TEST6:$TEST7 << EOF #!/bin/bash cd ${this_dir} grep -a '<<<' ${LOG_FILE}* | grep -v echo > $SUM_FILE diff --git a/reg_tests/grid_gen/regional.gsl.gwd.sh b/reg_tests/grid_gen/regional.gsl.gwd.sh index 311116c1d..5ef2c8047 100755 --- a/reg_tests/grid_gen/regional.gsl.gwd.sh +++ b/reg_tests/grid_gen/regional.gsl.gwd.sh @@ -8,8 +8,11 @@ set -x -export TEMP_DIR=${WORK_DIR}/regional.gsl.gwd.work -export out_dir=${WORK_DIR}/regional.gsl.gwd +nthreads=${nthreads:-6} +export OMP_NUM_THREADS=$nthreads + +export TEMP_DIR=${WORK_DIR}/regional.gsl.gwd.${nthreads}.work +export out_dir=${WORK_DIR}/regional.gsl.gwd.${nthreads} export gtype=regional_esg export make_gsl_orog=true # Create GSL gravity wave drag fields @@ -34,7 +37,7 @@ $home_dir/ush/fv3gfs_driver_grid.sh iret=$? if [ $iret -ne 0 ]; then set +x - echo "<<< REGIONAL GSL GWD TEST FAILED. <<<" + echo "<<< REGIONAL ${nthreads} THREAD GSL GWD TEST FAILED. <<<" exit $iret fi @@ -61,12 +64,12 @@ done set +x if [ $test_failed -ne 0 ]; then - echo "<<< REGIONAL GSL GWD TEST FAILED. >>>" + echo "<<< REGIONAL ${nthreads} THREAD GSL GWD TEST FAILED. >>>" if [ "$UPDATE_BASELINE" = "TRUE" ]; then $home_dir/reg_tests/update_baseline.sh "${HOMEreg}/.." "regional.gsl.gwd" $commit_num fi else - echo "<<< REGIONAL GSL GWD TEST PASSED. >>>" + echo "<<< REGIONAL ${nthreads} THREAD GSL GWD TEST PASSED. >>>" fi exit 0 diff --git a/sorc/grid_tools.fd/filter_topo.fd/CMakeLists.txt b/sorc/grid_tools.fd/filter_topo.fd/CMakeLists.txt index 0f1db12b7..e8789caaf 100644 --- a/sorc/grid_tools.fd/filter_topo.fd/CMakeLists.txt +++ b/sorc/grid_tools.fd/filter_topo.fd/CMakeLists.txt @@ -24,6 +24,10 @@ target_link_libraries( PUBLIC NetCDF::NetCDF_Fortran) +if(OpenMP_Fortran_FOUND) + target_link_libraries(filter_topo_lib PUBLIC OpenMP::OpenMP_Fortran) +endif() + target_link_libraries(${exe_name} PRIVATE filter_topo_lib) install(TARGETS ${exe_name}) diff --git a/sorc/grid_tools.fd/filter_topo.fd/filter_topo.F90 b/sorc/grid_tools.fd/filter_topo.fd/filter_topo.F90 index d4b5d672a..4bc01b448 100644 --- a/sorc/grid_tools.fd/filter_topo.fd/filter_topo.F90 +++ b/sorc/grid_tools.fd/filter_topo.fd/filter_topo.F90 @@ -9,6 +9,7 @@ !! @author Zhi Liang (GFDL) who packaged it into a standalone application. program filter_topo + use omp_lib use utils implicit none @@ -34,7 +35,7 @@ program filter_topo real:: peak_fac ! overshoot factor for the mountain peak real:: max_slope ! 
max allowable terrain slope: 1 --> 45 deg - integer :: n_del2_weak + integer :: n_del2_weak, tid, nthreads integer :: ntiles = 0 @@ -50,6 +51,17 @@ program filter_topo integer :: is,ie,js,je,isd,ied,jsd,jed integer,parameter :: ng = 3 integer :: nx, ny, npx, npy, nx_nest, ny_nest, npx_nest, npy_nest, is_nest, ie_nest, js_nest, je_nest, isd_nest, ied_nest, jsd_nest, jed_nest + +!$OMP PARALLEL PRIVATE(TID) + tid = omp_get_thread_num() + if (tid == 0) then + nthreads = omp_get_num_threads() + print* + print*,'- BEGIN EXECUTION WITH NUMBER OF THREADS = ',nthreads + print* + endif +!$OMP END PARALLEL + !--- read namelist call read_namelist() @@ -69,6 +81,9 @@ program filter_topo !--- write out the data call write_topo_file(is,ie,js,je,ntiles,oro(is:ie,js:je,:),regional ) + print* + print*,'- NORMAL TERMINATION.' + contains !> ??? @@ -760,6 +775,7 @@ subroutine read_grid_file(regional) geolat_t(:,:,:) = -1.e25 do t = 1, ntiles +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J,G1,G2,G3,G4,G5) do j=js,je ; do i=is,ie g1(1) = geolon_c(i,j,t); g1(2) = geolat_c(i,j,t) g2(1) = geolon_c(i+1,j,t); g2(2) = geolat_c(i+1,j,t) @@ -769,8 +785,8 @@ subroutine read_grid_file(regional) geolon_t(i,j,t) = g5(1) geolat_t(i,j,t) = g5(2) enddo ; enddo +!$OMP END PARALLEL DO enddo - if( .not. regional ) then call fill_cubic_grid_halo(geolon_t, geolon_t, ng, 0, 0, 1, 1) @@ -783,6 +799,7 @@ subroutine read_grid_file(regional) allocate(dx(isd:ied,jsd:jed+1,ntiles)) allocate(dy(isd:ied+1,jsd:jed,ntiles)) do t = 1, ntiles +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J,G1,G2) do j = js, je+1 ; do i = is, ie g1(1) = geolon_c(i ,j,t) g1(2) = geolat_c(i ,j,t) @@ -790,8 +807,10 @@ subroutine read_grid_file(regional) g2(2) = geolat_c(i+1,j,t) dx(i,j,t) = great_circle_dist( g2, g1, radius ) enddo ; enddo +!$OMP END PARALLEL DO enddo do t = 1, ntiles +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J,G1,G2) do j = js, je do i = is, ie+1 g1(1) = geolon_c(i,j, t) @@ -801,6 +820,7 @@ subroutine read_grid_file(regional) dy(i,j,t) = great_circle_dist( g2, g1, radius ) enddo enddo +!$OMP END PARALLEL DO enddo if( .not. 
regional ) then @@ -831,6 +851,7 @@ subroutine read_grid_file(regional) allocate(dxa(isd:ied,jsd:jed,ntiles)) allocate(dya(isd:ied,jsd:jed,ntiles)) do t = 1, ntiles +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J,G1,G2,G3,G4) do j=js,je ; do i=is,ie g1(1) = geolon_c(i,j,t); g1(2) = geolat_c(i,j,t) g2(1) = geolon_c(i,j+1,t); g2(2) = geolat_c(i,j+1,t) @@ -847,6 +868,7 @@ subroutine read_grid_file(regional) call mid_pt_sphere(g1, g2, g4) dya(i,j,t) = great_circle_dist( g4, g3, radius ) enddo; enddo +!$OMP END PARALLEL DO enddo if( .not.regional ) then @@ -860,6 +882,8 @@ subroutine read_grid_file(regional) allocate(dxc(isd:ied+1,jsd:jed,ntiles)) allocate(dyc(isd:ied,jsd:jed+1,ntiles)) do t = 1, ntiles + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J,G1,G2) do j=jsd,jed do i=isd+1,ied g1(1) = geolon_c(i,j,t); g1(2) = geolat_c(i,j,t) @@ -869,7 +893,9 @@ subroutine read_grid_file(regional) dxc(isd,j,t) = dxc(isd+1,j,t) dxc(ied+1,j,t) = dxc(ied,j,t) enddo +!$OMP END PARALLEL DO +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J,G1,G2) do j=jsd+1,jed do i=isd,ied g1(1) = geolon_c(i,j,t); g1(2) = geolat_c(i,j,t) @@ -877,15 +903,20 @@ subroutine read_grid_file(regional) dyc(i,j,t) = great_circle_dist(g1, g2, radius) enddo enddo +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I) do i=isd,ied dyc(i,jsd,t) = dyc(i,jsd+1,t) dyc(i,jed+1,t) = dyc(i,jed,t) end do +!$OMP END PARALLEL DO enddo !--- compute area allocate(area(isd:ied,jsd:jed,ntiles)) do t = 1, ntiles +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J,p_lL,p_uL,p_lr,p_uR) do j=js,je do i=is,ie p_lL(1) = geolon_c(i ,j ,t) ; p_lL(2) = geolat_c(i ,j ,t) @@ -897,6 +928,7 @@ subroutine read_grid_file(regional) area(i,j,t) = get_area(p_lL, p_uL, p_lR, p_uR, radius) enddo enddo +!$OMP END PARALLEL DO enddo if( .not.regional ) then @@ -918,6 +950,8 @@ subroutine read_grid_file(regional) ! | | ! 6---2---7 do t = 1, ntiles + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J,G1) do j=js,je+1 do i = is,ie+1 g1(1) = geolon_c(i,j,t) @@ -925,6 +959,9 @@ subroutine read_grid_file(regional) call latlon2xyz(g1, grid3(:,i,j)) enddo enddo +!$OMP END PARALLEL DO + +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J,G1,P1,P3) do j=js,je do i=is,ie g1(1) = geolon_t(i,j,t); g1(2) = geolat_t(i,j,t) @@ -939,13 +976,16 @@ subroutine read_grid_file(regional) cos_sg(4,i,j) = cos_angle( p1, grid3(1,i,j+1), p3 ) enddo enddo +!$OMP END PARALLEL DO do ip=1,4 +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,J) do j=js,je do i=is,ie sin_sg(ip,i,j,t) = min(1.0, sqrt( max(0., 1.-cos_sg(ip,i,j)**2) ) ) enddo enddo +!$OMP END PARALLEL DO enddo enddo diff --git a/sorc/orog_mask_tools.fd/orog_gsl.fd/CMakeLists.txt b/sorc/orog_mask_tools.fd/orog_gsl.fd/CMakeLists.txt index 8704e4d54..e37889f46 100644 --- a/sorc/orog_mask_tools.fd/orog_gsl.fd/CMakeLists.txt +++ b/sorc/orog_mask_tools.fd/orog_gsl.fd/CMakeLists.txt @@ -26,6 +26,10 @@ target_link_libraries( PUBLIC NetCDF::NetCDF_Fortran) +if(OpenMP_Fortran_FOUND) + target_link_libraries(orog_gsl_lib PUBLIC OpenMP::OpenMP_Fortran) +endif() + target_link_libraries(${exe_name} PRIVATE orog_gsl_lib) install(TARGETS ${exe_name}) diff --git a/sorc/orog_mask_tools.fd/orog_gsl.fd/gsl_oro_data.f90 b/sorc/orog_mask_tools.fd/orog_gsl.fd/gsl_oro_data.f90 index e629dd2de..4e36ee9da 100644 --- a/sorc/orog_mask_tools.fd/orog_gsl.fd/gsl_oro_data.f90 +++ b/sorc/orog_mask_tools.fd/orog_gsl.fd/gsl_oro_data.f90 @@ -35,12 +35,13 @@ !! @return 0 for success, error code otherwise. 
program gsl_oro_data +use omp_lib + use gsl_oro_data_sm_scale, only: calc_gsl_oro_data_sm_scale use gsl_oro_data_lg_scale, only: calc_gsl_oro_data_lg_scale implicit none - character(len=2) :: tile_num ! tile number entered by user character(len=7) :: res_indx ! grid-resolution index, e.g., 96, 192, 384, 768, ! etc. entered by user @@ -49,7 +50,7 @@ program gsl_oro_data logical :: duplicate_oro_data_file ! flag for whether oro_data_ls file is a duplicate ! of oro_data_ss due to minimum grid size being less than 7.5km - +integer :: tid, nthreads ! Read in FV3GFS grid info print * @@ -67,6 +68,13 @@ program gsl_oro_data print *, "Halo = ", halo print * +!$OMP PARALLEL PRIVATE(TID) + tid = omp_get_thread_num() + if (tid==0) then + nthreads = omp_get_num_threads() + print*,'Number of threads = ', nthreads + endif +!$OMP END PARALLEL call calc_gsl_oro_data_sm_scale(tile_num,res_indx,halo,duplicate_oro_data_file) diff --git a/sorc/orog_mask_tools.fd/orog_gsl.fd/module_gsl_oro_data_lg_scale.f90 b/sorc/orog_mask_tools.fd/orog_gsl.fd/module_gsl_oro_data_lg_scale.f90 index f2d07d138..a9cee90a2 100644 --- a/sorc/orog_mask_tools.fd/orog_gsl.fd/module_gsl_oro_data_lg_scale.f90 +++ b/sorc/orog_mask_tools.fd/orog_gsl.fd/module_gsl_oro_data_lg_scale.f90 @@ -349,11 +349,15 @@ subroutine calc_gsl_oro_data_lg_scale(tile_num,res_indx,halo) ! ol1,...,ol4 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -cell_count = 1 - +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,CELL_COUNT,DLTA_LAT,DLTA_LON) & +!$OMP PRIVATE(I_BLK,J_BLK,LON_BLK,LAT_BLK,S_II,S_JJ,E_II,E_JJ,II_M,JJ_M) & +!$OMP PRIVATE(HGT_M_COARSE_ON_FINE,JJ,JJ_LOC,II,II_LOC,ZS,ZS_ACCUM,ZS_MEAN) & +!$OMP PRIVATE(HGT_M_COARSE,SUM2,SUM4,NFINEPOINTS,VAR,NU,ND,RATIO,NW,NT) do j = 1,dimY_FV3 do i = 1,dimX_FV3 + cell_count = ( (j-1) * dimX_FV3 ) + i + ! Calculate approximate side-lengths of square lat-long "coarse" grid ! cell centered on FV3 cell (units = radians) dlta_lat = sqrt(area_FV3(i,j))/ae @@ -552,7 +556,6 @@ subroutine calc_gsl_oro_data_lg_scale(tile_num,res_indx,halo) OL4(cell_count) = 0._real_kind if ( detrend_topography ) deallocate (HGT_M_coarse_on_fine) deallocate(zs) - cell_count = cell_count + 1 cycle ! move on to next (coarse) grid cell end if @@ -784,17 +787,12 @@ subroutine calc_gsl_oro_data_lg_scale(tile_num,res_indx,halo) OL4(cell_count) = 0._real_kind end if - - if ( detrend_topography ) deallocate (HGT_M_coarse_on_fine) deallocate (zs) - cell_count = cell_count + 1 - end do ! j = 1,dimY_FV3 end do ! i = 1,dimX_FV3 - - +!$OMP END PARALLEL DO ! ! Output GWD statistics fields to netCDF file diff --git a/sorc/orog_mask_tools.fd/orog_gsl.fd/module_gsl_oro_data_sm_scale.f90 b/sorc/orog_mask_tools.fd/orog_gsl.fd/module_gsl_oro_data_sm_scale.f90 index d91238375..067101894 100644 --- a/sorc/orog_mask_tools.fd/orog_gsl.fd/module_gsl_oro_data_sm_scale.f90 +++ b/sorc/orog_mask_tools.fd/orog_gsl.fd/module_gsl_oro_data_sm_scale.f90 @@ -119,7 +119,6 @@ subroutine calc_gsl_oro_data_sm_scale(tile_num,res_indx,halo, & logical :: fexist - print *, "Creating oro_data_ss file" print * @@ -365,11 +364,15 @@ subroutine calc_gsl_oro_data_sm_scale(tile_num,res_indx,halo, & ! ol1,...,ol4 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
-cell_count = 1 - +!$OMP PARALLEL DO DEFAULT(SHARED) PRIVATE(I,DLTA_LAT,DLTA_LON) & +!$OMP PRIVATE(I_BLK,J_BLK,S_II,S_JJ,E_II,E_JJ,LON_BLK,LAT_BLK,II_M,JJ_M) & +!$OMP PRIVATE(ZS,II,JJ,II_LOC,JJ_LOC,SUM2,NFINEPOINTS,CELL_COUNT,ZS_ACCUM,ZS_MEAN) & +!$OMP PRIVATE(SUM4,VAR,NU,ND,RATIO,NW,NT) do j = 1,dimY_FV3 do i = 1,dimX_FV3 + cell_count = ( (j-1) * dimX_FV3 ) + i + ! Calculate approximate side-lengths of square lat-long "coarse" grid ! cell centered on FV3 cell (units = radians) dlta_lat = sqrt(area_FV3(i,j))/ae @@ -499,7 +502,6 @@ subroutine calc_gsl_oro_data_sm_scale(tile_num,res_indx,halo, & OL3(cell_count) = 0._real_kind OL4(cell_count) = 0._real_kind deallocate(zs) - cell_count = cell_count + 1 cycle ! move on to next (coarse) grid cell end if @@ -527,7 +529,6 @@ subroutine calc_gsl_oro_data_sm_scale(tile_num,res_indx,halo, & end do std_dev(cell_count) = sqrt( sum2/real(nfinepoints,real_kind) ) - ! ! Calculate convexity of sub-grid-scale terrain ! @@ -731,16 +732,11 @@ subroutine calc_gsl_oro_data_sm_scale(tile_num,res_indx,halo, & OL4(cell_count) = 0._real_kind end if - - deallocate (zs) - cell_count = cell_count + 1 - end do ! j = 1,dimY_FV3 end do ! i = 1,dimX_FV3 - - +!$OMP END PARALLEL DO ! ! Output GWD statistics fields to netCDF file @@ -1283,6 +1279,4 @@ subroutine netcdf_err(err,string) return end subroutine netcdf_err - - end module gsl_oro_data_sm_scale From 77622d0dfd13316c97b7b7faae87beef636e9a40 Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Fri, 24 May 2024 10:27:30 -0400 Subject: [PATCH 23/25] orog.fd - Remove binary output option and logic for reduced grid. (#949) In the FV3 era, the 'orog' files are tiled NetCDF. Remove the binary and GRIB1 output options, which are no longer used. Remove logic for the 'reduced' gaussian grid, which is left over from the spectral GFS era. Includes some other minor cleanup of obsolete code. Fixes #940. 
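For context on the retired logic: the 'reduced' gaussian grid carried a
per-row point count, numi(j) <= IM, and helpers such as rg2gg/gg2rg
mapped each row between the reduced and full representations by
nearest-neighbour indexing. The fragment below is a minimal,
illustrative sketch of that mapping only (the row length and the value
of numi are made up; it is not part of this change). On the FV3 tiled
grids every row is full, numi(j) == IM, so the mapping degenerates to a
straight copy and the helpers, along with the lonsperlat read, can be
removed.

      ! Illustrative sketch of the retired reduced-grid row expansion.
      ! All values are hypothetical; on FV3 tiles numi == im always.
      program reduced_grid_sketch
      implicit none
      integer, parameter :: im = 8          ! full row length
      integer :: numi, ig, ir
      real    :: r, row_reduced(im), row_full(im)

      numi = 6                              ! points stored on this row
      row_reduced = 0.
      do ir = 1, numi
         row_reduced(ir) = real(ir)
      enddo

      ! Nearest-neighbour expansion, as performed by the removed rg2gg.
      r = real(numi)/real(im)
      do ig = 1, im
         ir = mod(nint((ig-1)*r), numi) + 1
         row_full(ig) = row_reduced(ir)
      enddo
      print *, 'full row: ', row_full
      end program reduced_grid_sketch

When numi equals im the index ir always equals ig, which is why the
full-grid initialization (numi(j)=im) left in TERSUB makes the
conversion a no-op.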
--- .../orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F | 452 ++---------------- 1 file changed, 33 insertions(+), 419 deletions(-) diff --git a/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F b/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F index f05c39aa5..041c9be5b 100644 --- a/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F +++ b/sorc/orog_mask_tools.fd/orog.fd/mtnlm7_oclsm.F @@ -182,14 +182,13 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, integer :: mskocn,notocn integer :: i,j,nx,ny,ncid,js,jn,iw,ie,k,it,jt,error,id_dim integer :: id_var,nx_in,ny_in,fsize,wgta,IN,INW,INE,IS,ISW,ISE - integer :: M,N,IMT,IRET,ios,latg2,istat,itest,jtest + integer :: M,N,ios,istat,itest,jtest integer :: i_south_pole,j_south_pole,i_north_pole,j_north_pole integer :: maxc3,maxc4,maxc5,maxc6,maxc7,maxc8 integer(1) :: i3save integer(2) :: i2save - integer, allocatable :: JST(:),JEN(:),KPDS(:),KGDS(:),numi(:) - integer, allocatable :: lonsperlat(:) + integer, allocatable :: JST(:),JEN(:),numi(:) integer, allocatable :: IST(:,:),IEN(:,:),ZSLMX(:,:) integer, allocatable :: ZAVG(:,:),ZSLM(:,:) @@ -199,7 +198,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, integer, allocatable :: IWORK(:,:,:) real :: DEGRAD,maxlat, minlat,timef,tbeg,tend,tbeg1 - real :: PHI,DELXN,RS,RN,slma,oroa,vara,var4a,xn,XS,FFF,WWW + real :: PHI,DELXN,slma,oroa,vara,var4a,xn,XS,FFF,WWW real :: sumdif,avedif real, allocatable :: COSCLT(:),WGTCLT(:),RCLT(:),XLAT(:),DIFFX(:) @@ -220,22 +219,15 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, real, allocatable :: OA(:,:,:),OL(:,:,:),HPRIME(:,:,:) real, allocatable :: oa_in(:,:,:), ol_in(:,:,:) - - complex :: ffj(im/2+1) - - logical :: grid_from_file,output_binary,fexist,opened + logical :: grid_from_file,fexist,opened logical :: SPECTR, FILTER - logical :: is_south_pole(IM,JM), is_north_pole(IM,JM) - logical :: LB(IM*JM) - output_binary = .false. tbeg1=timef() tbeg=timef() fsize = 65536 ! integers - allocate (JST(JM),JEN(JM),KPDS(200),KGDS(200),numi(jm)) - allocate (lonsperlat(jm/2)) + allocate (JST(JM),JEN(JM),numi(jm)) allocate (IST(IM,jm),IEN(IM,jm),ZSLMX(2700,1350)) allocate (glob(IMN,JMN)) @@ -361,27 +353,11 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, ! --- The center of pixel (1,1) is 89.9958333N/179.9958333W with dx/dy ! --- spacing of 1/120 degrees. ! -! READ REDUCED GRID EXTENTS IF GIVEN -! - read(20,*,iostat=ios) latg2,lonsperlat - if(ios.ne.0.or.2*latg2.ne.jm) then - do j=1,jm - numi(j)=im - enddo - print *,ios,latg2,'COMPUTE TERRAIN ON A FULL GAUSSIAN GRID' - else - do j=1,jm/2 - numi(j)=lonsperlat(j) - enddo - do j=jm/2+1,jm - numi(j)=lonsperlat(jm+1-j) - enddo - print *,ios,latg2,'COMPUTE TERRAIN ON A REDUCED GAUSSIAN GRID', - & numi -C print *,ios,latg2,'COMPUTE TERRAIN ON A REDUCED GAUSSIAN GRID' - endif -! print *,ios,latg2,'TERRAIN ON GAUSSIAN GRID',numi - +! When the gaussian grid routines makemt, makepc and makeoa are +! removed, numi can be removed. + do j=1,jm + numi(j)=im + enddo ! ! This code assumes that lat runs from north to south for gg! ! @@ -506,7 +482,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, ! --- remember, that lake mask is in zslm to be assigned in MAKEMT. if ( mskocn .eq. 1 ) then DO J = 1,JM - DO I = 1,numi(j) + DO I = 1,IM if ( notocn .eq. 0 ) then slmi(i,j) = float(NINT(OCLSM(i,j))) else @@ -749,15 +725,10 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, call minmxj(IM,JM,SLM,' SLM') call minmxj(IM,JM,VAR,' VAR') call minmxj(IM,JM,VAR4,' VAR4') -C --- check for nands in above -! 
call nanc(ORO,IM*JM,"MAKEMT_ORO") -! call nanc(SLM,IM*JM,"MAKEMT_SLM") -! call nanc(VAR,IM*JM,"MAKEMT_VAR") -! call nanc(VAR4,IM*JM,"MAKEMT_VAR4") ! C check antarctic pole ! DO J = 1,JM -! DO I = 1,numi(j) +! DO I = 1,IM ! if ( i .le. 100 .and. i .ge. 1 )then ! if (j .ge. JM-1 )then ! if (height .eq. 0.) print *,'I,J,SLM:',I,J,SLM(I,J) @@ -785,10 +756,6 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, call minmxj(IM,JM,GAMMA,' GAMMA') call minmxj(IM,JM,SIGMA,' SIGMA') -C --- check for nands in above -! call nanc(THETA,IM*JM,"MAKEPC_THETA") -! call nanc(GAMMA,IM*JM,"MAKEPC_GAMMA") -! call nanc(SIGMA,IM*JM,"MAKEPC_SIGMA") C C COMPUTE MOUNTAIN DATA : OA OL C @@ -943,16 +910,6 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, call minmxj(IM,JM,OL,' OL') call minmxj(IM,JM,ELVMAX,' ELVMAX') call minmxj(IM,JM,ORO,' ORO') -C --- check for nands in above -! call nanc(OA(1,1,1), IM*JM,"MAKEOA_OA(1,1,1)") -! call nanc(OA(1,1,2), IM*JM,"MAKEOA_OA(1,1,2)") -! call nanc(OA(1,1,3), IM*JM,"MAKEOA_OA(1,1,3)") -! call nanc(OA(1,1,4), IM*JM,"MAKEOA_OA(1,1,4)") -! call nanc(OL(1,1,1), IM*JM,"MAKEOA_OL(1,1,1)") -! call nanc(OL(1,1,2), IM*JM,"MAKEOA_OL(1,1,2)") -! call nanc(OL(1,1,3), IM*JM,"MAKEOA_OL(1,1,3)") -! call nanc(OL(1,1,4), IM*JM,"MAKEOA_OL(1,1,4)") -! call nanc(ELVMAX, IM*JM,"MAKEPC_ELVMAX") maxc3 = 0 maxc4 = 0 @@ -961,7 +918,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, maxc7 = 0 maxc8 = 0 DO J = 1,JM - DO I = 1,numi(j) + DO I = 1,IM if (ELVMAX(I,J) .gt. 3000.) maxc3 = maxc3 +1 if (ELVMAX(I,J) .gt. 4000.) maxc4 = maxc4 +1 if (ELVMAX(I,J) .gt. 5000.) maxc5 = maxc5 +1 @@ -979,7 +936,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, print *,' ===> if ELVMAX<=ORO replace with proxy <=== ' print *,' ===> the sum of mean orog (ORO) and std dev <=== ' DO J = 1,JM - DO I = 1,numi(j) + DO I = 1,IM if (ELVMAX(I,J) .lt. ORO(I,J) ) then C--- subtracting off ORO leaves std dev (this should never happen) ELVMAX(I,J) = MAX( 3. * VAR(I,J),0.) @@ -995,7 +952,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, maxc7 = 0 maxc8 = 0 DO J = 1,JM - DO I = 1,numi(j) + DO I = 1,IM if (ELVMAX(I,J) .gt. 3000.) maxc3 = maxc3 +1 if (ELVMAX(I,J) .gt. 4000.) maxc4 = maxc4 +1 if (ELVMAX(I,J) .gt. 5000.) maxc5 = maxc5 +1 @@ -1015,7 +972,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, print *,' SLM(itest,jtest)=',slm(itest,jtest),itest,jtest print *,' ORO(itest,jtest)=',oro(itest,jtest),itest,jtest DO J = 1,JM - DO I = 1,numi(j) + DO I = 1,IM IF(SLM(I,J).EQ.0.) THEN C VAR(I,J) = 0. VAR4(I,J) = 0. @@ -1051,7 +1008,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, MSK_OCN : if ( mskocn .eq. 1 ) then DO j = 1,jm - DO i = 1,numi(j) + DO i = 1,im if (abs (oro(i,j)) .lt. 1. 
) then slm(i,j) = slmi(i,j) else @@ -1075,9 +1032,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, iso_loop : DO J=2,JM-1 JN=J-1 JS=J+1 - RN=REAL(NUMI(JN))/REAL(NUMI(J)) - RS=REAL(NUMI(JS))/REAL(NUMI(J)) - DO I=1,NUMI(J) + DO I=1,IM IW=MOD(I+IM-2,IM)+1 IE=MOD(I,IM)+1 SLMA=SLM(IW,J)+SLM(IE,J) @@ -1090,11 +1045,11 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, OLA(K)=OL(IW,J,K)+OL(IE,J,K) ENDDO WGTA=2 - XN=RN*(I-1)+1 + XN=(I-1)+1 IF(ABS(XN-NINT(XN)).LT.1.E-2) THEN - IN=MOD(NINT(XN)-1,NUMI(JN))+1 - INW=MOD(IN+NUMI(JN)-2,NUMI(JN))+1 - INE=MOD(IN,NUMI(JN))+1 + IN=MOD(NINT(XN)-1,IM)+1 + INW=MOD(IN+IM-2,IM)+1 + INE=MOD(IN,IM)+1 SLMA=SLMA+SLM(INW,JN)+SLM(IN,JN)+SLM(INE,JN) OROA=OROA+ORO(INW,JN)+ORO(IN,JN)+ORO(INE,JN) VARA=VARA+VAR(INW,JN)+VAR(IN,JN)+VAR(INE,JN) @@ -1106,7 +1061,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, WGTA=WGTA+3 ELSE INW=INT(XN) - INE=MOD(INW,NUMI(JN))+1 + INE=MOD(INW,IM)+1 SLMA=SLMA+SLM(INW,JN)+SLM(INE,JN) OROA=OROA+ORO(INW,JN)+ORO(INE,JN) VARA=VARA+VAR(INW,JN)+VAR(INE,JN) @@ -1117,11 +1072,11 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, ENDDO WGTA=WGTA+2 ENDIF - XS=RS*(I-1)+1 + XS=(I-1)+1 IF(ABS(XS-NINT(XS)).LT.1.E-2) THEN - IS=MOD(NINT(XS)-1,NUMI(JS))+1 - ISW=MOD(IS+NUMI(JS)-2,NUMI(JS))+1 - ISE=MOD(IS,NUMI(JS))+1 + IS=MOD(NINT(XS)-1,IM)+1 + ISW=MOD(IS+IM-2,IM)+1 + ISE=MOD(IS,IM)+1 SLMA=SLMA+SLM(ISW,JS)+SLM(IS,JS)+SLM(ISE,JS) OROA=OROA+ORO(ISW,JS)+ORO(IS,JS)+ORO(ISE,JS) VARA=VARA+VAR(ISW,JS)+VAR(IS,JS)+VAR(ISE,JS) @@ -1133,7 +1088,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, WGTA=WGTA+3 ELSE ISW=INT(XS) - ISE=MOD(ISW,NUMI(JS))+1 + ISE=MOD(ISW,IM)+1 SLMA=SLMA+SLM(ISW,JS)+SLM(ISE,JS) OROA=OROA+ORO(ISW,JS)+ORO(ISE,JS) VARA=VARA+VAR(ISW,JS)+VAR(ISE,JS) @@ -1179,7 +1134,6 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, C--- print for testing after isolated points removed print *,' after isolated points removed' call minmxj(IM,JM,ORO,' ORO') -C print *,' JM=',JM,' numi=',numi print *,' ORO(itest,jtest)=',oro(itest,jtest) print *,' VAR(itest,jtest)=',var(itest,jtest) print *,' VAR4(itest,jtest)=',var4(itest,jtest) @@ -1202,7 +1156,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, C DO J=1,JM - DO I=1,numi(j) + DO I=1,IM ORO(I,J) = ORO(I,J) + EFAC*VAR(I,J) HPRIME(I,J,1) = VAR(I,J) HPRIME(I,J,2) = VAR4(I,J) @@ -1235,13 +1189,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, print *,' NF1, NF0, FILTER=',NF1,NF0,FILTER IF (FILTER) THEN C SPECTRALLY TRUNCATE AND FILTER OROGRAPHY - do j=1,jm - if(numi(j).lt.im) then - ffj=cmplx(0.,0.) - call spfft1(numi(j),im/2+1,numi(j),1,ffj,oro(1,j),-1) - call spfft1(im,im/2+1,im,1,ffj,oro(1,j),+1) - endif - enddo + CALL SPTEZ(NR,NM,4,IM,JM,ORS,ORO,-1) ! print *,' about to apply spectral filter ' @@ -1259,12 +1207,6 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, ENDDO ! CALL SPTEZ(NR,NM,4,IM,JM,ORS,ORF,+1) - do j=1,jm - if(numi(j).lt.im) then - call spfft1(im,im/2+1,im,1,ffj,orf(1,j),-1) - call spfft1(numi(j),im/2+1,numi(j),1,ffj,orf(1,j),+1) - endif - enddo ELSE ORS=0. 
@@ -1279,15 +1221,6 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, call minmxj(IM,JM,ORO,' ORO') call minmxj(IM,JM,ORF,' ORF') C -C USE NEAREST NEIGHBOR INTERPOLATION TO FILL FULL GRIDS - call rg2gg(im,jm,numi,slm) - call rg2gg(im,jm,numi,oro) - call rg2gg(im,jm,numi,orf) -C --- not apply to new prin coord and ELVMAX (*j*) - do imt=1,10 - call rg2gg(im,jm,numi,hprime(1,1,imt)) - enddo -C print *,' after nearest neighbor interpolation applied ' call minmxj(IM,JM,ORO,' ORO') call minmxj(IM,JM,ORF,' ORF') @@ -1299,7 +1232,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, C check antarctic pole DO J = 1,JM - DO I = 1,numi(j) + DO I = 1,IM if ( i .le. 21 .and. i .ge. 1 )then if (j .eq. JM )write(6,153)i,j,ORO(i,j),ELVMAX(i,j),SLM(i,j) 153 format(1x,' ORO,ELVMAX(i=',i4,' j=',i4,')=',2E14.5,f5.1) @@ -1308,130 +1241,6 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, ENDDO tend=timef() write(6,*)' Timer 5 time= ',tend-tbeg - if (output_binary) then - tbeg=timef() -C OUTPUT BINARY FIELDS - print *,' OUTPUT BINARY FIELDS' - WRITE(51) REAL(SLM,4) - WRITE(52) REAL(ORF,4) - WRITE(53) REAL(HPRIME,4) - WRITE(54) REAL(ORS,4) - WRITE(55) REAL(ORO,4) - WRITE(66) REAL(THETA,4) - WRITE(67) REAL(GAMMA,4) - WRITE(68) REAL(SIGMA,4) -! --- OCLSM is real(4) write only if ocean mask is present - if ( mskocn .eq. 1 ) then - ios=0 - WRITE(27,iostat=ios) OCLSM - print *,' write OCLSM input:',ios -! print *,' LSM:',OCLSM(1,1),OCLSM(50,50),OCLSM(75,75),OCLSM(IM,JM) - endif -C - call minmxj(IM,JM,ORO,' ORO') - print *,' IM=',IM,' JM=',JM,' SPECTR=',SPECTR -C--- Test binary file output: - WRITE(71) REAL(SLM,4) - DO IMT=1,NMT - WRITE(71) REAL(HPRIME(:,:,IMT),4) - print *,' HPRIME(',itest,jtest,imt,')=',HPRIME(itest,jtest,imt) - ENDDO - WRITE(71) REAL(ORO,4) - IF (SPECTR) THEN - WRITE(71) REAL(ORF,4) ! smoothed spectral orography! - ENDIF -C OUTPUT GRIB FIELDS - KPDS=0 - KPDS(1)=7 - KPDS(2)=78 - KPDS(3)=255 - KPDS(4)=128 - KPDS(5)=81 - KPDS(6)=1 - kpds(8)=2004 - KPDS(9)=1 - KPDS(10)=1 - KPDS(13)=4 - KPDS(15)=1 - KPDS(16)=51 - KPDS(17)=1 - KPDS(18)=1 - KPDS(19)=1 - KPDS(21)=20 - KPDS(22)=0 - KGDS=0 - KGDS(1)=4 - KGDS(2)=IM - KGDS(3)=JM - KGDS(4)=90000-180000/PI*RCLT(1) - KGDS(6)=128 - KGDS(7)=180000/PI*RCLT(1)-90000 - KGDS(8)=-NINT(360000./IM) - KGDS(9)=NINT(360000./IM) - KGDS(10)=JM/2 - KGDS(20)=255 -! --- SLM - CALL BAOPEN(56,'fort.56',IRET) - if (iret .ne. 0) print *,' BAOPEN ERROR UNIT 56: IRET=',IRET - CALL PUTGB(56,IM*JM,KPDS,KGDS,LB,SLM,IRET) - print *,' SLM: putgb-KPDS(22,5),iret:',KPDS(22),KPDS(5),IRET - if (iret .ne. 0) print *,' SLM PUTGB ERROR: UNIT 56: IRET=',IRET - print *,' SLM: putgb-KPDS(22,5),iret:',KPDS(22),KPDS(5),IRET -! --- OCLSM if present -! if ( mskocn .eq. 1 ) then -! CALL BAOPEN(27,'fort.27',IRET) -! if (iret .ne. 0) print *,' OCLSM BAOPEN ERROR UNIT 27:IRET=',IRET -! CALL PUTGB(27,IM*JM,KPDS,KGDS,LB,OCLSM,IRET) -! if (iret .ne. 0) print *,' OCLSM PUTGB ERROR: UNIT 27:IRET=',IRET -! print *,' OCLSM: putgb-KPDS(22,5),iret:',KPDS(22),KPDS(5),IRET -! 
endif - - KPDS(5)=8 - IF (SPECTR) THEN - CALL BAOPEN(57,'fort.57',IRET) - CALL PUTGB(57,IM*JM,KPDS,KGDS,LB,ORF,IRET) - print *,' ORF (ORO): putgb-KPDS(22,5),iret:',KPDS(22),KPDS(5),IRET - ENDIF -C -C === write out theta (angle of land to East) using #101 (wave dir) -C === [radians] and since < 1 scale adjust kpds(22) -C - KPDS(5)=101 - CALL BAOPEN(58,'fort.58',IRET) - CALL PUTGB(58,IM*JM,KPDS,KGDS,LB,THETA,IRET) - print *,' THETA: putgb-KPDS(22,5),iret:',KPDS(22),KPDS(5),IRET -C -C === write out (land aspect ratio or anisotropy) using #102 -C === (as in wind wave hgt) -C - KPDS(22)=2 - KPDS(5)=102 - CALL BAOPEN(60,'fort.60',IRET) - CALL PUTGB(60,IM*JM,KPDS,KGDS,LB,SIGMA,IRET) - print *,' SIGMA: putgb-KPDS(22,5),iret:',KPDS(22),KPDS(5),IRET -C -C === write out (slope parameter sigma) using #9 -C === (as in std hgt) -C - KPDS(22)=1 - KPDS(5)=103 - CALL BAOPEN(59,'fort.59',IRET) - CALL PUTGB(59,IM*JM,KPDS,KGDS,LB,GAMMA,IRET) - print *,' GAMMA: putgb-KPDS(22,5),iret:',KPDS(22),KPDS(5),IRET -C - KPDS(22)=1 - KPDS(5)=9 - CALL BAOPEN(61,'fort.61',IRET) - CALL PUTGB(61,IM*JM,KPDS,KGDS,LB,HPRIME,IRET) - print *,' HPRIME: putgb-KPDS(22,5),iret:',KPDS(22),KPDS(5),IRET -C -C - KPDS(22)=0 - KPDS(5)=8 - CALL BAOPEN(62,'fort.62',IRET) - CALL PUTGB(62,IM*JM,KPDS,KGDS,LB,ELVMAX,IRET) - print *,' ELVMAX: putgb-KPDS(22,5),iret:',KPDS(22),KPDS(5),IRET - endif ! output_binary C DELXN = 360./IM do i=1,im @@ -1452,8 +1261,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, xlon(i) = geolon(i,1) enddo endif - tend=timef() - write(6,*)' Binary output time= ',tend-tbeg + tbeg=timef() CALL WRITE_NETCDF(IM,JM,SLM,land_frac,ORO,ORF,HPRIME,1,1, 1 GEOLON(1:IM,1:JM),GEOLAT(1:IM,1:JM), XLON,XLAT) @@ -1464,7 +1272,7 @@ SUBROUTINE TERSUB(IMN,JMN,IM,JM,NM,NR,NF0,NF1,NW,EFAC, print *,' ===== Deallocate Arrays and ENDING MTN VAR OROG program' ! Deallocate 1d vars - deallocate(JST,JEN,KPDS,KGDS,numi,lonsperlat) + deallocate(JST,JEN,numi) deallocate(COSCLT,WGTCLT,RCLT,XLAT,DIFFX,XLON,ORS,oaa,ola,GLAT) ! Deallocate 2d vars @@ -4011,58 +3819,6 @@ SUBROUTINE MAKEOA3(ZAVG,VAR,GLAT,OA4,OL,IOA4,ELVMAX, RETURN END SUBROUTINE MAKEOA3 -!> Convert from a reduced grid to a full grid. -!! -!! @param[in] im 'i' dimension of the full grid. -!! @param[in] jm 'j' dimension of the full grid. -!! @param[in] numi Number of 'i' points for each -!! row of the reduced grid. -!! @param[inout] a The data to be converted. -!! @author Jordan Alpert NOAA/EMC - subroutine rg2gg(im,jm,numi,a) - implicit none - integer,intent(in):: im,jm,numi(jm) - real,intent(inout):: a(im,jm) - integer j,ir,ig - real r,t(im) - do j=1,jm - r=real(numi(j))/real(im) - do ig=1,im - ir=mod(nint((ig-1)*r),numi(j))+1 - t(ig)=a(ir,j) - enddo - do ig=1,im - a(ig,j)=t(ig) - enddo - enddo - end subroutine - -!> Convert from a full grid to a reduced grid. -!! -!! @param[in] im 'i' dimension of the full grid. -!! @param[in] jm 'j' dimension of the full grid. -!! @param[in] numi Number of 'i' points for each -!! row of the reduced grid. -!! @param[inout] a The data to be converted. -!! @author Jordan Alpert NOAA/EMC - subroutine gg2rg(im,jm,numi,a) - implicit none - integer,intent(in):: im,jm,numi(jm) - real,intent(inout):: a(im,jm) - integer j,ir,ig - real r,t(im) - do j=1,jm - r=real(numi(j))/real(im) - do ir=1,numi(j) - ig=nint((ir-1)/r)+1 - t(ir)=a(ig,j) - enddo - do ir=1,numi(j) - a(ir,j)=t(ir) - enddo - enddo - end subroutine - !> Print out the maximum and minimum values of !! an array. !! 
@@ -4134,85 +3890,6 @@ SUBROUTINE mnmxja(IM,JM,A,imax,jmax,title) RETURN END -!> Perform multiple fast fourier transforms. -!! -!! This subprogram performs multiple fast fourier transforms -!! between complex amplitudes in fourier space and real values -!! in cyclic physical space. -!! -!! Subprograms called (NCEPLIB SP Library): -!! - scrft Complex to real fourier transform -!! - dcrft Complex to real fourier transform -!! - srcft Real to complex fourier transform -!! - drcft Real to complex fourier transform -!! -!! Program history log: -!! 1998-12-18 Mark Iredell -!! -!! @param[in] imax Integer number of values in the cyclic physical -!! space. See limitations on imax in remarks below. -!! @param[in] incw Integer first dimension of the complex amplitude array. -!! (incw >= imax/2+1). -!! @param[in] incg Integer first dimension of the real value array. -!! (incg >= imax). -!! @param[in] kmax Integer number of transforms to perform. -!! @param[in] w Complex amplitudes on input if idir>0, and on output -!! if idir<0. -!! @param[in] g Real values on input if idir<0, and on output if idir>0. -!! @param[in] idir Integer direction flag. idir>0 to transform from -!! fourier to physical space. idir<0 to transform from physical to -!! fourier space. -!! -!! @note The restrictions on imax are that it must be a multiple -!! of 1 to 25 factors of two, up to 2 factors of three, -!! and up to 1 factor of five, seven and eleven. -!! -!! @author Mark Iredell ORG: W/NMC23 @date 96-02-20 - SUBROUTINE SPFFT1(IMAX,INCW,INCG,KMAX,W,G,IDIR) - IMPLICIT NONE - INTEGER,INTENT(IN):: IMAX,INCW,INCG,KMAX,IDIR - COMPLEX,INTENT(INOUT):: W(INCW,KMAX) - REAL,INTENT(INOUT):: G(INCG,KMAX) - REAL:: AUX1(25000+INT(0.82*IMAX)) - REAL:: AUX2(20000+INT(0.57*IMAX)) - INTEGER:: NAUX1,NAUX2 -C - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - NAUX1=25000+INT(0.82*IMAX) - NAUX2=20000+INT(0.57*IMAX) -C - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -C FOURIER TO PHYSICAL TRANSFORM. - SELECT CASE(IDIR) - CASE(1:) - SELECT CASE(DIGITS(1.)) - CASE(DIGITS(1._4)) - CALL SCRFT(1,W,INCW,G,INCG,IMAX,KMAX,-1,1., - & AUX1,NAUX1,AUX2,NAUX2,0.,0) - CALL SCRFT(0,W,INCW,G,INCG,IMAX,KMAX,-1,1., - & AUX1,NAUX1,AUX2,NAUX2,0.,0) - CASE(DIGITS(1._8)) - CALL DCRFT(1,W,INCW,G,INCG,IMAX,KMAX,-1,1., - & AUX1,NAUX1,AUX2,NAUX2,0.,0) - CALL DCRFT(0,W,INCW,G,INCG,IMAX,KMAX,-1,1., - & AUX1,NAUX1,AUX2,NAUX2,0.,0) - END SELECT -C - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -C PHYSICAL TO FOURIER TRANSFORM. - CASE(:-1) - SELECT CASE(DIGITS(1.)) - CASE(DIGITS(1._4)) - CALL SRCFT(1,G,INCG,W,INCW,IMAX,KMAX,+1,1./IMAX, - & AUX1,NAUX1,AUX2,NAUX2,0.,0) - CALL SRCFT(0,G,INCG,W,INCW,IMAX,KMAX,+1,1./IMAX, - & AUX1,NAUX1,AUX2,NAUX2,0.,0) - CASE(DIGITS(1._8)) - CALL DRCFT(1,G,INCG,W,INCW,IMAX,KMAX,+1,1./IMAX, - & AUX1,NAUX1,AUX2,NAUX2,0.,0) - CALL DRCFT(0,G,INCG,W,INCW,IMAX,KMAX,+1,1./IMAX, - & AUX1,NAUX1,AUX2,NAUX2,0.,0) - END SELECT - END SELECT - END SUBROUTINE - !> Read input global 30-arc second orography data. !! !! @param[out] glob The orography data. @@ -4532,7 +4209,6 @@ function get_xnsum(lon1,lat1,lon2,lat2,IMN,JMN, implicit none real get_xnsum - logical verbose real, intent(in) :: lon1,lat1,lon2,lat2,delxn integer, intent(in) :: IMN,JMN real, intent(in) :: glat(JMN) @@ -4559,7 +4235,6 @@ function get_xnsum(lon1,lat1,lon2,lat2,IMN,JMN, ien = lon2/delxn if(ist .le.0) ist = ist + IMN if(ien < ist) ien = ien + IMN -! 
if(verbose) print*, "ist,ien=",ist,ien,jst,jen !--- compute average oro oro = 0.0 @@ -4602,7 +4277,6 @@ function get_xnsum(lon1,lat1,lon2,lat2,IMN,JMN, IF ( HEIGHT .gt. ORO ) get_xnsum = get_xnsum + 1 enddo enddo -! if(verbose) print*, "get_xnsum=", get_xnsum, oro end function get_xnsum @@ -4638,7 +4312,6 @@ subroutine get_xnsum2(lon1,lat1,lon2,lat2,IMN,JMN, implicit none real, intent(out) :: xnsum1,xnsum2,HC - logical verbose real lon1,lat1,lon2,lat2,delxn integer IMN,JMN real glat(JMN) @@ -4665,7 +4338,6 @@ subroutine get_xnsum2(lon1,lat1,lon2,lat2,IMN,JMN, ien = lon2/delxn if(ist .le.0) ist = ist + IMN if(ien < ist) ien = ien + IMN -! if(verbose) print*, "ist,ien=",ist,ien,jst,jen !--- compute average oro xnsum = 0 @@ -4761,7 +4433,6 @@ subroutine get_xnsum3(lon1,lat1,lon2,lat2,IMN,JMN, ien = lon2/delxn if(ist .le.0) ist = ist + IMN if(ien < ist) ien = ien + IMN -! if(verbose) print*, "ist,ien=",ist,ien,jst,jen xnsum1 = 0 xnsum2 = 0 @@ -4777,63 +4448,6 @@ subroutine get_xnsum3(lon1,lat1,lon2,lat2,IMN,JMN, enddo end subroutine get_xnsum3 - -!> Report NaNS and NaNQ within an address range for -!! 8-byte real words. -!! -!! This routine prints a single line for each call -!! and prints a message and returns to the caller on -!! detection of the FIRST NaN in the range. If no NaN values -!! are found it returns silently. -!! -!! @param[in] A Real*8 variable or array -!! @param[in] L Number of words to scan (length of array) -!! @param[in] C Distinctive message set in caller to indicate where -!! the routine was called. -!! @author Jordan Alpert NOAA/EMC - subroutine nanc(a,l,c) - integer inan1,inan2,inan3,inan4,inaq1,inaq2,inaq3,inaq4 - real word - integer itest - equivalence (itest,word) -c -c signaling NaN - data inan1/x'7F800001'/ - data inan2/x'7FBFFFFF'/ - data inan3/x'FF800001'/ - data inan4/x'FFBFFFFF'/ -c -c quiet NaN -c - data inaq1/x'7FC00000'/ - data inaq2/x'7FFFFFFF'/ - data inaq3/x'FFC00000'/ - data inaq4/x'FFFFFFFF'/ -c - character*(*) c -c t1=rtc() -cgwv print *, ' nanc call ',c - do k=1,l - word=a(k) - if( (itest .GE. inan1 .AND. itest .LE. inan2) .OR. - * (itest .GE. inan3 .AND. itest .LE. inan4) ) then - print *,' NaNs detected at word',k,' ',c - return - endif - if( (itest .GE. inaq1 .AND. itest .LE. inaq2) .OR. - * (itest .GE. inaq3 .AND. itest .LE. inaq4) ) then - print *,' NaNq detected at word',k,' ',c - return - endif - - 101 format(e20.10) - end do -c t2=rtc() -cgwv print 102,l,t2-t1,c - 102 format(' time to check ',i9,' words is ',f10.4,' ',a24) - return - end - !> Get the date/time for the system clock. !! !! @author Mark Iredell From c6e032b96bc51e901f3780a624c6798ede688e2c Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Tue, 28 May 2024 08:07:29 -0400 Subject: [PATCH 24/25] global_cycle - Update version of sfcsub.F (#950) Point to latest version of sfcsub.F from the ccpp-physics repository. This version contains a bug fix in the interpolation of GLDAS data. This sfcsub.F version also updates soil color. This field is not yet processed by global_cycle, so add a dummy soil color argument. Fixes #933. 
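The dummy-argument approach is the usual way to keep a caller in step
with an upstream interface that has gained a field the caller does not
yet use: declare an array of the right shape, zero it, and pass it
through the call, as done for SOCFCS in cycle.f90 below. The fragment
that follows is only a schematic of that pattern; do_physics, old_field
and new_field are invented names, not the real SFCCYCLE/sfcsub.F
interface.

      ! Schematic of passing a placeholder field through an updated
      ! interface; names here are hypothetical, not the sfcsub.F API.
      program dummy_arg_sketch
      implicit none
      integer, parameter :: npts = 4
      real :: old_field(npts), new_field(npts)

      old_field = 1.0
      new_field = 0.0            ! placeholder, not yet processed
      call do_physics(npts, old_field, new_field)

      contains

      subroutine do_physics(n, a, b)
      integer, intent(in) :: n
      real,    intent(in) :: a(n), b(n)
      print *, 'fields received: ', sum(a), sum(b)
      end subroutine do_physics

      end program dummy_arg_sketch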
--- ccpp-physics | 2 +- sorc/global_cycle.fd/CMakeLists.txt | 2 +- sorc/global_cycle.fd/cycle.f90 | 6 ++++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/ccpp-physics b/ccpp-physics index 3a306a493..89ddce75d 160000 --- a/ccpp-physics +++ b/ccpp-physics @@ -1 +1 @@ -Subproject commit 3a306a493a9a0b6c3c39c7b50d356f0ddb7c5c94 +Subproject commit 89ddce75d4d64252f1871c095a362ad391065b89 diff --git a/sorc/global_cycle.fd/CMakeLists.txt b/sorc/global_cycle.fd/CMakeLists.txt index 7adfae13f..f52c2d86d 100644 --- a/sorc/global_cycle.fd/CMakeLists.txt +++ b/sorc/global_cycle.fd/CMakeLists.txt @@ -6,7 +6,7 @@ set(lib_src machine.f90 num_parthds.f90 - ../../ccpp-physics/physics/sfcsub.F + ../../ccpp-physics/physics/Interstitials/UFS_SCM_NEPTUNE/sfcsub.F read_write_data.f90 utils.F90 land_increments.f90) diff --git a/sorc/global_cycle.fd/cycle.f90 b/sorc/global_cycle.fd/cycle.f90 index 160c65e20..ef6221eae 100644 --- a/sorc/global_cycle.fd/cycle.f90 +++ b/sorc/global_cycle.fd/cycle.f90 @@ -368,7 +368,7 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & REAL :: VMNFCS(LENSFC), T2M(LENSFC) REAL :: Q2M(LENSFC), SLPFCS(LENSFC) REAL :: ABSFCS(LENSFC), OROG_UF(LENSFC) - REAL :: USTAR(LENSFC) + REAL :: USTAR(LENSFC), SOCFCS(LENSFC) REAL :: FMM(LENSFC), FHH(LENSFC) REAL :: RLA(LENSFC), RLO(LENSFC) REAL(KIND=4) :: ZSOIL(LSOIL) @@ -616,6 +616,8 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & endif ENDDO + SOCFCS=0 ! Soil color. Not used yet. + num_threads = num_parthds() PRINT* PRINT*,"CALL SFCCYCLE TO UPDATE SURFACE FIELDS." @@ -626,7 +628,7 @@ SUBROUTINE SFCDRV(LUGB, IDIM,JDIM,LENSFC,LSOIL,DELTSFC, & VMNFCS,VMXFCS,SLPFCS,ABSFCS, & TSFFCS,SWEFCS,ZORFCS,ALBFCS,TG3FCS, & CNPFCS,SMCFCS,STCFCS,SLIFCS,AISFCS, & - VEGFCS,VETFCS,SOTFCS,ALFFCS, & + VEGFCS,VETFCS,SOTFCS,SOCFCS,ALFFCS, & CVFCS,CVBFCS,CVTFCS,MYRANK,num_threads, NLUNIT, & SZ_NML, INPUT_NML_FILE, & min_ice, & From 2794d413d083b43d9ba37a15375d5c61b610d29e Mon Sep 17 00:00:00 2001 From: Rahul Mahajan Date: Tue, 4 Jun 2024 09:35:04 -0400 Subject: [PATCH 25/25] Use FIXorog instead of FIXgfs/orog in global_cycle scripts (#956) Replace FIXgfs/orog with variable FIXorog in global_cycle_driver.sh and global_cycle.sh. FIXorog is defaulted to FIXgfs/orog. Fixes #955. --- ush/global_cycle.sh | 42 ++++++++++++++++++++------------------ ush/global_cycle_driver.sh | 7 ++++--- 2 files changed, 26 insertions(+), 23 deletions(-) diff --git a/ush/global_cycle.sh b/ush/global_cycle.sh index 4ce053751..53e8324f4 100755 --- a/ush/global_cycle.sh +++ b/ush/global_cycle.sh @@ -30,6 +30,7 @@ # PACKAGEROOT/gfs.v15.0.0. # PACKAGEROOT Location of gfs package. # FIXgfs Directory for fixed data. Default is $HOMEgfs/fix. +# FIXorog Directory for fixed orography data. Default is $FIXgfs/orog # EXECgfs Directory of the program executable. Defaults to # $HOMEgfs/exec # DATA Working directory @@ -63,29 +64,29 @@ # FNVETC must be set to igbp file: # ${FIXgfs}/am/global_vegtype.igbp.t$JCAP_CASE.$LONB_CASE.$LATB_CASE.rg.grb # FNALBC Input 4-component albedo climatology GRIB file. -# defaults to ${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.snowfree_albedo.tileX.nc +# defaults to ${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.snowfree_albedo.tileX.nc # FNALBC2 Input 'facsf' and 'facwf' albedo climatology GRIB file. -# Defaults to ${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.facsf.tileX.nc +# Defaults to ${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.facsf.tileX.nc # FNAISC Input sea ice climatology GRIB file. 
# Defaults to ${FIXgfs}/am/IMS-NIC.blended.ice.monthly.clim.grb # FNTG3C Input deep soil temperature climatology GRIB file. -# Defaults to ${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.substrate_temperature.tileX.nc +# Defaults to ${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.substrate_temperature.tileX.nc # FNVEGC Input vegetation fraction climatology GRIB file. -# Defaults to ${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_greenness.tileX.nc +# Defaults to ${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_greenness.tileX.nc # FNVETC Input vegetation type climatology GRIB file. -# Defaults to ${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_type.tileX.nc +# Defaults to ${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_type.tileX.nc # FNSOTC Input soil type climatology GRIB file. -# Defaults to ${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.soil_type.tileX.nc +# Defaults to ${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.soil_type.tileX.nc # FNSMCC Input soil moisture climatology GRIB file. # Defaults to ${FIXgfs}/am/global_soilmgldas.statsgo.t$JCAP_CASE.$LONB_CASE.$LATB_CASE.grb # FNVMNC Input min veg frac climatology GRIB file. -# Defaults to ${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_greenness.tileX.nc +# Defaults to ${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_greenness.tileX.nc # FNVMXC Input max veg frac climatology GRIB file. -# Defaults to ${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_greenness.tileX.nc +# Defaults to ${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_greenness.tileX.nc # FNSLPC Input slope type climatology GRIB file. -# Defaults to ${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.slope_type.tileX.nc +# Defaults to ${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.slope_type.tileX.nc # FNABSC Input max snow albedo climatology GRIB file. -# Defaults to ${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.maximum_snow_albedo.tileX.nc +# Defaults to ${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.maximum_snow_albedo.tileX.nc # FNMSKH Input high resolution land mask GRIB file. Use to set mask for # some of the input climatology fields. This is NOT the model mask. 
# Defaults to ${FIXgfs}/am/global_slmask.t1534.3072.1536.grb @@ -233,6 +234,7 @@ PACKAGEROOT=${PACKAGEROOT:-/lfs/h1/ops/prod/packages} HOMEgfs=${HOMEgfs:-${PACKAGEROOT}/gfs_ver.${gfs_ver}} EXECgfs=${EXECgfs:-$HOMEgfs/exec} FIXgfs=${FIXgfs:-$HOMEgfs/fix} +FIXorog=${FIXorog:-$FIXgfs/orog} DATA=${DATA:-$(pwd)} COMIN=${COMIN:-$(pwd)} COMOUT=${COMOUT:-$(pwd)} @@ -281,16 +283,16 @@ FNSNOC=${FNSNOC:-${FIXgfs}/am/global_snoclim.1.875.grb} FNZORC=${FNZORC:-igbp} FNAISC=${FNAISC:-${FIXgfs}/am/IMS-NIC.blended.ice.monthly.clim.grb} FNSMCC=${FNSMCC:-${FIXgfs}/am/global_soilmgldas.statsgo.t$JCAP_CASE.$LONB_CASE.$LATB_CASE.grb} -FNALBC2=${FNALBC2:-${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.facsf.tileX.nc} -FNTG3C=${FNTG3C:-${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.substrate_temperature.tileX.nc} -FNVEGC=${FNVEGC:-${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_greenness.tileX.nc} -FNALBC=${FNALBC:-${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.snowfree_albedo.tileX.nc} -FNVETC=${FNVETC:-${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_type.tileX.nc} -FNSOTC=${FNSOTC:-${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.soil_type.tileX.nc} -FNABSC=${FNABSC:-${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.maximum_snow_albedo.tileX.nc} -FNVMNC=${FNVMNC:-${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_greenness.tileX.nc} -FNVMXC=${FNVMXC:-${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_greenness.tileX.nc} -FNSLPC=${FNSLPC:-${FIXgfs}/orog/${CASE}/sfc/${CASE}.mx${OCNRES}.slope_type.tileX.nc} +FNALBC2=${FNALBC2:-${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.facsf.tileX.nc} +FNTG3C=${FNTG3C:-${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.substrate_temperature.tileX.nc} +FNVEGC=${FNVEGC:-${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_greenness.tileX.nc} +FNALBC=${FNALBC:-${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.snowfree_albedo.tileX.nc} +FNVETC=${FNVETC:-${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_type.tileX.nc} +FNSOTC=${FNSOTC:-${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.soil_type.tileX.nc} +FNABSC=${FNABSC:-${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.maximum_snow_albedo.tileX.nc} +FNVMNC=${FNVMNC:-${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_greenness.tileX.nc} +FNVMXC=${FNVMXC:-${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.vegetation_greenness.tileX.nc} +FNSLPC=${FNSLPC:-${FIXorog}/${CASE}/sfc/${CASE}.mx${OCNRES}.slope_type.tileX.nc} FNMSKH=${FNMSKH:-${FIXgfs}/am/global_slmask.t1534.3072.1536.grb} NST_FILE=${NST_FILE:-"NULL"} FNTSFA=${FNTSFA:-${COMIN}/${PREINP}sstgrb${SUFINP}} diff --git a/ush/global_cycle_driver.sh b/ush/global_cycle_driver.sh index 3f8b4d2e6..a22915d53 100755 --- a/ush/global_cycle_driver.sh +++ b/ush/global_cycle_driver.sh @@ -20,6 +20,7 @@ export PACKAGEROOT=${PACKAGEROOT:-/lfs/h1/ops/prod/packages} export gfs_ver=${gfs_ver:-v15.0.0} export HOMEgfs=${HOMEgfs:-${PACKAGEROOT}/gfs.${gfs_ver}} export FIXgfs=${FIXgfs:-$HOMEgfs/fix} +export FIXorog=${FIXorog:-$FIXgfs/orog} ntiles=${ntiles:-6} DONST=${DONST:-"NO"} @@ -87,11 +88,11 @@ for n in $(seq 1 $ntiles); do chmod 644 $COMOUT/$PDY.${cyc}0000.sfcanl_data.tile${n}.nc ln -fs $COMOUT/$PDY.${cyc}0000.sfcanl_data.tile${n}.nc $DATA/fnbgso.00$n - ln -fs $FIXgfs/orog/${CASE}/C${CRES}_grid.tile${n}.nc $DATA/fngrid.00$n + ln -fs $FIXorog/${CASE}/C${CRES}_grid.tile${n}.nc $DATA/fngrid.00$n if (( OCNRES > 9999 ));then - ln -fs $FIXgfs/orog/${CASE}/C${CRES}_oro_data.tile${n}.nc $DATA/fnorog.00$n + ln -fs $FIXorog/${CASE}/C${CRES}_oro_data.tile${n}.nc $DATA/fnorog.00$n else - ln -fs 
$FIXgfs/orog/${CASE}/C${CRES}.mx${OCNRES}_oro_data.tile${n}.nc $DATA/fnorog.00$n + ln -fs $FIXorog/${CASE}/C${CRES}.mx${OCNRES}_oro_data.tile${n}.nc $DATA/fnorog.00$n fi if [[ "$DO_SNO_INC_JEDI" == ".true." ]] ; then