diff --git a/doc/source/user_guide/bilinear_interp.md b/doc/source/user_guide/bilinear_interp.md index ae4e5dd3..36f8484d 100644 --- a/doc/source/user_guide/bilinear_interp.md +++ b/doc/source/user_guide/bilinear_interp.md @@ -18,7 +18,9 @@ data = bilinear_interp(ilon,ilat,idata,lon,lat) 5. `lat`: output latitude #### Options + - `fill_value`: invalid value - `dtype`: output data type + - `extrapolate`: extrapolate points #### Outputs - `data`: interpolated data diff --git a/notebooks/Plot Antarctic Tidal Currents.ipynb b/notebooks/Plot Antarctic Tidal Currents.ipynb index 20dc400a..6c6bc392 100644 --- a/notebooks/Plot Antarctic Tidal Currents.ipynb +++ b/notebooks/Plot Antarctic Tidal Currents.ipynb @@ -261,9 +261,10 @@ "nx = np.int((xlimits[1]-xlimits[0])/spacing[0])+1\n", "ny = np.int((ylimits[0]-ylimits[1])/spacing[1])+1\n", "#-- convert image coordinates from polar stereographic to latitude/longitude\n", - "proj1 = pyproj.Proj(\"+init=EPSG:{0:d}\".format(4326))\n", - "proj2 = pyproj.Proj(\"+init=EPSG:{0:d}\".format(3031))\n", - "lon,lat = pyproj.transform(proj2, proj1, xgrid.flatten(), ygrid.flatten())" + "crs1 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(3031))\n", + "crs2 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(4326))\n", + "transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n", + "lon,lat = transformer.transform(xgrid.flatten(), ygrid.flatten())" ] }, { @@ -363,8 +364,8 @@ " ax.set_xlim(xlimits)\n", " ax.set_ylim(ylimits)\n", " # stronger linewidth on frame\n", - " ax.outline_patch.set_linewidth(2.0)\n", - " ax.outline_patch.set_capstyle('projecting')\n", + " ax.spines['geo'].set_linewidth(2.0)\n", + " ax.spines['geo'].set_capstyle('projecting')\n", "\n", "#-- Add colorbar with a colorbar axis\n", "#-- Add an axes at position rect [left, bottom, width, height]\n", diff --git a/notebooks/Plot Antarctic Tide Range.ipynb b/notebooks/Plot Antarctic Tide Range.ipynb index 877e7adb..3b0525a9 100644 --- a/notebooks/Plot Antarctic Tide Range.ipynb +++ b/notebooks/Plot Antarctic Tide Range.ipynb @@ -251,9 +251,10 @@ "nx = np.int((xlimits[1]-xlimits[0])/spacing[0])+1\n", "ny = np.int((ylimits[0]-ylimits[1])/spacing[1])+1\n", "#-- convert image coordinates from polar stereographic to latitude/longitude\n", - "proj1 = pyproj.Proj(\"+init=EPSG:{0:d}\".format(4326))\n", - "proj2 = pyproj.Proj(\"+init=EPSG:{0:d}\".format(3031))\n", - "lon,lat = pyproj.transform(proj2, proj1, xgrid.flatten(), ygrid.flatten())" + "crs1 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(3031))\n", + "crs2 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(4326))\n", + "transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n", + "lon,lat = transformer.transform(xgrid.flatten(), ygrid.flatten())" ] }, { @@ -409,8 +410,8 @@ "ax.set_ylim(ylimits)\n", "\n", "#-- stronger linewidth on frame\n", - "ax.outline_patch.set_linewidth(2.0)\n", - "ax.outline_patch.set_capstyle('projecting')\n", + "ax.spines['geo'].set_linewidth(2.0)\n", + "ax.spines['geo'].set_capstyle('projecting')\n", "#-- adjust subplot within figure\n", "fig.subplots_adjust(left=0.02,right=0.98,bottom=0.05,top=0.98)\n", "#-- show the plot\n", diff --git a/notebooks/Plot Ross Ice Shelf Map.ipynb b/notebooks/Plot Ross Ice Shelf Map.ipynb index cb473ad8..10bd6b6f 100644 --- a/notebooks/Plot Ross Ice Shelf Map.ipynb +++ b/notebooks/Plot Ross Ice Shelf Map.ipynb @@ -268,9 +268,10 @@ "nx = np.int((xlimits[1]-xlimits[0])/spacing[0])+1\n", "ny = np.int((ylimits[0]-ylimits[1])/spacing[1])+1\n", "#-- convert image coordinates from 
polar stereographic to latitude/longitude\n", - "proj1 = pyproj.Proj(\"+init=EPSG:{0:d}\".format(4326))\n", - "proj2 = pyproj.Proj(\"+init=EPSG:{0:d}\".format(3031))\n", - "lon,lat = pyproj.transform(proj2, proj1, xgrid.flatten(), ygrid.flatten())" + "crs1 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(3031))\n", + "crs2 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(4326))\n", + "transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n", + "lon,lat = transformer.transform(xgrid.flatten(), ygrid.flatten())" ] }, { @@ -382,8 +383,8 @@ "ax.set_ylim(ylimits)\n", "\n", "# stronger linewidth on frame\n", - "ax.outline_patch.set_linewidth(2.0)\n", - "ax.outline_patch.set_capstyle('projecting')\n", + "ax.spines['geo'].set_linewidth(2.0)\n", + "ax.spines['geo'].set_capstyle('projecting')\n", "# adjust subplot within figure\n", "fig.subplots_adjust(left=0.02,right=0.98,bottom=0.05,top=0.98)\n", " \n", diff --git a/pyTMD/bilinear_interp.py b/pyTMD/bilinear_interp.py index f5af0190..a72cf179 100644 --- a/pyTMD/bilinear_interp.py +++ b/pyTMD/bilinear_interp.py @@ -14,7 +14,9 @@ lon: output longitude OPTIONS: + fill_value: invalid value dtype: output data type + extrapolate: extrapolate points OUTPUT: data: interpolated data @@ -26,6 +28,7 @@ UPDATE HISTORY: Updated 08/2020: check that output coordinates are within bounds + allow small extrapolations if individual grid cells are invalid Updated 07/2020: split into separate function Updated 06/2020: use argmin and argmax in bilinear interpolation Updated 09/2017: Rewritten in Python @@ -33,7 +36,8 @@ import numpy as np #-- PURPOSE: bilinear interpolation of input data to output data -def bilinear_interp(ilon,ilat,idata,lon,lat,dtype=np.float): +def bilinear_interp(ilon,ilat,idata,lon,lat,fill_value=np.nan, + dtype=np.float,extrapolate=False): """ Bilinear interpolation of input data to output coordinates @@ -47,61 +51,60 @@ def bilinear_interp(ilon,ilat,idata,lon,lat,dtype=np.float): Keyword arguments ----------------- + fill_value: invalid value dtype: output data type + extrapolate: extrapolate points Returns ------- data: interpolated data """ - #-- degrees to radians - dtr = np.pi/180.0 #-- grid step size of tide model dlon = np.abs(ilon[1] - ilon[0]) dlat = np.abs(ilat[1] - ilat[0]) #-- find valid points (within bounds) valid, = np.nonzero((lon >= ilon.min()) & (lon <= ilon.max()) & (lat > ilat.min()) & (lat < ilat.max())) - #-- Convert input coordinates to radians - phi = ilon*dtr - th = (90.0 - ilat)*dtr - #-- Convert output data coordinates to radians - xphi = lon*dtr - xth = (90.0 - lat)*dtr #-- interpolate gridded data values to data npts = len(lon) - data = np.ma.zeros((npts),dtype=dtype) + #-- allocate to output interpolated data array + data = np.ma.zeros((npts),dtype=dtype,fill_value=fill_value) data.mask = np.ones((npts),dtype=np.bool) - data.mask[valid] = False + #-- initially set all data to fill value + data.data[:] = data.fill_value #-- for each valid point for i in valid: #-- calculating the indices for the original grid - dx = (ilon - np.floor(lon[i]/dlon)*dlon)**2 - dy = (ilat - np.floor(lat[i]/dlat)*dlat)**2 - iph = np.argmin(dx) - ith = np.argmin(dy) + ix, = np.nonzero((ilon[0:-1] <= lon[i]) & (ilon[1:] > lon[i])) + iy, = np.nonzero((ilat[0:-1] <= lat[i]) & (ilat[1:] > lat[i])) + #-- corner data values for adjacent grid cells + IM = np.ma.zeros((4),fill_value=fill_value,dtype=dtype) + IM.mask = np.ones((4),dtype=np.bool) + #-- corner weight values for adjacent grid cells + WM = np.zeros((4)) + #-- build data and 
weight arrays + for j,XI,YI in zip([0,1,2,3],[ix,ix+1,ix,ix+1],[iy,iy,iy+1,iy+1]): + IM.data[j], = idata.data[YI,XI] + IM.mask[j], = idata.mask[YI,XI] + WM[j], = np.abs(lon[i]-ilon[XI])*np.abs(lat[i]-ilat[YI]) #-- if on corner value: use exact - if ((lat[i] == ilat[ith]) & (lon[i] == ilon[iph])): - data.data[i] = idata[ith,iph] - elif ((lat[i] == ilat[ith+1]) & (lon[i] == ilon[iph])): - data.data[i] = idata[ith+1,iph] - elif ((lat[i] == ilat[ith]) & (lon[i] == ilon[iph+1])): - data.data[i] = idata[ith,iph+1] - elif ((lat[i] == ilat[ith+1]) & (lon[i] == ilon[iph+1])): - data.data[i] = idata[ith+1,iph+1] - else: - #-- corner weight values for i,j - Wa = (xphi[i]-phi[iph])*(xth[i]-th[ith]) - Wb = (phi[iph+1]-xphi[i])*(xth[i]-th[ith]) - Wc = (xphi[i]-phi[iph])*(th[ith+1]-xth[i]) - Wd = (phi[iph+1]-xphi[i])*(th[ith+1]-xth[i]) - #-- divisor weight value - W = (phi[iph+1]-phi[iph])*(th[ith+1]-th[ith]) - #-- corner data values for i,j - Ia = idata[ith,iph]#-- (0,0) - Ib = idata[ith,iph+1]#-- (1,0) - Ic = idata[ith+1,iph]#-- (0,1) - Id = idata[ith+1,iph+1]#-- (1,1) + if ((lat[i] == ilat[iy]) & (lon[i] == ilon[ix])): + data.data[i] = idata.data[iy,ix] + data.mask[i] = idata.mask[iy,ix] + elif ((lat[i] == ilat[iy+1]) & (lon[i] == ilon[ix])): + data.data[i] = idata.data[iy+1,ix] + data.mask[i] = idata.mask[iy+1,ix] + elif ((lat[i] == ilat[iy]) & (lon[i] == ilon[ix+1])): + data.data[i] = idata.data[iy,ix+1] + data.mask[i] = idata.mask[iy,ix+1] + elif ((lat[i] == ilat[iy+1]) & (lon[i] == ilon[ix+1])): + data.data[i] = idata.data[iy+1,ix+1] + data.mask[i] = idata.mask[iy+1,ix+1] + elif np.all(np.isfinite(IM) & (~IM.mask)) or extrapolate: + #-- find valid indices for data summation and weight matrix + ii, = np.nonzero(np.isfinite(IM) & (~IM.mask)) #-- calculate interpolated value for i - data.data[i] = (Ia*Wa + Ib*Wb + Ic*Wc + Id*Wd)/W + data.data[i] = np.sum(WM[ii]*IM[ii])/np.sum(WM[ii]) + data.mask[i] = np.all(IM.mask[ii]) #-- return interpolated values return data diff --git a/pyTMD/calc_astrol_longitudes.py b/pyTMD/calc_astrol_longitudes.py index 0e83892b..9ec69b7c 100644 --- a/pyTMD/calc_astrol_longitudes.py +++ b/pyTMD/calc_astrol_longitudes.py @@ -56,7 +56,7 @@ def polynomial_sum(coefficients, t): Arguments --------- coefficients: leading coefficient of polynomials of increasing order - t: delta time in units for a given astrological longitudes calculation + t: delta time in units for a given astronomical longitudes calculation """ #-- convert time to array if importing a single value t = np.array([t]) if (np.ndim(t) == 0) else np.copy(t) diff --git a/pyTMD/infer_minor_corrections.py b/pyTMD/infer_minor_corrections.py index 4e860cd3..88467b17 100755 --- a/pyTMD/infer_minor_corrections.py +++ b/pyTMD/infer_minor_corrections.py @@ -36,6 +36,7 @@ UPDATE HISTORY: Updated 08/2020: change time variable names to not overwrite functions + update nodal corrections for FES models Updated 07/2020: added function docstrings reduce list of minor constituents if in list of major values Updated 11/2019: output as numpy masked arrays instead of nan-filled arrays @@ -55,7 +56,7 @@ def infer_minor_corrections(t,zmajor,constituents,DELTAT=0.0,CORRECTIONS=''): Arguments --------- t: days relative to 1992-01-01T00:00:00 - zmajor: Complex HC for GIVEN constituents/points + zmajor: Complex HC for given constituents/points constituents: tidal constituent IDs Keyword arguments @@ -79,62 +80,77 @@ def infer_minor_corrections(t,zmajor,constituents,DELTAT=0.0,CORRECTIONS=''): #-- convert time from days relative to Jan 1, 1992 
to Modified Julian Days MJD = 48622.0 + t #-- major constituents used for inferring minor tides - cindex = ['q1','o1','p1','k1','n2','m2','s2','k2'] + cindex = ['q1','o1','p1','k1','n2','m2','s2','k2','2n2'] #-- re-order major tides to correspond to order of cindex - z8 = np.ma.zeros((n,8),dtype=np.complex64) - ni = 0 + z = np.ma.zeros((n,9),dtype=np.complex64) + nz = 0 for i,c in enumerate(cindex): - j = [j for j,val in enumerate(constituents) if val == c] + j = [j for j,val in enumerate(constituents) if (val == c)] if j: j1, = j - z8[:,i] = zmajor[:,j1] - ni += 1 + z[:,i] = zmajor[:,j1] + nz += 1 - if (ni < 6): + if (nz < 6): raise Exception('Not enough constituents for inference') #-- list of minor constituents - minor = ['2q1','sigma1','rho1','m1','m1','chi1','pi1','phi1','theta1','j1', - 'oo1','2n2','mu2','nu2','lambda2','l2','l2','t2'] + minor = ['2q1','sigma1','rho1','m12','m11','chi1','pi1','phi1','theta1', + 'j1','oo1','2n2','mu2','nu2','lambda2','l2','l2','t2','eps2','eta2'] #-- only add minor constituents that are not on the list of major values minor_indices = [i for i,m in enumerate(minor) if m not in constituents] #-- relationship between major and minor constituent amplitude and phase - zmin = np.zeros((n,18),dtype=np.complex64) - zmin[:,0] = 0.263*z8[:,0] - 0.0252*z8[:,1]#-- 2Q1 - zmin[:,1] = 0.297*z8[:,0] - 0.0264*z8[:,1]#-- sigma1 - zmin[:,2] = 0.164*z8[:,0] + 0.0048*z8[:,1]#-- rho1 - zmin[:,3] = 0.0140*z8[:,1] + 0.0101*z8[:,3]#-- M1 - zmin[:,4] = 0.0389*z8[:,1] + 0.0282*z8[:,3]#-- M1 - zmin[:,5] = 0.0064*z8[:,1] + 0.0060*z8[:,3]#-- chi1 - zmin[:,6] = 0.0030*z8[:,1] + 0.0171*z8[:,3]#-- pi1 - zmin[:,7] = -0.0015*z8[:,1] + 0.0152*z8[:,3]#-- phi1 - zmin[:,8] = -0.0065*z8[:,1] + 0.0155*z8[:,3]#-- theta1 - zmin[:,9] = -0.0389*z8[:,1] + 0.0836*z8[:,3]#-- J1 - zmin[:,10] = -0.0431*z8[:,1] + 0.0613*z8[:,3]#-- OO1 - zmin[:,11] = 0.264*z8[:,4] - 0.0253*z8[:,5]#-- 2N2 - zmin[:,12] = 0.298*z8[:,4] - 0.0264*z8[:,5]#-- mu2 - zmin[:,13] = 0.165*z8[:,4] + 0.00487*z8[:,5]#-- nu2 - zmin[:,14] = 0.0040*z8[:,5] + 0.0074*z8[:,6]#-- lambda2 - zmin[:,15] = 0.0131*z8[:,5] + 0.0326*z8[:,6]#-- L2 - zmin[:,16] = 0.0033*z8[:,5] + 0.0082*z8[:,6]#-- L2 - zmin[:,17] = 0.0585*z8[:,6]#-- t2 + zmin = np.zeros((n,20),dtype=np.complex64) + zmin[:,0] = 0.263*z[:,0] - 0.0252*z[:,1]#-- 2Q1 + zmin[:,1] = 0.297*z[:,0] - 0.0264*z[:,1]#-- sigma1 + zmin[:,2] = 0.164*z[:,0] + 0.0048*z[:,1]#-- rho1 + zmin[:,3] = 0.0140*z[:,1] + 0.0101*z[:,3]#-- M12 + zmin[:,4] = 0.0389*z[:,1] + 0.0282*z[:,3]#-- M11 + zmin[:,5] = 0.0064*z[:,1] + 0.0060*z[:,3]#-- chi1 + zmin[:,6] = 0.0030*z[:,1] + 0.0171*z[:,3]#-- pi1 + zmin[:,7] = -0.0015*z[:,1] + 0.0152*z[:,3]#-- phi1 + zmin[:,8] = -0.0065*z[:,1] + 0.0155*z[:,3]#-- theta1 + zmin[:,9] = -0.0389*z[:,1] + 0.0836*z[:,3]#-- J1 + zmin[:,10] = -0.0431*z[:,1] + 0.0613*z[:,3]#-- OO1 + zmin[:,11] = 0.264*z[:,4] - 0.0253*z[:,5]#-- 2N2 + zmin[:,12] = 0.298*z[:,4] - 0.0264*z[:,5]#-- mu2 + zmin[:,13] = 0.165*z[:,4] + 0.00487*z[:,5]#-- nu2 + zmin[:,14] = 0.0040*z[:,5] + 0.0074*z[:,6]#-- lambda2 + zmin[:,15] = 0.0131*z[:,5] + 0.0326*z[:,6]#-- L2 + zmin[:,16] = 0.0033*z[:,5] + 0.0082*z[:,6]#-- L2 + zmin[:,17] = 0.0585*z[:,6]#-- t2 + #-- additional coefficients for FES models + if CORRECTIONS in ('FES',): + #-- spline coefficients for admittances + mu2 = [0.069439968323, 0.351535557706, -0.046278307672] + nu2 = [-0.006104695053, 0.156878802427, 0.006755704028] + l2 = [0.077137765667, -0.051653455134, 0.027869916824] + t2 = [0.180480173707, -0.020101177502, 0.008331518844] + lda2 = [0.016503557465, 
-0.013307812292, 0.007753383202] + zmin[:,12] = mu2[0]*z[:,7] + mu2[1]*z[:,4] + mu2[2]*z[:,5]#-- mu2 + zmin[:,13] = nu2[0]*z[:,7] + nu2[1]*z[:,4] + nu2[2]*z[:,5]#-- nu2 + zmin[:,14] = lda2[0]*z[:,7] + lda2[1]*z[:,4] + lda2[2]*z[:,5]#-- lambda2 + zmin[:,16] = l2[0]*z[:,7] + l2[1]*z[:,4] + l2[2]*z[:,5]#-- L2 + zmin[:,17] = t2[0]*z[:,7] + t2[1]*z[:,4] + t2[2]*z[:,5]#-- t2 + zmin[:,18] = 0.53285*z[:,8] - 0.03304*z[:,4]#-- eps2 + zmin[:,19] = -0.0034925*z[:,5] + 0.0831707*z[:,7]#-- eta2 hour = (t % 1)*24.0 t1 = 15.0*hour t2 = 30.0*hour - #-- set function for astrological longitudes + #-- set function for astronomical longitudes ASTRO5 = True if CORRECTIONS in ('GOT','FES') else False #-- convert from Modified Julian Dates into Ephemeris Time S,H,P,omega,pp = calc_astrol_longitudes(MJD+DELTAT, ASTRO5=ASTRO5) #-- determine equilibrium tidal arguments - arg = np.zeros((n,18)) + arg = np.zeros((n,20)) arg[:,0] = t1 - 4.0*S + H + 2.0*P - 90.0#-- 2Q1 arg[:,1] = t1 - 4.0*S + 3.0*H - 90.0#-- sigma1 arg[:,2] = t1 - 3.0*S + 3.0*H - P - 90.0#-- rho1 - arg[:,3] = t1 - S + H - P + 90.0#-- M1 - arg[:,4] = t1 - S + H + P + 90.0#-- M1 + arg[:,3] = t1 - S + H - P + 90.0#-- M12 + arg[:,4] = t1 - S + H + P + 90.0#-- M11 arg[:,5] = t1 - S + 3.0*H - P + 90.0#-- chi1 arg[:,6] = t1 - 2.0*H + pp - 90.0#-- pi1 arg[:,7] = t1 + 3.0*H + 90.0#-- phi1 @@ -148,6 +164,8 @@ def infer_minor_corrections(t,zmajor,constituents,DELTAT=0.0,CORRECTIONS=''): arg[:,15] = t2 - S + 2.0*H - P + 180.0#-- L2 arg[:,16] = t2 - S + 2.0*H + P#-- L2 arg[:,17] = t2 - H + pp#-- t2 + arg[:,18] = t2 - 5.0*S + 4.0*H + P #-- eps2 + arg[:,19] = t2 + S + 2.0*H - pp #-- eta2 #-- determine nodal corrections f and u sinn = np.sin(omega*dtr) @@ -155,13 +173,13 @@ def infer_minor_corrections(t,zmajor,constituents,DELTAT=0.0,CORRECTIONS=''): sin2n = np.sin(2.0*omega*dtr) cos2n = np.cos(2.0*omega*dtr) - f = np.ones((n,18)) + f = np.ones((n,20)) f[:,0] = np.sqrt((1.0 + 0.189*cosn - 0.0058*cos2n)**2 + (0.189*sinn - 0.0058*sin2n)**2)#-- 2Q1 f[:,1] = f[:,0]#-- sigma1 f[:,2] = f[:,0]#-- rho1 - f[:,3] = np.sqrt((1.0 + 0.185*cosn)**2 + (0.185*sinn)**2)#-- M1 - f[:,4] = np.sqrt((1.0 + 0.201*cosn)**2 + (0.201*sinn)**2)#-- M1 + f[:,3] = np.sqrt((1.0 + 0.185*cosn)**2 + (0.185*sinn)**2)#-- M12 + f[:,4] = np.sqrt((1.0 + 0.201*cosn)**2 + (0.201*sinn)**2)#-- M11 f[:,5] = np.sqrt((1.0 + 0.221*cosn)**2 + (0.221*sinn)**2)#-- chi1 f[:,9] = np.sqrt((1.0 + 0.198*cosn)**2 + (0.198*sinn)**2)#-- J1 f[:,10] = np.sqrt((1.0 + 0.640*cosn + 0.134*cos2n)**2 + @@ -172,13 +190,13 @@ def infer_minor_corrections(t,zmajor,constituents,DELTAT=0.0,CORRECTIONS=''): f[:,15] = f[:,11]#-- L2 f[:,16] = np.sqrt((1.0 + 0.441*cosn)**2 + (0.441*sinn)**2)#-- L2 - u = np.zeros((n,18)) + u = np.zeros((n,20)) u[:,0] = np.arctan2(0.189*sinn - 0.0058*sin2n, 1.0 + 0.189*cosn - 0.0058*sin2n)/dtr#-- 2Q1 u[:,1] = u[:,0]#-- sigma1 u[:,2] = u[:,0]#-- rho1 - u[:,3] = np.arctan2( 0.185*sinn, 1.0 + 0.185*cosn)/dtr#-- M1 - u[:,4] = np.arctan2(-0.201*sinn, 1.0 + 0.201*cosn)/dtr#-- M1 + u[:,3] = np.arctan2( 0.185*sinn, 1.0 + 0.185*cosn)/dtr#-- M12 + u[:,4] = np.arctan2(-0.201*sinn, 1.0 + 0.201*cosn)/dtr#-- M11 u[:,5] = np.arctan2(-0.221*sinn, 1.0 + 0.221*cosn)/dtr#-- chi1 u[:,9] = np.arctan2(-0.198*sinn, 1.0 + 0.198*cosn)/dtr#-- J1 u[:,10] = np.arctan2(-0.640*sinn - 0.134*sin2n, @@ -189,6 +207,52 @@ def infer_minor_corrections(t,zmajor,constituents,DELTAT=0.0,CORRECTIONS=''): u[:,15] = u[:,11]#-- L2 u[:,16] = np.arctan2(-0.441*sinn, 1.0 + 0.441*cosn)/dtr#-- L2 + if CORRECTIONS in ('FES',): + #-- additional astronomical 
terms for FES models + II = np.arccos(0.913694997 - 0.035692561*np.cos(omega*dtr)) + at1 = np.arctan(1.01883*np.tan(omega*dtr/2.0)) + at2 = np.arctan(0.64412*np.tan(omega*dtr/2.0)) + xi = -at1 - at2 + omega*dtr + xi[xi > np.pi] -= 2.0*np.pi + nu = at1 - at2 + I2 = np.tan(II/2.0) + Ra1 = np.sqrt(1.0 - 12.0*(I2**2)*np.cos(2.0*(P - xi)) + 36.0*(I2**4)) + P2 = np.sin(2.0*(P - xi)) + Q2 = 1.0/(6.0*(I2**2)) - np.cos(2.0*(P - xi)) + R = np.arctan(P2/Q2) + + f[:,0] = np.sin(II)*(np.cos(II/2.0)**2)/0.38 #-- 2Q1 + f[:,1] = f[:,0] #-- sigma1 + f[:,2] = f[:,0] #-- rho1 + f[:,3] = f[:,0] #-- M12 + f[:,4] = np.sin(2.0*II)/0.7214 #-- M11 + f[:,5] = f[:,4] #-- chi1 + f[:,9] = f[:,5] #-- J1 + f[:,10] = np.sin(II)*np.power(np.sin(II/2.0),2.0)/0.01640 #-- OO1 + f[:,11] = np.power(np.cos(II/2.0),4.0)/0.9154 #-- 2N2 + f[:,12] = f[:,11] #-- mu2 + f[:,13] = f[:,11] #-- nu2 + f[:,14] = f[:,11] #-- lambda2 + f[:,15] = f[:,11]*Ra1 #-- L2 + f[:,18] = f[:,11] #-- eps2 + f[:,19] = np.power(np.sin(II),2.0)/0.1565 #-- eta2 + + u[:,0] = (2.0*xi - nu)/dtr #-- 2Q1 + u[:,1] = u[:,0] #-- sigma1 + u[:,2] = u[:,0] #-- rho1 + u[:,3] = u[:,0] #-- M12 + u[:,4] = -nu/dtr #-- M11 + u[:,5] = u[:,4] #-- chi1 + u[:,9] = u[:,4] #-- J1 + u[:,10] = (-2.0*xi - nu)/dtr #-- OO1 + u[:,11] = (2.0*xi - 2.0*nu)/dtr #-- 2N2 + u[:,12] = u[:,11] #-- mu2 + u[:,13] = u[:,11] #-- nu2 + u[:,14] = (2.0*xi - 2.0*nu)/dtr #-- lambda2 + u[:,15] = (2.0*xi - 2.0*nu - R)/dtr#-- L2 + u[:,18] = u[:,12] #-- eps2 + u[:,19] = -2.0*nu/dtr #-- eta2 + #-- sum over the minor tidal constituents of interest for k in minor_indices: th = (arg[:,k] + u[:,k])*dtr diff --git a/pyTMD/load_nodal_corrections.py b/pyTMD/load_nodal_corrections.py index df652534..4fcda77a 100755 --- a/pyTMD/load_nodal_corrections.py +++ b/pyTMD/load_nodal_corrections.py @@ -36,6 +36,7 @@ UPDATE HISTORY: Updated 08/2020: change time variable names to not overwrite functions + update nodal corrections for FES models Updated 07/2020: added function docstrings. 
add shallow water constituents Updated 09/2019: added netcdf option to CORRECTIONS option Updated 08/2018: added correction option ATLAS for localized OTIS solutions @@ -78,7 +79,7 @@ def load_nodal_corrections(MJD,constituents,DELTAT=0.0,CORRECTIONS='OTIS'): #-- degrees to radians dtr = np.pi/180.0 - #-- set function for astrological longitudes + #-- set function for astronomical longitudes ASTRO5 = True if CORRECTIONS in ('GOT','FES') else False #-- convert from Modified Julian Dates into Ephemeris Time s,h,p,omega,pp = calc_astrol_longitudes(MJD+DELTAT, ASTRO5=ASTRO5) @@ -166,7 +167,7 @@ def load_nodal_corrections(MJD,constituents,DELTAT=0.0,CORRECTIONS='OTIS'): f = np.zeros((nt,60)) u = np.zeros((nt,60)) #-- determine nodal corrections f and u for each model type - if CORRECTIONS in ('OTIS','ATLAS','netcdf','FES'): + if CORRECTIONS in ('OTIS','ATLAS','netcdf'): f[:,0] = 1.0 #-- Sa f[:,1] = 1.0 #-- Ssa f[:,2] = 1.0 - 0.130*cosn #-- Mm @@ -188,8 +189,8 @@ def load_nodal_corrections(MJD,constituents,DELTAT=0.0,CORRECTIONS='OTIS'): # Mtmp1 = 2.0*np.cos(p*dtr) + 0.4*np.cos((p-omega)*dtr) # Mtmp2 = np.sin(p*dtr) + 0.2*np.sin((p-omega)*dtr) #-- Ray's - Mtmp1 = 1.36*np.cos(p*dtr) + 0.267*np.cos((p-omega)*dtr) - Mtmp2 = 0.64*np.sin(p*dtr) + 0.135*np.sin((p-omega)*dtr) + Mtmp1 = 1.36*np.cos(p*dtr) + 0.267*np.cos((p-omega)*dtr) + Mtmp2 = 0.64*np.sin(p*dtr) + 0.135*np.sin((p-omega)*dtr) f[:,13] = np.sqrt(Mtmp1**2 + Mtmp2**2) #-- M1 f[:,14] = np.sqrt((1.0+0.221*cosn)**2+(0.221*sinn)**2) #-- chi1 f[:,15] = 1.0 #-- pi1 @@ -215,9 +216,9 @@ def load_nodal_corrections(MJD,constituents,DELTAT=0.0,CORRECTIONS='OTIS'): f[:,29] = f[:,24] #-- M2 f[:,30] = 1.0 #-- M2b f[:,31] = 1.0 #-- lambda2 - temp1 = 1.0 - 0.25*np.cos(2*p*dtr) - 0.11*np.cos((2.0*p-omega)*dtr) - 0.04*cosn - temp2 = 0.25*np.sin(2*p*dtr) + 0.11*np.sin((2.0*p-omega)*dtr) + 0.04*sinn - f[:,32] = np.sqrt(temp1**2 + temp2**2) #-- L2 + Ltmp1 = 1.0 - 0.25*np.cos(2*p*dtr) - 0.11*np.cos((2.0*p-omega)*dtr) - 0.04*cosn + Ltmp2 = 0.25*np.sin(2*p*dtr) + 0.11*np.sin((2.0*p-omega)*dtr) + 0.04*sinn + f[:,32] = np.sqrt(Ltmp1**2 + Ltmp2**2) #-- L2 f[:,33] = 1.0 #-- T2 f[:,34] = 1.0 #-- S2 f[:,35] = 1.0 #-- R2 @@ -290,7 +291,7 @@ def load_nodal_corrections(MJD,constituents,DELTAT=0.0,CORRECTIONS='OTIS'): u[:,29] = u[:,24] #-- M2 u[:,30] = 0.0 #-- M2b u[:,31] = 0.0 #-- lambda2 - u[:,32] = np.arctan(-temp2/temp1)/dtr #-- L2 + u[:,32] = np.arctan(-Ltmp2/Ltmp1)/dtr #-- L2 u[:,33] = 0.0 #-- T2 u[:,34] = 0.0 #-- S2 u[:,35] = 0.0 #-- R2 @@ -313,13 +314,150 @@ def load_nodal_corrections(MJD,constituents,DELTAT=0.0,CORRECTIONS='OTIS'): u[:,50] = 0.0 #-- S6 u[:,51] = 0.0 #-- S7 u[:,52] = 0.0 #-- S8 + #-- mean sea level + u[:,59] = 0.0 #-- Z0 + + elif CORRECTIONS in ('FES',): + #-- additional astronomical terms for FES models + II = np.arccos(0.913694997 - 0.035692561*np.cos(omega*dtr)) + at1 = np.arctan(1.01883*np.tan(omega*dtr/2.0)) + at2 = np.arctan(0.64412*np.tan(omega*dtr/2.0)) + xi = -at1 - at2 + omega*dtr + xi[xi > np.pi] -= 2.0*np.pi + nu = at1 - at2 + I2 = np.tan(II/2.0) + Ra1 = np.sqrt(1.0 - 12.0*(I2**2)*np.cos(2.0*(p - xi)) + 36.0*(I2**4)) + P2 = np.sin(2.0*(p - xi)) + Q2 = 1.0/(6.0*(I2**2)) - np.cos(2.0*(p - xi)) + R = np.arctan(P2/Q2) + P_prime = np.sin(2.0*II)*np.sin(nu) + Q_prime = np.sin(2.0*II)*np.cos(nu) + 0.3347 + nu_prime = np.arctan(P_prime/Q_prime) + P_sec = (np.sin(II)**2)*np.sin(2.0*nu) + Q_sec = (np.sin(II)**2)*np.cos(2.0*nu) + 0.0727 + nu_sec = 0.5*np.arctan(P_sec/Q_sec) + + f[:,0] = 1.0 #-- Sa + f[:,1] = 1.0 #-- Ssa + f[:,2] = (2.0/3.0 - 
np.power(np.sin(II),2.0))/0.5021 #-- Mm + f[:,3] = 1.0 #-- MSf + f[:,4] = np.power(np.sin(II),2.0)/0.1578 #-- Mf + f[:,7] = np.sin(II)*(np.cos(II/2.0)**2)/0.38 #-- 2Q1 + f[:,8] = f[:,7] #-- sigma1 + f[:,9] = f[:,7] #-- q1 + f[:,10] = f[:,7] #-- rho1 + f[:,11] = f[:,7] #-- O1 + #-- Ray's + Mtmp1 = 1.36*np.cos(p*dtr) + 0.267*np.cos((p-omega)*dtr) + Mtmp2 = 0.64*np.sin(p*dtr) + 0.135*np.sin((p-omega)*dtr) + f[:,13] = np.sqrt(Mtmp1**2 + Mtmp2**2) #-- M1 + f[:,14] = np.sin(2.0*II) / 0.7214 #-- chi1 + f[:,15] = 1.0 #-- pi1 + f[:,16] = 1.0 #-- P1 + f[:,17] = 1.0 #-- S1 + temp1 = 0.8965*np.power(np.sin(2.0*II),2.0) + temp2 = 0.6001*np.sin(2.0*II)*np.cos(nu) + f[:,18] = np.sqrt(temp1 + temp2 + 0.1006) #-- K1 + f[:,19] = 1.0 #-- psi1 + f[:,20] = 1.0 #-- phi1 + f[:,21] = f[:,14] #-- theta1 + f[:,22] = f[:,14] #-- J1 + f[:,23] = np.sin(II)*np.power(np.sin(II/2.0),2.0)/0.01640 #-- OO1 + f[:,24] = np.power(np.cos(II/2.0),4.0)/0.9154 #-- 2N2 + f[:,25] = f[:,24] #-- mu2 + f[:,26] = f[:,24] #-- N2 + f[:,27] = f[:,24] #-- nu2 + f[:,28] = 1.0 #-- M2a + f[:,29] = f[:,24] #-- M2 + f[:,30] = 1.0 #-- M2b + f[:,31] = f[:,29] #-- lambda2 + f[:,32] = f[:,29]*Ra1 #-- L2 + f[:,33] = 1.0 #-- T2 + f[:,34] = 1.0 #-- S2 + f[:,35] = 1.0 #-- R2 + temp1 = 19.0444 * np.power(np.sin(II),4.0) + temp2 = 2.7702 * np.power(np.sin(II),2.0) * np.cos(2.0*nu) + f[:,36] = np.sqrt(temp1 + temp2 + 0.0981) #-- K2 + f[:,37] = np.power(np.sin(II),2.0)/0.1565 #-- eta2 + f[:,38] = f[:,29]**2 #-- MNS2 + f[:,39] = f[:,29] #-- 2SM2 + f[:,40] = np.power(np.cos(II/2.0), 6.0) / 0.8758 #-- M3 + f[:,41] = f[:,18]*f[:,29] #-- MK3 + f[:,42] = 1.0 #-- S3 + f[:,43] = f[:,29]**2 #-- MN4 + f[:,44] = f[:,43] #-- M4 + f[:,45] = f[:,29] #-- MS4 + f[:,46] = f[:,29]*f[:,36] #-- MK4 + f[:,47] = 1.0 #-- S4 + f[:,48] = 1.0 #-- S5 + f[:,49] = f[:,29]**3 #-- M6 + f[:,50] = 1.0 #-- S6 + f[:,51] = 1.0 #-- S7 + f[:,52] = 1.0 #-- S8 + #-- shallow water constituents + f[:,53] = f[:,29]**4 #-- m8 + f[:,54] = f[:,29]*f[:,36] #-- mks2 + f[:,55] = f[:,4] #-- msqm + f[:,56] = f[:,4] #-- mtm + f[:,57] = f[:,29]**2 #-- n4 + f[:,58] = f[:,29] #-- eps2 + #-- mean sea level + f[:,59] = 1.0 #-- Z0 + + u[:,0] = 0.0 #-- Sa + u[:,1] = 0.0 #-- Ssa + u[:,2] = 0.0 #-- Mm + u[:,3] = (2.0*xi - 2.0*nu)/dtr #-- MSf + u[:,4] = -2.0*xi/dtr #-- Mf + u[:,7] = (2.0*xi - nu)/dtr #-- 2Q1 + u[:,8] = u[:,7] #-- sigma1 + u[:,9] = u[:,7] #-- q1 + u[:,10] = u[:,7] #-- rho1 + u[:,11] = u[:,7] #-- O1 + u[:,13] = np.arctan2(Mtmp2,Mtmp1)/dtr #-- M1 + u[:,14] = -nu/dtr #-- chi1 + u[:,15] = 0.0 #-- pi1 + u[:,16] = 0.0 #-- P1 + u[:,17] = 0.0 #-- S1 + u[:,18] = -nu_prime/dtr #-- K1 + u[:,19] = 0.0 #-- psi1 + u[:,20] = 0.0 #-- phi1 + u[:,21] = -nu/dtr #-- theta1 + u[:,22] = u[:,21] #-- J1 + u[:,23] = (-2.0*xi - nu)/dtr #-- OO1 + u[:,24] = (2.0*xi - 2.0*nu)/dtr #-- 2N2 + u[:,25] = u[:,24] #-- mu2 + u[:,26] = u[:,24] #-- N2 + u[:,27] = u[:,24] #-- nu2 + u[:,29] = u[:,24] #-- M2 + u[:,31] = (2.0*xi - 2.0*nu)/dtr #-- lambda2 + u[:,32] = (2.0*xi - 2.0*nu - R)/dtr #-- L2 + u[:,33] = 0.0 #-- T2 + u[:,34] = 0.0 #-- S2 + u[:,35] = 0.0 #-- R2 + u[:,36] = -2.0*nu_sec/dtr #-- K2 + u[:,37] = -2.0*nu/dtr #-- eta2 + u[:,38] = (4.0*xi - 4.0*nu)/dtr #-- mns2 + u[:,39] = (2.0*xi - 2.0*nu)/dtr #-- 2SM2 + u[:,40] = (3.0*xi - 3.0*nu)/dtr #-- M3 + u[:,41] = (2.0*xi - 2.0*nu - 2.0*nu_prime)/dtr #-- MK3 + u[:,42] = 0.0 #-- S3 + u[:,43] = (4.0*xi - 4.0*nu)/dtr #-- MN4 + u[:,44] = (4.0*xi - 4.0*nu)/dtr #-- M4 + u[:,45] = (2.0*xi - 2.0*nu)/dtr #-- MS4 + u[:,46] = (2.0*xi - 2.0*nu - 2.0*nu_sec)/dtr #-- MK4 + u[:,47] = 0.0 #-- S4 + u[:,48] 
= 0.0 #-- S5 + u[:,49] = (6.0*xi - 6.0*nu)/dtr #-- M6 + u[:,50] = 0.0 #-- S6 + u[:,51] = 0.0 #-- S7 + u[:,52] = 0.0 #-- S8 #-- shallow water constituents - u[:,53] = 4.0*u[:,29] #-- m8 - u[:,54] = u[:,29] + u[:,36] #-- mks2 + u[:,53] = (8.0*xi - 8.0*nu)/dtr #-- m8 + u[:,54] = (2.0*xi - 2.0*nu - 2.0*nu_sec)/dtr #-- mks2 u[:,55] = u[:,4] #-- msqm u[:,56] = u[:,4] #-- mtm - u[:,57] = 2.0*u[:,29] #-- n4 - u[:,57] = 2.0*u[:,29] #-- MN4 + u[:,57] = (4.0*xi - 4.0*nu)/dtr #-- n4 u[:,58] = u[:,29] #-- eps2 #-- mean sea level u[:,59] = 0.0 #-- Z0 diff --git a/pyTMD/read_FES_model.py b/pyTMD/read_FES_model.py index a0f5a234..599ceb6e 100644 --- a/pyTMD/read_FES_model.py +++ b/pyTMD/read_FES_model.py @@ -134,7 +134,7 @@ def extract_FES_constants(ilon, ilat, directory, model_files, #-- use quick bilinear to interpolate values hci.data[:] = bilinear_interp(lon,lat,hc,ilon,ilat,dtype=hc.dtype) #-- replace nan values with fill_value - hci.mask[:] = np.isnan(hci.data) + hci.mask[:] |= np.isnan(hci.data) hci.data[hci.mask] = hci.fill_value elif (METHOD == 'spline'): #-- interpolate complex form of the constituent with scipy @@ -149,9 +149,9 @@ def extract_FES_constants(ilon, ilat, directory, model_files, hci.mask[:] = f3.ev(ilon,ilat).astype(np.bool) else: #-- use scipy regular grid to interpolate values for a given method - r1 = scipy.interpolate.RegularGridInterpolator((lon,lat), + r1 = scipy.interpolate.RegularGridInterpolator((lat,lon), hc.data, method=METHOD) - r2 = scipy.interpolate.RegularGridInterpolator((lon,lat), + r2 = scipy.interpolate.RegularGridInterpolator((lat,lon), hc.mask, method=METHOD) hci.data[:] = r1.__call__(np.c_[ilat,ilon]) hci.mask[:] = np.ceil(r2.__call__(np.c_[ilat,ilon])).astype(np.bool) diff --git a/pyTMD/read_GOT_model.py b/pyTMD/read_GOT_model.py index e1c3ae2b..38d529c5 100644 --- a/pyTMD/read_GOT_model.py +++ b/pyTMD/read_GOT_model.py @@ -119,7 +119,7 @@ def extract_GOT_constants(ilon, ilat, directory, model_files, #-- use quick bilinear to interpolate values hci.data[:] = bilinear_interp(lon,lat,hc,ilon,ilat,dtype=hc.dtype) #-- replace nan values with fill_value - hci.mask[:] = np.isnan(hci.data) + hci.mask[:] |= np.isnan(hci.data) hci.data[hci.mask] = hci.fill_value elif (METHOD == 'spline'): #-- interpolate complex form of the constituent with scipy @@ -134,9 +134,9 @@ def extract_GOT_constants(ilon, ilat, directory, model_files, hci.mask[:] = f3.ev(ilon,ilat).astype(np.bool) else: #-- use scipy regular grid to interpolate values for a given method - r1 = scipy.interpolate.RegularGridInterpolator((lon,lat), + r1 = scipy.interpolate.RegularGridInterpolator((lat,lon), hc.data, method=METHOD) - r2 = scipy.interpolate.RegularGridInterpolator((lon,lat), + r2 = scipy.interpolate.RegularGridInterpolator((lat,lon), hc.mask, method=METHOD) hci.data[:] = r1.__call__(np.c_[ilat,ilon]) hci.mask[:] = np.ceil(r2.__call__(np.c_[ilat,ilon])).astype(np.bool) diff --git a/test/fes_slev.txt.gz b/test/fes_slev.txt.gz new file mode 100644 index 00000000..2ccc0cd9 Binary files /dev/null and b/test/fes_slev.txt.gz differ diff --git a/pyTMD/data/opoleloadcmcor.test b/test/opoleloadcmcor.test similarity index 100% rename from pyTMD/data/opoleloadcmcor.test rename to test/opoleloadcmcor.test diff --git a/test/test_fes_predict.py b/test/test_fes_predict.py new file mode 100644 index 00000000..c62fcd77 --- /dev/null +++ b/test/test_fes_predict.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python +u""" +test_fes_predict.py (08/2020) +Tests that FES2014 data can be downloaded from AWS S3 bucket +Tests the 
read program to verify that constituents are being extracted +Tests that interpolated results are comparable to FES2014 program + +PYTHON DEPENDENCIES: + numpy: Scientific Computing Tools For Python + https://numpy.org + https://numpy.org/doc/stable/user/numpy-for-matlab-users.html + scipy: Scientific Tools for Python + https://docs.scipy.org/doc/ + boto3: Amazon Web Services (AWS) SDK for Python + https://boto3.amazonaws.com/v1/documentation/api/latest/index.html + +UPDATE HISTORY: + Written 08/2020 +""" +import os +import gzip +import boto3 +import shutil +import pytest +import inspect +import warnings +import posixpath +import numpy as np +import pyTMD.time +import pyTMD.utilities +import pyTMD.read_FES_model +import pyTMD.predict_tide_drift +import pyTMD.infer_minor_corrections +import pyTMD.calc_delta_time + +#-- current file path +filename = inspect.getframeinfo(inspect.currentframe()).filename +filepath = os.path.dirname(os.path.abspath(filename)) + +#-- PURPOSE: Download FES2014 constituents from AWS S3 bucket +def test_download_FES2014(aws_access_key_id,aws_secret_access_key,aws_region_name): + #-- get aws session object + session = boto3.Session( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + region_name=aws_region_name) + #-- get s3 object and bucket object for pytmd data + s3 = session.resource('s3') + bucket = s3.Bucket('pytmd') + + #-- model parameters for FES2014 + model_directory = os.path.join(filepath,'fes2014','ocean_tide') + model_files = ['2n2.nc.gz','k1.nc.gz','k2.nc.gz','m2.nc.gz','m4.nc.gz', + 'mf.nc.gz','mm.nc.gz','msqm.nc.gz','mtm.nc.gz','n2.nc.gz','o1.nc.gz', + 'p1.nc.gz','q1.nc.gz','s1.nc.gz','s2.nc.gz'] + #-- recursively create model directory + os.makedirs(model_directory) + #-- retrieve each model file from s3 + for f in model_files: + #-- retrieve constituent file + obj = bucket.Object(key=posixpath.join('fes2014','ocean_tide',f)) + response = obj.get() + #-- save constituent data + with open(os.path.join(model_directory,f), 'wb') as destination: + shutil.copyfileobj(response['Body'], destination) + assert os.access(os.path.join(model_directory,f), os.F_OK) + +#-- PURPOSE: Tests that interpolated results are comparable to FES program +def test_verify_FES2014(): + #-- model parameters for FES2014 + model_directory = os.path.join(filepath,'fes2014','ocean_tide') + #-- constituent files included in test + model_files = ['2n2.nc.gz','k1.nc.gz','k2.nc.gz','m2.nc.gz','m4.nc.gz', + 'mf.nc.gz','mm.nc.gz','msqm.nc.gz','mtm.nc.gz','n2.nc.gz','o1.nc.gz', + 'p1.nc.gz','q1.nc.gz','s1.nc.gz','s2.nc.gz'] + c = ['2n2','k1','k2','m2','m4','mf','mm','msqm','mtm','n2','o1', + 'p1','q1','s1','s2'] + model_format = 'FES' + VERSION = 'FES2014' + TYPE = 'z' + SCALE = 1.0/100.0 + + #-- read validation dataset + #-- extract time (Modified Julian Days), latitude, longitude, and tide data + names = ('CNES','Hour','Latitude','Longitude','Short_tide','LP_tide', + 'Pure_tide','Geo_tide','Rad_tide') + formats = ('f','i','f','f','f','f','f','f','f') + file_contents = np.loadtxt(os.path.join(filepath,'fes_slev.txt.gz'), + skiprows=1,dtype=dict(names=names,formats=formats)) + longitude = file_contents['Longitude'] + latitude = file_contents['Latitude'] + #-- convert short tide estimates to meters + validation = file_contents['Short_tide']/100.0 + npts = len(file_contents) + + #-- convert time from CNES Julian Days to days since 1992-01-01T00:00:00 + #-- CNES Julian Days = Days relative to 1950-01-01 (MJD:33282) + tide_time = file_contents['CNES'] - 
15340.0 + + #-- extract amplitude and phase from tide model + amp,ph = pyTMD.read_FES_model.extract_FES_constants(longitude, latitude, + model_directory, model_files, TYPE=TYPE, VERSION=VERSION, + METHOD='spline', SCALE=SCALE) + #-- interpolate delta times from calendar dates to tide time + delta_file = pyTMD.utilities.get_data_path(['data','merged_deltat.data']) + deltat = pyTMD.calc_delta_time(delta_file, tide_time) + #-- calculate complex phase in radians for Euler's + #-- calculate constituent oscillations + hc = amp*np.exp(-1j*ph*np.pi/180.0) + + #-- allocate for out tides at point + tide = np.ma.zeros((npts)) + tide.mask = np.zeros((npts),dtype=np.bool) + #-- predict tidal elevations at time and infer minor corrections + tide.mask[:] = np.any(hc.mask, axis=1) + tide.data[:] = pyTMD.predict_tide_drift(tide_time, hc, c, + DELTAT=deltat, CORRECTIONS=model_format) + minor = pyTMD.infer_minor_corrections(tide_time, hc, c, + DELTAT=deltat, CORRECTIONS=model_format) + tide.data[:] += minor.data[:] + + #-- will verify differences between model outputs are within tolerance + eps = 0.05 + #-- calculate differences between fes2014 and python version + difference = np.ma.zeros((npts)) + difference.data[:] = tide.data - validation + difference.mask = np.copy(tide.mask) + if not np.all(difference.mask): + assert np.all(np.abs(difference) <= eps) diff --git a/test/test_ocean_pole_tide.py b/test/test_ocean_pole_tide.py index 59b4b754..b0415a25 100755 --- a/test/test_ocean_pole_tide.py +++ b/test/test_ocean_pole_tide.py @@ -4,6 +4,7 @@ """ import os import re +import inspect import warnings import pytest import numpy as np @@ -15,6 +16,10 @@ from pyTMD.read_iers_EOP import read_iers_EOP from pyTMD.read_ocean_pole_tide import read_ocean_pole_tide +#-- current file path +filename = inspect.getframeinfo(inspect.currentframe()).filename +filepath = os.path.dirname(os.path.abspath(filename)) + #-- parameterize interpolation method @pytest.mark.parametrize("METHOD", ['spline','nearest','linear']) #-- test the interpolation of ocean pole tide values @@ -47,7 +52,7 @@ def test_ocean_pole_tide(METHOD): assert (np.abs(K - 5.3394043696e+03) < eps) #-- read test file for values - ocean_pole_test_file = get_data_path(['data','opoleloadcmcor.test']) + ocean_pole_test_file = os.path.join(filepath,'opoleloadcmcor.test') names = ('MJD','xbar_p','ybar_p','x_p','y_p','m1','m2','u_radial','u_north') formats = ('i','f','f','f','f','f','f','f','f') validation = np.loadtxt(ocean_pole_test_file,skiprows=26, diff --git a/test/test_perth3_read.py b/test/test_perth3_read.py index 39c0f1ba..73549aeb 100644 --- a/test/test_perth3_read.py +++ b/test/test_perth3_read.py @@ -3,7 +3,7 @@ test_perth3_read.py (08/2020) Tests that GOT4.7 data can be downloaded from AWS S3 bucket Tests the read program to verify that constituents are being extracted -Tests that interpolated results are comparable to NASA PERTH3 program program +Tests that interpolated results are comparable to NASA PERTH3 program PYTHON DEPENDENCIES: numpy: Scientific Computing Tools For Python @@ -31,12 +31,14 @@ import pyTMD.read_GOT_model import pyTMD.predict_tide_drift import pyTMD.infer_minor_corrections +import pyTMD.compute_tide_corrections import pyTMD.calc_delta_time #-- current file path filename = inspect.getframeinfo(inspect.currentframe()).filename filepath = os.path.dirname(os.path.abspath(filename)) +#-- PURPOSE: Download GOT4.7 constituents from AWS S3 bucket def test_download_GOT47(aws_access_key_id,aws_secret_access_key,aws_region_name): #-- get aws 
session object session = boto3.Session( @@ -63,8 +65,10 @@ def test_download_GOT47(aws_access_key_id,aws_secret_access_key,aws_region_name) shutil.copyfileobj(response['Body'], destination) assert os.access(os.path.join(model_directory,f), os.F_OK) +#-- parameterize interpolation method +@pytest.mark.parametrize("METHOD", ['spline','linear','bilinear']) #-- PURPOSE: Tests that interpolated results are comparable to PERTH3 program -def test_verify_GOT47(): +def test_verify_GOT47(METHOD): #-- model parameters for GOT4.7 model_directory = os.path.join(filepath,'GOT4.7','grids_oceantide') #-- perth3 test program infers m4 tidal constituent @@ -98,7 +102,7 @@ def test_verify_GOT47(): #-- extract amplitude and phase from tide model amp,ph = pyTMD.read_GOT_model.extract_GOT_constants(longitude, latitude, - model_directory, model_files, METHOD='spline', SCALE=SCALE) + model_directory, model_files, METHOD=METHOD, SCALE=SCALE) #-- interpolate delta times from calendar dates to tide time delta_file = pyTMD.utilities.get_data_path(['data','merged_deltat.data']) deltat = pyTMD.calc_delta_time(delta_file, tide_time) @@ -126,3 +130,26 @@ def test_verify_GOT47(): difference.mask = (tide.mask | validation.mask) if not np.all(difference.mask): assert np.all(np.abs(difference) <= eps) + +#-- parameterize interpolation method +@pytest.mark.parametrize("METHOD", ['spline','nearest','bilinear']) +#-- PURPOSE: test the tide correction wrapper function +def test_Ross_Ice_Shelf(METHOD): + #-- create an image around the Ross Ice Shelf + xlimits = np.array([-740000,520000]) + ylimits = np.array([-1430000,-300000]) + spacing = np.array([5e3,-5e3]) + #-- x and y coordinates + x = np.arange(xlimits[0],xlimits[1]+spacing[0],spacing[0]) + y = np.arange(ylimits[1],ylimits[0]+spacing[1],spacing[1]) + xgrid,ygrid = np.meshgrid(x,y) + #-- x and y dimensions + nx = np.int((xlimits[1]-xlimits[0])/spacing[0])+1 + ny = np.int((ylimits[0]-ylimits[1])/spacing[1])+1 + #-- time dimension + delta_time = 0.0 + #-- calculate tide map + tide = pyTMD.compute_tide_corrections(xgrid, ygrid, delta_time, + DIRECTORY=filepath, MODEL='GOT4.7', EPOCH=(2018,1,1,0,0,0), + TYPE='grid', TIME='GPS', EPSG=3031, METHOD=METHOD) + assert np.any(tide)
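
Note on the notebook changes above: the deprecated pyproj 1.x pattern (pyproj.Proj with "+init=EPSG:..." and pyproj.transform) is replaced by the pyproj 2 CRS/Transformer API. A minimal standalone sketch of the new pattern follows; the grid extents and spacing here are illustrative placeholders, not the notebooks' actual Antarctic limits.

import numpy as np
import pyproj

#-- illustrative polar stereographic grid (meters); extents are placeholders
x = np.arange(-100e3, 105e3, 5e3)
y = np.arange(-1000e3, -795e3, 5e3)
xgrid, ygrid = np.meshgrid(x, y)
#-- Antarctic Polar Stereographic (EPSG:3031) to WGS84 geographic (EPSG:4326)
crs1 = pyproj.CRS.from_string("epsg:3031")
crs2 = pyproj.CRS.from_string("epsg:4326")
#-- always_xy=True keeps the (x, y) -> (longitude, latitude) argument order
transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)
lon, lat = transformer.transform(xgrid.flatten(), ygrid.flatten())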
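
Note on the pyTMD/bilinear_interp.py changes: the new fill_value and extrapolate keywords can be exercised with a synthetic grid. The sketch below is an assumption-laden illustration, not a definitive test: it assumes the module-level import path pyTMD.bilinear_interp, uses a made-up masked grid, and expects the numpy of that era (the module still relies on the np.float and np.bool aliases).

import numpy as np
from pyTMD.bilinear_interp import bilinear_interp

#-- synthetic 1-degree global grid with one invalid (masked) cell
ilon = np.arange(0.0, 360.0, 1.0)
ilat = np.arange(-90.0, 91.0, 1.0)
mask = np.zeros((len(ilat), len(ilon)), dtype=bool)
mask[90, 180] = True
idata = np.ma.array(np.cos(np.radians(ilat))[:, None]*np.ones((1, len(ilon))), mask=mask)
#-- output points whose interpolation cells touch the masked grid cell
lon = np.array([179.5, 180.5])
lat = np.array([-0.5, 0.5])
#-- default: points with any invalid corner are returned masked at fill_value
d1 = bilinear_interp(ilon, ilat, idata, lon, lat, fill_value=np.nan)
#-- extrapolate=True: interpolate from the remaining valid corners instead
d2 = bilinear_interp(ilon, ilat, idata, lon, lat, fill_value=np.nan, extrapolate=True)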