diff --git a/ci/requirements-py36.yml b/ci/requirements-py36.yml
index 0790f20764d..fd63fe26130 100644
--- a/ci/requirements-py36.yml
+++ b/ci/requirements-py36.yml
@@ -20,6 +20,7 @@ dependencies:
   - rasterio
   - bottleneck
   - zarr
+  - pseudonetcdf>=3.0.1
   - pip:
     - coveralls
     - pytest-cov
diff --git a/doc/installing.rst b/doc/installing.rst
index bb42129deea..33f01b8c770 100644
--- a/doc/installing.rst
+++ b/doc/installing.rst
@@ -28,6 +28,9 @@ For netCDF and IO
 - `cftime <https://unidata.github.io/cftime>`__: recommended if you
   want to encode/decode datetimes for non-standard calendars or dates before
   year 1678 or after year 2262.
+- `PseudoNetCDF <http://github.com/barronh/PseudoNetCDF>`__: recommended
+  for accessing CAMx, GEOS-Chem (bpch), NOAA ARL files, ICARTT files
+  (ffi1001) and many other formats.
 
 For accelerating xarray
 ~~~~~~~~~~~~~~~~~~~~~~~
@@ -65,9 +68,9 @@ with its recommended dependencies using the conda command line tool::
 
 .. _conda: http://conda.io/
 
-We recommend using the community maintained `conda-forge <https://conda-forge.github.io/>`__ channel if you need difficult\-to\-build dependencies such as cartopy or pynio::
+We recommend using the community maintained `conda-forge <https://conda-forge.github.io/>`__ channel if you need difficult\-to\-build dependencies such as cartopy, pynio or PseudoNetCDF::
 
-    $ conda install -c conda-forge xarray cartopy pynio
+    $ conda install -c conda-forge xarray cartopy pynio pseudonetcdf
 
 New releases may also appear in conda-forge before being updated in the default channel.
diff --git a/doc/io.rst b/doc/io.rst
index 668416e714d..e92ecd01cb4 100644
--- a/doc/io.rst
+++ b/doc/io.rst
@@ -650,7 +650,26 @@ We recommend installing PyNIO via conda::
 
 .. _PyNIO: https://www.pyngl.ucar.edu/Nio.shtml
 
-.. _combining multiple files:
+.. _io.PseudoNetCDF:
+
+Formats supported by PseudoNetCDF
+---------------------------------
+
+xarray can also read CAMx, BPCH, ARL PACKED BIT, and many other file
+formats supported by PseudoNetCDF_, if PseudoNetCDF is installed.
+PseudoNetCDF can also provide Climate and Forecast (CF) Conventions
+metadata for CMAQ files. In addition, PseudoNetCDF can automatically
+register custom readers that subclass PseudoNetCDF.PseudoNetCDFFile.
+PseudoNetCDF can identify readers heuristically, or the format can be
+specified via a key in ``backend_kwargs``.
+
+To use PseudoNetCDF to read such files, supply
+``engine='pseudonetcdf'`` to :py:func:`~xarray.open_dataset`.
+
+Add ``backend_kwargs={'format': '<format name>'}`` where ``<format name>``
+options are listed on the PseudoNetCDF page.
+
+.. _PseudoNetCDF: http://github.com/barronh/PseudoNetCDF
 
 
 Formats supported by Pandas
@@ -662,6 +681,8 @@ exporting your objects to pandas and using its broad range of `IO tools`_.
 
 .. _IO tools: http://pandas.pydata.org/pandas-docs/stable/io.html
 
+.. _combining multiple files:
+
 Combining multiple files
 ------------------------
diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index c4c8db243d4..bfa24340bcd 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -41,6 +41,10 @@ Enhancements
   dask<0.17.4. (related to :issue:`2203`)
   By `Keisuke Fujii <https://github.com/fujiisoup>`_.
 
+- :py:meth:`~DataArray.cumsum` and :py:meth:`~DataArray.cumprod` now support
+  aggregation over multiple dimensions at the same time. This is the default
+  behavior when dimensions are not specified (previously this raised an
+  error).
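For orientation, the reader-side API that this patch wires up can be exercised
as in the following sketch (not part of the patch itself; the file name is
hypothetical, and ``'uamiv'`` is one of the format keys documented by
PseudoNetCDF)::

    import xarray as xr

    # A hypothetical CAMx output file; the format key travels through
    # backend_kwargs to PseudoNetCDF's pncopen.
    ds = xr.open_dataset('camx_output.uamiv',
                         engine='pseudonetcdf',
                         backend_kwargs={'format': 'uamiv'})

    # With this engine, mask_and_scale defaults to False, so values are
    # returned unscaled unless explicitly requested otherwise.
    print(ds)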
diff --git a/xarray/backends/__init__.py b/xarray/backends/__init__.py
index d85893afb0b..47a2011a3af 100644
--- a/xarray/backends/__init__.py
+++ b/xarray/backends/__init__.py
@@ -10,6 +10,7 @@
 from .pynio_ import NioDataStore
 from .scipy_ import ScipyDataStore
 from .h5netcdf_ import H5NetCDFStore
+from .pseudonetcdf_ import PseudoNetCDFDataStore
 from .zarr import ZarrStore
 
 __all__ = [
@@ -21,4 +22,5 @@
     'ScipyDataStore',
     'H5NetCDFStore',
     'ZarrStore',
+    'PseudoNetCDFDataStore',
 ]
diff --git a/xarray/backends/api.py b/xarray/backends/api.py
index c3b2aa59fcd..753f8394a7b 100644
--- a/xarray/backends/api.py
+++ b/xarray/backends/api.py
@@ -152,9 +152,10 @@ def _finalize_store(write, store):
 
 
 def open_dataset(filename_or_obj, group=None, decode_cf=True,
-                 mask_and_scale=True, decode_times=True, autoclose=False,
+                 mask_and_scale=None, decode_times=True, autoclose=False,
                  concat_characters=True, decode_coords=True, engine=None,
-                 chunks=None, lock=None, cache=None, drop_variables=None):
+                 chunks=None, lock=None, cache=None, drop_variables=None,
+                 backend_kwargs=None):
     """Load and decode a dataset from a file or file-like object.
 
     Parameters
@@ -178,7 +179,8 @@ def open_dataset(filename_or_obj, group=None, decode_cf=True,
         taken from variable attributes (if they exist). If the `_FillValue` or
         `missing_value` attribute contains multiple values a warning will be
         issued and all array values matching one of the multiple values will
-        be replaced by NA.
+        be replaced by NA. mask_and_scale defaults to True except for the
+        pseudonetcdf backend.
     decode_times : bool, optional
         If True, decode times encoded in the standard NetCDF datetime format
         into datetime objects. Otherwise, leave them encoded as numbers.
@@ -194,7 +196,7 @@ def open_dataset(filename_or_obj, group=None, decode_cf=True,
     decode_coords : bool, optional
         If True, decode the 'coordinates' attribute to identify coordinates in
         the resulting dataset.
-    engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio'}, optional
+    engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'pseudonetcdf'}, optional
         Engine to use when reading files. If not provided, the default engine
         is chosen based on available dependencies, with a preference for
         'netcdf4'.
@@ -219,6 +221,10 @@ def open_dataset(filename_or_obj, group=None, decode_cf=True,
         A variable or list of variables to exclude from being parsed from the
         dataset. This may be useful to drop variables with problems or
         inconsistent values.
+    backend_kwargs : dict, optional
+        A dictionary of keyword arguments to pass on to the backend. This
+        may be useful when backend options would improve performance or
+        allow user control of dataset processing.
 
     Returns
     -------
@@ -229,6 +235,10 @@ def open_dataset(filename_or_obj, group=None, decode_cf=True,
     --------
     open_mfdataset
     """
+
+    if mask_and_scale is None:
+        mask_and_scale = engine != 'pseudonetcdf'
+
     if not decode_cf:
         mask_and_scale = False
         decode_times = False
@@ -238,6 +248,9 @@ def open_dataset(filename_or_obj, group=None, decode_cf=True,
     if cache is None:
         cache = chunks is None
 
+    if backend_kwargs is None:
+        backend_kwargs = {}
+
     def maybe_decode_store(store, lock=False):
         ds = conventions.decode_cf(
             store, mask_and_scale=mask_and_scale, decode_times=decode_times,
@@ -303,18 +316,26 @@ def maybe_decode_store(store, lock=False):
         if engine == 'netcdf4':
             store = backends.NetCDF4DataStore.open(filename_or_obj,
                                                    group=group,
-                                                   autoclose=autoclose)
+                                                   autoclose=autoclose,
+                                                   **backend_kwargs)
         elif engine == 'scipy':
             store = backends.ScipyDataStore(filename_or_obj,
-                                            autoclose=autoclose)
+                                            autoclose=autoclose,
+                                            **backend_kwargs)
         elif engine == 'pydap':
-            store = backends.PydapDataStore.open(filename_or_obj)
+            store = backends.PydapDataStore.open(filename_or_obj,
+                                                 **backend_kwargs)
         elif engine == 'h5netcdf':
             store = backends.H5NetCDFStore(filename_or_obj, group=group,
-                                           autoclose=autoclose)
+                                           autoclose=autoclose,
+                                           **backend_kwargs)
         elif engine == 'pynio':
             store = backends.NioDataStore(filename_or_obj,
-                                          autoclose=autoclose)
+                                          autoclose=autoclose,
+                                          **backend_kwargs)
+        elif engine == 'pseudonetcdf':
+            store = backends.PseudoNetCDFDataStore.open(
+                filename_or_obj, autoclose=autoclose, **backend_kwargs)
         else:
             raise ValueError('unrecognized engine for open_dataset: %r'
                              % engine)
@@ -334,9 +355,10 @@ def maybe_decode_store(store, lock=False):
 
 
 def open_dataarray(filename_or_obj, group=None, decode_cf=True,
-                   mask_and_scale=True, decode_times=True, autoclose=False,
+                   mask_and_scale=None, decode_times=True, autoclose=False,
                    concat_characters=True, decode_coords=True, engine=None,
-                   chunks=None, lock=None, cache=None, drop_variables=None):
+                   chunks=None, lock=None, cache=None, drop_variables=None,
+                   backend_kwargs=None):
     """Open an DataArray from a netCDF file containing a single data variable.
 
     This is designed to read netCDF files with only one data variable. If
@@ -363,7 +385,8 @@ def open_dataarray(filename_or_obj, group=None, decode_cf=True,
         taken from variable attributes (if they exist). If the `_FillValue` or
         `missing_value` attribute contains multiple values a warning will be
         issued and all array values matching one of the multiple values will
-        be replaced by NA.
+        be replaced by NA. mask_and_scale defaults to True except for the
+        pseudonetcdf backend.
     decode_times : bool, optional
         If True, decode times encoded in the standard NetCDF datetime format
         into datetime objects. Otherwise, leave them encoded as numbers.
@@ -403,6 +426,10 @@ def open_dataarray(filename_or_obj, group=None, decode_cf=True,
         A variable or list of variables to exclude from being parsed from the
         dataset. This may be useful to drop variables with problems or
         inconsistent values.
+    backend_kwargs : dict, optional
+        A dictionary of keyword arguments to pass on to the backend. This
+        may be useful when backend options would improve performance or
+        allow user control of dataset processing.
 
     Notes
     -----
@@ -417,13 +444,15 @@ def open_dataarray(filename_or_obj, group=None, decode_cf=True,
     --------
     open_dataset
     """
+
     dataset = open_dataset(filename_or_obj, group=group, decode_cf=decode_cf,
                            mask_and_scale=mask_and_scale,
                            decode_times=decode_times, autoclose=autoclose,
                            concat_characters=concat_characters,
                            decode_coords=decode_coords, engine=engine,
                            chunks=chunks, lock=lock, cache=cache,
-                           drop_variables=drop_variables)
+                           drop_variables=drop_variables,
+                           backend_kwargs=backend_kwargs)
 
     if len(dataset.data_vars) != 1:
         raise ValueError('Given file dataset contains more than one data '
diff --git a/xarray/backends/pseudonetcdf_.py b/xarray/backends/pseudonetcdf_.py
new file mode 100644
index 00000000000..c481bf848b9
--- /dev/null
+++ b/xarray/backends/pseudonetcdf_.py
@@ -0,0 +1,101 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import functools
+
+import numpy as np
+
+from .. import Variable
+from ..core import indexing
+from ..core.pycompat import OrderedDict
+from ..core.utils import Frozen, FrozenOrderedDict
+
+from .common import AbstractDataStore, BackendArray, DataStorePickleMixin
+
+
+class PncArrayWrapper(BackendArray):
+
+    def __init__(self, variable_name, datastore):
+        self.datastore = datastore
+        self.variable_name = variable_name
+        array = self.get_array()
+        self.shape = array.shape
+        self.dtype = np.dtype(array.dtype)
+
+    def get_array(self):
+        self.datastore.assert_open()
+        return self.datastore.ds.variables[self.variable_name]
+
+    def __getitem__(self, key):
+        key, np_inds = indexing.decompose_indexer(
+            key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR)
+
+        with self.datastore.ensure_open(autoclose=True):
+            array = self.get_array()[key.tuple]  # index backend array
+
+        if len(np_inds.tuple) > 0:
+            # index the loaded np.ndarray
+            array = indexing.NumpyIndexingAdapter(array)[np_inds]
+        return array
+
+
+class PseudoNetCDFDataStore(AbstractDataStore, DataStorePickleMixin):
+    """Store for accessing datasets via PseudoNetCDF
+    """
+    @classmethod
+    def open(cls, filename, format=None, writer=None,
+             autoclose=False, **format_kwds):
+        from PseudoNetCDF import pncopen
+        # forward an explicit format to pncopen; otherwise PseudoNetCDF
+        # identifies a reader heuristically
+        if format is not None:
+            format_kwds['format'] = format
+        opener = functools.partial(pncopen, filename, **format_kwds)
+        ds = opener()
+        mode = format_kwds.get('mode', 'r')
+        return cls(ds, mode=mode, writer=writer, opener=opener,
+                   autoclose=autoclose)
+
+    def __init__(self, pnc_dataset, mode='r', writer=None, opener=None,
+                 autoclose=False):
+        # the opener is kept so the dataset can be reopened after pickling
+        # or after being closed by autoclose
+        if autoclose and opener is None:
+            raise ValueError('autoclose requires an opener')
+
+        self._ds = pnc_dataset
+        self._autoclose = autoclose
+        self._isopen = True
+        self._opener = opener
+        self._mode = mode
+        super(PseudoNetCDFDataStore, self).__init__()
+
+    def open_store_variable(self, name, var):
+        with self.ensure_open(autoclose=False):
+            data = indexing.LazilyOuterIndexedArray(
+                PncArrayWrapper(name, self)
+            )
+        attrs = OrderedDict((k, getattr(var, k)) for k in var.ncattrs())
+        return Variable(var.dimensions, data, attrs)
+
+    def get_variables(self):
+        with self.ensure_open(autoclose=False):
+            return FrozenOrderedDict((k, self.open_store_variable(k, v))
+                                     for k, v in self.ds.variables.items())
+
+    def get_attrs(self):
+        with self.ensure_open(autoclose=True):
+            return Frozen(dict((k, getattr(self.ds, k))
+                               for k in self.ds.ncattrs()))
+
+    def get_dimensions(self):
+        with self.ensure_open(autoclose=True):
+            return Frozen(self.ds.dimensions)
+
+    def get_encoding(self):
+        encoding = {}
+        encoding['unlimited_dims'] = set(
+            [k for k in self.ds.dimensions
+             if self.ds.dimensions[k].isunlimited()])
+        return encoding
+
+    def close(self):
+        if self._isopen:
+            self.ds.close()
+        self._isopen = False
diff --git a/xarray/tests/__init__.py b/xarray/tests/__init__.py
index 3acd26235ce..e93d9a80145 100644
--- a/xarray/tests/__init__.py
+++ b/xarray/tests/__init__.py
@@ -68,6 +68,7 @@ def _importorskip(modname, minversion=None):
 has_netCDF4, requires_netCDF4 = _importorskip('netCDF4')
 has_h5netcdf, requires_h5netcdf = _importorskip('h5netcdf')
 has_pynio, requires_pynio = _importorskip('Nio')
+has_pseudonetcdf, requires_pseudonetcdf = _importorskip('PseudoNetCDF')
 has_cftime, requires_cftime = _importorskip('cftime')
 has_dask, requires_dask = _importorskip('dask')
 has_bottleneck, requires_bottleneck = _importorskip('bottleneck')
diff --git a/xarray/tests/data/example.ict b/xarray/tests/data/example.ict
new file mode 100644
index 00000000000..bc04888fb80
--- /dev/null
+++ b/xarray/tests/data/example.ict
@@ -0,0 +1,31 @@
+27, 1001
+Henderson, Barron
+U.S. EPA
+Example file with artificial data
+JUST_A_TEST
+1, 1
+2018, 04, 27, 2018, 04, 27
+0
+Start_UTC
+5
+1, 1, 1, 1, 1
+-9999, -9999, -9999, -9999, -9999
+lat, degrees_north
+lon, degrees_east
+elev, meters
+TEST_ppbv, ppbv
+TESTM_ppbv, ppbv
+0
+8
+ULOD_FLAG: -7777
+ULOD_VALUE: N/A
+LLOD_FLAG: -8888
+LLOD_VALUE: N/A, N/A, N/A, N/A, 0.025
+OTHER_COMMENTS: www-air.larc.nasa.gov/missions/etc/IcarttDataFormat.htm
+REVISION: R0
+R0: No comments for this revision.
+Start_UTC, lat, lon, elev, TEST_ppbv, TESTM_ppbv
+43200, 41.00000, -71.00000, 5, 1.2345, 2.220
+46800, 42.00000, -72.00000, 15, 2.3456, -9999
+50400, 42.00000, -73.00000, 20, 3.4567, -7777
+50400, 42.00000, -74.00000, 25, 4.5678, -8888
\ No newline at end of file
diff --git a/xarray/tests/data/example.uamiv b/xarray/tests/data/example.uamiv
new file mode 100644
index 00000000000..fcedcd53097
Binary files /dev/null and b/xarray/tests/data/example.uamiv differ
diff --git a/xarray/tests/test_backends.py b/xarray/tests/test_backends.py
index 1e7a09fa55a..0e6151b2db5 100644
--- a/xarray/tests/test_backends.py
+++ b/xarray/tests/test_backends.py
@@ -32,7 +32,7 @@
     assert_identical, has_dask, has_netCDF4, has_scipy, network, raises_regex,
     requires_dask, requires_h5netcdf, requires_netCDF4, requires_pathlib,
     requires_pydap, requires_pynio, requires_rasterio, requires_scipy,
-    requires_scipy_or_netCDF4, requires_zarr,
+    requires_scipy_or_netCDF4, requires_zarr, requires_pseudonetcdf,
     requires_cftime)
 from .test_dataset import create_test_data
 
@@ -63,6 +63,13 @@ def open_example_dataset(name, *args, **kwargs):
                         *args, **kwargs)
 
 
+def open_example_mfdataset(names, *args, **kwargs):
+    return open_mfdataset(
+        [os.path.join(os.path.dirname(__file__), 'data', name)
+         for name in names],
+        *args, **kwargs)
+
+
 def create_masked_and_scaled_data():
     x = np.array([np.nan, np.nan, 10, 10.1, 10.2], dtype=np.float32)
     encoding = {'_FillValue': -1, 'add_offset': 10,
@@ -2483,6 +2490,229 @@ class PyNioTestAutocloseTrue(PyNioTest):
     autoclose = True
 
 
+@requires_pseudonetcdf
+class PseudoNetCDFFormatTest(TestCase):
+    autoclose = True
+
+    def open(self, path, **kwargs):
+        return open_dataset(path, engine='pseudonetcdf',
+                            autoclose=self.autoclose,
+                            **kwargs)
+
+    @contextlib.contextmanager
+    def roundtrip(self, data, save_kwargs={}, open_kwargs={},
+                  allow_cleanup_failure=False):
+        with create_tmp_file(
+                allow_cleanup_failure=allow_cleanup_failure) as path:
+            self.save(data, path, **save_kwargs)
+            with self.open(path, **open_kwargs) as ds:
+                yield ds
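+    # A minimal sketch of what these helpers amount to (editorial
+    # illustration, not part of the original patch): ``self.open`` simply
+    # forwards to ``open_dataset`` with this backend, e.g.
+    #
+    #     ds = open_dataset(path, engine='pseudonetcdf',
+    #                       backend_kwargs={'format': 'ffi1001'})
+    #
+    # while ``roundtrip`` writes via ``self.save`` below and re-opens the
+    # result so write/read symmetry can be asserted.
+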
+    def test_ict_format(self):
+        """
+        Open an ICARTT (ffi1001) file and test data variables
+        """
+        ictfile = open_example_dataset('example.ict',
+                                       engine='pseudonetcdf',
+                                       autoclose=False,
+                                       backend_kwargs={'format': 'ffi1001'})
+        stdattr = {
+            'fill_value': -9999.0,
+            'missing_value': -9999,
+            'scale': 1,
+            'llod_flag': -8888,
+            'llod_value': 'N/A',
+            'ulod_flag': -7777,
+            'ulod_value': 'N/A'
+        }
+
+        def myatts(**attrs):
+            outattr = stdattr.copy()
+            outattr.update(attrs)
+            return outattr
+
+        input = {
+            'coords': {},
+            'attrs': {
+                'fmt': '1001', 'n_header_lines': 27,
+                'PI_NAME': 'Henderson, Barron',
+                'ORGANIZATION_NAME': 'U.S. EPA',
+                'SOURCE_DESCRIPTION': 'Example file with artificial data',
+                'MISSION_NAME': 'JUST_A_TEST',
+                'VOLUME_INFO': '1, 1',
+                'SDATE': '2018, 04, 27', 'WDATE': '2018, 04, 27',
+                'TIME_INTERVAL': '0',
+                'INDEPENDENT_VARIABLE': 'Start_UTC',
+                'ULOD_FLAG': '-7777', 'ULOD_VALUE': 'N/A',
+                'LLOD_FLAG': '-8888',
+                'LLOD_VALUE': 'N/A, N/A, N/A, N/A, 0.025',
+                'OTHER_COMMENTS': ('www-air.larc.nasa.gov/missions/etc/' +
+                                   'IcarttDataFormat.htm'),
+                'REVISION': 'R0',
+                'R0': 'No comments for this revision.',
+                'TFLAG': 'Start_UTC'
+            },
+            'dims': {'POINTS': 4},
+            'data_vars': {
+                'Start_UTC': {
+                    'data': [43200.0, 46800.0, 50400.0, 50400.0],
+                    'dims': ('POINTS',),
+                    'attrs': myatts(
+                        units='Start_UTC',
+                        standard_name='Start_UTC',
+                    )
+                },
+                'lat': {
+                    'data': [41.0, 42.0, 42.0, 42.0],
+                    'dims': ('POINTS',),
+                    'attrs': myatts(
+                        units='degrees_north',
+                        standard_name='lat',
+                    )
+                },
+                'lon': {
+                    'data': [-71.0, -72.0, -73.0, -74.0],
+                    'dims': ('POINTS',),
+                    'attrs': myatts(
+                        units='degrees_east',
+                        standard_name='lon',
+                    )
+                },
+                'elev': {
+                    'data': [5.0, 15.0, 20.0, 25.0],
+                    'dims': ('POINTS',),
+                    'attrs': myatts(
+                        units='meters',
+                        standard_name='elev',
+                    )
+                },
+                'TEST_ppbv': {
+                    'data': [1.2345, 2.3456, 3.4567, 4.5678],
+                    'dims': ('POINTS',),
+                    'attrs': myatts(
+                        units='ppbv',
+                        standard_name='TEST_ppbv',
+                    )
+                },
+                'TESTM_ppbv': {
+                    'data': [2.22, -9999.0, -7777.0, -8888.0],
+                    'dims': ('POINTS',),
+                    'attrs': myatts(
+                        units='ppbv',
+                        standard_name='TESTM_ppbv',
+                        llod_value=0.025
+                    )
+                }
+            }
+        }
+        chkfile = Dataset.from_dict(input)
+        assert_identical(ictfile, chkfile)
+
+    def test_ict_format_write(self):
+        fmtkw = {'format': 'ffi1001'}
+        expected = open_example_dataset('example.ict',
+                                        engine='pseudonetcdf',
+                                        autoclose=False,
+                                        backend_kwargs=fmtkw)
+        with self.roundtrip(expected, save_kwargs=fmtkw,
+                            open_kwargs={'backend_kwargs': fmtkw}) as actual:
+            assert_identical(expected, actual)
+
+    def test_uamiv_format_read(self):
+        """
+        Open a CAMx file and test data variables
+        """
+        with warnings.catch_warnings():
+            warnings.filterwarnings('ignore', category=UserWarning,
+                                    message=('IOAPI_ISPH is assumed to be ' +
+                                             '6370000.; consistent with WRF'))
+            camxfile = open_example_dataset('example.uamiv',
+                                            engine='pseudonetcdf',
+                                            autoclose=True,
+                                            backend_kwargs={'format': 'uamiv'})
+        data = np.arange(20, dtype='f').reshape(1, 1, 4, 5)
+        expected = xr.Variable(('TSTEP', 'LAY', 'ROW', 'COL'), data,
+                               dict(units='ppm', long_name='O3'.ljust(16),
+                                    var_desc='O3'.ljust(80)))
+        actual = camxfile.variables['O3']
+        assert_allclose(expected, actual)
+
+        data = np.array(['2002-06-03'], 'datetime64[ns]')
+        expected = xr.Variable(('TSTEP',), data,
+                               dict(bounds='time_bounds',
+                                    long_name=('synthesized time coordinate ' +
+                                               'from SDATE, STIME, STEP ' +
+                                               'global attributes')))
+        actual = camxfile.variables['time']
+        assert_allclose(expected, actual)
+        camxfile.close()
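+    # The multi-file test below relies on ``open_mfdataset`` concatenating
+    # the same uamiv file twice along TSTEP, so each expected array is the
+    # single-file array doubled on that axis (a sketch of the pattern, not
+    # part of the original patch):
+    #
+    #     data = np.concatenate([data1] * 2, axis=0)
+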
+    def test_uamiv_format_mfread(self):
+        """
+        Open multiple CAMx files and test data variables
+        """
+        with warnings.catch_warnings():
+            warnings.filterwarnings('ignore', category=UserWarning,
+                                    message=('IOAPI_ISPH is assumed to be ' +
+                                             '6370000.; consistent with WRF'))
+            camxfile = open_example_mfdataset(
+                ['example.uamiv',
+                 'example.uamiv'],
+                engine='pseudonetcdf',
+                autoclose=True,
+                concat_dim='TSTEP',
+                backend_kwargs={'format': 'uamiv'})
+
+        data1 = np.arange(20, dtype='f').reshape(1, 1, 4, 5)
+        data = np.concatenate([data1] * 2, axis=0)
+        expected = xr.Variable(('TSTEP', 'LAY', 'ROW', 'COL'), data,
+                               dict(units='ppm', long_name='O3'.ljust(16),
+                                    var_desc='O3'.ljust(80)))
+        actual = camxfile.variables['O3']
+        assert_allclose(expected, actual)
+
+        data1 = np.array(['2002-06-03'], 'datetime64[ns]')
+        data = np.concatenate([data1] * 2, axis=0)
+        expected = xr.Variable(('TSTEP',), data,
+                               dict(bounds='time_bounds',
+                                    long_name=('synthesized time coordinate ' +
+                                               'from SDATE, STIME, STEP ' +
+                                               'global attributes')))
+        actual = camxfile.variables['time']
+        assert_allclose(expected, actual)
+        camxfile.close()
+
+    def test_uamiv_format_write(self):
+        fmtkw = {'format': 'uamiv'}
+        with warnings.catch_warnings():
+            warnings.filterwarnings('ignore', category=UserWarning,
+                                    message=('IOAPI_ISPH is assumed to be ' +
+                                             '6370000.; consistent with WRF'))
+            expected = open_example_dataset('example.uamiv',
+                                            engine='pseudonetcdf',
+                                            autoclose=False,
+                                            backend_kwargs=fmtkw)
+        with self.roundtrip(expected,
+                            save_kwargs=fmtkw,
+                            open_kwargs={'backend_kwargs': fmtkw}) as actual:
+            assert_identical(expected, actual)
+
+    def save(self, dataset, path, **save_kwargs):
+        import PseudoNetCDF as pnc
+        pncf = pnc.PseudoNetCDFFile()
+        pncf.dimensions = {k: pnc.PseudoNetCDFDimension(pncf, k, v)
+                           for k, v in dataset.dims.items()}
+        pncf.variables = {k: pnc.PseudoNetCDFVariable(pncf, k, v.dtype.char,
+                                                      v.dims,
+                                                      values=v.data[...],
+                                                      **v.attrs)
+                          for k, v in dataset.variables.items()}
+        for pk, pv in dataset.attrs.items():
+            setattr(pncf, pk, pv)
+
+        pnc.pncwrite(pncf, path, **save_kwargs)
+
+
 @requires_rasterio
 @contextlib.contextmanager
 def create_tmp_geotiff(nx=4, ny=3, nz=3,