diff --git a/lib/iris/fileformats/_nc_load_rules/helpers.py b/lib/iris/fileformats/_nc_load_rules/helpers.py index 35c2e96924..fa63002f09 100644 --- a/lib/iris/fileformats/_nc_load_rules/helpers.py +++ b/lib/iris/fileformats/_nc_load_rules/helpers.py @@ -708,13 +708,13 @@ def build_and_add_global_attributes(engine: Engine): ), ) if problem is not None: - stack_notes = problem.stack_trace.__notes__ + stack_notes = problem.stack_trace.__notes__ # type: ignore[attr-defined] if stack_notes is None: stack_notes = [] stack_notes.append( f"Skipping disallowed global attribute '{attr_name}' (see above error)" ) - problem.stack_trace.__notes__ = stack_notes + problem.stack_trace.__notes__ = stack_notes # type: ignore[attr-defined] ################################################################################ @@ -1536,14 +1536,14 @@ def build_and_add_dimension_coordinate( ) if problem is not None: coord_var_name = str(cf_coord_var.cf_name) - stack_notes = problem.stack_trace.__notes__ + stack_notes = problem.stack_trace.__notes__ # type: ignore[attr-defined] if stack_notes is None: stack_notes = [] stack_notes.append( f"Failed to create {coord_var_name} dimension coordinate:\n" f"Gracefully creating {coord_var_name!r} auxiliary coordinate instead." ) - problem.stack_trace.__notes__ = stack_notes + problem.stack_trace.__notes__ = stack_notes # type: ignore[attr-defined] problem.handled = True _ = _add_or_capture( @@ -1643,9 +1643,13 @@ def _add_auxiliary_coordinate( # Determine the name of the dimension/s shared between the CF-netCDF data variable # and the coordinate being built. - common_dims = [ - dim for dim in cf_coord_var.dimensions if dim in engine.cf_var.dimensions - ] + coord_dims = cf_coord_var.dimensions + if cf._is_str_dtype(cf_coord_var): + coord_dims = coord_dims[:-1] + datavar_dims = engine.cf_var.dimensions + if cf._is_str_dtype(engine.cf_var): + datavar_dims = datavar_dims[:-1] + common_dims = [dim for dim in coord_dims if dim in datavar_dims] data_dims = None if common_dims: # Calculate the offset of each common dimension. diff --git a/lib/iris/fileformats/cf.py b/lib/iris/fileformats/cf.py index 2b6568c315..050d02567a 100644 --- a/lib/iris/fileformats/cf.py +++ b/lib/iris/fileformats/cf.py @@ -15,6 +15,7 @@ """ from abc import ABCMeta, abstractmethod +import codecs from collections.abc import Iterable, MutableMapping import os import re @@ -89,6 +90,11 @@ def __init__(self, name, data): self.cf_data = data """NetCDF4 Variable data instance.""" + # Note: *always* disable encoding/decoding translations + # To avoid current known problems + # See https://github.com/Unidata/netcdf4-python/issues/1440 + data.set_auto_chartostring(False) + # ALSO NOTE: not stored. NetCDFDataProxy must re-assert when re-loading. """File source of the NetCDF content.""" try: @@ -790,25 +796,73 @@ def cf_label_data(self, cf_data_var): # Determine the name of the label string (or length) dimension by # finding the dimension name that doesn't exist within the data dimensions. - str_dim_name = list(set(self.dimensions) - set(cf_data_var.dimensions)) + str_dim_names = list(set(self.dimensions) - set(cf_data_var.dimensions)) + n_nondata_dims = len(str_dim_names) + + if n_nondata_dims == 0: + # *All* dims are shared with the data-variable. + # This is only ok if the data-var is *also* a string type. 
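+            # E.g. (hypothetical) a label variable "name(x, strlen)" attached to a
+            # string-type data variable "v(x, strlen)": *all* its dims are shared,
+            # so the string dimension cannot be identified by elimination.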
+            dim_ok = _is_str_dtype(cf_data_var)
+            # In this case, we must just *assume* that the last dimension is "the"
+            # string dimension.
+            str_dim_name = self.dimensions[-1]
+        else:
+            # If there is exactly one non-data dim, that is the one we want.
+            dim_ok = len(str_dim_names) == 1
+            if dim_ok:
+                # N.B. only unpack when there is exactly one : unpacking a
+                # multi-element list would raise an unhelpful ValueError here.
+                (str_dim_name,) = str_dim_names
 
-        if len(str_dim_name) != 1:
+        if not dim_ok:
             raise ValueError(
                 "Invalid string dimensions for CF-netCDF label variable %r"
                 % self.cf_name
             )
 
-        str_dim_name = str_dim_name[0]
         label_data = self[:]
 
         if ma.isMaskedArray(label_data):
-            label_data = label_data.filled()
+            label_data = label_data.filled(b"\0")
+
+        default_encoding = "utf-8"
+        encoding = getattr(self, "_Encoding", None)
+        if encoding is None:
+            # utf-8 is a reasonable "safe" default, equivalent to 'ascii' for ascii data
+            encoding = default_encoding
+        else:
+            try:
+                # Accept + normalise naming of encodings
+                encoding = codecs.lookup(encoding).name
+                # NOTE: if encoding does not suit data, errors can occur.
+                # For example, _Encoding = "ascii", with non-ascii content.
+            except LookupError:
+                # Replace some invalid setting with "safe"(ish) fallback.
+                encoding = default_encoding
+
+        def string_from_1d_bytearray(array, encoding):
+            r"""Join a 1-D byte array into a single decoded, stripped string.
+
+            Needed because numpy bytes arrays behave very oddly: elements which
+            "should" contain a zero byte b'\0' instead appear to contain an *empty*
+            byte b'', so a plain "b''.join()" would *omit* any zero bytes.
+            """
+            assert array.dtype.kind == "S" and array.dtype.itemsize == 1
+            assert array.ndim == 1
+            bytelist = [b"\0" if byte == b"" else byte for byte in array]
+            raw_bytes = b"".join(bytelist)
+            assert len(raw_bytes) == array.shape[0]
+            # N.B. raises UnicodeDecodeError if the content does not match the
+            # encoding, e.g. _Encoding = "ascii" with non-ascii content.
+            string = raw_bytes.decode(encoding=encoding)
+            result = string.strip()
+            return result
 
         # Determine whether we have a string-valued scalar label
         # i.e. a character variable that only has one dimension (the length of the string).
         if self.ndim == 1:
-            label_string = b"".join(label_data).strip()
-            label_string = label_string.decode("utf8")
+            label_string = string_from_1d_bytearray(label_data, encoding)
             data = np.array([label_string])
         else:
             # Determine the index of the string dimension.
@@ -829,9 +883,10 @@ def cf_label_data(self, cf_data_var):
                 else:
                     label_index = index + (slice(None, None),)
 
-                label_string = b"".join(label_data[label_index]).strip()
-                label_string = label_string.decode("utf8")
-                data[index] = label_string
+                label_string = string_from_1d_bytearray(
+                    label_data[label_index], encoding
+                )
+                data[index] = label_string.strip()
 
         return data
 
diff --git a/lib/iris/fileformats/netcdf/_thread_safe_nc.py b/lib/iris/fileformats/netcdf/_thread_safe_nc.py
index 35588eb2c4..d76574ad85 100644
--- a/lib/iris/fileformats/netcdf/_thread_safe_nc.py
+++ b/lib/iris/fileformats/netcdf/_thread_safe_nc.py
@@ -310,14 +310,39 @@ def fromcdl(cls, *args, **kwargs):
 class NetCDFDataProxy:
     """A reference to the data payload of a single NetCDF file variable."""
 
-    __slots__ = ("shape", "dtype", "path", "variable_name", "fill_value")
-
-    def __init__(self, shape, dtype, path, variable_name, fill_value):
+    __slots__ = (
+        "shape",
+        "dtype",
+        "path",
+        "variable_name",
+        "fill_value",
+        "is_bytes",
+        "encoding",
+        "string_length",
+    )
+
+    def __init__(
+        self,
+        shape,
+        dtype,
+        path,
+        variable_name,
+        fill_value,
+        encoding: str | None = None,
+        string_length: int | None = None,
+    ):
         self.shape = shape
         self.dtype = dtype
         self.path = path
         self.variable_name = variable_name
         self.fill_value = fill_value
+        self.is_bytes = dtype.kind == "S" and dtype.itemsize == 1
+        if self.is_bytes:
+            # We will be returning a different shape: the last dim is the byte-length.
+            if not string_length:
+                # No target length given: fall back to the byte-dimension length.
+                string_length = self.shape[-1]
+            self.shape = self.shape[:-1]
+            self.dtype = np.dtype(f"U{string_length}")
+        self.encoding = encoding
+        self.string_length = string_length
 
     @property
     def ndim(self):
@@ -336,11 +361,26 @@ def __getitem__(self, keys):
         dataset = netCDF4.Dataset(self.path)
         try:
             variable = dataset.variables[self.variable_name]
+            # ALWAYS disable byte encoding/decoding
+            # To avoid current known problems
+            # See https://github.com/Unidata/netcdf4-python/issues/1440
+            variable.set_auto_chartostring(False)
+
             # Get the NetCDF variable data and slice.
-            var = variable[keys]
+            data = variable[keys]
+
+            # If bytes, decode to strings
+            if self.is_bytes:
+                from iris.util import convert_bytesarray_to_strings
+
+                data = convert_bytesarray_to_strings(
+                    data,
+                    encoding=self.encoding,
+                    string_length=self.string_length,
+                )
         finally:
             dataset.close()
-        return np.asanyarray(var)
+        return np.asanyarray(data)
 
     def __repr__(self):
         fmt = (
@@ -388,6 +428,8 @@ def __setitem__(self, keys, array_data):
         try:
             dataset = netCDF4.Dataset(self.path, "r+")
             var = dataset.variables[self.varname]
+            # **Always** disable encode/decode of bytes to strings
+            var.set_auto_chartostring(False)
             var[keys] = array_data
         finally:
             try:
diff --git a/lib/iris/fileformats/netcdf/loader.py b/lib/iris/fileformats/netcdf/loader.py
index e8d283beb8..6c0b599a4d 100644
--- a/lib/iris/fileformats/netcdf/loader.py
+++ b/lib/iris/fileformats/netcdf/loader.py
@@ -11,6 +11,7 @@
 
 """
 
+import codecs
 from collections.abc import Iterable, Iterator, Mapping
 from contextlib import contextmanager
 from copy import deepcopy
@@ -269,10 +270,36 @@ def _get_cf_var_data(cf_var):
         # Normal NCVariable type:
         total_bytes = cf_var.size * cf_var.dtype.itemsize
 
+    default_encoding = "utf-8"
+    encoding = getattr(cf_var, "_Encoding", None)
+    if encoding is None:
+        # utf-8 is a reasonable "safe" default, equivalent to 'ascii' for ascii data
+        encoding = default_encoding
+    else:
+        try:
+            # Accept + normalise naming of encodings
+            encoding = codecs.lookup(encoding).name
+            # NOTE: if encoding does not suit data, errors can occur.
+ # For example, _Encoding = "ascii", with non-ascii content. + except LookupError: + # Replace some invalid setting with "safe"(ish) fallback. + encoding = default_encoding + + string_length = getattr(cf_var, "iris_string_length", None) + if total_bytes < _LAZYVAR_MIN_BYTES: # Don't make a lazy array, as it will cost more memory AND more time to access. result = cf_var[:] + if result.dtype.kind == "S": + from iris.util import convert_bytesarray_to_strings + + result = convert_bytesarray_to_strings( + result, + encoding=encoding, + string_length=string_length, + ) + # Special handling of masked scalar value; this will be returned as # an `np.ma.masked` instance which will lose the original dtype. # Workaround for this it return a 1-element masked array of the @@ -295,8 +322,17 @@ def _get_cf_var_data(cf_var): "_FillValue", _thread_safe_nc.default_fillvals[fill_dtype], ) + + # NOTE: if the data is bytes which need to be converted to strings on read, + # the data-proxy will do that (and it modifies its shape + dtype). proxy = NetCDFDataProxy( - cf_var.shape, dtype, cf_var.filename, cf_var.cf_name, fill_value + cf_var.shape, + dtype, + cf_var.filename, + cf_var.cf_name, + fill_value, + encoding=encoding, + string_length=string_length, ) # Get the chunking specified for the variable : this is either a shape, or # maybe the string "contiguous". diff --git a/lib/iris/fileformats/netcdf/saver.py b/lib/iris/fileformats/netcdf/saver.py index 5177749c07..a9654b6641 100644 --- a/lib/iris/fileformats/netcdf/saver.py +++ b/lib/iris/fileformats/netcdf/saver.py @@ -14,6 +14,7 @@ """ +import codecs import collections from itertools import repeat, zip_longest import os @@ -759,7 +760,7 @@ def _create_cf_dimensions(self, cube, dimension_names, unlimited_dimensions=None # used for a different one pass else: - dim_name = self._get_coord_variable_name(cube, coord) + dim_name = self._get_element_variable_name(cube, coord) unlimited_dim_names.append(dim_name) for dim_name in dimension_names: @@ -990,12 +991,12 @@ def _add_aux_coords( ] # Include any relevant mesh location coordinates. - mesh: MeshXY | None = getattr(cube, "mesh") - mesh_location: str | None = getattr(cube, "location") + mesh: MeshXY | None = getattr(cube, "mesh") # type: ignore[annotation-unchecked] + mesh_location: str | None = getattr(cube, "location") # type: ignore[annotation-unchecked] if mesh and mesh_location: location_coords: MeshNodeCoords | MeshEdgeCoords | MeshFaceCoords = getattr( mesh, f"{mesh_location}_coords" - ) + ) # type: ignore[annotation-unchecked] coords_to_add.extend(list(location_coords)) return self._add_inner_related_vars( @@ -1365,7 +1366,7 @@ def record_dimension(names_list, dim_name, length, matching_coords=None): if dim_name is None: # Not already present : create a unique dimension name # from the coord. - dim_name = self._get_coord_variable_name(cube, coord) + dim_name = self._get_element_variable_name(cube, coord) # Disambiguate if it has the same name as an # existing dimension. # OR if it matches an existing file variable name. @@ -1541,38 +1542,14 @@ def _create_cf_bounds(self, coord, cf_var, cf_name, /, *, compression_kwargs=Non ) self._lazy_stream_data(data=bounds, cf_var=cf_var_bounds) - def _get_cube_variable_name(self, cube): - """Return a CF-netCDF variable name for the given cube. - - Parameters - ---------- - cube : :class:`iris.cube.Cube` - An instance of a cube for which a CF-netCDF variable - name is required. - - Returns - ------- - str - A CF-netCDF variable name as a string. 
-
-        """
-        if cube.var_name is not None:
-            cf_name = cube.var_name
-        else:
-            # Convert to lower case and replace whitespace by underscores.
-            cf_name = "_".join(cube.name().lower().split())
-
-        cf_name = self.cf_valid_var_name(cf_name)
-        return cf_name
-
-    def _get_coord_variable_name(self, cube_or_mesh, coord):
-        """Return a CF-netCDF variable name for a given coordinate-like element.
+    def _get_element_variable_name(self, cube_or_mesh, element):
+        """Return a CF-netCDF variable name for a given coordinate-like element or cube.
 
         Parameters
         ----------
         cube_or_mesh : :class:`iris.cube.Cube` or :class:`iris.mesh.MeshXY`
             The Cube or Mesh being saved to the netCDF file.
-        coord : :class:`iris.coords._DimensionalMetadata`
+        element : :class:`iris.coords._DimensionalMetadata` or :class:`iris.cube.Cube`
             An instance of a coordinate (or similar), for which a CF-netCDF
             variable name is required.
 
@@ -1592,17 +1569,21 @@ def _get_coord_variable_name(self, cube_or_mesh, coord):
             cube = None
             mesh = cube_or_mesh
 
-        if coord.var_name is not None:
-            cf_name = coord.var_name
+        if element.var_name is not None:
+            cf_name = element.var_name
+        elif isinstance(element, Cube):
+            # Make name for a Cube without a var_name.
+            cf_name = "_".join(element.name().lower().split())
         else:
-            name = coord.standard_name or coord.long_name
+            # Make name for a Coord-like element without a var_name.
+            name = element.standard_name or element.long_name
             if not name or set(name).intersection(string.whitespace):
                 # We need to invent a name, based on its associated dimensions.
-                if cube is not None and cube.coords(coord):
+                if cube is not None and cube.coords(element):
                     # It is a regular cube coordinate.
                     # Auto-generate a name based on the dims.
                     name = ""
-                    for dim in cube.coord_dims(coord):
+                    for dim in cube.coord_dims(element):
                         name += f"dim{dim}"
 
                     # Handle scalar coordinate (dims == ()).
                     if not name:
@@ -1616,8 +1597,8 @@
                     # At present, a location-coord cannot be nameless, as the
                     # MeshXY code relies on guess_coord_axis.
-                    assert isinstance(coord, Connectivity)
-                    location = coord.cf_role.split("_")[0]
+                    assert isinstance(element, Connectivity)
+                    location = element.cf_role.split("_")[0]
                     location_dim_attr = f"{location}_dimension"
                     name = getattr(mesh, location_dim_attr)
 
@@ -1693,6 +1674,8 @@ def _create_mesh(self, mesh):
         return cf_mesh_name
 
     def _set_cf_var_attributes(self, cf_var, element):
+        from iris.cube import Cube
+
         # Deal with CF-netCDF units, and add the name+units properties.
         if isinstance(element, iris.coords.Coord):
             # Fix "degree" units if needed.
@@ -1700,34 +1683,50 @@
         else:
             units_str = str(element.units)
 
-        if cf_units.as_unit(units_str).is_udunits():
-            _setncattr(cf_var, "units", units_str)
-
-        standard_name = element.standard_name
-        if standard_name is not None:
-            _setncattr(cf_var, "standard_name", standard_name)
-
-        long_name = element.long_name
-        if long_name is not None:
-            _setncattr(cf_var, "long_name", long_name)
+        # NB this bit is a nasty hack to preserve existing behaviour through a refactor:
+        # The attributes for Coords are created in the order units, standard_name,
+        # whereas for data-variables (aka Cubes) it is the other way around.
+        # Needed now that this routine is also called from _create_cf_data_variable.
+        # TODO: when we can break things, rationalise these to be the same.
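+        # I.e. a Cube data-variable gets standard_name/long_name first and then
+        # units, whereas a Coord-like variable gets units first.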
+ def add_units_attr(): + if cf_units.as_unit(units_str).is_udunits(): + _setncattr(cf_var, "units", units_str) + + def add_names_attrs(): + standard_name = element.standard_name + if standard_name is not None: + _setncattr(cf_var, "standard_name", standard_name) + + long_name = element.long_name + if long_name is not None: + _setncattr(cf_var, "long_name", long_name) + + if isinstance(element, Cube): + add_names_attrs() + add_units_attr() + else: + add_units_attr() + add_names_attrs() # Add the CF-netCDF calendar attribute. if element.units.calendar: _setncattr(cf_var, "calendar", str(element.units.calendar)) - # Add any other custom coordinate attributes. - for name in sorted(element.attributes): - value = element.attributes[name] + if not isinstance(element, Cube): + # Add any other custom coordinate attributes. + # N.B. not Cube, which has specific handling in _create_cf_data_variable + for name in sorted(element.attributes): + value = element.attributes[name] - if name == "STASH": - # Adopting provisional Metadata Conventions for representing MO - # Scientific Data encoded in NetCDF Format. - name = "um_stash_source" - value = str(value) + if name == "STASH": + # Adopting provisional Metadata Conventions for representing MO + # Scientific Data encoded in NetCDF Format. + name = "um_stash_source" + value = str(value) - # Don't clobber existing attributes. - if not hasattr(cf_var, name): - _setncattr(cf_var, name, value) + # Don't clobber existing attributes. + if not hasattr(cf_var, name): + _setncattr(cf_var, name, value) def _create_generic_cf_array_var( self, @@ -1739,6 +1738,8 @@ def _create_generic_cf_array_var( element_dims=None, fill_value=None, compression_kwargs=None, + packing_controls: dict | None = None, + is_dataless=False, ): """Create theCF-netCDF variable given dimensional_metadata. @@ -1791,7 +1792,7 @@ def _create_generic_cf_array_var( # Work out the var-name to use. # N.B. the only part of this routine that may use a mesh _or_ a cube. - cf_name = self._get_coord_variable_name(cube_or_mesh, element) + cf_name = self._get_element_variable_name(cube_or_mesh, element) while cf_name in self._dataset.variables: cf_name = self._increment_name(cf_name) @@ -1804,28 +1805,85 @@ def _create_generic_cf_array_var( # Get the data values, in a way which works for any element type, as # all are subclasses of _DimensionalMetadata. # (e.g. =points if a coord, =data if an ancillary, etc) - data = element._core_values() + if isinstance(element, Cube): + data = element.core_data() + else: + data = element._core_values() # This compression contract is *not* applicable to a mesh. - if cube and cube.shape != data.shape: + if cube is not None and data is not None and cube.shape != data.shape: compression_kwargs = {} - if np.issubdtype(data.dtype, np.str_): + if not is_dataless and np.issubdtype(data.dtype, np.str_): # Deal with string-type variables. # Typically CF label variables, but also possibly ancil-vars ? - string_dimension_depth = data.dtype.itemsize - if data.dtype.kind == "U": - string_dimension_depth //= 4 - string_dimension_name = "string%d" % string_dimension_depth + + # Encode data into bytes, and determine the string-dimension length. 
+            # * we can't work this out without first encoding the data
+            # * UNLESS the target length is given (.iris_string_dimlength)
+            # * we can't create the dimension before we know the length
+            # * we can't create the variable before creating the dim (if needed)
+            # TODO: we can keep data lazy IFF there is a user-specified string-length
+
+            # Calculate encoding to apply.
+            default_encoding = "utf-8"
+            encoding = element.attributes.get("_Encoding", None)
+            if encoding is None:
+                # utf-8 is a reasonable "safe" default, equivalent to 'ascii' for ascii data
+                encoding = default_encoding
+            else:
+                try:
+                    # Accept + normalise naming of encodings
+                    encoding = codecs.lookup(encoding).name
+                    # NOTE: if encoding does not suit data, errors can occur.
+                    # For example, _Encoding = "ascii", with non-ascii content.
+                except LookupError:
+                    # Replace some invalid setting with "safe"(ish) fallback.
+                    encoding = default_encoding
+
+            # Convert data from an array of strings into a character array
+            # with an extra string-length dimension.
+
+            # TODO: support lazy in some cases??
+            # (N.B. can do when 'iris_string_dimlength' is provided)
+            if is_lazy_data(data):
+                # N.B. dask.compute() returns a tuple of results : unpack the array.
+                (data,) = dask.compute(data)
+
+            element_shape = data.shape
+            max_length = 1  # this is a MINIMUM - i.e. not zero!
+            data_elements = np.zeros(element_shape, dtype=object)
+            for index in np.ndindex(element_shape):
+                data_element = data[index].encode(encoding)
+                element_length = len(data_element)
+                data_elements[index] = data_element
+                if element_length > max_length:
+                    max_length = element_length
+
+            string_dimension_length = element.attributes.get(
+                "iris_string_dimlength", None
+            )
+            if string_dimension_length is None:
+                string_dimension_length = max_length
+
+            # We already encoded all the strings, but stored them in an object-array as
+            # we didn't yet know the fixed byte-length to convert to.
+            # Now convert to a fixed-width char array.
+            data = np.zeros(element_shape + (string_dimension_length,), dtype="S1")
+            right_pad = b"\0" * string_dimension_length
+            for index in np.ndindex(element_shape):
+                element_bytes = data_elements[index]
+                element_bytes = (element_bytes + right_pad)[:string_dimension_length]
+                data[index] = [
+                    element_bytes[i : i + 1] for i in range(string_dimension_length)
+                ]
 
             # Determine whether to create the string length dimension.
+            string_dimension_name = f"string{string_dimension_length}"
             if string_dimension_name not in self._dataset.dimensions:
                 while string_dimension_name in self._dataset.variables:
                     # Also avoid collision with variable names.
                     # See '_get_dim_names' for reason.
                     string_dimension_name = self._increment_name(string_dimension_name)
                 self._dataset.createDimension(
-                    string_dimension_name, string_dimension_depth
+                    string_dimension_name, string_dimension_length
                 )
 
             # Add the string length dimension to the variable dimensions.
@@ -1833,29 +1891,24 @@
 
             # Create the label coordinate variable.
             cf_var = self._dataset.createVariable(cf_name, "|S1", element_dims)
+            # Force to always exchange data as byte arrays.
+            # TODO: ?remove when bug fixed
+            # see: https://github.com/Unidata/netcdf4-python/issues/1440
+            cf_var.set_auto_chartostring(False)
 
-            # Convert data from an array of strings into a character array
-            # with an extra string-length dimension.
- if len(element_dims) == 1: - data_first = data[0] - if is_lazy_data(data_first): - data_first = dask.compute(data_first) - data = list("%- *s" % (string_dimension_depth, data_first)) - else: - orig_shape = data.shape - new_shape = orig_shape + (string_dimension_depth,) - new_data = np.zeros(new_shape, cf_var.dtype) - for index in np.ndindex(orig_shape): - index_slice = tuple(list(index) + [slice(None, None)]) - new_data[index_slice] = list( - "%- *s" % (string_dimension_depth, data[index]) - ) - data = new_data else: # A normal (numeric) variable. # ensure a valid datatype for the file format. - element_type = type(element).__name__ - data = self._ensure_valid_dtype(data, element_type, element) + if is_dataless: + dtype = self._DATALESS_DTYPE + fill_value = self._DATALESS_FILLVALUE + else: + element_type = type(element).__name__ + data = self._ensure_valid_dtype(data, element_type, element) + if not packing_controls: + dtype = data.dtype.newbyteorder("=") + else: + dtype = packing_controls["dtype"] # Check if this is a dim-coord. is_dimcoord = cube is not None and element in cube.dim_coords @@ -1869,7 +1922,7 @@ def _create_generic_cf_array_var( # Create the CF-netCDF variable. cf_var = self._dataset.createVariable( cf_name, - data.dtype.newbyteorder("="), + dtype, element_dims, fill_value=fill_value, **compression_kwargs, @@ -1887,7 +1940,12 @@ def _create_generic_cf_array_var( ) # Add the data to the CF-netCDF variable. - self._lazy_stream_data(data=data, cf_var=cf_var) + if not is_dataless: + if packing_controls: + # We must set packing attributes (if any), before assigning values. + for key, value in packing_controls["attributes"]: + _setncattr(cf_var, key, value) + self._lazy_stream_data(data=data, cf_var=cf_var) # Add names + units self._set_cf_var_attributes(cf_var, element) @@ -2238,9 +2296,9 @@ def _create_cf_grid_mapping(self, cube, cf_var_cube): cfvar = self._name_coord_map.name(coord) if not cfvar: # not found - create and store it: - cfvar = self._get_coord_variable_name(cube, coord) + cfvar = self._get_element_variable_name(cube, coord) self._name_coord_map.append( - cfvar, self._get_coord_variable_name(cube, coord) + cfvar, self._get_element_variable_name(cube, coord) ) cfvar_names.append(cfvar) @@ -2320,18 +2378,10 @@ def _create_cf_data_variable( # be removed. # Get the values in a form which is valid for the file format. is_dataless = cube.is_dataless() - if is_dataless: - data = None - else: - data = self._ensure_valid_dtype(cube.core_data(), "cube", cube) - if is_dataless: - # The variable must have *some* dtype, and it must be maskable - dtype = self._DATALESS_DTYPE - fill_value = self._DATALESS_FILLVALUE - elif not packing: - dtype = data.dtype.newbyteorder("=") - else: + packing_controls = None + if packing and not is_dataless: + data = self._ensure_valid_dtype(cube.core_data(), "cube", cube) if isinstance(packing, dict): if "dtype" not in packing: msg = "The dtype attribute is required for packing." @@ -2370,45 +2420,29 @@ def _create_cf_data_variable( else: add_offset = cmin + 2 ** (n - 1) * scale_factor - def set_packing_ncattrs(cfvar): - """Set netCDF packing attributes. - - NOTE: cfvar needs to be a _thread_safe_nc._ThreadSafeWrapper subclass. 
-
-            """
-            assert hasattr(cfvar, "THREAD_SAFE_FLAG")
-            if packing:
-                if scale_factor:
-                    _setncattr(cfvar, "scale_factor", scale_factor)
-                if add_offset:
-                    _setncattr(cfvar, "add_offset", add_offset)
-
-        cf_name = self._get_cube_variable_name(cube)
-        while cf_name in self._dataset.variables:
-            cf_name = self._increment_name(cf_name)
+            packing_controls = {
+                "dtype": dtype,
+                # As previously : only record attributes with non-zero values.
+                "attributes": [
+                    (name, value)
+                    for name, value in (
+                        ("scale_factor", scale_factor),
+                        ("add_offset", add_offset),
+                    )
+                    if value
+                ],
+            }
 
         # Create the cube CF-netCDF data variable with data payload.
-        cf_var = self._dataset.createVariable(
-            cf_name, dtype, dimension_names, fill_value=fill_value, **kwargs
+        cf_name = self._create_generic_cf_array_var(
+            cube,
+            dimension_names,
+            cube,
+            element_dims=dimension_names,
+            fill_value=fill_value,
+            compression_kwargs=kwargs,
+            packing_controls=packing_controls,
+            is_dataless=is_dataless,
         )
+        cf_var = self._dataset.variables[cf_name]
 
-        if not is_dataless:
-            set_packing_ncattrs(cf_var)
-            self._lazy_stream_data(data=data, cf_var=cf_var)
-
-        if cube.standard_name:
-            _setncattr(cf_var, "standard_name", cube.standard_name)
-
-        if cube.long_name:
-            _setncattr(cf_var, "long_name", cube.long_name)
-
-        if cube.units.is_udunits():
-            _setncattr(cf_var, "units", str(cube.units))
-
-        # Add the CF-netCDF calendar attribute.
-        if cube.units.calendar:
-            _setncattr(cf_var, "calendar", cube.units.calendar)
-
+        # Set general attributes. NB this part is cube-specific (not the same for
+        # other components), so '_set_cf_var_attributes' does *not* set these when
+        # the element is a Cube.
         if iris.FUTURE.save_split_attrs:
             attr_names = cube.attributes.locals.keys()
         else:
diff --git a/lib/iris/tests/integration/netcdf/test_chararrays.py b/lib/iris/tests/integration/netcdf/test_chararrays.py
new file mode 100644
index 0000000000..0eb211c8b0
--- /dev/null
+++ b/lib/iris/tests/integration/netcdf/test_chararrays.py
@@ -0,0 +1,234 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Integration tests for string data handling."""
+
+import subprocess
+
+import numpy as np
+import pytest
+
+import iris
+from iris.coords import AuxCoord, DimCoord
+from iris.cube import Cube
+from iris.fileformats.netcdf import _thread_safe_nc
+from iris.tests import env_bin_path
+
+NX, N_STRLEN = 3, 64
+TEST_STRINGS = ["Münster", "London", "Amsterdam"]
+TEST_COORD_VALS = ["bun", "éclair", "sandwich"]
+
+# VARS_COORDS_SHARE_STRING_DIM = True
+VARS_COORDS_SHARE_STRING_DIM = False
+if VARS_COORDS_SHARE_STRING_DIM:
+    TEST_COORD_VALS[-1] = "Xsandwich"  # makes the max coord strlen match the data one
+
+
+# Ensure all tests run with "split attrs" turned on.
+@pytest.fixture(scope="module", autouse=True)
+def enable_split_attrs():
+    with iris.FUTURE.context(save_split_attrs=True):
+        yield
+
+
+def convert_strings_to_chararray(string_array_1d, maxlen, encoding="utf-8"):
+    """Encode a 1-D list of strings as a fixed-width (N, maxlen) "S1" char array."""
+    bbytes = [text.encode(encoding) for text in string_array_1d]
+    pad = b"\0" * maxlen
+    bbytes = [(x + pad)[:maxlen] for x in bbytes]
+    chararray = np.array([[bb[i : i + 1] for i in range(maxlen)] for bb in bbytes])
+    return chararray
+
+
+def convert_bytesarray_to_strings(
+    byte_array, encoding="utf-8", string_length: int | None = None
+):
+    """Convert an array of bytes (characters) into an array of decoded strings.
+
+    N.B. for now at least, we assume the string dim is **always the last one**.
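+
+    E.g. (illustrative) a (3, 64) "S1" char array becomes a (3,) "U64" string array.
+    NOTE: this is a local duplicate of :func:`iris.util.convert_bytesarray_to_strings`.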
+    """
+    bytes_shape = byte_array.shape
+    var_shape = bytes_shape[:-1]
+    if string_length is None:
+        string_length = bytes_shape[-1]
+    string_dtype = f"U{string_length}"
+    result = np.empty(var_shape, dtype=string_dtype)
+    for ndindex in np.ndindex(var_shape):
+        element_bytes = byte_array[ndindex]
+        raw_bytes = b"".join([b if b else b"\0" for b in element_bytes])
+        string = raw_bytes.decode(encoding)
+        result[ndindex] = string
+    return result
+
+
+INCLUDE_COORD = True
+# INCLUDE_COORD = False
+
+INCLUDE_NUMERIC_AUXCOORD = True
+# INCLUDE_NUMERIC_AUXCOORD = False
+
+
+def make_testfile(filepath, chararray, coordarray, encoding_str=None):
+    """Create a netCDF test file with char data (and, optionally, char coords)."""
+    ds = _thread_safe_nc.DatasetWrapper(filepath, "w")
+    try:
+        ds.createDimension("x", NX)
+        ds.createDimension("nstr", N_STRLEN)
+        vx = ds.createVariable("x", int, dimensions=("x",))
+        vx[:] = np.arange(NX)
+        if INCLUDE_COORD:
+            ds.createDimension("nstr2", N_STRLEN)
+            v_co = ds.createVariable(
+                "v_co",
+                "S1",
+                dimensions=(
+                    "x",
+                    "nstr2",
+                ),
+            )
+            v_co[:] = coordarray
+            if encoding_str is not None:
+                v_co._Encoding = encoding_str
+        if INCLUDE_NUMERIC_AUXCOORD:
+            v_num = ds.createVariable(
+                "v_num",
+                float,
+                dimensions=("x",),
+            )
+            v_num[:] = np.arange(NX)
+        v = ds.createVariable(
+            "v",
+            "S1",
+            dimensions=(
+                "x",
+                "nstr",
+            ),
+        )
+        v[:] = chararray
+        if encoding_str is not None:
+            v._Encoding = encoding_str
+        if INCLUDE_COORD:
+            coords_str = "v_co"
+            if INCLUDE_NUMERIC_AUXCOORD:
+                coords_str += " v_num"
+            v.coordinates = coords_str
+    finally:
+        ds.close()
+
+
+def make_testcube(
+    dataarray,
+    coordarray,  # for now, these are always *string* arrays
+    encoding_str: str | None = None,
+):
+    """Create a test cube with string data (and, optionally, a string aux coord)."""
+    cube = Cube(dataarray, var_name="v")
+    cube.add_dim_coord(DimCoord(np.arange(NX), var_name="x"), 0)
+    if encoding_str is not None:
+        cube.attributes["_Encoding"] = encoding_str
+    if INCLUDE_COORD:
+        co_x = AuxCoord(coordarray, var_name="v_co")
+        if encoding_str is not None:
+            co_x.attributes["_Encoding"] = encoding_str
+        cube.add_aux_coord(co_x, 0)
+    return cube
+
+
+NCDUMP_PATHSTR = str(env_bin_path("ncdump"))
+
+
+def ncdump(nc_path: str, *args):
+    """Call ncdump to print a dump of a file."""
+    call_args = [NCDUMP_PATHSTR, nc_path] + list(args)
+    subprocess.run(call_args, check=True)
+
+
+def show_result(filepath):
+    """Print an ncdump of the file, then the result of loading it as an Iris cube."""
+    print(f"File {filepath}")
+    print("NCDUMP:")
+    ncdump(filepath)
+    print("\nAs iris cube..")
+    try:
+        iris.loading.LOAD_PROBLEMS.reset()
+        cube = iris.load_cube(filepath)
+        print(cube)
+        if iris.loading.LOAD_PROBLEMS.problems:
+            print(iris.loading.LOAD_PROBLEMS)
+            print(
+                "\n".join(iris.loading.LOAD_PROBLEMS.problems[0].stack_trace.format())
+            )
+        print("-data-")
+        print(repr(cube.data))
+        print("-'x' coord points-")
+        print(repr(cube.coord("x").points))
+        if INCLUDE_COORD:
+            print("-string auxcoord data-")
+            try:
+                print(repr(cube.coord("v_co").points))
+            except Exception as err2:
+                print(repr(err2))
+    except UnicodeDecodeError as err:
+        print(repr(err))
+
+
+@pytest.fixture(scope="session")
+def save_dir(tmp_path_factory):
+    return tmp_path_factory.mktemp("save_files")
+
+
+# TODO: the tests don't test things properly yet, they just exercise the code and print
+# things for manual debugging.
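+# The encodings exercised by both the load and save tests below.
+# "None" means that no "_Encoding" attribute is set on the variables / cube.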
+TEST_ENCODINGS = (
+    None,
+    "ascii",
+    "utf-8",
+    "utf-32",
+)
+
+
+@pytest.mark.parametrize("encoding", TEST_ENCODINGS)
+def test_load_encodings(encoding, save_dir):
+    print(f"\n=========\nTesting encoding: {encoding}")
+    filepath = save_dir / f"tmp_load_{encoding}.nc"
+    # Write the file content as utf-8, except when testing utf-32 itself.
+    do_as = encoding
+    if encoding != "utf-32":
+        do_as = "utf-8"
+    test_chararray = convert_strings_to_chararray(
+        TEST_STRINGS, N_STRLEN, encoding=do_as
+    )
+    test_coordarray = convert_strings_to_chararray(
+        TEST_COORD_VALS, N_STRLEN, encoding=do_as
+    )
+    make_testfile(filepath, test_chararray, test_coordarray, encoding_str=encoding)
+    show_result(filepath)
+
+
+@pytest.mark.parametrize("encoding", TEST_ENCODINGS)
+def test_save_encodings(encoding, save_dir):
+    cube = make_testcube(
+        dataarray=TEST_STRINGS, coordarray=TEST_COORD_VALS, encoding_str=encoding
+    )
+    print(cube)
+    filepath = save_dir / f"tmp_save_{encoding}.nc"
+    if encoding == "ascii":
+        with pytest.raises(
+            UnicodeEncodeError,
+            match="'ascii' codec can't encode character.*not in range",
+        ):
+            iris.save(cube, filepath)
+    else:
+        iris.save(cube, filepath)
+    show_result(filepath)
diff --git a/lib/iris/util.py b/lib/iris/util.py
index b3ce7941c5..d154d42e16 100644
--- a/lib/iris/util.py
+++ b/lib/iris/util.py
@@ -2999,3 +2999,24 @@ def set(
 
 # Global CML settings object for use as context manager
 CML_SETTINGS: CMLSettings = CMLSettings()
+
+
+def convert_bytesarray_to_strings(
+    byte_array, encoding="utf-8", string_length: int | None = None
+):
+    """Convert an array of bytes (characters) into an array of decoded strings.
+
+    N.B. for now at least, we assume the string dim is **always the last one**.
+    """
+    bytes_shape = byte_array.shape
+    var_shape = bytes_shape[:-1]
+    if string_length is None:
+        string_length = bytes_shape[-1]
+    string_dtype = f"U{string_length}"
+    result = np.empty(var_shape, dtype=string_dtype)
+    for ndindex in np.ndindex(var_shape):
+        element_bytes = byte_array[ndindex]
+        raw_bytes = b"".join([b if b else b"\0" for b in element_bytes])
+        string = raw_bytes.decode(encoding)
+        result[ndindex] = string
+    return result
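+
+
+# Example usage (illustrative):
+#     convert_bytesarray_to_strings(np.array([[b"h", b"i"]], dtype="S1"))
+#     --> array(['hi'], dtype='<U2')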