diff --git a/.buildinfo b/.buildinfo new file mode 100644 index 00000000..fff65eec --- /dev/null +++ b/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: e46396e2729ca20a214e1785278bdb15 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/.doctrees/api/astrodata.AstroData.doctree b/.doctrees/api/astrodata.AstroData.doctree new file mode 100644 index 00000000..d32eb343 Binary files /dev/null and b/.doctrees/api/astrodata.AstroData.doctree differ diff --git a/.doctrees/api/astrodata.AstroDataError.doctree b/.doctrees/api/astrodata.AstroDataError.doctree new file mode 100644 index 00000000..06333e6e Binary files /dev/null and b/.doctrees/api/astrodata.AstroDataError.doctree differ diff --git a/.doctrees/api/astrodata.AstroDataMixin.doctree b/.doctrees/api/astrodata.AstroDataMixin.doctree new file mode 100644 index 00000000..482881e1 Binary files /dev/null and b/.doctrees/api/astrodata.AstroDataMixin.doctree differ diff --git a/.doctrees/api/astrodata.NDAstroData.doctree b/.doctrees/api/astrodata.NDAstroData.doctree new file mode 100644 index 00000000..e7b8e511 Binary files /dev/null and b/.doctrees/api/astrodata.NDAstroData.doctree differ diff --git a/.doctrees/api/astrodata.Section.doctree b/.doctrees/api/astrodata.Section.doctree new file mode 100644 index 00000000..0e34f7e4 Binary files /dev/null and b/.doctrees/api/astrodata.Section.doctree differ diff --git a/.doctrees/api/astrodata.TagSet.doctree b/.doctrees/api/astrodata.TagSet.doctree new file mode 100644 index 00000000..be721372 Binary files /dev/null and b/.doctrees/api/astrodata.TagSet.doctree differ diff --git a/.doctrees/api/astrodata.add_header_to_table.doctree b/.doctrees/api/astrodata.add_header_to_table.doctree new file mode 100644 index 00000000..0f539f2c Binary files /dev/null and b/.doctrees/api/astrodata.add_header_to_table.doctree differ diff --git 
a/.doctrees/api/astrodata.astro_data_descriptor.doctree b/.doctrees/api/astrodata.astro_data_descriptor.doctree new file mode 100644 index 00000000..e7a9e92f Binary files /dev/null and b/.doctrees/api/astrodata.astro_data_descriptor.doctree differ diff --git a/.doctrees/api/astrodata.astro_data_tag.doctree b/.doctrees/api/astrodata.astro_data_tag.doctree new file mode 100644 index 00000000..7095536c Binary files /dev/null and b/.doctrees/api/astrodata.astro_data_tag.doctree differ diff --git a/.doctrees/api/astrodata.create.doctree b/.doctrees/api/astrodata.create.doctree new file mode 100644 index 00000000..0ca1a8a7 Binary files /dev/null and b/.doctrees/api/astrodata.create.doctree differ diff --git a/.doctrees/api/astrodata.from_file.doctree b/.doctrees/api/astrodata.from_file.doctree new file mode 100644 index 00000000..6a30e1e5 Binary files /dev/null and b/.doctrees/api/astrodata.from_file.doctree differ diff --git a/.doctrees/api/astrodata.open.doctree b/.doctrees/api/astrodata.open.doctree new file mode 100644 index 00000000..d249d7f6 Binary files /dev/null and b/.doctrees/api/astrodata.open.doctree differ diff --git a/.doctrees/api/astrodata.returns_list.doctree b/.doctrees/api/astrodata.returns_list.doctree new file mode 100644 index 00000000..0f8638b0 Binary files /dev/null and b/.doctrees/api/astrodata.returns_list.doctree differ diff --git a/.doctrees/api/astrodata.version.doctree b/.doctrees/api/astrodata.version.doctree new file mode 100644 index 00000000..5bb39130 Binary files /dev/null and b/.doctrees/api/astrodata.version.doctree differ diff --git a/.doctrees/environment.pickle b/.doctrees/environment.pickle new file mode 100644 index 00000000..038a4894 Binary files /dev/null and b/.doctrees/environment.pickle differ diff --git a/.doctrees/index.doctree b/.doctrees/index.doctree new file mode 100644 index 00000000..2443f3ef Binary files /dev/null and b/.doctrees/index.doctree differ diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 
00000000..e69de29b diff --git a/_modules/astrodata.html b/_modules/astrodata.html new file mode 100644 index 00000000..62051151 --- /dev/null +++ b/_modules/astrodata.html @@ -0,0 +1,193 @@ + + + + + + + astrodata — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for astrodata

+"""This package adds an abstraction layer to astronomical data by parsing the
+information contained in the headers as attributes. To do so, one must subclass
+:class:`astrodata.AstroData` and add parse methods accordingly to the
+:class:`~astrodata.TagSet` received.
+
+.. |AstroData| replace:: :class:`~astrodata.AstroData`
+.. |AstroDataError| replace:: :class:`~astrodata.AstroDataError`
+.. |AstroDataMixin| replace:: :class:`~astrodata.AstroDataMixin`
+.. |NDAstroData| replace:: :class:`~astrodata.NDAstroData`
+.. |Section| replace:: :class:`~astrodata.Section`
+.. |TagSet| replace:: :class:`~astrodata.TagSet`
+.. |astro_data_descriptor| replace:: :func:`~astrodata.astro_data_descriptor`
+.. |astro_data_tag| replace:: :func:`~astrodata.astro_data_tag`
+.. |create| replace:: :func:`~astrodata.create`
+.. |open| replace:: :func:`~astrodata.open`
+.. |return_list| replace:: :func:`~astrodata.return_list`
+.. |version| replace:: :func:`~astrodata.version`
+"""
+
+__all__ = [
+    "AstroData",
+    "AstroDataError",
+    "AstroDataMixin",
+    "NDAstroData",
+    "Section",
+    "TagSet",
+    "__version__",
+    "add_header_to_table",
+    "astro_data_descriptor",
+    "astro_data_tag",
+    "from_file",
+    "create",
+    "open",
+    "returns_list",
+    "version",
+]
+
+
+from .core import AstroData
+from .factory import AstroDataFactory, AstroDataError
+from .fits import add_header_to_table
+from .nddata import NDAstroData, AstroDataMixin
+from .utils import (
+    Section,
+    TagSet,
+    astro_data_descriptor,
+    astro_data_tag,
+    returns_list,
+    deprecated,
+)
+from ._version import version
+
+__version__ = version()
+
+factory = AstroDataFactory()
+# Let's make sure that there's at least one class that matches the data
+# (if we're dealing with a FITS file)
+factory.add_class(AstroData)
+
+
+# TODO: Need to replace this with a name that doesn't override the builtin.
+# This makes it so that the following will cause unexpected behavior:
+#     from astrodata import *
+#     file_stream = open("some_file.fits")
+# Without raising a warning or error.
+
+[docs] +@deprecated( + "Use 'astrodata.from_file'. astrodata.open is deprecated, " + "and will be removed in a future version." +) +def open(*args, **kwargs): # pylint: disable=redefined-builtin + """Return an |AstroData| object from a file.""" + return factory.get_astro_data(*args, **kwargs)
+ + + +from_file = factory.get_astro_data +create = factory.create_from_scratch +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/astrodata/_version.html b/_modules/astrodata/_version.html new file mode 100644 index 00000000..29b2305a --- /dev/null +++ b/_modules/astrodata/_version.html @@ -0,0 +1,150 @@ + + + + + + + astrodata._version — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for astrodata._version

+#!/usr/bin/env python
+"""
+Holds the DRAGONS version to be propagated throught all the DRAGONS package
+and to be used in the documentation.
+"""
+
+# --- Setup Version Here ---
+API = 3
+FEATURE = 2
+BUG = 0
+TAG = ""
+
+
+
+[docs] +def version(short=False, tag=TAG): + """ + Returns DRAGONS's version based on the api, + feature and bug numbers. + + Returns + ------- + str : formatted version + """ + + if short: + _version = "{:d}.{:d}".format(API, FEATURE) + + else: + _tag = "_{:s}".format(tag) if tag else "" + _version = "{:d}.{:d}.{:d}".format(API, FEATURE, BUG) + _tag + + return _version
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/astrodata/core.html b/_modules/astrodata/core.html new file mode 100644 index 00000000..0eebc618 --- /dev/null +++ b/_modules/astrodata/core.html @@ -0,0 +1,1637 @@ + + + + + + + astrodata.core — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for astrodata.core

+"""This is the core module of the AstroData package. It provides the
+`AstroData` class, which is the main interface to manipulate astronomical
+data sets.
+"""
+import inspect
+import logging
+import os
+import re
+import textwrap
+import warnings
+from collections import OrderedDict
+from contextlib import suppress
+from copy import deepcopy
+from functools import partial
+
+import numpy as np
+
+from astropy.io import fits
+from astropy.nddata import NDData
+from astropy.table import Table
+from astropy.utils import format_doc
+
+from .fits import (
+    DEFAULT_EXTENSION,
+    FitsHeaderCollection,
+    _process_table,
+    read_fits,
+    write_fits,
+)
+from .nddata import ADVarianceUncertainty
+from .nddata import NDAstroData as NDDataObject
+from .utils import (
+    assign_only_single_slice,
+    astro_data_descriptor,
+    deprecated,
+    normalize_indices,
+    returns_list,
+)
+
+NO_DEFAULT = object()
+
+
+_ARIT_DOC = """
+    Performs {name} by evaluating ``self {op} operand``.
+
+    Parameters
+    ----------
+    oper : number or object
+        The operand to perform the operation  ``self {op} operand``.
+
+    Returns
+    --------
+    `AstroData` instance
+"""
+
+
+
+[docs] +class AstroData: + """Base class for the AstroData software package. It provides an interface + to manipulate astronomical data sets. + + Parameters + ---------- + nddata : `astrodata.NDAstroData` or list of `astrodata.NDAstroData` + List of NDAstroData objects. + + tables : dict[name, `astropy.table.Table`] + Dict of table objects. + + phu : `astropy.io.fits.Header` + Primary header. + + indices : list of int + List of indices mapping the `astrodata.NDAstroData` objects that this + object will access to. This is used when slicing an object, then the + sliced AstroData will have the ``.nddata`` list from its parent and + access the sliced NDAstroData through this list of indices. + """ + + # Derived classes may provide their own __keyword_dict. Being a private + # variable, each class will preserve its own, and there's no risk of + # overriding the whole thing + __keyword_dict = { + "instrument": "INSTRUME", + "object": "OBJECT", + "telescope": "TELESCOP", + "ut_date": "DATE-OBS", + } + + def __init__( + self, nddata=None, tables=None, phu=None, indices=None, is_single=False + ): + if nddata is None: + nddata = [] + + # Check that nddata is either a single or iterable of NDAstroData + # objects + is_nddata = isinstance(nddata, NDDataObject) + + try: + is_nddata_iterable = isinstance(nddata[0], NDDataObject) + + except IndexError: + # Fall back on checking if it's a list or tuple---could be empty. + is_nddata_iterable = isinstance(nddata, (list, tuple)) + + if not (is_nddata or is_nddata_iterable): + raise TypeError( + f"nddata must be an NDAstroData object or a list of " + f"NDAstroData objects, not {type(nddata)} ({nddata})." + ) + + # If nddata is a single NDAstroData object, make it a list. + if not isinstance(nddata, (list, tuple)): + nddata = [nddata] + + # _all_nddatas contains all the extensions from the original file or + # object. And _indices is used to map extensions for sliced objects. 
+ self._all_nddatas = nddata + self._indices = indices + + # TODO: Is there no way to know if this is a single frame without + # passing an arg? + self.is_single = is_single + + # If this data provider represents a single slice out of a whole + # dataset, return True. Otherwise, return False. + if tables is not None and not isinstance(tables, dict): + raise ValueError("tables must be a dict") + + self._tables = tables or {} + + self._phu = phu or fits.Header() + self._fixed_settable = { + "data", + "uncertainty", + "mask", + "variance", + "wcs", + "path", + "filename", + } + self._logger = logging.getLogger(__name__) + self._orig_filename = None + self._path = None + + def __deepcopy__(self, memo): + """Returns a new instance of this class. + + Parameters + ---------- + memo : dict + See the documentation on `deepcopy` for an explanation on how + this works. + + """ + obj = self.__class__() + + for attr in ("_phu", "_path", "_orig_filename", "_tables"): + obj.__dict__[attr] = deepcopy(self.__dict__[attr]) + + obj.__dict__["_all_nddatas"] = [deepcopy(nd) for nd in self._nddata] + return obj + + def _keyword_for(self, name): + """Returns the FITS keyword name associated to ``name``. + + Parameters + ---------- + name : str + The common "key" name for which we want to know the associated + FITS keyword. + + Returns + ------- + str + The desired keyword name. + + Raises + ------ + AttributeError + If there is no keyword for the specified ``name``. + + """ + for cls in self.__class__.mro(): + with suppress(AttributeError, KeyError): + # __keyword_dict is a mangled variable + return getattr(self, f"_{cls.__name__}__keyword_dict")[name] + + raise AttributeError(f"No match for '{name}'") + + def _process_tags(self): + """Return the tag set (as a set of str) for the current instance.""" + results = [] + # Calling inspect.getmembers on `self` would trigger all the + # properties (tags, phu, hdr, etc.), and that's undesirable. To + # prevent that, we'll inspect the *class*. 
+ members = inspect.getmembers( + self.__class__, lambda x: hasattr(x, "tag_method") + ) + + for _, method in members: + ts = method(self) + if ts.add or ts.remove or ts.blocks: + results.append(ts) + + # Sort by the length of substractions... those that substract + # from others go first + results = sorted( + results, key=lambda x: len(x.remove) + len(x.blocks), reverse=True + ) + + # Sort by length of blocked_by, those that are never disabled go first + results = sorted(results, key=lambda x: len(x.blocked_by)) + + # Sort by length of if_present... those that need other tags to + # be present go last + results = sorted(results, key=lambda x: len(x.if_present)) + + tags = set() + removals = set() + blocked = set() + for plus, minus, blocked_by, blocks, is_present in results: + if is_present: + # If this TagSet requires other tags to be present, make + # sure that all of them are. Otherwise, skip... + if len(tags & is_present) != len(is_present): + continue + + allowed = (len(tags & blocked_by) + len(plus & blocked)) == 0 + if allowed: + # This set is not being blocked by others... + removals.update(minus) + tags.update(plus - removals) + blocked.update(blocks) + + return tags + + @staticmethod + def _matches_data(source): + # This one is trivial. Will be more specific for subclasses. 
+ logging.debug("Using default _matches_data with %s", source) + return True + + @property + def path(self): + """Return the file path.""" + return self._path + + @path.setter + def path(self, value): + if self._path is None and value is not None: + self._orig_filename = os.path.basename(value) + self._path = value + + @property + def filename(self): + """Return the file name.""" + if self.path is not None: + return os.path.basename(self.path) + + return self.path + + @filename.setter + def filename(self, value): + if os.path.isabs(value): + raise ValueError("Cannot set the filename to an absolute path!") + + if self.path is None: + self.path = os.path.abspath(value) + + else: + dirname = os.path.dirname(self.path) + self.path = os.path.join(dirname, value) + + @property + def orig_filename(self): + """Return the original file name (before it was modified).""" + return self._orig_filename + + @orig_filename.setter + def orig_filename(self, value): + self._orig_filename = value + + @property + def phu(self): + """Return the primary header.""" + return self._phu + + @phu.setter + def phu(self, phu): + self._phu = phu + + @property + def hdr(self): + """Return all headers, as a `astrodata.fits.FitsHeaderCollection`.""" + if not self.nddata: + return None + headers = [nd.meta["header"] for nd in self._nddata] + return headers[0] if self.is_single else FitsHeaderCollection(headers) + + @property + @deprecated( + "Access to headers through this property is deprecated and " + "will be removed in the future. Use '.hdr' instead." + ) + def header(self): + """Deprecated header access. Use ``.hdr`` instead.""" + return [self.phu] + [ndd.meta["header"] for ndd in self._nddata] + + @property + def tags(self): + """A set of strings that represent the tags defining this instance.""" + return self._process_tags() + + @property + def descriptors(self): + """Returns a sequence of names for the methods that have been + decorated as descriptors. 
+ + Returns + -------- + tuple of str + """ + members = inspect.getmembers( + self.__class__, lambda x: hasattr(x, "descriptor_method") + ) + return tuple(mname for (mname, method) in members) + + @property + def id(self): + """Returns the extension identifier (1-based extension number) + for sliced objects. + """ + if self.is_single: + return self._indices[0] + 1 + + raise ValueError( + "Cannot return id for an AstroData object " + "that is not a single slice" + ) + + @property + def indices(self): + """Returns the extensions indices for sliced objects.""" + return self._indices if self._indices else list(range(len(self))) + + @property + def is_sliced(self): + """If this data provider instance represents the whole dataset, return + False. If it represents a slice out of the whole, return True. + """ + return self._indices is not None + +
+[docs] + def is_settable(self, attr): + """Return True if the attribute is meant to be modified.""" + if self.is_sliced and attr in {"path", "filename"}: + return False + return attr in self._fixed_settable or attr.isupper()
+ + + @property + def _nddata(self): + """Return the list of `astrodata.NDAstroData` objects. Contrary to + ``self.nddata`` this always returns a list. + """ + if self._indices is not None: + return [self._all_nddatas[i] for i in self._indices] + + return self._all_nddatas + + @property + def nddata(self): + """Return the list of `astrodata.NDAstroData` objects. + + If the `AstroData` object is sliced, this returns only the NDData + objects of the sliced extensions. And if this is a single extension + object, the NDData object is returned directly (i.e. not a list). + """ + return self._nddata[0] if self.is_single else self._nddata + +
+[docs] + def table(self): + """Return a dictionary of `astropy.table.Table` objects. + + Notes + ----- + This returns a _copy_ of the tables, so modifying them will not + affect the original ones. + """ + # FIXME: do we need this in addition to .tables ? + return self._tables.copy()
+ + + @property + def tables(self): + """Return the names of the `astropy.table.Table` objects associated to + the top-level object. + """ + return set(self._tables) + + @property + def ext_tables(self): + """Return the names of the `astropy.table.Table` objects associated to + an extension. + """ + if not self.is_single: + raise AttributeError("this is only available for extensions") + + return set( + key + for key, obj in self.nddata.meta["other"].items() + if isinstance(obj, Table) + ) + + @property + @returns_list + def shape(self): + """Return the shape of the data array for each extension as a list of + shapes. + """ + return [nd.shape for nd in self._nddata] + + @property + @returns_list + def data(self): + """A list of the arrays (or single array, if this is a single slice) + corresponding to the science data attached to each extension. + """ + return [nd.data for nd in self._nddata] + + @data.setter + @assign_only_single_slice + def data(self, value): + # Setting the ._data in the NDData is a bit kludgy, but we're all + # grown adults and know what we're doing, isn't it? + if hasattr(value, "shape"): + self.nddata._data = value + + else: + raise AttributeError( + "Trying to assign data to be something with no shape" + ) + + @property + @returns_list + def uncertainty(self): + """A list of the uncertainty objects (or a single object, if this is + a single slice) attached to the science data, for each extension. + + The objects are instances of AstroPy's `astropy.nddata.NDUncertainty`, + or `None` where no information is available. + + See also + -------- + variance : The actual array supporting the uncertainty object. 
+ + """ + return [nd.uncertainty for nd in self._nddata] + + @uncertainty.setter + @assign_only_single_slice + def uncertainty(self, value): + self.nddata.uncertainty = value + + @property + @returns_list + def mask(self): + """A list of the mask arrays (or a single array, if this is a single + slice) attached to the science data, for each extension. + + For objects that miss a mask, `None` will be provided instead. + """ + return [nd.mask for nd in self._nddata] + + @mask.setter + @assign_only_single_slice + def mask(self, value): + self.nddata.mask = value + + @property + @returns_list + def variance(self): + """A list of the variance arrays (or a single array, if this is a + single slice) attached to the science data, for each extension. + + For objects that miss uncertainty information, `None` will be provided + instead. + + See also + --------- + uncertainty : The uncertainty objects used under the hood. + + """ + return [nd.variance for nd in self._nddata] + + @variance.setter + @assign_only_single_slice + def variance(self, value): + if value is None: + self.nddata.uncertainty = None + else: + self.nddata.uncertainty = ADVarianceUncertainty(value) + + @property + def wcs(self): + """Returns the list of WCS objects for each extension.""" + if self.is_single: + return self.nddata.wcs + + raise ValueError( + "Cannot return WCS for an AstroData object " + "that is not a single slice" + ) + + @wcs.setter + @assign_only_single_slice + def wcs(self, value): + self.nddata.wcs = value + + def __iter__(self): + if self.is_single: + yield self + else: + for n in range(len(self)): + yield self[n] + + def __getitem__(self, idx): + """Returns a sliced view of the instance. It supports the standard + Python indexing syntax. + + Parameters + ---------- + slice : int, `slice` + An integer or an instance of a Python standard `slice` object + + Raises + ------- + TypeError + If trying to slice an object when it doesn't make sense (e.g. 
+ slicing a single slice) + + ValueError + If `slice` does not belong to one of the recognized types + + IndexError + If an index is out of range + """ + if self.is_single: + raise TypeError("Can't slice a single slice!") + + indices, _ = normalize_indices(idx, nitems=len(self)) + + if self._indices: + indices = [self._indices[i] for i in indices] + + is_single = not isinstance(idx, (tuple, slice)) + + obj = self.__class__( + self._all_nddatas, + tables=self._tables, + phu=self.phu, + indices=indices, + is_single=is_single, + ) + + obj._path = self.path + obj._orig_filename = self.orig_filename + + return obj + + def __delitem__(self, idx): + """Called to implement deletion of ``self[idx]``. Supports standard + Python syntax (including negative indices). + + Parameters + ---------- + idx : int + This index represents the order of the element that you want + to remove. + + Raises + ------- + IndexError + If `idx` is out of range. + """ + if self.is_sliced: + raise TypeError("Can't remove items from a sliced object") + del self._all_nddatas[idx] + + def __getattr__(self, attribute): + """Called when an attribute lookup has not found the attribute in the + usual places (not an instance attribute, and not in the class tree for + ``self``). + + Parameters + ---------- + attribute : str + The attribute's name. + + Raises + ------- + AttributeError + If the attribute could not be found/computed. + """ + # I we're working with single slices, let's look some things up + # in the ND object + if self.is_single and attribute.isupper(): + with suppress(KeyError): + return self.nddata.meta["other"][attribute] + + if attribute in self._tables: + return self._tables[attribute] + + raise AttributeError( + f"{self.__class__.__name__!r} object has no " + f"attribute {attribute!r}" + ) + + def __setattr__(self, attribute, value): + """Called when an attribute assignment is attempted, instead of the + normal mechanism. 
+ + Parameters + ---------- + attribute : str + The attribute's name. + + value : object + The value to be assigned to the attribute. + """ + + def _my_attribute(attr): + return attr in self.__dict__ or attr in self.__class__.__dict__ + + if ( + attribute.isupper() + and self.is_settable(attribute) + and not _my_attribute(attribute) + ): + # This method is meant to let the user set certain attributes of + # the NDData objects. First we check if the attribute belongs to + # this object's dictionary. Otherwise, see if we can pass it down. + # + if self.is_sliced and not self.is_single: + raise TypeError( + "This attribute can only be " + "assigned to a single-slice object" + ) + + if attribute == DEFAULT_EXTENSION: + raise AttributeError( + f"{attribute} extensions should be " + "appended with .append" + ) + + if attribute in {"DQ", "VAR"}: + raise AttributeError( + f"{attribute} should be set on the " "nddata object" + ) + + add_to = self.nddata if self.is_single else None + self._append(value, name=attribute, add_to=add_to) + + return + + super().__setattr__(attribute, value) + + def __delattr__(self, attribute): + """Implements attribute removal.""" + if not attribute.isupper(): + super().__delattr__(attribute) + return + + if self.is_sliced: + if not self.is_single: + raise TypeError("Can't delete attributes on non-single slices") + + other = self.nddata.meta["other"] + if attribute in other: + del other[attribute] + else: + raise AttributeError( + f"{self.__class__.__name__!r} sliced " + "object has no attribute {attribute!r}" + ) + else: + if attribute in self._tables: + del self._tables[attribute] + else: + raise AttributeError( + f"'{attribute}' is not a global table " "for this instance" + ) + + def __contains__(self, attribute): + """Implements the ability to use the ``in`` operator with an + `AstroData` object. + + Parameters + ---------- + attribute : str + An attribute name. 
+ + Returns + -------- + bool + """ + return attribute in self.exposed + + def __len__(self): + """Return the number of independent extensions stored by the object.""" + if self._indices is not None: + return len(self._indices) + + return len(self._all_nddatas) + + @property + def exposed(self): + """A collection of strings with the names of objects that can be + accessed directly by name as attributes of this instance, and that are + not part of its standard interface (i.e. data objects that have been + added dynamically). + + Examples + --------- + >>> ad[0].exposed # doctest: +SKIP + set(['OBJMASK', 'OBJCAT']) + + """ + exposed = set(self._tables) + if self.is_single: + exposed |= set(self.nddata.meta["other"]) + + return exposed + + def _pixel_info(self): + for idx, nd in enumerate(self._nddata): + other_objects = [] + uncer = nd.uncertainty + fixed = ( + ("variance", None if uncer is None else uncer), + ("mask", nd.mask), + ) + + for name, other in fixed + tuple(sorted(nd.meta["other"].items())): + if other is None: + continue + + if isinstance(other, Table): + other_objects.append( + { + "attr": name, + "type": "Table", + "dim": str((len(other), len(other.columns))), + "data_type": "n/a", + } + ) + + else: + dim = "" + if hasattr(other, "dtype"): + dt = other.dtype.name + dim = str(other.shape) + + elif hasattr(other, "data"): + dt = other.data.dtype.name + dim = str(other.data.shape) + + elif hasattr(other, "array"): + dt = other.array.dtype.name + dim = str(other.array.shape) + + else: + dt = "unknown" + + obj_dict = { + "attr": name, + "type": type(other).__name__, + "dim": dim, + "data_type": dt, + } + + other_objects.append(obj_dict) + + main_dict = { + "content": "science", + "type": type(nd).__name__, + "dim": str(nd.data.shape), + "data_type": nd.data.dtype.name, + } + + out_dict = { + "idx": f"[{idx:2}]", + "main": main_dict, + "other": other_objects, + } + + yield out_dict + +
+[docs] + def info(self): + """Prints out information about the contents of this instance.""" + + print(f"Filename: {self.path if self.path else 'Unknown'}") + # This is fixed. We don't support opening for update + # print("Mode: readonly") + + text = "Tags: " + " ".join(sorted(self.tags)) + textwrapper = textwrap.TextWrapper(width=80, subsequent_indent=" ") + for line in textwrapper.wrap(text): + print(line) + + if len(self) > 0: + main_fmt = "{:6} {:24} {:17} {:14} {}" + other_fmt = " .{:20} {:17} {:14} {}" + print("\nPixels Extensions") + print( + main_fmt.format( + "Index", "Content", "Type", "Dimensions", "Format" + ) + ) + for pi in self._pixel_info(): + main_obj = pi["main"] + print( + main_fmt.format( + pi["idx"], + main_obj["content"][:24], + main_obj["type"][:17], + main_obj["dim"], + main_obj["data_type"], + ) + ) + for other in pi["other"]: + print( + other_fmt.format( + other["attr"][:20], + other["type"][:17], + other["dim"], + other["data_type"], + ) + ) + + # NOTE: This covers tables, only. Study other cases before + # implementing a more general solution + if self._tables: + print("\nOther Extensions") + print(" Type Dimensions") + for name, table in sorted(self._tables.items()): + if isinstance(table, list): + # This is not a free floating table + continue + + print( + f".{name[:13]:13s} {'Table':11s} {len(table), len(table.columns)}" + )
+ + + def _oper(self, operator, operand): + ind = self.indices + ndd = self._all_nddatas + if isinstance(operand, AstroData): + if len(operand) != len(self): + raise ValueError("Operands are not the same size") + for n in range(len(self)): + try: + data = ( + operand.nddata + if operand.is_single + else operand.nddata[n] + ) + ndd[ind[n]] = operator(ndd[ind[n]], data) + except TypeError: + # This may happen if operand is a sliced, single + # AstroData object + ndd[ind[n]] = operator(ndd[ind[n]], operand.nddata) + op_table = operand.table() + ltab, rtab = set(self._tables), set(op_table) + for tab in rtab - ltab: + self._tables[tab] = op_table[tab] + + else: + for n in range(len(self)): + ndd[ind[n]] = operator(ndd[ind[n]], operand) + + def _standard_nddata_op(self, fn, operand): + return self._oper( + partial(fn, handle_mask=np.bitwise_or, handle_meta="first_found"), + operand, + ) + + @format_doc(_ARIT_DOC, name="addition", op="+") + def __add__(self, oper): + copy = deepcopy(self) + copy += oper + return copy + + @format_doc(_ARIT_DOC, name="subtraction", op="-") + def __sub__(self, oper): + copy = deepcopy(self) + copy -= oper + return copy + + @format_doc(_ARIT_DOC, name="multiplication", op="*") + def __mul__(self, oper): + copy = deepcopy(self) + copy *= oper + return copy + + @format_doc(_ARIT_DOC, name="division", op="/") + def __truediv__(self, oper): + copy = deepcopy(self) + copy /= oper + return copy + + @format_doc(_ARIT_DOC, name="inplace addition", op="+=") + def __iadd__(self, oper): + self._standard_nddata_op(NDDataObject.add, oper) + return self + + @format_doc(_ARIT_DOC, name="inplace subtraction", op="-=") + def __isub__(self, oper): + self._standard_nddata_op(NDDataObject.subtract, oper) + return self + + @format_doc(_ARIT_DOC, name="inplace multiplication", op="*=") + def __imul__(self, oper): + self._standard_nddata_op(NDDataObject.multiply, oper) + return self + + @format_doc(_ARIT_DOC, name="inplace division", op="/=") + def 
__itruediv__(self, oper): + self._standard_nddata_op(NDDataObject.divide, oper) + return self + + add = __iadd__ + subtract = __isub__ + multiply = __imul__ + divide = __itruediv__ + + __radd__ = __add__ + __rmul__ = __mul__ + + def __rsub__(self, oper): + copy = (deepcopy(self) - oper) * -1 + return copy + + def _rdiv(self, ndd, operand): + # Divide method works with the operand first + return NDDataObject.divide(operand, ndd) + + def __rtruediv__(self, oper): + obj = deepcopy(self) + obj._oper(obj._rdiv, oper) + return obj + + def _process_pixel_plane( + self, pixim, name=None, top_level=False, custom_header=None + ): + # Assume that we get an ImageHDU or something that can be + # turned into one + if isinstance(pixim, fits.ImageHDU): + nd = NDDataObject(pixim.data, meta={"header": pixim.header}) + elif isinstance(pixim, NDDataObject): + nd = pixim + else: + nd = NDDataObject(pixim) + + if custom_header is not None: + nd.meta["header"] = custom_header + + header = nd.meta.setdefault("header", fits.Header()) + currname = header.get("EXTNAME") + + if currname is None: + header["EXTNAME"] = name if name is not None else DEFAULT_EXTENSION + + if top_level: + nd.meta.setdefault("other", OrderedDict()) + + return nd + + def _append_array(self, data, name=None, header=None, add_to=None): + if name in {"DQ", "VAR"}: + raise ValueError( + f"'{name}' need to be associated to a " + f"'{DEFAULT_EXTENSION}' one" + ) + + if add_to is None: + # Top level extension + if name is not None: + hname = name + elif header is not None: + hname = header.get("EXTNAME", DEFAULT_EXTENSION) + else: + hname = DEFAULT_EXTENSION + + hdu = fits.ImageHDU(data, header=header) + hdu.header["EXTNAME"] = hname + ret = self._append_imagehdu( + hdu, name=hname, header=None, add_to=None + ) + else: + ret = add_to.meta["other"][name] = data + + return ret + + def _append_imagehdu(self, hdu, name, header, add_to): + if name in {"DQ", "VAR"} or add_to is not None: + return self._append_array(hdu.data, 
name=name, add_to=add_to) + + nd = self._process_pixel_plane( + hdu, name=name, top_level=True, custom_header=header + ) + return self._append_nddata(nd, name, add_to=None) + + def _append_raw_nddata(self, raw_nddata, name, header, add_to): + logging.debug("Appending data to nddata: %s", name) + + # We want to make sure that the instance we add is whatever we specify + # as NDDataObject, instead of the random one that the user may pass + top_level = add_to is None + + if not isinstance(raw_nddata, NDDataObject): + raw_nddata = NDDataObject(raw_nddata) + + processed_nddata = self._process_pixel_plane( + raw_nddata, top_level=top_level, custom_header=header + ) + return self._append_nddata(processed_nddata, name=name, add_to=add_to) + + def _append_nddata(self, new_nddata, name, add_to): + # NOTE: This method is only used by others that have constructed NDData + # according to our internal format. We don't accept new headers at this + # point, and that's why it's missing from the signature. 'name' is + # ignored. It's there just to comply with the _append_XXX signature. 
+ if add_to is not None: + raise TypeError( + "You can only append NDData derived instances " + "at the top level" + ) + + hd = new_nddata.meta["header"] + hname = hd.get("EXTNAME", DEFAULT_EXTENSION) + + if hname == DEFAULT_EXTENSION: + self._all_nddatas.append(new_nddata) + + else: + raise ValueError( + f"Arbitrary image extensions can only be added " + f"in association to a '{DEFAULT_EXTENSION}'" + ) + + logging.debug("Appending data to nddata: %s", name) + + return new_nddata + + def _append_table(self, new_table, name, header, add_to): + tb = _process_table(new_table, name, header) + hname = tb.meta["header"].get("EXTNAME") + + def find_next_num(tables): + table_num = 1 + while f"TABLE{table_num}" in tables: + table_num += 1 + return f"TABLE{table_num}" + + if add_to is None: + # Find table names for all extensions + ext_tables = set() + for nd in self._nddata: + ext_tables |= set( + key + for key, obj in nd.meta["other"].items() + if isinstance(obj, Table) + ) + + if hname is None: + hname = find_next_num(set(self._tables) | ext_tables) + elif hname in ext_tables: + raise ValueError( + f"Cannot append table '{hname}' because it " + "would hide an extension table" + ) + + self._tables[hname] = tb + else: + if hname in self._tables: + raise ValueError( + f"Cannot append table '{hname}' because it " + "would hide a top-level table" + ) + + add_to.meta["other"][hname] = tb + + return tb + + def _append_astrodata(self, ad, name, header, add_to): + logging.debug("Appending astrodata object: %s", name) + + if not ad.is_single: + raise ValueError( + "Cannot append AstroData instances that are " + "not single slices" + ) + + if add_to is not None: + raise ValueError( + "Cannot append an AstroData slice to another slice" + ) + + new_nddata = deepcopy(ad.nddata) + if header is not None: + new_nddata.meta["header"] = deepcopy(header) + + return self._append_nddata(new_nddata, name=None, add_to=None) + + def _append(self, ext, name=None, header=None, add_to=None): + """ + 
Internal method to dispatch to the type specific methods. This is + called either by ``.append`` to append on top-level objects only or + by ``__setattr__``. In the second case ``name`` cannot be None, so + this is always the case when appending to extensions (add_to != None). + """ + dispatcher = ( + (NDData, self._append_raw_nddata), + ((Table, fits.TableHDU, fits.BinTableHDU), self._append_table), + (fits.ImageHDU, self._append_imagehdu), + (AstroData, self._append_astrodata), + ) + + for bases, method in dispatcher: + if isinstance(ext, bases): + return method(ext, name=name, header=header, add_to=add_to) + + # Assume that this is an array for a pixel plane + return self._append_array(ext, name=name, header=header, add_to=add_to) + +
    def append(self, ext, name=None, header=None):
        """
        Adds a new top-level extension.

        Parameters
        ----------
        ext : array, `astropy.nddata.NDData`, `astropy.table.Table`, other
            The contents for the new extension. The exact accepted types depend
            on the class implementing this interface. Implementations specific
            to certain data formats may accept specialized types (eg. a FITS
            provider will accept an `astropy.io.fits.ImageHDU` and extract the
            array out of it).
        name : str, optional
            A name that may be used to access the new object, as an attribute
            of the provider. The name is typically ignored for top-level
            (global) objects, and required for the others. If the name cannot
            be derived from the metadata associated to ``ext``, you will
            have to provide one.
            It can consist of a combination of numbers and letters, with the
            restriction that the letters have to be all capital, and the first
            character cannot be a number ("[A-Z][A-Z0-9]*").

        Returns
        --------
        The same object, or a new one, if it was necessary to convert it to
        a more suitable format for internal use.

        Raises
        -------
        TypeError
            If adding the object in an invalid situation (eg. ``name`` is
            `None` when adding to a single slice).
        ValueError
            Raised if the extension is of a proper type, but its value is
            illegal somehow.

        """
        # Appending is only meaningful on the full, unsliced object; slices
        # must use attribute assignment instead.
        if self.is_sliced:
            raise TypeError(
                "Can't append objects to slices, use "
                "'ext.NAME = obj' instead"
            )

        # NOTE: Most probably, if we want to copy the input argument, we
        # should do it here...
        if isinstance(ext, fits.PrimaryHDU):
            raise ValueError(
                "Only one Primary HDU allowed. "
                "Use .phu if you really need to set one"
            )

        if isinstance(ext, Table):
            raise ValueError(
                "Tables should be set directly as attribute, "
                "i.e. 'ad.MYTABLE = table'"
            )

        # FITS extension names are conventionally uppercase: warn, then
        # normalize so downstream lookups are consistent.
        if name is not None and not name.isupper():
            warnings.warn(
                f"extension name '{name}' should be uppercase", UserWarning
            )
            name = name.upper()

        return self._append(ext, name=name, header=header)
+ + +
+[docs] + @classmethod + def read(cls, source, extname_parser=None): + """Read from a file, file object, HDUList, etc.""" + return read_fits(cls, source, extname_parser=extname_parser)
+ + + load = read # for backward compatibility + +
+[docs] + def write(self, filename=None, overwrite=False): + """ + Write the object to disk. + + Parameters + ---------- + filename : str, optional + If the filename is not given, ``self.path`` is used. + overwrite : bool + If True, overwrites existing file. + + """ + if filename is None: + if self.path is None: + raise ValueError("A filename needs to be specified") + filename = self.path + + write_fits(self, filename, overwrite=overwrite)
+ + +
+[docs] + def operate(self, operator, *args, **kwargs): + """ + Applies a function to the main data array on each extension, replacing + the data with the result. The data will be passed as the first argument + to the function. + + It will be applied to the mask and variance of each extension, too, if + they exist. + + This is a convenience method, which is equivalent to:: + + for ext in ad: + ext.data = operator(ext.data, *args, **kwargs) + if ext.mask is not None: + ext.mask = operator(ext.mask, *args, **kwargs) + if ext.variance is not None: + ext.variance = operator(ext.variance, *args, **kwargs) + + with the additional advantage that it will work on single slices, too. + + Parameters + ---------- + operator : callable + A function that takes an array (and, maybe, other arguments) + and returns an array. + args, kwargs : optional + Additional arguments to be passed to the ``operator``. + + Examples + --------- + >>> import numpy as np + >>> ad.operate(np.squeeze) # doctest: +SKIP + + """ + # Ensure we can iterate, even on a single slice + for ext in [self] if self.is_single else self: + ext.data = operator(ext.data, *args, **kwargs) + if ext.mask is not None: + ext.mask = operator(ext.mask, *args, **kwargs) + if ext.variance is not None: + ext.variance = operator(ext.variance, *args, **kwargs)
+ + +
+[docs] + def reset(self, data, mask=NO_DEFAULT, variance=NO_DEFAULT, check=True): + """ + Sets the ``.data``, and optionally ``.mask`` and ``.variance`` + attributes of a single-extension AstroData slice. This function will + optionally check whether these attributes have the same shape. + + Parameters + ---------- + data : ndarray + The array to assign to the ``.data`` attribute ("SCI"). + mask : ndarray, optional + The array to assign to the ``.mask`` attribute ("DQ"). + variance: ndarray, optional + The array to assign to the ``.variance`` attribute ("VAR"). + check: bool + If set, then the function will check that the mask and variance + arrays have the same shape as the data array. + + Raises + ------- + TypeError + if an attempt is made to set the .mask or .variance attributes + with something other than an array + ValueError + if the .mask or .variance attributes don't have the same shape as + .data, OR if this is called on an AD instance that isn't a single + extension slice + + """ + if not self.is_single: + raise ValueError("Trying to reset a non-sliced AstroData object") + + # In case data is an NDData object + try: + self.data = data.data + except AttributeError: + self.data = data + # Set mask, with checking if required + try: + if mask.shape != self.data.shape and check: + raise ValueError("Mask shape incompatible with data shape") + + except AttributeError as err: + if mask is None: + self.mask = mask + + elif mask == NO_DEFAULT: + if hasattr(data, "mask"): + self.mask = data.mask + + else: + raise TypeError("Attempt to set mask inappropriately") from err + + else: + self.mask = mask + + # Set variance, with checking if required + try: + if variance.shape != self.data.shape and check: + raise ValueError("Variance shape incompatible with data shape") + + except AttributeError as err: + if variance is None: + self.uncertainty = None + + elif variance == NO_DEFAULT: + if hasattr(data, "uncertainty"): + self.uncertainty = data.uncertainty + + else: + 
raise TypeError( + "Attempt to set variance inappropriately" + ) from err + + else: + self.variance = variance + + if hasattr(data, "wcs"): + self.wcs = data.wcs
+ + +
    def update_filename(self, prefix=None, suffix=None, strip=False):
        """Update the "filename" attribute of the AstroData object.

        A prefix and/or suffix can be specified. If ``strip=True``, these will
        replace the existing prefix/suffix; if ``strip=False``, they will
        simply be prepended/appended.

        The current filename is broken down into its existing prefix, root, and
        suffix using the ``ORIGNAME`` phu keyword, if it exists and is
        contained within the current filename. Otherwise, the filename is split
        at the last underscore and the part before is assigned as the root and
        the underscore and part after the suffix. No prefix is assigned.

        Note that, if ``strip=True``, a prefix or suffix will only be stripped
        if '' is specified.

        Parameters
        ----------
        prefix: str, optional
            New prefix (None => leave alone)

        suffix: str, optional
            New suffix (None => leave alone)

        strip: bool, optional
            Strip existing prefixes and suffixes if new ones are given?

        Raises
        ------
        ValueError
            If the filename cannot be determined
        """
        # Fall back to ORIGNAME when no filename has been set yet.
        if self.filename is None:
            if "ORIGNAME" in self.phu:
                self.filename = self.phu["ORIGNAME"]
            else:
                raise ValueError(
                    "A filename needs to be set before it can be updated"
                )

        # Set the ORIGNAME keyword if it's not there
        if "ORIGNAME" not in self.phu:
            self.phu.set(
                "ORIGNAME",
                self.orig_filename,
                "Original filename prior to processing",
            )

        if strip:
            # Use ORIGNAME's root as the anchor to locate the existing
            # prefix/suffix inside the current filename.
            root, filetype = os.path.splitext(self.phu["ORIGNAME"])
            filename, filetype = os.path.splitext(self.filename)
            m = re.match(f"(.*){re.escape(root)}(.*)", filename)

            # Do not strip a prefix/suffix unless a new one is provided
            if m:
                if prefix is None:
                    prefix = m.groups()[0]

                existing_suffix = m.groups()[1]

                # Only the final "_suffix" chunk is treated as strippable;
                # anything before the last underscore is folded into root.
                if "_" in existing_suffix:
                    last_underscore = existing_suffix.rfind("_")
                    root += existing_suffix[:last_underscore]
                    existing_suffix = existing_suffix[last_underscore:]

            else:
                # ORIGNAME's root is not part of the current filename:
                # split on the last underscore instead (no prefix assigned).
                try:
                    root, existing_suffix = filename.rsplit("_", 1)
                    existing_suffix = "_" + existing_suffix

                except ValueError as err:
                    logging.info(
                        "Could not split filename (ValueError): %s", err
                    )
                    root, existing_suffix = filename, ""

            if suffix is None:
                suffix = existing_suffix

        else:
            root, filetype = os.path.splitext(self.filename)

        # Cope with prefix or suffix as None
        self.filename = (prefix or "") + root + (suffix or "") + filetype
+ + + def _crop_nd(self, nd, x1, y1, x2, y2): + y_start, y_end = y1, y2 + 1 + x_start, x_end = x1, x2 + 1 + + nd.data = nd.data[y_start:y_end, x_start:x_end] + + if nd.uncertainty is not None: + nd.uncertainty = nd.uncertainty[y_start:y_end, x_start:x_end] + + if nd.mask is not None: + nd.mask = nd.mask[y_start:y_end, x_start:x_end] + +
+[docs] + def crop(self, x1, y1, x2, y2): + """Crop the NDData objects given indices. + + Parameters + ---------- + x1, y1, x2, y2 : int + Minimum and maximum indices for the x and y axis. + + """ + # TODO: Consider cropping of objects in the meta section + for nd in self._nddata: + orig_shape = nd.data.shape + self._crop_nd(nd, x1, y1, x2, y2) + for o in nd.meta["other"].values(): + try: + if o.shape == orig_shape: + self._crop_nd(o, x1, y1, x2, y2) + except AttributeError: + # No 'shape' attribute in the object. It's probably + # not array-like + pass
+ + +
+[docs] + @astro_data_descriptor + def instrument(self): + """Returns the name of the instrument making the observation.""" + return self.phu.get(self._keyword_for("instrument"))
+ + +
+[docs] + @astro_data_descriptor + def object(self): + """Returns the name of the object being observed.""" + return self.phu.get(self._keyword_for("object"))
+ + +
+[docs] + @astro_data_descriptor + def telescope(self): + """Returns the name of the telescope.""" + return self.phu.get(self._keyword_for("telescope"))
+
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/astrodata/factory.html b/_modules/astrodata/factory.html new file mode 100644 index 00000000..4e851cb5 --- /dev/null +++ b/_modules/astrodata/factory.html @@ -0,0 +1,341 @@ + + + + + + + astrodata.factory — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for astrodata.factory

+"""Factory for AstroData objects."""
+import logging
+import os
+from contextlib import contextmanager
+from copy import deepcopy
+
+from astropy.io import fits
+
+from .utils import deprecated
+
+LOGGER = logging.getLogger(__name__)
+
+
+
class AstroDataError(Exception):
    """Raised when an AstroData object cannot be created or manipulated."""
class AstroDataFactory:
    """Factory class for AstroData objects.

    Keeps a registry of AstroData subclasses and, given a file or HDUList,
    instantiates the most specific registered class whose ``_matches_data``
    accepts the data.
    """

    # Callables tried, in order, to open a file path into a native object.
    _file_openers = (fits.open,)

    def __init__(self):
        self._registry = set()

    @deprecated(
        "Renamed to _open_file, please use that method instead: "
        "astrodata.factory.AstroDataFactory._open_file"
    )
    @staticmethod
    @contextmanager
    def _openFile(source):  # pylint: disable=invalid-name
        # BUGFIX: a function decorated with @contextmanager must be a
        # generator; the previous implementation *returned* the inner
        # context manager, which made ``with factory._openFile(...)`` fail
        # with a TypeError. Delegate with a real yield instead.
        with AstroDataFactory._open_file(source) as opened:
            yield opened

    @staticmethod
    @contextmanager
    def _open_file(source):
        """Internal static method that takes a ``source``, assuming that it is
        a string pointing to a file to be opened.

        If this is the case, it will try to open the file and return an
        instance of the appropriate native class to be able to manipulate it
        (eg. ``HDUList``).

        If ``source`` is not a string, it will be returned verbatim, assuming
        that it represents an already opened file.
        """
        if isinstance(source, (str, os.PathLike)):
            stats = os.stat(source)
            if stats.st_size == 0:
                LOGGER.warning("File %s is zero size", source)

            # try vs all handlers
            for func in AstroDataFactory._file_openers:
                try:
                    fp = func(source)
                    yield fp

                except Exception as err:  # pylint: disable=broad-except
                    # TODO: Should be more specific than this.
                    # Log the exception, if it's a serious error then
                    # re-raise it, e.g., user exits with Ctrl-C.
                    LOGGER.error(
                        "Failed to open %s with %s, got error: %s",
                        source,
                        func,
                        err,
                    )

                else:
                    if hasattr(fp, "close"):
                        fp.close()

                    return

            raise AstroDataError(
                f"No access, or not supported format for: {source}"
            )

        else:
            yield source

    @deprecated(
        "Renamed to add_class, please use that method instead: "
        "astrodata.factory.AstroDataFactory.add_class"
    )
    def addClass(self, cls):  # pylint: disable=invalid-name
        """Add a new class to the AstroDataFactory registry. It will be used
        when instantiating an AstroData class for a FITS file.
        """
        self.add_class(cls)

    def add_class(self, cls):
        """Add a new class to the AstroDataFactory registry. It will be used
        when instantiating an AstroData class for a FITS file.

        Raises
        ------
        AttributeError
            If ``cls`` does not implement a ``_matches_data`` method.
        """
        if not hasattr(cls, "_matches_data"):
            raise AttributeError(
                f"Class '{cls.__name__}' has no '_matches_data' method"
            )

        self._registry.add(cls)

    @deprecated(
        "Renamed to get_astro_data, please use that method instead: "
        "astrodata.factory.AstroDataFactory.get_astro_data"
    )
    def getAstroData(self, source):  # pylint: disable=invalid-name
        """Deprecated, see |get_astro_data|."""
        # BUGFIX: return the created instance; this wrapper used to drop it
        # and implicitly return None.
        return self.get_astro_data(source)

    def get_astro_data(self, source):
        """Takes either a string (with the path to a file) or an HDUList as
        input, and tries to return an AstroData instance.

        It will raise exceptions if the file is not found, or if there is no
        match for the HDUList, among the registered AstroData classes.

        Returns an instantiated object, or raises AstroDataError if it was
        not possible to find a match

        Parameters
        ----------
        source : `str` or `pathlib.Path` or `fits.HDUList`
            The file path or HDUList to read.
        """
        candidates = []
        with self._open_file(source) as opened:
            for adclass in self._registry:
                try:
                    # TODO: accessing protected member
                    # pylint: disable=protected-access
                    if adclass._matches_data(opened):
                        candidates.append(adclass)

                except Exception as err:  # pylint: disable=broad-except
                    # TODO: Should be more specific than this.
                    LOGGER.error(
                        "Failed to open %s with %s, got error: %s",
                        source,
                        adclass,
                        err,
                    )

        # For every candidate in the list, remove the ones that are base
        # classes for other candidates. That way we keep only the more
        # specific ones.
        final_candidates = []
        for cnd in candidates:
            if any(cnd in x.mro() for x in candidates if x != cnd):
                continue

            final_candidates.append(cnd)

        if len(final_candidates) > 1:
            raise AstroDataError(
                "More than one class is candidate for this dataset"
            )

        if not final_candidates:
            raise AstroDataError("No class matches this dataset")

        return final_candidates[0].read(source)

    @deprecated(
        "Renamed to create_from_scratch, please use that method instead: "
        "astrodata.factory.AstroDataFactory.create_from_scratch"
    )
    def createFromScratch(
        self,
        phu,
        extensions=None,
    ):  # pylint: disable=invalid-name
        """Deprecated, see |create_from_scratch|."""
        # BUGFIX: return the created instance; this wrapper used to drop it
        # and implicitly return None.
        return self.create_from_scratch(phu=phu, extensions=extensions)

    def create_from_scratch(self, phu, extensions=None):
        """Creates an AstroData object from a collection of objects.

        Parameters
        ----------
        phu : `fits.PrimaryHDU` or `fits.Header` or `dict` or `list`
            FITS primary HDU or header, or something that can be used to create
            a fits.Header (a dict, a list of "cards").

        extensions : list of HDUs
            List of HDU objects.

        Returns
        -------
        `astrodata.AstroData`
            An AstroData instance.

        Raises
        ------
        ValueError
            If ``phu`` is not a valid object.
        """
        lst = fits.HDUList()
        if phu is not None:
            if isinstance(phu, fits.PrimaryHDU):
                lst.append(deepcopy(phu))

            elif isinstance(phu, fits.Header):
                lst.append(fits.PrimaryHDU(header=deepcopy(phu)))

            elif isinstance(phu, (dict, list, tuple)):
                p = fits.PrimaryHDU()
                p.header.update(phu)
                lst.append(p)

            else:
                raise ValueError(
                    "phu must be a PrimaryHDU or a valid header object"
                )

        # TODO: Verify the contents of extensions...
        if extensions is not None:
            for ext in extensions:
                lst.append(ext)

        return self.get_astro_data(lst)
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/astrodata/fits.html b/_modules/astrodata/fits.html new file mode 100644 index 00000000..b0ab6a1e --- /dev/null +++ b/_modules/astrodata/fits.html @@ -0,0 +1,1314 @@ + + + + + + + astrodata.fits — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for astrodata.fits

+"""Functions used when interacting with FITS files and HDUs.
+
+.. |NDData| replace:: :class:`~astropy.nddata.NDData`
+.. |NDDataRef| replace:: :class:`~astropy.nddata.NDDataRef`
+.. |BinTableHDU| replace:: :class:`~astropy.io.fits.BinTableHDU`
+.. |TableHDU| replace:: :class:`~astropy.io.fits.TableHDU`
+.. |NDAstroData| replace:: :class:`~astrodata.nddata.NDAstroData`
+.. |NDAstroDataRef| replace:: :class:`~astrodata.nddata.NDAstroDataRef`
+"""
+from collections import OrderedDict
+from copy import deepcopy
+from io import BytesIO
+from itertools import product as cart_product, zip_longest
+import gc
+import logging
+import os
+import traceback
+import warnings
+
+from astropy import units as u
+from astropy.io import fits
+from astropy.io.fits import (
+    BinTableHDU,
+    Column,
+    DELAYED,
+    HDUList,
+    ImageHDU,
+    PrimaryHDU,
+    TableHDU,
+)
+from astropy.nddata import NDData
+
+# NDDataRef is still not in the stable astropy, but this should be the one
+# we use in the future...
+# from astropy.nddata import NDData, NDDataRef as NDDataObject
+from astropy.table import Table
+
+import asdf
+import astropy
+import jsonschema
+import numpy as np
+
+
+from gwcs.wcs import WCS as gWCS
+
+from .nddata import ADVarianceUncertainty, NDAstroData as NDDataObject
+from .utils import deprecated
+from .wcs import fitswcs_to_gwcs, gwcs_to_fits
+
+DEFAULT_EXTENSION = "SCI"
+NO_DEFAULT = object()
+LOGGER = logging.getLogger(__name__)
+
+
class FitsHeaderCollection:
    """Group access to a list of FITS Header-like objects.

    It exposes a number of methods (``set``, ``get``, etc.) that operate over
    all the headers at the same time. It can also be iterated.

    Parameters
    ----------
    headers : list of `astropy.io.fits.Header`
        List of Header objects.
    """

    def __init__(self, headers):
        self._headers = list(headers)

    def _insert(self, idx, header):
        # Insert a header at a given position (used when extensions are
        # added out of order).
        self._headers.insert(idx, header)

    def __iter__(self):
        yield from self._headers

    def __setitem__(self, key, value):
        # Accept either a bare value or a (value, comment) pair, mirroring
        # astropy's Header.__setitem__ convention.
        if isinstance(value, tuple):
            self.set(key, value=value[0], comment=value[1])
        else:
            self.set(key, value=value)

    def set(self, key, value=None, comment=None):
        """Set a keyword in all the headers."""
        for header in self._headers:
            header.set(key, value=value, comment=comment)

    def __getitem__(self, key):
        """Return the value of ``key`` from every header, as a list.

        Raises
        ------
        KeyError
            If the keyword is missing from at least one header. The raised
            error carries two extra attributes: ``missing_at`` (indices of
            the headers lacking the keyword) and ``values`` (the collected
            values, with None at the missing positions), which ``get`` uses
            to recover gracefully.
        """
        missing_at = []
        ret = []
        for n, header in enumerate(self._headers):
            try:
                ret.append(header[key])

            except KeyError:
                logging.debug(
                    "Assigning None to header missing keyword %s", key
                )

                missing_at.append(n)
                ret.append(None)

        if missing_at:
            error = KeyError(
                f"The keyword couldn't be found at headers: "
                f"{tuple(missing_at)}"
            )

            # Stash enough information for .get() to substitute defaults.
            error.missing_at = missing_at
            error.values = ret
            raise error

        return ret

    def get(self, key, default=None):
        """Get a keyword from every header, substituting ``default`` at the
        headers where it is missing.
        """
        try:
            return self[key]
        except KeyError as err:
            vals = err.values
            for n in err.missing_at:
                vals[n] = default
            return vals

    def __delitem__(self, key):
        self.remove(key)

    def remove(self, key):
        """Remove a keyword from all the headers.

        Raises
        ------
        KeyError
            If the keyword was not present in any of the headers.
        """
        deleted = 0
        for header in self._headers:
            try:
                del header[key]
                deleted += 1
            except KeyError:
                pass
        if not deleted:
            raise KeyError(f"'{key}' is not on any of the extensions")

    def get_comment(self, key):
        """Get the comment for a keyword, from all the headers, as a list."""
        return [header.comments[key] for header in self._headers]

    def set_comment(self, key, comment):
        """Set the comment for a keyword in all the headers.

        Raises
        ------
        KeyError
            If the keyword is missing from any header; the message indicates
            which header triggered the failure.
        """

        def _inner_set_comment(header):
            if key not in header:
                raise KeyError(f"Keyword {key!r} not available")

            header.set(key, comment=comment)

        for n, header in enumerate(self._headers):
            try:
                _inner_set_comment(header)
            except KeyError as err:
                raise KeyError(f"{err.args[0]} at header {n}") from err

    def __contains__(self, key):
        # True if *any* managed header contains the keyword. No tuple()
        # materialization: let any() short-circuit on the first match.
        return any(key in h for h in self._headers)
+
+
def new_imagehdu(data, header, name=None):
    """Build a new ImageHDU from an array and a (copied) header.

    Parameters
    ----------
    data : `numpy.ndarray`
        The data array.

    header : `astropy.io.fits.Header`
        The header; a copy is taken so the caller's header is not mutated.

    name : str
        The extension name.

    Notes
    -----
    Assigning data in a delayed way won't reset BZERO/BSCALE in the header,
    for some reason. Needs to be investigated — maybe an astropy.io.fits bug.
    That is why the data is passed straight to the constructor rather than:

    >> i = ImageHDU(data=DELAYED, header=header.copy(), name=name)
    >> i.data = data
    """
    header_copy = header.copy()
    return ImageHDU(data=data, header=header_copy, name=name)
+
+
def table_to_bintablehdu(table, extname=None):
    """Convert an astropy Table object to a BinTableHDU before writing to disk.

    Parameters
    ----------
    table: astropy.table.Table instance
        the table to be converted to a BinTableHDU

    extname: str
        name to go in the EXTNAME field of the FITS header

    Returns
    -------
    BinTableHDU
    """
    # remove header to avoid warning from table_to_hdu; it is restored at
    # the end so the caller's table is left unchanged
    table_header = table.meta.pop("header", None)

    # table_to_hdu sets units only if the unit conforms to the FITS standard,
    # otherwise it issues a warning, which we catch here.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        hdu = fits.table_to_hdu(table)

    # And now we try to set the units that do not conform to the standard,
    # using unit.to_string() without the format='fits' argument.
    for col in table.itercols():
        if col.unit and not hdu.columns[col.name].unit:
            hdu.columns[col.name].unit = col.unit.to_string()

    if table_header is not None:
        # Update with cards from table.meta, but skip structural FITS
        # keywords since those have been set by table_to_hdu
        exclude = (
            "SIMPLE",
            "XTENSION",
            "BITPIX",
            "NAXIS",
            "EXTEND",
            "PCOUNT",
            "GCOUNT",
            "TFIELDS",
            "TFORM",
            "TSCAL",
            "TZERO",
            "TNULL",
            "TTYPE",
            "TUNIT",
            "TDISP",
            "TDIM",
            "THEAP",
            "TBCOL",
        )
        # NOTE: startswith() with a tuple also filters the indexed variants
        # of these keywords (TFORM1, TTYPE2, ...).
        hdr = fits.Header(
            [
                card
                for card in table_header.cards
                if not card.keyword.startswith(exclude)
            ]
        )
        update_header(hdu.header, hdr)
        # reset table's header
        table.meta["header"] = table_header
    if extname:
        hdu.header["EXTNAME"] = (extname, "added by AstroData")
    return hdu
+
+
def header_for_table(table):
    """Return a FITS header describing ``table``.

    Any header already stored in ``table.meta`` is merged with the one
    astropy generates, and is restored afterwards so the table's meta is
    left unchanged.
    """
    saved_header = table.meta.pop("header", None)
    generated_header = fits.table_to_hdu(table).header
    if not saved_header:
        return generated_header
    table.meta["header"] = saved_header  # restore original meta
    return update_header(saved_header, generated_header)
+
+
+
def add_header_to_table(table):
    """Build a FITS header for ``table``, store it in the table's meta,
    and return it.
    """
    hdr = header_for_table(table)
    table.meta["header"] = hdr
    return hdr
+ + + +def _process_table(table, name=None, header=None): + """Convert a BinTableHDU or TableHDU to an astropy Table object. + + Arguments + --------- + table : |BinTableHDU| or |TableHDU| or |Table| + The table to convert. If it's already an |Table|, it will be returned + as is. + + name : str + The name to assign to the table. + + header : `astropy.io.fits.Header` + The header to assign to the table. + """ + if isinstance(table, (BinTableHDU, TableHDU)): + obj = Table(table.data, meta={"header": header or table.header}) + for i, col in enumerate(obj.columns, start=1): + try: + obj[col].unit = u.Unit(obj.meta["header"][f"TUNIT{i}"]) + except (KeyError, TypeError, ValueError): + pass + elif isinstance(table, Table): + obj = Table(table) + if header is not None: + obj.meta["header"] = deepcopy(header) + elif "header" not in obj.meta: + obj.meta["header"] = header_for_table(obj) + else: + raise ValueError(f"{table.__class__} is not a recognized table type") + + if name is not None: + obj.meta["header"]["EXTNAME"] = name + + return obj + + +def card_filter(cards, include=None, exclude=None): + """Filter a list of cards, lazily returning only those that match the + criteria. + + Parameters + ---------- + cards : iterable + The cards to filter. + + include : iterable of str + Only cards with these keywords will be returned. + + exclude : iterable of str + Cards with these keywords will be skipped. + + Yields + ------ + card : tuple + A card that matches the criteria. + """ + for card in cards: + if include is not None and card[0] not in include: + continue + + if exclude is not None and card[0] in exclude: + continue + + yield card + + +def update_header(headera, headerb): + """Update headera with the cards from headerb, but only if they are + different. + + Parameters + ---------- + headera : `astropy.io.fits.Header` + The header to update. + + headerb : `astropy.io.fits.Header` + The header to update from. 
+ """ + cardsa = tuple(tuple(cr) for cr in headera.cards) + cardsb = tuple(tuple(cr) for cr in headerb.cards) + + if cardsa == cardsb: + return headera + + # Ok, headerb differs somehow. Let's try to bring the changes to headera + # Updated keywords that should be unique + difference = set(cardsb) - set(cardsa) + headera.update(card_filter(difference, exclude={"HISTORY", "COMMENT", ""})) + # Check the HISTORY and COMMENT cards, just in case + for key in ("HISTORY", "COMMENT"): + fltcardsa = card_filter(cardsa, include={key}) + fltcardsb = card_filter(cardsb, include={key}) + # assume we start with two headers that are mostly the same and + # that will have added comments/history at the end (in headerb) + for ca, cb in zip_longest(fltcardsa, fltcardsb): + if ca is None: + headera.update((cb,)) + + return headera + + +def fits_ext_comp_key(ext): + """Returns a pair (int, str) that will be used to sort extensions.""" + if isinstance(ext, PrimaryHDU): + # This will guarantee that the primary HDU goes first + ret = (-1, "") + else: + # When two extensions share version number, we'll use their names + # to sort them out. Choose a suitable key so that: + # + # - SCI extensions come first + # - unnamed extensions come last + # + # We'll resort to add 'z' in front of the usual name to force + # SCI to be the "smallest" + name = ext.name + if name == "": + name = "zzzz" + elif name != DEFAULT_EXTENSION: + name = "z" + name + + ver = ext.header.get("EXTVER") + if ver in (-1, None): + # In practice, this number should be larger than any EXTVER found + # in real life HDUs, pushing unnumbered HDUs to the end. + ver = 2**32 - 1 + + # For the general case, just return version and name, to let them + # be sorted naturally + ret = (ver, name) + + return ret + + +class FitsLazyLoadable: + """Class to delay loading of data from a FITS file.""" + + def __init__(self, obj): + """Initializes the object. 
+ + Parameters + ---------- + obj : `astropy.io.fits.ImageHDU` or `astropy.io.fits.BinTableHDU` + The HDU to delay loading from. + """ + self._obj = obj + self.lazy = True + + def _create_result(self, shape): + """Create an empty array to hold the data.""" + return np.empty(shape, dtype=self.dtype) + + def _scale(self, data): + """Scale the data, if necessary.""" + # TODO: It would be goot to access these differently. Is this always an + # object we control? Even if so, should access through a property, not + # a protected member. No friends in python... + # pylint: disable=protected-access + bscale = self._obj._orig_bscale + bzero = self._obj._orig_bzero + + if bscale == 1 and bzero == 0: + return data + + return (bscale * data + bzero).astype(self.dtype) + + def __getitem__(self, arr_slice): + # TODO: We may want (read: should) create an empty result array before + # scaling + return self._scale(self._obj.section[arr_slice]) + + @property + def header(self): + """The header of the HDU.""" + return self._obj.header + + @property + def data(self): + """The data of the HDU.""" + res = self._create_result(self.shape) + res[:] = self._scale(self._obj.data) + return res + + @property + def shape(self): + """The shape of the data.""" + return self._obj.shape + + @property + def dtype(self): + """Need to to some overriding of astropy.io.fits since it doesn't + know about BITPIX=8 + """ + # TODO: It would be goot to access these differently. Is this always an + # object we control? Even if so, should access through a property, not + # a protected member. No friends in python... These are scattered + # throughout the function. 
+ # pylint: disable=protected-access + bitpix = self._obj._orig_bitpix + if self._obj._orig_bscale == 1 and self._obj._orig_bzero == 0: + dtype = fits.BITPIX2DTYPE[bitpix] + else: + # this method from astropy will return the dtype if the data + # needs to be converted to unsigned int or scaled to float + dtype = self._obj._dtype_for_bitpix() + + if dtype is None: + if bitpix < 0: + dtype = np.dtype(f"float{abs(bitpix)}") + + if ( + self._obj.header["EXTNAME"] == "DQ" + or self._obj._uint + and self._obj._orig_bscale == 1 + and bitpix == 8 + ): + dtype = np.uint16 + + return dtype + + +def _prepare_hdulist(hdulist, default_extension="SCI", extname_parser=None): + """Prepare an HDUList for reading. + + Parameters + ---------- + hdulist : `astropy.io.fits.HDUList` + The HDUList to prepare. + + default_extension : str + The name of the default extension. + + extname_parser : callable + A function to parse the EXTNAME of an HDU. + + Returns + ------- + hdulist : `astropy.io.fits.HDUList` + The prepared HDUList. 
+ """ + new_list = [] + highest_ver = 0 + recognized = set() + + if len(hdulist) > 1 or (len(hdulist) == 1 and hdulist[0].data is None): + # MEF file + # First get HDUs for which EXTVER is defined + for hdu in hdulist: + if extname_parser: + extname_parser(hdu) + ver = hdu.header.get("EXTVER") + if ver not in (-1, None) and hdu.name: + highest_ver = max(highest_ver, ver) + elif not isinstance(hdu, PrimaryHDU): + continue + + new_list.append(hdu) + recognized.add(hdu) + + # Then HDUs that miss EXTVER + for hdu in hdulist: + if hdu in recognized: + continue + + if isinstance(hdu, ImageHDU): + highest_ver += 1 + if "EXTNAME" not in hdu.header: + hdu.header["EXTNAME"] = ( + default_extension, + "Added by AstroData", + ) + + if hdu.header.get("EXTVER") in (-1, None): + hdu.header["EXTVER"] = (highest_ver, "Added by AstroData") + + new_list.append(hdu) + recognized.add(hdu) + + else: + # Uh-oh, a single image FITS file + new_list.append(PrimaryHDU(header=hdulist[0].header)) + image = ImageHDU(header=hdulist[0].header, data=hdulist[0].data) + # Fudge due to apparent issues with assigning ImageHDU from data + # TODO: protected members + # pylint: disable=protected-access + image._orig_bscale = hdulist[0]._orig_bscale + image._orig_bzero = hdulist[0]._orig_bzero + + for keyw in ("SIMPLE", "EXTEND"): + if keyw in image.header: + del image.header[keyw] + + image.header["EXTNAME"] = (default_extension, "Added by AstroData") + image.header["EXTVER"] = (1, "Added by AstroData") + new_list.append(image) + + return HDUList(sorted(new_list, key=fits_ext_comp_key)) + + +def read_fits(cls, source, extname_parser=None): + """Takes either a string (with the path to a file) or an HDUList as input, + and tries to return a populated AstroData (or descendant) instance. + + It will raise exceptions if the file is not found, or if there is no match + for the HDUList, among the registered AstroData classes. + + Parameters + ---------- + cls : class + The class to instantiate. 
+ + source : str or `astropy.io.fits.HDUList` + The path to the file, or an HDUList. + + extname_parser : callable + A function to parse the EXTNAME of an HDU. + + Returns + ------- + ad : `astrodata.AstroData` or descendant + The populated AstroData object. This is of the type specified by cls. + """ + + ad = cls() + + if isinstance(source, (str, os.PathLike)): + hdulist = fits.open( + source, memmap=True, do_not_scale_image_data=True, mode="readonly" + ) + + ad.path = source + + else: + hdulist = source + + try: + ad.path = source[0].header.get("ORIGNAME") + + except AttributeError as err: + logging.info("Attribute error in read_fits: %s", err) + ad.path = None + + # TODO: This is a hack to get around the fact that we don't have a + # proper way to pass the original filename to the object. This is + # needed for the writer to be able to write the ORIGNAME keyword. + # pylint: disable=protected-access + _file = hdulist._file + + hdulist = _prepare_hdulist( + hdulist, + default_extension=DEFAULT_EXTENSION, + extname_parser=extname_parser, + ) + + if _file is not None: + hdulist._file = _file + + # Initialize the object containers to a bare minimum + # pylint: disable=no-member + if "ORIGNAME" not in hdulist[0].header and ad.orig_filename is not None: + hdulist[0].header.set( + "ORIGNAME", + ad.orig_filename, + "Original filename prior to processing", + ) + + ad.phu = hdulist[0].header + + # This is hashable --- we can use it to check if we've seen this object + # before. 
+ # pylint: disable=unhashable-member + seen = {hdulist[0]} + + skip_names = {DEFAULT_EXTENSION, "REFCAT", "MDF"} + + def associated_extensions(ver): + for hdu in hdulist: + if hdu.header.get("EXTVER") == ver and hdu.name not in skip_names: + yield hdu + + # Only SCI HDUs + sci_units = [hdu for hdu in hdulist[1:] if hdu.name == DEFAULT_EXTENSION] + + seen_vers = [] + + for hdu in sci_units: + seen.add(hdu) + ver = hdu.header.get("EXTVER", -1) + + if ver > -1 and seen_vers.count(ver) == 1: + LOGGER.warning("Multiple SCI extension with EXTVER %s", ver) + + seen_vers.append(ver) + parts = { + "data": hdu, + "uncertainty": None, + "mask": None, + "wcs": None, + "other": [], + } + + # For each SCI HDU find if it has an associated variance, mask, wcs + for extra_unit in associated_extensions(ver): + seen.add(extra_unit) + name = extra_unit.name + if name == "DQ": + parts["mask"] = extra_unit + elif name == "VAR": + parts["uncertainty"] = extra_unit + elif name == "WCS": + parts["wcs"] = extra_unit + else: + parts["other"].append(extra_unit) + + header = parts["data"].header + lazy = hdulist._file is not None and hdulist._file.memmap + + for part_name in ("data", "mask", "uncertainty"): + if parts[part_name] is not None: + if lazy: + # Use FitsLazyLoadable to delay loading of the data + parts[part_name] = FitsLazyLoadable(parts[part_name]) + else: + # Otherwise use the data array + # parts[part_name] = parts[part_name].data + # TODO: we open the file with do_not_scale_data=True, so + # the data array does not have the correct data values. + # AstroData handles scaling internally, and we can ensure + # it does that by making the data a FitsLazyLoadable; the + # side-effect of this is that the is_lazy() function will + # return True, but this has minimal knock-on effects. + # Hopefully astropy will handle this better in future. 
+ if hdulist._file is not None: # probably compressed + parts[part_name] = FitsLazyLoadable(parts[part_name]) + else: # for astrodata.create() files + parts[part_name] = parts[part_name].data + + # handle the variance if not lazy + if parts["uncertainty"] is not None and not isinstance( + parts["uncertainty"], FitsLazyLoadable + ): + parts["uncertainty"] = ADVarianceUncertainty(parts["uncertainty"]) + + # Create the NDData object + nd = NDDataObject( + data=parts["data"], + uncertainty=parts["uncertainty"], + mask=parts["mask"], + meta={"header": header}, + ) + + ad.append(nd, name=DEFAULT_EXTENSION) + + # This is used in the writer to keep track of the extensions that + # were read from the current object. + nd.meta["parent_ad"] = id(ad) + + for other in parts["other"]: + if not other.name: + warnings.warn(f"Skip HDU {other} because it has no EXTNAME") + else: + setattr(ad[-1], other.name, other) + + if parts["wcs"] is not None: + # Load the gWCS object from the ASDF extension + nd.wcs = asdftablehdu_to_wcs(parts["wcs"]) + if nd.wcs is None: + # Fallback to the data header + nd.wcs = fitswcs_to_gwcs(nd) + if nd.wcs is None: + # In case WCS info is in the PHU + nd.wcs = fitswcs_to_gwcs(hdulist[0].header) + + for other in hdulist: + if other in seen: + continue + name = other.header.get("EXTNAME") + try: + ad.append(other, name=name) + except ValueError as e: + warnings.warn(f"Discarding {name} :\n {e}") + + return ad + + +def ad_to_hdulist(ad): + """Creates an HDUList from an AstroData object.""" + hdul = HDUList() + hdul.append(PrimaryHDU(header=ad.phu, data=DELAYED)) + + # Find the maximum EXTVER for extensions that belonged with this + # object if it was read from a FITS file + # TODO: Is there a way to access _nddata without using the protected + # variable? Should it be a protected variable? 
+ # pylint: disable=protected-access + maxver = max( + ( + nd.meta["header"].get("EXTVER", 0) + for nd in ad._nddata + if nd.meta.get("parent_ad") == id(ad) + ), + default=0, + ) + + for ext in ad._nddata: + header = ext.meta["header"].copy() + + if not isinstance(header, fits.Header): + header = fits.Header(header) + + if ext.meta.get("parent_ad") == id(ad): + # If the extension belonged with this object, use its + # original EXTVER + ver = header["EXTVER"] + else: + # Otherwise renumber the extension + ver = header["EXTVER"] = maxver + 1 + maxver += 1 + + wcs = ext.wcs + + if isinstance(wcs, gWCS): + # We don't have access to the AD tags so see if it's an image + # Catch ValueError as any sort of failure + try: + wcs_dict = gwcs_to_fits(ext, ad.phu) + + except (ValueError, NotImplementedError) as e: + LOGGER.warning(e) + + else: + # Must delete keywords if image WCS has been downscaled + # from a higher number of dimensions + for i in range(1, 5): + for kw in ( + f"CDELT{i}", + f"CRVAL{i}", + f"CUNIT{i}", + f"CTYPE{i}", + f"NAXIS{i}", + ): + if kw in header: + del header[kw] + + for j in range(1, 5): + for kw in (f"CD{i}_{j}", f"PC{i}_{j}", f"CRPIX{j}"): + if kw in header: + del header[kw] + + # Delete this if it's left over from a previous save + if "FITS-WCS" in header: + del header["FITS-WCS"] + + try: + extensions = wcs_dict.pop("extensions") + + except KeyError: + pass + + else: + for k, v in extensions.items(): + ext.meta["other"][k] = v + + header.update(wcs_dict) + + # Use "in" here as the dict entry may be (value, comment) + if "APPROXIMATE" not in wcs_dict.get("FITS-WCS", ""): + wcs = None # There's no need to create a WCS extension + + hdul.append(new_imagehdu(ext.data, header, "SCI")) + + if ext.uncertainty is not None: + hdul.append(new_imagehdu(ext.uncertainty.array, header, "VAR")) + + if ext.mask is not None: + hdul.append(new_imagehdu(ext.mask, header, "DQ")) + + if isinstance(wcs, gWCS): + hdul.append(wcs_to_asdftablehdu(ext.wcs, extver=ver)) + 
+ for name, other in ext.meta.get("other", {}).items(): + if isinstance(other, Table): + hdu = table_to_bintablehdu(other, extname=name) + + elif isinstance(other, np.ndarray): + hdu = new_imagehdu(other, header, name=name) + + elif isinstance(other, NDDataObject): + hdu = new_imagehdu(other.data, ext.meta["header"]) + + else: + raise ValueError( + "I don't know how to write back an object " + f"of type {type(other)}" + ) + + hdu.ver = ver + hdul.append(hdu) + + if ad._tables is not None: + for name, table in sorted(ad._tables.items()): + hdul.append(table_to_bintablehdu(table, extname=name)) + + # Additional FITS compatibility, add to PHU + # pylint: disable=no-member + hdul[0].header["NEXTEND"] = len(hdul) - 1 + + return hdul + + +def write_fits(ad, filename, overwrite=False): + """Writes the AstroData object to a FITS file.""" + hdul = ad_to_hdulist(ad) + hdul.writeto(filename, overwrite=overwrite) + + +@deprecated( + "Renamed to 'windowed_operation', this is just an alias for now, " + "and will be removed in a future version." +) +def windowedOp(*args, **kwargs): # pylint: disable=invalid-name + """Alias for windowed_operation.""" + return windowed_operation(*args, **kwargs) + + +# TODO: Need to refactor this function +def windowed_operation( + func, + sequence, + kernel, + shape=None, + dtype=None, + with_uncertainty=False, + with_mask=False, + **kwargs, +): + """Apply function on a NDData obbjects, splitting the data in chunks to + limit memory usage. + + Parameters + ---------- + func : callable + The function to apply. + + sequence : list of NDData + List of NDData objects. + + kernel : tuple of int + Shape of the blocks. + + shape : tuple of int + Shape of inputs. Defaults to ``sequence[0].shape``. + + dtype : str or dtype + Type of the output array. Defaults to ``sequence[0].dtype``. + + with_uncertainty : bool + Compute uncertainty? + + with_mask : bool + Compute mask? + + **kwargs + Additional args are passed to ``func``. 
+ """ + + def generate_boxes(shape, kernel): + if len(shape) != len(kernel): + raise AssertionError( + f"Incompatible shape ({shape}) and kernel ({kernel})" + ) + + ticks = [ + [(x, x + step) for x in range(0, axis, step)] + for axis, step in zip(shape, kernel) + ] + + return list(cart_product(*ticks)) + + if shape is None: + if len({x.shape for x in sequence}) > 1: + raise ValueError( + "Can't calculate final shape: sequence elements " + "disagree on shape, and none was provided" + ) + + shape = sequence[0].shape + + if dtype is None: + dtype = sequence[0].window[:1, :1].data.dtype + + result = NDDataObject( + np.empty(shape, dtype=dtype), + variance=np.zeros(shape, dtype=dtype) if with_uncertainty else None, + mask=np.empty(shape, dtype=np.uint16) if with_mask else None, + meta=sequence[0].meta, + wcs=sequence[0].wcs, + ) + + # Delete other extensions because we don't know what to do with them + result.meta["other"] = OrderedDict() + + # The Astropy logger's "INFO" messages aren't warnings, so have to fudge + # pylint: disable=no-member + log_level = astropy.logger.conf.log_level + astropy.log.setLevel(astropy.logger.WARNING) + + boxes = generate_boxes(shape, kernel) + + try: + for coords in boxes: + section = tuple(slice(start, end) for (start, end) in coords) + out = func( + [element.window[section] for element in sequence], **kwargs + ) + result.set_section(section, out) + + # propagate additional attributes + if out.meta.get("other"): + for k, v in out.meta["other"].items(): + if len(boxes) > 1: + result.meta["other"][k, coords] = v + else: + result.meta["other"][k] = v + + gc.collect() + + finally: + astropy.log.setLevel(log_level) # and reset + + # Now if the input arrays where splitted in chunks, we need to gather + # the data arrays for the additional attributes. 
+ other = result.meta["other"] + if other: + if len(boxes) > 1: + for (name, coords), obj in list(other.items()): + if not isinstance(obj, NDData): + raise ValueError("only NDData objects are handled here") + if name not in other: + other[name] = NDDataObject( + np.empty(shape, dtype=obj.data.dtype) + ) + section = tuple(slice(start, end) for (start, end) in coords) + other[name].set_section(section, obj) + del other[name, coords] + + for name in other: + # To set the name of our object we need to save it as an ndarray, + # otherwise for a NDData one AstroData would use the name of the + # AstroData object. + other[name] = other[name].data + + return result + + +# --------------------------------------------------------------------------- +# gWCS <-> FITS WCS helper functions go here +# --------------------------------------------------------------------------- +# Could parametrize some naming conventions in the following two functions if +# done elsewhere for hard-coded names like 'SCI' in future, but they only have +# to be self-consistent with one another anyway. + + +def wcs_to_asdftablehdu(wcs, extver=None): + """Serialize a gWCS object as a FITS TableHDU (ASCII) extension. + + The ASCII table is actually a mini ASDF file. The constituent AstroPy + models must have associated ASDF "tags" that specify how to serialize them. + + In the event that serialization as pure ASCII fails (this should not + happen), a binary table representation will be used as a fallback. + + Returns None (issuing a warning) if the WCS object cannot be serialized, + so the rest of the file can still be written. + + Parameters + ---------- + wcs : gWCS + The gWCS object to serialize. + + extver : int + The EXTVER to assign to the extension. + + Returns + ------- + hdu : TableHDU or BinTableHDU + The FITS table extension containing the serialized WCS object. 
+ """ + # Create a small ASDF file in memory containing the WCS object + # representation because there's no public API for generating only the + # relevant YAML subsection and an ASDF file handles the "tags" properly. + try: + af = asdf.AsdfFile({"wcs": wcs}) + except jsonschema.exceptions.ValidationError as err: + # (The original traceback also gets printed here) + raise TypeError( + f"Cannot serialize model(s) for 'WCS' extension " f"{extver or ''}" + ) from err + + # ASDF can only dump YAML to a binary file object, so do that and read + # the contents back from it for storage in a FITS extension: + with BytesIO() as fd: + with af: + # Generate the YAML, dumping any binary arrays as text: + af.write_to(fd, all_array_storage="inline") + fd.seek(0) + wcsbuf = fd.read() + + # Convert the bytes to readable lines of text for storage (falling back to + # saving as binary in the unexpected event that this is not possible): + try: + wcsbuf = wcsbuf.decode("ascii").splitlines() + + except UnicodeDecodeError as err: + # This should not happen, but if the ASDF contains binary data in + # spite of the 'inline' option above, we have to dump the bytes to + # a non-human-readable binary table rather than an ASCII one: + LOGGER.warning( + "Could not convert WCS %s ASDF to ASCII; saving table " + "as binary (error was %s)", + extver or "", + err, + ) + + hduclass = BinTableHDU + fmt = "B" + wcsbuf = np.frombuffer(wcsbuf, dtype=np.uint8) + + else: + hduclass = TableHDU + fmt = f"A{max(len(line) for line in wcsbuf)}" + + # Construct the FITS table extension: + col = Column( + name="gWCS", format=fmt, array=wcsbuf, ascii=hduclass is TableHDU + ) + + return hduclass.from_columns([col], name="WCS", ver=extver) + + +def asdftablehdu_to_wcs(hdu): + """Recreate a gWCS object from its serialization in a FITS table extension. + + Returns None (issuing a warning) if the extension cannot be parsed, so + the rest of the file can still be read. 
+ """ + ver = hdu.header.get("EXTVER", -1) + + if isinstance(hdu, (TableHDU, BinTableHDU)): + try: + colarr = hdu.data["gWCS"] + + except KeyError as err: + LOGGER.warning( + "Ignoring 'WCS' extension %s with no 'gWCS' table " + "column (error was %s)", + ver, + err, + ) + + return None + + # If this table column contains text strings as expected, join the rows + # as separate lines of a string buffer and encode the resulting YAML as + # bytes that ASDF can parse. If AstroData has produced another format, + # it will be a binary dump due to the unexpected presence of non-ASCII + # data, in which case we just extract unmodified bytes from the table. + if colarr.dtype.kind in ("U", "S"): + sep = os.linesep + # Just in case io.fits ever produces 'S' on Py 3 (not the default): + # join lines as str & avoid a TypeError with unicode linesep; could + # also use astype('U') but it assumes an encoding implicitly. + if colarr.dtype.kind == "S" and not isinstance(sep, bytes): + colarr = np.char.decode( + np.char.rstrip(colarr), encoding="ascii" + ) + wcsbuf = sep.join(colarr).encode("ascii") + else: + wcsbuf = colarr.tobytes() + + # Convert the stored text to a Bytes file object that ASDF can open: + with BytesIO(wcsbuf) as fd: + # Try to extract a 'wcs' entry from the YAML: + try: + af = asdf.open(fd) + + except IOError: + LOGGER.warning( + "Ignoring 'WCS' extension %s: failed to parse " + "ASDF.\nError was as follows:\n%s", + ver, + traceback.format_exc(), + ) + + return None + + with af: + try: + wcs = af.tree["wcs"] + + except KeyError as err: + LOGGER.warning( + "Ignoring 'WCS' extension %s: missing " + "'wcs' dict entry. Error was %s", + ver, + err, + ) + + return None + + else: + LOGGER.warning("Ignoring non-FITS-table 'WCS' extension %s", ver) + + return None + + return wcs +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/astrodata/nddata.html b/_modules/astrodata/nddata.html new file mode 100644 index 00000000..fa3f3f96 --- /dev/null +++ b/_modules/astrodata/nddata.html @@ -0,0 +1,789 @@ + + + + + + + astrodata.nddata — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for astrodata.nddata

+"""This module implements a derivative class based on NDData with some Mixins,
+implementing windowing and on-the-fly data scaling.
+"""
+
+
+import warnings
+from copy import deepcopy
+from functools import reduce
+
+import numpy as np
+
+from astropy.io.fits import ImageHDU
+from astropy.modeling import Model, models
+from astropy.nddata import (
+    NDArithmeticMixin,
+    NDData,
+    NDSlicingMixin,
+    VarianceUncertainty,
+)
+from gwcs.wcs import WCS as gWCS
+from .wcs import remove_axis_from_frame
+
+INTEGER_TYPES = (int, np.integer)
+
+__all__ = ["NDAstroData"]
+
+
+class ADVarianceUncertainty(VarianceUncertainty):
+    """Subclass VarianceUncertainty to check for negative values."""
+
+    @VarianceUncertainty.array.setter
+    def array(self, value):
+        if value is not None and np.any(value < 0):
+            warnings.warn(
+                "Negative variance values found. Setting to zero.",
+                RuntimeWarning,
+            )
+            value = np.where(value >= 0.0, value, 0.0)
+        VarianceUncertainty.array.fset(self, value)
+
+
+
+[docs] +class AstroDataMixin: + """A Mixin for ``NDData``-like classes (such as ``Spectrum1D``) to enable + them to behave similarly to ``AstroData`` objects. + + These behaviors are: + 1. ``mask`` attributes are combined with bitwise, not logical, or, + since the individual bits are important. + 2. The WCS must be a ``gwcs.WCS`` object and slicing results in + the model being modified. + 3. There is a settable ``variance`` attribute. + 4. Additional attributes such as OBJMASK can be extracted from + the .meta['other'] dict + """ + + def __getattr__(self, attribute): + """Allow access to attributes stored in self.meta['other'], as we do + with AstroData objects. + """ + if attribute.isupper(): + try: + return self.meta["other"][attribute] + except KeyError: + pass + raise AttributeError( + f"{self.__class__.__name__!r} object has no " + f"attribute {attribute!r}" + ) + + def _arithmetic( + self, + operation, + operand, + propagate_uncertainties=True, + handle_mask=np.bitwise_or, + handle_meta=None, + uncertainty_correlation=0, + compare_wcs="first_found", + **kwds, + ): + """Override the NDData method so that "bitwise_or" becomes the default + operation to combine masks, rather than "logical_or" + """ + return super()._arithmetic( + operation, + operand, + propagate_uncertainties=propagate_uncertainties, + handle_mask=handle_mask, + handle_meta=handle_meta, + uncertainty_correlation=uncertainty_correlation, + compare_wcs=compare_wcs, + **kwds, + ) + + def _slice_wcs(self, slices): + """The ``__call__()`` method of gWCS doesn't appear to conform to the + APE 14 interface for WCS implementations, and doesn't react to slicing + properly. We override NDSlicing's method to do what we want. 
+ """ + if not isinstance(self.wcs, gWCS): + return self.wcs + + # Sanitize the slices, catching some errors early + if not isinstance(slices, (tuple, list)): + slices = (slices,) + slices = list(slices) + ndim = len(self.shape) + if len(slices) > ndim: + raise ValueError( + f"Too many dimensions specified in slice {slices}" + ) + + if Ellipsis in slices: + if slices.count(Ellipsis) > 1: + raise IndexError( + "Only one ellipsis can be specified in a slice" + ) + + ell_index = slices.index(Ellipsis) + 1 + slice_fill = [slice(None)] * (ndim - len(slices) + 1) + slices[ell_index:ell_index] = slice_fill + + slices.extend([slice(None)] * (ndim - len(slices))) + + mods = [] + mapped_axes = [] + for i, (slice_, length) in enumerate(zip(slices[::-1], self.shape)): + model = [] + if isinstance(slice_, slice): + if slice_.step and slice_.step > 1: + raise IndexError("Cannot slice with a step") + if slice_.start: + start = ( + length + slice_.start + if slice_.start < 1 + else slice_.start + ) + if start > 0: + model.append(models.Shift(start)) + mapped_axes.append(max(mapped_axes) + 1 if mapped_axes else 0) + elif isinstance(slice_, INTEGER_TYPES): + model.append(models.Const1D(slice_)) + mapped_axes.append(-1) + else: + raise IndexError("Slice not an integer or range") + if model: + mods.append(reduce(Model.__or__, model)) + else: + # If the previous model was an Identity, we can hang this + # one onto that without needing to append a new Identity + if i > 0 and isinstance(mods[-1], models.Identity): + mods[-1] = models.Identity(mods[-1].n_inputs + 1) + else: + mods.append(models.Identity(1)) + + slicing_model = reduce(Model.__and__, mods) + if mapped_axes != list(np.arange(ndim)): + slicing_model = ( + models.Mapping(tuple(max(ax, 0) for ax in mapped_axes)) + | slicing_model + ) + slicing_model.inverse = models.Mapping( + tuple(ax for ax in mapped_axes if ax != -1), n_inputs=ndim + ) + + if ( + isinstance(slicing_model, models.Identity) + and slicing_model.n_inputs == ndim 
+ ): + return self.wcs # Unchanged! + new_wcs = deepcopy(self.wcs) + input_frame = new_wcs.input_frame + for axis, mapped_axis in reversed(list(enumerate(mapped_axes))): + if mapped_axis == -1: + input_frame = remove_axis_from_frame(input_frame, axis) + new_wcs.pipeline[0].frame = input_frame + new_wcs.insert_transform( + new_wcs.input_frame, slicing_model, after=True + ) + return new_wcs + + @property + def variance(self): + """A convenience property to access the contents of ``uncertainty``.""" + return getattr(self.uncertainty.array, "array", None) + + @variance.setter + def variance(self, value): + self.uncertainty = ( + ADVarianceUncertainty(value) if value is not None else None + ) + + @property + def wcs(self): + """The WCS of the data. This is a gWCS object, not a FITS WCS object. + + This is returning wcs from an inhertited class, see NDData.wcs for more + details. + """ + return super().wcs + + @wcs.setter + def wcs(self, value): + if value is not None and not isinstance(value, gWCS): + raise TypeError("wcs value must be None or a gWCS object") + self._wcs = value + + @property + def shape(self): + """The shape of the data.""" + return self._data.shape + + @property + def size(self): + """The size of the data.""" + return self._data.size
+ + + +class FakeArray: + """A class that pretends to be an array, but is actually a lazy-loaded""" + + def __init__(self, very_faked): + self.data = very_faked + self.shape = (100, 100) # Won't matter. This is just to fool NDData + self.dtype = np.float32 # Same here + + def __getitem__(self, index): + return None + + def __array__(self): + return self.data + + +class NDWindowing: + """A class to allow "windowed" access to some properties of an + ``NDAstroData`` instance. In particular, ``data``, ``uncertainty``, + ``variance``, and ``mask`` return clipped data. + """ + + def __init__(self, target): + self._target = target + + def __getitem__(self, window_slice): + return NDWindowingAstroData(self._target, window=window_slice) + + +class NDWindowingAstroData( + AstroDataMixin, NDArithmeticMixin, NDSlicingMixin, NDData +): + """Allows "windowed" access to some properties of an ``NDAstroData`` + instance. In particular, ``data``, ``uncertainty``, ``variance``, and + ``mask`` return clipped data. + """ + + # pylint: disable=super-init-not-called + def __init__(self, target, window): + self._target = target + self._window = window + + # TODO: __init__ exists in parent classes, but we don't call it. + # Is this a problem? + + def __getattr__(self, attribute): + """Allow access to attributes stored in self.meta['other'], as we do + with AstroData objects. 
+ """ + if attribute.isupper(): + try: + return self._target._get_simple( + attribute, section=self._window + ) + except KeyError: + pass + raise AttributeError( + f"{self.__class__.__name__!r} object has no " + f"attribute {attribute!r}" + ) + + @property + def unit(self): + return self._target.unit + + @property + def wcs(self): + # TODO: Accessing protected member from _target + # pylint: disable=protected-access + return self._target._slice_wcs(self._window) + + @property + def data(self): + # TODO: Accessing protected member from _target + # pylint: disable=protected-access + return self._target._get_simple("_data", section=self._window) + + @property + def uncertainty(self): + # TODO: Accessing protected member from _target + # pylint: disable=protected-access + return self._target._get_uncertainty(section=self._window) + + @property + def variance(self): + if self.uncertainty is not None: + return self.uncertainty.array + + return None + + @property + def mask(self): + # TODO: Accessing protected member from _target + # pylint: disable=protected-access + return self._target._get_simple("_mask", section=self._window) + + +def is_lazy(item): + """Returns True if the item is a lazy-loaded object, False otherwise.""" + return isinstance(item, ImageHDU) or (hasattr(item, "lazy") and item.lazy) + + +
+[docs] +class NDAstroData(AstroDataMixin, NDArithmeticMixin, NDSlicingMixin, NDData): + """Implements ``NDData`` with all Mixins, plus some ``AstroData`` + specifics. + + This class implements an ``NDData``-like container that supports reading + and writing as implemented in the ``astropy.io.registry`` and also slicing + (indexing) and simple arithmetics (add, subtract, divide and multiply). + + A very important difference between ``NDAstroData`` and ``NDData`` is that + the former attempts to load all its data lazily. There are also some + important differences in the interface (eg. ``.data`` lets you reset its + contents after initialization). + + Documentation is provided where our class differs. + + See also + -------- + NDData + NDArithmeticMixin + NDSlicingMixin + + Examples + -------- + + The mixins allow operation that are not possible with ``NDData`` or + ``NDDataBase``, i.e. simple arithmetics:: + + >>> from astropy.nddata import StdDevUncertainty + >>> import numpy as np + >>> data = np.ones((3,3), dtype=float) + >>> ndd1 = NDAstroData(data, uncertainty=StdDevUncertainty(data)) + >>> ndd2 = NDAstroData(data, uncertainty=StdDevUncertainty(data)) + >>> ndd3 = ndd1.add(ndd2) + >>> ndd3.data + array([[2., 2., 2.], + [2., 2., 2.], + [2., 2., 2.]]) + >>> ndd3.uncertainty.array + array([[1.41421356, 1.41421356, 1.41421356], + [1.41421356, 1.41421356, 1.41421356], + [1.41421356, 1.41421356, 1.41421356]]) + + see ``NDArithmeticMixin`` for a complete list of all supported arithmetic + operations. + + But also slicing (indexing) is possible:: + + >>> ndd4 = ndd3[1,:] + >>> ndd4.data + array([2., 2., 2.]) + >>> ndd4.uncertainty.array + array([1.41421356, 1.41421356, 1.41421356]) + + See ``NDSlicingMixin`` for a description how slicing works (which + attributes) are sliced. + """ + + def __init__( + self, + data, + uncertainty=None, + mask=None, + wcs=None, + meta=None, + unit=None, + copy=False, + variance=None, + ): + """Initialize an ``NDAstroData`` instance. 
+ + Parameters + ---------- + data : array-like + The actual data. This can be a numpy array, a memmap, or a + ``fits.ImageHDU`` object. + + uncertainty : ``NDUncertainty``-like object, optional + An object that represents the uncertainty of the data. If not + specified, the uncertainty will be set to None. + + mask : array-like, optional + An array that represents the mask of the data. If not specified, + the mask will be set to None. + + wcs : ``gwcs.WCS`` object, optional + The WCS of the data. If not specified, the WCS will be set to None. + + meta : dict-like, optional + A dictionary-like object that holds the meta data. If not + specified, the meta data will be set to None. + + unit : ``astropy.units.Unit`` object, optional + The unit of the data. If not specified, the unit will be set to + None. + + copy : bool, optional + If True, the data, uncertainty, mask, wcs, meta, and unit will be + copied. Otherwise, they will be referenced. Default is False. + + variance : array-like, optional + An array that represents the variance of the data. If not + specified, the variance will be set to None. + + Raises + ------ + ValueError + If ``uncertainty`` and ``variance`` are both specified. + + Notes + ----- + The ``uncertainty`` and ``variance`` parameters are mutually exclusive. + """ + if variance is not None: + if uncertainty is not None: + raise ValueError( + f"Cannot specify both uncertainty and variance" + f"({uncertainty = }, {variance = })." 
+ ) + + uncertainty = ADVarianceUncertainty(variance) + + super().__init__( + FakeArray(data) if is_lazy(data) else data, + None if is_lazy(uncertainty) else uncertainty, + mask, + wcs, + meta, + unit, + copy, + ) + + if is_lazy(data): + self.data = data + if is_lazy(uncertainty): + self.uncertainty = uncertainty + + def __deepcopy__(self, memo): + new = self.__class__( + self._data if is_lazy(self._data) else deepcopy(self.data, memo), + self._uncertainty if is_lazy(self._uncertainty) else None, + self._mask if is_lazy(self._mask) else deepcopy(self.mask, memo), + deepcopy(self.wcs, memo), + None, + self.unit, + ) + new.meta = deepcopy(self.meta, memo) + # Needed to avoid recursion because of uncertainty's weakref to self + if not is_lazy(self._uncertainty): + new.variance = deepcopy(self.variance) + return new + + @property + def window(self): + """Interface to access a section of the data, using lazy access + whenever possible. + + Returns + -------- + An instance of ``NDWindowing``, which provides ``__getitem__``, + to allow the use of square brackets when specifying the window. + Ultimately, an ``NDWindowingAstrodata`` instance is returned. 
+ + Examples + --------- + + >>> ad[0].nddata.window[100:200, 100:200] # doctest: +SKIP + <NDWindowingAstrodata .....> + """ + return NDWindowing(self) + + def _get_uncertainty(self, section=None): + """Return the ADVarianceUncertainty object, or a slice of it.""" + if self._uncertainty is not None: + if is_lazy(self._uncertainty): + if section is None: + self.uncertainty = ADVarianceUncertainty( + self._uncertainty.data + ) + return self.uncertainty + + return ADVarianceUncertainty(self._uncertainty[section]) + + if section is not None: + return self._uncertainty[section] + + return self._uncertainty + + return None + + def _get_simple(self, target, section=None): + """Only use 'section' for image-like objects that have the same shape + as the NDAstroData object; otherwise, return the whole object""" + source = getattr(self, target) + if source is not None: + if is_lazy(source): + if section is None: + ret = np.empty(source.shape, dtype=source.dtype) + ret[:] = source.data + setattr(self, target, ret) + + else: + ret = source[section] + + return ret + + if hasattr(source, "shape"): + if section is None or source.shape != self.shape: + return np.array(source, copy=False) + + return np.array(source, copy=False)[section] + + return source + + return None + + @property + def data(self): + """An array representing the raw data stored in this instance. It + implements a setter. 
+ """ + return self._get_simple("_data") + + @data.setter + def data(self, value): + if value is None: + raise ValueError(f"Cannot set data to {value}.") + + if is_lazy(value): + self.meta["header"] = value.header + + self._data = value + + @property + def uncertainty(self): + return self._get_uncertainty() + + @uncertainty.setter + def uncertainty(self, value): + if value is not None and not is_lazy(value): + # TODO: Accessing protected member from value + # pylint: disable=protected-access + if value._parent_nddata is not None: + value = value.__class__(value, copy=False) + + value.parent_nddata = self + + self._uncertainty = value + + @property + def mask(self): + """Get or set the mask of the data.""" + return self._get_simple("_mask") + + @mask.setter + def mask(self, value): + self._mask = value + + @property + def variance(self): + """A convenience property to access the contents of ``uncertainty``, + squared (as the uncertainty data is stored as standard deviation). + """ + # TODO: Is this supposed to be squared? + arr = self._get_uncertainty() + + if arr is not None: + return arr.array + + return arr + + @variance.setter + def variance(self, value): + self.uncertainty = ( + ADVarianceUncertainty(value) if value is not None else None + ) + +
+[docs] + def set_section(self, section, input_data): + """Sets only a section of the data. This method is meant to prevent + fragmentation in the Python heap, by reusing the internal structures + instead of replacing them with new ones. + + Args + ----- + section : ``slice`` + The area that will be replaced + + input_data : ``NDData``-like instance + This object needs to implement at least ``data``, ``uncertainty``, + and ``mask``. Their entire contents will replace the data in the + area defined by ``section``. + + Examples + --------- + + >>> def setup(): + ... sec = NDData(np.zeros((100,100))) + ... ad[0].nddata.set_section( + ... (slice(None,100),slice(None,100)), + ... sec + ... ) + ... + >>> setup() # doctest: +SKIP + + """ + self.data[section] = input_data.data + + if self.uncertainty is not None: + self.uncertainty.array[section] = input_data.uncertainty.array + + if self.mask is not None: + self.mask[section] = input_data.mask
+ + + def __repr__(self): + if is_lazy(self._data): + return self.__class__.__name__ + "(Memmapped)" + + return super().__repr__() + + # This is a common idiom in numpy, so keep the name. + # pylint: disable=invalid-name + @property + def T(self): + """Transpose the data. This is not a copy of the data.""" + return self.transpose() + +
+[docs] + def transpose(self): + """Transpose the data. This is not a copy of the data.""" + unc = self.uncertainty + new_wcs = deepcopy(self.wcs) + inframe = new_wcs.input_frame + new_wcs.insert_transform( + inframe, + models.Mapping(tuple(reversed(range(inframe.naxes)))), + after=True, + ) + return self.__class__( + self.data.T, + uncertainty=None if unc is None else unc.__class__(unc.array.T), + mask=None if self.mask is None else self.mask.T, + wcs=new_wcs, + copy=False, + )
+
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/astrodata/utils.html b/_modules/astrodata/utils.html new file mode 100644 index 00000000..083b1943 --- /dev/null +++ b/_modules/astrodata/utils.html @@ -0,0 +1,621 @@ + + + + + + + astrodata.utils — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for astrodata.utils

+"""Utility functions and classes for AstroData objects."""
+import inspect
+import logging
+import warnings
+from collections import namedtuple
+from functools import wraps
+from traceback import format_stack
+
+import numpy as np
+
+INTEGER_TYPES = (int, np.integer)
+
+__all__ = (
+    "assign_only_single_slice",
+    "astro_data_descriptor",
+    "AstroDataDeprecationWarning",
+    "astro_data_tag",
+    "deprecated",
+    "normalize_indices",
+    "returns_list",
+    "TagSet",
+    "Section",
+)
+
+
+class AstroDataDeprecationWarning(DeprecationWarning):
+    """Warning class for deprecated AstroData methods."""
+
+
+warnings.simplefilter("always", AstroDataDeprecationWarning)
+
+
+def deprecated(reason):
+    """Marks a function as deprecated.
+
+    Parameters
+    ----------
+    reason : str
+        The reason why the function is deprecated
+
+    Returns
+    -------
+    function
+        The decorated function
+
+    Usage
+    -----
+
+    >>> @deprecated("Use another function instead")
+    ... def my_function():
+    ...     pass
+    """
+
+    def decorator_wrapper(fn):
+        @wraps(fn)
+        def wrapper(*args, **kw):
+            current_source = "|".join(format_stack(inspect.currentframe()))
+            if current_source not in wrapper.seen:
+                wrapper.seen.add(current_source)
+                warnings.warn(reason, AstroDataDeprecationWarning)
+            return fn(*args, **kw)
+
+        wrapper.seen = set()
+        return wrapper
+
+    return decorator_wrapper
+
+
+def normalize_indices(slc, nitems):
+    """Normalize a slice or index to a list of indices."""
+    multiple = True
+    if isinstance(slc, slice):
+        start, stop, step = slc.indices(nitems)
+        indices = list(range(start, stop, step))
+    elif isinstance(slc, INTEGER_TYPES) or (
+        isinstance(slc, tuple)
+        and all(isinstance(i, INTEGER_TYPES) for i in slc)
+    ):
+        if isinstance(slc, INTEGER_TYPES):
+            slc = (int(slc),)  # slc's type m
+            multiple = False
+
+        else:
+            multiple = True
+
+        # Normalize negative indices...
+        indices = [(x if x >= 0 else nitems + x) for x in slc]
+
+    else:
+        raise ValueError(f"Invalid index: {slc}")
+
+    if any(i >= nitems for i in indices):
+        raise IndexError("Index out of range")
+
+    return indices, multiple
+
+
+
+[docs] +class TagSet(namedtuple("TagSet", "add remove blocked_by blocks if_present")): + """Named tuple that is used by tag methods to return which actions should + be performed on a tag set. + + All the attributes are optional, and any combination of them can be used, + allowing to create complex tag structures. Read the documentation on the + tag-generating algorithm if you want to better understand the interactions. + + The simplest TagSet, though, tends to just add tags to the global set. + + It can be initialized by position, like any other tuple (the order of the + arguments is the one in which the attributes are listed below). It can + also be initialized by name. + + Attributes + ---------- + add : set of str, optional + Tags to be added to the global set + + remove : set of str, optional + Tags to be removed from the global set + + blocked_by : set of str, optional + Tags that will prevent this TagSet from being applied + + blocks : set of str, optional + Other TagSets containing these won't be applied + + if_present : set of str, optional + This TagSet will be applied only *all* of these tags are present + + Examples + --------- + >>> TagSet() # doctest: +SKIP + TagSet( + add=set(), + remove=set(), + blocked_by=set(), + blocks=set(), + if_present=set() + ) + >>> TagSet({'BIAS', 'CAL'}) # doctest: +SKIP + TagSet( + add={'BIAS', 'CAL'}, + remove=set(), + blocked_by=set(), + blocks=set(), + if_present=set() + ) + >>> TagSet(remove={'BIAS', 'CAL'}) # doctest: +SKIP + TagSet( + add=set(), + remove={'BIAS', 'CAL'}, + blocked_by=set(), + blocks=set(), + if_present=set() + ) + """ + + def __new__( + cls, + add=None, + remove=None, + blocked_by=None, + blocks=None, + if_present=None, + ): + return super().__new__( + cls, + add or set(), + remove or set(), + blocked_by or set(), + blocks or set(), + if_present or set(), + )
+ + + +
+[docs] +def astro_data_descriptor(fn): + """Decorator that will mark a class method as an AstroData descriptor. + Useful to produce list of descriptors, for example. + + If used in combination with other decorators, this one *must* be the + one on the top (ie. the last one applying). It doesn't modify the + method in any other way. + + Args + ----- + fn : method + The method to be decorated + + Returns + -------- + The tagged method (not a wrapper) + """ + fn.descriptor_method = True + return fn
+ + + +
+[docs] +def returns_list(fn): + """Decorator to ensure that descriptors that should return a list (of one + value per extension) only returns single values when operating on single + slices; and vice versa. + + This is a common case, and you can use the decorator to simplify the + logic of your descriptors. + + Args + ----- + fn : method + The method to be decorated + + Returns + -------- + A function + """ + + @wraps(fn) + def wrapper(self, *args, **kwargs): + ret = fn(self, *args, **kwargs) + if self.is_single: + if isinstance(ret, list): + if len(ret) > 1: + logging.warning( + "Descriptor %s returned a list " + "of %s elements when operating on " + "a single slice", + fn.__name__, + len(ret), + ) + + return ret[0] + + return ret + + if isinstance(ret, list): + if len(ret) == len(self): + return ret + + raise IndexError( + f"Incompatible numbers of extensions and " + f"elements in {fn.__name__}" + ) + + return [ret] * len(self) + + return wrapper
+ + + +def assign_only_single_slice(fn): + """Raise `ValueError` if assigning to a non-single slice.""" + + @wraps(fn) + def wrapper(self, *args, **kwargs): + if not self.is_single: + raise ValueError( + "Trying to assign to an AstroData object that " + "is not a single slice" + ) + return fn(self, *args, **kwargs) + + return wrapper + + +
+[docs] +def astro_data_tag(fn): + """Decorator that marks methods of an `AstroData` derived class as part of + the tag-producing system. + + It wraps the method around a function that will ensure a consistent return + value: the wrapped method can return any sequence of sequences of strings, + and they will be converted to a TagSet. If the wrapped method + returns None, it will be turned into an empty TagSet. + + Args + ----- + fn : method + The method to be decorated + + Returns + -------- + A wrapper function + """ + + @wraps(fn) + def wrapper(self): + try: + ret = fn(self) + if ret is not None: + if not isinstance(ret, TagSet): + raise TypeError( + f"Tag function {fn.__name__} didn't return a TagSet" + ) + + return TagSet(*tuple(set(s) for s in ret)) + + except KeyError: + pass + + # Return empty TagSet for the "doesn't apply" case + return TagSet() + + wrapper.tag_method = True + return wrapper
+ + + +
+[docs] +class Section(tuple): + """A class to handle n-dimensional sections""" + + def __new__(cls, *args, **kwargs): + # Ensure that the order of keys is what we want + axis_names = [x for axis in "xyzuvw" for x in (f"{axis}1", f"{axis}2")] + + _dict = dict(zip(axis_names, args + ("",) * len(kwargs))) + + _dict.update(kwargs) + + if list(_dict.values()).count("") or (len(_dict) % 2): + raise ValueError("Cannot initialize 'Section' object") + + instance = tuple.__new__(cls, tuple(_dict.values())) + instance._axis_names = tuple(_dict.keys()) + + if not all(np.diff(instance)[::2] > 0): + raise ValueError( + "Not all 'Section' end coordinates exceed the " + "start coordinates" + ) + + return instance + + @property + def axis_dict(self): + return dict(zip(self._axis_names, self)) + + def __getnewargs__(self): + return tuple(self) + + def __getattr__(self, attr): + if attr in self._axis_names: + return self.axis_dict[attr] + + raise AttributeError(f"No such attribute '{attr}'") + + def __repr__(self): + return ( + "Section(" + + ", ".join([f"{k}={self.axis_dict[k]}" for k in self._axis_names]) + + ")" + ) + + @property + def ndim(self): + """The number of dimensions in the section.""" + return len(self) // 2 + +
+[docs] + @staticmethod + def from_shape(value): + """Produce a Section object defining a given shape.""" + return Section(*[y for x in reversed(value) for y in (0, x)])
+ + +
+[docs] + @staticmethod + def from_string(value): + """The inverse of __str__, produce a Section object from a string.""" + return Section( + *[ + y + for x in value.strip("[]").split(",") + for start, end in [x.split(":")] + for y in ( + None if start == "" else int(start) - 1, + None if end == "" else int(end), + ) + ] + )
+ + +
+[docs] + @deprecated( + "Renamed to 'as_iraf_section', this is just an alias for now " + "and will be removed in a future version." + ) + def asIRAFsection(self): # pylint: disable=invalid-name + """Deprecated, see as_iraf_section""" + return self.as_iraf_section()
+ + +
+[docs] + def as_iraf_section(self): + """Produce string of style '[x1:x2,y1:y2]' that is 1-indexed + and end-inclusive + """ + return ( + "[" + + ",".join( + [ + ":".join( + [ + str(self.axis_dict[axis] + 1), + str(self.axis_dict[axis.replace("1", "2")]), + ] + ) + for axis in self._axis_names[::2] + ] + ) + + "]" + )
+ + +
+[docs] + def asslice(self, add_dims=0): + """Return the Section object as a slice/list of slices. Higher + dimensionality can be achieved with the add_dims parameter. + """ + return (slice(None),) * add_dims + tuple( + slice(self.axis_dict[axis], self.axis_dict[axis.replace("1", "2")]) + for axis in reversed(self._axis_names[::2]) + )
+ + +
+[docs] + def contains(self, section): + """Return True if the supplied section is entirely within self""" + if self.ndim != section.ndim: + raise ValueError("Sections have different dimensionality") + + con1 = all(s2 >= s1 for s1, s2 in zip(self[::2], section[::2])) + + if not con1: + return False + + con2 = all(s2 <= s1 for s1, s2 in zip(self[1::2], section[1::2])) + + return con1 and con2
+ + +
+[docs] + def is_same_size(self, section): + """Return True if the Sections are the same size""" + return np.array_equal(np.diff(self)[::2], np.diff(section)[::2])
+ + +
+[docs] + def overlap(self, section): + """Determine whether the two sections overlap. If so, the Section + common to both is returned, otherwise None + """ + if self.ndim != section.ndim: + raise ValueError("Sections have different dimensionality") + + mins = [max(s1, s2) for s1, s2 in zip(self[::2], section[::2])] + maxs = [min(s1, s2) for s1, s2 in zip(self[1::2], section[1::2])] + + try: + return self.__class__( + *[v for pair in zip(mins, maxs) for v in pair] + ) + + except ValueError as err: + logging.warning( + "Sections do not overlap, recieved %s: %s", + err.__class__.__name__, + err, + ) + + return None
+ + +
+[docs] + def shift(self, *shifts): + """Shift a section in each direction by the specified amount""" + if len(shifts) != self.ndim: + raise ValueError( + f"Number of shifts {len(shifts)} incompatible " + f"with dimensionality {self.ndim}" + ) + return self.__class__( + *[ + x + s + for x, s in zip(self, [ss for s in shifts for ss in [s] * 2]) + ] + )
+
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/index.html b/_modules/index.html new file mode 100644 index 00000000..c30a83e9 --- /dev/null +++ b/_modules/index.html @@ -0,0 +1,119 @@ + + + + + + + Overview: module code — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + +
+
+
+ + + + +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_sources/api/astrodata.AstroData.rst.txt b/_sources/api/astrodata.AstroData.rst.txt new file mode 100644 index 00000000..24707c0c --- /dev/null +++ b/_sources/api/astrodata.AstroData.rst.txt @@ -0,0 +1,101 @@ +AstroData +========= + +.. currentmodule:: astrodata + +.. autoclass:: AstroData + :show-inheritance: + + .. rubric:: Attributes Summary + + .. autosummary:: + + ~AstroData.data + ~AstroData.descriptors + ~AstroData.exposed + ~AstroData.ext_tables + ~AstroData.filename + ~AstroData.hdr + ~AstroData.header + ~AstroData.id + ~AstroData.indices + ~AstroData.is_sliced + ~AstroData.mask + ~AstroData.nddata + ~AstroData.orig_filename + ~AstroData.path + ~AstroData.phu + ~AstroData.shape + ~AstroData.tables + ~AstroData.tags + ~AstroData.uncertainty + ~AstroData.variance + ~AstroData.wcs + + .. rubric:: Methods Summary + + .. autosummary:: + + ~AstroData.add + ~AstroData.append + ~AstroData.crop + ~AstroData.divide + ~AstroData.info + ~AstroData.instrument + ~AstroData.is_settable + ~AstroData.load + ~AstroData.multiply + ~AstroData.object + ~AstroData.operate + ~AstroData.read + ~AstroData.reset + ~AstroData.subtract + ~AstroData.table + ~AstroData.telescope + ~AstroData.update_filename + ~AstroData.write + + .. rubric:: Attributes Documentation + + .. autoattribute:: data + .. autoattribute:: descriptors + .. autoattribute:: exposed + .. autoattribute:: ext_tables + .. autoattribute:: filename + .. autoattribute:: hdr + .. autoattribute:: header + .. autoattribute:: id + .. autoattribute:: indices + .. autoattribute:: is_sliced + .. autoattribute:: mask + .. autoattribute:: nddata + .. autoattribute:: orig_filename + .. autoattribute:: path + .. autoattribute:: phu + .. autoattribute:: shape + .. autoattribute:: tables + .. autoattribute:: tags + .. autoattribute:: uncertainty + .. autoattribute:: variance + .. autoattribute:: wcs + + .. rubric:: Methods Documentation + + .. automethod:: add + .. 
automethod:: append + .. automethod:: crop + .. automethod:: divide + .. automethod:: info + .. automethod:: instrument + .. automethod:: is_settable + .. automethod:: load + .. automethod:: multiply + .. automethod:: object + .. automethod:: operate + .. automethod:: read + .. automethod:: reset + .. automethod:: subtract + .. automethod:: table + .. automethod:: telescope + .. automethod:: update_filename + .. automethod:: write diff --git a/_sources/api/astrodata.AstroDataError.rst.txt b/_sources/api/astrodata.AstroDataError.rst.txt new file mode 100644 index 00000000..b93c97b1 --- /dev/null +++ b/_sources/api/astrodata.AstroDataError.rst.txt @@ -0,0 +1,6 @@ +AstroDataError +============== + +.. currentmodule:: astrodata + +.. autoexception:: AstroDataError diff --git a/_sources/api/astrodata.AstroDataMixin.rst.txt b/_sources/api/astrodata.AstroDataMixin.rst.txt new file mode 100644 index 00000000..a9892094 --- /dev/null +++ b/_sources/api/astrodata.AstroDataMixin.rst.txt @@ -0,0 +1,23 @@ +AstroDataMixin +============== + +.. currentmodule:: astrodata + +.. autoclass:: AstroDataMixin + :show-inheritance: + + .. rubric:: Attributes Summary + + .. autosummary:: + + ~AstroDataMixin.shape + ~AstroDataMixin.size + ~AstroDataMixin.variance + ~AstroDataMixin.wcs + + .. rubric:: Attributes Documentation + + .. autoattribute:: shape + .. autoattribute:: size + .. autoattribute:: variance + .. autoattribute:: wcs diff --git a/_sources/api/astrodata.NDAstroData.rst.txt b/_sources/api/astrodata.NDAstroData.rst.txt new file mode 100644 index 00000000..65784550 --- /dev/null +++ b/_sources/api/astrodata.NDAstroData.rst.txt @@ -0,0 +1,39 @@ +NDAstroData +=========== + +.. currentmodule:: astrodata + +.. autoclass:: NDAstroData + :show-inheritance: + + .. rubric:: Attributes Summary + + .. autosummary:: + + ~NDAstroData.T + ~NDAstroData.data + ~NDAstroData.mask + ~NDAstroData.uncertainty + ~NDAstroData.variance + ~NDAstroData.window + + .. rubric:: Methods Summary + + .. 
autosummary:: + + ~NDAstroData.set_section + ~NDAstroData.transpose + + .. rubric:: Attributes Documentation + + .. autoattribute:: T + .. autoattribute:: data + .. autoattribute:: mask + .. autoattribute:: uncertainty + .. autoattribute:: variance + .. autoattribute:: window + + .. rubric:: Methods Documentation + + .. automethod:: set_section + .. automethod:: transpose diff --git a/_sources/api/astrodata.Section.rst.txt b/_sources/api/astrodata.Section.rst.txt new file mode 100644 index 00000000..1fa174a9 --- /dev/null +++ b/_sources/api/astrodata.Section.rst.txt @@ -0,0 +1,45 @@ +Section +======= + +.. currentmodule:: astrodata + +.. autoclass:: Section + :show-inheritance: + + .. rubric:: Attributes Summary + + .. autosummary:: + + ~Section.axis_dict + ~Section.ndim + + .. rubric:: Methods Summary + + .. autosummary:: + + ~Section.asIRAFsection + ~Section.as_iraf_section + ~Section.asslice + ~Section.contains + ~Section.from_shape + ~Section.from_string + ~Section.is_same_size + ~Section.overlap + ~Section.shift + + .. rubric:: Attributes Documentation + + .. autoattribute:: axis_dict + .. autoattribute:: ndim + + .. rubric:: Methods Documentation + + .. automethod:: asIRAFsection + .. automethod:: as_iraf_section + .. automethod:: asslice + .. automethod:: contains + .. automethod:: from_shape + .. automethod:: from_string + .. automethod:: is_same_size + .. automethod:: overlap + .. automethod:: shift diff --git a/_sources/api/astrodata.TagSet.rst.txt b/_sources/api/astrodata.TagSet.rst.txt new file mode 100644 index 00000000..2d6191cd --- /dev/null +++ b/_sources/api/astrodata.TagSet.rst.txt @@ -0,0 +1,7 @@ +TagSet +====== + +.. currentmodule:: astrodata + +.. 
autoclass:: TagSet + :show-inheritance: diff --git a/_sources/api/astrodata.add_header_to_table.rst.txt b/_sources/api/astrodata.add_header_to_table.rst.txt new file mode 100644 index 00000000..920b0cae --- /dev/null +++ b/_sources/api/astrodata.add_header_to_table.rst.txt @@ -0,0 +1,6 @@ +add_header_to_table +=================== + +.. currentmodule:: astrodata + +.. autofunction:: add_header_to_table diff --git a/_sources/api/astrodata.astro_data_descriptor.rst.txt b/_sources/api/astrodata.astro_data_descriptor.rst.txt new file mode 100644 index 00000000..80688c15 --- /dev/null +++ b/_sources/api/astrodata.astro_data_descriptor.rst.txt @@ -0,0 +1,6 @@ +astro_data_descriptor +===================== + +.. currentmodule:: astrodata + +.. autofunction:: astro_data_descriptor diff --git a/_sources/api/astrodata.astro_data_tag.rst.txt b/_sources/api/astrodata.astro_data_tag.rst.txt new file mode 100644 index 00000000..1f224c4d --- /dev/null +++ b/_sources/api/astrodata.astro_data_tag.rst.txt @@ -0,0 +1,6 @@ +astro_data_tag +============== + +.. currentmodule:: astrodata + +.. autofunction:: astro_data_tag diff --git a/_sources/api/astrodata.create.rst.txt b/_sources/api/astrodata.create.rst.txt new file mode 100644 index 00000000..ae5daa40 --- /dev/null +++ b/_sources/api/astrodata.create.rst.txt @@ -0,0 +1,6 @@ +create +====== + +.. currentmodule:: astrodata + +.. autofunction:: create diff --git a/_sources/api/astrodata.from_file.rst.txt b/_sources/api/astrodata.from_file.rst.txt new file mode 100644 index 00000000..fb9081fa --- /dev/null +++ b/_sources/api/astrodata.from_file.rst.txt @@ -0,0 +1,6 @@ +from_file +========= + +.. currentmodule:: astrodata + +.. autofunction:: from_file diff --git a/_sources/api/astrodata.open.rst.txt b/_sources/api/astrodata.open.rst.txt new file mode 100644 index 00000000..37110c63 --- /dev/null +++ b/_sources/api/astrodata.open.rst.txt @@ -0,0 +1,6 @@ +open +==== + +.. currentmodule:: astrodata + +.. 
autofunction:: open diff --git a/_sources/api/astrodata.returns_list.rst.txt b/_sources/api/astrodata.returns_list.rst.txt new file mode 100644 index 00000000..60a14c66 --- /dev/null +++ b/_sources/api/astrodata.returns_list.rst.txt @@ -0,0 +1,6 @@ +returns_list +============ + +.. currentmodule:: astrodata + +.. autofunction:: returns_list diff --git a/_sources/api/astrodata.version.rst.txt b/_sources/api/astrodata.version.rst.txt new file mode 100644 index 00000000..553303d6 --- /dev/null +++ b/_sources/api/astrodata.version.rst.txt @@ -0,0 +1,6 @@ +version +======= + +.. currentmodule:: astrodata + +.. autofunction:: version diff --git a/_sources/index.rst.txt b/_sources/index.rst.txt new file mode 100644 index 00000000..d4c9fd2a --- /dev/null +++ b/_sources/index.rst.txt @@ -0,0 +1,18 @@ +astrodata Documentation +----------------------- + +This is the documentation for astrodata. + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + +.. automodapi:: astrodata + :no-inheritance-diagram: + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/_static/alabaster.css b/_static/alabaster.css new file mode 100644 index 00000000..517d0b29 --- /dev/null +++ b/_static/alabaster.css @@ -0,0 +1,703 @@ +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: Georgia, serif; + font-size: 17px; + background-color: #fff; + color: #000; + margin: 0; + padding: 0; +} + + +div.document { + width: 940px; + margin: 30px auto 0 auto; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 220px; +} + +div.sphinxsidebar { + width: 220px; + font-size: 14px; + line-height: 1.5; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.body { + background-color: #fff; + color: #3E4349; + padding: 0 30px 0 30px; +} + +div.body > .section { + text-align: left; +} + +div.footer { + width: 940px; + margin: 20px auto 30px 
auto; + font-size: 14px; + color: #888; + text-align: right; +} + +div.footer a { + color: #888; +} + +p.caption { + font-family: inherit; + font-size: inherit; +} + + +div.relations { + display: none; +} + + +div.sphinxsidebar a { + color: #444; + text-decoration: none; + border-bottom: 1px dotted #999; +} + +div.sphinxsidebar a:hover { + border-bottom: 1px solid #999; +} + +div.sphinxsidebarwrapper { + padding: 18px 10px; +} + +div.sphinxsidebarwrapper p.logo { + padding: 0; + margin: -10px 0 0 0px; + text-align: center; +} + +div.sphinxsidebarwrapper h1.logo { + margin-top: -10px; + text-align: center; + margin-bottom: 5px; + text-align: left; +} + +div.sphinxsidebarwrapper h1.logo-name { + margin-top: 0px; +} + +div.sphinxsidebarwrapper p.blurb { + margin-top: 0; + font-style: normal; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: Georgia, serif; + color: #444; + font-size: 24px; + font-weight: normal; + margin: 0 0 5px 0; + padding: 0; +} + +div.sphinxsidebar h4 { + font-size: 20px; +} + +div.sphinxsidebar h3 a { + color: #444; +} + +div.sphinxsidebar p.logo a, +div.sphinxsidebar h3 a, +div.sphinxsidebar p.logo a:hover, +div.sphinxsidebar h3 a:hover { + border: none; +} + +div.sphinxsidebar p { + color: #555; + margin: 10px 0; +} + +div.sphinxsidebar ul { + margin: 10px 0; + padding: 0; + color: #000; +} + +div.sphinxsidebar ul li.toctree-l1 > a { + font-size: 120%; +} + +div.sphinxsidebar ul li.toctree-l2 > a { + font-size: 110%; +} + +div.sphinxsidebar input { + border: 1px solid #CCC; + font-family: Georgia, serif; + font-size: 1em; +} + +div.sphinxsidebar hr { + border: none; + height: 1px; + color: #AAA; + background: #AAA; + + text-align: left; + margin-left: 0; + width: 50%; +} + +div.sphinxsidebar .badge { + border-bottom: none; +} + +div.sphinxsidebar .badge:hover { + border-bottom: none; +} + +/* To address an issue with donation coming after search */ +div.sphinxsidebar h3.donation { + margin-top: 10px; +} + +/* -- body styles 
----------------------------------------------------------- */ + +a { + color: #004B6B; + text-decoration: underline; +} + +a:hover { + color: #6D4100; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: Georgia, serif; + font-weight: normal; + margin: 30px 0px 10px 0px; + padding: 0; +} + +div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } +div.body h2 { font-size: 180%; } +div.body h3 { font-size: 150%; } +div.body h4 { font-size: 130%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #DDD; + padding: 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + color: #444; + background: #EAEAEA; +} + +div.body p, div.body dd, div.body li { + line-height: 1.4em; +} + +div.admonition { + margin: 20px 0px; + padding: 10px 30px; + background-color: #EEE; + border: 1px solid #CCC; +} + +div.admonition tt.xref, div.admonition code.xref, div.admonition a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fafafa; +} + +div.admonition p.admonition-title { + font-family: Georgia, serif; + font-weight: normal; + font-size: 24px; + margin: 0 0 10px 0; + padding: 0; + line-height: 1; +} + +div.admonition p.last { + margin-bottom: 0; +} + +div.highlight { + background-color: #fff; +} + +dt:target, .highlight { + background: #FAF3E8; +} + +div.warning { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.danger { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.error { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.caution { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.attention { + background-color: #FCC; + border: 1px solid #FAA; +} + 
+div.important { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.note { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.tip { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.hint { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.seealso { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.topic { + background-color: #EEE; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre, tt, code { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; + font-size: 0.9em; +} + +.hll { + background-color: #FFC; + margin: 0 -12px; + padding: 0 12px; + display: block; +} + +img.screenshot { +} + +tt.descname, tt.descclassname, code.descname, code.descclassname { + font-size: 0.95em; +} + +tt.descname, code.descname { + padding-right: 0.08em; +} + +img.screenshot { + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils { + border: 1px solid #888; + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils td, table.docutils th { + border: 1px solid #888; + padding: 0.25em 0.7em; +} + +table.field-list, table.footnote { + border: none; + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + +table.footnote { + margin: 15px 0; + width: 100%; + border: 1px solid #EEE; + background: #FDFDFD; + font-size: 0.9em; +} + +table.footnote + table.footnote { + margin-top: -15px; + border-top: none; +} + +table.field-list th { + padding: 0 0.8em 0 0; +} + +table.field-list td { + padding: 0; +} + +table.field-list p { + margin-bottom: 0.8em; +} + +/* Cloned from + * https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68 + */ +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + 
+table.footnote td.label { + width: .1px; + padding: 0.3em 0 0.3em 0.5em; +} + +table.footnote td { + padding: 0.3em 0.5em; +} + +dl { + margin-left: 0; + margin-right: 0; + margin-top: 0; + padding: 0; +} + +dl dd { + margin-left: 30px; +} + +blockquote { + margin: 0 0 0 30px; + padding: 0; +} + +ul, ol { + /* Matches the 30px from the narrow-screen "li > ul" selector below */ + margin: 10px 0 10px 30px; + padding: 0; +} + +pre { + background: #EEE; + padding: 7px 30px; + margin: 15px 0px; + line-height: 1.3em; +} + +div.viewcode-block:target { + background: #ffd; +} + +dl pre, blockquote pre, li pre { + margin-left: 0; + padding-left: 30px; +} + +tt, code { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ +} + +tt.xref, code.xref, a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fff; +} + +a.reference { + text-decoration: none; + border-bottom: 1px dotted #004B6B; +} + +/* Don't put an underline on images */ +a.image-reference, a.image-reference:hover { + border-bottom: none; +} + +a.reference:hover { + border-bottom: 1px solid #6D4100; +} + +a.footnote-reference { + text-decoration: none; + font-size: 0.7em; + vertical-align: top; + border-bottom: 1px dotted #004B6B; +} + +a.footnote-reference:hover { + border-bottom: 1px solid #6D4100; +} + +a:hover tt, a:hover code { + background: #EEE; +} + + +@media screen and (max-width: 870px) { + + div.sphinxsidebar { + display: none; + } + + div.document { + width: 100%; + + } + + div.documentwrapper { + margin-left: 0; + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + } + + div.bodywrapper { + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + margin-left: 0; + } + + ul { + margin-left: 0; + } + + li > ul { + /* Matches the 30px from the "ul, ol" selector above */ + margin-left: 30px; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .bodywrapper { + margin: 0; + } + + .footer { + width: auto; + } + + .github { + display: none; + } + + + +} + + + 
+@media screen and (max-width: 875px) { + + body { + margin: 0; + padding: 20px 30px; + } + + div.documentwrapper { + float: none; + background: #fff; + } + + div.sphinxsidebar { + display: block; + float: none; + width: 102.5%; + margin: 50px -30px -20px -30px; + padding: 10px 20px; + background: #333; + color: #FFF; + } + + div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, + div.sphinxsidebar h3 a { + color: #fff; + } + + div.sphinxsidebar a { + color: #AAA; + } + + div.sphinxsidebar p.logo { + display: none; + } + + div.document { + width: 100%; + margin: 0; + } + + div.footer { + display: none; + } + + div.bodywrapper { + margin: 0; + } + + div.body { + min-height: 0; + padding: 0; + } + + .rtd_doc_footer { + display: none; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .footer { + width: auto; + } + + .github { + display: none; + } +} + + +/* misc. */ + +.revsys-inline { + display: none!important; +} + +/* Make nested-list/multi-paragraph items look better in Releases changelog + * pages. Without this, docutils' magical list fuckery causes inconsistent + * formatting between different release sub-lists. 
+ */ +div#changelog > div.section > ul > li > p:only-child { + margin-bottom: 0; +} + +/* Hide fugly table cell borders in ..bibliography:: directive output */ +table.docutils.citation, table.docutils.citation td, table.docutils.citation th { + border: none; + /* Below needed in some edge cases; if not applied, bottom shadows appear */ + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + + +/* relbar */ + +.related { + line-height: 30px; + width: 100%; + font-size: 0.9rem; +} + +.related.top { + border-bottom: 1px solid #EEE; + margin-bottom: 20px; +} + +.related.bottom { + border-top: 1px solid #EEE; +} + +.related ul { + padding: 0; + margin: 0; + list-style: none; +} + +.related li { + display: inline; +} + +nav#rellinks { + float: right; +} + +nav#rellinks li+li:before { + content: "|"; +} + +nav#breadcrumbs li+li:before { + content: "\00BB"; +} + +/* Hide certain items when printing */ +@media print { + div.related { + display: none; + } +} \ No newline at end of file diff --git a/_static/basic.css b/_static/basic.css new file mode 100644 index 00000000..30fee9d0 --- /dev/null +++ b/_static/basic.css @@ -0,0 +1,925 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li 
p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > 
a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + 
+p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles 
----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + 
margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + 
+.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 
1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/_static/custom.css b/_static/custom.css new file mode 100644 index 00000000..2a924f1d --- /dev/null +++ b/_static/custom.css @@ -0,0 +1 @@ +/* This file intentionally left blank. */ diff --git a/_static/doctools.js b/_static/doctools.js new file mode 100644 index 00000000..d06a71d7 --- /dev/null +++ b/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. 
+ */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => 
toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/_static/documentation_options.js b/_static/documentation_options.js new file mode 100644 index 00000000..b1611d5c --- /dev/null +++ b/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + VERSION: '3.2.0', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + 
SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/_static/file.png b/_static/file.png new file mode 100644 index 00000000..a858a410 Binary files /dev/null and b/_static/file.png differ diff --git a/_static/graphviz.css b/_static/graphviz.css new file mode 100644 index 00000000..8d81c02e --- /dev/null +++ b/_static/graphviz.css @@ -0,0 +1,19 @@ +/* + * graphviz.css + * ~~~~~~~~~~~~ + * + * Sphinx stylesheet -- graphviz extension. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +img.graphviz { + border: 0; + max-width: 100%; +} + +object.graphviz { + max-width: 100%; +} diff --git a/_static/language_data.js b/_static/language_data.js new file mode 100644 index 00000000..250f5665 --- /dev/null +++ b/_static/language_data.js @@ -0,0 +1,199 @@ +/* + * language_data.js + * ~~~~~~~~~~~~~~~~ + * + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, is available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if 
(re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/_static/minus.png b/_static/minus.png new file mode 100644 index 00000000..d96755fd Binary files /dev/null and b/_static/minus.png differ diff --git a/_static/plus.png b/_static/plus.png new file mode 100644 index 00000000..7107cec9 Binary files /dev/null and b/_static/plus.png differ diff --git a/_static/pygments.css b/_static/pygments.css new file mode 100644 index 00000000..57c7df37 --- /dev/null +++ b/_static/pygments.css @@ -0,0 +1,84 @@ +pre { line-height: 125%; } +td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #f8f8f8; } +.highlight .c { color: #8f5902; font-style: italic } /* Comment */ +.highlight .err { color: #a40000; border: 1px solid #ef2929 } /* Error */ +.highlight .g { color: #000000 } /* Generic */ +.highlight .k { color: #004461; font-weight: bold } /* Keyword */ +.highlight .l { color: #000000 } /* Literal */ +.highlight 
.n { color: #000000 } /* Name */ +.highlight .o { color: #582800 } /* Operator */ +.highlight .x { color: #000000 } /* Other */ +.highlight .p { color: #000000; font-weight: bold } /* Punctuation */ +.highlight .ch { color: #8f5902; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #8f5902; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #8f5902 } /* Comment.Preproc */ +.highlight .cpf { color: #8f5902; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #8f5902; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #8f5902; font-style: italic } /* Comment.Special */ +.highlight .gd { color: #a40000 } /* Generic.Deleted */ +.highlight .ge { color: #000000; font-style: italic } /* Generic.Emph */ +.highlight .ges { color: #000000 } /* Generic.EmphStrong */ +.highlight .gr { color: #ef2929 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* Generic.Inserted */ +.highlight .go { color: #888888 } /* Generic.Output */ +.highlight .gp { color: #745334 } /* Generic.Prompt */ +.highlight .gs { color: #000000; font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #a40000; font-weight: bold } /* Generic.Traceback */ +.highlight .kc { color: #004461; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #004461; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #004461; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #004461; font-weight: bold } /* Keyword.Pseudo */ +.highlight .kr { color: #004461; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #004461; font-weight: bold } /* Keyword.Type */ +.highlight .ld { color: #000000 } /* Literal.Date */ +.highlight .m { color: #990000 } /* Literal.Number */ +.highlight .s { color: #4e9a06 } /* Literal.String */ 
+.highlight .na { color: #c4a000 } /* Name.Attribute */ +.highlight .nb { color: #004461 } /* Name.Builtin */ +.highlight .nc { color: #000000 } /* Name.Class */ +.highlight .no { color: #000000 } /* Name.Constant */ +.highlight .nd { color: #888888 } /* Name.Decorator */ +.highlight .ni { color: #ce5c00 } /* Name.Entity */ +.highlight .ne { color: #cc0000; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #000000 } /* Name.Function */ +.highlight .nl { color: #f57900 } /* Name.Label */ +.highlight .nn { color: #000000 } /* Name.Namespace */ +.highlight .nx { color: #000000 } /* Name.Other */ +.highlight .py { color: #000000 } /* Name.Property */ +.highlight .nt { color: #004461; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #000000 } /* Name.Variable */ +.highlight .ow { color: #004461; font-weight: bold } /* Operator.Word */ +.highlight .pm { color: #000000; font-weight: bold } /* Punctuation.Marker */ +.highlight .w { color: #f8f8f8; text-decoration: underline } /* Text.Whitespace */ +.highlight .mb { color: #990000 } /* Literal.Number.Bin */ +.highlight .mf { color: #990000 } /* Literal.Number.Float */ +.highlight .mh { color: #990000 } /* Literal.Number.Hex */ +.highlight .mi { color: #990000 } /* Literal.Number.Integer */ +.highlight .mo { color: #990000 } /* Literal.Number.Oct */ +.highlight .sa { color: #4e9a06 } /* Literal.String.Affix */ +.highlight .sb { color: #4e9a06 } /* Literal.String.Backtick */ +.highlight .sc { color: #4e9a06 } /* Literal.String.Char */ +.highlight .dl { color: #4e9a06 } /* Literal.String.Delimiter */ +.highlight .sd { color: #8f5902; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #4e9a06 } /* Literal.String.Double */ +.highlight .se { color: #4e9a06 } /* Literal.String.Escape */ +.highlight .sh { color: #4e9a06 } /* Literal.String.Heredoc */ +.highlight .si { color: #4e9a06 } /* Literal.String.Interpol */ +.highlight .sx { color: #4e9a06 } /* Literal.String.Other */ 
+.highlight .sr { color: #4e9a06 } /* Literal.String.Regex */ +.highlight .s1 { color: #4e9a06 } /* Literal.String.Single */ +.highlight .ss { color: #4e9a06 } /* Literal.String.Symbol */ +.highlight .bp { color: #3465a4 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #000000 } /* Name.Function.Magic */ +.highlight .vc { color: #000000 } /* Name.Variable.Class */ +.highlight .vg { color: #000000 } /* Name.Variable.Global */ +.highlight .vi { color: #000000 } /* Name.Variable.Instance */ +.highlight .vm { color: #000000 } /* Name.Variable.Magic */ +.highlight .il { color: #990000 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/_static/searchtools.js b/_static/searchtools.js new file mode 100644 index 00000000..7918c3fa --- /dev/null +++ b/_static/searchtools.js @@ -0,0 +1,574 @@ +/* + * searchtools.js + * ~~~~~~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for the full-text search. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. 
+ objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms, highlightTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = contentRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = contentRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) { + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => 
responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms) + ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = _( + `Search finished, found ${resultCount} page(s) matching the search query.` + ); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms, + highlightTerms, +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms, highlightTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. + * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. 
+ */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent !== undefined) return docContent.textContent; + console.warn( + "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." + ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = 
_("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the browser was quick! + if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + /** + * execute search (requires search index to be loaded) + */ + query: (query) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if 
(SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + // array of [docname, title, anchor, descr, score, filename] + let results = []; + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + let score = Math.round(100 * queryLower.length / title.length) + results.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id] of foundEntries) { + let score = Math.round(100 * queryLower.length / entry.length) + results.push([ + docNames[file], + titles[file], + id ? 
"#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // lookup as object + objectTerms.forEach((term) => + results.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); + + // now sort the results by score (in opposite order of appearance, since the + // display function below uses pop() to retrieve items) and then + // alphabetically + results.sort((a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 
1 : -1; + }); + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + results = results.reverse(); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms, highlightTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. 
last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + const arr = [ + { files: terms[word], score: Scorer.term }, + { files: titleTerms[word], score: Scorer.title }, + ]; + // add support for partial matches + 
if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord) && !terms[word]) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord) && !titleTerms[word]) + arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); + }); + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, {}); + scoreMap.get(file)[word] = record.score; + }); + }); + + // create the mapping + files.forEach((file) => { + if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) + fileMap.get(file).push(word); + else fileMap.set(file, [word]); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. 
+ const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords) => { + const text = Search.htmlToText(htmlText); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/_static/sphinx_highlight.js b/_static/sphinx_highlight.js new file mode 100644 index 00000000..8a96c69a --- /dev/null +++ b/_static/sphinx_highlight.js @@ -0,0 +1,154 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); + parent.insertBefore( + span, + parent.insertBefore( + rest, + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. + */ + _highlight(rest, addItems, text, className); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. 
+ */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + 
if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(() => { + /* Do not call highlightSearchWords() when we are on the search page. + * It will highlight words from the *previous* search query. + */ + if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); + SphinxHighlight.initEscapeListener(); +}); diff --git a/api/astrodata.AstroData.html b/api/astrodata.AstroData.html new file mode 100644 index 00000000..0d372d05 --- /dev/null +++ b/api/astrodata.AstroData.html @@ -0,0 +1,755 @@ + + + + + + + + AstroData — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

AstroData

+
+
+class astrodata.AstroData(nddata=None, tables=None, phu=None, indices=None, is_single=False)[source]
+

Bases: object

+

Base class for the AstroData software package. It provides an interface +to manipulate astronomical data sets.

+
+
Parameters:
+
    +
  • nddata (astrodata.NDAstroData or list of astrodata.NDAstroData) – List of NDAstroData objects.

  • +
  • tables (dict[name, astropy.table.Table]) – Dict of table objects.

  • +
  • phu (astropy.io.fits.Header) – Primary header.

  • +
  • indices (list of int) – List of indices mapping the astrodata.NDAstroData objects that this +object will access to. This is used when slicing an object, then the +sliced AstroData will have the .nddata list from its parent and +access the sliced NDAstroData through this list of indices.

  • +
+
+
+

Attributes Summary

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

data

A list of the arrays (or single array, if this is a single slice) corresponding to the science data attached to each extension.

descriptors

Returns a sequence of names for the methods that have been decorated as descriptors.

exposed

A collection of strings with the names of objects that can be accessed directly by name as attributes of this instance, and that are not part of its standard interface (i.e. data objects that have been added dynamically).

ext_tables

Return the names of the astropy.table.Table objects associated to an extension.

filename

Return the file name.

hdr

Return all headers, as a astrodata.fits.FitsHeaderCollection.

header

Deprecated header access.

id

Returns the extension identifier (1-based extension number) for sliced objects.

indices

Returns the extensions indices for sliced objects.

is_sliced

If this data provider instance represents the whole dataset, return False.

mask

A list of the mask arrays (or a single array, if this is a single slice) attached to the science data, for each extension.

nddata

Return the list of astrodata.NDAstroData objects.

orig_filename

Return the original file name (before it was modified).

path

Return the file path.

phu

Return the primary header.

shape

Return the shape of the data array for each extension as a list of shapes.

tables

Return the names of the astropy.table.Table objects associated to the top-level object.

tags

A set of strings that represent the tags defining this instance.

uncertainty

A list of the uncertainty objects (or a single object, if this is a single slice) attached to the science data, for each extension.

variance

A list of the variance arrays (or a single array, if this is a single slice) attached to the science data, for each extension.

wcs

Returns the list of WCS objects for each extension.

+

Methods Summary

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add(oper)

Performs inplace addition by evaluating self += operand.

append(ext[, name, header])

Adds a new top-level extension.

crop(x1, y1, x2, y2)

Crop the NDData objects given indices.

divide(oper)

Performs inplace division by evaluating self /= operand.

info()

Prints out information about the contents of this instance.

instrument()

Returns the name of the instrument making the observation.

is_settable(attr)

Return True if the attribute is meant to be modified.

load(source[, extname_parser])

Read from a file, file object, HDUList, etc.

multiply(oper)

Performs inplace multiplication by evaluating self *= operand.

object()

Returns the name of the object being observed.

operate(operator, *args, **kwargs)

Applies a function to the main data array on each extension, replacing the data with the result.

read(source[, extname_parser])

Read from a file, file object, HDUList, etc.

reset(data[, mask, variance, check])

Sets the .data, and optionally .mask and .variance attributes of a single-extension AstroData slice.

subtract(oper)

Performs inplace subtraction by evaluating self -= operand.

table()

Return a dictionary of astropy.table.Table objects.

telescope()

Returns the name of the telescope.

update_filename([prefix, suffix, strip])

Update the "filename" attribute of the AstroData object.

write([filename, overwrite])

Write the object to disk.

+

Attributes Documentation

+
+
+data
+

A list of the arrays (or single array, if this is a single slice) +corresponding to the science data attached to each extension.

+
+ +
+
+descriptors
+

Returns a sequence of names for the methods that have been +decorated as descriptors.

+
+
Return type:
+

tuple of str

+
+
+
+ +
+
+exposed
+

A collection of strings with the names of objects that can be +accessed directly by name as attributes of this instance, and that are +not part of its standard interface (i.e. data objects that have been +added dynamically).

+

Examples

+
>>> ad[0].exposed  
+set(['OBJMASK', 'OBJCAT'])
+
+
+
+ +
+
+ext_tables
+

Return the names of the astropy.table.Table objects associated to +an extension.

+
+ +
+
+filename
+

Return the file name.

+
+ +
+
+hdr
+

Return all headers, as a astrodata.fits.FitsHeaderCollection.

+
+ +
+
+header
+

Deprecated header access. Use .hdr instead.

+
+ +
+
+id
+

Returns the extension identifier (1-based extension number) +for sliced objects.

+
+ +
+
+indices
+

Returns the extensions indices for sliced objects.

+
+ +
+
+is_sliced
+

If this data provider instance represents the whole dataset, return +False. If it represents a slice out of the whole, return True.

+
+ +
+
+mask
+

A list of the mask arrays (or a single array, if this is a single +slice) attached to the science data, for each extension.

+

For objects that miss a mask, None will be provided instead.

+
+ +
+
+nddata
+

Return the list of astrodata.NDAstroData objects.

+

If the AstroData object is sliced, this returns only the NDData +objects of the sliced extensions. And if this is a single extension +object, the NDData object is returned directly (i.e. not a list).

+
+ +
+
+orig_filename
+

Return the original file name (before it was modified).

+
+ +
+
+path
+

Return the file path.

+
+ +
+
+phu
+

Return the primary header.

+
+ +
+
+shape
+

Return the shape of the data array for each extension as a list of +shapes.

+
+ +
+
+tables
+

Return the names of the astropy.table.Table objects associated to +the top-level object.

+
+ +
+
+tags
+

A set of strings that represent the tags defining this instance.

+
+ +
+
+uncertainty
+

A list of the uncertainty objects (or a single object, if this is +a single slice) attached to the science data, for each extension.

+

The objects are instances of AstroPy’s astropy.nddata.NDUncertainty, +or None where no information is available.

+
+

See also

+
+
variance

The actual array supporting the uncertainty object.

+
+
+
+
+ +
+
+variance
+

A list of the variance arrays (or a single array, if this is a +single slice) attached to the science data, for each extension.

+

For objects that miss uncertainty information, None will be provided +instead.

+
+

See also

+
+
uncertainty

The uncertainty objects used under the hood.

+
+
+
+
+ +
+
+wcs
+

Returns the list of WCS objects for each extension.

+
+ +

Methods Documentation

+
+
+add(oper)
+

Performs inplace addition by evaluating self += operand.

+
+
Parameters:
+

oper (number or object) – The operand to perform the operation self += operand.

+
+
Return type:
+

AstroData instance

+
+
+
+ +
+
+append(ext, name=None, header=None)[source]
+

Adds a new top-level extension.

+
+
Parameters:
+
    +
  • ext (array, astropy.nddata.NDData, astropy.table.Table, other) – The contents for the new extension. The exact accepted types depend +on the class implementing this interface. Implementations specific +to certain data formats may accept specialized types (eg. a FITS +provider will accept an astropy.io.fits.ImageHDU and extract the +array out of it).

  • +
  • name (str, optional) – A name that may be used to access the new object, as an attribute +of the provider. The name is typically ignored for top-level +(global) objects, and required for the others. If the name cannot +be derived from the metadata associated to ext, you will +have to provider one. +It can consist in a combination of numbers and letters, with the +restriction that the letters have to be all capital, and the first +character cannot be a number (“[A-Z][A-Z0-9]*”).

  • +
+
+
Returns:
+

    +
  • The same object, or a new one, if it was necessary to convert it to

  • +
  • a more suitable format for internal use.

  • +
+

+
+
Raises:
+
    +
  • TypeError – If adding the object in an invalid situation (eg. name is + None when adding to a single slice).

  • +
  • ValueError – Raised if the extension is of a proper type, but its value is + illegal somehow.

  • +
+
+
+
+ +
+
+crop(x1, y1, x2, y2)[source]
+

Crop the NDData objects given indices.

+
+
Parameters:
+
    +
  • x1 (int) – Minimum and maximum indices for the x and y axis.

  • +
  • y1 (int) – Minimum and maximum indices for the x and y axis.

  • +
  • x2 (int) – Minimum and maximum indices for the x and y axis.

  • +
  • y2 (int) – Minimum and maximum indices for the x and y axis.

  • +
+
+
+
+ +
+
+divide(oper)
+

Performs inplace division by evaluating self /= operand.

+
+
Parameters:
+

oper (number or object) – The operand to perform the operation self /= operand.

+
+
Return type:
+

AstroData instance

+
+
+
+ +
+
+info()[source]
+

Prints out information about the contents of this instance.

+
+ +
+
+instrument()[source]
+

Returns the name of the instrument making the observation.

+
+ +
+
+is_settable(attr)[source]
+

Return True if the attribute is meant to be modified.

+
+ +
+
+classmethod load(source, extname_parser=None)
+

Read from a file, file object, HDUList, etc.

+
+ +
+
+multiply(oper)
+

Performs inplace multiplication by evaluating self *= operand.

+
+
Parameters:
+

oper (number or object) – The operand to perform the operation self *= operand.

+
+
Return type:
+

AstroData instance

+
+
+
+ +
+
+object()[source]
+

Returns the name of the object being observed.

+
+ +
+
+operate(operator, *args, **kwargs)[source]
+

Applies a function to the main data array on each extension, replacing +the data with the result. The data will be passed as the first argument +to the function.

+

It will be applied to the mask and variance of each extension, too, if +they exist.

+

This is a convenience method, which is equivalent to:

+
for ext in ad:
+    ext.data = operator(ext.data, *args, **kwargs)
+    if ext.mask is not None:
+        ext.mask = operator(ext.mask, *args, **kwargs)
+    if ext.variance is not None:
+        ext.variance = operator(ext.variance, *args, **kwargs)
+
+
+

with the additional advantage that it will work on single slices, too.

+
+
Parameters:
+
    +
  • operator (callable) – A function that takes an array (and, maybe, other arguments) +and returns an array.

  • +
  • args (optional) – Additional arguments to be passed to the operator.

  • +
  • kwargs (optional) – Additional arguments to be passed to the operator.

  • +
+
+
+

Examples

+
>>> import numpy as np
+>>> ad.operate(np.squeeze)  
+
+
+
+ +
+
+classmethod read(source, extname_parser=None)[source]
+

Read from a file, file object, HDUList, etc.

+
+ +
+
+reset(data, mask=<object object>, variance=<object object>, check=True)[source]
+

Sets the .data, and optionally .mask and .variance +attributes of a single-extension AstroData slice. This function will +optionally check whether these attributes have the same shape.

+
+
Parameters:
+
    +
  • data (ndarray) – The array to assign to the .data attribute (“SCI”).

  • +
  • mask (ndarray, optional) – The array to assign to the .mask attribute (“DQ”).

  • +
  • variance (ndarray, optional) – The array to assign to the .variance attribute (“VAR”).

  • +
  • check (bool) – If set, then the function will check that the mask and variance +arrays have the same shape as the data array.

  • +
+
+
Raises:
+
    +
  • TypeError – if an attempt is made to set the .mask or .variance attributes + with something other than an array

  • +
  • ValueError – if the .mask or .variance attributes don’t have the same shape as + .data, OR if this is called on an AD instance that isn’t a single + extension slice

  • +
+
+
+
+ +
+
+subtract(oper)
+

Performs inplace subtraction by evaluating self -= operand.

+
+
Parameters:
+

oper (number or object) – The operand to perform the operation self -= operand.

+
+
Return type:
+

AstroData instance

+
+
+
+ +
+
+table()[source]
+

Return a dictionary of astropy.table.Table objects.

+

Notes

+

This returns a _copy_ of the tables, so modifying them will not +affect the original ones.

+
+ +
+
+telescope()[source]
+

Returns the name of the telescope.

+
+ +
+
+update_filename(prefix=None, suffix=None, strip=False)[source]
+

Update the “filename” attribute of the AstroData object.

+

A prefix and/or suffix can be specified. If strip=True, these will +replace the existing prefix/suffix; if strip=False, they will +simply be prepended/appended.

+

The current filename is broken down into its existing prefix, root, and +suffix using the ORIGNAME phu keyword, if it exists and is +contained within the current filename. Otherwise, the filename is split +at the last underscore and the part before is assigned as the root and +the underscore and part after the suffix. No prefix is assigned.

+

Note that, if strip=True, a prefix or suffix will only be stripped +if ‘’ is specified.

+
+
Parameters:
+
    +
  • prefix (str, optional) – New prefix (None => leave alone)

  • +
  • suffix (str, optional) – New suffix (None => leave alone)

  • +
  • strip (bool, optional) – Strip existing prefixes and suffixes if new ones are given?

  • +
+
+
Raises:
+

ValueError – If the filename cannot be determined

+
+
+
+ +
+
+write(filename=None, overwrite=False)[source]
+

Write the object to disk.

+
+
Parameters:
+
    +
  • filename (str, optional) – If the filename is not given, self.path is used.

  • +
  • overwrite (bool) – If True, overwrites existing file.

  • +
+
+
+
+ +
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.AstroDataError.html b/api/astrodata.AstroDataError.html new file mode 100644 index 00000000..1082aee9 --- /dev/null +++ b/api/astrodata.AstroDataError.html @@ -0,0 +1,131 @@ + + + + + + + + AstroDataError — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

AstroDataError

+
+
+exception astrodata.AstroDataError[source]
+

Exception raised when there is a problem with the AstroData class.

+
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.AstroDataMixin.html b/api/astrodata.AstroDataMixin.html new file mode 100644 index 00000000..0bf3154b --- /dev/null +++ b/api/astrodata.AstroDataMixin.html @@ -0,0 +1,195 @@ + + + + + + + + AstroDataMixin — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

AstroDataMixin

+
+
+class astrodata.AstroDataMixin[source]
+

Bases: object

+

A Mixin for NDData-like classes (such as Spectrum1D) to enable +them to behave similarly to AstroData objects.

+
+
These behaviors are:
    +
  1. mask attributes are combined with bitwise, not logical, or, +since the individual bits are important.

  2. +
  3. The WCS must be a gwcs.WCS object and slicing results in +the model being modified.

  4. +
  5. There is a settable variance attribute.

  6. +
  7. Additional attributes such as OBJMASK can be extracted from +the .meta[‘other’] dict

  8. +
+
+
+

Attributes Summary

+ + + + + + + + + + + + + + + +

shape

The shape of the data.

size

The size of the data.

variance

A convenience property to access the contents of uncertainty.

wcs

The WCS of the data.

+

Attributes Documentation

+
+
+shape
+

The shape of the data.

+
+ +
+
+size
+

The size of the data.

+
+ +
+
+variance
+

A convenience property to access the contents of uncertainty.

+
+ +
+
+wcs
+

The WCS of the data. This is a gWCS object, not a FITS WCS object.

+

This is returning wcs from an inhertited class, see NDData.wcs for more +details.

+
+ +
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.NDAstroData.html b/api/astrodata.NDAstroData.html new file mode 100644 index 00000000..e0c7f5c9 --- /dev/null +++ b/api/astrodata.NDAstroData.html @@ -0,0 +1,337 @@ + + + + + + + + NDAstroData — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

NDAstroData

+
+
+class astrodata.NDAstroData(data, uncertainty=None, mask=None, wcs=None, meta=None, unit=None, copy=False, variance=None)[source]
+

Bases: AstroDataMixin, NDArithmeticMixin, NDSlicingMixin, NDData

+

Implements NDData with all Mixins, plus some AstroData +specifics.

+

This class implements an NDData-like container that supports reading +and writing as implemented in the astropy.io.registry and also slicing +(indexing) and simple arithmetics (add, subtract, divide and multiply).

+

A very important difference between NDAstroData and NDData is that +the former attempts to load all its data lazily. There are also some +important differences in the interface (eg. .data lets you reset its +contents after initialization).

+

Documentation is provided where our class differs.

+
+

See also

+

NDData, NDArithmeticMixin, NDSlicingMixin

+
+

Examples

+

The mixins allow operation that are not possible with NDData or +NDDataBase, i.e. simple arithmetics:

+
>>> from astropy.nddata import StdDevUncertainty
+>>> import numpy as np
+>>> data = np.ones((3,3), dtype=float)
+>>> ndd1 = NDAstroData(data, uncertainty=StdDevUncertainty(data))
+>>> ndd2 = NDAstroData(data, uncertainty=StdDevUncertainty(data))
+>>> ndd3 = ndd1.add(ndd2)
+>>> ndd3.data
+array([[2., 2., 2.],
+    [2., 2., 2.],
+    [2., 2., 2.]])
+>>> ndd3.uncertainty.array
+array([[1.41421356, 1.41421356, 1.41421356],
+    [1.41421356, 1.41421356, 1.41421356],
+    [1.41421356, 1.41421356, 1.41421356]])
+
+
+

see NDArithmeticMixin for a complete list of all supported arithmetic +operations.

+

But also slicing (indexing) is possible:

+
>>> ndd4 = ndd3[1,:]
+>>> ndd4.data
+array([2., 2., 2.])
+>>> ndd4.uncertainty.array
+array([1.41421356, 1.41421356, 1.41421356])
+
+
+

See NDSlicingMixin for a description how slicing works (which +attributes) are sliced.

+

Initialize an NDAstroData instance.

+
+
Parameters:
+
    +
  • data (array-like) – The actual data. This can be a numpy array, a memmap, or a +fits.ImageHDU object.

  • +
  • uncertainty (NDUncertainty-like object, optional) – An object that represents the uncertainty of the data. If not +specified, the uncertainty will be set to None.

  • +
  • mask (array-like, optional) – An array that represents the mask of the data. If not specified, +the mask will be set to None.

  • +
  • wcs (gwcs.WCS object, optional) – The WCS of the data. If not specified, the WCS will be set to None.

  • +
  • meta (dict-like, optional) – A dictionary-like object that holds the meta data. If not +specified, the meta data will be set to None.

  • +
  • unit (astropy.units.Unit object, optional) – The unit of the data. If not specified, the unit will be set to +None.

  • +
  • copy (bool, optional) – If True, the data, uncertainty, mask, wcs, meta, and unit will be +copied. Otherwise, they will be referenced. Default is False.

  • +
  • variance (array-like, optional) – An array that represents the variance of the data. If not +specified, the variance will be set to None.

  • +
+
+
Raises:
+

ValueError – If uncertainty and variance are both specified.

+
+
+

Notes

+

The uncertainty and variance parameters are mutually exclusive.

+

Attributes Summary

+ + + + + + + + + + + + + + + + + + + + + +

T

Transpose the data.

data

An array representing the raw data stored in this instance.

mask

Get or set the mask of the data.

uncertainty

Uncertainty in the dataset, if any.

variance

A convenience property to access the contents of uncertainty, squared (as the uncertainty data is stored as standard deviation).

window

Interface to access a section of the data, using lazy access whenever possible.

+

Methods Summary

+ + + + + + + + + +

set_section(section, input_data)

Sets only a section of the data.

transpose()

Transpose the data.

+

Attributes Documentation

+
+
+T
+

Transpose the data. This is not a copy of the data.

+
+ +
+
+data
+

An array representing the raw data stored in this instance. It +implements a setter.

+
+ +
+
+mask
+

Get or set the mask of the data.

+
+ +
+
+uncertainty
+
+ +
+
+variance
+

A convenience property to access the contents of uncertainty, +squared (as the uncertainty data is stored as standard deviation).

+
+ +
+
+window
+

Interface to access a section of the data, using lazy access +whenever possible.

+
+
Returns:
+

    +
  • An instance of NDWindowing, which provides __getitem__,

  • +
  • to allow the use of square brackets when specifying the window.

  • +
  • Ultimately, an NDWindowingAstrodata instance is returned.

  • +
+

+
+
+

Examples

+
>>> ad[0].nddata.window[100:200, 100:200]  
+<NDWindowingAstrodata .....>
+
+
+
+ +

Methods Documentation

+
+
+set_section(section, input_data)[source]
+

Sets only a section of the data. This method is meant to prevent +fragmentation in the Python heap, by reusing the internal structures +instead of replacing them with new ones.

+
+
Parameters:
+
    +
  • section (slice) – The area that will be replaced

  • +
  • input_data (NDData-like instance) – This object needs to implement at least data, uncertainty, +and mask. Their entire contents will replace the data in the +area defined by section.

  • +
+
+
+

Examples

+
>>> def setup():
+...     sec = NDData(np.zeros((100,100)))
+...     ad[0].nddata.set_section(
+...         (slice(None,100),slice(None,100)),
+...         sec
+...     )
+...
+>>> setup()  
+
+
+
+ +
+
+transpose()[source]
+

Transpose the data. This is not a copy of the data.

+
+ +
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.Section.html b/api/astrodata.Section.html new file mode 100644 index 00000000..6810410d --- /dev/null +++ b/api/astrodata.Section.html @@ -0,0 +1,258 @@ + + + + + + + + Section — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Section

+
+
+class astrodata.Section(*args, **kwargs)[source]
+

Bases: tuple

+

A class to handle n-dimensional sections

+

Attributes Summary

+ + + + + + + + + +

axis_dict

ndim

The number of dimensions in the section.

+

Methods Summary

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

asIRAFsection()

Deprecated, see as_iraf_section

as_iraf_section()

Produce string of style '[x1:x2,y1:y2]' that is 1-indexed and end-inclusive

asslice([add_dims])

Return the Section object as a slice/list of slices.

contains(section)

Return True if the supplied section is entirely within self

from_shape(value)

Produce a Section object defining a given shape.

from_string(value)

The inverse of __str__, produce a Section object from a string.

is_same_size(section)

Return True if the Sections are the same size

overlap(section)

Determine whether the two sections overlap.

shift(*shifts)

Shift a section in each direction by the specified amount

+

Attributes Documentation

+
+
+axis_dict
+
+ +
+
+ndim
+

The number of dimensions in the section.

+
+ +

Methods Documentation

+
+
+asIRAFsection()[source]
+

Deprecated, see as_iraf_section

+
+ +
+
+as_iraf_section()[source]
+

Produce string of style ‘[x1:x2,y1:y2]’ that is 1-indexed +and end-inclusive

+
+ +
+
+asslice(add_dims=0)[source]
+

Return the Section object as a slice/list of slices. Higher +dimensionality can be achieved with the add_dims parameter.

+
+ +
+
+contains(section)[source]
+

Return True if the supplied section is entirely within self

+
+ +
+
+static from_shape(value)[source]
+

Produce a Section object defining a given shape.

+
+ +
+
+static from_string(value)[source]
+

The inverse of __str__, produce a Section object from a string.

+
+ +
+
+is_same_size(section)[source]
+

Return True if the Sections are the same size

+
+ +
+
+overlap(section)[source]
+

Determine whether the two sections overlap. If so, the Section +common to both is returned, otherwise None

+
+ +
+
+shift(*shifts)[source]
+

Shift a section in each direction by the specified amount

+
+ +
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.TagSet.html b/api/astrodata.TagSet.html new file mode 100644 index 00000000..36490b0d --- /dev/null +++ b/api/astrodata.TagSet.html @@ -0,0 +1,228 @@ + + + + + + + + TagSet — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

TagSet

+
+
+class astrodata.TagSet(add=None, remove=None, blocked_by=None, blocks=None, if_present=None)[source]
+

Bases: TagSet

+

Named tuple that is used by tag methods to return which actions should +be performed on a tag set.

+

All the attributes are optional, and any combination of them can be used, +allowing to create complex tag structures. Read the documentation on the +tag-generating algorithm if you want to better understand the interactions.

+

The simplest TagSet, though, tends to just add tags to the global set.

+

It can be initialized by position, like any other tuple (the order of the +arguments is the one in which the attributes are listed below). It can +also be initialized by name.

+
+
+add
+

Tags to be added to the global set

+
+
Type:
+

set of str, optional

+
+
+
+ +
+
+remove
+

Tags to be removed from the global set

+
+
Type:
+

set of str, optional

+
+
+
+ +
+
+blocked_by
+

Tags that will prevent this TagSet from being applied

+
+
Type:
+

set of str, optional

+
+
+
+ +
+
+blocks
+

Other TagSets containing these won’t be applied

+
+
Type:
+

set of str, optional

+
+
+
+ +
+
+if_present
+

This TagSet will be applied only all of these tags are present

+
+
Type:
+

set of str, optional

+
+
+
+ +

Examples

+
>>> TagSet()  
+TagSet(
+    add=set(),
+    remove=set(),
+    blocked_by=set(),
+    blocks=set(),
+    if_present=set()
+)
+>>> TagSet({'BIAS', 'CAL'})  
+TagSet(
+    add={'BIAS', 'CAL'},
+    remove=set(),
+    blocked_by=set(),
+    blocks=set(),
+    if_present=set()
+)
+>>> TagSet(remove={'BIAS', 'CAL'}) 
+TagSet(
+    add=set(),
+    remove={'BIAS', 'CAL'},
+    blocked_by=set(),
+    blocks=set(),
+    if_present=set()
+)
+
+
+

Create new instance of TagSet(add, remove, blocked_by, blocks, if_present)

+
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.add_header_to_table.html b/api/astrodata.add_header_to_table.html new file mode 100644 index 00000000..a1f83daf --- /dev/null +++ b/api/astrodata.add_header_to_table.html @@ -0,0 +1,131 @@ + + + + + + + + add_header_to_table — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

add_header_to_table

+
+
+astrodata.add_header_to_table(table)[source]
+

Add a FITS header to a table.

+
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.astro_data_descriptor.html b/api/astrodata.astro_data_descriptor.html new file mode 100644 index 00000000..5efeb4be --- /dev/null +++ b/api/astrodata.astro_data_descriptor.html @@ -0,0 +1,143 @@ + + + + + + + + astro_data_descriptor — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

astro_data_descriptor

+
+
+astrodata.astro_data_descriptor(fn)[source]
+

Decorator that will mark a class method as an AstroData descriptor. +Useful to produce list of descriptors, for example.

+

If used in combination with other decorators, this one must be the +one on the top (ie. the last one applying). It doesn’t modify the +method in any other way.

+
+
Parameters:
+

fn (method) – The method to be decorated

+
+
Return type:
+

The tagged method (not a wrapper)

+
+
+
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.astro_data_tag.html b/api/astrodata.astro_data_tag.html new file mode 100644 index 00000000..ea8d0696 --- /dev/null +++ b/api/astrodata.astro_data_tag.html @@ -0,0 +1,144 @@ + + + + + + + + astro_data_tag — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

astro_data_tag

+
+
+astrodata.astro_data_tag(fn)[source]
+

Decorator that marks methods of an AstroData derived class as part of +the tag-producing system.

+

It wraps the method around a function that will ensure a consistent return +value: the wrapped method can return any sequence of sequences of strings, +and they will be converted to a TagSet. If the wrapped method +returns None, it will be turned into an empty TagSet.

+
+
Parameters:
+

fn (method) – The method to be decorated

+
+
Return type:
+

A wrapper function

+
+
+
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.create.html b/api/astrodata.create.html new file mode 100644 index 00000000..e671baee --- /dev/null +++ b/api/astrodata.create.html @@ -0,0 +1,149 @@ + + + + + + + + create — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

create

+
+
+astrodata.create(phu, extensions=None)
+

Creates an AstroData object from a collection of objects.

+
+
Parameters:
+
    +
  • phu (fits.PrimaryHDU or fits.Header or dict or list) – FITS primary HDU or header, or something that can be used to create +a fits.Header (a dict, a list of “cards”).

  • +
  • extensions (list of HDUs) – List of HDU objects.

  • +
+
+
Returns:
+

An AstroData instance.

+
+
Return type:
+

astrodata.AstroData

+
+
Raises:
+

ValueError – If phu is not a valid object.

+
+
+
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.from_file.html b/api/astrodata.from_file.html new file mode 100644 index 00000000..5aadb39c --- /dev/null +++ b/api/astrodata.from_file.html @@ -0,0 +1,141 @@ + + + + + + + + from_file — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

from_file

+
+
+astrodata.from_file(source)
+

Takes either a string (with the path to a file) or an HDUList as +input, and tries to return an AstroData instance.

+

It will raise exceptions if the file is not found, or if there is no +match for the HDUList, among the registered AstroData classes.

+

Returns an instantiated object, or raises AstroDataError if it was +not possible to find a match

+
+
Parameters:
+

source (str or pathlib.Path or fits.HDUList) – The file path or HDUList to read.

+
+
+
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.open.html b/api/astrodata.open.html new file mode 100644 index 00000000..4562b3c1 --- /dev/null +++ b/api/astrodata.open.html @@ -0,0 +1,131 @@ + + + + + + + + open — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

open

+
+
+astrodata.open(*args, **kwargs)[source]
+

Return an |AstroData| object from a file.

+
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.returns_list.html b/api/astrodata.returns_list.html new file mode 100644 index 00000000..faac3e25 --- /dev/null +++ b/api/astrodata.returns_list.html @@ -0,0 +1,143 @@ + + + + + + + + returns_list — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

returns_list

+
+
+astrodata.returns_list(fn)[source]
+

Decorator to ensure that descriptors that should return a list (of one +value per extension) only returns single values when operating on single +slices; and vice versa.

+

This is a common case, and you can use the decorator to simplify the +logic of your descriptors.

+
+
Parameters:
+

fn (method) – The method to be decorated

+
+
Return type:
+

A function

+
+
+
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/api/astrodata.version.html b/api/astrodata.version.html new file mode 100644 index 00000000..8fcf37ad --- /dev/null +++ b/api/astrodata.version.html @@ -0,0 +1,140 @@ + + + + + + + + version — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

version

+
+
+astrodata.version(short=False, tag='')[source]
+

Returns DRAGONS’s version based on the api, +feature and bug numbers.

+
+
Returns:
+

str

+
+
Return type:
+

formatted version

+
+
+
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/genindex.html b/genindex.html new file mode 100644 index 00000000..fe250285 --- /dev/null +++ b/genindex.html @@ -0,0 +1,474 @@ + + + + + + + Index — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ + +

Index

+ +
+ A + | B + | C + | D + | E + | F + | H + | I + | L + | M + | N + | O + | P + | R + | S + | T + | U + | V + | W + +
+

A

+ + + +
+ +

B

+ + + +
+ +

C

+ + + +
+ +

D

+ + + +
+ +

E

+ + + +
+ +

F

+ + + +
+ +

H

+ + + +
+ +

I

+ + + +
+ +

L

+ + +
+ +

M

+ + + +
+ +

N

+ + + +
+ +

O

+ + + +
+ +

P

+ + + +
+ +

R

+ + + +
+ +

S

+ + + +
+ +

T

+ + + +
+ +

U

+ + + +
+ +

V

+ + + +
+ +

W

+ + + +
+ + + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 00000000..bfb2cee2 --- /dev/null +++ b/index.html @@ -0,0 +1,194 @@ + + + + + + + + astrodata Documentation — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

astrodata Documentation

+

This is the documentation for astrodata.

+
+
+
+
+

astrodata Package

+

This package adds an abstraction layer to astronomical data by parsing the +information contained in the headers as attributes. To do so, one must subclass +astrodata.AstroData and add parse methods accordingly to the +TagSet received.

+
+

Functions

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_header_to_table(table)

Add a FITS header to a table.

astro_data_descriptor(fn)

Decorator that will mark a class method as an AstroData descriptor.

astro_data_tag(fn)

Decorator that marks methods of an AstroData derived class as part of the tag-producing system.

from_file(source)

Takes either a string (with the path to a file) or an HDUList as input, and tries to return an AstroData instance.

create(phu[, extensions])

Creates an AstroData object from a collection of objects.

open(*args, **kwargs)

Return an AstroData object from a file.

returns_list(fn)

Decorator to ensure that descriptors that should return a list (of one value per extension) only returns single values when operating on single slices; and vice versa.

version([short, tag])

Returns DRAGONS's version based on the api, feature and bug numbers.

+
+
+

Classes

+ + + + + + + + + + + + + + + + + + + + + +

AstroData([nddata, tables, phu, indices, ...])

Base class for the AstroData software package.

AstroDataError

Exception raised when there is a problem with the AstroData class.

AstroDataMixin()

A Mixin for NDData-like classes (such as Spectrum1D) to enable them to behave similarly to AstroData objects.

NDAstroData(data[, uncertainty, mask, wcs, ...])

Implements NDData with all Mixins, plus some AstroData specifics.

Section(*args, **kwargs)

A class to handle n-dimensional sections

TagSet([add, remove, blocked_by, blocks, ...])

Named tuple that is used by tag methods to return which actions should be performed on a tag set.

+
+

Indices and tables

+ +
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/objects.inv b/objects.inv new file mode 100644 index 00000000..bcd37384 Binary files /dev/null and b/objects.inv differ diff --git a/py-modindex.html b/py-modindex.html new file mode 100644 index 00000000..0d6e2aca --- /dev/null +++ b/py-modindex.html @@ -0,0 +1,135 @@ + + + + + + + Python Module Index — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ + +

Python Module Index

+ +
+ a +
+ + + + + + + +
 
+ a
+ astrodata +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/search.html b/search.html new file mode 100644 index 00000000..73b9a65b --- /dev/null +++ b/search.html @@ -0,0 +1,136 @@ + + + + + + + Search — astrodata 3.2.0 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Search

+ + + + +

+ Searching for multiple words only shows matches that contain + all words. +

+ + +
+ + + +
+ + + +
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/searchindex.js b/searchindex.js new file mode 100644 index 00000000..50df233e --- /dev/null +++ b/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({"docnames": ["api/astrodata.AstroData", "api/astrodata.AstroDataError", "api/astrodata.AstroDataMixin", "api/astrodata.NDAstroData", "api/astrodata.Section", "api/astrodata.TagSet", "api/astrodata.add_header_to_table", "api/astrodata.astro_data_descriptor", "api/astrodata.astro_data_tag", "api/astrodata.create", "api/astrodata.from_file", "api/astrodata.open", "api/astrodata.returns_list", "api/astrodata.version", "index"], "filenames": ["api/astrodata.AstroData.rst", "api/astrodata.AstroDataError.rst", "api/astrodata.AstroDataMixin.rst", "api/astrodata.NDAstroData.rst", "api/astrodata.Section.rst", "api/astrodata.TagSet.rst", "api/astrodata.add_header_to_table.rst", "api/astrodata.astro_data_descriptor.rst", "api/astrodata.astro_data_tag.rst", "api/astrodata.create.rst", "api/astrodata.from_file.rst", "api/astrodata.open.rst", "api/astrodata.returns_list.rst", "api/astrodata.version.rst", "index.rst"], "titles": ["AstroData", "AstroDataError", "AstroDataMixin", "NDAstroData", "Section", "TagSet", "add_header_to_table", "astro_data_descriptor", "astro_data_tag", "create", "from_file", "open", "returns_list", "version", "astrodata Documentation"], "terms": {"class": [0, 1, 2, 3, 4, 5, 7, 8, 10], "nddata": [0, 2, 3], "none": [0, 3, 4, 5, 8, 9], "tabl": [0, 6], "phu": [0, 9], "indic": 0, "is_singl": 0, "fals": [0, 3, 13], "sourc": [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13], "base": [0, 2, 3, 4, 5, 13], "object": [0, 2, 3, 4, 9, 10, 11], "softwar": 0, "packag": 0, "It": [0, 3, 5, 7, 8, 10], "provid": [0, 3], "an": [0, 2, 3, 7, 8, 9, 10, 11, 14], "interfac": [0, 3], "manipul": 0, "astronom": [0, 14], "data": [0, 2, 3, 14], "set": [0, 3, 5], "paramet": [0, 3, 4, 7, 8, 9, 10, 12], "ndastrodata": [0, 14], "list": [0, 3, 4, 5, 7, 9, 12], "dict": [0, 2, 3, 9], "name": [0, 5], 
"astropi": [0, 3], "io": [0, 3], "fit": [0, 2, 3, 6, 9, 10], "header": [0, 6, 9, 14], "primari": [0, 9], "int": 0, "map": 0, "thi": [0, 2, 3, 5, 7, 12, 14], "access": [0, 2, 3], "i": [0, 1, 2, 3, 4, 5, 9, 10, 12, 14], "us": [0, 3, 5, 7, 9, 12], "when": [0, 1, 3, 12], "slice": [0, 2, 3, 4, 12], "have": 0, "from": [0, 2, 3, 4, 5, 9, 11], "its": [0, 3], "parent": 0, "through": 0, "attribut": [0, 2, 3, 4, 5, 14], "summari": [0, 2, 3, 4], "method": [0, 3, 4, 5, 7, 8, 12, 14], "document": [0, 2, 3, 4, 5], "A": [0, 2, 3, 4, 8, 12], "arrai": [0, 3], "singl": [0, 12], "correspond": 0, "scienc": 0, "attach": 0, "each": [0, 4], "extens": [0, 9, 12], "descriptor": [0, 7, 12], "return": [0, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13], "sequenc": [0, 8], "been": 0, "decor": [0, 7, 8, 12], "type": [0, 5, 7, 8, 9, 12, 13], "tupl": [0, 4, 5], "str": [0, 5, 10, 13], "expos": 0, "collect": [0, 9], "string": [0, 4, 8, 10], "can": [0, 2, 3, 4, 5, 8, 9, 12], "directli": 0, "instanc": [0, 3, 5, 9, 10], "ar": [0, 2, 3, 4, 5], "part": [0, 8], "standard": [0, 3], "e": [0, 3], "ad": [0, 3, 5], "dynam": 0, "exampl": [0, 3, 5, 7], "0": [0, 3, 4], "objmask": [0, 2], "objcat": 0, "ext_tabl": 0, "associ": 0, "filenam": 0, "file": [0, 10, 11], "hdr": 0, "all": [0, 3, 5], "fitsheadercollect": 0, "deprec": [0, 4], "instead": [0, 3], "id": 0, "identifi": 0, "1": [0, 3, 4], "number": [0, 4, 13], "is_slic": 0, "If": [0, 3, 4, 7, 8, 9], "repres": [0, 3], "whole": 0, "dataset": 0, "out": 0, "true": [0, 3, 4], "mask": [0, 2, 3], "For": 0, "miss": 0, "onli": [0, 3, 5, 12], "And": 0, "orig_filenam": 0, "origin": 0, "befor": 0, "wa": [0, 10], "modifi": [0, 2, 7], "path": [0, 10], "shape": [0, 2, 4], "top": [0, 7], "level": 0, "tag": [0, 5, 7, 8, 13], "defin": [0, 3, 4], "uncertainti": [0, 2, 3], "The": [0, 2, 3, 4, 5, 7, 8, 10, 12], "": [0, 13], "nduncertainti": [0, 3], "where": [0, 3], "inform": [0, 14], "avail": 0, "varianc": [0, 2, 3], "actual": [0, 3], "support": [0, 3], "under": 0, "hood": 0, "wc": [0, 2, 3], 
"add": [0, 3, 5, 6, 14], "oper": [0, 3, 12], "perform": [0, 5], "inplac": 0, "addit": [0, 2], "evalu": 0, "self": [0, 4], "operand": 0, "append": 0, "ext": 0, "new": [0, 3, 5], "other": [0, 2, 5, 7], "content": [0, 2, 3], "exact": 0, "accept": 0, "depend": 0, "implement": [0, 3], "specif": [0, 3], "certain": 0, "format": [0, 13], "mai": 0, "special": 0, "eg": [0, 3], "imagehdu": [0, 3], "extract": [0, 2], "option": [0, 3, 5], "typic": 0, "ignor": 0, "global": [0, 5], "requir": 0, "cannot": 0, "deriv": [0, 8], "metadata": 0, "you": [0, 3, 5, 12], "one": [0, 5, 7, 12, 14], "consist": [0, 8], "combin": [0, 2, 5, 7], "letter": 0, "restrict": 0, "capit": 0, "first": 0, "charact": 0, "z": 0, "z0": 0, "9": 0, "same": [0, 4], "necessari": 0, "convert": [0, 8], "more": [0, 2], "suitabl": 0, "intern": [0, 3], "rais": [0, 1, 3, 9, 10], "typeerror": 0, "invalid": 0, "situat": 0, "valueerror": [0, 3, 9], "proper": 0, "valu": [0, 4, 8, 12], "illeg": 0, "somehow": 0, "crop": 0, "x1": [0, 4], "y1": [0, 4], "x2": [0, 4], "y2": [0, 4], "given": [0, 4], "minimum": 0, "maximum": 0, "x": 0, "y": 0, "axi": 0, "divid": [0, 3], "divis": 0, "info": 0, "print": 0, "about": 0, "instrument": 0, "make": 0, "observ": 0, "is_sett": 0, "attr": 0, "meant": [0, 3], "classmethod": 0, "load": [0, 3], "extname_pars": 0, "read": [0, 3, 5, 10], "hdulist": [0, 10], "etc": 0, "multipli": [0, 3], "multipl": 0, "being": [0, 2, 5], "arg": [0, 4, 11], "kwarg": [0, 4, 11], "appli": [0, 5, 7], "function": [0, 8, 12], "main": 0, "replac": [0, 3], "result": [0, 2], "pass": 0, "argument": [0, 5], "too": 0, "thei": [0, 3, 8], "exist": 0, "conveni": [0, 2, 3], "which": [0, 3, 5], "equival": 0, "advantag": 0, "work": [0, 3], "callabl": 0, "take": [0, 10], "mayb": 0, "import": [0, 2, 3], "numpi": [0, 3], "np": [0, 3], "squeez": 0, "reset": [0, 3], "check": 0, "whether": [0, 4], "ndarrai": 0, "assign": 0, "sci": 0, "dq": 0, "var": 0, "bool": [0, 3], "attempt": [0, 3], "made": 0, "someth": [0, 9], "than": 0, "don": 0, 
"t": [0, 3, 5, 7], "OR": 0, "call": 0, "isn": 0, "subtract": [0, 3], "dictionari": [0, 3], "note": [0, 3], "_copy_": 0, "so": [0, 4, 14], "them": [0, 2, 3, 5], "affect": 0, "ones": [0, 3], "telescop": 0, "update_filenam": 0, "prefix": 0, "suffix": 0, "strip": 0, "updat": 0, "specifi": [0, 3, 4], "simpli": 0, "prepend": 0, "current": 0, "broken": 0, "down": 0, "root": 0, "orignam": 0, "keyword": 0, "contain": [0, 3, 4, 5, 14], "within": [0, 4], "otherwis": [0, 3, 4], "split": 0, "last": [0, 7], "underscor": 0, "after": [0, 3], "No": 0, "leav": 0, "alon": 0, "determin": [0, 4], "write": [0, 3], "overwrit": 0, "disk": 0, "except": [1, 10], "astrodata": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], "problem": 1, "mixin": [2, 3], "like": [2, 3, 5], "spectrum1d": 2, "enabl": 2, "behav": 2, "similarli": 2, "These": 2, "behavior": 2, "bitwis": 2, "logic": [2, 12], "sinc": 2, "individu": 2, "bit": 2, "must": [2, 7, 14], "gwc": [2, 3], "model": 2, "There": [2, 3], "settabl": 2, "meta": [2, 3], "size": [2, 4], "properti": [2, 3], "inhertit": 2, "see": [2, 3, 4], "detail": 2, "unit": 3, "copi": 3, "astrodatamixin": [3, 14], "ndarithmeticmixin": 3, "ndslicingmixin": 3, "plu": 3, "some": 3, "registri": 3, "also": [3, 5], "index": [3, 4, 14], "simpl": 3, "arithmet": 3, "veri": 3, "differ": 3, "between": 3, "former": 3, "lazili": 3, "let": 3, "initi": [3, 5], "our": 3, "allow": [3, 5], "possibl": [3, 10], "nddatabas": 3, "stddevuncertainti": 3, "3": 3, "dtype": 3, "float": 3, "ndd1": 3, "ndd2": 3, "ndd3": 3, "2": 3, "41421356": 3, "complet": 3, "But": 3, "ndd4": 3, "descript": 3, "how": 3, "memmap": 3, "hold": 3, "referenc": 3, "default": 3, "both": [3, 4], "mutual": 3, "exclus": 3, "transpos": 3, "raw": 3, "store": 3, "setter": 3, "get": 3, "squar": 3, "deviat": 3, "window": 3, "section": [3, 14], "lazi": 3, "whenev": 3, "ndwindow": 3, "__getitem__": 3, "bracket": 3, "ultim": 3, "ndwindowingastrodata": 3, "100": 3, "200": 3, "set_sect": 3, "input_data": 3, "prevent": [3, 5], 
"fragment": 3, "python": 3, "heap": 3, "reus": 3, "structur": [3, 5], "area": 3, "need": 3, "least": 3, "Their": 3, "entir": [3, 4], "def": 3, "setup": 3, "sec": 3, "zero": 3, "handl": 4, "n": 4, "dimension": 4, "axis_dict": 4, "ndim": 4, "dimens": 4, "asirafsect": 4, "as_iraf_sect": 4, "produc": [4, 7, 8], "style": 4, "end": 4, "inclus": 4, "asslic": 4, "add_dim": 4, "higher": 4, "achiev": 4, "suppli": 4, "static": 4, "from_shap": 4, "from_str": 4, "invers": 4, "__str__": 4, "is_same_s": 4, "overlap": 4, "two": 4, "common": [4, 12], "shift": 4, "direct": 4, "amount": 4, "remov": 5, "blocked_bi": 5, "block": 5, "if_pres": 5, "action": 5, "should": [5, 12], "ani": [5, 7, 8], "creat": [5, 14], "complex": 5, "gener": 5, "algorithm": 5, "want": 5, "better": 5, "understand": 5, "interact": 5, "simplest": 5, "though": 5, "tend": 5, "just": 5, "posit": 5, "order": 5, "below": 5, "won": 5, "present": 5, "bia": 5, "cal": 5, "fn": [7, 8, 12], "mark": [7, 8], "ie": 7, "doesn": 7, "wai": 7, "wrapper": [7, 8], "system": 8, "wrap": 8, "around": 8, "ensur": [8, 12], "tagset": [8, 14], "turn": 8, "empti": 8, "primaryhdu": 9, "hdu": 9, "card": 9, "valid": 9, "either": 10, "input": 10, "tri": 10, "found": 10, "match": 10, "among": 10, "regist": 10, "instanti": 10, "astrodataerror": [10, 14], "find": 10, "pathlib": 10, "per": 12, "vice": 12, "versa": 12, "case": 12, "simplifi": 12, "your": 12, "short": 13, "dragon": 13, "api": 13, "featur": 13, "bug": 13, "abstract": 14, "layer": 14, "pars": 14, "To": 14, "do": 14, "subclass": 14, "accordingli": 14, "receiv": 14, "astro_data_descriptor": 14, "astro_data_tag": 14, "open": 14, "return_list": 14, "version": 14, "modul": 14, "search": 14, "page": 14}, "objects": {"": [[14, 0, 0, "-", "astrodata"]], "astrodata": [[0, 1, 1, "", "AstroData"], [1, 4, 1, "", "AstroDataError"], [2, 1, 1, "", "AstroDataMixin"], [3, 1, 1, "", "NDAstroData"], [4, 1, 1, "", "Section"], [5, 1, 1, "", "TagSet"], [6, 5, 1, "", "add_header_to_table"], [7, 5, 1, "", 
"astro_data_descriptor"], [8, 5, 1, "", "astro_data_tag"], [9, 5, 1, "", "create"], [10, 5, 1, "", "from_file"], [11, 5, 1, "", "open"], [12, 5, 1, "", "returns_list"], [13, 5, 1, "", "version"]], "astrodata.AstroData": [[0, 2, 1, "", "add"], [0, 2, 1, "", "append"], [0, 2, 1, "", "crop"], [0, 3, 1, "", "data"], [0, 3, 1, "", "descriptors"], [0, 2, 1, "", "divide"], [0, 3, 1, "", "exposed"], [0, 3, 1, "", "ext_tables"], [0, 3, 1, "", "filename"], [0, 3, 1, "", "hdr"], [0, 3, 1, "", "header"], [0, 3, 1, "", "id"], [0, 3, 1, "", "indices"], [0, 2, 1, "", "info"], [0, 2, 1, "", "instrument"], [0, 2, 1, "", "is_settable"], [0, 3, 1, "", "is_sliced"], [0, 2, 1, "", "load"], [0, 3, 1, "", "mask"], [0, 2, 1, "", "multiply"], [0, 3, 1, "", "nddata"], [0, 2, 1, "", "object"], [0, 2, 1, "", "operate"], [0, 3, 1, "", "orig_filename"], [0, 3, 1, "", "path"], [0, 3, 1, "", "phu"], [0, 2, 1, "", "read"], [0, 2, 1, "", "reset"], [0, 3, 1, "", "shape"], [0, 2, 1, "", "subtract"], [0, 2, 1, "", "table"], [0, 3, 1, "", "tables"], [0, 3, 1, "", "tags"], [0, 2, 1, "", "telescope"], [0, 3, 1, "", "uncertainty"], [0, 2, 1, "", "update_filename"], [0, 3, 1, "", "variance"], [0, 3, 1, "", "wcs"], [0, 2, 1, "", "write"]], "astrodata.AstroDataMixin": [[2, 3, 1, "", "shape"], [2, 3, 1, "", "size"], [2, 3, 1, "", "variance"], [2, 3, 1, "", "wcs"]], "astrodata.NDAstroData": [[3, 3, 1, "", "T"], [3, 3, 1, "", "data"], [3, 3, 1, "", "mask"], [3, 2, 1, "", "set_section"], [3, 2, 1, "", "transpose"], [3, 3, 1, "", "uncertainty"], [3, 3, 1, "", "variance"], [3, 3, 1, "", "window"]], "astrodata.Section": [[4, 2, 1, "", "asIRAFsection"], [4, 2, 1, "", "as_iraf_section"], [4, 2, 1, "", "asslice"], [4, 3, 1, "", "axis_dict"], [4, 2, 1, "", "contains"], [4, 2, 1, "", "from_shape"], [4, 2, 1, "", "from_string"], [4, 2, 1, "", "is_same_size"], [4, 3, 1, "", "ndim"], [4, 2, 1, "", "overlap"], [4, 2, 1, "", "shift"]], "astrodata.TagSet": [[5, 3, 1, "", "add"], [5, 3, 1, "", "blocked_by"], [5, 3, 1, "", 
"blocks"], [5, 3, 1, "", "if_present"], [5, 3, 1, "", "remove"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:method", "3": "py:attribute", "4": "py:exception", "5": "py:function"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"], "3": ["py", "attribute", "Python attribute"], "4": ["py", "exception", "Python exception"], "5": ["py", "function", "Python function"]}, "titleterms": {"astrodata": [0, 14], "astrodataerror": 1, "astrodatamixin": 2, "ndastrodata": 3, "section": 4, "tagset": 5, "add_header_to_t": 6, "astro_data_descriptor": 7, "astro_data_tag": 8, "creat": 9, "from_fil": 10, "open": 11, "returns_list": 12, "version": 13, "document": 14, "packag": 14, "function": 14, "class": 14, "indic": 14, "tabl": 14}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1, "sphinx": 60}, "alltitles": {"AstroData": [[0, "astrodata"]], "AstroDataError": [[1, "astrodataerror"]], "AstroDataMixin": [[2, "astrodatamixin"]], "NDAstroData": [[3, "ndastrodata"]], "Section": [[4, "section"]], "TagSet": [[5, "tagset"]], "add_header_to_table": [[6, "add-header-to-table"]], "astro_data_descriptor": [[7, "astro-data-descriptor"]], "astro_data_tag": [[8, "astro-data-tag"]], "create": [[9, "create"]], "from_file": [[10, "from-file"]], "open": [[11, "open"]], "returns_list": [[12, "returns-list"]], "version": [[13, "version"]], "astrodata Documentation": [[14, "astrodata-documentation"]], "astrodata Package": [[14, "module-astrodata"]], "Functions": [[14, "functions"]], "Classes": [[14, "classes"]], "Indices and tables": [[14, "indices-and-tables"]]}, "indexentries": {"astrodata (class in 
astrodata)": [[0, "astrodata.AstroData"]], "add() (astrodata.astrodata method)": [[0, "astrodata.AstroData.add"]], "append() (astrodata.astrodata method)": [[0, "astrodata.AstroData.append"]], "crop() (astrodata.astrodata method)": [[0, "astrodata.AstroData.crop"]], "data (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.data"]], "descriptors (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.descriptors"]], "divide() (astrodata.astrodata method)": [[0, "astrodata.AstroData.divide"]], "exposed (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.exposed"]], "ext_tables (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.ext_tables"]], "filename (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.filename"]], "hdr (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.hdr"]], "header (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.header"]], "id (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.id"]], "indices (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.indices"]], "info() (astrodata.astrodata method)": [[0, "astrodata.AstroData.info"]], "instrument() (astrodata.astrodata method)": [[0, "astrodata.AstroData.instrument"]], "is_settable() (astrodata.astrodata method)": [[0, "astrodata.AstroData.is_settable"]], "is_sliced (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.is_sliced"]], "load() (astrodata.astrodata class method)": [[0, "astrodata.AstroData.load"]], "mask (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.mask"]], "multiply() (astrodata.astrodata method)": [[0, "astrodata.AstroData.multiply"]], "nddata (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.nddata"]], "object() (astrodata.astrodata method)": [[0, "astrodata.AstroData.object"]], "operate() (astrodata.astrodata method)": [[0, "astrodata.AstroData.operate"]], "orig_filename (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.orig_filename"]], "path (astrodata.astrodata 
attribute)": [[0, "astrodata.AstroData.path"]], "phu (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.phu"]], "read() (astrodata.astrodata class method)": [[0, "astrodata.AstroData.read"]], "reset() (astrodata.astrodata method)": [[0, "astrodata.AstroData.reset"]], "shape (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.shape"]], "subtract() (astrodata.astrodata method)": [[0, "astrodata.AstroData.subtract"]], "table() (astrodata.astrodata method)": [[0, "astrodata.AstroData.table"]], "tables (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.tables"]], "tags (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.tags"]], "telescope() (astrodata.astrodata method)": [[0, "astrodata.AstroData.telescope"]], "uncertainty (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.uncertainty"]], "update_filename() (astrodata.astrodata method)": [[0, "astrodata.AstroData.update_filename"]], "variance (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.variance"]], "wcs (astrodata.astrodata attribute)": [[0, "astrodata.AstroData.wcs"]], "write() (astrodata.astrodata method)": [[0, "astrodata.AstroData.write"]], "astrodataerror": [[1, "astrodata.AstroDataError"]], "astrodatamixin (class in astrodata)": [[2, "astrodata.AstroDataMixin"]], "shape (astrodata.astrodatamixin attribute)": [[2, "astrodata.AstroDataMixin.shape"]], "size (astrodata.astrodatamixin attribute)": [[2, "astrodata.AstroDataMixin.size"]], "variance (astrodata.astrodatamixin attribute)": [[2, "astrodata.AstroDataMixin.variance"]], "wcs (astrodata.astrodatamixin attribute)": [[2, "astrodata.AstroDataMixin.wcs"]], "ndastrodata (class in astrodata)": [[3, "astrodata.NDAstroData"]], "t (astrodata.ndastrodata attribute)": [[3, "astrodata.NDAstroData.T"]], "data (astrodata.ndastrodata attribute)": [[3, "astrodata.NDAstroData.data"]], "mask (astrodata.ndastrodata attribute)": [[3, "astrodata.NDAstroData.mask"]], "set_section() (astrodata.ndastrodata method)": [[3, 
"astrodata.NDAstroData.set_section"]], "transpose() (astrodata.ndastrodata method)": [[3, "astrodata.NDAstroData.transpose"]], "uncertainty (astrodata.ndastrodata attribute)": [[3, "astrodata.NDAstroData.uncertainty"]], "variance (astrodata.ndastrodata attribute)": [[3, "astrodata.NDAstroData.variance"]], "window (astrodata.ndastrodata attribute)": [[3, "astrodata.NDAstroData.window"]], "section (class in astrodata)": [[4, "astrodata.Section"]], "asirafsection() (astrodata.section method)": [[4, "astrodata.Section.asIRAFsection"]], "as_iraf_section() (astrodata.section method)": [[4, "astrodata.Section.as_iraf_section"]], "asslice() (astrodata.section method)": [[4, "astrodata.Section.asslice"]], "axis_dict (astrodata.section attribute)": [[4, "astrodata.Section.axis_dict"]], "contains() (astrodata.section method)": [[4, "astrodata.Section.contains"]], "from_shape() (astrodata.section static method)": [[4, "astrodata.Section.from_shape"]], "from_string() (astrodata.section static method)": [[4, "astrodata.Section.from_string"]], "is_same_size() (astrodata.section method)": [[4, "astrodata.Section.is_same_size"]], "ndim (astrodata.section attribute)": [[4, "astrodata.Section.ndim"]], "overlap() (astrodata.section method)": [[4, "astrodata.Section.overlap"]], "shift() (astrodata.section method)": [[4, "astrodata.Section.shift"]], "tagset (class in astrodata)": [[5, "astrodata.TagSet"]], "add (astrodata.tagset attribute)": [[5, "astrodata.TagSet.add"]], "blocked_by (astrodata.tagset attribute)": [[5, "astrodata.TagSet.blocked_by"]], "blocks (astrodata.tagset attribute)": [[5, "astrodata.TagSet.blocks"]], "if_present (astrodata.tagset attribute)": [[5, "astrodata.TagSet.if_present"]], "remove (astrodata.tagset attribute)": [[5, "astrodata.TagSet.remove"]], "add_header_to_table() (in module astrodata)": [[6, "astrodata.add_header_to_table"]], "astro_data_descriptor() (in module astrodata)": [[7, "astrodata.astro_data_descriptor"]], "astro_data_tag() (in module 
astrodata)": [[8, "astrodata.astro_data_tag"]], "create() (in module astrodata)": [[9, "astrodata.create"]], "from_file() (in module astrodata)": [[10, "astrodata.from_file"]], "open() (in module astrodata)": [[11, "astrodata.open"]], "returns_list() (in module astrodata)": [[12, "astrodata.returns_list"]], "version() (in module astrodata)": [[13, "astrodata.version"]], "astrodata": [[14, "module-astrodata"]], "module": [[14, "module-astrodata"]]}}) \ No newline at end of file