-- meme_plpythonu.sql
CREATE EXTENSION IF NOT EXISTS plpython3u;
DROP SCHEMA IF EXISTS meme CASCADE;
CREATE SCHEMA meme;
-- Memelang statements as rows: aid/rid/bid hold the A (subject), R (relation),
-- and B (object) identifiers, qnt the quantity. Note the table is created in
-- the default search_path schema (typically public), not in schema meme.
CREATE TABLE IF NOT EXISTS meme (aid varchar(255), rid varchar(255), bid varchar(255), qnt DECIMAL(20,6));
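-- Hypothetical usage sketch: the Memelang string below is illustrative only;
-- the accepted grammar is whatever the parser bundled into meme.query
-- implements.
--
--   INSERT INTO meme (aid, rid, bid, qnt) VALUES ('alice', 'knows', 'bob', 1);
--   SELECT * FROM meme.query('alice.knows');  -- expected to return matching rows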
CREATE FUNCTION meme.query(memelang_in TEXT) RETURNS SETOF meme AS $$ #!/usr/bin/env python
import contextlib as __stickytape_contextlib

# stickytape bootstrap: unpack the bundled modules into a temporary directory,
# put that directory at the front of sys.path, and clean it up on exit.
@__stickytape_contextlib.contextmanager
def __stickytape_temporary_dir():
    import tempfile
    import shutil
    dir_path = tempfile.mkdtemp()
    try:
        yield dir_path
    finally:
        shutil.rmtree(dir_path)

with __stickytape_temporary_dir() as __stickytape_working_dir:
    def __stickytape_write_module(path, contents):
        import os, os.path

        def make_package(path):
            # Create each directory along the path, marking new ones as
            # packages with an empty __init__.py.
            parts = path.split("/")
            partial_path = __stickytape_working_dir
            for part in parts:
                partial_path = os.path.join(partial_path, part)
                if not os.path.exists(partial_path):
                    os.mkdir(partial_path)
                    with open(os.path.join(partial_path, "__init__.py"), "wb") as f:
                        f.write(b"\n")

        make_package(os.path.dirname(path))
        full_path = os.path.join(__stickytape_working_dir, path)
        with open(full_path, "wb") as module_file:
            module_file.write(contents)

    import sys as __stickytape_sys
    __stickytape_sys.path.insert(0, __stickytape_working_dir)
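    # What follows (through the end of the file) writes each bundled module
    # out as a single-line bytes literal; this first one is a verbatim copy
    # of CPython's typing.py, unpacked to <tmpdir>/typing.py so the bundled
    # code imports it from there rather than the interpreter's own copy.
    # A minimal sketch of the mechanism, using a hypothetical module:
    #
    #   __stickytape_write_module('mypkg/util.py', b'def answer():\n    return 42\n')
    #   from mypkg.util import answer  # resolved via the temp dir on sys.path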
__stickytape_write_module('typing.py', b'"""\nThe typing module: Support for gradual typing as defined by PEP 484 and subsequent PEPs.\n\nAmong other things, the module includes the following:\n* Generic, Protocol, and internal machinery to support generic aliases.\n All subscripted types like X[int], Union[int, str] are generic aliases.\n* Various "special forms" that have unique meanings in type annotations:\n NoReturn, Never, ClassVar, Self, Concatenate, Unpack, and others.\n* Classes whose instances can be type arguments to generic classes and functions:\n TypeVar, ParamSpec, TypeVarTuple.\n* Public helper functions: get_type_hints, overload, cast, final, and others.\n* Several protocols to support duck-typing:\n SupportsFloat, SupportsIndex, SupportsAbs, and others.\n* Special types: NewType, NamedTuple, TypedDict.\n* Deprecated wrapper submodules for re and io related types.\n* Deprecated aliases for builtin types and collections.abc ABCs.\n\nAny name not present in __all__ is an implementation detail\nthat may be changed without notice. Use at your own risk!\n"""\n\nfrom abc import abstractmethod, ABCMeta\nimport collections\nfrom collections import defaultdict\nimport collections.abc\nimport copyreg\nimport contextlib\nimport functools\nimport operator\nimport re as stdlib_re # Avoid confusion with the re we export.\nimport sys\nimport types\nimport warnings\nfrom types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType, GenericAlias\n\nfrom _typing import (\n _idfunc,\n TypeVar,\n ParamSpec,\n TypeVarTuple,\n ParamSpecArgs,\n ParamSpecKwargs,\n TypeAliasType,\n Generic,\n)\n\n# Please keep __all__ alphabetized within each category.\n__all__ = [\n # Super-special typing primitives.\n \'Annotated\',\n \'Any\',\n \'Callable\',\n \'ClassVar\',\n \'Concatenate\',\n \'Final\',\n \'ForwardRef\',\n \'Generic\',\n \'Literal\',\n \'Optional\',\n \'ParamSpec\',\n \'Protocol\',\n \'Tuple\',\n \'Type\',\n \'TypeVar\',\n \'TypeVarTuple\',\n \'Union\',\n\n # ABCs (from collections.abc).\n \'AbstractSet\', # collections.abc.Set.\n \'ByteString\',\n \'Container\',\n \'ContextManager\',\n \'Hashable\',\n \'ItemsView\',\n \'Iterable\',\n \'Iterator\',\n \'KeysView\',\n \'Mapping\',\n \'MappingView\',\n \'MutableMapping\',\n \'MutableSequence\',\n \'MutableSet\',\n \'Sequence\',\n \'Sized\',\n \'ValuesView\',\n \'Awaitable\',\n \'AsyncIterator\',\n \'AsyncIterable\',\n \'Coroutine\',\n \'Collection\',\n \'AsyncGenerator\',\n \'AsyncContextManager\',\n\n # Structural checks, a.k.a. 
protocols.\n \'Reversible\',\n \'SupportsAbs\',\n \'SupportsBytes\',\n \'SupportsComplex\',\n \'SupportsFloat\',\n \'SupportsIndex\',\n \'SupportsInt\',\n \'SupportsRound\',\n\n # Concrete collection types.\n \'ChainMap\',\n \'Counter\',\n \'Deque\',\n \'Dict\',\n \'DefaultDict\',\n \'List\',\n \'OrderedDict\',\n \'Set\',\n \'FrozenSet\',\n \'NamedTuple\', # Not really a type.\n \'TypedDict\', # Not really a type.\n \'Generator\',\n\n # Other concrete types.\n \'BinaryIO\',\n \'IO\',\n \'Match\',\n \'Pattern\',\n \'TextIO\',\n\n # One-off things.\n \'AnyStr\',\n \'assert_type\',\n \'assert_never\',\n \'cast\',\n \'clear_overloads\',\n \'dataclass_transform\',\n \'final\',\n \'get_args\',\n \'get_origin\',\n \'get_overloads\',\n \'get_type_hints\',\n \'is_typeddict\',\n \'LiteralString\',\n \'Never\',\n \'NewType\',\n \'no_type_check\',\n \'no_type_check_decorator\',\n \'NoReturn\',\n \'NotRequired\',\n \'overload\',\n \'override\',\n \'ParamSpecArgs\',\n \'ParamSpecKwargs\',\n \'Required\',\n \'reveal_type\',\n \'runtime_checkable\',\n \'Self\',\n \'Text\',\n \'TYPE_CHECKING\',\n \'TypeAlias\',\n \'TypeGuard\',\n \'TypeAliasType\',\n \'Unpack\',\n]\n\n# The pseudo-submodules \'re\' and \'io\' are part of the public\n# namespace, but excluded from __all__ because they might stomp on\n# legitimate imports of those modules.\n\n\ndef _type_convert(arg, module=None, *, allow_special_forms=False):\n """For converting None to type(None), and strings to ForwardRef."""\n if arg is None:\n return type(None)\n if isinstance(arg, str):\n return ForwardRef(arg, module=module, is_class=allow_special_forms)\n return arg\n\n\ndef _type_check(arg, msg, is_argument=True, module=None, *, allow_special_forms=False):\n """Check that the argument is a type, and return it (internal helper).\n\n As a special case, accept None and return type(None) instead. Also wrap strings\n into ForwardRef instances. Consider several corner cases, for example plain\n special forms like Union are not valid, while Union[int, str] is OK, etc.\n The msg argument is a human-readable error message, e.g.::\n\n "Union[arg, ...]: arg should be a type."\n\n We append the repr() of the actual value (truncated to 100 chars).\n """\n invalid_generic_forms = (Generic, Protocol)\n if not allow_special_forms:\n invalid_generic_forms += (ClassVar,)\n if is_argument:\n invalid_generic_forms += (Final,)\n\n arg = _type_convert(arg, module=module, allow_special_forms=allow_special_forms)\n if (isinstance(arg, _GenericAlias) and\n arg.__origin__ in invalid_generic_forms):\n raise TypeError(f"{arg} is not valid as type argument")\n if arg in (Any, LiteralString, NoReturn, Never, Self, TypeAlias):\n return arg\n if allow_special_forms and arg in (ClassVar, Final):\n return arg\n if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):\n raise TypeError(f"Plain {arg} is not valid as type argument")\n if type(arg) is tuple:\n raise TypeError(f"{msg} Got {arg!r:.100}.")\n return arg\n\n\ndef _is_param_expr(arg):\n return arg is ... 
or isinstance(arg,\n (tuple, list, ParamSpec, _ConcatenateGenericAlias))\n\n\ndef _should_unflatten_callable_args(typ, args):\n """Internal helper for munging collections.abc.Callable\'s __args__.\n\n The canonical representation for a Callable\'s __args__ flattens the\n argument types, see https://github.com/python/cpython/issues/86361.\n\n For example::\n\n >>> import collections.abc\n >>> P = ParamSpec(\'P\')\n >>> collections.abc.Callable[[int, int], str].__args__ == (int, int, str)\n True\n >>> collections.abc.Callable[P, str].__args__ == (P, str)\n True\n\n As a result, if we need to reconstruct the Callable from its __args__,\n we need to unflatten it.\n """\n return (\n typ.__origin__ is collections.abc.Callable\n and not (len(args) == 2 and _is_param_expr(args[0]))\n )\n\n\ndef _type_repr(obj):\n """Return the repr() of an object, special-casing types (internal helper).\n\n If obj is a type, we return a shorter version than the default\n type.__repr__, based on the module and qualified name, which is\n typically enough to uniquely identify a type. For everything\n else, we fall back on repr(obj).\n """\n # When changing this function, don\'t forget about\n # `_collections_abc._type_repr`, which does the same thing\n # and must be consistent with this one.\n if isinstance(obj, type):\n if obj.__module__ == \'builtins\':\n return obj.__qualname__\n return f\'{obj.__module__}.{obj.__qualname__}\'\n if obj is ...:\n return \'...\'\n if isinstance(obj, types.FunctionType):\n return obj.__name__\n if isinstance(obj, tuple):\n # Special case for `repr` of types with `ParamSpec`:\n return \'[\' + \', \'.join(_type_repr(t) for t in obj) + \']\'\n return repr(obj)\n\n\ndef _collect_parameters(args):\n """Collect all type variables and parameter specifications in args\n in order of first appearance (lexicographic order).\n\n For example::\n\n >>> P = ParamSpec(\'P\')\n >>> T = TypeVar(\'T\')\n >>> _collect_parameters((T, Callable[P, T]))\n (~T, ~P)\n """\n parameters = []\n for t in args:\n if isinstance(t, type):\n # We don\'t want __parameters__ descriptor of a bare Python class.\n pass\n elif isinstance(t, tuple):\n # `t` might be a tuple, when `ParamSpec` is substituted with\n # `[T, int]`, or `[int, *Ts]`, etc.\n for x in t:\n for collected in _collect_parameters([x]):\n if collected not in parameters:\n parameters.append(collected)\n elif hasattr(t, \'__typing_subst__\'):\n if t not in parameters:\n parameters.append(t)\n else:\n for x in getattr(t, \'__parameters__\', ()):\n if x not in parameters:\n parameters.append(x)\n return tuple(parameters)\n\n\ndef _check_generic(cls, parameters, elen):\n """Check correct count for parameters of a generic cls (internal helper).\n\n This gives a nice error message in case of count mismatch.\n """\n if not elen:\n raise TypeError(f"{cls} is not a generic class")\n alen = len(parameters)\n if alen != elen:\n raise TypeError(f"Too {\'many\' if alen > elen else \'few\'} arguments for {cls};"\n f" actual {alen}, expected {elen}")\n\ndef _unpack_args(args):\n newargs = []\n for arg in args:\n subargs = getattr(arg, \'__typing_unpacked_tuple_args__\', None)\n if subargs is not None and not (subargs and subargs[-1] is ...):\n newargs.extend(subargs)\n else:\n newargs.append(arg)\n return newargs\n\ndef _deduplicate(params, *, unhashable_fallback=False):\n # Weed out strict duplicates, preserving the first of each occurrence.\n try:\n return dict.fromkeys(params)\n except TypeError:\n if not unhashable_fallback:\n raise\n # Happens for cases like 
`Annotated[dict, {\'x\': IntValidator()}]`\n return _deduplicate_unhashable(params)\n\ndef _deduplicate_unhashable(unhashable_params):\n new_unhashable = []\n for t in unhashable_params:\n if t not in new_unhashable:\n new_unhashable.append(t)\n return new_unhashable\n\ndef _compare_args_orderless(first_args, second_args):\n first_unhashable = _deduplicate_unhashable(first_args)\n second_unhashable = _deduplicate_unhashable(second_args)\n t = list(second_unhashable)\n try:\n for elem in first_unhashable:\n t.remove(elem)\n except ValueError:\n return False\n return not t\n\ndef _remove_dups_flatten(parameters):\n """Internal helper for Union creation and substitution.\n\n Flatten Unions among parameters, then remove duplicates.\n """\n # Flatten out Union[Union[...], ...].\n params = []\n for p in parameters:\n if isinstance(p, (_UnionGenericAlias, types.UnionType)):\n params.extend(p.__args__)\n else:\n params.append(p)\n\n return tuple(_deduplicate(params, unhashable_fallback=True))\n\n\ndef _flatten_literal_params(parameters):\n """Internal helper for Literal creation: flatten Literals among parameters."""\n params = []\n for p in parameters:\n if isinstance(p, _LiteralGenericAlias):\n params.extend(p.__args__)\n else:\n params.append(p)\n return tuple(params)\n\n\n_cleanups = []\n_caches = {}\n\n\ndef _tp_cache(func=None, /, *, typed=False):\n """Internal wrapper caching __getitem__ of generic types.\n\n For non-hashable arguments, the original function is used as a fallback.\n """\n def decorator(func):\n # The callback \'inner\' references the newly created lru_cache\n # indirectly by performing a lookup in the global \'_caches\' dictionary.\n # This breaks a reference that can be problematic when combined with\n # C API extensions that leak references to types. 
See GH-98253.\n\n cache = functools.lru_cache(typed=typed)(func)\n _caches[func] = cache\n _cleanups.append(cache.cache_clear)\n del cache\n\n @functools.wraps(func)\n def inner(*args, **kwds):\n try:\n return _caches[func](*args, **kwds)\n except TypeError:\n pass # All real errors (not unhashable args) are raised below.\n return func(*args, **kwds)\n return inner\n\n if func is not None:\n return decorator(func)\n\n return decorator\n\ndef _eval_type(t, globalns, localns, recursive_guard=frozenset()):\n """Evaluate all forward references in the given type t.\n\n For use of globalns and localns see the docstring for get_type_hints().\n recursive_guard is used to prevent infinite recursion with a recursive\n ForwardRef.\n """\n if isinstance(t, ForwardRef):\n return t._evaluate(globalns, localns, recursive_guard)\n if isinstance(t, (_GenericAlias, GenericAlias, types.UnionType)):\n if isinstance(t, GenericAlias):\n args = tuple(\n ForwardRef(arg) if isinstance(arg, str) else arg\n for arg in t.__args__\n )\n is_unpacked = t.__unpacked__\n if _should_unflatten_callable_args(t, args):\n t = t.__origin__[(args[:-1], args[-1])]\n else:\n t = t.__origin__[args]\n if is_unpacked:\n t = Unpack[t]\n ev_args = tuple(_eval_type(a, globalns, localns, recursive_guard) for a in t.__args__)\n if ev_args == t.__args__:\n return t\n if isinstance(t, GenericAlias):\n return GenericAlias(t.__origin__, ev_args)\n if isinstance(t, types.UnionType):\n return functools.reduce(operator.or_, ev_args)\n else:\n return t.copy_with(ev_args)\n return t\n\n\nclass _Final:\n """Mixin to prohibit subclassing."""\n\n __slots__ = (\'__weakref__\',)\n\n def __init_subclass__(cls, /, *args, **kwds):\n if \'_root\' not in kwds:\n raise TypeError("Cannot subclass special typing classes")\n\n\nclass _NotIterable:\n """Mixin to prevent iteration, without being compatible with Iterable.\n\n That is, we could do::\n\n def __iter__(self): raise TypeError()\n\n But this would make users of this mixin duck type-compatible with\n collections.abc.Iterable - isinstance(foo, Iterable) would be True.\n\n Luckily, we can instead prevent iteration by setting __iter__ to None, which\n is treated specially.\n """\n\n __slots__ = ()\n __iter__ = None\n\n\n# Internal indicator of special typing constructs.\n# See __doc__ instance attribute for specific docs.\nclass _SpecialForm(_Final, _NotIterable, _root=True):\n __slots__ = (\'_name\', \'__doc__\', \'_getitem\')\n\n def __init__(self, getitem):\n self._getitem = getitem\n self._name = getitem.__name__\n self.__doc__ = getitem.__doc__\n\n def __getattr__(self, item):\n if item in {\'__name__\', \'__qualname__\'}:\n return self._name\n\n raise AttributeError(item)\n\n def __mro_entries__(self, bases):\n raise TypeError(f"Cannot subclass {self!r}")\n\n def __repr__(self):\n return \'typing.\' + self._name\n\n def __reduce__(self):\n return self._name\n\n def __call__(self, *args, **kwds):\n raise TypeError(f"Cannot instantiate {self!r}")\n\n def __or__(self, other):\n return Union[self, other]\n\n def __ror__(self, other):\n return Union[other, self]\n\n def __instancecheck__(self, obj):\n raise TypeError(f"{self} cannot be used with isinstance()")\n\n def __subclasscheck__(self, cls):\n raise TypeError(f"{self} cannot be used with issubclass()")\n\n @_tp_cache\n def __getitem__(self, parameters):\n return self._getitem(self, parameters)\n\n\nclass _LiteralSpecialForm(_SpecialForm, _root=True):\n def __getitem__(self, parameters):\n if not isinstance(parameters, tuple):\n parameters = 
(parameters,)\n return self._getitem(self, *parameters)\n\n\nclass _AnyMeta(type):\n def __instancecheck__(self, obj):\n if self is Any:\n raise TypeError("typing.Any cannot be used with isinstance()")\n return super().__instancecheck__(obj)\n\n def __repr__(self):\n if self is Any:\n return "typing.Any"\n return super().__repr__() # respect to subclasses\n\n\nclass Any(metaclass=_AnyMeta):\n """Special type indicating an unconstrained type.\n\n - Any is compatible with every type.\n - Any assumed to have all methods.\n - All values assumed to be instances of Any.\n\n Note that all the above statements are true from the point of view of\n static type checkers. At runtime, Any should not be used with instance\n checks.\n """\n\n def __new__(cls, *args, **kwargs):\n if cls is Any:\n raise TypeError("Any cannot be instantiated")\n return super().__new__(cls)\n\n\n@_SpecialForm\ndef NoReturn(self, parameters):\n """Special type indicating functions that never return.\n\n Example::\n\n from typing import NoReturn\n\n def stop() -> NoReturn:\n raise Exception(\'no way\')\n\n NoReturn can also be used as a bottom type, a type that\n has no values. Starting in Python 3.11, the Never type should\n be used for this concept instead. Type checkers should treat the two\n equivalently.\n """\n raise TypeError(f"{self} is not subscriptable")\n\n# This is semantically identical to NoReturn, but it is implemented\n# separately so that type checkers can distinguish between the two\n# if they want.\n@_SpecialForm\ndef Never(self, parameters):\n """The bottom type, a type that has no members.\n\n This can be used to define a function that should never be\n called, or a function that never returns::\n\n from typing import Never\n\n def never_call_me(arg: Never) -> None:\n pass\n\n def int_or_str(arg: int | str) -> None:\n never_call_me(arg) # type checker error\n match arg:\n case int():\n print("It\'s an int")\n case str():\n print("It\'s a str")\n case _:\n never_call_me(arg) # OK, arg is of type Never\n """\n raise TypeError(f"{self} is not subscriptable")\n\n\n@_SpecialForm\ndef Self(self, parameters):\n """Used to spell the type of "self" in classes.\n\n Example::\n\n from typing import Self\n\n class Foo:\n def return_self(self) -> Self:\n ...\n return self\n\n This is especially useful for:\n - classmethods that are used as alternative constructors\n - annotating an `__enter__` method which returns self\n """\n raise TypeError(f"{self} is not subscriptable")\n\n\n@_SpecialForm\ndef LiteralString(self, parameters):\n """Represents an arbitrary literal string.\n\n Example::\n\n from typing import LiteralString\n\n def run_query(sql: LiteralString) -> None:\n ...\n\n def caller(arbitrary_string: str, literal_string: LiteralString) -> None:\n run_query("SELECT * FROM students") # OK\n run_query(literal_string) # OK\n run_query("SELECT * FROM " + literal_string) # OK\n run_query(arbitrary_string) # type checker error\n run_query( # type checker error\n f"SELECT * FROM students WHERE name = {arbitrary_string}"\n )\n\n Only string literals and other LiteralStrings are compatible\n with LiteralString. 
This provides a tool to help prevent\n security issues such as SQL injection.\n """\n raise TypeError(f"{self} is not subscriptable")\n\n\n@_SpecialForm\ndef ClassVar(self, parameters):\n """Special type construct to mark class variables.\n\n An annotation wrapped in ClassVar indicates that a given\n attribute is intended to be used as a class variable and\n should not be set on instances of that class.\n\n Usage::\n\n class Starship:\n stats: ClassVar[dict[str, int]] = {} # class variable\n damage: int = 10 # instance variable\n\n ClassVar accepts only types and cannot be further subscribed.\n\n Note that ClassVar is not a class itself, and should not\n be used with isinstance() or issubclass().\n """\n item = _type_check(parameters, f\'{self} accepts only single type.\')\n return _GenericAlias(self, (item,))\n\n@_SpecialForm\ndef Final(self, parameters):\n """Special typing construct to indicate final names to type checkers.\n\n A final name cannot be re-assigned or overridden in a subclass.\n\n For example::\n\n MAX_SIZE: Final = 9000\n MAX_SIZE += 1 # Error reported by type checker\n\n class Connection:\n TIMEOUT: Final[int] = 10\n\n class FastConnector(Connection):\n TIMEOUT = 1 # Error reported by type checker\n\n There is no runtime checking of these properties.\n """\n item = _type_check(parameters, f\'{self} accepts only single type.\')\n return _GenericAlias(self, (item,))\n\n@_SpecialForm\ndef Union(self, parameters):\n """Union type; Union[X, Y] means either X or Y.\n\n On Python 3.10 and higher, the | operator\n can also be used to denote unions;\n X | Y means the same thing to the type checker as Union[X, Y].\n\n To define a union, use e.g. Union[int, str]. Details:\n - The arguments must be types and there must be at least one.\n - None as an argument is a special case and is replaced by\n type(None).\n - Unions of unions are flattened, e.g.::\n\n assert Union[Union[int, str], float] == Union[int, str, float]\n\n - Unions of a single argument vanish, e.g.::\n\n assert Union[int] == int # The constructor actually returns int\n\n - Redundant arguments are skipped, e.g.::\n\n assert Union[int, str, int] == Union[int, str]\n\n - When comparing unions, the argument order is ignored, e.g.::\n\n assert Union[int, str] == Union[str, int]\n\n - You cannot subclass or instantiate a union.\n - You can use Optional[X] as a shorthand for Union[X, None].\n """\n if parameters == ():\n raise TypeError("Cannot take a Union of no types.")\n if not isinstance(parameters, tuple):\n parameters = (parameters,)\n msg = "Union[arg, ...]: each arg must be a type."\n parameters = tuple(_type_check(p, msg) for p in parameters)\n parameters = _remove_dups_flatten(parameters)\n if len(parameters) == 1:\n return parameters[0]\n if len(parameters) == 2 and type(None) in parameters:\n return _UnionGenericAlias(self, parameters, name="Optional")\n return _UnionGenericAlias(self, parameters)\n\ndef _make_union(left, right):\n """Used from the C implementation of TypeVar.\n\n TypeVar.__or__ calls this instead of returning types.UnionType\n because we want to allow unions between TypeVars and strings\n (forward references).\n """\n return Union[left, right]\n\n@_SpecialForm\ndef Optional(self, parameters):\n """Optional[X] is equivalent to Union[X, None]."""\n arg = _type_check(parameters, f"{self} requires a single type.")\n return Union[arg, type(None)]\n\n@_LiteralSpecialForm\n@_tp_cache(typed=True)\ndef Literal(self, *parameters):\n """Special typing form to define literal types (a.k.a. 
value types).\n\n This form can be used to indicate to type checkers that the corresponding\n variable or function parameter has a value equivalent to the provided\n literal (or one of several literals)::\n\n def validate_simple(data: Any) -> Literal[True]: # always returns True\n ...\n\n MODE = Literal[\'r\', \'rb\', \'w\', \'wb\']\n def open_helper(file: str, mode: MODE) -> str:\n ...\n\n open_helper(\'/some/path\', \'r\') # Passes type check\n open_helper(\'/other/path\', \'typo\') # Error in type checker\n\n Literal[...] cannot be subclassed. At runtime, an arbitrary value\n is allowed as type argument to Literal[...], but type checkers may\n impose restrictions.\n """\n # There is no \'_type_check\' call because arguments to Literal[...] are\n # values, not types.\n parameters = _flatten_literal_params(parameters)\n\n try:\n parameters = tuple(p for p, _ in _deduplicate(list(_value_and_type_iter(parameters))))\n except TypeError: # unhashable parameters\n pass\n\n return _LiteralGenericAlias(self, parameters)\n\n\n@_SpecialForm\ndef TypeAlias(self, parameters):\n """Special form for marking type aliases.\n\n Use TypeAlias to indicate that an assignment should\n be recognized as a proper type alias definition by type\n checkers.\n\n For example::\n\n Predicate: TypeAlias = Callable[..., bool]\n\n It\'s invalid when used anywhere except as in the example above.\n """\n raise TypeError(f"{self} is not subscriptable")\n\n\n@_SpecialForm\ndef Concatenate(self, parameters):\n """Special form for annotating higher-order functions.\n\n ``Concatenate`` can be used in conjunction with ``ParamSpec`` and\n ``Callable`` to represent a higher-order function which adds, removes or\n transforms the parameters of a callable.\n\n For example::\n\n Callable[Concatenate[int, P], int]\n\n See PEP 612 for detailed information.\n """\n if parameters == ():\n raise TypeError("Cannot take a Concatenate of no types.")\n if not isinstance(parameters, tuple):\n parameters = (parameters,)\n if not (parameters[-1] is ... or isinstance(parameters[-1], ParamSpec)):\n raise TypeError("The last parameter to Concatenate should be a "\n "ParamSpec variable or ellipsis.")\n msg = "Concatenate[arg, ...]: each arg must be a type."\n parameters = (*(_type_check(p, msg) for p in parameters[:-1]), parameters[-1])\n return _ConcatenateGenericAlias(self, parameters)\n\n\n@_SpecialForm\ndef TypeGuard(self, parameters):\n """Special typing construct for marking user-defined type guard functions.\n\n ``TypeGuard`` can be used to annotate the return type of a user-defined\n type guard function. ``TypeGuard`` only accepts a single type argument.\n At runtime, functions marked this way should return a boolean.\n\n ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static\n type checkers to determine a more precise type of an expression within a\n program\'s code flow. Usually type narrowing is done by analyzing\n conditional code flow and applying the narrowing to a block of code. The\n conditional expression here is sometimes referred to as a "type guard".\n\n Sometimes it would be convenient to use a user-defined boolean function\n as a type guard. Such a function should use ``TypeGuard[...]`` as its\n return type to alert static type checkers to this intention.\n\n Using ``-> TypeGuard`` tells the static type checker that for a given\n function:\n\n 1. The return value is a boolean.\n 2. 
If the return value is ``True``, the type of its argument\n is the type inside ``TypeGuard``.\n\n For example::\n\n def is_str_list(val: list[object]) -> TypeGuard[list[str]]:\n \'\'\'Determines whether all objects in the list are strings\'\'\'\n return all(isinstance(x, str) for x in val)\n\n def func1(val: list[object]):\n if is_str_list(val):\n # Type of ``val`` is narrowed to ``list[str]``.\n print(" ".join(val))\n else:\n # Type of ``val`` remains as ``list[object]``.\n print("Not a list of strings!")\n\n Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower\n form of ``TypeA`` (it can even be a wider form) and this may lead to\n type-unsafe results. The main reason is to allow for things like\n narrowing ``list[object]`` to ``list[str]`` even though the latter is not\n a subtype of the former, since ``list`` is invariant. The responsibility of\n writing type-safe type guards is left to the user.\n\n ``TypeGuard`` also works with type variables. For more information, see\n PEP 647 (User-Defined Type Guards).\n """\n item = _type_check(parameters, f\'{self} accepts only single type.\')\n return _GenericAlias(self, (item,))\n\n\nclass ForwardRef(_Final, _root=True):\n """Internal wrapper to hold a forward reference."""\n\n __slots__ = (\'__forward_arg__\', \'__forward_code__\',\n \'__forward_evaluated__\', \'__forward_value__\',\n \'__forward_is_argument__\', \'__forward_is_class__\',\n \'__forward_module__\')\n\n def __init__(self, arg, is_argument=True, module=None, *, is_class=False):\n if not isinstance(arg, str):\n raise TypeError(f"Forward reference must be a string -- got {arg!r}")\n\n # If we do `def f(*args: *Ts)`, then we\'ll have `arg = \'*Ts\'`.\n # Unfortunately, this isn\'t a valid expression on its own, so we\n # do the unpacking manually.\n if arg.startswith(\'*\'):\n arg_to_compile = f\'({arg},)[0]\' # E.g. 
(*Ts,)[0] or (*tuple[int, int],)[0]\n else:\n arg_to_compile = arg\n try:\n code = compile(arg_to_compile, \'<string>\', \'eval\')\n except SyntaxError:\n raise SyntaxError(f"Forward reference must be an expression -- got {arg!r}")\n\n self.__forward_arg__ = arg\n self.__forward_code__ = code\n self.__forward_evaluated__ = False\n self.__forward_value__ = None\n self.__forward_is_argument__ = is_argument\n self.__forward_is_class__ = is_class\n self.__forward_module__ = module\n\n def _evaluate(self, globalns, localns, recursive_guard):\n if self.__forward_arg__ in recursive_guard:\n return self\n if not self.__forward_evaluated__ or localns is not globalns:\n if globalns is None and localns is None:\n globalns = localns = {}\n elif globalns is None:\n globalns = localns\n elif localns is None:\n localns = globalns\n if self.__forward_module__ is not None:\n globalns = getattr(\n sys.modules.get(self.__forward_module__, None), \'__dict__\', globalns\n )\n type_ = _type_check(\n eval(self.__forward_code__, globalns, localns),\n "Forward references must evaluate to types.",\n is_argument=self.__forward_is_argument__,\n allow_special_forms=self.__forward_is_class__,\n )\n self.__forward_value__ = _eval_type(\n type_, globalns, localns, recursive_guard | {self.__forward_arg__}\n )\n self.__forward_evaluated__ = True\n return self.__forward_value__\n\n def __eq__(self, other):\n if not isinstance(other, ForwardRef):\n return NotImplemented\n if self.__forward_evaluated__ and other.__forward_evaluated__:\n return (self.__forward_arg__ == other.__forward_arg__ and\n self.__forward_value__ == other.__forward_value__)\n return (self.__forward_arg__ == other.__forward_arg__ and\n self.__forward_module__ == other.__forward_module__)\n\n def __hash__(self):\n return hash((self.__forward_arg__, self.__forward_module__))\n\n def __or__(self, other):\n return Union[self, other]\n\n def __ror__(self, other):\n return Union[other, self]\n\n def __repr__(self):\n if self.__forward_module__ is None:\n module_repr = \'\'\n else:\n module_repr = f\', module={self.__forward_module__!r}\'\n return f\'ForwardRef({self.__forward_arg__!r}{module_repr})\'\n\n\ndef _is_unpacked_typevartuple(x: Any) -> bool:\n return ((not isinstance(x, type)) and\n getattr(x, \'__typing_is_unpacked_typevartuple__\', False))\n\n\ndef _is_typevar_like(x: Any) -> bool:\n return isinstance(x, (TypeVar, ParamSpec)) or _is_unpacked_typevartuple(x)\n\n\nclass _PickleUsingNameMixin:\n """Mixin enabling pickling based on self.__name__."""\n\n def __reduce__(self):\n return self.__name__\n\n\ndef _typevar_subst(self, arg):\n msg = "Parameters to generic types must be types."\n arg = _type_check(arg, msg, is_argument=True)\n if ((isinstance(arg, _GenericAlias) and arg.__origin__ is Unpack) or\n (isinstance(arg, GenericAlias) and getattr(arg, \'__unpacked__\', False))):\n raise TypeError(f"{arg} is not valid as type argument")\n return arg\n\n\ndef _typevartuple_prepare_subst(self, alias, args):\n params = alias.__parameters__\n typevartuple_index = params.index(self)\n for param in params[typevartuple_index + 1:]:\n if isinstance(param, TypeVarTuple):\n raise TypeError(f"More than one TypeVarTuple parameter in {alias}")\n\n alen = len(args)\n plen = len(params)\n left = typevartuple_index\n right = plen - typevartuple_index - 1\n var_tuple_index = None\n fillarg = None\n for k, arg in enumerate(args):\n if not isinstance(arg, type):\n subargs = getattr(arg, \'__typing_unpacked_tuple_args__\', None)\n if subargs and len(subargs) == 2 and 
subargs[-1] is ...:\n if var_tuple_index is not None:\n raise TypeError("More than one unpacked arbitrary-length tuple argument")\n var_tuple_index = k\n fillarg = subargs[0]\n if var_tuple_index is not None:\n left = min(left, var_tuple_index)\n right = min(right, alen - var_tuple_index - 1)\n elif left + right > alen:\n raise TypeError(f"Too few arguments for {alias};"\n f" actual {alen}, expected at least {plen-1}")\n\n return (\n *args[:left],\n *([fillarg]*(typevartuple_index - left)),\n tuple(args[left: alen - right]),\n *([fillarg]*(plen - right - left - typevartuple_index - 1)),\n *args[alen - right:],\n )\n\n\ndef _paramspec_subst(self, arg):\n if isinstance(arg, (list, tuple)):\n arg = tuple(_type_check(a, "Expected a type.") for a in arg)\n elif not _is_param_expr(arg):\n raise TypeError(f"Expected a list of types, an ellipsis, "\n f"ParamSpec, or Concatenate. Got {arg}")\n return arg\n\n\ndef _paramspec_prepare_subst(self, alias, args):\n params = alias.__parameters__\n i = params.index(self)\n if i >= len(args):\n raise TypeError(f"Too few arguments for {alias}")\n # Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612.\n if len(params) == 1 and not _is_param_expr(args[0]):\n assert i == 0\n args = (args,)\n # Convert lists to tuples to help other libraries cache the results.\n elif isinstance(args[i], list):\n args = (*args[:i], tuple(args[i]), *args[i+1:])\n return args\n\n\n@_tp_cache\ndef _generic_class_getitem(cls, params):\n """Parameterizes a generic class.\n\n At least, parameterizing a generic class is the *main* thing this method\n does. For example, for some generic class `Foo`, this is called when we\n do `Foo[int]` - there, with `cls=Foo` and `params=int`.\n\n However, note that this method is also called when defining generic\n classes in the first place with `class Foo(Generic[T]): ...`.\n """\n if not isinstance(params, tuple):\n params = (params,)\n\n params = tuple(_type_convert(p) for p in params)\n is_generic_or_protocol = cls in (Generic, Protocol)\n\n if is_generic_or_protocol:\n # Generic and Protocol can only be subscripted with unique type variables.\n if not params:\n raise TypeError(\n f"Parameter list to {cls.__qualname__}[...] cannot be empty"\n )\n if not all(_is_typevar_like(p) for p in params):\n raise TypeError(\n f"Parameters to {cls.__name__}[...] must all be type variables "\n f"or parameter specification variables.")\n if len(set(params)) != len(params):\n raise TypeError(\n f"Parameters to {cls.__name__}[...] 
must all be unique")\n else:\n # Subscripting a regular Generic subclass.\n for param in cls.__parameters__:\n prepare = getattr(param, \'__typing_prepare_subst__\', None)\n if prepare is not None:\n params = prepare(cls, params)\n _check_generic(cls, params, len(cls.__parameters__))\n\n new_args = []\n for param, new_arg in zip(cls.__parameters__, params):\n if isinstance(param, TypeVarTuple):\n new_args.extend(new_arg)\n else:\n new_args.append(new_arg)\n params = tuple(new_args)\n\n return _GenericAlias(cls, params)\n\n\ndef _generic_init_subclass(cls, *args, **kwargs):\n super(Generic, cls).__init_subclass__(*args, **kwargs)\n tvars = []\n if \'__orig_bases__\' in cls.__dict__:\n error = Generic in cls.__orig_bases__\n else:\n error = (Generic in cls.__bases__ and\n cls.__name__ != \'Protocol\' and\n type(cls) != _TypedDictMeta)\n if error:\n raise TypeError("Cannot inherit from plain Generic")\n if \'__orig_bases__\' in cls.__dict__:\n tvars = _collect_parameters(cls.__orig_bases__)\n # Look for Generic[T1, ..., Tn].\n # If found, tvars must be a subset of it.\n # If not found, tvars is it.\n # Also check for and reject plain Generic,\n # and reject multiple Generic[...].\n gvars = None\n for base in cls.__orig_bases__:\n if (isinstance(base, _GenericAlias) and\n base.__origin__ is Generic):\n if gvars is not None:\n raise TypeError(\n "Cannot inherit from Generic[...] multiple times.")\n gvars = base.__parameters__\n if gvars is not None:\n tvarset = set(tvars)\n gvarset = set(gvars)\n if not tvarset <= gvarset:\n s_vars = \', \'.join(str(t) for t in tvars if t not in gvarset)\n s_args = \', \'.join(str(g) for g in gvars)\n raise TypeError(f"Some type variables ({s_vars}) are"\n f" not listed in Generic[{s_args}]")\n tvars = gvars\n cls.__parameters__ = tuple(tvars)\n\n\ndef _is_dunder(attr):\n return attr.startswith(\'__\') and attr.endswith(\'__\')\n\nclass _BaseGenericAlias(_Final, _root=True):\n """The central part of the internal API.\n\n This represents a generic version of type \'origin\' with type arguments \'params\'.\n There are two kind of these aliases: user defined and special. The special ones\n are wrappers around builtin collections and ABCs in collections.abc. These must\n have \'name\' always set. If \'inst\' is False, then the alias can\'t be instantiated;\n this is used by e.g. 
typing.List and typing.Dict.\n """\n\n def __init__(self, origin, *, inst=True, name=None):\n self._inst = inst\n self._name = name\n self.__origin__ = origin\n self.__slots__ = None # This is not documented.\n\n def __call__(self, *args, **kwargs):\n if not self._inst:\n raise TypeError(f"Type {self._name} cannot be instantiated; "\n f"use {self.__origin__.__name__}() instead")\n result = self.__origin__(*args, **kwargs)\n try:\n result.__orig_class__ = self\n # Some objects raise TypeError (or something even more exotic)\n # if you try to set attributes on them; we guard against that here\n except Exception:\n pass\n return result\n\n def __mro_entries__(self, bases):\n res = []\n if self.__origin__ not in bases:\n res.append(self.__origin__)\n i = bases.index(self)\n for b in bases[i+1:]:\n if isinstance(b, _BaseGenericAlias) or issubclass(b, Generic):\n break\n else:\n res.append(Generic)\n return tuple(res)\n\n def __getattr__(self, attr):\n if attr in {\'__name__\', \'__qualname__\'}:\n return self._name or self.__origin__.__name__\n\n # We are careful for copy and pickle.\n # Also for simplicity we don\'t relay any dunder names\n if \'__origin__\' in self.__dict__ and not _is_dunder(attr):\n return getattr(self.__origin__, attr)\n raise AttributeError(attr)\n\n def __setattr__(self, attr, val):\n if _is_dunder(attr) or attr in {\'_name\', \'_inst\', \'_nparams\'}:\n super().__setattr__(attr, val)\n else:\n setattr(self.__origin__, attr, val)\n\n def __instancecheck__(self, obj):\n return self.__subclasscheck__(type(obj))\n\n def __subclasscheck__(self, cls):\n raise TypeError("Subscripted generics cannot be used with"\n " class and instance checks")\n\n def __dir__(self):\n return list(set(super().__dir__()\n + [attr for attr in dir(self.__origin__) if not _is_dunder(attr)]))\n\n\n# Special typing constructs Union, Optional, Generic, Callable and Tuple\n# use three special attributes for internal bookkeeping of generic types:\n# * __parameters__ is a tuple of unique free type parameters of a generic\n# type, for example, Dict[T, T].__parameters__ == (T,);\n# * __origin__ keeps a reference to a type that was subscripted,\n# e.g., Union[T, int].__origin__ == Union, or the non-generic version of\n# the type.\n# * __args__ is a tuple of all arguments used in subscripting,\n# e.g., Dict[T, int].__args__ == (T, int).\n\n\nclass _GenericAlias(_BaseGenericAlias, _root=True):\n # The type of parameterized generics.\n #\n # That is, for example, `type(List[int])` is `_GenericAlias`.\n #\n # Objects which are instances of this class include:\n # * Parameterized container types, e.g. `Tuple[int]`, `List[int]`.\n # * Note that native container types, e.g. `tuple`, `list`, use\n # `types.GenericAlias` instead.\n # * Parameterized classes:\n # class C[T]: pass\n # # C[int] is a _GenericAlias\n # * `Callable` aliases, generic `Callable` aliases, and\n # parameterized `Callable` aliases:\n # T = TypeVar(\'T\')\n # # _CallableGenericAlias inherits from _GenericAlias.\n # A = Callable[[], None] # _CallableGenericAlias\n # B = Callable[[T], None] # _CallableGenericAlias\n # C = B[int] # _CallableGenericAlias\n # * Parameterized `Final`, `ClassVar` and `TypeGuard`:\n # # All _GenericAlias\n # Final[int]\n # ClassVar[float]\n # TypeVar[bool]\n\n def __init__(self, origin, args, *, inst=True, name=None):\n super().__init__(origin, inst=inst, name=name)\n if not isinstance(args, tuple):\n args = (args,)\n self.__args__ = tuple(... 
if a is _TypingEllipsis else\n a for a in args)\n self.__parameters__ = _collect_parameters(args)\n if not name:\n self.__module__ = origin.__module__\n\n def __eq__(self, other):\n if not isinstance(other, _GenericAlias):\n return NotImplemented\n return (self.__origin__ == other.__origin__\n and self.__args__ == other.__args__)\n\n def __hash__(self):\n return hash((self.__origin__, self.__args__))\n\n def __or__(self, right):\n return Union[self, right]\n\n def __ror__(self, left):\n return Union[left, self]\n\n @_tp_cache\n def __getitem__(self, args):\n # Parameterizes an already-parameterized object.\n #\n # For example, we arrive here doing something like:\n # T1 = TypeVar(\'T1\')\n # T2 = TypeVar(\'T2\')\n # T3 = TypeVar(\'T3\')\n # class A(Generic[T1]): pass\n # B = A[T2] # B is a _GenericAlias\n # C = B[T3] # Invokes _GenericAlias.__getitem__\n #\n # We also arrive here when parameterizing a generic `Callable` alias:\n # T = TypeVar(\'T\')\n # C = Callable[[T], None]\n # C[int] # Invokes _GenericAlias.__getitem__\n\n if self.__origin__ in (Generic, Protocol):\n # Can\'t subscript Generic[...] or Protocol[...].\n raise TypeError(f"Cannot subscript already-subscripted {self}")\n if not self.__parameters__:\n raise TypeError(f"{self} is not a generic class")\n\n # Preprocess `args`.\n if not isinstance(args, tuple):\n args = (args,)\n args = tuple(_type_convert(p) for p in args)\n args = _unpack_args(args)\n new_args = self._determine_new_args(args)\n r = self.copy_with(new_args)\n return r\n\n def _determine_new_args(self, args):\n # Determines new __args__ for __getitem__.\n #\n # For example, suppose we had:\n # T1 = TypeVar(\'T1\')\n # T2 = TypeVar(\'T2\')\n # class A(Generic[T1, T2]): pass\n # T3 = TypeVar(\'T3\')\n # B = A[int, T3]\n # C = B[str]\n # `B.__args__` is `(int, T3)`, so `C.__args__` should be `(int, str)`.\n # Unfortunately, this is harder than it looks, because if `T3` is\n # anything more exotic than a plain `TypeVar`, we need to consider\n # edge cases.\n\n params = self.__parameters__\n # In the example above, this would be {T3: str}\n for param in params:\n prepare = getattr(param, \'__typing_prepare_subst__\', None)\n if prepare is not None:\n args = prepare(self, args)\n alen = len(args)\n plen = len(params)\n if alen != plen:\n raise TypeError(f"Too {\'many\' if alen > plen else \'few\'} arguments for {self};"\n f" actual {alen}, expected {plen}")\n new_arg_by_param = dict(zip(params, args))\n return tuple(self._make_substitution(self.__args__, new_arg_by_param))\n\n def _make_substitution(self, args, new_arg_by_param):\n """Create a list of new type arguments."""\n new_args = []\n for old_arg in args:\n if isinstance(old_arg, type):\n new_args.append(old_arg)\n continue\n\n substfunc = getattr(old_arg, \'__typing_subst__\', None)\n if substfunc:\n new_arg = substfunc(new_arg_by_param[old_arg])\n else:\n subparams = getattr(old_arg, \'__parameters__\', ())\n if not subparams:\n new_arg = old_arg\n else:\n subargs = []\n for x in subparams:\n if isinstance(x, TypeVarTuple):\n subargs.extend(new_arg_by_param[x])\n else:\n subargs.append(new_arg_by_param[x])\n new_arg = old_arg[tuple(subargs)]\n\n if self.__origin__ == collections.abc.Callable and isinstance(new_arg, tuple):\n # Consider the following `Callable`.\n # C = Callable[[int], str]\n # Here, `C.__args__` should be (int, str) - NOT ([int], str).\n # That means that if we had something like...\n # P = ParamSpec(\'P\')\n # T = TypeVar(\'T\')\n # C = Callable[P, T]\n # D = C[[int, str], float]\n # ...we 
need to be careful; `new_args` should end up as\n # `(int, str, float)` rather than `([int, str], float)`.\n new_args.extend(new_arg)\n elif _is_unpacked_typevartuple(old_arg):\n # Consider the following `_GenericAlias`, `B`:\n # class A(Generic[*Ts]): ...\n # B = A[T, *Ts]\n # If we then do:\n # B[float, int, str]\n # The `new_arg` corresponding to `T` will be `float`, and the\n # `new_arg` corresponding to `*Ts` will be `(int, str)`. We\n # should join all these types together in a flat list\n # `(float, int, str)` - so again, we should `extend`.\n new_args.extend(new_arg)\n elif isinstance(old_arg, tuple):\n # Corner case:\n # P = ParamSpec(\'P\')\n # T = TypeVar(\'T\')\n # class Base(Generic[P]): ...\n # Can be substituted like this:\n # X = Base[[int, T]]\n # In this case, `old_arg` will be a tuple:\n new_args.append(\n tuple(self._make_substitution(old_arg, new_arg_by_param)),\n )\n else:\n new_args.append(new_arg)\n return new_args\n\n def copy_with(self, args):\n return self.__class__(self.__origin__, args, name=self._name, inst=self._inst)\n\n def __repr__(self):\n if self._name:\n name = \'typing.\' + self._name\n else:\n name = _type_repr(self.__origin__)\n if self.__args__:\n args = ", ".join([_type_repr(a) for a in self.__args__])\n else:\n # To ensure the repr is eval-able.\n args = "()"\n return f\'{name}[{args}]\'\n\n def __reduce__(self):\n if self._name:\n origin = globals()[self._name]\n else:\n origin = self.__origin__\n args = tuple(self.__args__)\n if len(args) == 1 and not isinstance(args[0], tuple):\n args, = args\n return operator.getitem, (origin, args)\n\n def __mro_entries__(self, bases):\n if isinstance(self.__origin__, _SpecialForm):\n raise TypeError(f"Cannot subclass {self!r}")\n\n if self._name: # generic version of an ABC or built-in class\n return super().__mro_entries__(bases)\n if self.__origin__ is Generic:\n if Protocol in bases:\n return ()\n i = bases.index(self)\n for b in bases[i+1:]:\n if isinstance(b, _BaseGenericAlias) and b is not self:\n return ()\n return (self.__origin__,)\n\n def __iter__(self):\n yield Unpack[self]\n\n\n# _nparams is the number of accepted parameters, e.g. 0 for Hashable,\n# 1 for List and 2 for Dict. 
It may be -1 if variable number of\n# parameters are accepted (needs custom __getitem__).\n\nclass _SpecialGenericAlias(_NotIterable, _BaseGenericAlias, _root=True):\n def __init__(self, origin, nparams, *, inst=True, name=None):\n if name is None:\n name = origin.__name__\n super().__init__(origin, inst=inst, name=name)\n self._nparams = nparams\n if origin.__module__ == \'builtins\':\n self.__doc__ = f\'A generic version of {origin.__qualname__}.\'\n else:\n self.__doc__ = f\'A generic version of {origin.__module__}.{origin.__qualname__}.\'\n\n @_tp_cache\n def __getitem__(self, params):\n if not isinstance(params, tuple):\n params = (params,)\n msg = "Parameters to generic types must be types."\n params = tuple(_type_check(p, msg) for p in params)\n _check_generic(self, params, self._nparams)\n return self.copy_with(params)\n\n def copy_with(self, params):\n return _GenericAlias(self.__origin__, params,\n name=self._name, inst=self._inst)\n\n def __repr__(self):\n return \'typing.\' + self._name\n\n def __subclasscheck__(self, cls):\n if isinstance(cls, _SpecialGenericAlias):\n return issubclass(cls.__origin__, self.__origin__)\n if not isinstance(cls, _GenericAlias):\n return issubclass(cls, self.__origin__)\n return super().__subclasscheck__(cls)\n\n def __reduce__(self):\n return self._name\n\n def __or__(self, right):\n return Union[self, right]\n\n def __ror__(self, left):\n return Union[left, self]\n\n\nclass _DeprecatedGenericAlias(_SpecialGenericAlias, _root=True):\n def __init__(\n self, origin, nparams, *, removal_version, inst=True, name=None\n ):\n super().__init__(origin, nparams, inst=inst, name=name)\n self._removal_version = removal_version\n\n def __instancecheck__(self, inst):\n import warnings\n warnings._deprecated(\n f"{self.__module__}.{self._name}", remove=self._removal_version\n )\n return super().__instancecheck__(inst)\n\n\nclass _CallableGenericAlias(_NotIterable, _GenericAlias, _root=True):\n def __repr__(self):\n assert self._name == \'Callable\'\n args = self.__args__\n if len(args) == 2 and _is_param_expr(args[0]):\n return super().__repr__()\n return (f\'typing.Callable\'\n f\'[[{", ".join([_type_repr(a) for a in args[:-1]])}], \'\n f\'{_type_repr(args[-1])}]\')\n\n def __reduce__(self):\n args = self.__args__\n if not (len(args) == 2 and _is_param_expr(args[0])):\n args = list(args[:-1]), args[-1]\n return operator.getitem, (Callable, args)\n\n\nclass _CallableType(_SpecialGenericAlias, _root=True):\n def copy_with(self, params):\n return _CallableGenericAlias(self.__origin__, params,\n name=self._name, inst=self._inst)\n\n def __getitem__(self, params):\n if not isinstance(params, tuple) or len(params) != 2:\n raise TypeError("Callable must be used as "\n "Callable[[arg, ...], result].")\n args, result = params\n # This relaxes what args can be on purpose to allow things like\n # PEP 612 ParamSpec. Responsibility for whether a user is using\n # Callable[...] 
properly is deferred to static type checkers.\n if isinstance(args, list):\n params = (tuple(args), result)\n else:\n params = (args, result)\n return self.__getitem_inner__(params)\n\n @_tp_cache\n def __getitem_inner__(self, params):\n args, result = params\n msg = "Callable[args, result]: result must be a type."\n result = _type_check(result, msg)\n if args is Ellipsis:\n return self.copy_with((_TypingEllipsis, result))\n if not isinstance(args, tuple):\n args = (args,)\n args = tuple(_type_convert(arg) for arg in args)\n params = args + (result,)\n return self.copy_with(params)\n\n\nclass _TupleType(_SpecialGenericAlias, _root=True):\n @_tp_cache\n def __getitem__(self, params):\n if not isinstance(params, tuple):\n params = (params,)\n if len(params) >= 2 and params[-1] is ...:\n msg = "Tuple[t, ...]: t must be a type."\n params = tuple(_type_check(p, msg) for p in params[:-1])\n return self.copy_with((*params, _TypingEllipsis))\n msg = "Tuple[t0, t1, ...]: each t must be a type."\n params = tuple(_type_check(p, msg) for p in params)\n return self.copy_with(params)\n\n\nclass _UnionGenericAlias(_NotIterable, _GenericAlias, _root=True):\n def copy_with(self, params):\n return Union[params]\n\n def __eq__(self, other):\n if not isinstance(other, (_UnionGenericAlias, types.UnionType)):\n return NotImplemented\n try: # fast path\n return set(self.__args__) == set(other.__args__)\n except TypeError: # not hashable, slow path\n return _compare_args_orderless(self.__args__, other.__args__)\n\n def __hash__(self):\n return hash(frozenset(self.__args__))\n\n def __repr__(self):\n args = self.__args__\n if len(args) == 2:\n if args[0] is type(None):\n return f\'typing.Optional[{_type_repr(args[1])}]\'\n elif args[1] is type(None):\n return f\'typing.Optional[{_type_repr(args[0])}]\'\n return super().__repr__()\n\n def __instancecheck__(self, obj):\n return self.__subclasscheck__(type(obj))\n\n def __subclasscheck__(self, cls):\n for arg in self.__args__:\n if issubclass(cls, arg):\n return True\n\n def __reduce__(self):\n func, (origin, args) = super().__reduce__()\n return func, (Union, args)\n\n\ndef _value_and_type_iter(parameters):\n return ((p, type(p)) for p in parameters)\n\n\nclass _LiteralGenericAlias(_GenericAlias, _root=True):\n def __eq__(self, other):\n if not isinstance(other, _LiteralGenericAlias):\n return NotImplemented\n\n return set(_value_and_type_iter(self.__args__)) == set(_value_and_type_iter(other.__args__))\n\n def __hash__(self):\n return hash(frozenset(_value_and_type_iter(self.__args__)))\n\n\nclass _ConcatenateGenericAlias(_GenericAlias, _root=True):\n def copy_with(self, params):\n if isinstance(params[-1], (list, tuple)):\n return (*params[:-1], *params[-1])\n if isinstance(params[-1], _ConcatenateGenericAlias):\n params = (*params[:-1], *params[-1].__args__)\n return super().copy_with(params)\n\n\n@_SpecialForm\ndef Unpack(self, parameters):\n """Type unpack operator.\n\n The type unpack operator takes the child types from some container type,\n such as `tuple[int, str]` or a `TypeVarTuple`, and \'pulls them out\'.\n\n For example::\n\n # For some generic class `Foo`:\n Foo[Unpack[tuple[int, str]]] # Equivalent to Foo[int, str]\n\n Ts = TypeVarTuple(\'Ts\')\n # Specifies that `Bar` is generic in an arbitrary number of types.\n # (Think of `Ts` as a tuple of an arbitrary number of individual\n # `TypeVar`s, which the `Unpack` is \'pulling out\' directly into the\n # `Generic[]`.)\n class Bar(Generic[Unpack[Ts]]): ...\n Bar[int] # Valid\n Bar[int, str] # Also 
valid\n\n From Python 3.11, this can also be done using the `*` operator::\n\n Foo[*tuple[int, str]]\n class Bar(Generic[*Ts]): ...\n\n And from Python 3.12, it can be done using built-in syntax for generics::\n\n Foo[*tuple[int, str]]\n class Bar[*Ts]: ...\n\n The operator can also be used along with a `TypedDict` to annotate\n `**kwargs` in a function signature::\n\n class Movie(TypedDict):\n name: str\n year: int\n\n # This function expects two keyword arguments - *name* of type `str` and\n # *year* of type `int`.\n def foo(**kwargs: Unpack[Movie]): ...\n\n Note that there is only some runtime checking of this operator. Not\n everything the runtime allows may be accepted by static type checkers.\n\n For more information, see PEPs 646 and 692.\n """\n item = _type_check(parameters, f\'{self} accepts only single type.\')\n return _UnpackGenericAlias(origin=self, args=(item,))\n\n\nclass _UnpackGenericAlias(_GenericAlias, _root=True):\n def __repr__(self):\n # `Unpack` only takes one argument, so __args__ should contain only\n # a single item.\n return f\'typing.Unpack[{_type_repr(self.__args__[0])}]\'\n\n def __getitem__(self, args):\n if self.__typing_is_unpacked_typevartuple__:\n return args\n return super().__getitem__(args)\n\n @property\n def __typing_unpacked_tuple_args__(self):\n assert self.__origin__ is Unpack\n assert len(self.__args__) == 1\n arg, = self.__args__\n if isinstance(arg, _GenericAlias):\n assert arg.__origin__ is tuple\n return arg.__args__\n return None\n\n @property\n def __typing_is_unpacked_typevartuple__(self):\n assert self.__origin__ is Unpack\n assert len(self.__args__) == 1\n return isinstance(self.__args__[0], TypeVarTuple)\n\n\nclass _TypingEllipsis:\n """Internal placeholder for ... (ellipsis)."""\n\n\n_TYPING_INTERNALS = frozenset({\n \'__parameters__\', \'__orig_bases__\', \'__orig_class__\',\n \'_is_protocol\', \'_is_runtime_protocol\', \'__protocol_attrs__\',\n \'__non_callable_proto_members__\', \'__type_params__\',\n})\n\n_SPECIAL_NAMES = frozenset({\n \'__abstractmethods__\', \'__annotations__\', \'__dict__\', \'__doc__\',\n \'__init__\', \'__module__\', \'__new__\', \'__slots__\',\n \'__subclasshook__\', \'__weakref__\', \'__class_getitem__\'\n})\n\n# These special attributes will be not collected as protocol members.\nEXCLUDED_ATTRIBUTES = _TYPING_INTERNALS | _SPECIAL_NAMES | {\'_MutableMapping__marker\'}\n\n\ndef _get_protocol_attrs(cls):\n """Collect protocol members from a protocol class objects.\n\n This includes names actually defined in the class dictionary, as well\n as names that appear in annotations. Special names (above) are skipped.\n """\n attrs = set()\n for base in cls.__mro__[:-1]: # without object\n if base.__name__ in {\'Protocol\', \'Generic\'}:\n continue\n annotations = getattr(base, \'__annotations__\', {})\n for attr in (*base.__dict__, *annotations):\n if not attr.startswith(\'_abc_\') and attr not in EXCLUDED_ATTRIBUTES:\n attrs.add(attr)\n return attrs\n\n\ndef _no_init_or_replace_init(self, *args, **kwargs):\n cls = type(self)\n\n if cls._is_protocol:\n raise TypeError(\'Protocols cannot be instantiated\')\n\n # Already using a custom `__init__`. No need to calculate correct\n # `__init__` to call. This can lead to RecursionError. 
See bpo-45121.\n if cls.__init__ is not _no_init_or_replace_init:\n return\n\n # Initially, `__init__` of a protocol subclass is set to `_no_init_or_replace_init`.\n # The first instantiation of the subclass will call `_no_init_or_replace_init` which\n # searches for a proper new `__init__` in the MRO. The new `__init__`\n # replaces the subclass\' old `__init__` (ie `_no_init_or_replace_init`). Subsequent\n # instantiation of the protocol subclass will thus use the new\n # `__init__` and no longer call `_no_init_or_replace_init`.\n for base in cls.__mro__:\n init = base.__dict__.get(\'__init__\', _no_init_or_replace_init)\n if init is not _no_init_or_replace_init:\n cls.__init__ = init\n break\n else:\n # should not happen\n cls.__init__ = object.__init__\n\n cls.__init__(self, *args, **kwargs)\n\n\ndef _caller(depth=1, default=\'__main__\'):\n try:\n return sys._getframemodulename(depth + 1) or default\n except AttributeError: # For platforms without _getframemodulename()\n pass\n try:\n return sys._getframe(depth + 1).f_globals.get(\'__name__\', default)\n except (AttributeError, ValueError): # For platforms without _getframe()\n pass\n return None\n\ndef _allow_reckless_class_checks(depth=2):\n """Allow instance and class checks for special stdlib modules.\n\n The abc and functools modules indiscriminately call isinstance() and\n issubclass() on the whole MRO of a user class, which may contain protocols.\n """\n return _caller(depth) in {\'abc\', \'functools\', None}\n\n\n_PROTO_ALLOWLIST = {\n \'collections.abc\': [\n \'Callable\', \'Awaitable\', \'Iterable\', \'Iterator\', \'AsyncIterable\',\n \'Hashable\', \'Sized\', \'Container\', \'Collection\', \'Reversible\', \'Buffer\',\n ],\n \'contextlib\': [\'AbstractContextManager\', \'AbstractAsyncContextManager\'],\n}\n\n\[email protected]\ndef _lazy_load_getattr_static():\n # Import getattr_static lazily so as not to slow down the import of typing.py\n # Cache the result so we don\'t slow down _ProtocolMeta.__instancecheck__ unnecessarily\n from inspect import getattr_static\n return getattr_static\n\n\n_cleanups.append(_lazy_load_getattr_static.cache_clear)\n\ndef _pickle_psargs(psargs):\n return ParamSpecArgs, (psargs.__origin__,)\n\ncopyreg.pickle(ParamSpecArgs, _pickle_psargs)\n\ndef _pickle_pskwargs(pskwargs):\n return ParamSpecKwargs, (pskwargs.__origin__,)\n\ncopyreg.pickle(ParamSpecKwargs, _pickle_pskwargs)\n\ndel _pickle_psargs, _pickle_pskwargs\n\n\nclass _ProtocolMeta(ABCMeta):\n # This metaclass is somewhat unfortunate,\n # but is necessary for several reasons...\n def __new__(mcls, name, bases, namespace, /, **kwargs):\n if name == "Protocol" and bases == (Generic,):\n pass\n elif Protocol in bases:\n for base in bases:\n if not (\n base in {object, Generic}\n or base.__name__ in _PROTO_ALLOWLIST.get(base.__module__, [])\n or (\n issubclass(base, Generic)\n and getattr(base, "_is_protocol", False)\n )\n ):\n raise TypeError(\n f"Protocols can only inherit from other protocols, "\n f"got {base!r}"\n )\n return super().__new__(mcls, name, bases, namespace, **kwargs)\n\n def __init__(cls, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if getattr(cls, "_is_protocol", False):\n cls.__protocol_attrs__ = _get_protocol_attrs(cls)\n\n def __subclasscheck__(cls, other):\n if cls is Protocol:\n return type.__subclasscheck__(cls, other)\n if (\n getattr(cls, \'_is_protocol\', False)\n and not _allow_reckless_class_checks()\n ):\n if not isinstance(other, type):\n # Same error message as for issubclass(1, int).\n raise 
TypeError(\'issubclass() arg 1 must be a class\')\n if not getattr(cls, \'_is_runtime_protocol\', False):\n raise TypeError(\n "Instance and class checks can only be used with "\n "@runtime_checkable protocols"\n )\n if (\n # this attribute is set by @runtime_checkable:\n cls.__non_callable_proto_members__\n and cls.__dict__.get("__subclasshook__") is _proto_hook\n ):\n raise TypeError(\n "Protocols with non-method members don\'t support issubclass()"\n )\n return super().__subclasscheck__(other)\n\n def __instancecheck__(cls, instance):\n # We need this method for situations where attributes are\n # assigned in __init__.\n if cls is Protocol:\n return type.__instancecheck__(cls, instance)\n if not getattr(cls, "_is_protocol", False):\n # i.e., it\'s a concrete subclass of a protocol\n return super().__instancecheck__(instance)\n\n if (\n not getattr(cls, \'_is_runtime_protocol\', False) and\n not _allow_reckless_class_checks()\n ):\n raise TypeError("Instance and class checks can only be used with"\n " @runtime_checkable protocols")\n\n if super().__instancecheck__(instance):\n return True\n\n getattr_static = _lazy_load_getattr_static()\n for attr in cls.__protocol_attrs__:\n try:\n val = getattr_static(instance, attr)\n except AttributeError:\n break\n # this attribute is set by @runtime_checkable:\n if val is None and attr not in cls.__non_callable_proto_members__:\n break\n else:\n return True\n\n return False\n\n\n@classmethod\ndef _proto_hook(cls, other):\n if not cls.__dict__.get(\'_is_protocol\', False):\n return NotImplemented\n\n for attr in cls.__protocol_attrs__:\n for base in other.__mro__:\n # Check if the members appears in the class dictionary...\n if attr in base.__dict__:\n if base.__dict__[attr] is None:\n return NotImplemented\n break\n\n # ...or in annotations, if it is a sub-protocol.\n annotations = getattr(base, \'__annotations__\', {})\n if (isinstance(annotations, collections.abc.Mapping) and\n attr in annotations and\n issubclass(other, Generic) and getattr(other, \'_is_protocol\', False)):\n break\n else:\n return NotImplemented\n return True\n\n\nclass Protocol(Generic, metaclass=_ProtocolMeta):\n """Base class for protocol classes.\n\n Protocol classes are defined as::\n\n class Proto(Protocol):\n def meth(self) -> int:\n ...\n\n Such classes are primarily used with static type checkers that recognize\n structural subtyping (static duck-typing).\n\n For example::\n\n class C:\n def meth(self) -> int:\n return 0\n\n def func(x: Proto) -> int:\n return x.meth()\n\n func(C()) # Passes static type check\n\n See PEP 544 for details. 
Protocol classes decorated with\n @typing.runtime_checkable act as simple-minded runtime protocols that check\n only the presence of given attributes, ignoring their type signatures.\n Protocol classes can be generic, they are defined as::\n\n class GenProto[T](Protocol):\n def meth(self) -> T:\n ...\n """\n\n __slots__ = ()\n _is_protocol = True\n _is_runtime_protocol = False\n\n def __init_subclass__(cls, *args, **kwargs):\n super().__init_subclass__(*args, **kwargs)\n\n # Determine if this is a protocol or a concrete subclass.\n if not cls.__dict__.get(\'_is_protocol\', False):\n cls._is_protocol = any(b is Protocol for b in cls.__bases__)\n\n # Set (or override) the protocol subclass hook.\n if \'__subclasshook__\' not in cls.__dict__:\n cls.__subclasshook__ = _proto_hook\n\n # Prohibit instantiation for protocol classes\n if cls._is_protocol and cls.__init__ is Protocol.__init__:\n cls.__init__ = _no_init_or_replace_init\n\n\nclass _AnnotatedAlias(_NotIterable, _GenericAlias, _root=True):\n """Runtime representation of an annotated type.\n\n At its core \'Annotated[t, dec1, dec2, ...]\' is an alias for the type \'t\'\n with extra annotations. The alias behaves like a normal typing alias.\n Instantiating is the same as instantiating the underlying type; binding\n it to types is also the same.\n\n The metadata itself is stored in a \'__metadata__\' attribute as a tuple.\n """\n\n def __init__(self, origin, metadata):\n if isinstance(origin, _AnnotatedAlias):\n metadata = origin.__metadata__ + metadata\n origin = origin.__origin__\n super().__init__(origin, origin, name=\'Annotated\')\n self.__metadata__ = metadata\n\n def copy_with(self, params):\n assert len(params) == 1\n new_type = params[0]\n return _AnnotatedAlias(new_type, self.__metadata__)\n\n def __repr__(self):\n return "typing.Annotated[{}, {}]".format(\n _type_repr(self.__origin__),\n ", ".join(repr(a) for a in self.__metadata__)\n )\n\n def __reduce__(self):\n return operator.getitem, (\n Annotated, (self.__origin__,) + self.__metadata__\n )\n\n def __eq__(self, other):\n if not isinstance(other, _AnnotatedAlias):\n return NotImplemented\n return (self.__origin__ == other.__origin__\n and self.__metadata__ == other.__metadata__)\n\n def __hash__(self):\n return hash((self.__origin__, self.__metadata__))\n\n def __getattr__(self, attr):\n if attr in {\'__name__\', \'__qualname__\'}:\n return \'Annotated\'\n return super().__getattr__(attr)\n\n def __mro_entries__(self, bases):\n return (self.__origin__,)\n\n\nclass Annotated:\n """Add context-specific metadata to a type.\n\n Example: Annotated[int, runtime_check.Unsigned] indicates to the\n hypothetical runtime_check module that this type is an unsigned int.\n Every other consumer of this type can ignore this metadata and treat\n this type as int.\n\n The first argument to Annotated must be a valid type.\n\n Details:\n\n - It\'s an error to call `Annotated` with less than two arguments.\n - Access the metadata via the ``__metadata__`` attribute::\n\n assert Annotated[int, \'$\'].__metadata__ == (\'$\',)\n\n - Nested Annotated types are flattened::\n\n assert Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]\n\n - Instantiating an annotated type is equivalent to instantiating the\n underlying type::\n\n assert Annotated[C, Ann1](5) == C(5)\n\n - Annotated can be used as a generic type alias::\n\n type Optimized[T] = Annotated[T, runtime.Optimize()]\n # type checker will treat Optimized[int]\n # as equivalent to Annotated[int, runtime.Optimize()]\n\n 
type OptimizedList[T] = Annotated[list[T], runtime.Optimize()]\n # type checker will treat OptimizedList[int]\n # as equivalent to Annotated[list[int], runtime.Optimize()]\n\n - Annotated cannot be used with an unpacked TypeVarTuple::\n\n type Variadic[*Ts] = Annotated[*Ts, Ann1] # NOT valid\n\n This would be equivalent to::\n\n Annotated[T1, T2, T3, ..., Ann1]\n\n where T1, T2 etc. are TypeVars, which would be invalid, because\n only one type should be passed to Annotated.\n """\n\n __slots__ = ()\n\n def __new__(cls, *args, **kwargs):\n raise TypeError("Type Annotated cannot be instantiated.")\n\n def __class_getitem__(cls, params):\n if not isinstance(params, tuple):\n params = (params,)\n return cls._class_getitem_inner(cls, *params)\n\n @_tp_cache(typed=True)\n def _class_getitem_inner(cls, *params):\n if len(params) < 2:\n raise TypeError("Annotated[...] should be used "\n "with at least two arguments (a type and an "\n "annotation).")\n if _is_unpacked_typevartuple(params[0]):\n raise TypeError("Annotated[...] should not be used with an "\n "unpacked TypeVarTuple")\n msg = "Annotated[t, ...]: t must be a type."\n origin = _type_check(params[0], msg, allow_special_forms=True)\n metadata = tuple(params[1:])\n return _AnnotatedAlias(origin, metadata)\n\n def __init_subclass__(cls, *args, **kwargs):\n raise TypeError(\n "Cannot subclass {}.Annotated".format(cls.__module__)\n )\n\n\ndef runtime_checkable(cls):\n """Mark a protocol class as a runtime protocol.\n\n Such protocol can be used with isinstance() and issubclass().\n Raise TypeError if applied to a non-protocol class.\n This allows a simple-minded structural check very similar to\n one trick ponies in collections.abc such as Iterable.\n\n For example::\n\n @runtime_checkable\n class Closable(Protocol):\n def close(self): ...\n\n assert isinstance(open(\'/some/file\'), Closable)\n\n Warning: this will check only the presence of the required methods,\n not their type signatures!\n """\n if not issubclass(cls, Generic) or not getattr(cls, \'_is_protocol\', False):\n raise TypeError(\'@runtime_checkable can be only applied to protocol classes,\'\n \' got %r\' % cls)\n cls._is_runtime_protocol = True\n # PEP 544 prohibits using issubclass()\n # with protocols that have non-method members.\n # See gh-113320 for why we compute this attribute here,\n # rather than in `_ProtocolMeta.__init__`\n cls.__non_callable_proto_members__ = set()\n for attr in cls.__protocol_attrs__:\n try:\n is_callable = callable(getattr(cls, attr, None))\n except Exception as e:\n raise TypeError(\n f"Failed to determine whether protocol member {attr!r} "\n "is a method member"\n ) from e\n else:\n if not is_callable:\n cls.__non_callable_proto_members__.add(attr)\n return cls\n\n\ndef cast(typ, val):\n """Cast a value to a type.\n\n This returns the value unchanged. 
To the type checker this\n signals that the return value has the designated type, but at\n runtime we intentionally don\'t check anything (we want this\n to be as fast as possible).\n """\n return val\n\n\ndef assert_type(val, typ, /):\n """Ask a static type checker to confirm that the value is of the given type.\n\n At runtime this does nothing: it returns the first argument unchanged with no\n checks or side effects, no matter the actual type of the argument.\n\n When a static type checker encounters a call to assert_type(), it\n emits an error if the value is not of the specified type::\n\n def greet(name: str) -> None:\n assert_type(name, str) # OK\n assert_type(name, int) # type checker error\n """\n return val\n\n\n_allowed_types = (types.FunctionType, types.BuiltinFunctionType,\n types.MethodType, types.ModuleType,\n WrapperDescriptorType, MethodWrapperType, MethodDescriptorType)\n\n\ndef get_type_hints(obj, globalns=None, localns=None, include_extras=False):\n """Return type hints for an object.\n\n This is often the same as obj.__annotations__, but it handles\n forward references encoded as string literals and recursively replaces all\n \'Annotated[T, ...]\' with \'T\' (unless \'include_extras=True\').\n\n The argument may be a module, class, method, or function. The annotations\n are returned as a dictionary. For classes, annotations include also\n inherited members.\n\n TypeError is raised if the argument is not of a type that can contain\n annotations, and an empty dictionary is returned if no annotations are\n present.\n\n BEWARE -- the behavior of globalns and localns is counterintuitive\n (unless you are familiar with how eval() and exec() work). The\n search order is locals first, then globals.\n\n - If no dict arguments are passed, an attempt is made to use the\n globals from obj (or the respective module\'s globals for classes),\n and these are also used as the locals. If the object does not appear\n to have globals, an empty dictionary is used. For classes, the search\n order is globals first then locals.\n\n - If one dict argument is passed, it is used for both globals and\n locals.\n\n - If two dict arguments are passed, they specify globals and\n locals, respectively.\n """\n if getattr(obj, \'__no_type_check__\', None):\n return {}\n # Classes require a special treatment.\n if isinstance(obj, type):\n hints = {}\n for base in reversed(obj.__mro__):\n if globalns is None:\n base_globals = getattr(sys.modules.get(base.__module__, None), \'__dict__\', {})\n else:\n base_globals = globalns\n ann = base.__dict__.get(\'__annotations__\', {})\n if isinstance(ann, types.GetSetDescriptorType):\n ann = {}\n base_locals = dict(vars(base)) if localns is None else localns\n if localns is None and globalns is None:\n # This is surprising, but required. Before Python 3.10,\n # get_type_hints only evaluated the globalns of\n # a class. 
To maintain backwards compatibility, we reverse\n # the globalns and localns order so that eval() looks into\n # *base_globals* first rather than *base_locals*.\n # This only affects ForwardRefs.\n base_globals, base_locals = base_locals, base_globals\n for name, value in ann.items():\n if value is None:\n value = type(None)\n if isinstance(value, str):\n value = ForwardRef(value, is_argument=False, is_class=True)\n value = _eval_type(value, base_globals, base_locals)\n hints[name] = value\n return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}\n\n if globalns is None:\n if isinstance(obj, types.ModuleType):\n globalns = obj.__dict__\n else:\n nsobj = obj\n # Find globalns for the unwrapped object.\n while hasattr(nsobj, \'__wrapped__\'):\n nsobj = nsobj.__wrapped__\n globalns = getattr(nsobj, \'__globals__\', {})\n if localns is None:\n localns = globalns\n elif localns is None:\n localns = globalns\n hints = getattr(obj, \'__annotations__\', None)\n if hints is None:\n # Return empty annotations for something that _could_ have them.\n if isinstance(obj, _allowed_types):\n return {}\n else:\n raise TypeError(\'{!r} is not a module, class, method, \'\n \'or function.\'.format(obj))\n hints = dict(hints)\n for name, value in hints.items():\n if value is None:\n value = type(None)\n if isinstance(value, str):\n # class-level forward refs were handled above, this must be either\n # a module-level annotation or a function argument annotation\n value = ForwardRef(\n value,\n is_argument=not isinstance(obj, types.ModuleType),\n is_class=False,\n )\n hints[name] = _eval_type(value, globalns, localns)\n return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}\n\n\ndef _strip_annotations(t):\n """Strip the annotations from a given type."""\n if isinstance(t, _AnnotatedAlias):\n return _strip_annotations(t.__origin__)\n if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):\n return _strip_annotations(t.__args__[0])\n if isinstance(t, _GenericAlias):\n stripped_args = tuple(_strip_annotations(a) for a in t.__args__)\n if stripped_args == t.__args__:\n return t\n return t.copy_with(stripped_args)\n if isinstance(t, GenericAlias):\n stripped_args = tuple(_strip_annotations(a) for a in t.__args__)\n if stripped_args == t.__args__:\n return t\n return GenericAlias(t.__origin__, stripped_args)\n if isinstance(t, types.UnionType):\n stripped_args = tuple(_strip_annotations(a) for a in t.__args__)\n if stripped_args == t.__args__:\n return t\n return functools.reduce(operator.or_, stripped_args)\n\n return t\n\n\ndef get_origin(tp):\n """Get the unsubscripted version of a type.\n\n This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar,\n Annotated, and others. 
Return None for unsupported types.\n\n Examples::\n\n >>> P = ParamSpec(\'P\')\n >>> assert get_origin(Literal[42]) is Literal\n >>> assert get_origin(int) is None\n >>> assert get_origin(ClassVar[int]) is ClassVar\n >>> assert get_origin(Generic) is Generic\n >>> assert get_origin(Generic[T]) is Generic\n >>> assert get_origin(Union[T, int]) is Union\n >>> assert get_origin(List[Tuple[T, T]][int]) is list\n >>> assert get_origin(P.args) is P\n """\n if isinstance(tp, _AnnotatedAlias):\n return Annotated\n if isinstance(tp, (_BaseGenericAlias, GenericAlias,\n ParamSpecArgs, ParamSpecKwargs)):\n return tp.__origin__\n if tp is Generic:\n return Generic\n if isinstance(tp, types.UnionType):\n return types.UnionType\n return None\n\n\ndef get_args(tp):\n """Get type arguments with all substitutions performed.\n\n For unions, basic simplifications used by Union constructor are performed.\n\n Examples::\n\n >>> T = TypeVar(\'T\')\n >>> assert get_args(Dict[str, int]) == (str, int)\n >>> assert get_args(int) == ()\n >>> assert get_args(Union[int, Union[T, int], str][int]) == (int, str)\n >>> assert get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])\n >>> assert get_args(Callable[[], T][int]) == ([], int)\n """\n if isinstance(tp, _AnnotatedAlias):\n return (tp.__origin__,) + tp.__metadata__\n if isinstance(tp, (_GenericAlias, GenericAlias)):\n res = tp.__args__\n if _should_unflatten_callable_args(tp, res):\n res = (list(res[:-1]), res[-1])\n return res\n if isinstance(tp, types.UnionType):\n return tp.__args__\n return ()\n\n\ndef is_typeddict(tp):\n """Check if an annotation is a TypedDict class.\n\n For example::\n\n >>> from typing import TypedDict\n >>> class Film(TypedDict):\n ... title: str\n ... year: int\n ...\n >>> is_typeddict(Film)\n True\n >>> is_typeddict(dict)\n False\n """\n return isinstance(tp, _TypedDictMeta)\n\n\n_ASSERT_NEVER_REPR_MAX_LENGTH = 100\n\n\ndef assert_never(arg: Never, /) -> Never:\n """Statically assert that a line of code is unreachable.\n\n Example::\n\n def int_or_str(arg: int | str) -> None:\n match arg:\n case int():\n print("It\'s an int")\n case str():\n print("It\'s a str")\n case _:\n assert_never(arg)\n\n If a type checker finds that a call to assert_never() is\n reachable, it will emit an error.\n\n At runtime, this throws an exception when called.\n """\n value = repr(arg)\n if len(value) > _ASSERT_NEVER_REPR_MAX_LENGTH:\n value = value[:_ASSERT_NEVER_REPR_MAX_LENGTH] + \'...\'\n raise AssertionError(f"Expected code to be unreachable, but got: {value}")\n\n\ndef no_type_check(arg):\n """Decorator to indicate that annotations are not type hints.\n\n The argument must be a class or function; if it is a class, it\n applies recursively to all methods and classes defined in that class\n (but not to methods defined in its superclasses or subclasses).\n\n This mutates the function(s) or class(es) in place.\n """\n if isinstance(arg, type):\n for key in dir(arg):\n obj = getattr(arg, key)\n if (\n not hasattr(obj, \'__qualname__\')\n or obj.__qualname__ != f\'{arg.__qualname__}.{obj.__name__}\'\n or getattr(obj, \'__module__\', None) != arg.__module__\n ):\n # We only modify objects that are defined in this type directly.\n # If classes / methods are nested in multiple layers,\n # we will modify them when processing their direct holders.\n continue\n # Instance, class, and static methods:\n if isinstance(obj, types.FunctionType):\n obj.__no_type_check__ = True\n if isinstance(obj, types.MethodType):\n obj.__func__.__no_type_check__ = True\n # 
Nested types:\n if isinstance(obj, type):\n no_type_check(obj)\n try:\n arg.__no_type_check__ = True\n except TypeError: # built-in classes\n pass\n return arg\n\n\ndef no_type_check_decorator(decorator):\n """Decorator to give another decorator the @no_type_check effect.\n\n This wraps the decorator with something that wraps the decorated\n function in @no_type_check.\n """\n @functools.wraps(decorator)\n def wrapped_decorator(*args, **kwds):\n func = decorator(*args, **kwds)\n func = no_type_check(func)\n return func\n\n return wrapped_decorator\n\n\ndef _overload_dummy(*args, **kwds):\n """Helper for @overload to raise when called."""\n raise NotImplementedError(\n "You should not call an overloaded function. "\n "A series of @overload-decorated functions "\n "outside a stub module should always be followed "\n "by an implementation that is not @overload-ed.")\n\n\n# {module: {qualname: {firstlineno: func}}}\n_overload_registry = defaultdict(functools.partial(defaultdict, dict))\n\n\ndef overload(func):\n """Decorator for overloaded functions/methods.\n\n In a stub file, place two or more stub definitions for the same\n function in a row, each decorated with @overload.\n\n For example::\n\n @overload\n def utf8(value: None) -> None: ...\n @overload\n def utf8(value: bytes) -> bytes: ...\n @overload\n def utf8(value: str) -> bytes: ...\n\n In a non-stub file (i.e. a regular .py file), do the same but\n follow it with an implementation. The implementation should *not*\n be decorated with @overload::\n\n @overload\n def utf8(value: None) -> None: ...\n @overload\n def utf8(value: bytes) -> bytes: ...\n @overload\n def utf8(value: str) -> bytes: ...\n def utf8(value):\n ... # implementation goes here\n\n The overloads for a function can be retrieved at runtime using the\n get_overloads() function.\n """\n # classmethod and staticmethod\n f = getattr(func, "__func__", func)\n try:\n _overload_registry[f.__module__][f.__qualname__][f.__code__.co_firstlineno] = func\n except AttributeError:\n # Not a normal function; ignore.\n pass\n return _overload_dummy\n\n\ndef get_overloads(func):\n """Return all defined overloads for *func* as a sequence."""\n # classmethod and staticmethod\n f = getattr(func, "__func__", func)\n if f.__module__ not in _overload_registry:\n return []\n mod_dict = _overload_registry[f.__module__]\n if f.__qualname__ not in mod_dict:\n return []\n return list(mod_dict[f.__qualname__].values())\n\n\ndef clear_overloads():\n """Clear all overloads in the registry."""\n _overload_registry.clear()\n\n\ndef final(f):\n """Decorator to indicate final methods and final classes.\n\n Use this decorator to indicate to type checkers that the decorated\n method cannot be overridden, and decorated class cannot be subclassed.\n\n For example::\n\n class Base:\n @final\n def done(self) -> None:\n ...\n class Sub(Base):\n def done(self) -> None: # Error reported by type checker\n ...\n\n @final\n class Leaf:\n ...\n class Other(Leaf): # Error reported by type checker\n ...\n\n There is no runtime checking of these properties. The decorator\n attempts to set the ``__final__`` attribute to ``True`` on the decorated\n object to allow runtime introspection.\n """\n try:\n f.__final__ = True\n except (AttributeError, TypeError):\n # Skip the attribute silently if it is not writable.\n # AttributeError happens if the object has __slots__ or a\n # read-only property, TypeError if it\'s a builtin class.\n pass\n return f\n\n\n# Some unconstrained type variables. 
These were initially used by the container types.\n# They were never meant for export and are now unused, but we keep them around to\n# avoid breaking compatibility with users who import them.\nT = TypeVar(\'T\') # Any type.\nKT = TypeVar(\'KT\') # Key type.\nVT = TypeVar(\'VT\') # Value type.\nT_co = TypeVar(\'T_co\', covariant=True) # Any type covariant containers.\nV_co = TypeVar(\'V_co\', covariant=True) # Any type covariant containers.\nVT_co = TypeVar(\'VT_co\', covariant=True) # Value type covariant containers.\nT_contra = TypeVar(\'T_contra\', contravariant=True) # Ditto contravariant.\n# Internal type variable used for Type[].\nCT_co = TypeVar(\'CT_co\', covariant=True, bound=type)\n\n\n# A useful type variable with constraints. This represents string types.\n# (This one *is* for export!)\nAnyStr = TypeVar(\'AnyStr\', bytes, str)\n\n\n# Various ABCs mimicking those in collections.abc.\n_alias = _SpecialGenericAlias\n\nHashable = _alias(collections.abc.Hashable, 0) # Not generic.\nAwaitable = _alias(collections.abc.Awaitable, 1)\nCoroutine = _alias(collections.abc.Coroutine, 3)\nAsyncIterable = _alias(collections.abc.AsyncIterable, 1)\nAsyncIterator = _alias(collections.abc.AsyncIterator, 1)\nIterable = _alias(collections.abc.Iterable, 1)\nIterator = _alias(collections.abc.Iterator, 1)\nReversible = _alias(collections.abc.Reversible, 1)\nSized = _alias(collections.abc.Sized, 0) # Not generic.\nContainer = _alias(collections.abc.Container, 1)\nCollection = _alias(collections.abc.Collection, 1)\nCallable = _CallableType(collections.abc.Callable, 2)\nCallable.__doc__ = \\\n """Deprecated alias to collections.abc.Callable.\n\n Callable[[int], str] signifies a function that takes a single\n parameter of type int and returns a str.\n\n The subscription syntax must always be used with exactly two\n values: the argument list and the return type.\n The argument list must be a list of types, a ParamSpec,\n Concatenate or ellipsis. The return type must be a single type.\n\n There is no syntax to indicate optional or keyword arguments;\n such function types are rarely used as callback types.\n """\nAbstractSet = _alias(collections.abc.Set, 1, name=\'AbstractSet\')\nMutableSet = _alias(collections.abc.MutableSet, 1)\n# NOTE: Mapping is only covariant in the value type.\nMapping = _alias(collections.abc.Mapping, 2)\nMutableMapping = _alias(collections.abc.MutableMapping, 2)\nSequence = _alias(collections.abc.Sequence, 1)\nMutableSequence = _alias(collections.abc.MutableSequence, 1)\nByteString = _DeprecatedGenericAlias(\n collections.abc.ByteString, 0, removal_version=(3, 14) # Not generic.\n)\n# Tuple accepts variable number of parameters.\nTuple = _TupleType(tuple, -1, inst=False, name=\'Tuple\')\nTuple.__doc__ = \\\n """Deprecated alias to builtins.tuple.\n\n Tuple[X, Y] is the cross-product type of X and Y.\n\n Example: Tuple[T1, T2] is a tuple of two elements corresponding\n to type variables T1 and T2. 
Tuple[int, float, str] is a tuple\n of an int, a float and a string.\n\n To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].\n """\nList = _alias(list, 1, inst=False, name=\'List\')\nDeque = _alias(collections.deque, 1, name=\'Deque\')\nSet = _alias(set, 1, inst=False, name=\'Set\')\nFrozenSet = _alias(frozenset, 1, inst=False, name=\'FrozenSet\')\nMappingView = _alias(collections.abc.MappingView, 1)\nKeysView = _alias(collections.abc.KeysView, 1)\nItemsView = _alias(collections.abc.ItemsView, 2)\nValuesView = _alias(collections.abc.ValuesView, 1)\nContextManager = _alias(contextlib.AbstractContextManager, 1, name=\'ContextManager\')\nAsyncContextManager = _alias(contextlib.AbstractAsyncContextManager, 1, name=\'AsyncContextManager\')\nDict = _alias(dict, 2, inst=False, name=\'Dict\')\nDefaultDict = _alias(collections.defaultdict, 2, name=\'DefaultDict\')\nOrderedDict = _alias(collections.OrderedDict, 2)\nCounter = _alias(collections.Counter, 1)\nChainMap = _alias(collections.ChainMap, 2)\nGenerator = _alias(collections.abc.Generator, 3)\nAsyncGenerator = _alias(collections.abc.AsyncGenerator, 2)\nType = _alias(type, 1, inst=False, name=\'Type\')\nType.__doc__ = \\\n """Deprecated alias to builtins.type.\n\n builtins.type or typing.Type can be used to annotate class objects.\n For example, suppose we have the following classes::\n\n class User: ... # Abstract base for User classes\n class BasicUser(User): ...\n class ProUser(User): ...\n class TeamUser(User): ...\n\n And a function that takes a class argument that\'s a subclass of\n User and returns an instance of the corresponding class::\n\n def new_user[U](user_class: Type[U]) -> U:\n user = user_class()\n # (Here we could write the user object to a database)\n return user\n\n joe = new_user(BasicUser)\n\n At this point the type checker knows that joe has type BasicUser.\n """\n\n\n@runtime_checkable\nclass SupportsInt(Protocol):\n """An ABC with one abstract method __int__."""\n\n __slots__ = ()\n\n @abstractmethod\n def __int__(self) -> int:\n pass\n\n\n@runtime_checkable\nclass SupportsFloat(Protocol):\n """An ABC with one abstract method __float__."""\n\n __slots__ = ()\n\n @abstractmethod\n def __float__(self) -> float:\n pass\n\n\n@runtime_checkable\nclass SupportsComplex(Protocol):\n """An ABC with one abstract method __complex__."""\n\n __slots__ = ()\n\n @abstractmethod\n def __complex__(self) -> complex:\n pass\n\n\n@runtime_checkable\nclass SupportsBytes(Protocol):\n """An ABC with one abstract method __bytes__."""\n\n __slots__ = ()\n\n @abstractmethod\n def __bytes__(self) -> bytes:\n pass\n\n\n@runtime_checkable\nclass SupportsIndex(Protocol):\n """An ABC with one abstract method __index__."""\n\n __slots__ = ()\n\n @abstractmethod\n def __index__(self) -> int:\n pass\n\n\n@runtime_checkable\nclass SupportsAbs[T](Protocol):\n """An ABC with one abstract method __abs__ that is covariant in its return type."""\n\n __slots__ = ()\n\n @abstractmethod\n def __abs__(self) -> T:\n pass\n\n\n@runtime_checkable\nclass SupportsRound[T](Protocol):\n """An ABC with one abstract method __round__ that is covariant in its return type."""\n\n __slots__ = ()\n\n @abstractmethod\n def __round__(self, ndigits: int = 0) -> T:\n pass\n\n\ndef _make_nmtuple(name, types, module, defaults = ()):\n fields = [n for n, t in types]\n types = {n: _type_check(t, f"field {n} annotation must be a type")\n for n, t in types}\n nm_tpl = collections.namedtuple(name, fields,\n defaults=defaults, module=module)\n nm_tpl.__annotations__ 
= nm_tpl.__new__.__annotations__ = types\n return nm_tpl\n\n\n# attributes prohibited to set in NamedTuple class syntax\n_prohibited = frozenset({\'__new__\', \'__init__\', \'__slots__\', \'__getnewargs__\',\n \'_fields\', \'_field_defaults\',\n \'_make\', \'_replace\', \'_asdict\', \'_source\'})\n\n_special = frozenset({\'__module__\', \'__name__\', \'__annotations__\'})\n\n\nclass NamedTupleMeta(type):\n def __new__(cls, typename, bases, ns):\n assert _NamedTuple in bases\n for base in bases:\n if base is not _NamedTuple and base is not Generic:\n raise TypeError(\n \'can only inherit from a NamedTuple type and Generic\')\n bases = tuple(tuple if base is _NamedTuple else base for base in bases)\n types = ns.get(\'__annotations__\', {})\n default_names = []\n for field_name in types:\n if field_name in ns:\n default_names.append(field_name)\n elif default_names:\n raise TypeError(f"Non-default namedtuple field {field_name} "\n f"cannot follow default field"\n f"{\'s\' if len(default_names) > 1 else \'\'} "\n f"{\', \'.join(default_names)}")\n nm_tpl = _make_nmtuple(typename, types.items(),\n defaults=[ns[n] for n in default_names],\n module=ns[\'__module__\'])\n nm_tpl.__bases__ = bases\n if Generic in bases:\n class_getitem = _generic_class_getitem\n nm_tpl.__class_getitem__ = classmethod(class_getitem)\n # update from user namespace without overriding special namedtuple attributes\n for key in ns:\n if key in _prohibited:\n raise AttributeError("Cannot overwrite NamedTuple attribute " + key)\n elif key not in _special and key not in nm_tpl._fields:\n setattr(nm_tpl, key, ns[key])\n if Generic in bases:\n nm_tpl.__init_subclass__()\n return nm_tpl\n\n\ndef NamedTuple(typename, fields=None, /, **kwargs):\n """Typed version of namedtuple.\n\n Usage::\n\n class Employee(NamedTuple):\n name: str\n id: int\n\n This is equivalent to::\n\n Employee = collections.namedtuple(\'Employee\', [\'name\', \'id\'])\n\n The resulting class has an extra __annotations__ attribute, giving a\n dict that maps field names to types. (The field names are also in\n the _fields attribute, which is part of the namedtuple API.)\n An alternative equivalent functional syntax is also accepted::\n\n Employee = NamedTuple(\'Employee\', [(\'name\', str), (\'id\', int)])\n """\n if fields is None:\n fields = kwargs.items()\n elif kwargs:\n raise TypeError("Either list of fields or keywords"\n " can be provided to NamedTuple, not both")\n nt = _make_nmtuple(typename, fields, module=_caller())\n nt.__orig_bases__ = (NamedTuple,)\n return nt\n\n_NamedTuple = type.__new__(NamedTupleMeta, \'NamedTuple\', (), {})\n\ndef _namedtuple_mro_entries(bases):\n assert NamedTuple in bases\n return (_NamedTuple,)\n\nNamedTuple.__mro_entries__ = _namedtuple_mro_entries\n\n\nclass _TypedDictMeta(type):\n def __new__(cls, name, bases, ns, total=True):\n """Create a new typed dict class object.\n\n This method is called when TypedDict is subclassed,\n or when TypedDict is instantiated. 
This way\n TypedDict supports all three syntax forms described in its docstring.\n Subclasses and instances of TypedDict return actual dictionaries.\n """\n for base in bases:\n if type(base) is not _TypedDictMeta and base is not Generic:\n raise TypeError(\'cannot inherit from both a TypedDict type \'\n \'and a non-TypedDict base class\')\n\n if any(issubclass(b, Generic) for b in bases):\n generic_base = (Generic,)\n else:\n generic_base = ()\n\n tp_dict = type.__new__(_TypedDictMeta, name, (*generic_base, dict), ns)\n\n if not hasattr(tp_dict, \'__orig_bases__\'):\n tp_dict.__orig_bases__ = bases\n\n annotations = {}\n own_annotations = ns.get(\'__annotations__\', {})\n msg = "TypedDict(\'Name\', {f0: t0, f1: t1, ...}); each t must be a type"\n own_annotations = {\n n: _type_check(tp, msg, module=tp_dict.__module__)\n for n, tp in own_annotations.items()\n }\n required_keys = set()\n optional_keys = set()\n\n for base in bases:\n annotations.update(base.__dict__.get(\'__annotations__\', {}))\n\n base_required = base.__dict__.get(\'__required_keys__\', set())\n required_keys |= base_required\n optional_keys -= base_required\n\n base_optional = base.__dict__.get(\'__optional_keys__\', set())\n required_keys -= base_optional\n optional_keys |= base_optional\n\n annotations.update(own_annotations)\n for annotation_key, annotation_type in own_annotations.items():\n annotation_origin = get_origin(annotation_type)\n if annotation_origin is Annotated:\n annotation_args = get_args(annotation_type)\n if annotation_args:\n annotation_type = annotation_args[0]\n annotation_origin = get_origin(annotation_type)\n\n if annotation_origin is Required:\n is_required = True\n elif annotation_origin is NotRequired:\n is_required = False\n else:\n is_required = total\n\n if is_required:\n required_keys.add(annotation_key)\n optional_keys.discard(annotation_key)\n else:\n optional_keys.add(annotation_key)\n required_keys.discard(annotation_key)\n\n assert required_keys.isdisjoint(optional_keys), (\n f"Required keys overlap with optional keys in {name}:"\n f" {required_keys=}, {optional_keys=}"\n )\n tp_dict.__annotations__ = annotations\n tp_dict.__required_keys__ = frozenset(required_keys)\n tp_dict.__optional_keys__ = frozenset(optional_keys)\n if not hasattr(tp_dict, \'__total__\'):\n tp_dict.__total__ = total\n return tp_dict\n\n __call__ = dict # static method\n\n def __subclasscheck__(cls, other):\n # Typed dicts are only for static structural subtyping.\n raise TypeError(\'TypedDict does not support instance and class checks\')\n\n __instancecheck__ = __subclasscheck__\n\n\ndef TypedDict(typename, fields=None, /, *, total=True, **kwargs):\n """A simple typed namespace. At runtime it is equivalent to a plain dict.\n\n TypedDict creates a dictionary type such that a type checker will expect all\n instances to have a certain set of keys, where each key is\n associated with a value of a consistent type. This expectation\n is not checked at runtime.\n\n Usage::\n\n >>> class Point2D(TypedDict):\n ... x: int\n ... y: int\n ... 
label: str\n ...\n >>> a: Point2D = {\'x\': 1, \'y\': 2, \'label\': \'good\'} # OK\n >>> b: Point2D = {\'z\': 3, \'label\': \'bad\'} # Fails type check\n >>> Point2D(x=1, y=2, label=\'first\') == dict(x=1, y=2, label=\'first\')\n True\n\n The type info can be accessed via the Point2D.__annotations__ dict, and\n the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.\n TypedDict supports an additional equivalent form::\n\n Point2D = TypedDict(\'Point2D\', {\'x\': int, \'y\': int, \'label\': str})\n\n By default, all keys must be present in a TypedDict. It is possible\n to override this by specifying totality::\n\n class Point2D(TypedDict, total=False):\n x: int\n y: int\n\n This means that a Point2D TypedDict can have any of the keys omitted. A type\n checker is only expected to support a literal False or True as the value of\n the total argument. True is the default, and makes all items defined in the\n class body be required.\n\n The Required and NotRequired special forms can also be used to mark\n individual keys as being required or not required::\n\n class Point2D(TypedDict):\n x: int # the "x" key must always be present (Required is the default)\n y: NotRequired[int] # the "y" key can be omitted\n\n See PEP 655 for more details on Required and NotRequired.\n """\n if fields is None:\n fields = kwargs\n elif kwargs:\n raise TypeError("TypedDict takes either a dict or keyword arguments,"\n " but not both")\n if kwargs:\n warnings.warn(\n "The kwargs-based syntax for TypedDict definitions is deprecated "\n "in Python 3.11, will be removed in Python 3.13, and may not be "\n "understood by third-party type checkers.",\n DeprecationWarning,\n stacklevel=2,\n )\n\n ns = {\'__annotations__\': dict(fields)}\n module = _caller()\n if module is not None:\n # Setting correct module is necessary to make typed dict classes pickleable.\n ns[\'__module__\'] = module\n\n td = _TypedDictMeta(typename, (), ns, total=total)\n td.__orig_bases__ = (TypedDict,)\n return td\n\n_TypedDict = type.__new__(_TypedDictMeta, \'TypedDict\', (), {})\nTypedDict.__mro_entries__ = lambda bases: (_TypedDict,)\n\n\n@_SpecialForm\ndef Required(self, parameters):\n """Special typing construct to mark a TypedDict key as required.\n\n This is mainly useful for total=False TypedDicts.\n\n For example::\n\n class Movie(TypedDict, total=False):\n title: Required[str]\n year: int\n\n m = Movie(\n title=\'The Matrix\', # typechecker error if key is omitted\n year=1999,\n )\n\n There is no runtime checking that a required key is actually provided\n when instantiating a related TypedDict.\n """\n item = _type_check(parameters, f\'{self._name} accepts only a single type.\')\n return _GenericAlias(self, (item,))\n\n\n@_SpecialForm\ndef NotRequired(self, parameters):\n """Special typing construct to mark a TypedDict key as potentially missing.\n\n For example::\n\n class Movie(TypedDict):\n title: str\n year: NotRequired[int]\n\n m = Movie(\n title=\'The Matrix\', # typechecker error if key is omitted\n year=1999,\n )\n """\n item = _type_check(parameters, f\'{self._name} accepts only a single type.\')\n return _GenericAlias(self, (item,))\n\n\nclass NewType:\n """NewType creates simple unique types with almost zero runtime overhead.\n\n NewType(name, tp) is considered a subtype of tp\n by static type checkers. 
At runtime, NewType(name, tp) returns\n a dummy callable that simply returns its argument.\n\n Usage::\n\n UserId = NewType(\'UserId\', int)\n\n def name_by_id(user_id: UserId) -> str:\n ...\n\n UserId(\'user\') # Fails type check\n\n name_by_id(42) # Fails type check\n name_by_id(UserId(42)) # OK\n\n num = UserId(5) + 1 # type: int\n """\n\n __call__ = _idfunc\n\n def __init__(self, name, tp):\n self.__qualname__ = name\n if \'.\' in name:\n name = name.rpartition(\'.\')[-1]\n self.__name__ = name\n self.__supertype__ = tp\n def_mod = _caller()\n if def_mod != \'typing\':\n self.__module__ = def_mod\n\n def __mro_entries__(self, bases):\n # We defined __mro_entries__ to get a better error message\n # if a user attempts to subclass a NewType instance. bpo-46170\n superclass_name = self.__name__\n\n class Dummy:\n def __init_subclass__(cls):\n subclass_name = cls.__name__\n raise TypeError(\n f"Cannot subclass an instance of NewType. Perhaps you were looking for: "\n f"`{subclass_name} = NewType({subclass_name!r}, {superclass_name})`"\n )\n\n return (Dummy,)\n\n def __repr__(self):\n return f\'{self.__module__}.{self.__qualname__}\'\n\n def __reduce__(self):\n return self.__qualname__\n\n def __or__(self, other):\n return Union[self, other]\n\n def __ror__(self, other):\n return Union[other, self]\n\n\n# Python-version-specific alias (Python 2: unicode; Python 3: str)\nText = str\n\n\n# Constant that\'s True when type checking, but False here.\nTYPE_CHECKING = False\n\n\nclass IO(Generic[AnyStr]):\n """Generic base class for TextIO and BinaryIO.\n\n This is an abstract, generic version of the return of open().\n\n NOTE: This does not distinguish between the different possible\n classes (text vs. binary, read vs. write vs. read/write,\n append-only, unbuffered). The TextIO and BinaryIO subclasses\n below capture the distinctions between text vs. 
binary, which is\n pervasive in the interface; however we currently do not offer a\n way to track the other distinctions in the type system.\n """\n\n __slots__ = ()\n\n @property\n @abstractmethod\n def mode(self) -> str:\n pass\n\n @property\n @abstractmethod\n def name(self) -> str:\n pass\n\n @abstractmethod\n def close(self) -> None:\n pass\n\n @property\n @abstractmethod\n def closed(self) -> bool:\n pass\n\n @abstractmethod\n def fileno(self) -> int:\n pass\n\n @abstractmethod\n def flush(self) -> None:\n pass\n\n @abstractmethod\n def isatty(self) -> bool:\n pass\n\n @abstractmethod\n def read(self, n: int = -1) -> AnyStr:\n pass\n\n @abstractmethod\n def readable(self) -> bool:\n pass\n\n @abstractmethod\n def readline(self, limit: int = -1) -> AnyStr:\n pass\n\n @abstractmethod\n def readlines(self, hint: int = -1) -> List[AnyStr]:\n pass\n\n @abstractmethod\n def seek(self, offset: int, whence: int = 0) -> int:\n pass\n\n @abstractmethod\n def seekable(self) -> bool:\n pass\n\n @abstractmethod\n def tell(self) -> int:\n pass\n\n @abstractmethod\n def truncate(self, size: int = None) -> int:\n pass\n\n @abstractmethod\n def writable(self) -> bool:\n pass\n\n @abstractmethod\n def write(self, s: AnyStr) -> int:\n pass\n\n @abstractmethod\n def writelines(self, lines: List[AnyStr]) -> None:\n pass\n\n @abstractmethod\n def __enter__(self) -> \'IO[AnyStr]\':\n pass\n\n @abstractmethod\n def __exit__(self, type, value, traceback) -> None:\n pass\n\n\nclass BinaryIO(IO[bytes]):\n """Typed version of the return of open() in binary mode."""\n\n __slots__ = ()\n\n @abstractmethod\n def write(self, s: Union[bytes, bytearray]) -> int:\n pass\n\n @abstractmethod\n def __enter__(self) -> \'BinaryIO\':\n pass\n\n\nclass TextIO(IO[str]):\n """Typed version of the return of open() in text mode."""\n\n __slots__ = ()\n\n @property\n @abstractmethod\n def buffer(self) -> BinaryIO:\n pass\n\n @property\n @abstractmethod\n def encoding(self) -> str:\n pass\n\n @property\n @abstractmethod\n def errors(self) -> Optional[str]:\n pass\n\n @property\n @abstractmethod\n def line_buffering(self) -> bool:\n pass\n\n @property\n @abstractmethod\n def newlines(self) -> Any:\n pass\n\n @abstractmethod\n def __enter__(self) -> \'TextIO\':\n pass\n\n\nclass _DeprecatedType(type):\n def __getattribute__(cls, name):\n if name not in {"__dict__", "__module__", "__doc__"} and name in cls.__dict__:\n warnings.warn(\n f"{cls.__name__} is deprecated, import directly "\n f"from typing instead. 
{cls.__name__} will be removed "\n "in Python 3.13.",\n DeprecationWarning,\n stacklevel=2,\n )\n return super().__getattribute__(name)\n\n\nclass io(metaclass=_DeprecatedType):\n """Wrapper namespace for IO generic classes."""\n\n __all__ = [\'IO\', \'TextIO\', \'BinaryIO\']\n IO = IO\n TextIO = TextIO\n BinaryIO = BinaryIO\n\n\nio.__name__ = __name__ + \'.io\'\nsys.modules[io.__name__] = io\n\nPattern = _alias(stdlib_re.Pattern, 1)\nMatch = _alias(stdlib_re.Match, 1)\n\nclass re(metaclass=_DeprecatedType):\n """Wrapper namespace for re type aliases."""\n\n __all__ = [\'Pattern\', \'Match\']\n Pattern = Pattern\n Match = Match\n\n\nre.__name__ = __name__ + \'.re\'\nsys.modules[re.__name__] = re\n\n\ndef reveal_type[T](obj: T, /) -> T:\n """Ask a static type checker to reveal the inferred type of an expression.\n\n When a static type checker encounters a call to ``reveal_type()``,\n it will emit the inferred type of the argument::\n\n x: int = 1\n reveal_type(x)\n\n Running a static type checker (e.g., mypy) on this example\n will produce output similar to \'Revealed type is "builtins.int"\'.\n\n At runtime, the function prints the runtime type of the\n argument and returns the argument unchanged.\n """\n print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr)\n return obj\n\n\nclass _IdentityCallable(Protocol):\n def __call__[T](self, arg: T, /) -> T:\n ...\n\n\ndef dataclass_transform(\n *,\n eq_default: bool = True,\n order_default: bool = False,\n kw_only_default: bool = False,\n frozen_default: bool = False,\n field_specifiers: tuple[type[Any] | Callable[..., Any], ...] = (),\n **kwargs: Any,\n) -> _IdentityCallable:\n """Decorator to mark an object as providing dataclass-like behaviour.\n\n The decorator can be applied to a function, class, or metaclass.\n\n Example usage with a decorator function::\n\n @dataclass_transform()\n def create_model[T](cls: type[T]) -> type[T]:\n ...\n return cls\n\n @create_model\n class CustomerModel:\n id: int\n name: str\n\n On a base class::\n\n @dataclass_transform()\n class ModelBase: ...\n\n class CustomerModel(ModelBase):\n id: int\n name: str\n\n On a metaclass::\n\n @dataclass_transform()\n class ModelMeta(type): ...\n\n class ModelBase(metaclass=ModelMeta): ...\n\n class CustomerModel(ModelBase):\n id: int\n name: str\n\n The ``CustomerModel`` classes defined above will\n be treated by type checkers similarly to classes created with\n ``@dataclasses.dataclass``.\n For example, type checkers will assume these classes have\n ``__init__`` methods that accept ``id`` and ``name``.\n\n The arguments to this decorator can be used to customize this behavior:\n - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be\n ``True`` or ``False`` if it is omitted by the caller.\n - ``order_default`` indicates whether the ``order`` parameter is\n assumed to be True or False if it is omitted by the caller.\n - ``kw_only_default`` indicates whether the ``kw_only`` parameter is\n assumed to be True or False if it is omitted by the caller.\n - ``frozen_default`` indicates whether the ``frozen`` parameter is\n assumed to be True or False if it is omitted by the caller.\n - ``field_specifiers`` specifies a static list of supported classes\n or functions that describe fields, similar to ``dataclasses.field()``.\n - Arbitrary other keyword arguments are accepted in order to allow for\n possible future extensions.\n\n At runtime, this decorator records its arguments in the\n ``__dataclass_transform__`` attribute on the decorated object.\n It 
has no other runtime effect.\n\n See PEP 681 for more details.\n """\n def decorator(cls_or_fn):\n cls_or_fn.__dataclass_transform__ = {\n "eq_default": eq_default,\n "order_default": order_default,\n "kw_only_default": kw_only_default,\n "frozen_default": frozen_default,\n "field_specifiers": field_specifiers,\n "kwargs": kwargs,\n }\n return cls_or_fn\n return decorator\n\n\ntype _Func = Callable[..., Any]\n\n\ndef override[F: _Func](method: F, /) -> F:\n """Indicate that a method is intended to override a method in a base class.\n\n Usage::\n\n class Base:\n def method(self) -> None:\n pass\n\n class Child(Base):\n @override\n def method(self) -> None:\n super().method()\n\n When this decorator is applied to a method, the type checker will\n validate that it overrides a method or attribute with the same name on a\n base class. This helps prevent bugs that may occur when a base class is\n changed without an equivalent change to a child class.\n\n There is no runtime checking of this property. The decorator attempts to\n set the ``__override__`` attribute to ``True`` on the decorated object to\n allow runtime introspection.\n\n See PEP 698 for details.\n """\n try:\n method.__override__ = True\n except (AttributeError, TypeError):\n # Skip the attribute silently if it is not writable.\n # AttributeError happens if the object has __slots__ or a\n # read-only property, TypeError if it\'s a builtin class.\n pass\n return method\n')
__stickytape_write_module('collections/__init__.py', b'\'\'\'This module implements specialized container datatypes providing\nalternatives to Python\'s general purpose built-in containers, dict,\nlist, set, and tuple.\n\n* namedtuple factory function for creating tuple subclasses with named fields\n* deque list-like container with fast appends and pops on either end\n* ChainMap dict-like class for creating a single view of multiple mappings\n* Counter dict subclass for counting hashable objects\n* OrderedDict dict subclass that remembers the order entries were added\n* defaultdict dict subclass that calls a factory function to supply missing values\n* UserDict wrapper around dictionary objects for easier dict subclassing\n* UserList wrapper around list objects for easier list subclassing\n* UserString wrapper around string objects for easier string subclassing\n\n\'\'\'\n\n__all__ = [\n \'ChainMap\',\n \'Counter\',\n \'OrderedDict\',\n \'UserDict\',\n \'UserList\',\n \'UserString\',\n \'defaultdict\',\n \'deque\',\n \'namedtuple\',\n]\n\nimport _collections_abc\nimport sys as _sys\n\nfrom itertools import chain as _chain\nfrom itertools import repeat as _repeat\nfrom itertools import starmap as _starmap\nfrom keyword import iskeyword as _iskeyword\nfrom operator import eq as _eq\nfrom operator import itemgetter as _itemgetter\nfrom reprlib import recursive_repr as _recursive_repr\nfrom _weakref import proxy as _proxy\n\ntry:\n from _collections import deque\nexcept ImportError:\n pass\nelse:\n _collections_abc.MutableSequence.register(deque)\n\ntry:\n from _collections import _deque_iterator\nexcept ImportError:\n pass\n\ntry:\n from _collections import defaultdict\nexcept ImportError:\n pass\n\n\n################################################################################\n### OrderedDict\n################################################################################\n\nclass _OrderedDictKeysView(_collections_abc.KeysView):\n\n def __reversed__(self):\n yield from reversed(self._mapping)\n\nclass _OrderedDictItemsView(_collections_abc.ItemsView):\n\n def __reversed__(self):\n for key in reversed(self._mapping):\n yield (key, self._mapping[key])\n\nclass _OrderedDictValuesView(_collections_abc.ValuesView):\n\n def __reversed__(self):\n for key in reversed(self._mapping):\n yield self._mapping[key]\n\nclass _Link(object):\n __slots__ = \'prev\', \'next\', \'key\', \'__weakref__\'\n\nclass OrderedDict(dict):\n \'Dictionary that remembers insertion order\'\n # An inherited dict maps keys to values.\n # The inherited dict provides __getitem__, __len__, __contains__, and get.\n # The remaining methods are order-aware.\n # Big-O running times for all methods are the same as regular dictionaries.\n\n # The internal self.__map dict maps keys to links in a doubly linked list.\n # The circular doubly linked list starts and ends with a sentinel element.\n # The sentinel element never gets deleted (this simplifies the algorithm).\n # The sentinel is in self.__hardroot with a weakref proxy in self.__root.\n # The prev links are weakref proxies (to prevent circular references).\n # Individual links are kept alive by the hard reference in self.__map.\n # Those hard references disappear when a key is deleted from an OrderedDict.\n\n def __new__(cls, /, *args, **kwds):\n "Create the ordered dict object and set up the underlying structures."\n self = dict.__new__(cls)\n self.__hardroot = _Link()\n self.__root = root = _proxy(self.__hardroot)\n root.prev = root.next = root\n self.__map = {}\n return 
self\n\n def __init__(self, other=(), /, **kwds):\n \'\'\'Initialize an ordered dictionary. The signature is the same as\n regular dictionaries. Keyword argument order is preserved.\n \'\'\'\n self.__update(other, **kwds)\n\n def __setitem__(self, key, value,\n dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):\n \'od.__setitem__(i, y) <==> od[i]=y\'\n # Setting a new item creates a new link at the end of the linked list,\n # and the inherited dictionary is updated with the new key/value pair.\n if key not in self:\n self.__map[key] = link = Link()\n root = self.__root\n last = root.prev\n link.prev, link.next, link.key = last, root, key\n last.next = link\n root.prev = proxy(link)\n dict_setitem(self, key, value)\n\n def __delitem__(self, key, dict_delitem=dict.__delitem__):\n \'od.__delitem__(y) <==> del od[y]\'\n # Deleting an existing item uses self.__map to find the link which gets\n # removed by updating the links in the predecessor and successor nodes.\n dict_delitem(self, key)\n link = self.__map.pop(key)\n link_prev = link.prev\n link_next = link.next\n link_prev.next = link_next\n link_next.prev = link_prev\n link.prev = None\n link.next = None\n\n def __iter__(self):\n \'od.__iter__() <==> iter(od)\'\n # Traverse the linked list in order.\n root = self.__root\n curr = root.next\n while curr is not root:\n yield curr.key\n curr = curr.next\n\n def __reversed__(self):\n \'od.__reversed__() <==> reversed(od)\'\n # Traverse the linked list in reverse order.\n root = self.__root\n curr = root.prev\n while curr is not root:\n yield curr.key\n curr = curr.prev\n\n def clear(self):\n \'od.clear() -> None. Remove all items from od.\'\n root = self.__root\n root.prev = root.next = root\n self.__map.clear()\n dict.clear(self)\n\n def popitem(self, last=True):\n \'\'\'Remove and return a (key, value) pair from the dictionary.\n\n Pairs are returned in LIFO order if last is true or FIFO order if false.\n \'\'\'\n if not self:\n raise KeyError(\'dictionary is empty\')\n root = self.__root\n if last:\n link = root.prev\n link_prev = link.prev\n link_prev.next = root\n root.prev = link_prev\n else:\n link = root.next\n link_next = link.next\n root.next = link_next\n link_next.prev = root\n key = link.key\n del self.__map[key]\n value = dict.pop(self, key)\n return key, value\n\n def move_to_end(self, key, last=True):\n \'\'\'Move an existing element to the end (or beginning if last is false).\n\n Raise KeyError if the element does not exist.\n \'\'\'\n link = self.__map[key]\n link_prev = link.prev\n link_next = link.next\n soft_link = link_next.prev\n link_prev.next = link_next\n link_next.prev = link_prev\n root = self.__root\n if last:\n last = root.prev\n link.prev = last\n link.next = root\n root.prev = soft_link\n last.next = link\n else:\n first = root.next\n link.prev = root\n link.next = first\n first.prev = soft_link\n root.next = link\n\n def __sizeof__(self):\n sizeof = _sys.getsizeof\n n = len(self) + 1 # number of links including root\n size = sizeof(self.__dict__) # instance dictionary\n size += sizeof(self.__map) * 2 # internal dict and inherited dict\n size += sizeof(self.__hardroot) * n # link objects\n size += sizeof(self.__root) * n # proxy objects\n return size\n\n update = __update = _collections_abc.MutableMapping.update\n\n def keys(self):\n "D.keys() -> a set-like object providing a view on D\'s keys"\n return _OrderedDictKeysView(self)\n\n def items(self):\n "D.items() -> a set-like object providing a view on D\'s items"\n return _OrderedDictItemsView(self)\n\n 
def values(self):\n "D.values() -> an object providing a view on D\'s values"\n return _OrderedDictValuesView(self)\n\n __ne__ = _collections_abc.MutableMapping.__ne__\n\n __marker = object()\n\n def pop(self, key, default=__marker):\n \'\'\'od.pop(k[,d]) -> v, remove specified key and return the corresponding\n value. If key is not found, d is returned if given, otherwise KeyError\n is raised.\n\n \'\'\'\n marker = self.__marker\n result = dict.pop(self, key, marker)\n if result is not marker:\n # The same as in __delitem__().\n link = self.__map.pop(key)\n link_prev = link.prev\n link_next = link.next\n link_prev.next = link_next\n link_next.prev = link_prev\n link.prev = None\n link.next = None\n return result\n if default is marker:\n raise KeyError(key)\n return default\n\n def setdefault(self, key, default=None):\n \'\'\'Insert key with a value of default if key is not in the dictionary.\n\n Return the value for key if key is in the dictionary, else default.\n \'\'\'\n if key in self:\n return self[key]\n self[key] = default\n return default\n\n @_recursive_repr()\n def __repr__(self):\n \'od.__repr__() <==> repr(od)\'\n if not self:\n return \'%s()\' % (self.__class__.__name__,)\n return \'%s(%r)\' % (self.__class__.__name__, dict(self.items()))\n\n def __reduce__(self):\n \'Return state information for pickling\'\n state = self.__getstate__()\n if state:\n if isinstance(state, tuple):\n state, slots = state\n else:\n slots = {}\n state = state.copy()\n slots = slots.copy()\n for k in vars(OrderedDict()):\n state.pop(k, None)\n slots.pop(k, None)\n if slots:\n state = state, slots\n else:\n state = state or None\n return self.__class__, (), state, None, iter(self.items())\n\n def copy(self):\n \'od.copy() -> a shallow copy of od\'\n return self.__class__(self)\n\n @classmethod\n def fromkeys(cls, iterable, value=None):\n \'\'\'Create a new ordered dictionary with keys from iterable and values set to value.\n \'\'\'\n self = cls()\n for key in iterable:\n self[key] = value\n return self\n\n def __eq__(self, other):\n \'\'\'od.__eq__(y) <==> od==y. 
Comparison to another OD is order-sensitive\n while comparison to a regular mapping is order-insensitive.\n\n \'\'\'\n if isinstance(other, OrderedDict):\n return dict.__eq__(self, other) and all(map(_eq, self, other))\n return dict.__eq__(self, other)\n\n def __ior__(self, other):\n self.update(other)\n return self\n\n def __or__(self, other):\n if not isinstance(other, dict):\n return NotImplemented\n new = self.__class__(self)\n new.update(other)\n return new\n\n def __ror__(self, other):\n if not isinstance(other, dict):\n return NotImplemented\n new = self.__class__(other)\n new.update(self)\n return new\n\n\ntry:\n from _collections import OrderedDict\nexcept ImportError:\n # Leave the pure Python version in place.\n pass\n\n\n################################################################################\n### namedtuple\n################################################################################\n\ntry:\n from _collections import _tuplegetter\nexcept ImportError:\n _tuplegetter = lambda index, doc: property(_itemgetter(index), doc=doc)\n\ndef namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):\n """Returns a new subclass of tuple with named fields.\n\n >>> Point = namedtuple(\'Point\', [\'x\', \'y\'])\n >>> Point.__doc__ # docstring for the new class\n \'Point(x, y)\'\n >>> p = Point(11, y=22) # instantiate with positional args or keywords\n >>> p[0] + p[1] # indexable like a plain tuple\n 33\n >>> x, y = p # unpack like a regular tuple\n >>> x, y\n (11, 22)\n >>> p.x + p.y # fields also accessible by name\n 33\n >>> d = p._asdict() # convert to a dictionary\n >>> d[\'x\']\n 11\n >>> Point(**d) # convert from a dictionary\n Point(x=11, y=22)\n >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields\n Point(x=100, y=22)\n\n """\n\n # Validate the field names. 
At the user\'s option, either generate an error\n # message or automatically replace the field name with a valid name.\n if isinstance(field_names, str):\n field_names = field_names.replace(\',\', \' \').split()\n field_names = list(map(str, field_names))\n typename = _sys.intern(str(typename))\n\n if rename:\n seen = set()\n for index, name in enumerate(field_names):\n if (not name.isidentifier()\n or _iskeyword(name)\n or name.startswith(\'_\')\n or name in seen):\n field_names[index] = f\'_{index}\'\n seen.add(name)\n\n for name in [typename] + field_names:\n if type(name) is not str:\n raise TypeError(\'Type names and field names must be strings\')\n if not name.isidentifier():\n raise ValueError(\'Type names and field names must be valid \'\n f\'identifiers: {name!r}\')\n if _iskeyword(name):\n raise ValueError(\'Type names and field names cannot be a \'\n f\'keyword: {name!r}\')\n\n seen = set()\n for name in field_names:\n if name.startswith(\'_\') and not rename:\n raise ValueError(\'Field names cannot start with an underscore: \'\n f\'{name!r}\')\n if name in seen:\n raise ValueError(f\'Encountered duplicate field name: {name!r}\')\n seen.add(name)\n\n field_defaults = {}\n if defaults is not None:\n defaults = tuple(defaults)\n if len(defaults) > len(field_names):\n raise TypeError(\'Got more default values than field names\')\n field_defaults = dict(reversed(list(zip(reversed(field_names),\n reversed(defaults)))))\n\n # Variables used in the methods and docstrings\n field_names = tuple(map(_sys.intern, field_names))\n num_fields = len(field_names)\n arg_list = \', \'.join(field_names)\n if num_fields == 1:\n arg_list += \',\'\n repr_fmt = \'(\' + \', \'.join(f\'{name}=%r\' for name in field_names) + \')\'\n tuple_new = tuple.__new__\n _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip\n\n # Create all the named tuple methods to be added to the class namespace\n\n namespace = {\n \'_tuple_new\': tuple_new,\n \'__builtins__\': {},\n \'__name__\': f\'namedtuple_{typename}\',\n }\n code = f\'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))\'\n __new__ = eval(code, namespace)\n __new__.__name__ = \'__new__\'\n __new__.__doc__ = f\'Create new instance of {typename}({arg_list})\'\n if defaults is not None:\n __new__.__defaults__ = defaults\n\n @classmethod\n def _make(cls, iterable):\n result = tuple_new(cls, iterable)\n if _len(result) != num_fields:\n raise TypeError(f\'Expected {num_fields} arguments, got {len(result)}\')\n return result\n\n _make.__func__.__doc__ = (f\'Make a new {typename} object from a sequence \'\n \'or iterable\')\n\n def _replace(self, /, **kwds):\n result = self._make(_map(kwds.pop, field_names, self))\n if kwds:\n raise ValueError(f\'Got unexpected field names: {list(kwds)!r}\')\n return result\n\n _replace.__doc__ = (f\'Return a new {typename} object replacing specified \'\n \'fields with new values\')\n\n def __repr__(self):\n \'Return a nicely formatted representation string\'\n return self.__class__.__name__ + repr_fmt % self\n\n def _asdict(self):\n \'Return a new dict which maps field names to their values.\'\n return _dict(_zip(self._fields, self))\n\n def __getnewargs__(self):\n \'Return self as a plain tuple. 
Used by copy and pickle.\'\n return _tuple(self)\n\n # Modify function metadata to help with introspection and debugging\n for method in (\n __new__,\n _make.__func__,\n _replace,\n __repr__,\n _asdict,\n __getnewargs__,\n ):\n method.__qualname__ = f\'{typename}.{method.__name__}\'\n\n # Build-up the class namespace dictionary\n # and use type() to build the result class\n class_namespace = {\n \'__doc__\': f\'{typename}({arg_list})\',\n \'__slots__\': (),\n \'_fields\': field_names,\n \'_field_defaults\': field_defaults,\n \'__new__\': __new__,\n \'_make\': _make,\n \'_replace\': _replace,\n \'__repr__\': __repr__,\n \'_asdict\': _asdict,\n \'__getnewargs__\': __getnewargs__,\n \'__match_args__\': field_names,\n }\n for index, name in enumerate(field_names):\n doc = _sys.intern(f\'Alias for field number {index}\')\n class_namespace[name] = _tuplegetter(index, doc)\n\n result = type(typename, (tuple,), class_namespace)\n\n # For pickling to work, the __module__ variable needs to be set to the frame\n # where the named tuple is created. Bypass this step in environments where\n # sys._getframe is not defined (Jython for example) or sys._getframe is not\n # defined for arguments greater than 0 (IronPython), or where the user has\n # specified a particular module.\n if module is None:\n try:\n module = _sys._getframemodulename(1) or \'__main__\'\n except AttributeError:\n try:\n module = _sys._getframe(1).f_globals.get(\'__name__\', \'__main__\')\n except (AttributeError, ValueError):\n pass\n if module is not None:\n result.__module__ = module\n\n return result\n\n\n########################################################################\n### Counter\n########################################################################\n\ndef _count_elements(mapping, iterable):\n \'Tally elements from the iterable.\'\n mapping_get = mapping.get\n for elem in iterable:\n mapping[elem] = mapping_get(elem, 0) + 1\n\ntry: # Load C helper function if available\n from _collections import _count_elements\nexcept ImportError:\n pass\n\nclass Counter(dict):\n \'\'\'Dict subclass for counting hashable items. Sometimes called a bag\n or multiset. Elements are stored as dictionary keys and their counts\n are stored as dictionary values.\n\n >>> c = Counter(\'abcdeabcdabcaba\') # count elements from a string\n\n >>> c.most_common(3) # three most common elements\n [(\'a\', 5), (\'b\', 4), (\'c\', 3)]\n >>> sorted(c) # list all unique elements\n [\'a\', \'b\', \'c\', \'d\', \'e\']\n >>> \'\'.join(sorted(c.elements())) # list elements with repetitions\n \'aaaaabbbbcccdde\'\n >>> sum(c.values()) # total of all counts\n 15\n\n >>> c[\'a\'] # count of letter \'a\'\n 5\n >>> for elem in \'shazam\': # update counts from an iterable\n ... 
c[elem] += 1 # by adding 1 to each element\'s count\n >>> c[\'a\'] # now there are seven \'a\'\n 7\n >>> del c[\'b\'] # remove all \'b\'\n >>> c[\'b\'] # now there are zero \'b\'\n 0\n\n >>> d = Counter(\'simsalabim\') # make another counter\n >>> c.update(d) # add in the second counter\n >>> c[\'a\'] # now there are nine \'a\'\n 9\n\n >>> c.clear() # empty the counter\n >>> c\n Counter()\n\n Note: If a count is set to zero or reduced to zero, it will remain\n in the counter until the entry is deleted or the counter is cleared:\n\n >>> c = Counter(\'aaabbc\')\n >>> c[\'b\'] -= 2 # reduce the count of \'b\' by two\n >>> c.most_common() # \'b\' is still in, but its count is zero\n [(\'a\', 3), (\'c\', 1), (\'b\', 0)]\n\n \'\'\'\n # References:\n # http://en.wikipedia.org/wiki/Multiset\n # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html\n # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm\n # http://code.activestate.com/recipes/259174/\n # Knuth, TAOCP Vol. II section 4.6.3\n\n def __init__(self, iterable=None, /, **kwds):\n \'\'\'Create a new, empty Counter object. And if given, count elements\n from an input iterable. Or, initialize the count from another mapping\n of elements to their counts.\n\n >>> c = Counter() # a new, empty counter\n >>> c = Counter(\'gallahad\') # a new counter from an iterable\n >>> c = Counter({\'a\': 4, \'b\': 2}) # a new counter from a mapping\n >>> c = Counter(a=4, b=2) # a new counter from keyword args\n\n \'\'\'\n super().__init__()\n self.update(iterable, **kwds)\n\n def __missing__(self, key):\n \'The count of elements not in the Counter is zero.\'\n # Needed so that self[missing_item] does not raise KeyError\n return 0\n\n def total(self):\n \'Sum of the counts\'\n return sum(self.values())\n\n def most_common(self, n=None):\n \'\'\'List the n most common elements and their counts from the most\n common to the least. If n is None, then list all element counts.\n\n >>> Counter(\'abracadabra\').most_common(3)\n [(\'a\', 5), (\'b\', 2), (\'r\', 2)]\n\n \'\'\'\n # Emulate Bag.sortedByCount from Smalltalk\n if n is None:\n return sorted(self.items(), key=_itemgetter(1), reverse=True)\n\n # Lazy import to speedup Python startup time\n import heapq\n return heapq.nlargest(n, self.items(), key=_itemgetter(1))\n\n def elements(self):\n \'\'\'Iterator over elements repeating each as many times as its count.\n\n >>> c = Counter(\'ABCABC\')\n >>> sorted(c.elements())\n [\'A\', \'A\', \'B\', \'B\', \'C\', \'C\']\n\n Knuth\'s example for prime factors of 1836: 2**2 * 3**3 * 17**1\n\n >>> import math\n >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})\n >>> math.prod(prime_factors.elements())\n 1836\n\n Note, if an element\'s count has been set to zero or is a negative\n number, elements() will ignore it.\n\n \'\'\'\n # Emulate Bag.do from Smalltalk and Multiset.begin from C++.\n return _chain.from_iterable(_starmap(_repeat, self.items()))\n\n # Override dict methods where necessary\n\n @classmethod\n def fromkeys(cls, iterable, v=None):\n # There is no equivalent method for counters because the semantics\n # would be ambiguous in cases such as Counter.fromkeys(\'aaabbc\', v=2).\n # Initializing counters to zero values isn\'t necessary because zero\n # is already the default value for counter lookups. Initializing\n # to one is easily accomplished with Counter(set(iterable)). 
For\n # more exotic cases, create a dictionary first using a dictionary\n # comprehension or dict.fromkeys().\n raise NotImplementedError(\n \'Counter.fromkeys() is undefined. Use Counter(iterable) instead.\')\n\n def update(self, iterable=None, /, **kwds):\n \'\'\'Like dict.update() but add counts instead of replacing them.\n\n Source can be an iterable, a dictionary, or another Counter instance.\n\n >>> c = Counter(\'which\')\n >>> c.update(\'witch\') # add elements from another iterable\n >>> d = Counter(\'watch\')\n >>> c.update(d) # add elements from another counter\n >>> c[\'h\'] # four \'h\' in which, witch, and watch\n 4\n\n \'\'\'\n # The regular dict.update() operation makes no sense here because the\n # replace behavior results in some of the original untouched counts\n # being mixed-in with all of the other counts for a mismash that\n # doesn\'t have a straight-forward interpretation in most counting\n # contexts. Instead, we implement straight-addition. Both the inputs\n # and outputs are allowed to contain zero and negative counts.\n\n if iterable is not None:\n if isinstance(iterable, _collections_abc.Mapping):\n if self:\n self_get = self.get\n for elem, count in iterable.items():\n self[elem] = count + self_get(elem, 0)\n else:\n # fast path when counter is empty\n super().update(iterable)\n else:\n _count_elements(self, iterable)\n if kwds:\n self.update(kwds)\n\n def subtract(self, iterable=None, /, **kwds):\n \'\'\'Like dict.update() but subtracts counts instead of replacing them.\n Counts can be reduced below zero. Both the inputs and outputs are\n allowed to contain zero and negative counts.\n\n Source can be an iterable, a dictionary, or another Counter instance.\n\n >>> c = Counter(\'which\')\n >>> c.subtract(\'witch\') # subtract elements from another iterable\n >>> c.subtract(Counter(\'watch\')) # subtract elements from another counter\n >>> c[\'h\'] # 2 in which, minus 1 in witch, minus 1 in watch\n 0\n >>> c[\'w\'] # 1 in which, minus 1 in witch, minus 1 in watch\n -1\n\n \'\'\'\n if iterable is not None:\n self_get = self.get\n if isinstance(iterable, _collections_abc.Mapping):\n for elem, count in iterable.items():\n self[elem] = self_get(elem, 0) - count\n else:\n for elem in iterable:\n self[elem] = self_get(elem, 0) - 1\n if kwds:\n self.subtract(kwds)\n\n def copy(self):\n \'Return a shallow copy.\'\n return self.__class__(self)\n\n def __reduce__(self):\n return self.__class__, (dict(self),)\n\n def __delitem__(self, elem):\n \'Like dict.__delitem__() but does not raise KeyError for missing values.\'\n if elem in self:\n super().__delitem__(elem)\n\n def __repr__(self):\n if not self:\n return f\'{self.__class__.__name__}()\'\n try:\n # dict() preserves the ordering returned by most_common()\n d = dict(self.most_common())\n except TypeError:\n # handle case where values are not orderable\n d = dict(self)\n return f\'{self.__class__.__name__}({d!r})\'\n\n # Multiset-style mathematical operations discussed in:\n # Knuth TAOCP Volume II section 4.6.3 exercise 19\n # and at http://en.wikipedia.org/wiki/Multiset\n #\n # Outputs guaranteed to only include positive counts.\n #\n # To strip negative and zero counts, add-in an empty counter:\n # c += Counter()\n #\n # Results are ordered according to when an element is first\n # encountered in the left operand and then by the order\n # encountered in the right operand.\n #\n # When the multiplicities are all zero or one, multiset operations\n # are guaranteed to be equivalent to the corresponding operations\n # 
for regular sets.\n # Given counter multisets such as:\n # cp = Counter(a=1, b=0, c=1)\n # cq = Counter(c=1, d=0, e=1)\n # The corresponding regular sets would be:\n # sp = {\'a\', \'c\'}\n # sq = {\'c\', \'e\'}\n # All of the following relations would hold:\n # set(cp + cq) == sp | sq\n # set(cp - cq) == sp - sq\n # set(cp | cq) == sp | sq\n # set(cp & cq) == sp & sq\n # (cp == cq) == (sp == sq)\n # (cp != cq) == (sp != sq)\n # (cp <= cq) == (sp <= sq)\n # (cp < cq) == (sp < sq)\n # (cp >= cq) == (sp >= sq)\n # (cp > cq) == (sp > sq)\n\n def __eq__(self, other):\n \'True if all counts agree. Missing counts are treated as zero.\'\n if not isinstance(other, Counter):\n return NotImplemented\n return all(self[e] == other[e] for c in (self, other) for e in c)\n\n def __ne__(self, other):\n \'True if any counts disagree. Missing counts are treated as zero.\'\n if not isinstance(other, Counter):\n return NotImplemented\n return not self == other\n\n def __le__(self, other):\n \'True if all counts in self are a subset of those in other.\'\n if not isinstance(other, Counter):\n return NotImplemented\n return all(self[e] <= other[e] for c in (self, other) for e in c)\n\n def __lt__(self, other):\n \'True if all counts in self are a proper subset of those in other.\'\n if not isinstance(other, Counter):\n return NotImplemented\n return self <= other and self != other\n\n def __ge__(self, other):\n \'True if all counts in self are a superset of those in other.\'\n if not isinstance(other, Counter):\n return NotImplemented\n return all(self[e] >= other[e] for c in (self, other) for e in c)\n\n def __gt__(self, other):\n \'True if all counts in self are a proper superset of those in other.\'\n if not isinstance(other, Counter):\n return NotImplemented\n return self >= other and self != other\n\n def __add__(self, other):\n \'\'\'Add counts from two counters.\n\n >>> Counter(\'abbb\') + Counter(\'bcc\')\n Counter({\'b\': 4, \'c\': 2, \'a\': 1})\n\n \'\'\'\n if not isinstance(other, Counter):\n return NotImplemented\n result = Counter()\n for elem, count in self.items():\n newcount = count + other[elem]\n if newcount > 0:\n result[elem] = newcount\n for elem, count in other.items():\n if elem not in self and count > 0:\n result[elem] = count\n return result\n\n def __sub__(self, other):\n \'\'\' Subtract count, but keep only results with positive counts.\n\n >>> Counter(\'abbbc\') - Counter(\'bccd\')\n Counter({\'b\': 2, \'a\': 1})\n\n \'\'\'\n if not isinstance(other, Counter):\n return NotImplemented\n result = Counter()\n for elem, count in self.items():\n newcount = count - other[elem]\n if newcount > 0:\n result[elem] = newcount\n for elem, count in other.items():\n if elem not in self and count < 0:\n result[elem] = 0 - count\n return result\n\n def __or__(self, other):\n \'\'\'Union is the maximum of value in either of the input counters.\n\n >>> Counter(\'abbb\') | Counter(\'bcc\')\n Counter({\'b\': 3, \'c\': 2, \'a\': 1})\n\n \'\'\'\n if not isinstance(other, Counter):\n return NotImplemented\n result = Counter()\n for elem, count in self.items():\n other_count = other[elem]\n newcount = other_count if count < other_count else count\n if newcount > 0:\n result[elem] = newcount\n for elem, count in other.items():\n if elem not in self and count > 0:\n result[elem] = count\n return result\n\n def __and__(self, other):\n \'\'\' Intersection is the minimum of corresponding counts.\n\n >>> Counter(\'abbb\') & Counter(\'bcc\')\n Counter({\'b\': 1})\n\n \'\'\'\n if not isinstance(other, Counter):\n 
return NotImplemented\n result = Counter()\n for elem, count in self.items():\n other_count = other[elem]\n newcount = count if count < other_count else other_count\n if newcount > 0:\n result[elem] = newcount\n return result\n\n def __pos__(self):\n \'Adds an empty counter, effectively stripping negative and zero counts\'\n result = Counter()\n for elem, count in self.items():\n if count > 0:\n result[elem] = count\n return result\n\n def __neg__(self):\n \'\'\'Subtracts from an empty counter. Strips positive and zero counts,\n and flips the sign on negative counts.\n\n \'\'\'\n result = Counter()\n for elem, count in self.items():\n if count < 0:\n result[elem] = 0 - count\n return result\n\n def _keep_positive(self):\n \'\'\'Internal method to strip elements with a negative or zero count\'\'\'\n nonpositive = [elem for elem, count in self.items() if not count > 0]\n for elem in nonpositive:\n del self[elem]\n return self\n\n def __iadd__(self, other):\n \'\'\'Inplace add from another counter, keeping only positive counts.\n\n >>> c = Counter(\'abbb\')\n >>> c += Counter(\'bcc\')\n >>> c\n Counter({\'b\': 4, \'c\': 2, \'a\': 1})\n\n \'\'\'\n for elem, count in other.items():\n self[elem] += count\n return self._keep_positive()\n\n def __isub__(self, other):\n \'\'\'Inplace subtract counter, but keep only results with positive counts.\n\n >>> c = Counter(\'abbbc\')\n >>> c -= Counter(\'bccd\')\n >>> c\n Counter({\'b\': 2, \'a\': 1})\n\n \'\'\'\n for elem, count in other.items():\n self[elem] -= count\n return self._keep_positive()\n\n def __ior__(self, other):\n \'\'\'Inplace union is the maximum of value from either counter.\n\n >>> c = Counter(\'abbb\')\n >>> c |= Counter(\'bcc\')\n >>> c\n Counter({\'b\': 3, \'c\': 2, \'a\': 1})\n\n \'\'\'\n for elem, other_count in other.items():\n count = self[elem]\n if other_count > count:\n self[elem] = other_count\n return self._keep_positive()\n\n def __iand__(self, other):\n \'\'\'Inplace intersection is the minimum of corresponding counts.\n\n >>> c = Counter(\'abbb\')\n >>> c &= Counter(\'bcc\')\n >>> c\n Counter({\'b\': 1})\n\n \'\'\'\n for elem, count in self.items():\n other_count = other[elem]\n if other_count < count:\n self[elem] = other_count\n return self._keep_positive()\n\n\n########################################################################\n### ChainMap\n########################################################################\n\nclass ChainMap(_collections_abc.MutableMapping):\n \'\'\' A ChainMap groups multiple dicts (or other mappings) together\n to create a single, updateable view.\n\n The underlying mappings are stored in a list. That list is public and can\n be accessed or updated using the *maps* attribute. 
There is no other\n state.\n\n Lookups search the underlying mappings successively until a key is found.\n In contrast, writes, updates, and deletions only operate on the first\n mapping.\n\n \'\'\'\n\n def __init__(self, *maps):\n \'\'\'Initialize a ChainMap by setting *maps* to the given mappings.\n If no mappings are provided, a single empty dictionary is used.\n\n \'\'\'\n self.maps = list(maps) or [{}] # always at least one map\n\n def __missing__(self, key):\n raise KeyError(key)\n\n def __getitem__(self, key):\n for mapping in self.maps:\n try:\n return mapping[key] # can\'t use \'key in mapping\' with defaultdict\n except KeyError:\n pass\n return self.__missing__(key) # support subclasses that define __missing__\n\n def get(self, key, default=None):\n return self[key] if key in self else default\n\n def __len__(self):\n return len(set().union(*self.maps)) # reuses stored hash values if possible\n\n def __iter__(self):\n d = {}\n for mapping in map(dict.fromkeys, reversed(self.maps)):\n d |= mapping # reuses stored hash values if possible\n return iter(d)\n\n def __contains__(self, key):\n return any(key in m for m in self.maps)\n\n def __bool__(self):\n return any(self.maps)\n\n @_recursive_repr()\n def __repr__(self):\n return f\'{self.__class__.__name__}({", ".join(map(repr, self.maps))})\'\n\n @classmethod\n def fromkeys(cls, iterable, *args):\n \'Create a ChainMap with a single dict created from the iterable.\'\n return cls(dict.fromkeys(iterable, *args))\n\n def copy(self):\n \'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]\'\n return self.__class__(self.maps[0].copy(), *self.maps[1:])\n\n __copy__ = copy\n\n def new_child(self, m=None, **kwargs): # like Django\'s Context.push()\n \'\'\'New ChainMap with a new map followed by all previous maps.\n If no map is provided, an empty dict is used.\n Keyword arguments update the map or new empty dict.\n \'\'\'\n if m is None:\n m = kwargs\n elif kwargs:\n m.update(kwargs)\n return self.__class__(m, *self.maps)\n\n @property\n def parents(self): # like Django\'s Context.pop()\n \'New ChainMap from maps[1:].\'\n return self.__class__(*self.maps[1:])\n\n def __setitem__(self, key, value):\n self.maps[0][key] = value\n\n def __delitem__(self, key):\n try:\n del self.maps[0][key]\n except KeyError:\n raise KeyError(f\'Key not found in the first mapping: {key!r}\')\n\n def popitem(self):\n \'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.\'\n try:\n return self.maps[0].popitem()\n except KeyError:\n raise KeyError(\'No keys found in the first mapping.\')\n\n def pop(self, key, *args):\n \'Remove *key* from maps[0] and return its value. 
Raise KeyError if *key* not in maps[0].\'\n try:\n return self.maps[0].pop(key, *args)\n except KeyError:\n raise KeyError(f\'Key not found in the first mapping: {key!r}\')\n\n def clear(self):\n \'Clear maps[0], leaving maps[1:] intact.\'\n self.maps[0].clear()\n\n def __ior__(self, other):\n self.maps[0].update(other)\n return self\n\n def __or__(self, other):\n if not isinstance(other, _collections_abc.Mapping):\n return NotImplemented\n m = self.copy()\n m.maps[0].update(other)\n return m\n\n def __ror__(self, other):\n if not isinstance(other, _collections_abc.Mapping):\n return NotImplemented\n m = dict(other)\n for child in reversed(self.maps):\n m.update(child)\n return self.__class__(m)\n\n\n################################################################################\n### UserDict\n################################################################################\n\nclass UserDict(_collections_abc.MutableMapping):\n\n # Start by filling-out the abstract methods\n def __init__(self, dict=None, /, **kwargs):\n self.data = {}\n if dict is not None:\n self.update(dict)\n if kwargs:\n self.update(kwargs)\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, key):\n if key in self.data:\n return self.data[key]\n if hasattr(self.__class__, "__missing__"):\n return self.__class__.__missing__(self, key)\n raise KeyError(key)\n\n def __setitem__(self, key, item):\n self.data[key] = item\n\n def __delitem__(self, key):\n del self.data[key]\n\n def __iter__(self):\n return iter(self.data)\n\n # Modify __contains__ and get() to work like dict\n # does when __missing__ is present.\n def __contains__(self, key):\n return key in self.data\n\n def get(self, key, default=None):\n if key in self:\n return self[key]\n return default\n\n\n # Now, add the methods in dicts but not in MutableMapping\n def __repr__(self):\n return repr(self.data)\n\n def __or__(self, other):\n if isinstance(other, UserDict):\n return self.__class__(self.data | other.data)\n if isinstance(other, dict):\n return self.__class__(self.data | other)\n return NotImplemented\n\n def __ror__(self, other):\n if isinstance(other, UserDict):\n return self.__class__(other.data | self.data)\n if isinstance(other, dict):\n return self.__class__(other | self.data)\n return NotImplemented\n\n def __ior__(self, other):\n if isinstance(other, UserDict):\n self.data |= other.data\n else:\n self.data |= other\n return self\n\n def __copy__(self):\n inst = self.__class__.__new__(self.__class__)\n inst.__dict__.update(self.__dict__)\n # Create a copy and avoid triggering descriptors\n inst.__dict__["data"] = self.__dict__["data"].copy()\n return inst\n\n def copy(self):\n if self.__class__ is UserDict:\n return UserDict(self.data.copy())\n import copy\n data = self.data\n try:\n self.data = {}\n c = copy.copy(self)\n finally:\n self.data = data\n c.update(self)\n return c\n\n @classmethod\n def fromkeys(cls, iterable, value=None):\n d = cls()\n for key in iterable:\n d[key] = value\n return d\n\n\n################################################################################\n### UserList\n################################################################################\n\nclass UserList(_collections_abc.MutableSequence):\n """A more or less complete user-defined wrapper around list objects."""\n\n def __init__(self, initlist=None):\n self.data = []\n if initlist is not None:\n # XXX should this accept an arbitrary sequence?\n if type(initlist) == type(self.data):\n self.data[:] = initlist\n elif isinstance(initlist, 
UserList):\n self.data[:] = initlist.data[:]\n else:\n self.data = list(initlist)\n\n def __repr__(self):\n return repr(self.data)\n\n def __lt__(self, other):\n return self.data < self.__cast(other)\n\n def __le__(self, other):\n return self.data <= self.__cast(other)\n\n def __eq__(self, other):\n return self.data == self.__cast(other)\n\n def __gt__(self, other):\n return self.data > self.__cast(other)\n\n def __ge__(self, other):\n return self.data >= self.__cast(other)\n\n def __cast(self, other):\n return other.data if isinstance(other, UserList) else other\n\n def __contains__(self, item):\n return item in self.data\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, i):\n if isinstance(i, slice):\n return self.__class__(self.data[i])\n else:\n return self.data[i]\n\n def __setitem__(self, i, item):\n self.data[i] = item\n\n def __delitem__(self, i):\n del self.data[i]\n\n def __add__(self, other):\n if isinstance(other, UserList):\n return self.__class__(self.data + other.data)\n elif isinstance(other, type(self.data)):\n return self.__class__(self.data + other)\n return self.__class__(self.data + list(other))\n\n def __radd__(self, other):\n if isinstance(other, UserList):\n return self.__class__(other.data + self.data)\n elif isinstance(other, type(self.data)):\n return self.__class__(other + self.data)\n return self.__class__(list(other) + self.data)\n\n def __iadd__(self, other):\n if isinstance(other, UserList):\n self.data += other.data\n elif isinstance(other, type(self.data)):\n self.data += other\n else:\n self.data += list(other)\n return self\n\n def __mul__(self, n):\n return self.__class__(self.data * n)\n\n __rmul__ = __mul__\n\n def __imul__(self, n):\n self.data *= n\n return self\n\n def __copy__(self):\n inst = self.__class__.__new__(self.__class__)\n inst.__dict__.update(self.__dict__)\n # Create a copy and avoid triggering descriptors\n inst.__dict__["data"] = self.__dict__["data"][:]\n return inst\n\n def append(self, item):\n self.data.append(item)\n\n def insert(self, i, item):\n self.data.insert(i, item)\n\n def pop(self, i=-1):\n return self.data.pop(i)\n\n def remove(self, item):\n self.data.remove(item)\n\n def clear(self):\n self.data.clear()\n\n def copy(self):\n return self.__class__(self)\n\n def count(self, item):\n return self.data.count(item)\n\n def index(self, item, *args):\n return self.data.index(item, *args)\n\n def reverse(self):\n self.data.reverse()\n\n def sort(self, /, *args, **kwds):\n self.data.sort(*args, **kwds)\n\n def extend(self, other):\n if isinstance(other, UserList):\n self.data.extend(other.data)\n else:\n self.data.extend(other)\n\n\n################################################################################\n### UserString\n################################################################################\n\nclass UserString(_collections_abc.Sequence):\n\n def __init__(self, seq):\n if isinstance(seq, str):\n self.data = seq\n elif isinstance(seq, UserString):\n self.data = seq.data[:]\n else:\n self.data = str(seq)\n\n def __str__(self):\n return str(self.data)\n\n def __repr__(self):\n return repr(self.data)\n\n def __int__(self):\n return int(self.data)\n\n def __float__(self):\n return float(self.data)\n\n def __complex__(self):\n return complex(self.data)\n\n def __hash__(self):\n return hash(self.data)\n\n def __getnewargs__(self):\n return (self.data[:],)\n\n def __eq__(self, string):\n if isinstance(string, UserString):\n return self.data == string.data\n return self.data == string\n\n def 
__lt__(self, string):\n if isinstance(string, UserString):\n return self.data < string.data\n return self.data < string\n\n def __le__(self, string):\n if isinstance(string, UserString):\n return self.data <= string.data\n return self.data <= string\n\n def __gt__(self, string):\n if isinstance(string, UserString):\n return self.data > string.data\n return self.data > string\n\n def __ge__(self, string):\n if isinstance(string, UserString):\n return self.data >= string.data\n return self.data >= string\n\n def __contains__(self, char):\n if isinstance(char, UserString):\n char = char.data\n return char in self.data\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n return self.__class__(self.data[index])\n\n def __add__(self, other):\n if isinstance(other, UserString):\n return self.__class__(self.data + other.data)\n elif isinstance(other, str):\n return self.__class__(self.data + other)\n return self.__class__(self.data + str(other))\n\n def __radd__(self, other):\n if isinstance(other, str):\n return self.__class__(other + self.data)\n return self.__class__(str(other) + self.data)\n\n def __mul__(self, n):\n return self.__class__(self.data * n)\n\n __rmul__ = __mul__\n\n def __mod__(self, args):\n return self.__class__(self.data % args)\n\n def __rmod__(self, template):\n return self.__class__(str(template) % self)\n\n # the following methods are defined in alphabetical order:\n def capitalize(self):\n return self.__class__(self.data.capitalize())\n\n def casefold(self):\n return self.__class__(self.data.casefold())\n\n def center(self, width, *args):\n return self.__class__(self.data.center(width, *args))\n\n def count(self, sub, start=0, end=_sys.maxsize):\n if isinstance(sub, UserString):\n sub = sub.data\n return self.data.count(sub, start, end)\n\n def removeprefix(self, prefix, /):\n if isinstance(prefix, UserString):\n prefix = prefix.data\n return self.__class__(self.data.removeprefix(prefix))\n\n def removesuffix(self, suffix, /):\n if isinstance(suffix, UserString):\n suffix = suffix.data\n return self.__class__(self.data.removesuffix(suffix))\n\n def encode(self, encoding=\'utf-8\', errors=\'strict\'):\n encoding = \'utf-8\' if encoding is None else encoding\n errors = \'strict\' if errors is None else errors\n return self.data.encode(encoding, errors)\n\n def endswith(self, suffix, start=0, end=_sys.maxsize):\n return self.data.endswith(suffix, start, end)\n\n def expandtabs(self, tabsize=8):\n return self.__class__(self.data.expandtabs(tabsize))\n\n def find(self, sub, start=0, end=_sys.maxsize):\n if isinstance(sub, UserString):\n sub = sub.data\n return self.data.find(sub, start, end)\n\n def format(self, /, *args, **kwds):\n return self.data.format(*args, **kwds)\n\n def format_map(self, mapping):\n return self.data.format_map(mapping)\n\n def index(self, sub, start=0, end=_sys.maxsize):\n return self.data.index(sub, start, end)\n\n def isalpha(self):\n return self.data.isalpha()\n\n def isalnum(self):\n return self.data.isalnum()\n\n def isascii(self):\n return self.data.isascii()\n\n def isdecimal(self):\n return self.data.isdecimal()\n\n def isdigit(self):\n return self.data.isdigit()\n\n def isidentifier(self):\n return self.data.isidentifier()\n\n def islower(self):\n return self.data.islower()\n\n def isnumeric(self):\n return self.data.isnumeric()\n\n def isprintable(self):\n return self.data.isprintable()\n\n def isspace(self):\n return self.data.isspace()\n\n def istitle(self):\n return self.data.istitle()\n\n def isupper(self):\n 
return self.data.isupper()\n\n def join(self, seq):\n return self.data.join(seq)\n\n def ljust(self, width, *args):\n return self.__class__(self.data.ljust(width, *args))\n\n def lower(self):\n return self.__class__(self.data.lower())\n\n def lstrip(self, chars=None):\n return self.__class__(self.data.lstrip(chars))\n\n maketrans = str.maketrans\n\n def partition(self, sep):\n return self.data.partition(sep)\n\n def replace(self, old, new, maxsplit=-1):\n if isinstance(old, UserString):\n old = old.data\n if isinstance(new, UserString):\n new = new.data\n return self.__class__(self.data.replace(old, new, maxsplit))\n\n def rfind(self, sub, start=0, end=_sys.maxsize):\n if isinstance(sub, UserString):\n sub = sub.data\n return self.data.rfind(sub, start, end)\n\n def rindex(self, sub, start=0, end=_sys.maxsize):\n return self.data.rindex(sub, start, end)\n\n def rjust(self, width, *args):\n return self.__class__(self.data.rjust(width, *args))\n\n def rpartition(self, sep):\n return self.data.rpartition(sep)\n\n def rstrip(self, chars=None):\n return self.__class__(self.data.rstrip(chars))\n\n def split(self, sep=None, maxsplit=-1):\n return self.data.split(sep, maxsplit)\n\n def rsplit(self, sep=None, maxsplit=-1):\n return self.data.rsplit(sep, maxsplit)\n\n def splitlines(self, keepends=False):\n return self.data.splitlines(keepends)\n\n def startswith(self, prefix, start=0, end=_sys.maxsize):\n return self.data.startswith(prefix, start, end)\n\n def strip(self, chars=None):\n return self.__class__(self.data.strip(chars))\n\n def swapcase(self):\n return self.__class__(self.data.swapcase())\n\n def title(self):\n return self.__class__(self.data.title())\n\n def translate(self, *args):\n return self.__class__(self.data.translate(*args))\n\n def upper(self):\n return self.__class__(self.data.upper())\n\n def zfill(self, width):\n return self.__class__(self.data.zfill(width))\n')
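# The byte string above vendors a pure-Python copy of CPython's collections
# module (OrderedDict, namedtuple, Counter, ChainMap, UserDict, UserList,
# UserString) into the stickytape temp directory, so the bundled code can
# import it from there instead of from the host Python runtime. A minimal
# sketch of the mechanism, assuming the __stickytape_write_module helper
# defined earlier in this script (the module name 'demo.py' is illustrative
# only, not part of the bundle):
#
#     __stickytape_write_module('demo.py', b'ANSWER = 42\n')
#     import demo               # resolved via the temp dir on sys.path
#     assert demo.ANSWER == 42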
__stickytape_write_module('_collections_abc.py', b'# Copyright 2007 Google, Inc. All Rights Reserved.\n# Licensed to PSF under a Contributor Agreement.\n\n"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.\n\nUnit tests are in test_collections.\n"""\n\n############ Maintenance notes #########################################\n#\n# ABCs are different from other standard library modules in that they\n# specify compliance tests. In general, once an ABC has been published,\n# new methods (either abstract or concrete) cannot be added.\n#\n# Though classes that inherit from an ABC would automatically receive a\n# new mixin method, registered classes would become non-compliant and\n# violate the contract promised by ``isinstance(someobj, SomeABC)``.\n#\n# Though irritating, the correct procedure for adding new abstract or\n# mixin methods is to create a new ABC as a subclass of the previous\n# ABC. For example, union(), intersection(), and difference() cannot\n# be added to Set but could go into a new ABC that extends Set.\n#\n# Because they are so hard to change, new ABCs should have their APIs\n# carefully thought through prior to publication.\n#\n# Since ABCMeta only checks for the presence of methods, it is possible\n# to alter the signature of a method by adding optional arguments\n# or changing parameters names. This is still a bit dubious but at\n# least it won\'t cause isinstance() to return an incorrect result.\n#\n#\n#######################################################################\n\nfrom abc import ABCMeta, abstractmethod\nimport sys\n\nGenericAlias = type(list[int])\nEllipsisType = type(...)\ndef _f(): pass\nFunctionType = type(_f)\ndel _f\n\n__all__ = ["Awaitable", "Coroutine",\n "AsyncIterable", "AsyncIterator", "AsyncGenerator",\n "Hashable", "Iterable", "Iterator", "Generator", "Reversible",\n "Sized", "Container", "Callable", "Collection",\n "Set", "MutableSet",\n "Mapping", "MutableMapping",\n "MappingView", "KeysView", "ItemsView", "ValuesView",\n "Sequence", "MutableSequence",\n "ByteString", "Buffer",\n ]\n\n# This module has been renamed from collections.abc to _collections_abc to\n# speed up interpreter startup. 
Some of the types such as MutableMapping are\n# required early but collections module imports a lot of other modules.\n# See issue #19218\n__name__ = "collections.abc"\n\n# Private list of types that we want to register with the various ABCs\n# so that they will pass tests like:\n# it = iter(somebytearray)\n# assert isinstance(it, Iterable)\n# Note: in other implementations, these types might not be distinct\n# and they may have their own implementation specific types that\n# are not included on this list.\nbytes_iterator = type(iter(b\'\'))\nbytearray_iterator = type(iter(bytearray()))\n#callable_iterator = ???\ndict_keyiterator = type(iter({}.keys()))\ndict_valueiterator = type(iter({}.values()))\ndict_itemiterator = type(iter({}.items()))\nlist_iterator = type(iter([]))\nlist_reverseiterator = type(iter(reversed([])))\nrange_iterator = type(iter(range(0)))\nlongrange_iterator = type(iter(range(1 << 1000)))\nset_iterator = type(iter(set()))\nstr_iterator = type(iter(""))\ntuple_iterator = type(iter(()))\nzip_iterator = type(iter(zip()))\n## views ##\ndict_keys = type({}.keys())\ndict_values = type({}.values())\ndict_items = type({}.items())\n## misc ##\nmappingproxy = type(type.__dict__)\ngenerator = type((lambda: (yield))())\n## coroutine ##\nasync def _coro(): pass\n_coro = _coro()\ncoroutine = type(_coro)\n_coro.close() # Prevent ResourceWarning\ndel _coro\n## asynchronous generator ##\nasync def _ag(): yield\n_ag = _ag()\nasync_generator = type(_ag)\ndel _ag\n\n\n### ONE-TRICK PONIES ###\n\ndef _check_methods(C, *methods):\n mro = C.__mro__\n for method in methods:\n for B in mro:\n if method in B.__dict__:\n if B.__dict__[method] is None:\n return NotImplemented\n break\n else:\n return NotImplemented\n return True\n\nclass Hashable(metaclass=ABCMeta):\n\n __slots__ = ()\n\n @abstractmethod\n def __hash__(self):\n return 0\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Hashable:\n return _check_methods(C, "__hash__")\n return NotImplemented\n\n\nclass Awaitable(metaclass=ABCMeta):\n\n __slots__ = ()\n\n @abstractmethod\n def __await__(self):\n yield\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Awaitable:\n return _check_methods(C, "__await__")\n return NotImplemented\n\n __class_getitem__ = classmethod(GenericAlias)\n\n\nclass Coroutine(Awaitable):\n\n __slots__ = ()\n\n @abstractmethod\n def send(self, value):\n """Send a value into the coroutine.\n Return next yielded value or raise StopIteration.\n """\n raise StopIteration\n\n @abstractmethod\n def throw(self, typ, val=None, tb=None):\n """Raise an exception in the coroutine.\n Return next yielded value or raise StopIteration.\n """\n if val is None:\n if tb is None:\n raise typ\n val = typ()\n if tb is not None:\n val = val.with_traceback(tb)\n raise val\n\n def close(self):\n """Raise GeneratorExit inside coroutine.\n """\n try:\n self.throw(GeneratorExit)\n except (GeneratorExit, StopIteration):\n pass\n else:\n raise RuntimeError("coroutine ignored GeneratorExit")\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Coroutine:\n return _check_methods(C, \'__await__\', \'send\', \'throw\', \'close\')\n return NotImplemented\n\n\nCoroutine.register(coroutine)\n\n\nclass AsyncIterable(metaclass=ABCMeta):\n\n __slots__ = ()\n\n @abstractmethod\n def __aiter__(self):\n return AsyncIterator()\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is AsyncIterable:\n return _check_methods(C, "__aiter__")\n return NotImplemented\n\n __class_getitem__ = classmethod(GenericAlias)\n\n\nclass 
AsyncIterator(AsyncIterable):\n\n __slots__ = ()\n\n @abstractmethod\n async def __anext__(self):\n """Return the next item or raise StopAsyncIteration when exhausted."""\n raise StopAsyncIteration\n\n def __aiter__(self):\n return self\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is AsyncIterator:\n return _check_methods(C, "__anext__", "__aiter__")\n return NotImplemented\n\n\nclass AsyncGenerator(AsyncIterator):\n\n __slots__ = ()\n\n async def __anext__(self):\n """Return the next item from the asynchronous generator.\n When exhausted, raise StopAsyncIteration.\n """\n return await self.asend(None)\n\n @abstractmethod\n async def asend(self, value):\n """Send a value into the asynchronous generator.\n Return next yielded value or raise StopAsyncIteration.\n """\n raise StopAsyncIteration\n\n @abstractmethod\n async def athrow(self, typ, val=None, tb=None):\n """Raise an exception in the asynchronous generator.\n Return next yielded value or raise StopAsyncIteration.\n """\n if val is None:\n if tb is None:\n raise typ\n val = typ()\n if tb is not None:\n val = val.with_traceback(tb)\n raise val\n\n async def aclose(self):\n """Raise GeneratorExit inside coroutine.\n """\n try:\n await self.athrow(GeneratorExit)\n except (GeneratorExit, StopAsyncIteration):\n pass\n else:\n raise RuntimeError("asynchronous generator ignored GeneratorExit")\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is AsyncGenerator:\n return _check_methods(C, \'__aiter__\', \'__anext__\',\n \'asend\', \'athrow\', \'aclose\')\n return NotImplemented\n\n\nAsyncGenerator.register(async_generator)\n\n\nclass Iterable(metaclass=ABCMeta):\n\n __slots__ = ()\n\n @abstractmethod\n def __iter__(self):\n while False:\n yield None\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Iterable:\n return _check_methods(C, "__iter__")\n return NotImplemented\n\n __class_getitem__ = classmethod(GenericAlias)\n\n\nclass Iterator(Iterable):\n\n __slots__ = ()\n\n @abstractmethod\n def __next__(self):\n \'Return the next item from the iterator. 
When exhausted, raise StopIteration\'\n raise StopIteration\n\n def __iter__(self):\n return self\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Iterator:\n return _check_methods(C, \'__iter__\', \'__next__\')\n return NotImplemented\n\n\nIterator.register(bytes_iterator)\nIterator.register(bytearray_iterator)\n#Iterator.register(callable_iterator)\nIterator.register(dict_keyiterator)\nIterator.register(dict_valueiterator)\nIterator.register(dict_itemiterator)\nIterator.register(list_iterator)\nIterator.register(list_reverseiterator)\nIterator.register(range_iterator)\nIterator.register(longrange_iterator)\nIterator.register(set_iterator)\nIterator.register(str_iterator)\nIterator.register(tuple_iterator)\nIterator.register(zip_iterator)\n\n\nclass Reversible(Iterable):\n\n __slots__ = ()\n\n @abstractmethod\n def __reversed__(self):\n while False:\n yield None\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Reversible:\n return _check_methods(C, "__reversed__", "__iter__")\n return NotImplemented\n\n\nclass Generator(Iterator):\n\n __slots__ = ()\n\n def __next__(self):\n """Return the next item from the generator.\n When exhausted, raise StopIteration.\n """\n return self.send(None)\n\n @abstractmethod\n def send(self, value):\n """Send a value into the generator.\n Return next yielded value or raise StopIteration.\n """\n raise StopIteration\n\n @abstractmethod\n def throw(self, typ, val=None, tb=None):\n """Raise an exception in the generator.\n Return next yielded value or raise StopIteration.\n """\n if val is None:\n if tb is None:\n raise typ\n val = typ()\n if tb is not None:\n val = val.with_traceback(tb)\n raise val\n\n def close(self):\n """Raise GeneratorExit inside generator.\n """\n try:\n self.throw(GeneratorExit)\n except (GeneratorExit, StopIteration):\n pass\n else:\n raise RuntimeError("generator ignored GeneratorExit")\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Generator:\n return _check_methods(C, \'__iter__\', \'__next__\',\n \'send\', \'throw\', \'close\')\n return NotImplemented\n\n\nGenerator.register(generator)\n\n\nclass Sized(metaclass=ABCMeta):\n\n __slots__ = ()\n\n @abstractmethod\n def __len__(self):\n return 0\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Sized:\n return _check_methods(C, "__len__")\n return NotImplemented\n\n\nclass Container(metaclass=ABCMeta):\n\n __slots__ = ()\n\n @abstractmethod\n def __contains__(self, x):\n return False\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Container:\n return _check_methods(C, "__contains__")\n return NotImplemented\n\n __class_getitem__ = classmethod(GenericAlias)\n\n\nclass Collection(Sized, Iterable, Container):\n\n __slots__ = ()\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Collection:\n return _check_methods(C, "__len__", "__iter__", "__contains__")\n return NotImplemented\n\n\nclass Buffer(metaclass=ABCMeta):\n\n __slots__ = ()\n\n @abstractmethod\n def __buffer__(self, flags: int, /) -> memoryview:\n raise NotImplementedError\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Buffer:\n return _check_methods(C, "__buffer__")\n return NotImplemented\n\n\nclass _CallableGenericAlias(GenericAlias):\n """ Represent `Callable[argtypes, resulttype]`.\n\n This sets ``__args__`` to a tuple containing the flattened ``argtypes``\n followed by ``resulttype``.\n\n Example: ``Callable[[int, str], float]`` sets ``__args__`` to\n ``(int, str, float)``.\n """\n\n __slots__ = ()\n\n def __new__(cls, origin, args):\n 
if not (isinstance(args, tuple) and len(args) == 2):\n raise TypeError(\n "Callable must be used as Callable[[arg, ...], result].")\n t_args, t_result = args\n if isinstance(t_args, (tuple, list)):\n args = (*t_args, t_result)\n elif not _is_param_expr(t_args):\n raise TypeError(f"Expected a list of types, an ellipsis, "\n f"ParamSpec, or Concatenate. Got {t_args}")\n return super().__new__(cls, origin, args)\n\n def __repr__(self):\n if len(self.__args__) == 2 and _is_param_expr(self.__args__[0]):\n return super().__repr__()\n return (f\'collections.abc.Callable\'\n f\'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], \'\n f\'{_type_repr(self.__args__[-1])}]\')\n\n def __reduce__(self):\n args = self.__args__\n if not (len(args) == 2 and _is_param_expr(args[0])):\n args = list(args[:-1]), args[-1]\n return _CallableGenericAlias, (Callable, args)\n\n def __getitem__(self, item):\n # Called during TypeVar substitution, returns the custom subclass\n # rather than the default types.GenericAlias object. Most of the\n # code is copied from typing\'s _GenericAlias and the builtin\n # types.GenericAlias.\n if not isinstance(item, tuple):\n item = (item,)\n\n new_args = super().__getitem__(item).__args__\n\n # args[0] occurs due to things like Z[[int, str, bool]] from PEP 612\n if not isinstance(new_args[0], (tuple, list)):\n t_result = new_args[-1]\n t_args = new_args[:-1]\n new_args = (t_args, t_result)\n return _CallableGenericAlias(Callable, tuple(new_args))\n\ndef _is_param_expr(obj):\n """Checks if obj matches either a list of types, ``...``, ``ParamSpec`` or\n ``_ConcatenateGenericAlias`` from typing.py\n """\n if obj is Ellipsis:\n return True\n if isinstance(obj, list):\n return True\n obj = type(obj)\n names = (\'ParamSpec\', \'_ConcatenateGenericAlias\')\n return obj.__module__ == \'typing\' and any(obj.__name__ == name for name in names)\n\ndef _type_repr(obj):\n """Return the repr() of an object, special-casing types (internal helper).\n\n Copied from :mod:`typing` since collections.abc\n shouldn\'t depend on that module.\n (Keep this roughly in sync with the typing version.)\n """\n if isinstance(obj, type):\n if obj.__module__ == \'builtins\':\n return obj.__qualname__\n return f\'{obj.__module__}.{obj.__qualname__}\'\n if obj is Ellipsis:\n return \'...\'\n if isinstance(obj, FunctionType):\n return obj.__name__\n return repr(obj)\n\n\nclass Callable(metaclass=ABCMeta):\n\n __slots__ = ()\n\n @abstractmethod\n def __call__(self, *args, **kwds):\n return False\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Callable:\n return _check_methods(C, "__call__")\n return NotImplemented\n\n __class_getitem__ = classmethod(_CallableGenericAlias)\n\n\n### SETS ###\n\n\nclass Set(Collection):\n """A set is a finite, iterable container.\n\n This class provides concrete generic implementations of all\n methods except for __contains__, __iter__ and __len__.\n\n To override the comparisons (presumably for speed, as the\n semantics are fixed), redefine __le__ and __ge__,\n then the other operations will automatically follow suit.\n """\n\n __slots__ = ()\n\n def __le__(self, other):\n if not isinstance(other, Set):\n return NotImplemented\n if len(self) > len(other):\n return False\n for elem in self:\n if elem not in other:\n return False\n return True\n\n def __lt__(self, other):\n if not isinstance(other, Set):\n return NotImplemented\n return len(self) < len(other) and self.__le__(other)\n\n def __gt__(self, other):\n if not isinstance(other, Set):\n return 
NotImplemented\n return len(self) > len(other) and self.__ge__(other)\n\n def __ge__(self, other):\n if not isinstance(other, Set):\n return NotImplemented\n if len(self) < len(other):\n return False\n for elem in other:\n if elem not in self:\n return False\n return True\n\n def __eq__(self, other):\n if not isinstance(other, Set):\n return NotImplemented\n return len(self) == len(other) and self.__le__(other)\n\n @classmethod\n def _from_iterable(cls, it):\n \'\'\'Construct an instance of the class from any iterable input.\n\n Must override this method if the class constructor signature\n does not accept an iterable for an input.\n \'\'\'\n return cls(it)\n\n def __and__(self, other):\n if not isinstance(other, Iterable):\n return NotImplemented\n return self._from_iterable(value for value in other if value in self)\n\n __rand__ = __and__\n\n def isdisjoint(self, other):\n \'Return True if two sets have a null intersection.\'\n for value in other:\n if value in self:\n return False\n return True\n\n def __or__(self, other):\n if not isinstance(other, Iterable):\n return NotImplemented\n chain = (e for s in (self, other) for e in s)\n return self._from_iterable(chain)\n\n __ror__ = __or__\n\n def __sub__(self, other):\n if not isinstance(other, Set):\n if not isinstance(other, Iterable):\n return NotImplemented\n other = self._from_iterable(other)\n return self._from_iterable(value for value in self\n if value not in other)\n\n def __rsub__(self, other):\n if not isinstance(other, Set):\n if not isinstance(other, Iterable):\n return NotImplemented\n other = self._from_iterable(other)\n return self._from_iterable(value for value in other\n if value not in self)\n\n def __xor__(self, other):\n if not isinstance(other, Set):\n if not isinstance(other, Iterable):\n return NotImplemented\n other = self._from_iterable(other)\n return (self - other) | (other - self)\n\n __rxor__ = __xor__\n\n def _hash(self):\n """Compute the hash value of a set.\n\n Note that we don\'t define __hash__: not all sets are hashable.\n But if you define a hashable set type, its __hash__ should\n call this function.\n\n This must be compatible __eq__.\n\n All sets ought to compare equal if they contain the same\n elements, regardless of how they are implemented, and\n regardless of the order of the elements; so there\'s not much\n freedom for __eq__ or __hash__. We match the algorithm used\n by the built-in frozenset type.\n """\n MAX = sys.maxsize\n MASK = 2 * MAX + 1\n n = len(self)\n h = 1927868237 * (n + 1)\n h &= MASK\n for x in self:\n hx = hash(x)\n h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167\n h &= MASK\n h ^= (h >> 11) ^ (h >> 25)\n h = h * 69069 + 907133923\n h &= MASK\n if h > MAX:\n h -= MASK + 1\n if h == -1:\n h = 590923713\n return h\n\n\nSet.register(frozenset)\n\n\nclass MutableSet(Set):\n """A mutable set is a finite, iterable container.\n\n This class provides concrete generic implementations of all\n methods except for __contains__, __iter__, __len__,\n add(), and discard().\n\n To override the comparisons (presumably for speed, as the\n semantics are fixed), all you have to do is redefine __le__ and\n then the other operations will automatically follow suit.\n """\n\n __slots__ = ()\n\n @abstractmethod\n def add(self, value):\n """Add an element."""\n raise NotImplementedError\n\n @abstractmethod\n def discard(self, value):\n """Remove an element. Do not raise an exception if absent."""\n raise NotImplementedError\n\n def remove(self, value):\n """Remove an element. 
If not a member, raise a KeyError."""\n if value not in self:\n raise KeyError(value)\n self.discard(value)\n\n def pop(self):\n """Return the popped value. Raise KeyError if empty."""\n it = iter(self)\n try:\n value = next(it)\n except StopIteration:\n raise KeyError from None\n self.discard(value)\n return value\n\n def clear(self):\n """This is slow (creates N new iterators!) but effective."""\n try:\n while True:\n self.pop()\n except KeyError:\n pass\n\n def __ior__(self, it):\n for value in it:\n self.add(value)\n return self\n\n def __iand__(self, it):\n for value in (self - it):\n self.discard(value)\n return self\n\n def __ixor__(self, it):\n if it is self:\n self.clear()\n else:\n if not isinstance(it, Set):\n it = self._from_iterable(it)\n for value in it:\n if value in self:\n self.discard(value)\n else:\n self.add(value)\n return self\n\n def __isub__(self, it):\n if it is self:\n self.clear()\n else:\n for value in it:\n self.discard(value)\n return self\n\n\nMutableSet.register(set)\n\n\n### MAPPINGS ###\n\nclass Mapping(Collection):\n """A Mapping is a generic container for associating key/value\n pairs.\n\n This class provides concrete generic implementations of all\n methods except for __getitem__, __iter__, and __len__.\n """\n\n __slots__ = ()\n\n # Tell ABCMeta.__new__ that this class should have TPFLAGS_MAPPING set.\n __abc_tpflags__ = 1 << 6 # Py_TPFLAGS_MAPPING\n\n @abstractmethod\n def __getitem__(self, key):\n raise KeyError\n\n def get(self, key, default=None):\n \'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.\'\n try:\n return self[key]\n except KeyError:\n return default\n\n def __contains__(self, key):\n try:\n self[key]\n except KeyError:\n return False\n else:\n return True\n\n def keys(self):\n "D.keys() -> a set-like object providing a view on D\'s keys"\n return KeysView(self)\n\n def items(self):\n "D.items() -> a set-like object providing a view on D\'s items"\n return ItemsView(self)\n\n def values(self):\n "D.values() -> an object providing a view on D\'s values"\n return ValuesView(self)\n\n def __eq__(self, other):\n if not isinstance(other, Mapping):\n return NotImplemented\n return dict(self.items()) == dict(other.items())\n\n __reversed__ = None\n\nMapping.register(mappingproxy)\n\n\nclass MappingView(Sized):\n\n __slots__ = \'_mapping\',\n\n def __init__(self, mapping):\n self._mapping = mapping\n\n def __len__(self):\n return len(self._mapping)\n\n def __repr__(self):\n return \'{0.__class__.__name__}({0._mapping!r})\'.format(self)\n\n __class_getitem__ = classmethod(GenericAlias)\n\n\nclass KeysView(MappingView, Set):\n\n __slots__ = ()\n\n @classmethod\n def _from_iterable(cls, it):\n return set(it)\n\n def __contains__(self, key):\n return key in self._mapping\n\n def __iter__(self):\n yield from self._mapping\n\n\nKeysView.register(dict_keys)\n\n\nclass ItemsView(MappingView, Set):\n\n __slots__ = ()\n\n @classmethod\n def _from_iterable(cls, it):\n return set(it)\n\n def __contains__(self, item):\n key, value = item\n try:\n v = self._mapping[key]\n except KeyError:\n return False\n else:\n return v is value or v == value\n\n def __iter__(self):\n for key in self._mapping:\n yield (key, self._mapping[key])\n\n\nItemsView.register(dict_items)\n\n\nclass ValuesView(MappingView, Collection):\n\n __slots__ = ()\n\n def __contains__(self, value):\n for key in self._mapping:\n v = self._mapping[key]\n if v is value or v == value:\n return True\n return False\n\n def __iter__(self):\n for key in self._mapping:\n yield 
self._mapping[key]\n\n\nValuesView.register(dict_values)\n\n\nclass MutableMapping(Mapping):\n """A MutableMapping is a generic container for associating\n key/value pairs.\n\n This class provides concrete generic implementations of all\n methods except for __getitem__, __setitem__, __delitem__,\n __iter__, and __len__.\n """\n\n __slots__ = ()\n\n @abstractmethod\n def __setitem__(self, key, value):\n raise KeyError\n\n @abstractmethod\n def __delitem__(self, key):\n raise KeyError\n\n __marker = object()\n\n def pop(self, key, default=__marker):\n \'\'\'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.\n If key is not found, d is returned if given, otherwise KeyError is raised.\n \'\'\'\n try:\n value = self[key]\n except KeyError:\n if default is self.__marker:\n raise\n return default\n else:\n del self[key]\n return value\n\n def popitem(self):\n \'\'\'D.popitem() -> (k, v), remove and return some (key, value) pair\n as a 2-tuple; but raise KeyError if D is empty.\n \'\'\'\n try:\n key = next(iter(self))\n except StopIteration:\n raise KeyError from None\n value = self[key]\n del self[key]\n return key, value\n\n def clear(self):\n \'D.clear() -> None. Remove all items from D.\'\n try:\n while True:\n self.popitem()\n except KeyError:\n pass\n\n def update(self, other=(), /, **kwds):\n \'\'\' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.\n If E present and has a .keys() method, does: for k in E: D[k] = E[k]\n If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v\n In either case, this is followed by: for k, v in F.items(): D[k] = v\n \'\'\'\n if isinstance(other, Mapping):\n for key in other:\n self[key] = other[key]\n elif hasattr(other, "keys"):\n for key in other.keys():\n self[key] = other[key]\n else:\n for key, value in other:\n self[key] = value\n for key, value in kwds.items():\n self[key] = value\n\n def setdefault(self, key, default=None):\n \'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D\'\n try:\n return self[key]\n except KeyError:\n self[key] = default\n return default\n\n\nMutableMapping.register(dict)\n\n\n### SEQUENCES ###\n\nclass Sequence(Reversible, Collection):\n """All the operations on a read-only sequence.\n\n Concrete subclasses must override __new__ or __init__,\n __getitem__, and __len__.\n """\n\n __slots__ = ()\n\n # Tell ABCMeta.__new__ that this class should have TPFLAGS_SEQUENCE set.\n __abc_tpflags__ = 1 << 5 # Py_TPFLAGS_SEQUENCE\n\n @abstractmethod\n def __getitem__(self, index):\n raise IndexError\n\n def __iter__(self):\n i = 0\n try:\n while True:\n v = self[i]\n yield v\n i += 1\n except IndexError:\n return\n\n def __contains__(self, value):\n for v in self:\n if v is value or v == value:\n return True\n return False\n\n def __reversed__(self):\n for i in reversed(range(len(self))):\n yield self[i]\n\n def index(self, value, start=0, stop=None):\n \'\'\'S.index(value, [start, [stop]]) -> integer -- return first index of value.\n Raises ValueError if the value is not present.\n\n Supporting start and stop arguments is optional, but\n recommended.\n \'\'\'\n if start is not None and start < 0:\n start = max(len(self) + start, 0)\n if stop is not None and stop < 0:\n stop += len(self)\n\n i = start\n while stop is None or i < stop:\n try:\n v = self[i]\n except IndexError:\n break\n if v is value or v == value:\n return i\n i += 1\n raise ValueError\n\n def count(self, value):\n \'S.count(value) -> integer -- return number of occurrences of value\'\n return sum(1 
for v in self if v is value or v == value)\n\nSequence.register(tuple)\nSequence.register(str)\nSequence.register(range)\nSequence.register(memoryview)\n\nclass _DeprecateByteStringMeta(ABCMeta):\n def __new__(cls, name, bases, namespace, **kwargs):\n if name != "ByteString":\n import warnings\n\n warnings._deprecated(\n "collections.abc.ByteString",\n remove=(3, 14),\n )\n return super().__new__(cls, name, bases, namespace, **kwargs)\n\n def __instancecheck__(cls, instance):\n import warnings\n\n warnings._deprecated(\n "collections.abc.ByteString",\n remove=(3, 14),\n )\n return super().__instancecheck__(instance)\n\nclass ByteString(Sequence, metaclass=_DeprecateByteStringMeta):\n """This unifies bytes and bytearray.\n\n XXX Should add all their methods.\n """\n\n __slots__ = ()\n\nByteString.register(bytes)\nByteString.register(bytearray)\n\n\nclass MutableSequence(Sequence):\n """All the operations on a read-write sequence.\n\n Concrete subclasses must provide __new__ or __init__,\n __getitem__, __setitem__, __delitem__, __len__, and insert().\n """\n\n __slots__ = ()\n\n @abstractmethod\n def __setitem__(self, index, value):\n raise IndexError\n\n @abstractmethod\n def __delitem__(self, index):\n raise IndexError\n\n @abstractmethod\n def insert(self, index, value):\n \'S.insert(index, value) -- insert value before index\'\n raise IndexError\n\n def append(self, value):\n \'S.append(value) -- append value to the end of the sequence\'\n self.insert(len(self), value)\n\n def clear(self):\n \'S.clear() -> None -- remove all items from S\'\n try:\n while True:\n self.pop()\n except IndexError:\n pass\n\n def reverse(self):\n \'S.reverse() -- reverse *IN PLACE*\'\n n = len(self)\n for i in range(n//2):\n self[i], self[n-i-1] = self[n-i-1], self[i]\n\n def extend(self, values):\n \'S.extend(iterable) -- extend sequence by appending elements from the iterable\'\n if values is self:\n values = list(values)\n for v in values:\n self.append(v)\n\n def pop(self, index=-1):\n \'\'\'S.pop([index]) -> item -- remove and return item at index (default last).\n Raise IndexError if list is empty or index is out of range.\n \'\'\'\n v = self[index]\n del self[index]\n return v\n\n def remove(self, value):\n \'\'\'S.remove(value) -- remove first occurrence of value.\n Raise ValueError if the value is not present.\n \'\'\'\n del self[self.index(value)]\n\n def __iadd__(self, values):\n self.extend(values)\n return self\n\n\nMutableSequence.register(list)\nMutableSequence.register(bytearray) # Multiply inheriting, see ByteString\n')
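# --- Illustrative note (not part of the stickytape bundle) ---
# The vendored _collections_abc module above supplies the ABC mixin machinery
# (Mapping, MutableMapping, Sequence, MutableSequence, ...) that the bundled
# typing and dataclasses modules import. A minimal sketch of what the mixins
# provide, using a hypothetical dict-backed class (names are illustrative):
#
#     from collections.abc import MutableMapping
#
#     class AttrDict(MutableMapping):              # hypothetical example class
#         def __init__(self):          self._d = {}
#         def __getitem__(self, k):    return self._d[k]
#         def __setitem__(self, k, v): self._d[k] = v
#         def __delitem__(self, k):    del self._d[k]
#         def __iter__(self):          return iter(self._d)
#         def __len__(self):           return len(self._d)
#
#     # get(), pop(), popitem(), setdefault(), update(), keys(), items(),
#     # values() now come for free from the concrete mixin methods above.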
__stickytape_write_module('reprlib.py', b'"""Redo the builtin repr() (representation) but with limits on most sizes."""\n\n__all__ = ["Repr", "repr", "recursive_repr"]\n\nimport builtins\nfrom itertools import islice\nfrom _thread import get_ident\n\ndef recursive_repr(fillvalue=\'...\'):\n \'Decorator to make a repr function return fillvalue for a recursive call\'\n\n def decorating_function(user_function):\n repr_running = set()\n\n def wrapper(self):\n key = id(self), get_ident()\n if key in repr_running:\n return fillvalue\n repr_running.add(key)\n try:\n result = user_function(self)\n finally:\n repr_running.discard(key)\n return result\n\n # Can\'t use functools.wraps() here because of bootstrap issues\n wrapper.__module__ = getattr(user_function, \'__module__\')\n wrapper.__doc__ = getattr(user_function, \'__doc__\')\n wrapper.__name__ = getattr(user_function, \'__name__\')\n wrapper.__qualname__ = getattr(user_function, \'__qualname__\')\n wrapper.__annotations__ = getattr(user_function, \'__annotations__\', {})\n wrapper.__type_params__ = getattr(user_function, \'__type_params__\', ())\n return wrapper\n\n return decorating_function\n\nclass Repr:\n\n def __init__(\n self, *, maxlevel=6, maxtuple=6, maxlist=6, maxarray=5, maxdict=4,\n maxset=6, maxfrozenset=6, maxdeque=6, maxstring=30, maxlong=40,\n maxother=30, fillvalue=\'...\', indent=None,\n ):\n self.maxlevel = maxlevel\n self.maxtuple = maxtuple\n self.maxlist = maxlist\n self.maxarray = maxarray\n self.maxdict = maxdict\n self.maxset = maxset\n self.maxfrozenset = maxfrozenset\n self.maxdeque = maxdeque\n self.maxstring = maxstring\n self.maxlong = maxlong\n self.maxother = maxother\n self.fillvalue = fillvalue\n self.indent = indent\n\n def repr(self, x):\n return self.repr1(x, self.maxlevel)\n\n def repr1(self, x, level):\n typename = type(x).__name__\n if \' \' in typename:\n parts = typename.split()\n typename = \'_\'.join(parts)\n if hasattr(self, \'repr_\' + typename):\n return getattr(self, \'repr_\' + typename)(x, level)\n else:\n return self.repr_instance(x, level)\n\n def _join(self, pieces, level):\n if self.indent is None:\n return \', \'.join(pieces)\n if not pieces:\n return \'\'\n indent = self.indent\n if isinstance(indent, int):\n if indent < 0:\n raise ValueError(\n f\'Repr.indent cannot be negative int (was {indent!r})\'\n )\n indent *= \' \'\n try:\n sep = \',\\n\' + (self.maxlevel - level + 1) * indent\n except TypeError as error:\n raise TypeError(\n f\'Repr.indent must be a str, int or None, not {type(indent)}\'\n ) from error\n return sep.join((\'\', *pieces, \'\'))[1:-len(indent) or None]\n\n def _repr_iterable(self, x, level, left, right, maxiter, trail=\'\'):\n n = len(x)\n if level <= 0 and n:\n s = self.fillvalue\n else:\n newlevel = level - 1\n repr1 = self.repr1\n pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]\n if n > maxiter:\n pieces.append(self.fillvalue)\n s = self._join(pieces, level)\n if n == 1 and trail and self.indent is None:\n right = trail + right\n return \'%s%s%s\' % (left, s, right)\n\n def repr_tuple(self, x, level):\n return self._repr_iterable(x, level, \'(\', \')\', self.maxtuple, \',\')\n\n def repr_list(self, x, level):\n return self._repr_iterable(x, level, \'[\', \']\', self.maxlist)\n\n def repr_array(self, x, level):\n if not x:\n return "array(\'%s\')" % x.typecode\n header = "array(\'%s\', [" % x.typecode\n return self._repr_iterable(x, level, header, \'])\', self.maxarray)\n\n def repr_set(self, x, level):\n if not x:\n return \'set()\'\n x = 
_possibly_sorted(x)\n return self._repr_iterable(x, level, \'{\', \'}\', self.maxset)\n\n def repr_frozenset(self, x, level):\n if not x:\n return \'frozenset()\'\n x = _possibly_sorted(x)\n return self._repr_iterable(x, level, \'frozenset({\', \'})\',\n self.maxfrozenset)\n\n def repr_deque(self, x, level):\n return self._repr_iterable(x, level, \'deque([\', \'])\', self.maxdeque)\n\n def repr_dict(self, x, level):\n n = len(x)\n if n == 0:\n return \'{}\'\n if level <= 0:\n return \'{\' + self.fillvalue + \'}\'\n newlevel = level - 1\n repr1 = self.repr1\n pieces = []\n for key in islice(_possibly_sorted(x), self.maxdict):\n keyrepr = repr1(key, newlevel)\n valrepr = repr1(x[key], newlevel)\n pieces.append(\'%s: %s\' % (keyrepr, valrepr))\n if n > self.maxdict:\n pieces.append(self.fillvalue)\n s = self._join(pieces, level)\n return \'{%s}\' % (s,)\n\n def repr_str(self, x, level):\n s = builtins.repr(x[:self.maxstring])\n if len(s) > self.maxstring:\n i = max(0, (self.maxstring-3)//2)\n j = max(0, self.maxstring-3-i)\n s = builtins.repr(x[:i] + x[len(x)-j:])\n s = s[:i] + self.fillvalue + s[len(s)-j:]\n return s\n\n def repr_int(self, x, level):\n s = builtins.repr(x) # XXX Hope this isn\'t too slow...\n if len(s) > self.maxlong:\n i = max(0, (self.maxlong-3)//2)\n j = max(0, self.maxlong-3-i)\n s = s[:i] + self.fillvalue + s[len(s)-j:]\n return s\n\n def repr_instance(self, x, level):\n try:\n s = builtins.repr(x)\n # Bugs in x.__repr__() can cause arbitrary\n # exceptions -- then make up something\n except Exception:\n return \'<%s instance at %#x>\' % (x.__class__.__name__, id(x))\n if len(s) > self.maxother:\n i = max(0, (self.maxother-3)//2)\n j = max(0, self.maxother-3-i)\n s = s[:i] + self.fillvalue + s[len(s)-j:]\n return s\n\n\ndef _possibly_sorted(x):\n # Since not all sequences of items can be sorted and comparison\n # functions may raise arbitrary exceptions, return an unsorted\n # sequence in that case.\n try:\n return sorted(x)\n except Exception:\n return list(x)\n\naRepr = Repr()\nrepr = aRepr.repr\n')
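# --- Illustrative note (not part of the stickytape bundle) ---
# The vendored reprlib module above provides size-limited repr(). A small
# usage sketch of the Repr knobs, assuming the keyword-only defaults shown
# in its __init__ signature:
#
#     import reprlib
#     r = reprlib.Repr(maxlist=3, fillvalue='...')
#     r.repr(list(range(10)))      # -> '[0, 1, 2, ...]'
#     reprlib.repr('x' * 100)      # module-level repr truncates long strings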
__stickytape_write_module('collections/abc.py', b'from _collections_abc import *\nfrom _collections_abc import __all__\nfrom _collections_abc import _CallableGenericAlias\n')
__stickytape_write_module('copyreg.py', b'"""Helper to provide extensibility for pickle.\n\nThis is only useful to add pickle support for extension types defined in\nC, not for instances of user-defined classes.\n"""\n\n__all__ = ["pickle", "constructor",\n "add_extension", "remove_extension", "clear_extension_cache"]\n\ndispatch_table = {}\n\ndef pickle(ob_type, pickle_function, constructor_ob=None):\n if not callable(pickle_function):\n raise TypeError("reduction functions must be callable")\n dispatch_table[ob_type] = pickle_function\n\n # The constructor_ob function is a vestige of safe for unpickling.\n # There is no reason for the caller to pass it anymore.\n if constructor_ob is not None:\n constructor(constructor_ob)\n\ndef constructor(object):\n if not callable(object):\n raise TypeError("constructors must be callable")\n\n# Example: provide pickling support for complex numbers.\n\ndef pickle_complex(c):\n return complex, (c.real, c.imag)\n\npickle(complex, pickle_complex, complex)\n\ndef pickle_union(obj):\n import functools, operator\n return functools.reduce, (operator.or_, obj.__args__)\n\npickle(type(int | str), pickle_union)\n\n# Support for pickling new-style objects\n\ndef _reconstructor(cls, base, state):\n if base is object:\n obj = object.__new__(cls)\n else:\n obj = base.__new__(cls, state)\n if base.__init__ != object.__init__:\n base.__init__(obj, state)\n return obj\n\n_HEAPTYPE = 1<<9\n_new_type = type(int.__new__)\n\n# Python code for object.__reduce_ex__ for protocols 0 and 1\n\ndef _reduce_ex(self, proto):\n assert proto < 2\n cls = self.__class__\n for base in cls.__mro__:\n if hasattr(base, \'__flags__\') and not base.__flags__ & _HEAPTYPE:\n break\n new = base.__new__\n if isinstance(new, _new_type) and new.__self__ is base:\n break\n else:\n base = object # not really reachable\n if base is object:\n state = None\n else:\n if base is cls:\n raise TypeError(f"cannot pickle {cls.__name__!r} object")\n state = base(self)\n args = (cls, base, state)\n try:\n getstate = self.__getstate__\n except AttributeError:\n if getattr(self, "__slots__", None):\n raise TypeError(f"cannot pickle {cls.__name__!r} object: "\n f"a class that defines __slots__ without "\n f"defining __getstate__ cannot be pickled "\n f"with protocol {proto}") from None\n try:\n dict = self.__dict__\n except AttributeError:\n dict = None\n else:\n if (type(self).__getstate__ is object.__getstate__ and\n getattr(self, "__slots__", None)):\n raise TypeError("a class that defines __slots__ without "\n "defining __getstate__ cannot be pickled")\n dict = getstate()\n if dict:\n return _reconstructor, args, dict\n else:\n return _reconstructor, args\n\n# Helper for __reduce_ex__ protocol 2\n\ndef __newobj__(cls, *args):\n return cls.__new__(cls, *args)\n\ndef __newobj_ex__(cls, args, kwargs):\n """Used by pickle protocol 4, instead of __newobj__ to allow classes with\n keyword-only arguments to be pickled correctly.\n """\n return cls.__new__(cls, *args, **kwargs)\n\ndef _slotnames(cls):\n """Return a list of slot names for a given class.\n\n This needs to find slots defined by the class and its bases, so we\n can\'t simply return the __slots__ attribute. We must walk down\n the Method Resolution Order and concatenate the __slots__ of each\n class found there. 
(This assumes classes don\'t modify their\n __slots__ attribute to misrepresent their slots after the class is\n defined.)\n """\n\n # Get the value from a cache in the class if possible\n names = cls.__dict__.get("__slotnames__")\n if names is not None:\n return names\n\n # Not cached -- calculate the value\n names = []\n if not hasattr(cls, "__slots__"):\n # This class has no slots\n pass\n else:\n # Slots found -- gather slot names from all base classes\n for c in cls.__mro__:\n if "__slots__" in c.__dict__:\n slots = c.__dict__[\'__slots__\']\n # if class has a single slot, it can be given as a string\n if isinstance(slots, str):\n slots = (slots,)\n for name in slots:\n # special descriptors\n if name in ("__dict__", "__weakref__"):\n continue\n # mangled names\n elif name.startswith(\'__\') and not name.endswith(\'__\'):\n stripped = c.__name__.lstrip(\'_\')\n if stripped:\n names.append(\'_%s%s\' % (stripped, name))\n else:\n names.append(name)\n else:\n names.append(name)\n\n # Cache the outcome in the class if at all possible\n try:\n cls.__slotnames__ = names\n except:\n pass # But don\'t die if we can\'t\n\n return names\n\n# A registry of extension codes. This is an ad-hoc compression\n# mechanism. Whenever a global reference to <module>, <name> is about\n# to be pickled, the (<module>, <name>) tuple is looked up here to see\n# if it is a registered extension code for it. Extension codes are\n# universal, so that the meaning of a pickle does not depend on\n# context. (There are also some codes reserved for local use that\n# don\'t have this restriction.) Codes are positive ints; 0 is\n# reserved.\n\n_extension_registry = {} # key -> code\n_inverted_registry = {} # code -> key\n_extension_cache = {} # code -> object\n# Don\'t ever rebind those names: pickling grabs a reference to them when\n# it\'s initialized, and won\'t see a rebinding.\n\ndef add_extension(module, name, code):\n """Register an extension code."""\n code = int(code)\n if not 1 <= code <= 0x7fffffff:\n raise ValueError("code out of range")\n key = (module, name)\n if (_extension_registry.get(key) == code and\n _inverted_registry.get(code) == key):\n return # Redundant registrations are benign\n if key in _extension_registry:\n raise ValueError("key %s is already registered with code %s" %\n (key, _extension_registry[key]))\n if code in _inverted_registry:\n raise ValueError("code %s is already in use for key %s" %\n (code, _inverted_registry[code]))\n _extension_registry[key] = code\n _inverted_registry[code] = key\n\ndef remove_extension(module, name, code):\n """Unregister an extension code. For testing only."""\n key = (module, name)\n if (_extension_registry.get(key) != code or\n _inverted_registry.get(code) != key):\n raise ValueError("key %s is not registered with code %s" %\n (key, code))\n del _extension_registry[key]\n del _inverted_registry[code]\n if code in _extension_cache:\n del _extension_cache[code]\n\ndef clear_extension_cache():\n _extension_cache.clear()\n\n# Standard extension code assignments\n\n# Reserved ranges\n\n# First Last Count Purpose\n# 1 127 127 Reserved for Python standard library\n# 128 191 64 Reserved for Zope\n# 192 239 48 Reserved for 3rd parties\n# 240 255 16 Reserved for private use (will never be assigned)\n# 256 Inf Inf Reserved for future assignment\n\n# Extension codes are assigned by the Python Software Foundation.\n')
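# --- Illustrative note (not part of the stickytape bundle) ---
# The vendored copyreg module above holds pickle's dispatch_table and the
# extension-code registry. A sketch of the registration API it exposes (the
# complex-number reducer is the module's own built-in example; Point and
# pickle_point below are hypothetical):
#
#     import copyreg
#     def pickle_point(p):                          # hypothetical reducer
#         return Point, (p.x, p.y)
#     copyreg.pickle(Point, pickle_point)           # register the reducer
#     copyreg.add_extension('mypkg', 'Point', 240)  # codes 240-255: private use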
__stickytape_write_module('funcparserlib/__init__.py', b'')
__stickytape_write_module('funcparserlib/lexer.py', b'# -*- coding: utf-8 -*-\n\n# Copyright \xc2\xa9 2009/2021 Andrey Vlasovskikh\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this\n# software and associated documentation files (the "Software"), to deal in the Software\n# without restriction, including without limitation the rights to use, copy, modify,\n# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to the following\n# conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies\n# or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\n# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE\n# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import unicode_literals\n\n__all__ = ["make_tokenizer", "TokenSpec", "Token", "LexerError"]\n\nimport re\n\n\nclass LexerError(Exception):\n def __init__(self, place, msg):\n self.place = place\n self.msg = msg\n\n def __str__(self):\n s = "cannot tokenize data"\n line, pos = self.place\n return \'%s: %d,%d: "%s"\' % (s, line, pos, self.msg)\n\n\nclass TokenSpec(object):\n """A token specification for generating a lexer via `make_tokenizer()`."""\n\n def __init__(self, type, pattern, flags=0):\n """Initialize a `TokenSpec` object.\n\n Parameters:\n type (str): User-defined type of the token (e.g. `"name"`, `"number"`,\n `"operator"`)\n pattern (str): Regexp for matching this token type\n flags (int, optional): Regexp flags, the second argument of `re.compile()`\n """\n self.type = type\n self.pattern = pattern\n self.flags = flags\n\n def __repr__(self):\n return "TokenSpec(%r, %r, %r)" % (self.type, self.pattern, self.flags)\n\n\nclass Token(object):\n """A token object that represents a substring of certain type in your text.\n\n You can compare tokens for equality using the `==` operator. Tokens also define\n custom `repr()` and `str()`.\n\n Attributes:\n type (str): User-defined type of the token (e.g. 
`"name"`, `"number"`,\n `"operator"`)\n value (str): Text value of the token\n start (Optional[Tuple[int, int]]): Start position (_line_, _column_)\n end (Optional[Tuple[int, int]]): End position (_line_, _column_)\n """\n\n def __init__(self, type, value, start=None, end=None):\n """Initialize a `Token` object."""\n self.type = type\n self.value = value\n self.start = start\n self.end = end\n\n def __repr__(self):\n return "Token(%r, %r)" % (self.type, self.value)\n\n def __eq__(self, other):\n # FIXME: Case sensitivity is assumed here\n if other is None:\n return False\n else:\n return self.type == other.type and self.value == other.value\n\n def _pos_str(self):\n if self.start is None or self.end is None:\n return ""\n else:\n sl, sp = self.start\n el, ep = self.end\n return "%d,%d-%d,%d:" % (sl, sp, el, ep)\n\n def __str__(self):\n s = "%s %s \'%s\'" % (self._pos_str(), self.type, self.value)\n return s.strip()\n\n @property\n def name(self):\n return self.value\n\n def pformat(self):\n return "%s %s \'%s\'" % (\n self._pos_str().ljust(20), # noqa\n self.type.ljust(14),\n self.value,\n )\n\n\ndef make_tokenizer(specs):\n # noinspection GrazieInspection\n """Make a function that tokenizes text based on the regexp specs.\n\n Type: `(Sequence[TokenSpec | Tuple]) -> Callable[[str], Iterable[Token]]`\n\n A token spec is `TokenSpec` instance.\n\n !!! Note\n\n For legacy reasons, a token spec may also be a tuple of (_type_, _args_), where\n _type_ sets the value of `Token.type` for the token, and _args_ are the\n positional arguments for `re.compile()`: either just (_pattern_,) or\n (_pattern_, _flags_).\n\n It returns a tokenizer function that takes a string and returns an iterable of\n `Token` objects, or raises `LexerError` if it cannot tokenize the string according\n to its token specs.\n\n Examples:\n\n ```pycon\n >>> tokenize = make_tokenizer([\n ... TokenSpec("space", r"\\\\s+"),\n ... TokenSpec("id", r"\\\\w+"),\n ... TokenSpec("op", r"[,!]"),\n ... ])\n >>> text = "Hello, World!"\n >>> [t for t in tokenize(text) if t.type != "space"] # noqa\n [Token(\'id\', \'Hello\'), Token(\'op\', \',\'), Token(\'id\', \'World\'), Token(\'op\', \'!\')]\n >>> text = "Bye?"\n >>> list(tokenize(text))\n Traceback (most recent call last):\n ...\n lexer.LexerError: cannot tokenize data: 1,4: "Bye?"\n\n ```\n """\n compiled = []\n for spec in specs:\n if isinstance(spec, TokenSpec):\n c = spec.type, re.compile(spec.pattern, spec.flags)\n else:\n name, args = spec\n c = name, re.compile(*args)\n compiled.append(c)\n\n def match_specs(s, i, position):\n line, pos = position\n for type, regexp in compiled:\n m = regexp.match(s, i)\n if m is not None:\n value = m.group()\n nls = value.count("\\n")\n n_line = line + nls\n if nls == 0:\n n_pos = pos + len(value)\n else:\n n_pos = len(value) - value.rfind("\\n") - 1\n return Token(type, value, (line, pos + 1), (n_line, n_pos))\n else:\n err_line = s.splitlines()[line - 1]\n raise LexerError((line, pos + 1), err_line)\n\n def f(s):\n length = len(s)\n line, pos = 1, 0\n i = 0\n while i < length:\n t = match_specs(s, i, (line, pos))\n yield t\n line, pos = t.end\n i += len(t.value)\n\n return f\n\n\n# This is an example of token specs. 
See also [this article][1] for a\n# discussion of searching for multiline comments using regexps (including `*?`).\n#\n# [1]: http://ostermiller.org/findcomment.html\n_example_token_specs = [\n TokenSpec("COMMENT", r"\\(\\*(.|[\\r\\n])*?\\*\\)", re.MULTILINE),\n TokenSpec("COMMENT", r"\\{(.|[\\r\\n])*?\\}", re.MULTILINE),\n TokenSpec("COMMENT", r"//.*"),\n TokenSpec("NL", r"[\\r\\n]+"),\n TokenSpec("SPACE", r"[ \\t\\r\\n]+"),\n TokenSpec("NAME", r"[A-Za-z_][A-Za-z_0-9]*"),\n TokenSpec("REAL", r"[0-9]+\\.[0-9]*([Ee][+\\-]?[0-9]+)*"),\n TokenSpec("INT", r"[0-9]+"),\n TokenSpec("INT", r"\\$[0-9A-Fa-f]+"),\n TokenSpec("OP", r"(\\.\\.)|(<>)|(<=)|(>=)|(:=)|[;,=\\(\\):\\[\\]\\.+\\-<>\\*/@\\^]"),\n TokenSpec("STRING", r"\'([^\']|(\'\'))*\'"),\n TokenSpec("CHAR", r"#[0-9]+"),\n TokenSpec("CHAR", r"#\\$[0-9A-Fa-f]+"),\n]\n# tokenize = make_tokenizer(_example_token_specs)\n')
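# --- Illustrative note (not part of the stickytape bundle) ---
# The vendored funcparserlib.lexer above is the regexp-based tokenizer,
# presumably consumed by the Memelang parser later in this file. Usage sketch
# taken from the module's own docstring:
#
#     from funcparserlib.lexer import make_tokenizer, TokenSpec
#     tokenize = make_tokenizer([
#         TokenSpec("space", r"\s+"),
#         TokenSpec("id", r"\w+"),
#         TokenSpec("op", r"[,!]"),
#     ])
#     [t for t in tokenize("Hello, World!") if t.type != "space"]
#     # -> [Token('id', 'Hello'), Token('op', ','),
#     #     Token('id', 'World'), Token('op', '!')]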
__stickytape_write_module('funcparserlib/parser.py', b'# -*- coding: utf-8 -*-\n\n# Copyright \xc2\xa9 2009/2021 Andrey Vlasovskikh\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this\n# software and associated documentation files (the "Software"), to deal in the Software\n# without restriction, including without limitation the rights to use, copy, modify,\n# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to the following\n# conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies\n# or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\n# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE\n# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n"""Functional parsing combinators.\n\nParsing combinators define an internal domain-specific language (DSL) for describing\nthe parsing rules of a grammar. The DSL allows you to start with a few primitive\nparsers, then combine your parsers to get more complex ones, and finally cover\nthe whole grammar you want to parse.\n\nThe structure of the language:\n\n* Class `Parser`\n * All the primitives and combinators of the language return `Parser` objects\n * It defines the main `Parser.parse(tokens)` method\n* Primitive parsers\n * `tok(type, value)`, `a(value)`, `some(pred)`, `forward_decl()`, `finished`\n* Parser combinators\n * `p1 + p2`, `p1 | p2`, `p >> f`, `-p`, `maybe(p)`, `many(p)`, `oneplus(p)`,\n `skip(p)`\n* Abstraction\n * Use regular Python variables `p = ... # Expression of type Parser` to define new\n rules (non-terminals) of your grammar\n\nEvery time you apply one of the combinators, you get a new `Parser` object. In other\nwords, the set of `Parser` objects is closed under the means of combination.\n\n!!! Note\n\n We took the parsing combinators language from the book [Introduction to Functional\n Programming][1] and translated it from ML into Python.\n\n [1]: https://www.cl.cam.ac.uk/teaching/Lectures/funprog-jrh-1996/\n"""\n\nfrom __future__ import unicode_literals\n\n__all__ = [\n "some",\n "a",\n "tok",\n "many",\n "pure",\n "finished",\n "maybe",\n "skip",\n "oneplus",\n "forward_decl",\n "NoParseError",\n "Parser",\n]\n\nimport sys\nimport logging\nimport warnings\n\nfrom funcparserlib.lexer import Token\n\nlog = logging.getLogger("funcparserlib")\n\ndebug = False\nif sys.version_info < (3,):\n string_types = (str, unicode) # noqa\nelse:\n string_types = str\n\n\nclass Parser(object):\n """A parser object that can parse a sequence of tokens or can be combined with\n other parsers using `+`, `|`, `>>`, `many()`, and other parsing combinators.\n\n Type: `Parser[A, B]`\n\n The generic variables in the type are: `A` \xe2\x80\x94 the type of the tokens in the\n sequence to parse,`B` \xe2\x80\x94 the type of the parsed value.\n\n In order to define a parser for your grammar:\n\n 1. You start with primitive parsers by calling `a(value)`, `some(pred)`,\n `forward_decl()`, `finished`\n 2. 
You use parsing combinators `p1 + p2`, `p1 | p2`, `p >> f`, `many(p)`, and\n others to combine parsers into a more complex parser\n 3. You can assign complex parsers to variables to define names that correspond to\n the rules of your grammar\n\n !!! Note\n\n The constructor `Parser.__init__()` is considered **internal** and may be\n changed in future versions. Use primitive parsers and parsing combinators to\n construct new parsers.\n """\n\n def __init__(self, p):\n """Wrap the parser function `p` into a `Parser` object."""\n self.name = ""\n self.define(p)\n\n def named(self, name):\n # noinspection GrazieInspection\n """Specify the name of the parser for easier debugging.\n\n Type: `(str) -> Parser[A, B]`\n\n This name is used in the debug-level parsing log. You can also get it via the\n `Parser.name` attribute.\n\n Examples:\n\n ```pycon\n >>> expr = (a("x") + a("y")).named("expr")\n >>> expr.name\n \'expr\'\n\n ```\n\n ```pycon\n >>> expr = a("x") + a("y")\n >>> expr.name\n "(\'x\', \'y\')"\n\n ```\n\n !!! Note\n\n You can enable the parsing log this way:\n\n ```python\n import logging\n logging.basicConfig(level=logging.DEBUG)\n import funcparserlib.parser\n funcparserlib.parser.debug = True\n ```\n\n The way to enable the parsing log may be changed in future versions.\n """\n self.name = name\n return self\n\n def define(self, p):\n """Define the parser created earlier as a forward declaration.\n\n Type: `(Parser[A, B]) -> None`\n\n Use `p = forward_decl()` in combination with `p.define(...)` to define\n recursive parsers.\n\n See the examples in the docs for `forward_decl()`.\n """\n f = getattr(p, "run", p)\n if debug:\n setattr(self, "_run", f)\n else:\n setattr(self, "run", f)\n self.named(getattr(p, "name", p.__doc__))\n\n def run(self, tokens, s):\n """Run the parser against the tokens with the specified parsing state.\n\n Type: `(Sequence[A], State) -> Tuple[B, State]`\n\n The parsing state includes the current position in the sequence being parsed,\n and the position of the rightmost token that has been consumed while parsing for\n better error messages.\n\n If the parser fails to parse the tokens, it raises `NoParseError`.\n\n !!! Warning\n\n This is method is **internal** and may be changed in future versions. Use\n `Parser.parse(tokens)` instead and let the parser object take care of\n updating the parsing state.\n """\n if debug:\n log.debug("trying %s" % self.name)\n return self._run(tokens, s) # noqa\n\n def _run(self, tokens, s):\n raise NotImplementedError("you must define() a parser")\n\n def parse(self, tokens):\n """Parse the sequence of tokens and return the parsed value.\n\n Type: `(Sequence[A]) -> B`\n\n It takes a sequence of tokens of arbitrary type `A` and returns the parsed value\n of arbitrary type `B`.\n\n If the parser fails to parse the tokens, it raises `NoParseError`.\n\n !!! Note\n\n Although `Parser.parse()` can parse sequences of any objects (including\n `str` which is a sequence of `str` chars), **the recommended way** is\n parsing sequences of `Token` objects.\n\n You **should** use a regexp-based tokenizer `make_tokenizer()` defined in\n `funcparserlib.lexer` to convert your text into a sequence of `Token`\n objects before parsing it. 
You will get more readable parsing error messages\n (as `Token` objects contain their position in the source file) and good\n separation of the lexical and syntactic levels of the grammar.\n """\n try:\n (tree, _) = self.run(tokens, State(0, 0, None))\n return tree\n except NoParseError as e:\n max = e.state.max\n if len(tokens) > max:\n t = tokens[max]\n if isinstance(t, Token):\n if t.start is None or t.end is None:\n loc = ""\n else:\n s_line, s_pos = t.start\n e_line, e_pos = t.end\n loc = "%d,%d-%d,%d: " % (s_line, s_pos, e_line, e_pos)\n msg = "%s%s: %r" % (loc, e.msg, t.value)\n elif isinstance(t, string_types):\n msg = "%s: %r" % (e.msg, t)\n else:\n msg = "%s: %s" % (e.msg, t)\n else:\n msg = "got unexpected end of input"\n if e.state.parser is not None:\n msg = "%s, expected: %s" % (msg, e.state.parser.name)\n e.msg = msg\n raise\n\n def __add__(self, other):\n """Sequential combination of parsers. It runs this parser, then the other\n parser.\n\n The return value of the resulting parser is a tuple of each parsed value in\n the sum of parsers. We merge all parsing results of `p1 + p2 + ... + pN` into a\n single tuple. It means that the parsing result may be a 2-tuple, a 3-tuple,\n a 4-tuple, etc. of parsed values. You avoid this by transforming the parsed\n pair into a new value using the `>>` combinator.\n\n You can also skip some parsing results in the resulting parsers by using `-p`\n or `skip(p)` for some parsers in your sum of parsers. It means that the parsing\n result might be a single value, not a tuple of parsed values. See the docs\n for `Parser.__neg__()` for more examples.\n\n Overloaded types (lots of them to provide stricter checking for the quite\n dynamic return type of this method):\n\n * `(self: Parser[A, B], _IgnoredParser[A]) -> Parser[A, B]`\n * `(self: Parser[A, B], Parser[A, C]) -> _TupleParser[A, Tuple[B, C]]`\n * `(self: _TupleParser[A, B], _IgnoredParser[A]) -> _TupleParser[A, B]`\n * `(self: _TupleParser[A, B], Parser[A, Any]) -> Parser[A, Any]`\n * `(self: _IgnoredParser[A], _IgnoredParser[A]) -> _IgnoredParser[A]`\n * `(self: _IgnoredParser[A], Parser[A, C]) -> Parser[A, C]`\n\n Examples:\n\n ```pycon\n >>> expr = a("x") + a("y")\n >>> expr.parse("xy")\n (\'x\', \'y\')\n\n ```\n\n ```pycon\n >>> expr = a("x") + a("y") + a("z")\n >>> expr.parse("xyz")\n (\'x\', \'y\', \'z\')\n\n ```\n\n ```pycon\n >>> expr = a("x") + a("y")\n >>> expr.parse("xz")\n Traceback (most recent call last):\n ...\n parser.NoParseError: got unexpected token: \'z\', expected: \'y\'\n\n ```\n """\n\n def magic(v1, v2):\n if isinstance(v1, _Tuple):\n return _Tuple(v1 + (v2,))\n else:\n return _Tuple((v1, v2))\n\n @_TupleParser\n def _add(tokens, s):\n (v1, s2) = self.run(tokens, s)\n (v2, s3) = other.run(tokens, s2)\n return magic(v1, v2), s3\n\n @Parser\n def ignored_right(tokens, s):\n v, s2 = self.run(tokens, s)\n _, s3 = other.run(tokens, s2)\n return v, s3\n\n name = "(%s, %s)" % (self.name, other.name)\n if isinstance(other, _IgnoredParser):\n return ignored_right.named(name)\n else:\n return _add.named(name)\n\n def __or__(self, other):\n """Choice combination of parsers.\n\n It runs this parser and returns its result. 
If the parser fails, it runs the\n other parser.\n\n Examples:\n\n ```pycon\n >>> expr = a("x") | a("y")\n >>> expr.parse("x")\n \'x\'\n >>> expr.parse("y")\n \'y\'\n >>> expr.parse("z")\n Traceback (most recent call last):\n ...\n parser.NoParseError: got unexpected token: \'z\', expected: \'x\' or \'y\'\n\n ```\n """\n\n @Parser\n def _or(tokens, s):\n try:\n return self.run(tokens, s)\n except NoParseError as e:\n state = e.state\n try:\n return other.run(tokens, State(s.pos, state.max, state.parser))\n except NoParseError as e:\n if s.pos == e.state.max:\n e.state = State(e.state.pos, e.state.max, _or)\n raise\n\n _or.name = "%s or %s" % (self.name, other.name)\n return _or\n\n def __rshift__(self, f):\n """Transform the parsing result by applying the specified function.\n\n Type: `(Callable[[B], C]) -> Parser[A, C]`\n\n You can use it for transforming the parsed value into another value before\n including it into the parse tree (the AST).\n\n Examples:\n\n ```pycon\n >>> def make_canonical_name(s):\n ... return s.lower()\n >>> expr = (a("D") | a("d")) >> make_canonical_name\n >>> expr.parse("D")\n \'d\'\n >>> expr.parse("d")\n \'d\'\n\n ```\n """\n\n @Parser\n def _shift(tokens, s):\n (v, s2) = self.run(tokens, s)\n return f(v), s2\n\n return _shift.named(self.name)\n\n def bind(self, f):\n """Bind the parser to a monadic function that returns a new parser.\n\n Type: `(Callable[[B], Parser[A, C]]) -> Parser[A, C]`\n\n Also known as `>>=` in Haskell.\n\n !!! Note\n\n You can parse any context-free grammar without resorting to `bind`. Due\n to its poor performance please use it only when you really need it.\n """\n\n @Parser\n def _bind(tokens, s):\n (v, s2) = self.run(tokens, s)\n return f(v).run(tokens, s2)\n\n _bind.name = "(%s >>=)" % (self.name,)\n return _bind\n\n def __neg__(self):\n """Return a parser that parses the same tokens, but its parsing result is\n ignored by the sequential `+` combinator.\n\n Type: `(Parser[A, B]) -> _IgnoredParser[A]`\n\n You can use it for throwing away elements of concrete syntax (e.g. `","`,\n `";"`).\n\n Examples:\n\n ```pycon\n >>> expr = -a("x") + a("y")\n >>> expr.parse("xy")\n \'y\'\n\n ```\n\n ```pycon\n >>> expr = a("x") + -a("y")\n >>> expr.parse("xy")\n \'x\'\n\n ```\n\n ```pycon\n >>> expr = a("x") + -a("y") + a("z")\n >>> expr.parse("xyz")\n (\'x\', \'z\')\n\n ```\n\n ```pycon\n >>> expr = -a("x") + a("y") + -a("z")\n >>> expr.parse("xyz")\n \'y\'\n\n ```\n\n ```pycon\n >>> expr = -a("x") + a("y")\n >>> expr.parse("yz")\n Traceback (most recent call last):\n ...\n parser.NoParseError: got unexpected token: \'y\', expected: \'x\'\n\n ```\n\n ```pycon\n >>> expr = a("x") + -a("y")\n >>> expr.parse("xz")\n Traceback (most recent call last):\n ...\n parser.NoParseError: got unexpected token: \'z\', expected: \'y\'\n\n ```\n\n !!! Note\n\n You **should not** pass the resulting parser to any combinators other than\n `+`. You **should** have at least one non-skipped value in your\n `p1 + p2 + ... + pN`. 
The parsed value of `-p` is an **internal** `_Ignored`\n object, not intended for actual use.\n """\n return _IgnoredParser(self)\n\n def __class_getitem__(cls, key):\n return cls\n\n\nclass State(object):\n """Parsing state that is maintained basically for error reporting.\n\n It consists of the current position `pos` in the sequence being parsed, and the\n position `max` of the rightmost token that has been consumed while parsing.\n """\n\n def __init__(self, pos, max, parser=None):\n self.pos = pos\n self.max = max\n self.parser = parser\n\n def __str__(self):\n return str((self.pos, self.max))\n\n def __repr__(self):\n return "State(%r, %r)" % (self.pos, self.max)\n\n\nclass NoParseError(Exception):\n def __init__(self, msg, state):\n self.msg = msg\n self.state = state\n\n def __str__(self):\n return self.msg\n\n\nclass _Tuple(tuple):\n pass\n\n\nclass _TupleParser(Parser):\n pass\n\n\nclass _Ignored(object):\n def __init__(self, value):\n self.value = value\n\n def __repr__(self):\n return "_Ignored(%s)" % repr(self.value)\n\n def __eq__(self, other):\n return isinstance(other, _Ignored) and self.value == other.value\n\n\n@Parser\ndef finished(tokens, s):\n """A parser that throws an exception if there are any unparsed tokens left in the\n sequence."""\n if s.pos >= len(tokens):\n return None, s\n else:\n s2 = State(s.pos, s.max, finished if s.pos == s.max else s.parser)\n raise NoParseError("got unexpected token", s2)\n\n\nfinished.name = "end of input"\n\n\ndef many(p):\n """Return a parser that applies the parser `p` as many times as it succeeds at\n parsing the tokens.\n\n Return a parser that infinitely applies the parser `p` to the input sequence\n of tokens as long as it successfully parses them. The parsed value is a list of\n the sequentially parsed values.\n\n Examples:\n\n ```pycon\n >>> expr = many(a("x"))\n >>> expr.parse("x")\n [\'x\']\n >>> expr.parse("xx")\n [\'x\', \'x\']\n >>> expr.parse("xxxy") # noqa\n [\'x\', \'x\', \'x\']\n >>> expr.parse("y")\n []\n\n ```\n """\n\n @Parser\n def _many(tokens, s):\n res = []\n try:\n while True:\n (v, s) = p.run(tokens, s)\n res.append(v)\n except NoParseError as e:\n s2 = State(s.pos, e.state.max, e.state.parser)\n if debug:\n log.debug(\n "*matched* %d instances of %s, new state = %s"\n % (len(res), _many.name, s2)\n )\n return res, s2\n\n _many.name = "{ %s }" % p.name\n return _many\n\n\ndef some(pred):\n """Return a parser that parses a token if it satisfies the predicate `pred`.\n\n Type: `(Callable[[A], bool]) -> Parser[A, A]`\n\n Examples:\n\n ```pycon\n >>> expr = some(lambda s: s.isalpha()).named(\'alpha\')\n >>> expr.parse("x")\n \'x\'\n >>> expr.parse("y")\n \'y\'\n >>> expr.parse("1")\n Traceback (most recent call last):\n ...\n parser.NoParseError: got unexpected token: \'1\', expected: alpha\n\n ```\n\n !!! Warning\n\n The `some()` combinator is quite slow and may be changed or removed in future\n versions. If you need a parser for a token by its type (e.g. any identifier)\n and maybe its value, use `tok(type[, value])` instead. 
You should use\n `make_tokenizer()` from `funcparserlib.lexer` to tokenize your text first.\n """\n\n @Parser\n def _some(tokens, s):\n if s.pos >= len(tokens):\n s2 = State(s.pos, s.max, _some if s.pos == s.max else s.parser)\n raise NoParseError("got unexpected end of input", s2)\n else:\n t = tokens[s.pos]\n if pred(t):\n pos = s.pos + 1\n s2 = State(pos, max(pos, s.max), s.parser)\n if debug:\n log.debug("*matched* %r, new state = %s" % (t, s2))\n return t, s2\n else:\n s2 = State(s.pos, s.max, _some if s.pos == s.max else s.parser)\n if debug:\n log.debug(\n "failed %r, state = %s, expected = %s" % (t, s2, s2.parser.name)\n )\n raise NoParseError("got unexpected token", s2)\n\n _some.name = "some(...)"\n return _some\n\n\ndef a(value):\n """Return a parser that parses a token if it\'s equal to `value`.\n\n Type: `(A) -> Parser[A, A]`\n\n Examples:\n\n ```pycon\n >>> expr = a("x")\n >>> expr.parse("x")\n \'x\'\n >>> expr.parse("y")\n Traceback (most recent call last):\n ...\n parser.NoParseError: got unexpected token: \'y\', expected: \'x\'\n\n ```\n\n !!! Note\n\n Although `Parser.parse()` can parse sequences of any objects (including\n `str` which is a sequence of `str` chars), **the recommended way** is\n parsing sequences of `Token` objects.\n\n You **should** use a regexp-based tokenizer `make_tokenizer()` defined in\n `funcparserlib.lexer` to convert your text into a sequence of `Token` objects\n before parsing it. You will get more readable parsing error messages (as `Token`\n objects contain their position in the source file) and good separation of the\n lexical and syntactic levels of the grammar.\n """\n name = getattr(value, "name", value)\n return some(lambda t: t == value).named(repr(name))\n\n\ndef tok(type, value=None):\n """Return a parser that parses a `Token` and returns the string value of the token.\n\n Type: `(str, Optional[str]) -> Parser[Token, str]`\n\n You can match any token of the specified `type` or you can match a specific token by\n its `type` and `value`.\n\n Examples:\n\n ```pycon\n >>> expr = tok("expr")\n >>> expr.parse([Token("expr", "foo")])\n \'foo\'\n >>> expr.parse([Token("expr", "bar")])\n \'bar\'\n >>> expr.parse([Token("op", "=")])\n Traceback (most recent call last):\n ...\n parser.NoParseError: got unexpected token: \'=\', expected: expr\n\n ```\n\n ```pycon\n >>> expr = tok("op", "=")\n >>> expr.parse([Token("op", "=")])\n \'=\'\n >>> expr.parse([Token("op", "+")])\n Traceback (most recent call last):\n ...\n parser.NoParseError: got unexpected token: \'+\', expected: \'=\'\n\n ```\n\n !!! Note\n\n In order to convert your text to parse into a sequence of `Token` objects,\n use a regexp-based tokenizer `make_tokenizer()` defined in\n `funcparserlib.lexer`. 
You will get more readable parsing error messages (as\n `Token` objects contain their position in the source file) and good separation\n of the lexical and syntactic levels of the grammar.\n """\n if value is not None:\n p = a(Token(type, value))\n else:\n p = some(lambda t: t.type == type).named(type)\n return (p >> (lambda t: t.value)).named(p.name)\n\n\ndef pure(x):\n """Wrap any object into a parser.\n\n Type: `(A) -> Parser[A, A]`\n\n A pure parser doesn\'t touch the tokens sequence, it just returns its pure `x`\n value.\n\n Also known as `return` in Haskell.\n """\n\n @Parser\n def _pure(_, s):\n return x, s\n\n _pure.name = "(pure %r)" % (x,)\n return _pure\n\n\ndef maybe(p):\n """Return a parser that returns `None` if the parser `p` fails.\n\n Examples:\n\n ```pycon\n >>> expr = maybe(a("x"))\n >>> expr.parse("x")\n \'x\'\n >>> expr.parse("y") is None\n True\n\n ```\n """\n return (p | pure(None)).named("[ %s ]" % (p.name,))\n\n\ndef skip(p):\n """An alias for `-p`.\n\n See also the docs for `Parser.__neg__()`.\n """\n return -p\n\n\nclass _IgnoredParser(Parser):\n def __init__(self, p):\n super(_IgnoredParser, self).__init__(p)\n run = self._run if debug else self.run\n\n def ignored(tokens, s):\n v, s2 = run(tokens, s)\n return v if isinstance(v, _Ignored) else _Ignored(v), s2\n\n self.define(ignored)\n self.name = getattr(p, "name", p.__doc__)\n\n def __add__(self, other):\n def ignored_left(tokens, s):\n _, s2 = self.run(tokens, s)\n v, s3 = other.run(tokens, s2)\n return v, s3\n\n if isinstance(other, _IgnoredParser):\n return _IgnoredParser(ignored_left).named(\n "(%s, %s)" % (self.name, other.name)\n )\n else:\n return Parser(ignored_left).named("(%s, %s)" % (self.name, other.name))\n\n\ndef oneplus(p):\n """Return a parser that applies the parser `p` one or more times.\n\n A similar parser combinator `many(p)` means apply `p` zero or more times, whereas\n `oneplus(p)` means apply `p` one or more times.\n\n Examples:\n\n ```pycon\n >>> expr = oneplus(a("x"))\n >>> expr.parse("x")\n [\'x\']\n >>> expr.parse("xx")\n [\'x\', \'x\']\n >>> expr.parse("y")\n Traceback (most recent call last):\n ...\n parser.NoParseError: got unexpected token: \'y\', expected: \'x\'\n\n ```\n """\n\n @Parser\n def _oneplus(tokens, s):\n (v1, s2) = p.run(tokens, s)\n (v2, s3) = many(p).run(tokens, s2)\n return [v1] + v2, s3\n\n _oneplus.name = "(%s, { %s })" % (p.name, p.name)\n return _oneplus\n\n\ndef with_forward_decls(suspension):\n warnings.warn(\n "Use forward_decl() instead:\\n"\n "\\n"\n " p = forward_decl()\\n"\n " ...\\n"\n " p.define(parser_value)\\n",\n DeprecationWarning,\n )\n\n @Parser\n def f(tokens, s):\n return suspension().run(tokens, s)\n\n return f\n\n\ndef forward_decl():\n """Return an undefined parser that can be used as a forward declaration.\n\n Type: `Parser[Any, Any]`\n\n Use `p = forward_decl()` in combination with `p.define(...)` to define recursive\n parsers.\n\n\n Examples:\n\n ```pycon\n >>> expr = forward_decl()\n >>> expr.define(a("x") + maybe(expr) + a("y"))\n >>> expr.parse("xxyy") # noqa\n (\'x\', (\'x\', None, \'y\'), \'y\')\n >>> expr.parse("xxy")\n Traceback (most recent call last):\n ...\n parser.NoParseError: got unexpected end of input, expected: \'y\'\n\n ```\n\n !!! 
Note\n\n If you care about static types, you should add a type hint for your forward\n declaration, so that your type checker can check types in `p.define(...)` later:\n\n ```python\n p: Parser[str, int] = forward_decl()\n p.define(a("x")) # Type checker error\n p.define(a("1") >> int) # OK\n ```\n """\n\n @Parser\n def f(_tokens, _s):\n raise NotImplementedError("you must define() a forward_decl somewhere")\n\n f.name = "forward_decl()"\n return f\n\n\nif __name__ == "__main__":\n import doctest\n\n doctest.testmod()\n')
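# --- Illustrative note (not part of the stickytape bundle) ---
# The vendored funcparserlib.parser above supplies the combinators (a, tok,
# some, many, maybe, skip, forward_decl, finished) from which a grammar such
# as Memelang's can be assembled. A minimal rule sketched from the module's
# own doctest examples (the rule itself is hypothetical):
#
#     from funcparserlib.parser import a, skip, finished
#     word = a("x")
#     expr = word + skip(a(",")) + word + skip(finished)
#     expr.parse(["x", ",", "x"])    # -> ('x', 'x')
#
#     # skip(p) / -p drops a parsed value from the resulting tuple;
#     # p >> f transforms a parsed value before it enters the tree.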
__stickytape_write_module('funcparserlib/util.py', b'# -*- coding: utf-8 -*-\n\n# Copyright \xc2\xa9 2009/2021 Andrey Vlasovskikh\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this\n# software and associated documentation files (the "Software"), to deal in the Software\n# without restriction, including without limitation the rights to use, copy, modify,\n# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to the following\n# conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies\n# or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\n# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE\n# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import unicode_literals\n\n\ndef pretty_tree(x, kids, show):\n """Return a pseudo-graphic tree representation of the object `x` similar to the\n `tree` command in Unix.\n\n Type: `(T, Callable[[T], List[T]], Callable[[T], str]) -> str`\n\n It applies the parameter `show` (which is a function of type `(T) -> str`) to get a\n textual representation of the objects to show.\n\n It applies the parameter `kids` (which is a function of type `(T) -> List[T]`) to\n list the children of the object to show.\n\n Examples:\n\n ```pycon\n >>> print(pretty_tree(\n ... ["foo", ["bar", "baz"], "quux"],\n ... lambda obj: obj if isinstance(obj, list) else [],\n ... lambda obj: "[]" if isinstance(obj, list) else str(obj),\n ... ))\n []\n |-- foo\n |-- []\n | |-- bar\n | `-- baz\n `-- quux\n\n ```\n """\n (MID, END, CONT, LAST, ROOT) = ("|-- ", "`-- ", "| ", " ", "")\n\n def rec(obj, indent, sym):\n line = indent + sym + show(obj)\n obj_kids = kids(obj)\n if len(obj_kids) == 0:\n return line\n else:\n if sym == MID:\n next_indent = indent + CONT\n elif sym == ROOT:\n next_indent = indent + ROOT\n else:\n next_indent = indent + LAST\n chars = [MID] * (len(obj_kids) - 1) + [END]\n lines = [rec(kid, next_indent, sym) for kid, sym in zip(obj_kids, chars)]\n return "\\n".join([line] + lines)\n\n return rec(x, "", ROOT)\n')
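# --- Illustrative note (not part of the stickytape bundle) ---
# pretty_tree() above renders a parse tree in Unix `tree` style, which is
# handy when debugging grammar output. Example from the module's own docstring:
#
#     from funcparserlib.util import pretty_tree
#     print(pretty_tree(
#         ["foo", ["bar", "baz"], "quux"],
#         lambda obj: obj if isinstance(obj, list) else [],
#         lambda obj: "[]" if isinstance(obj, list) else str(obj),
#     ))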
__stickytape_write_module('dataclasses.py', b'import re\nimport sys\nimport copy\nimport types\nimport inspect\nimport keyword\nimport functools\nimport itertools\nimport abc\nimport _thread\nfrom types import FunctionType, GenericAlias\n\n\n__all__ = [\'dataclass\',\n \'field\',\n \'Field\',\n \'FrozenInstanceError\',\n \'InitVar\',\n \'KW_ONLY\',\n \'MISSING\',\n\n # Helper functions.\n \'fields\',\n \'asdict\',\n \'astuple\',\n \'make_dataclass\',\n \'replace\',\n \'is_dataclass\',\n ]\n\n# Conditions for adding methods. The boxes indicate what action the\n# dataclass decorator takes. For all of these tables, when I talk\n# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I\'m\n# referring to the arguments to the @dataclass decorator. When\n# checking if a dunder method already exists, I mean check for an\n# entry in the class\'s __dict__. I never check to see if an attribute\n# is defined in a base class.\n\n# Key:\n# +=========+=========================================+\n# + Value | Meaning |\n# +=========+=========================================+\n# | <blank> | No action: no method is added. |\n# +---------+-----------------------------------------+\n# | add | Generated method is added. |\n# +---------+-----------------------------------------+\n# | raise | TypeError is raised. |\n# +---------+-----------------------------------------+\n# | None | Attribute is set to None. |\n# +=========+=========================================+\n\n# __init__\n#\n# +--- init= parameter\n# |\n# v | | |\n# | no | yes | <--- class has __init__ in __dict__?\n# +=======+=======+=======+\n# | False | | |\n# +-------+-------+-------+\n# | True | add | | <- the default\n# +=======+=======+=======+\n\n# __repr__\n#\n# +--- repr= parameter\n# |\n# v | | |\n# | no | yes | <--- class has __repr__ in __dict__?\n# +=======+=======+=======+\n# | False | | |\n# +-------+-------+-------+\n# | True | add | | <- the default\n# +=======+=======+=======+\n\n\n# __setattr__\n# __delattr__\n#\n# +--- frozen= parameter\n# |\n# v | | |\n# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__?\n# +=======+=======+=======+\n# | False | | | <- the default\n# +-------+-------+-------+\n# | True | add | raise |\n# +=======+=======+=======+\n# Raise because not adding these methods would break the "frozen-ness"\n# of the class.\n\n# __eq__\n#\n# +--- eq= parameter\n# |\n# v | | |\n# | no | yes | <--- class has __eq__ in __dict__?\n# +=======+=======+=======+\n# | False | | |\n# +-------+-------+-------+\n# | True | add | | <- the default\n# +=======+=======+=======+\n\n# __lt__\n# __le__\n# __gt__\n# __ge__\n#\n# +--- order= parameter\n# |\n# v | | |\n# | no | yes | <--- class has any comparison method in __dict__?\n# +=======+=======+=======+\n# | False | | | <- the default\n# +-------+-------+-------+\n# | True | add | raise |\n# +=======+=======+=======+\n# Raise because to allow this case would interfere with using\n# functools.total_ordering.\n\n# __hash__\n\n# +------------------- unsafe_hash= parameter\n# | +----------- eq= parameter\n# | | +--- frozen= parameter\n# | | |\n# v v v | | |\n# | no | yes | <--- class has explicitly defined __hash__\n# +=======+=======+=======+========+========+\n# | False | False | False | | | No __eq__, use the base class __hash__\n# +-------+-------+-------+--------+--------+\n# | False | False | True | | | No __eq__, use the base class __hash__\n# +-------+-------+-------+--------+--------+\n# | False | True | False | None | | <-- the default, not hashable\n# 
+-------+-------+-------+--------+--------+\n# | False | True | True | add | | Frozen, so hashable, allows override\n# +-------+-------+-------+--------+--------+\n# | True | False | False | add | raise | Has no __eq__, but hashable\n# +-------+-------+-------+--------+--------+\n# | True | False | True | add | raise | Has no __eq__, but hashable\n# +-------+-------+-------+--------+--------+\n# | True | True | False | add | raise | Not frozen, but hashable\n# +-------+-------+-------+--------+--------+\n# | True | True | True | add | raise | Frozen, so hashable\n# +=======+=======+=======+========+========+\n# For boxes that are blank, __hash__ is untouched and therefore\n# inherited from the base class. If the base is object, then\n# id-based hashing is used.\n#\n# Note that a class may already have __hash__=None if it specified an\n# __eq__ method in the class body (not one that was created by\n# @dataclass).\n#\n# See _hash_action (below) for a coded version of this table.\n\n# __match_args__\n#\n# +--- match_args= parameter\n# |\n# v | | |\n# | no | yes | <--- class has __match_args__ in __dict__?\n# +=======+=======+=======+\n# | False | | |\n# +-------+-------+-------+\n# | True | add | | <- the default\n# +=======+=======+=======+\n# __match_args__ is always added unless the class already defines it. It is a\n# tuple of __init__ parameter names; non-init fields must be matched by keyword.\n\n\n# Raised when an attempt is made to modify a frozen class.\nclass FrozenInstanceError(AttributeError): pass\n\n# A sentinel object for default values to signal that a default\n# factory will be used. This is given a nice repr() which will appear\n# in the function signature of dataclasses\' constructors.\nclass _HAS_DEFAULT_FACTORY_CLASS:\n def __repr__(self):\n return \'<factory>\'\n_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()\n\n# A sentinel object to detect if a parameter is supplied or not. Use\n# a class to give it a better repr.\nclass _MISSING_TYPE:\n pass\nMISSING = _MISSING_TYPE()\n\n# A sentinel object to indicate that following fields are keyword-only by\n# default. Use a class to give it a better repr.\nclass _KW_ONLY_TYPE:\n pass\nKW_ONLY = _KW_ONLY_TYPE()\n\n# Since most per-field metadata will be unused, create an empty\n# read-only proxy that can be shared among all fields.\n_EMPTY_METADATA = types.MappingProxyType({})\n\n# Markers for the various kinds of fields and pseudo-fields.\nclass _FIELD_BASE:\n def __init__(self, name):\n self.name = name\n def __repr__(self):\n return self.name\n_FIELD = _FIELD_BASE(\'_FIELD\')\n_FIELD_CLASSVAR = _FIELD_BASE(\'_FIELD_CLASSVAR\')\n_FIELD_INITVAR = _FIELD_BASE(\'_FIELD_INITVAR\')\n\n# The name of an attribute on the class where we store the Field\n# objects. Also used to check if a class is a Data Class.\n_FIELDS = \'__dataclass_fields__\'\n\n# The name of an attribute on the class that stores the parameters to\n# @dataclass.\n_PARAMS = \'__dataclass_params__\'\n\n# The name of the function, that if it exists, is called at the end of\n# __init__.\n_POST_INIT_NAME = \'__post_init__\'\n\n# String regex that string annotations for ClassVar or InitVar must match.\n# Allows "identifier.identifier[" or "identifier[".\n# https://bugs.python.org/issue33453 for details.\n_MODULE_IDENTIFIER_RE = re.compile(r\'^(?:\\s*(\\w+)\\s*\\.)?\\s*(\\w+)\')\n\n# Atomic immutable types which don\'t require any recursive handling and for which deepcopy\n# returns the same object. 
We can provide a fast-path for these types in asdict and astuple.\n_ATOMIC_TYPES = frozenset({\n # Common JSON Serializable types\n types.NoneType,\n bool,\n int,\n float,\n str,\n # Other common types\n complex,\n bytes,\n # Other types that are also unaffected by deepcopy\n types.EllipsisType,\n types.NotImplementedType,\n types.CodeType,\n types.BuiltinFunctionType,\n types.FunctionType,\n type,\n range,\n property,\n})\n\n# This function\'s logic is copied from "recursive_repr" function in\n# reprlib module to avoid dependency.\ndef _recursive_repr(user_function):\n # Decorator to make a repr function return "..." for a recursive\n # call.\n repr_running = set()\n\n @functools.wraps(user_function)\n def wrapper(self):\n key = id(self), _thread.get_ident()\n if key in repr_running:\n return \'...\'\n repr_running.add(key)\n try:\n result = user_function(self)\n finally:\n repr_running.discard(key)\n return result\n return wrapper\n\nclass InitVar:\n __slots__ = (\'type\', )\n\n def __init__(self, type):\n self.type = type\n\n def __repr__(self):\n if isinstance(self.type, type):\n type_name = self.type.__name__\n else:\n # typing objects, e.g. List[int]\n type_name = repr(self.type)\n return f\'dataclasses.InitVar[{type_name}]\'\n\n def __class_getitem__(cls, type):\n return InitVar(type)\n\n# Instances of Field are only ever created from within this module,\n# and only from the field() function, although Field instances are\n# exposed externally as (conceptually) read-only objects.\n#\n# name and type are filled in after the fact, not in __init__.\n# They\'re not known at the time this class is instantiated, but it\'s\n# convenient if they\'re available later.\n#\n# When cls._FIELDS is filled in with a list of Field objects, the name\n# and type fields will have been populated.\nclass Field:\n __slots__ = (\'name\',\n \'type\',\n \'default\',\n \'default_factory\',\n \'repr\',\n \'hash\',\n \'init\',\n \'compare\',\n \'metadata\',\n \'kw_only\',\n \'_field_type\', # Private: not to be used by user code.\n )\n\n def __init__(self, default, default_factory, init, repr, hash, compare,\n metadata, kw_only):\n self.name = None\n self.type = None\n self.default = default\n self.default_factory = default_factory\n self.init = init\n self.repr = repr\n self.hash = hash\n self.compare = compare\n self.metadata = (_EMPTY_METADATA\n if metadata is None else\n types.MappingProxyType(metadata))\n self.kw_only = kw_only\n self._field_type = None\n\n @_recursive_repr\n def __repr__(self):\n return (\'Field(\'\n f\'name={self.name!r},\'\n f\'type={self.type!r},\'\n f\'default={self.default!r},\'\n f\'default_factory={self.default_factory!r},\'\n f\'init={self.init!r},\'\n f\'repr={self.repr!r},\'\n f\'hash={self.hash!r},\'\n f\'compare={self.compare!r},\'\n f\'metadata={self.metadata!r},\'\n f\'kw_only={self.kw_only!r},\'\n f\'_field_type={self._field_type}\'\n \')\')\n\n # This is used to support the PEP 487 __set_name__ protocol in the\n # case where we\'re using a field that contains a descriptor as a\n # default value. 
For details on __set_name__, see\n # https://peps.python.org/pep-0487/#implementation-details.\n #\n # Note that in _process_class, this Field object is overwritten\n # with the default value, so the end result is a descriptor that\n # had __set_name__ called on it at the right time.\n def __set_name__(self, owner, name):\n func = getattr(type(self.default), \'__set_name__\', None)\n if func:\n # There is a __set_name__ method on the descriptor, call\n # it.\n func(self.default, owner, name)\n\n __class_getitem__ = classmethod(GenericAlias)\n\n\nclass _DataclassParams:\n __slots__ = (\'init\',\n \'repr\',\n \'eq\',\n \'order\',\n \'unsafe_hash\',\n \'frozen\',\n \'match_args\',\n \'kw_only\',\n \'slots\',\n \'weakref_slot\',\n )\n\n def __init__(self,\n init, repr, eq, order, unsafe_hash, frozen,\n match_args, kw_only, slots, weakref_slot):\n self.init = init\n self.repr = repr\n self.eq = eq\n self.order = order\n self.unsafe_hash = unsafe_hash\n self.frozen = frozen\n self.match_args = match_args\n self.kw_only = kw_only\n self.slots = slots\n self.weakref_slot = weakref_slot\n\n def __repr__(self):\n return (\'_DataclassParams(\'\n f\'init={self.init!r},\'\n f\'repr={self.repr!r},\'\n f\'eq={self.eq!r},\'\n f\'order={self.order!r},\'\n f\'unsafe_hash={self.unsafe_hash!r},\'\n f\'frozen={self.frozen!r},\'\n f\'match_args={self.match_args!r},\'\n f\'kw_only={self.kw_only!r},\'\n f\'slots={self.slots!r},\'\n f\'weakref_slot={self.weakref_slot!r}\'\n \')\')\n\n\n# This function is used instead of exposing Field creation directly,\n# so that a type checker can be told (via overloads) that this is a\n# function whose type depends on its parameters.\ndef field(*, default=MISSING, default_factory=MISSING, init=True, repr=True,\n hash=None, compare=True, metadata=None, kw_only=MISSING):\n """Return an object to identify dataclass fields.\n\n default is the default value of the field. default_factory is a\n 0-argument function called to initialize a field\'s value. If init\n is true, the field will be a parameter to the class\'s __init__()\n function. If repr is true, the field will be included in the\n object\'s repr(). If hash is true, the field will be included in the\n object\'s hash(). If compare is true, the field will be used in\n comparison functions. metadata, if specified, must be a mapping\n which is stored but not otherwise examined by dataclass. If kw_only\n is true, the field will become a keyword-only parameter to\n __init__().\n\n It is an error to specify both default and default_factory.\n """\n\n if default is not MISSING and default_factory is not MISSING:\n raise ValueError(\'cannot specify both default and default_factory\')\n return Field(default, default_factory, init, repr, hash, compare,\n metadata, kw_only)\n\n\ndef _fields_in_init_order(fields):\n # Returns the fields as __init__ will output them. It returns 2 tuples:\n # the first for normal args, and the second for keyword args.\n\n return (tuple(f for f in fields if f.init and not f.kw_only),\n tuple(f for f in fields if f.init and f.kw_only)\n )\n\n\ndef _tuple_str(obj_name, fields):\n # Return a string representing each field of obj_name as a tuple\n # member. 
So, if fields is [\'x\', \'y\'] and obj_name is "self",\n # return "(self.x,self.y)".\n\n # Special case for the 0-tuple.\n if not fields:\n return \'()\'\n # Note the trailing comma, needed if this turns out to be a 1-tuple.\n return f\'({",".join([f"{obj_name}.{f.name}" for f in fields])},)\'\n\n\ndef _create_fn(name, args, body, *, globals=None, locals=None,\n return_type=MISSING):\n # Note that we may mutate locals. Callers beware!\n # The only callers are internal to this module, so no\n # worries about external callers.\n if locals is None:\n locals = {}\n return_annotation = \'\'\n if return_type is not MISSING:\n locals[\'__dataclass_return_type__\'] = return_type\n return_annotation = \'->__dataclass_return_type__\'\n args = \',\'.join(args)\n body = \'\\n\'.join(f\' {b}\' for b in body)\n\n # Compute the text of the entire function.\n txt = f\' def {name}({args}){return_annotation}:\\n{body}\'\n\n # Free variables in exec are resolved in the global namespace.\n # The global namespace we have is user-provided, so we can\'t modify it for\n # our purposes. So we put the things we need into locals and introduce a\n # scope to allow the function we\'re creating to close over them.\n local_vars = \', \'.join(locals.keys())\n txt = f"def __create_fn__({local_vars}):\\n{txt}\\n return {name}"\n ns = {}\n exec(txt, globals, ns)\n return ns[\'__create_fn__\'](**locals)\n\n\ndef _field_assign(frozen, name, value, self_name):\n # If we\'re a frozen class, then assign to our fields in __init__\n # via object.__setattr__. Otherwise, just use a simple\n # assignment.\n #\n # self_name is what "self" is called in this function: don\'t\n # hard-code "self", since that might be a field name.\n if frozen:\n return f\'__dataclass_builtins_object__.__setattr__({self_name},{name!r},{value})\'\n return f\'{self_name}.{name}={value}\'\n\n\ndef _field_init(f, frozen, globals, self_name, slots):\n # Return the text of the line in the body of __init__ that will\n # initialize this field.\n\n default_name = f\'__dataclass_dflt_{f.name}__\'\n if f.default_factory is not MISSING:\n if f.init:\n # This field has a default factory. If a parameter is\n # given, use it. If not, call the factory.\n globals[default_name] = f.default_factory\n value = (f\'{default_name}() \'\n f\'if {f.name} is __dataclass_HAS_DEFAULT_FACTORY__ \'\n f\'else {f.name}\')\n else:\n # This is a field that\'s not in the __init__ params, but\n # has a default factory function. It needs to be\n # initialized here by calling the factory function,\n # because there\'s no other way to initialize it.\n\n # For a field initialized with a default=defaultvalue, the\n # class dict just has the default value\n # (cls.fieldname=defaultvalue). But that won\'t work for a\n # default factory, the factory must be called in __init__\n # and we must assign that to self.fieldname. 
We can\'t\n # fall back to the class dict\'s value, both because it\'s\n # not set, and because it might be different per-class\n # (which, after all, is why we have a factory function!).\n\n globals[default_name] = f.default_factory\n value = f\'{default_name}()\'\n else:\n # No default factory.\n if f.init:\n if f.default is MISSING:\n # There\'s no default, just do an assignment.\n value = f.name\n elif f.default is not MISSING:\n globals[default_name] = f.default\n value = f.name\n else:\n # If the class has slots, then initialize this field.\n if slots and f.default is not MISSING:\n globals[default_name] = f.default\n value = default_name\n else:\n # This field does not need initialization: reading from it will\n # just use the class attribute that contains the default.\n # Signify that to the caller by returning None.\n return None\n\n # Only test this now, so that we can create variables for the\n # default. However, return None to signify that we\'re not going\n # to actually do the assignment statement for InitVars.\n if f._field_type is _FIELD_INITVAR:\n return None\n\n # Now, actually generate the field assignment.\n return _field_assign(frozen, f.name, value, self_name)\n\n\ndef _init_param(f):\n # Return the __init__ parameter string for this field. For\n # example, the equivalent of \'x:int=3\' (except instead of \'int\',\n # reference a variable set to int, and instead of \'3\', reference a\n # variable set to 3).\n if f.default is MISSING and f.default_factory is MISSING:\n # There\'s no default, and no default_factory, just output the\n # variable name and type.\n default = \'\'\n elif f.default is not MISSING:\n # There\'s a default, this will be the name that\'s used to look\n # it up.\n default = f\'=__dataclass_dflt_{f.name}__\'\n elif f.default_factory is not MISSING:\n # There\'s a factory function. Set a marker.\n default = \'=__dataclass_HAS_DEFAULT_FACTORY__\'\n return f\'{f.name}:__dataclass_type_{f.name}__{default}\'\n\n\ndef _init_fn(fields, std_fields, kw_only_fields, frozen, has_post_init,\n self_name, globals, slots):\n # fields contains both real fields and InitVar pseudo-fields.\n\n # Make sure we don\'t have fields without defaults following fields\n # with defaults. This actually would be caught when exec-ing the\n # function source code, but catching it here gives a better error\n # message, and future-proofs us in case we build up the function\n # using ast.\n\n seen_default = False\n for f in std_fields:\n # Only consider the non-kw-only fields in the __init__ call.\n if f.init:\n if not (f.default is MISSING and f.default_factory is MISSING):\n seen_default = True\n elif seen_default:\n raise TypeError(f\'non-default argument {f.name!r} \'\n \'follows default argument\')\n\n locals = {f\'__dataclass_type_{f.name}__\': f.type for f in fields}\n locals.update({\n \'__dataclass_HAS_DEFAULT_FACTORY__\': _HAS_DEFAULT_FACTORY,\n \'__dataclass_builtins_object__\': object,\n })\n\n body_lines = []\n for f in fields:\n line = _field_init(f, frozen, locals, self_name, slots)\n # line is None means that this field doesn\'t require\n # initialization (it\'s a pseudo-field). 
Just skip it.\n if line:\n body_lines.append(line)\n\n # Does this class have a post-init function?\n if has_post_init:\n params_str = \',\'.join(f.name for f in fields\n if f._field_type is _FIELD_INITVAR)\n body_lines.append(f\'{self_name}.{_POST_INIT_NAME}({params_str})\')\n\n # If no body lines, use \'pass\'.\n if not body_lines:\n body_lines = [\'pass\']\n\n _init_params = [_init_param(f) for f in std_fields]\n if kw_only_fields:\n # Add the keyword-only args. Because the * can only be added if\n # there\'s at least one keyword-only arg, there needs to be a test here\n # (instead of just concatenting the lists together).\n _init_params += [\'*\']\n _init_params += [_init_param(f) for f in kw_only_fields]\n return _create_fn(\'__init__\',\n [self_name] + _init_params,\n body_lines,\n locals=locals,\n globals=globals,\n return_type=None)\n\n\ndef _repr_fn(fields, globals):\n fn = _create_fn(\'__repr__\',\n (\'self\',),\n [\'return self.__class__.__qualname__ + f"(\' +\n \', \'.join([f"{f.name}={{self.{f.name}!r}}"\n for f in fields]) +\n \')"\'],\n globals=globals)\n return _recursive_repr(fn)\n\n\ndef _frozen_get_del_attr(cls, fields, globals):\n locals = {\'cls\': cls,\n \'FrozenInstanceError\': FrozenInstanceError}\n condition = \'type(self) is cls\'\n if fields:\n condition += \' or name in {\' + \', \'.join(repr(f.name) for f in fields) + \'}\'\n return (_create_fn(\'__setattr__\',\n (\'self\', \'name\', \'value\'),\n (f\'if {condition}:\',\n \' raise FrozenInstanceError(f"cannot assign to field {name!r}")\',\n f\'super(cls, self).__setattr__(name, value)\'),\n locals=locals,\n globals=globals),\n _create_fn(\'__delattr__\',\n (\'self\', \'name\'),\n (f\'if {condition}:\',\n \' raise FrozenInstanceError(f"cannot delete field {name!r}")\',\n f\'super(cls, self).__delattr__(name)\'),\n locals=locals,\n globals=globals),\n )\n\n\ndef _cmp_fn(name, op, self_tuple, other_tuple, globals):\n # Create a comparison function. If the fields in the object are\n # named \'x\' and \'y\', then self_tuple is the string\n # \'(self.x,self.y)\' and other_tuple is the string\n # \'(other.x,other.y)\'.\n\n return _create_fn(name,\n (\'self\', \'other\'),\n [ \'if other.__class__ is self.__class__:\',\n f\' return {self_tuple}{op}{other_tuple}\',\n \'return NotImplemented\'],\n globals=globals)\n\n\ndef _hash_fn(fields, globals):\n self_tuple = _tuple_str(\'self\', fields)\n return _create_fn(\'__hash__\',\n (\'self\',),\n [f\'return hash({self_tuple})\'],\n globals=globals)\n\n\ndef _is_classvar(a_type, typing):\n # This test uses a typing internal class, but it\'s the best way to\n # test if this is a ClassVar.\n return (a_type is typing.ClassVar\n or (type(a_type) is typing._GenericAlias\n and a_type.__origin__ is typing.ClassVar))\n\n\ndef _is_initvar(a_type, dataclasses):\n # The module we\'re checking against is the module we\'re\n # currently in (dataclasses.py).\n return (a_type is dataclasses.InitVar\n or type(a_type) is dataclasses.InitVar)\n\ndef _is_kw_only(a_type, dataclasses):\n return a_type is dataclasses.KW_ONLY\n\n\ndef _is_type(annotation, cls, a_module, a_type, is_type_predicate):\n # Given a type annotation string, does it refer to a_type in\n # a_module? 
For example, when checking that annotation denotes a\n # ClassVar, then a_module is typing, and a_type is\n # typing.ClassVar.\n\n # It\'s possible to look up a_module given a_type, but it involves\n # looking in sys.modules (again!), and seems like a waste since\n # the caller already knows a_module.\n\n # - annotation is a string type annotation\n # - cls is the class that this annotation was found in\n # - a_module is the module we want to match\n # - a_type is the type in that module we want to match\n # - is_type_predicate is a function called with (obj, a_module)\n # that determines if obj is of the desired type.\n\n # Since this test does not do a local namespace lookup (and\n # instead only a module (global) lookup), there are some things it\n # gets wrong.\n\n # With string annotations, cv0 will be detected as a ClassVar:\n # CV = ClassVar\n # @dataclass\n # class C0:\n # cv0: CV\n\n # But in this example cv1 will not be detected as a ClassVar:\n # @dataclass\n # class C1:\n # CV = ClassVar\n # cv1: CV\n\n # In C1, the code in this function (_is_type) will look up "CV" in\n # the module and not find it, so it will not consider cv1 as a\n # ClassVar. This is a fairly obscure corner case, and the best\n # way to fix it would be to eval() the string "CV" with the\n # correct global and local namespaces. However that would involve\n # a eval() penalty for every single field of every dataclass\n # that\'s defined. It was judged not worth it.\n\n match = _MODULE_IDENTIFIER_RE.match(annotation)\n if match:\n ns = None\n module_name = match.group(1)\n if not module_name:\n # No module name, assume the class\'s module did\n # "from dataclasses import InitVar".\n ns = sys.modules.get(cls.__module__).__dict__\n else:\n # Look up module_name in the class\'s module.\n module = sys.modules.get(cls.__module__)\n if module and module.__dict__.get(module_name) is a_module:\n ns = sys.modules.get(a_type.__module__).__dict__\n if ns and is_type_predicate(ns.get(match.group(2)), a_module):\n return True\n return False\n\n\ndef _get_field(cls, a_name, a_type, default_kw_only):\n # Return a Field object for this field name and type. ClassVars and\n # InitVars are also returned, but marked as such (see f._field_type).\n # default_kw_only is the value of kw_only to use if there isn\'t a field()\n # that defines it.\n\n # If the default value isn\'t derived from Field, then it\'s only a\n # normal default value. Convert it to a Field().\n default = getattr(cls, a_name, MISSING)\n if isinstance(default, Field):\n f = default\n else:\n if isinstance(default, types.MemberDescriptorType):\n # This is a field in __slots__, so it has no default value.\n default = MISSING\n f = field(default=default)\n\n # Only at this point do we know the name and the type. Set them.\n f.name = a_name\n f.type = a_type\n\n # Assume it\'s a normal field until proven otherwise. We\'re next\n # going to decide if it\'s a ClassVar or InitVar, everything else\n # is just a normal field.\n f._field_type = _FIELD\n\n # In addition to checking for actual types here, also check for\n # string annotations. get_type_hints() won\'t always work for us\n # (see https://github.com/python/typing/issues/508 for example),\n # plus it\'s expensive and would require an eval for every string\n # annotation. 
So, make a best effort to see if this is a ClassVar\n # or InitVar using regex\'s and checking that the thing referenced\n # is actually of the correct type.\n\n # For the complete discussion, see https://bugs.python.org/issue33453\n\n # If typing has not been imported, then it\'s impossible for any\n # annotation to be a ClassVar. So, only look for ClassVar if\n # typing has been imported by any module (not necessarily cls\'s\n # module).\n typing = sys.modules.get(\'typing\')\n if typing:\n if (_is_classvar(a_type, typing)\n or (isinstance(f.type, str)\n and _is_type(f.type, cls, typing, typing.ClassVar,\n _is_classvar))):\n f._field_type = _FIELD_CLASSVAR\n\n # If the type is InitVar, or if it\'s a matching string annotation,\n # then it\'s an InitVar.\n if f._field_type is _FIELD:\n # The module we\'re checking against is the module we\'re\n # currently in (dataclasses.py).\n dataclasses = sys.modules[__name__]\n if (_is_initvar(a_type, dataclasses)\n or (isinstance(f.type, str)\n and _is_type(f.type, cls, dataclasses, dataclasses.InitVar,\n _is_initvar))):\n f._field_type = _FIELD_INITVAR\n\n # Validations for individual fields. This is delayed until now,\n # instead of in the Field() constructor, since only here do we\n # know the field name, which allows for better error reporting.\n\n # Special restrictions for ClassVar and InitVar.\n if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR):\n if f.default_factory is not MISSING:\n raise TypeError(f\'field {f.name} cannot have a \'\n \'default factory\')\n # Should I check for other field settings? default_factory\n # seems the most serious to check for. Maybe add others. For\n # example, how about init=False (or really,\n # init=<not-the-default-init-value>)? It makes no sense for\n # ClassVar and InitVar to specify init=<anything>.\n\n # kw_only validation and assignment.\n if f._field_type in (_FIELD, _FIELD_INITVAR):\n # For real and InitVar fields, if kw_only wasn\'t specified use the\n # default value.\n if f.kw_only is MISSING:\n f.kw_only = default_kw_only\n else:\n # Make sure kw_only isn\'t set for ClassVars\n assert f._field_type is _FIELD_CLASSVAR\n if f.kw_only is not MISSING:\n raise TypeError(f\'field {f.name} is a ClassVar but specifies \'\n \'kw_only\')\n\n # For real fields, disallow mutable defaults. Use unhashable as a proxy\n # indicator for mutability. Read the __hash__ attribute from the class,\n # not the instance.\n if f._field_type is _FIELD and f.default.__class__.__hash__ is None:\n raise ValueError(f\'mutable default {type(f.default)} for field \'\n f\'{f.name} is not allowed: use default_factory\')\n\n return f\n\ndef _set_qualname(cls, value):\n # Ensure that the functions returned from _create_fn uses the proper\n # __qualname__ (the class they belong to).\n if isinstance(value, FunctionType):\n value.__qualname__ = f"{cls.__qualname__}.{value.__name__}"\n return value\n\ndef _set_new_attribute(cls, name, value):\n # Never overwrites an existing attribute. Returns True if the\n # attribute already exists.\n if name in cls.__dict__:\n return True\n _set_qualname(cls, value)\n setattr(cls, name, value)\n return False\n\n\n# Decide if/how we\'re going to create a hash function. Key is\n# (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to\n# take. 
The common case is to do nothing, so instead of providing a\n# function that is a no-op, use None to signify that.\n\ndef _hash_set_none(cls, fields, globals):\n return None\n\ndef _hash_add(cls, fields, globals):\n flds = [f for f in fields if (f.compare if f.hash is None else f.hash)]\n return _set_qualname(cls, _hash_fn(flds, globals))\n\ndef _hash_exception(cls, fields, globals):\n # Raise an exception.\n raise TypeError(f\'Cannot overwrite attribute __hash__ \'\n f\'in class {cls.__name__}\')\n\n#\n# +-------------------------------------- unsafe_hash?\n# | +------------------------------- eq?\n# | | +------------------------ frozen?\n# | | | +---------------- has-explicit-hash?\n# | | | |\n# | | | | +------- action\n# | | | | |\n# v v v v v\n_hash_action = {(False, False, False, False): None,\n (False, False, False, True ): None,\n (False, False, True, False): None,\n (False, False, True, True ): None,\n (False, True, False, False): _hash_set_none,\n (False, True, False, True ): None,\n (False, True, True, False): _hash_add,\n (False, True, True, True ): None,\n (True, False, False, False): _hash_add,\n (True, False, False, True ): _hash_exception,\n (True, False, True, False): _hash_add,\n (True, False, True, True ): _hash_exception,\n (True, True, False, False): _hash_add,\n (True, True, False, True ): _hash_exception,\n (True, True, True, False): _hash_add,\n (True, True, True, True ): _hash_exception,\n }\n# See https://bugs.python.org/issue32929#msg312829 for an if-statement\n# version of this table.\n\n\ndef _process_class(cls, init, repr, eq, order, unsafe_hash, frozen,\n match_args, kw_only, slots, weakref_slot):\n # Now that dicts retain insertion order, there\'s no reason to use\n # an ordered dict. I am leveraging that ordering here, because\n # derived class fields overwrite base class fields, but the order\n # is defined by the base class, which is found first.\n fields = {}\n\n if cls.__module__ in sys.modules:\n globals = sys.modules[cls.__module__].__dict__\n else:\n # Theoretically this can happen if someone writes\n # a custom string to cls.__module__. In which case\n # such dataclass won\'t be fully introspectable\n # (w.r.t. typing.get_type_hints) but will still function\n # correctly.\n globals = {}\n\n setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order,\n unsafe_hash, frozen,\n match_args, kw_only,\n slots, weakref_slot))\n\n # Find our base classes in reverse MRO order, and exclude\n # ourselves. In reversed order so that more derived classes\n # override earlier field definitions in base classes. As long as\n # we\'re iterating over them, see if any are frozen.\n any_frozen_base = False\n has_dataclass_bases = False\n for b in cls.__mro__[-1:0:-1]:\n # Only process classes that have been processed by our\n # decorator. That is, they have a _FIELDS attribute.\n base_fields = getattr(b, _FIELDS, None)\n if base_fields is not None:\n has_dataclass_bases = True\n for f in base_fields.values():\n fields[f.name] = f\n if getattr(b, _PARAMS).frozen:\n any_frozen_base = True\n\n # Annotations defined specifically in this class (not in base classes).\n #\n # Fields are found from cls_annotations, which is guaranteed to be\n # ordered. Default values are from class attributes, if a field\n # has a default. If the default value is a Field(), then it\n # contains additional info beyond (and possibly including) the\n # actual default value. Pseudo-fields ClassVars and InitVars are\n # included, despite the fact that they\'re not real fields. 
That\'s\n # dealt with later.\n cls_annotations = inspect.get_annotations(cls)\n\n # Now find fields in our class. While doing so, validate some\n # things, and set the default values (as class attributes) where\n # we can.\n cls_fields = []\n # Get a reference to this module for the _is_kw_only() test.\n KW_ONLY_seen = False\n dataclasses = sys.modules[__name__]\n for name, type in cls_annotations.items():\n # See if this is a marker to change the value of kw_only.\n if (_is_kw_only(type, dataclasses)\n or (isinstance(type, str)\n and _is_type(type, cls, dataclasses, dataclasses.KW_ONLY,\n _is_kw_only))):\n # Switch the default to kw_only=True, and ignore this\n # annotation: it\'s not a real field.\n if KW_ONLY_seen:\n raise TypeError(f\'{name!r} is KW_ONLY, but KW_ONLY \'\n \'has already been specified\')\n KW_ONLY_seen = True\n kw_only = True\n else:\n # Otherwise it\'s a field of some type.\n cls_fields.append(_get_field(cls, name, type, kw_only))\n\n for f in cls_fields:\n fields[f.name] = f\n\n # If the class attribute (which is the default value for this\n # field) exists and is of type \'Field\', replace it with the\n # real default. This is so that normal class introspection\n # sees a real default value, not a Field.\n if isinstance(getattr(cls, f.name, None), Field):\n if f.default is MISSING:\n # If there\'s no default, delete the class attribute.\n # This happens if we specify field(repr=False), for\n # example (that is, we specified a field object, but\n # no default value). Also if we\'re using a default\n # factory. The class attribute should not be set at\n # all in the post-processed class.\n delattr(cls, f.name)\n else:\n setattr(cls, f.name, f.default)\n\n # Do we have any Field members that don\'t also have annotations?\n for name, value in cls.__dict__.items():\n if isinstance(value, Field) and not name in cls_annotations:\n raise TypeError(f\'{name!r} is a field but has no type annotation\')\n\n # Check rules that apply if we are derived from any dataclasses.\n if has_dataclass_bases:\n # Raise an exception if any of our bases are frozen, but we\'re not.\n if any_frozen_base and not frozen:\n raise TypeError(\'cannot inherit non-frozen dataclass from a \'\n \'frozen one\')\n\n # Raise an exception if we\'re frozen, but none of our bases are.\n if not any_frozen_base and frozen:\n raise TypeError(\'cannot inherit frozen dataclass from a \'\n \'non-frozen one\')\n\n # Remember all of the fields on our class (including bases). This\n # also marks this class as being a dataclass.\n setattr(cls, _FIELDS, fields)\n\n # Was this class defined with an explicit __hash__? Note that if\n # __eq__ is defined in this class, then python will automatically\n # set __hash__ to None. This is a heuristic, as it\'s possible\n # that such a __hash__ == None was not auto-generated, but it\n # close enough.\n class_hash = cls.__dict__.get(\'__hash__\', MISSING)\n has_explicit_hash = not (class_hash is MISSING or\n (class_hash is None and \'__eq__\' in cls.__dict__))\n\n # If we\'re generating ordering methods, we must be generating the\n # eq methods.\n if order and not eq:\n raise ValueError(\'eq must be true if order is true\')\n\n # Include InitVars and regular fields (so, not ClassVars). 
This is\n # initialized here, outside of the "if init:" test, because std_init_fields\n # is used with match_args, below.\n all_init_fields = [f for f in fields.values()\n if f._field_type in (_FIELD, _FIELD_INITVAR)]\n (std_init_fields,\n kw_only_init_fields) = _fields_in_init_order(all_init_fields)\n\n if init:\n # Does this class have a post-init function?\n has_post_init = hasattr(cls, _POST_INIT_NAME)\n\n _set_new_attribute(cls, \'__init__\',\n _init_fn(all_init_fields,\n std_init_fields,\n kw_only_init_fields,\n frozen,\n has_post_init,\n # The name to use for the "self"\n # param in __init__. Use "self"\n # if possible.\n \'__dataclass_self__\' if \'self\' in fields\n else \'self\',\n globals,\n slots,\n ))\n\n # Get the fields as a list, and include only real fields. This is\n # used in all of the following methods.\n field_list = [f for f in fields.values() if f._field_type is _FIELD]\n\n if repr:\n flds = [f for f in field_list if f.repr]\n _set_new_attribute(cls, \'__repr__\', _repr_fn(flds, globals))\n\n if eq:\n # Create __eq__ method. There\'s no need for a __ne__ method,\n # since python will call __eq__ and negate it.\n flds = [f for f in field_list if f.compare]\n self_tuple = _tuple_str(\'self\', flds)\n other_tuple = _tuple_str(\'other\', flds)\n _set_new_attribute(cls, \'__eq__\',\n _cmp_fn(\'__eq__\', \'==\',\n self_tuple, other_tuple,\n globals=globals))\n\n if order:\n # Create and set the ordering methods.\n flds = [f for f in field_list if f.compare]\n self_tuple = _tuple_str(\'self\', flds)\n other_tuple = _tuple_str(\'other\', flds)\n for name, op in [(\'__lt__\', \'<\'),\n (\'__le__\', \'<=\'),\n (\'__gt__\', \'>\'),\n (\'__ge__\', \'>=\'),\n ]:\n if _set_new_attribute(cls, name,\n _cmp_fn(name, op, self_tuple, other_tuple,\n globals=globals)):\n raise TypeError(f\'Cannot overwrite attribute {name} \'\n f\'in class {cls.__name__}. Consider using \'\n \'functools.total_ordering\')\n\n if frozen:\n for fn in _frozen_get_del_attr(cls, field_list, globals):\n if _set_new_attribute(cls, fn.__name__, fn):\n raise TypeError(f\'Cannot overwrite attribute {fn.__name__} \'\n f\'in class {cls.__name__}\')\n\n # Decide if/how we\'re going to create a hash function.\n hash_action = _hash_action[bool(unsafe_hash),\n bool(eq),\n bool(frozen),\n has_explicit_hash]\n if hash_action:\n # No need to call _set_new_attribute here, since by the time\n # we\'re here the overwriting is unconditional.\n cls.__hash__ = hash_action(cls, field_list, globals)\n\n if not getattr(cls, \'__doc__\'):\n # Create a class doc-string.\n try:\n # In some cases fetching a signature is not possible.\n # But, we surely should not fail in this case.\n text_sig = str(inspect.signature(cls)).replace(\' -> None\', \'\')\n except (TypeError, ValueError):\n text_sig = \'\'\n cls.__doc__ = (cls.__name__ + text_sig)\n\n if match_args:\n # I could probably compute this once\n _set_new_attribute(cls, \'__match_args__\',\n tuple(f.name for f in std_init_fields))\n\n # It\'s an error to specify weakref_slot if slots is False.\n if weakref_slot and not slots:\n raise TypeError(\'weakref_slot is True but slots is False\')\n if slots:\n cls = _add_slots(cls, frozen, weakref_slot)\n\n abc.update_abstractmethods(cls)\n\n return cls\n\n\n# _dataclass_getstate and _dataclass_setstate are needed for pickling frozen\n# classes with slots. These could be slightly more performant if we generated\n# the code instead of iterating over fields. 
But that can be a project for\n# another day, if performance becomes an issue.\ndef _dataclass_getstate(self):\n return [getattr(self, f.name) for f in fields(self)]\n\n\ndef _dataclass_setstate(self, state):\n for field, value in zip(fields(self), state):\n # use setattr because dataclass may be frozen\n object.__setattr__(self, field.name, value)\n\n\ndef _get_slots(cls):\n match cls.__dict__.get(\'__slots__\'):\n # A class which does not define __slots__ at all is equivalent\n # to a class defining __slots__ = (\'__dict__\', \'__weakref__\')\n case None:\n yield from (\'__dict__\', \'__weakref__\')\n case str(slot):\n yield slot\n # Slots may be any iterable, but we cannot handle an iterator\n # because it will already be (partially) consumed.\n case iterable if not hasattr(iterable, \'__next__\'):\n yield from iterable\n case _:\n raise TypeError(f"Slots of \'{cls.__name__}\' cannot be determined")\n\n\ndef _add_slots(cls, is_frozen, weakref_slot):\n # Need to create a new class, since we can\'t set __slots__\n # after a class has been created.\n\n # Make sure __slots__ isn\'t already set.\n if \'__slots__\' in cls.__dict__:\n raise TypeError(f\'{cls.__name__} already specifies __slots__\')\n\n # Create a new dict for our new class.\n cls_dict = dict(cls.__dict__)\n field_names = tuple(f.name for f in fields(cls))\n # Make sure slots don\'t overlap with those in base classes.\n inherited_slots = set(\n itertools.chain.from_iterable(map(_get_slots, cls.__mro__[1:-1]))\n )\n # The slots for our class. Remove slots from our base classes. Add\n # \'__weakref__\' if weakref_slot was given, unless it is already present.\n cls_dict["__slots__"] = tuple(\n itertools.filterfalse(\n inherited_slots.__contains__,\n itertools.chain(\n # gh-93521: \'__weakref__\' also needs to be filtered out if\n # already present in inherited_slots\n field_names, (\'__weakref__\',) if weakref_slot else ()\n )\n ),\n )\n\n for field_name in field_names:\n # Remove our attributes, if present. They\'ll still be\n # available in _MARKER.\n cls_dict.pop(field_name, None)\n\n # Remove __dict__ itself.\n cls_dict.pop(\'__dict__\', None)\n\n # Clear existing `__weakref__` descriptor, it belongs to a previous type:\n cls_dict.pop(\'__weakref__\', None) # gh-102069\n\n # And finally create the class.\n qualname = getattr(cls, \'__qualname__\', None)\n cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)\n if qualname is not None:\n cls.__qualname__ = qualname\n\n if is_frozen:\n # Need this for pickling frozen classes with slots.\n if \'__getstate__\' not in cls_dict:\n cls.__getstate__ = _dataclass_getstate\n if \'__setstate__\' not in cls_dict:\n cls.__setstate__ = _dataclass_setstate\n\n return cls\n\n\ndef dataclass(cls=None, /, *, init=True, repr=True, eq=True, order=False,\n unsafe_hash=False, frozen=False, match_args=True,\n kw_only=False, slots=False, weakref_slot=False):\n """Add dunder methods based on the fields defined in the class.\n\n Examines PEP 526 __annotations__ to determine fields.\n\n If init is true, an __init__() method is added to the class. If repr\n is true, a __repr__() method is added. If order is true, rich\n comparison dunder methods are added. If unsafe_hash is true, a\n __hash__() method is added. If frozen is true, fields may not be\n assigned to after instance creation. If match_args is true, the\n __match_args__ tuple is added. If kw_only is true, then by default\n all fields are keyword-only. 
If slots is true, a new class with a\n __slots__ attribute is returned.\n """\n\n def wrap(cls):\n return _process_class(cls, init, repr, eq, order, unsafe_hash,\n frozen, match_args, kw_only, slots,\n weakref_slot)\n\n # See if we\'re being called as @dataclass or @dataclass().\n if cls is None:\n # We\'re called with parens.\n return wrap\n\n # We\'re called as @dataclass without parens.\n return wrap(cls)\n\n\ndef fields(class_or_instance):\n """Return a tuple describing the fields of this dataclass.\n\n Accepts a dataclass or an instance of one. Tuple elements are of\n type Field.\n """\n\n # Might it be worth caching this, per class?\n try:\n fields = getattr(class_or_instance, _FIELDS)\n except AttributeError:\n raise TypeError(\'must be called with a dataclass type or instance\') from None\n\n # Exclude pseudo-fields. Note that fields is sorted by insertion\n # order, so the order of the tuple is as the fields were defined.\n return tuple(f for f in fields.values() if f._field_type is _FIELD)\n\n\ndef _is_dataclass_instance(obj):\n """Returns True if obj is an instance of a dataclass."""\n return hasattr(type(obj), _FIELDS)\n\n\ndef is_dataclass(obj):\n """Returns True if obj is a dataclass or an instance of a\n dataclass."""\n cls = obj if isinstance(obj, type) else type(obj)\n return hasattr(cls, _FIELDS)\n\n\ndef asdict(obj, *, dict_factory=dict):\n """Return the fields of a dataclass instance as a new dictionary mapping\n field names to field values.\n\n Example usage::\n\n @dataclass\n class C:\n x: int\n y: int\n\n c = C(1, 2)\n assert asdict(c) == {\'x\': 1, \'y\': 2}\n\n If given, \'dict_factory\' will be used instead of built-in dict.\n The function applies recursively to field values that are\n dataclass instances. This will also look into built-in containers:\n tuples, lists, and dicts. Other objects are copied with \'copy.deepcopy()\'.\n """\n if not _is_dataclass_instance(obj):\n raise TypeError("asdict() should be called on dataclass instances")\n return _asdict_inner(obj, dict_factory)\n\n\ndef _asdict_inner(obj, dict_factory):\n if type(obj) in _ATOMIC_TYPES:\n return obj\n elif _is_dataclass_instance(obj):\n # fast path for the common case\n if dict_factory is dict:\n return {\n f.name: _asdict_inner(getattr(obj, f.name), dict)\n for f in fields(obj)\n }\n else:\n result = []\n for f in fields(obj):\n value = _asdict_inner(getattr(obj, f.name), dict_factory)\n result.append((f.name, value))\n return dict_factory(result)\n elif isinstance(obj, tuple) and hasattr(obj, \'_fields\'):\n # obj is a namedtuple. Recurse into it, but the returned\n # object is another namedtuple of the same type. This is\n # similar to how other list- or tuple-derived classes are\n # treated (see below), but we just need to create them\n # differently because a namedtuple\'s __init__ needs to be\n # called differently (see bpo-34363).\n\n # I\'m not using namedtuple\'s _asdict()\n # method, because:\n # - it does not recurse in to the namedtuple fields and\n # convert them to dicts (using dict_factory).\n # - I don\'t actually want to return a dict here. The main\n # use case here is json.dumps, and it handles converting\n # namedtuples to lists. Admittedly we\'re losing some\n # information here when we produce a json list instead of a\n # dict. 
Note that if we returned dicts here instead of\n # namedtuples, we could no longer call asdict() on a data\n # structure where a namedtuple was used as a dict key.\n\n return type(obj)(*[_asdict_inner(v, dict_factory) for v in obj])\n elif isinstance(obj, (list, tuple)):\n # Assume we can create an object of this type by passing in a\n # generator (which is not true for namedtuples, handled\n # above).\n return type(obj)(_asdict_inner(v, dict_factory) for v in obj)\n elif isinstance(obj, dict):\n if hasattr(type(obj), \'default_factory\'):\n # obj is a defaultdict, which has a different constructor from\n # dict as it requires the default_factory as its first arg.\n result = type(obj)(getattr(obj, \'default_factory\'))\n for k, v in obj.items():\n result[_asdict_inner(k, dict_factory)] = _asdict_inner(v, dict_factory)\n return result\n return type(obj)((_asdict_inner(k, dict_factory),\n _asdict_inner(v, dict_factory))\n for k, v in obj.items())\n else:\n return copy.deepcopy(obj)\n\n\ndef astuple(obj, *, tuple_factory=tuple):\n """Return the fields of a dataclass instance as a new tuple of field values.\n\n Example usage::\n\n @dataclass\n class C:\n x: int\n y: int\n\n c = C(1, 2)\n assert astuple(c) == (1, 2)\n\n If given, \'tuple_factory\' will be used instead of built-in tuple.\n The function applies recursively to field values that are\n dataclass instances. This will also look into built-in containers:\n tuples, lists, and dicts. Other objects are copied with \'copy.deepcopy()\'.\n """\n\n if not _is_dataclass_instance(obj):\n raise TypeError("astuple() should be called on dataclass instances")\n return _astuple_inner(obj, tuple_factory)\n\n\ndef _astuple_inner(obj, tuple_factory):\n if type(obj) in _ATOMIC_TYPES:\n return obj\n elif _is_dataclass_instance(obj):\n result = []\n for f in fields(obj):\n value = _astuple_inner(getattr(obj, f.name), tuple_factory)\n result.append(value)\n return tuple_factory(result)\n elif isinstance(obj, tuple) and hasattr(obj, \'_fields\'):\n # obj is a namedtuple. Recurse into it, but the returned\n # object is another namedtuple of the same type. This is\n # similar to how other list- or tuple-derived classes are\n # treated (see below), but we just need to create them\n # differently because a namedtuple\'s __init__ needs to be\n # called differently (see bpo-34363).\n return type(obj)(*[_astuple_inner(v, tuple_factory) for v in obj])\n elif isinstance(obj, (list, tuple)):\n # Assume we can create an object of this type by passing in a\n # generator (which is not true for namedtuples, handled\n # above).\n return type(obj)(_astuple_inner(v, tuple_factory) for v in obj)\n elif isinstance(obj, dict):\n obj_type = type(obj)\n if hasattr(obj_type, \'default_factory\'):\n # obj is a defaultdict, which has a different constructor from\n # dict as it requires the default_factory as its first arg.\n result = obj_type(getattr(obj, \'default_factory\'))\n for k, v in obj.items():\n result[_astuple_inner(k, tuple_factory)] = _astuple_inner(v, tuple_factory)\n return result\n return obj_type((_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory))\n for k, v in obj.items())\n else:\n return copy.deepcopy(obj)\n\n\ndef make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True,\n repr=True, eq=True, order=False, unsafe_hash=False,\n frozen=False, match_args=True, kw_only=False, slots=False,\n weakref_slot=False, module=None):\n """Return a new dynamically created dataclass.\n\n The dataclass name will be \'cls_name\'. 
\'fields\' is an iterable\n of either (name), (name, type) or (name, type, Field) objects. If type is\n omitted, use the string \'typing.Any\'. Field objects are created by\n the equivalent of calling \'field(name, type [, Field-info])\'.::\n\n C = make_dataclass(\'C\', [\'x\', (\'y\', int), (\'z\', int, field(init=False))], bases=(Base,))\n\n is equivalent to::\n\n @dataclass\n class C(Base):\n x: \'typing.Any\'\n y: int\n z: int = field(init=False)\n\n For the bases and namespace parameters, see the builtin type() function.\n\n The parameters init, repr, eq, order, unsafe_hash, frozen, match_args, kw_only,\n slots, and weakref_slot are passed to dataclass().\n\n If module parameter is defined, the \'__module__\' attribute of the dataclass is\n set to that value.\n """\n\n if namespace is None:\n namespace = {}\n\n # While we\'re looking through the field names, validate that they\n # are identifiers, are not keywords, and not duplicates.\n seen = set()\n annotations = {}\n defaults = {}\n for item in fields:\n if isinstance(item, str):\n name = item\n tp = \'typing.Any\'\n elif len(item) == 2:\n name, tp, = item\n elif len(item) == 3:\n name, tp, spec = item\n defaults[name] = spec\n else:\n raise TypeError(f\'Invalid field: {item!r}\')\n\n if not isinstance(name, str) or not name.isidentifier():\n raise TypeError(f\'Field names must be valid identifiers: {name!r}\')\n if keyword.iskeyword(name):\n raise TypeError(f\'Field names must not be keywords: {name!r}\')\n if name in seen:\n raise TypeError(f\'Field name duplicated: {name!r}\')\n\n seen.add(name)\n annotations[name] = tp\n\n # Update \'ns\' with the user-supplied namespace plus our calculated values.\n def exec_body_callback(ns):\n ns.update(namespace)\n ns.update(defaults)\n ns[\'__annotations__\'] = annotations\n\n # We use `types.new_class()` instead of simply `type()` to allow dynamic creation\n # of generic dataclasses.\n cls = types.new_class(cls_name, bases, {}, exec_body_callback)\n\n # For pickling to work, the __module__ variable needs to be set to the frame\n # where the dataclass is created.\n if module is None:\n try:\n module = sys._getframemodulename(1) or \'__main__\'\n except AttributeError:\n try:\n module = sys._getframe(1).f_globals.get(\'__name__\', \'__main__\')\n except (AttributeError, ValueError):\n pass\n if module is not None:\n cls.__module__ = module\n\n # Apply the normal decorator.\n return dataclass(cls, init=init, repr=repr, eq=eq, order=order,\n unsafe_hash=unsafe_hash, frozen=frozen,\n match_args=match_args, kw_only=kw_only, slots=slots,\n weakref_slot=weakref_slot)\n\n\ndef replace(obj, /, **changes):\n """Return a new object replacing specified fields with new values.\n\n This is especially useful for frozen classes. 
Example usage::\n\n @dataclass(frozen=True)\n class C:\n x: int\n y: int\n\n c = C(1, 2)\n c1 = replace(c, x=3)\n assert c1.x == 3 and c1.y == 2\n """\n\n # We\'re going to mutate \'changes\', but that\'s okay because it\'s a\n # new dict, even if called with \'replace(obj, **my_changes)\'.\n\n if not _is_dataclass_instance(obj):\n raise TypeError("replace() should be called on dataclass instances")\n\n # It\'s an error to have init=False fields in \'changes\'.\n # If a field is not in \'changes\', read its value from the provided obj.\n\n for f in getattr(obj, _FIELDS).values():\n # Only consider normal fields or InitVars.\n if f._field_type is _FIELD_CLASSVAR:\n continue\n\n if not f.init:\n # Error if this field is specified in changes.\n if f.name in changes:\n raise ValueError(f\'field {f.name} is declared with \'\n \'init=False, it cannot be specified with \'\n \'replace()\')\n continue\n\n if f.name not in changes:\n if f._field_type is _FIELD_INITVAR and f.default is MISSING:\n raise ValueError(f"InitVar {f.name!r} "\n \'must be specified with replace()\')\n changes[f.name] = getattr(obj, f.name)\n\n # Create the new object, which calls __init__() and\n # __post_init__() (if defined), using all of the init fields we\'ve\n # added and/or left in \'changes\'. If there are values supplied in\n # changes that aren\'t fields, this will correctly raise a\n # TypeError.\n return obj.__class__(**changes)\n')
from typing import List, Tuple, Union, Optional
from funcparserlib.lexer import make_tokenizer, TokenSpec, Token
from funcparserlib.parser import tok, forward_decl, finished
from dataclasses import dataclass
def tokenize(s: str) -> List[Token]:
specs = [
TokenSpec("float", r"[+\-]?\d+\.\d*([Ee][+\-]?\d+)*"),
TokenSpec("float", r"\d+"),
TokenSpec("relation",r"\.\w+"),
TokenSpec("target", r":\w+"),
TokenSpec("inverse_relation", r"'\w+"),
TokenSpec("idea",r"\w+"),
TokenSpec("whitespace", r"\s+"),
TokenSpec("or", r"\|"),
TokenSpec("and", r"&"),
TokenSpec("addqty", r"\+="),
TokenSpec("minusqty", r"\-="),
TokenSpec("timesqty", r"\*="),
TokenSpec("divideqty", r"/="),
TokenSpec("modqty", r"%="),
TokenSpec("expqty", r'\^='),
]
tokenizer = make_tokenizer(specs)
return [t for t in tokenizer(s) if t.type != "whitespace"]
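# A quick lexer sanity check (illustrative; assumes funcparserlib 1.x Token
# objects). Sigils stay attached to the token value here and are stripped
# later by the parser:
#   tokenize("alice.knows:bob")
#   # -> [Token('idea', 'alice'), Token('relation', '.knows'), Token('target', ':bob')]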
@dataclass
class RelationalExpr:
idea: Optional[str]
relation: Optional[str]
    target: Optional[str]
@dataclass
class MathExpr:
op: str
val: str
@dataclass
class RelationalMathExpr:
relExpr: RelationalExpr
mathExpr: MathExpr
@dataclass
class DisjunctionExpr:
left: RelationalExpr
right: 'QueryExpr'
@dataclass
class ConjunctionExpr:
left: RelationalExpr
right: 'QueryExpr'
QueryExpr = Union[RelationalExpr, DisjunctionExpr, ConjunctionExpr, RelationalMathExpr]
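# QueryExpr is the parser's result type: one relational pattern, optionally
# paired with a quantity rewrite, or a |/& tree of such terms.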
def parse(tokens: List[Token]) -> QueryExpr:
idea = tok('idea')
relation = tok('relation') >> (lambda a: a[1:])
target = tok('target') >> (lambda a: a[1:])
inverse_relation = tok('inverse_relation') >> (lambda a: a[1:])
expr = forward_decl()
relationalExpr = \
idea + relation + target >> (lambda args: RelationalExpr(*args)) | \
idea + relation >> (lambda args: RelationalExpr(idea=args[0], relation=args[1], target=None)) | \
idea + target >> (lambda args: RelationalExpr(idea=args[0], relation=None,target=args[1])) | \
idea + inverse_relation + target >> (lambda args: RelationalExpr(idea=args[2], relation=args[1], target=args[0])) | \
idea + inverse_relation >> (lambda args: RelationalExpr(idea=None, relation=args[1], target=args[0])) | \
relation + target >> (lambda args: RelationalExpr(idea=None, relation=args[0], target=args[1])) | \
idea >> (lambda i: RelationalExpr(idea=i, relation=None, target=None)) | \
relation >> (lambda r: RelationalExpr(idea=None, relation=r, target=None)) | \
target >> (lambda t: RelationalExpr(idea=None, relation=None, target=t))
mathOp = tok('addqty') | tok('minusqty') | tok('timesqty') | tok('divideqty') | tok('modqty') | tok('expqty')
mathExpr = mathOp + tok('float') >> (lambda args: MathExpr(op=args[0], val=args[1]))
relationalMathExpr = relationalExpr + mathExpr >> (lambda args: RelationalMathExpr(relExpr=args[0], mathExpr=args[1]))
term = relationalMathExpr | relationalExpr
disjunctionExpr = term + tok('or') + expr >> (lambda args: DisjunctionExpr(left=args[0], right=args[2]))
conjunctionExpr = term + tok('and') + expr >> (lambda args: ConjunctionExpr(left=args[0], right=args[2]))
expr.define(disjunctionExpr | conjunctionExpr | term)
document = expr + -finished
return document.parse(tokens)
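# Illustrative parse result (a sketch): "alice.knows:bob | bob.knows" yields
#   DisjunctionExpr(
#       left=RelationalExpr(idea='alice', relation='knows', target='bob'),
#       right=RelationalExpr(idea='bob', relation='knows', target=None))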
def intersperse(value, seq):
res = [value] * (2 * len(seq) - 1)
res[::2] = seq
return res
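# Example: intersperse('AND', ['aid=$1', 'rid=$2']) == ['aid=$1', 'AND', 'rid=$2'].
# An empty seq yields [] because [value] * -1 is [].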
def compile_sql(q: QueryExpr, argc: int = 1) -> Tuple[str, List[str]]:
arg_stack = []
sql = 'SELECT * FROM meme WHERE '
filters = []
if isinstance(q, RelationalExpr):
if q.idea is not None:
filters.append(f'aid=${argc}')
argc += 1
arg_stack.append(q.idea)
if q.relation is not None:
filters.append(f'rid=${argc}')
argc += 1
arg_stack.append(q.relation)
if q.target is not None:
filters.append(f'bid=${argc}')
argc += 1
arg_stack.append(q.target)
    elif isinstance(q, RelationalMathExpr):
        # Compile the relational part, then swap the SELECT star (the first
        # '*' in the statement) for a projection that rewrites qnt.
        (basesql, baseparams) = compile_sql(q.relExpr, argc)
        math_projection = compile_math_sql(q.mathExpr)
        sql = basesql.replace('*', f'aid,rid,bid,{math_projection} AS qnt', 1)
        arg_stack = baseparams
    elif isinstance(q, DisjunctionExpr):
        # '|' becomes a UNION of the two compiled subqueries; the right-hand
        # side's placeholders are offset by the left side's parameter count.
        (left_sql, left_params) = compile_sql(q.left, argc)
        (right_sql, right_params) = compile_sql(q.right, argc + len(left_params))
        sql = f'{left_sql} UNION {right_sql}'
        arg_stack.extend(left_params + right_params)
    elif isinstance(q, ConjunctionExpr):
        # '&' requires both subqueries to match at least one row each.
        (left_sql, left_params) = compile_sql(q.left, argc)
        (right_sql, right_params) = compile_sql(q.right, argc + len(left_params))
        filters.append(f'EXISTS ({left_sql}) AND EXISTS ({right_sql})')
        arg_stack.extend(left_params + right_params)
else:
raise ValueError(f'unknown parse value: {q}')
sql += ' '.join(intersperse("AND", filters))
return (sql, arg_stack)
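# Illustrative compilation (a sketch): "alice.knows:bob" compiles to
#   ('SELECT * FROM meme WHERE aid=$1 AND rid=$2 AND bid=$3',
#    ['alice', 'knows', 'bob'])
# while "alice.knows +=1" swaps the projection:
#   'SELECT aid,rid,bid,qnt + 1 AS qnt FROM meme WHERE aid=$1 AND rid=$2'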
def compile_math_sql(expr: MathExpr) -> str:
    # Map each memelang assignment operator onto the matching SQL arithmetic
    # operator applied to qnt. expr.val comes from the "float" token, so only
    # numeric literals are ever interpolated into the SQL text.
    ops = {'+=': '+', '-=': '-', '*=': '*', '/=': '/', '%=': '%', '^=': '^'}
    if expr.op not in ops:
        raise ValueError(f'invalid operator: {expr.op}')
    return f'qnt {ops[expr.op]} {expr.val}'
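# Example: compile_math_sql(MathExpr(op='^=', val='2')) == 'qnt ^ 2';
# '^' is PostgreSQL's numeric exponentiation operator.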
def execute_memelang():
    # Tokenize, parse, and compile the incoming memelang string, log the
    # generated SQL, then execute it via a prepared plan. Idea/relation/target
    # values travel as text parameters; quantity literals from the "float"
    # token are interpolated directly into the SQL text.
    (sql, params) = compile_sql(parse(tokenize(memelang_in)))
    plpy.info(sql)
    plan = plpy.prepare(sql, ['text'] * len(params))
    result = plpy.execute(plan, params)
    return list(result)
return execute_memelang()
$$ LANGUAGE plpython3u;
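-- Example usage (hypothetical data; the function returns rows shaped like the
-- meme table):
--   INSERT INTO meme VALUES ('alice', 'knows', 'bob', 1);
--   SELECT * FROM meme.query('alice.knows:bob');
--   SELECT * FROM meme.query('alice.knows +=1');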