Remove Precedence metaclass. #6949

Merged 2 commits on Mar 1, 2024
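The core of this PR replaces the PrecedenceMeta metaclass in edb/common/parsing.py with an __init_subclass__ hook on Precedence itself, so precedence levels register themselves simply by subclassing. Below is a minimal, self-contained sketch of that registration pattern, not the EdgeDB code; PrecedenceBase, P_ADD_OP, and the token names are made up for illustration:

```python
from typing import Dict, Optional, Tuple, Type


class PrecedenceBase:
    # Shared registry mapping a token name to the precedence level that
    # claimed it; filled in as subclasses are defined.
    token_prec_map: Dict[str, Type["PrecedenceBase"]] = {}

    def __init_subclass__(
            cls, *, tokens: Tuple[str, ...] = (),
            is_internal: bool = False, **kwargs):
        super().__init_subclass__(**kwargs)
        if is_internal:
            # Intermediate bases opt out of registration, mirroring the
            # is_internal flag introduced by this PR.
            return
        for token in tokens:
            if token in PrecedenceBase.token_prec_map:
                raise Exception(f"token {token} already has a precedence")
            PrecedenceBase.token_prec_map[token] = cls

    @classmethod
    def for_token(cls, token_name: str) -> Optional[Type["PrecedenceBase"]]:
        return PrecedenceBase.token_prec_map.get(token_name)


class P_ADD_OP(PrecedenceBase, tokens=("PLUS", "MINUS")):
    pass


assert PrecedenceBase.for_token("PLUS") is P_ADD_OP
```

Compared to a metaclass, __init_subclass__ keeps the registration logic on the base class and composes with plain inheritance, which is what lets the grammar modules in the diff below drop their precedence_class= and metaclass= plumbing.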
65 changes: 19 additions & 46 deletions edb/common/parsing.py
@@ -25,7 +25,6 @@
 import json
 import logging
 import os
-import sys
 import types
 
 import parsing
@@ -46,13 +45,9 @@ class Token(parsing.Token):
     _token: str = ""
 
     def __init_subclass__(
-            cls, *, token=None, lextoken=None, precedence_class=None,
-            is_internal=False, **kwargs):
+            cls, *, token=None, lextoken=None, is_internal=False, **kwargs):
         super().__init_subclass__(**kwargs)
 
-        if precedence_class is not None:
-            cls._precedence_class = precedence_class
-
         if is_internal:
             return
 
@@ -72,18 +67,7 @@ def __init_subclass__(
         if not cls.__doc__:
             doc = '%%token %s' % token
 
-            pcls = getattr(cls, '_precedence_class', None)
-            if pcls is None:
-                try:
-                    pcls = sys.modules[Token.__module__].PrecedenceMeta
-                except (KeyError, AttributeError):
-                    pass
-
-            if pcls is None:
-                msg = 'Precedence class is not set for {!r}'.format(Token)
-                raise TypeError(msg)
-
-            prec = pcls.for_token(token)
+            prec = Precedence.for_token(token)
             if prec:
                 doc += ' [%s]' % prec.__name__
 
@@ -235,55 +219,44 @@ def decorator(func):
     return decorator
 
 
-class PrecedenceMeta(type):
-    token_prec_map: Dict[Tuple[Any, Any], Any] = {}
-    last: Dict[Tuple[Any, Any], Any] = {}
+class Precedence(parsing.Precedence):
+    token_prec_map: Dict[Any, Any] = {}
+    last: Dict[Any, Any] = {}
 
-    def __new__(
-            mcls, name, bases, dct, *, assoc, tokens=None, prec_group=None,
-            rel_to_last='>'):
-        result = super().__new__(mcls, name, bases, dct)
+    def __init_subclass__(
+            cls, *, assoc, tokens=None, prec_group=None, rel_to_last='>',
+            is_internal=False, **kwargs):
+        super().__init_subclass__(**kwargs)
 
-        if name == 'Precedence':
-            return result
+        if is_internal:
+            return
 
-        if not result.__doc__:
+        if not cls.__doc__:
             doc = '%%%s' % assoc
 
-            last = mcls.last.get((mcls, prec_group))
+            last = Precedence.last.get(prec_group)
             if last:
                 doc += ' %s%s' % (rel_to_last, last.__name__)
 
-            result.__doc__ = doc
+            cls.__doc__ = doc
 
         if tokens:
             for token in tokens:
                 existing = None
                 try:
-                    existing = mcls.token_prec_map[mcls, token]
+                    existing = Precedence.token_prec_map[token]
                 except KeyError:
-                    mcls.token_prec_map[mcls, token] = result
+                    Precedence.token_prec_map[token] = cls
                 else:
                     raise Exception(
                         'token {} has already been set precedence {}'.format(
                             token, existing))
 
-        mcls.last[mcls, prec_group] = result
-
-        return result
-
-    def __init__(
-            cls, name, bases, dct, *, assoc, tokens=None, prec_group=None,
-            rel_to_last='>'):
-        super().__init__(name, bases, dct)
+        Precedence.last[prec_group] = cls
 
     @classmethod
-    def for_token(mcls, token_name):
-        return mcls.token_prec_map.get((mcls, token_name))
-
-
-class Precedence(parsing.Precedence, assoc='fail', metaclass=PrecedenceMeta):
-    pass
+    def for_token(cls, token_name):
+        return Precedence.token_prec_map.get(token_name)
 
 
 def load_parser_spec(
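Both __init_subclass__ hooks in the file above exist to synthesize the docstrings that the parsing library consumes as grammar declarations (%token, %left, and so on). The format strings below are copied from the diff; the concrete names (PLUS, P_ADD_OP, P_MUL_OP) are made up, so this is only an illustration of the strings the hooks would produce:

```python
# Reproduce only the doc-building expressions from parsing.py, with
# hypothetical names. assoc/rel_to_last come from a Precedence subclass's
# class keywords; the bracketed suffix is appended by Token.__init_subclass__
# via Precedence.for_token().
assoc, rel_to_last, last_name = 'left', '>', 'P_MUL_OP'

prec_doc = ('%%%s' % assoc) + (' %s%s' % (rel_to_last, last_name))
token_doc = ('%%token %s' % 'PLUS') + (' [%s]' % 'P_ADD_OP')

print(prec_doc)   # %left >P_MUL_OP
print(token_doc)  # %token PLUS [P_ADD_OP]
```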
6 changes: 1 addition & 5 deletions edb/edgeql/parser/grammar/precedence.py
@@ -22,11 +22,7 @@
 from edb.common import parsing
 
 
-class PrecedenceMeta(parsing.PrecedenceMeta):
-    pass
-
-
-class Precedence(parsing.Precedence, assoc='fail', metaclass=PrecedenceMeta):
+class Precedence(parsing.Precedence, assoc='fail', is_internal=True):
     pass
 
 
4 changes: 1 addition & 3 deletions edb/edgeql/parser/grammar/tokens.py
@@ -27,15 +27,13 @@
 from edb.common import parsing
 
 from . import keywords
-from . import precedence
 
 
 clean_string = re.compile(r"'(?:\s|\n)+'")
 string_quote = re.compile(r'\$(?:[A-Za-z_][A-Za-z_0-9]*)?\$')
 
 
-class Token(parsing.Token, precedence_class=precedence.PrecedenceMeta,
-            is_internal=True):
+class Token(parsing.Token, is_internal=True):
     pass
 
 