-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathinterpreter.py
179 lines (155 loc) · 5.76 KB
/
interpreter.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
from typing import Generator
from analyze_eval import *
from fractions import Fraction
from lisp_shell_config import MAX_IN, EVAL_TIME
def interpret(
    source: str, time: int, max_in: int = MAX_IN, eval_time: int|float = EVAL_TIME, default_env:Environment= run_env
) -> tuple[str|None, str, set[str]]:
    """
    Interpreter entry point.

    Args:
        source: program text to interpret.
        time: current shell input count (tags every token Location).
        max_in: maximum chunk length fed to the tokenizer at a time.
        eval_time: maximum number of top-level expressions evaluated per call.
        default_env: evaluation environment (defaults to the shared run_env).

    Returns:
        (result string or None, remaining unconsumed input,
         set of names currently bound in the environment).
    """
    strgen = StrGen(source, max_in)
    tokengen = TokenGen(strgen, time)
    # BUGFIX: result must be pre-initialized — with empty source (or
    # eval_time <= 0) the loop body never assigns it, and the final
    # `result is not None` check raised NameError.
    result = None
    i = 0
    while i < eval_time:
        try:
            try:
                exp = GenExp(tokengen)
            except EndOfSource:
                # Every expression in the source has been evaluated.
                break
            # NOTE: `eval` here is the project evaluator from analyze_eval,
            # not the builtin.
            result = eval(analyze(exp), default_env)
        except InterpretError as err:
            err.display()
            if isinstance(err, ExpError):
                # Parse-level error: discard the rest of the input.
                return None, '', set(run_env.keys())
            else:
                # Runtime error: report the still-unconsumed input.
                return None, find_left(tokengen, source), set(run_env.keys())
        i += 1
    left = find_left(tokengen, source)
    if result is not None:
        return str(result), left, set(run_env.keys())
    else:
        return None, left, set(run_env.keys())
def find_left(tokengen:Generator[Token,None,None],source:str)->str:
    """Return the part of ``source`` not yet consumed by the token stream.

    Peeks one token; its ``.where`` offset marks where the unread tail of
    the source begins.  An exhausted generator means everything was consumed.
    """
    for token in tokengen:
        return source[token.where:]
    return ''
def StrGen(source: str, max_in: int) -> Generator[str, None, None]:
    """Yield ``source`` as consecutive chunks of at most ``max_in`` characters."""
    for offset in range(0, len(source), max_in):
        yield source[offset : offset + max_in]
def TokenGen(
    strgen: Generator[str, None, None], time: int
) -> Generator[Token, None, None]:
    """Read the chunk generator and yield Tokens with line/column/offset info.

    ``time`` tags every Location with the current shell input count.
    """
    col, line = -1, 1   # column of the last char consumed; current 1-based line
    pending = ""        # characters of the token currently being accumulated
    finished = []       # tokens completed within the current chunk
    where = -1          # absolute offset of the last char consumed
    for string in strgen:
        for ch in string:
            where += 1
            # Delimiter characters
            if ch in {"(", ")", " ", "\n", "'"}:
                # BUGFIX: flush the pending token BEFORE updating line/col.
                # Previously a newline incremented `line` and reset `col`
                # first, so a token directly before a newline was recorded
                # on the next line with a negative column.  The position
                # before this delimiter is (line, col + 1 - len(pending)).
                if pending:
                    finished.append(
                        Token(pending, Location(line, col + 1 - len(pending), time), where - len(pending))
                    )
                    pending = ""
                # Now advance the position for the delimiter itself.
                if ch == "\n":
                    line += 1
                    col = -1
                else:
                    col += 1
                if ch in {"(", ")", "'"}:
                    finished.append(Token(ch, Location(line, col, time), where))
            else:
                col += 1
                pending += ch
        yield from finished
        finished = []
    if pending:
        # Flush a token that ran to end of input; col points at its last char.
        yield Token(pending, Location(line, col - len(pending) + 1, time), where - len(pending) + 1)
def prase_atom(token:Token)->Atom:
    """Convert a single token into an atomic value.

    Tries, in order: integer, float, fraction (coerced to float).  Failing
    those, "#t"/"#f" become booleans and anything else becomes a symbol.
    A zero-denominator fraction like "1/0" raises ZeroDivisionError.
    """
    text = token.value
    for to_number in (int, float, lambda s: float(Fraction(s))):
        try:
            return to_number(text)
        except ValueError:
            pass
    if text in {"#t", "#f"}:
        return text == "#t"
    return Symbol_(token)
def GenExp(tokengen: Generator[Token, None, None],slot:Token|None=None) -> Exp:
    """Recursively build one expression (Exp) from the token stream.

    ``slot`` optionally carries a token already taken from the stream; the
    recursive calls use it to hand down the "(" or first quoted token.

    Raises:
        EndOfSource: the stream ended cleanly with no expression pending.
        UnmatchedLeftParenthesis: input ended inside an open "(".
        UnmatchedRightParenthesis: a ")" appeared with no matching "(".
        UnmatchedQuote: input ended right after a "'".
    """
    stack: list[Token] = []  # in practice this holds at most one token (the opening "(")
    result = None
    # Parse recursively, producing a single Exp
    while True:
        try:
            # Take the next token from the reserved slot, else from the stream
            if slot is not None:
                token=slot
                slot=None
            else:
                token= next(tokengen)
        # The token generator is exhausted
        except StopIteration:
            # A left parenthesis is still unmatched
            if stack:
                raise UnmatchedLeftParenthesis(stack[-1])
            # All parentheses matched: normal end of input
            else:
                raise EndOfSource()
        # Build up the Exp
        if token.value == "(":
            # result is None: this "(" opens the outermost list of this call
            if result is None:
                stack.append(token)
                result=Compound_([],token.location,None)# type:ignore
            else:
                # result is not None: a nested parenthesis.
                # Hand the "(" to a recursive call via its slot and append the sub-expression.
                result.append(GenExp(tokengen,token))
        elif token.value == ")":
            try:
                # Record the closing parenthesis position as the Exp's end
                stack.pop()
                result.end=token.location #type:ignore
            # No left parenthesis to match: a stray right parenthesis
            except IndexError:
                raise UnmatchedRightParenthesis(token)
        elif token.value == "'":
            try:
                # Support the '(a b c) sugar: the quoted expression spans from
                # the quote character to its last token
                next_token=next(tokengen)
                next_exp=GenExp(tokengen,next_token)
            except StopIteration:
                raise UnmatchedQuote(token)
            new_token=Token('quote',token.location,token.where)
            # End position: the quoted compound's own end, or the single quoted token
            if isinstance(next_exp,Compound_):
                tmp=Compound_([Symbol_(new_token),next_exp],token.location,next_exp.end)
            else:
                tmp=Compound_([Symbol_(new_token),next_exp],token.location,next_token.location)
            if result is None:
                result=tmp
            else:
                result.append(tmp)
        else:
            # Atomic token: parse it into an atom
            if result is None:
                result=prase_atom(token) # type:ignore
            else:
                result.append(prase_atom(token)) # type:ignore
        if not stack:
            break
    return result # type:ignore