-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathlexer-analyser.js
58 lines (47 loc) · 1.89 KB
/
lexer-analyser.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
// Character reader we implemented
const CharacterReader = require('./character-reader');
// List of token detector functions we will implement.
const tokenDetectors = require('./tokens');
/**
 * Tokenize the given source string.
 *
 * Walks the code one position at a time with a CharacterReader, asking each
 * registered token detector in order until one recognizes a token. Throws on
 * the first character no detector can handle. Whitespace tokens are stripped
 * from the result since later stages never use them.
 *
 * @param {string} code - Raw source text to tokenize.
 * @returns {Array<Object>} Tokens, each annotated with start/end offsets and
 *   the line/character position where it began.
 * @throws {Error} When a character matches no detector (syntax error).
 */
const detectTokens = (code) => {
  const reader = new CharacterReader(code);
  const foundTokens = [];

  while (reader.hasNext()) {
    // Snapshot the position before detection so the token records
    // where it started, not where the reader ended up.
    const start = reader.position;
    const line = reader.getLinePosition();
    const character = reader.getCharacterPosition();

    // First detector to return a truthy value wins; detectors are
    // tried in registration order.
    let token = null;
    for (const detect of tokenDetectors) {
      token = detect(reader);
      if (token) {
        break;
      }
    }

    // Nothing matched this character: the input is not valid in our
    // language, so stop immediately with a positioned error.
    if (!token) {
      throw new Error(`Invalid character '${reader.peek()}' at ${line}:${character}`);
    }

    // Attach positional metadata alongside the detector's own fields.
    foundTokens.push({
      ...token,
      start,
      end: reader.position,
      line,
      character,
    });
  }

  // Whitespace is only needed to separate tokens, never consumed downstream.
  return foundTokens.filter((t) => t.type !== 'whitespace');
};
module.exports = code => detectTokens(code);