Skip to content

Commit b6c0b0c

Browse files
committed
Disable line counting temporarily.
1 parent 482840d commit b6c0b0c

File tree

1 file changed

+13
-50
lines changed

1 file changed

+13
-50
lines changed

src/tokenizer.rs

Lines changed: 13 additions & 50 deletions
Original file line number | Diff line number | Diff line change
@@ -18,8 +18,6 @@ pub fn tokenize(input: &str) -> Tokenizer {
1818
length: input.len(),
1919
input: input,
2020
position: 0,
21-
line: 1,
22-
last_line_start: 0,
2321
}
2422
}
2523

@@ -43,8 +41,6 @@ pub struct Tokenizer {
4341
input: String,
4442
length: uint, // All counted in bytes, not characters
4543
position: uint, // All counted in bytes, not characters
46-
line: uint,
47-
last_line_start: uint, // All counted in bytes, not characters
4844
}
4945

5046

@@ -72,15 +68,6 @@ impl Tokenizer {
7268
fn starts_with(&self, needle: &str) -> bool {
7369
self.input.as_slice().slice_from(self.position).starts_with(needle)
7470
}
75-
76-
#[inline]
77-
fn new_line(&mut self) {
78-
if cfg!(test) {
79-
assert!(self.input.as_slice().char_at(self.position - 1) == '\n')
80-
}
81-
self.line += 1;
82-
self.last_line_start = self.position;
83-
}
8471
}
8572

8673
macro_rules! is_match(
@@ -93,27 +80,19 @@ macro_rules! is_match(
9380
fn next_component_value(tokenizer: &mut Tokenizer) -> Option<Node> {
9481
consume_comments(tokenizer);
9582
if tokenizer.is_eof() {
96-
if cfg!(test) {
97-
assert!(tokenizer.line == tokenizer.input.as_slice().split('\n').count(),
98-
"The tokenizer is missing a tokenizer.new_line() call somewhere.")
99-
}
10083
return None
10184
}
10285
let start_location = SourceLocation{
103-
line: tokenizer.line,
104-
// The start of the line is column 1:
105-
column: tokenizer.position - tokenizer.last_line_start + 1,
86+
// FIXME
87+
line: 0,
88+
column: tokenizer.position,
10689
};
10790
let c = tokenizer.current_char();
10891
let component_value = match c {
10992
'\t' | '\n' | ' ' => {
11093
while !tokenizer.is_eof() {
11194
match tokenizer.current_char() {
112-
' ' | '\t' => tokenizer.position += 1,
113-
'\n' => {
114-
tokenizer.position += 1;
115-
tokenizer.new_line();
116-
},
95+
' ' | '\t' | '\n' => tokenizer.position += 1,
11796
_ => break,
11897
}
11998
}
@@ -251,15 +230,11 @@ fn consume_comments(tokenizer: &mut Tokenizer) {
251230
while tokenizer.starts_with("/*") {
252231
tokenizer.position += 2; // +2 to consume "/*"
253232
while !tokenizer.is_eof() {
254-
match tokenizer.consume_char() {
255-
'*' => {
256-
if !tokenizer.is_eof() && tokenizer.current_char() == '/' {
257-
tokenizer.position += 1;
258-
break
259-
}
260-
},
261-
'\n' => tokenizer.new_line(),
262-
_ => ()
233+
if tokenizer.consume_char() == '*' &&
234+
!tokenizer.is_eof() &&
235+
tokenizer.current_char() == '/' {
236+
tokenizer.position += 1;
237+
break
263238
}
264239
}
265240
}
@@ -322,7 +297,6 @@ fn consume_quoted_string(tokenizer: &mut Tokenizer, single_quote: bool) -> Resul
322297
if !tokenizer.is_eof() {
323298
if tokenizer.current_char() == '\n' { // Escaped newline
324299
tokenizer.position += 1;
325-
tokenizer.new_line();
326300
}
327301
else { string.push(consume_escape(tokenizer)) }
328302
}
@@ -461,11 +435,7 @@ fn consume_url(tokenizer: &mut Tokenizer) -> ComponentValue {
461435
tokenizer.position += 1; // Skip the ( of url(
462436
while !tokenizer.is_eof() {
463437
match tokenizer.current_char() {
464-
' ' | '\t' => tokenizer.position += 1,
465-
'\n' => {
466-
tokenizer.position += 1;
467-
tokenizer.new_line();
468-
},
438+
' ' | '\t' | '\n' => tokenizer.position += 1,
469439
'"' => return consume_quoted_url(tokenizer, false),
470440
'\'' => return consume_quoted_url(tokenizer, true),
471441
')' => { tokenizer.position += 1; break },
@@ -485,11 +455,7 @@ fn consume_url(tokenizer: &mut Tokenizer) -> ComponentValue {
485455
let mut string = String::new();
486456
while !tokenizer.is_eof() {
487457
let next_char = match tokenizer.consume_char() {
488-
' ' | '\t' => return consume_url_end(tokenizer, string),
489-
'\n' => {
490-
tokenizer.new_line();
491-
return consume_url_end(tokenizer, string)
492-
},
458+
' ' | '\t' | '\n' => return consume_url_end(tokenizer, string),
493459
')' => break,
494460
'\x01'...'\x08' | '\x0B' | '\x0E'...'\x1F' | '\x7F' // non-printable
495461
| '"' | '\'' | '(' => return consume_bad_url(tokenizer),
@@ -510,8 +476,7 @@ fn consume_url(tokenizer: &mut Tokenizer) -> ComponentValue {
510476
fn consume_url_end(tokenizer: &mut Tokenizer, string: String) -> ComponentValue {
511477
while !tokenizer.is_eof() {
512478
match tokenizer.consume_char() {
513-
' ' | '\t' => (),
514-
'\n' => tokenizer.new_line(),
479+
' ' | '\t' | '\n' => (),
515480
')' => break,
516481
_ => return consume_bad_url(tokenizer)
517482
}
@@ -525,7 +490,6 @@ fn consume_url(tokenizer: &mut Tokenizer) -> ComponentValue {
525490
match tokenizer.consume_char() {
526491
')' => break,
527492
'\\' => tokenizer.position += 1, // Skip an escaped ')' or '\'
528-
'\n' => tokenizer.new_line(),
529493
_ => ()
530494
}
531495
}
@@ -593,8 +557,7 @@ fn consume_escape(tokenizer: &mut Tokenizer) -> char {
593557
}
594558
if !tokenizer.is_eof() {
595559
match tokenizer.current_char() {
596-
' ' | '\t' => tokenizer.position += 1,
597-
'\n' => { tokenizer.position += 1; tokenizer.new_line() },
560+
' ' | '\t' | '\n' => tokenizer.position += 1,
598561
_ => ()
599562
}
600563
}

0 commit comments

Comments
 (0)