diff --git a/prqlc/prqlc-parser/src/parser/mod.rs b/prqlc/prqlc-parser/src/parser/mod.rs
index 0bfcdfb9906e..e16221e28793 100644
--- a/prqlc/prqlc-parser/src/parser/mod.rs
+++ b/prqlc/prqlc-parser/src/parser/mod.rs
@@ -17,18 +17,11 @@ mod types;
 pub fn parse_lr_to_pr(
     source: &str,
     source_id: u16,
-    lr_iter: Vec<lr::Token>,
+    lr: Vec<lr::Token>,
 ) -> (Option<Vec<pr::Stmt>>, Vec<Error>) {
     // We don't want comments in the AST (but we do intend to use them as part of
     // formatting)
-    let semantic_tokens = lr_iter.into_iter().filter(|token| {
-        !matches!(
-            token.kind,
-            lr::TokenKind::Comment(_) | lr::TokenKind::LineWrap(_) | lr::TokenKind::DocComment(_)
-        )
-    });
-
-    let stream = prepare_stream(semantic_tokens, source, source_id);
+    let stream = prepare_stream(lr.into_iter(), source, source_id);
 
     let (pr, parse_errors) = ::chumsky::Parser::parse_recovery(&stmt::source(), stream);
 
     let errors = parse_errors.into_iter().map(|e| e.into()).collect();
@@ -39,12 +32,21 @@ pub fn parse_lr_to_pr(
 
 /// Convert the output of the lexer into the input of the parser. Requires
 /// supplying the original source code.
-fn prepare_stream(
+pub(crate) fn prepare_stream(
     tokens: impl Iterator<Item = lr::Token>,
     source: &str,
     source_id: u16,
 ) -> Stream<'static, lr::TokenKind, Span, impl Iterator<Item = (lr::TokenKind, Span)> + Sized> {
-    let tokens = tokens
+    // We don't want comments in the AST (but we do intend to use them as part of
+    // formatting)
+    let semantic_tokens = tokens.filter(|token| {
+        !matches!(
+            token.kind,
+            lr::TokenKind::Comment(_) | lr::TokenKind::LineWrap(_) | lr::TokenKind::DocComment(_)
+        )
+    });
+
+    let tokens = semantic_tokens
         .into_iter()
         .map(move |token| (token.kind, Span::new(source_id, token.span)));
     let len = source.chars().count();