Remove some TextUnit->usize escapees #3570

Merged · 1 commit · Mar 13, 2020

crates/ra_assists/src/lib.rs (1 addition, 1 deletion)
@@ -235,7 +235,7 @@ mod helpers {
             (Some(assist), ExpectedResult::Target(target)) => {
                 let action = assist.0[0].action.clone().unwrap();
                 let range = action.target.expect("expected target on action");
-                assert_eq_text!(&before[range.start().to_usize()..range.end().to_usize()], target);
+                assert_eq_text!(&before[range], target);
             }
             (Some(_), ExpectedResult::NotApplicable) => panic!("assist should not be applicable!"),
             (None, ExpectedResult::After(_)) | (None, ExpectedResult::Target(_)) => {
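
Note (not part of this commit): the shorter form works because the TextRange re-exported by ra_syntax can index directly into a string slice, so the round-trip through usize is unnecessary. A minimal sketch, with the literal text and offsets invented for illustration:

use ra_syntax::{TextRange, TextUnit};

fn range_indexing_demo() {
    let before = "fn foo() {}";
    // Hypothetical target range covering "foo" (bytes 3..6).
    let range = TextRange::from_to(TextUnit::from_usize(3), TextUnit::from_usize(6));
    // Indexing with the range directly...
    let direct = &before[range];
    // ...is equivalent to converting both endpoints to usize first.
    let via_usize = &before[range.start().to_usize()..range.end().to_usize()];
    assert_eq!(direct, via_usize);
    assert_eq!(direct, "foo");
}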

crates/ra_ide/src/syntax_tree.rs (10 additions, 6 deletions)
@@ -5,7 +5,7 @@ use ra_ide_db::RootDatabase;
 use ra_syntax::{
     algo, AstNode, NodeOrToken, SourceFile,
     SyntaxKind::{RAW_STRING, STRING},
-    SyntaxToken, TextRange,
+    SyntaxToken, TextRange, TextUnit,
 };
 
 pub use ra_db::FileId;
@@ -56,19 +56,23 @@ fn syntax_tree_for_token(node: &SyntaxToken, text_range: TextRange) -> Option<St
     let start = text_range.start() - node_range.start();
 
     // how many characters we have selected
-    let len = text_range.len().to_usize();
+    let len = text_range.len();
 
-    let node_len = node_range.len().to_usize();
+    let node_len = node_range.len();
 
-    let start = start.to_usize();
+    let start = start;
 
     // We want to cap our length
     let len = len.min(node_len);
 
     // Ensure our slice is inside the actual string
-    let end = if start + len < text.len() { start + len } else { text.len() - start };
+    let end = if start + len < TextUnit::of_str(&text) {
+        start + len
+    } else {
+        TextUnit::of_str(&text) - start
+    };
 
-    let text = &text[start..end];
+    let text = &text[TextRange::from_to(start, end)];
 
     // Remove possible extra string quotes from the start
     // and the end of the string

crates/ra_ide_db/src/line_index.rs (7 additions, 7 deletions)
@@ -59,7 +59,7 @@ impl LineIndex {
             }
 
             let char_len = TextUnit::of_char(c);
-            if char_len.to_usize() > 1 {
+            if char_len > TextUnit::from_usize(1) {
                 utf16_chars.push(Utf16Char { start: curr_col, end: curr_col + char_len });
             }
 
@@ -101,23 +101,23 @@ impl LineIndex {
             .filter(|it| !it.is_empty())
     }
 
-    fn utf8_to_utf16_col(&self, line: u32, mut col: TextUnit) -> usize {
+    fn utf8_to_utf16_col(&self, line: u32, col: TextUnit) -> usize {
         if let Some(utf16_chars) = self.utf16_lines.get(&line) {
-            let mut correction = TextUnit::from_usize(0);
+            let mut correction = 0;
             for c in utf16_chars {
                 if col >= c.end {
-                    correction += c.len() - TextUnit::from_usize(1);
+                    correction += c.len().to_usize() - 1;
                 } else {
                     // From here on, all utf16 characters come *after* the character we are mapping,
                     // so we don't need to take them into account
                     break;
                 }
             }
 
-            col -= correction;
+            col.to_usize() - correction
+        } else {
+            col.to_usize()
         }
-
-        col.to_usize()
     }
 
     fn utf16_to_utf8_col(&self, line: u32, col: u32) -> TextUnit {

crates/ra_ide_db/src/line_index_utils.rs (1 addition, 1 deletion)
@@ -145,7 +145,7 @@ impl Iterator for OffsetStepIter<'_> {
                     Some((next, next_offset))
                 } else {
                     let char_len = TextUnit::of_char(c);
-                    if char_len.to_usize() > 1 {
+                    if char_len > TextUnit::from_usize(1) {
                         let start = self.offset + TextUnit::from_usize(i);
                         let end = start + char_len;
                         let next = Step::Utf16Char(TextRange::from_to(start, end));

crates/ra_syntax/src/parsing/lexer.rs (2 additions, 2 deletions)
@@ -65,7 +65,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
     lex_first_token(text)
-        .filter(|(token, _)| token.len.to_usize() == text.len())
+        .filter(|(token, _)| token.len == TextUnit::of_str(text))
         .map(|(token, error)| (token.kind, error))
 }
 
@@ -75,7 +75,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxEr
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
     lex_first_token(text)
-        .filter(|(token, error)| !error.is_some() && token.len.to_usize() == text.len())
+        .filter(|(token, error)| !error.is_some() && token.len == TextUnit::of_str(text))
         .map(|(token, _error)| token.kind)
 }
 

crates/ra_syntax/src/tests.rs (5 additions, 5 deletions)
@@ -5,7 +5,7 @@ use std::{
 
 use test_utils::{collect_tests, dir_tests, project_dir, read_text};
 
-use crate::{fuzz, tokenize, SourceFile, SyntaxError, Token};
+use crate::{fuzz, tokenize, SourceFile, SyntaxError, TextRange, TextUnit, Token};
 
 #[test]
 fn lexer_tests() {
@@ -120,11 +120,11 @@ fn assert_errors_are_absent(errors: &[SyntaxError], path: &Path) {
 
 fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String {
     let mut acc = String::new();
-    let mut offset = 0;
+    let mut offset = TextUnit::from_usize(0);
     for token in tokens {
-        let token_len = token.len.to_usize();
-        let token_text = &text[offset..offset + token_len];
-        offset += token_len;
+        let token_len = token.len;
+        let token_text = &text[TextRange::offset_len(offset, token.len)];
+        offset += token.len;
         writeln!(acc, "{:?} {} {:?}", token.kind, token_len, token_text).unwrap();
     }
     for err in errors {
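
For reference, a standalone sketch of the slicing pattern dump_tokens_and_errors now uses: keep a TextUnit offset and cut each token's text out with TextRange::offset_len, with no conversion to usize. The token_lens parameter and function name are invented for the example.

use ra_syntax::{TextRange, TextUnit};

fn token_texts<'a>(text: &'a str, token_lens: &[TextUnit]) -> Vec<&'a str> {
    let mut offset = TextUnit::from_usize(0);
    let mut out = Vec::new();
    for &len in token_lens {
        // Slice [offset, offset + len) directly with a TextRange.
        out.push(&text[TextRange::offset_len(offset, len)]);
        offset += len;
    }
    out
}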

crates/ra_text_edit/src/text_edit.rs (5 additions, 5 deletions)
@@ -63,12 +63,12 @@ impl TextEdit {
     }
 
     pub fn apply(&self, text: &str) -> String {
-        let mut total_len = text.len();
+        let mut total_len = TextUnit::of_str(text);
         for atom in self.atoms.iter() {
-            total_len += atom.insert.len();
-            total_len -= (atom.delete.end() - atom.delete.start()).to_usize();
+            total_len += TextUnit::of_str(&atom.insert);
+            total_len -= atom.delete.end() - atom.delete.start();
         }
-        let mut buf = String::with_capacity(total_len);
+        let mut buf = String::with_capacity(total_len.to_usize());
         let mut prev = 0;
         for atom in self.atoms.iter() {
             let start = atom.delete.start().to_usize();
@@ -80,7 +80,7 @@ impl TextEdit {
             prev = end;
         }
         buf.push_str(&text[prev..text.len()]);
-        assert_eq!(buf.len(), total_len);
+        assert_eq!(TextUnit::of_str(&buf), total_len);
         buf
     }
 
Expand Down