
Commit be3cf01

bors[bot] and CAD97 authored
Merge #3570
3570: Remove some TextUnit->usize escapees r=matklad a=CAD97

As spotted during a review of all uses of `text_unit::TextUnit::to_usize` (rust-analyzer/text-size#12 (comment)). Legitimate uses do remain.

Co-authored-by: CAD97 <[email protected]>
2 parents 2f9f409 + 88c944f commit be3cf01
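
For context, the "escapee" pattern being removed converts a TextRange's endpoints to usize just to slice a string, even though text_unit lets a &str be indexed by a TextRange directly. Below is a minimal before/after sketch, assuming only the text_unit API already used in this diff (TextRange::from_to, TextUnit::of_str, TextUnit::from_usize, and Index<TextRange> for str); the helper functions are hypothetical, not part of the commit.

use text_unit::{TextRange, TextUnit};

// The pattern this commit removes: TextUnit escapes into usize just to slice.
fn slice_old(text: &str, range: TextRange) -> &str {
    &text[range.start().to_usize()..range.end().to_usize()]
}

// The replacement: text_unit indexes &str by TextRange directly.
fn slice_new(text: &str, range: TextRange) -> &str {
    &text[range]
}

fn main() {
    let text = "hello, world";
    let range = TextRange::from_to(TextUnit::from_usize(7), TextUnit::of_str(text));
    assert_eq!(slice_old(text, range), "world");
    assert_eq!(slice_new(text, range), "world");
}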

7 files changed: +31 -27 lines changed


crates/ra_assists/src/lib.rs

+1 -1

@@ -235,7 +235,7 @@ mod helpers {
             (Some(assist), ExpectedResult::Target(target)) => {
                 let action = assist.0[0].action.clone().unwrap();
                 let range = action.target.expect("expected target on action");
-                assert_eq_text!(&before[range.start().to_usize()..range.end().to_usize()], target);
+                assert_eq_text!(&before[range], target);
             }
             (Some(_), ExpectedResult::NotApplicable) => panic!("assist should not be applicable!"),
             (None, ExpectedResult::After(_)) | (None, ExpectedResult::Target(_)) => {

crates/ra_ide/src/syntax_tree.rs

+10 -6

@@ -5,7 +5,7 @@ use ra_ide_db::RootDatabase;
 use ra_syntax::{
     algo, AstNode, NodeOrToken, SourceFile,
     SyntaxKind::{RAW_STRING, STRING},
-    SyntaxToken, TextRange,
+    SyntaxToken, TextRange, TextUnit,
 };

 pub use ra_db::FileId;
@@ -56,19 +56,23 @@ fn syntax_tree_for_token(node: &SyntaxToken, text_range: TextRange) -> Option<St
     let start = text_range.start() - node_range.start();

     // how many characters we have selected
-    let len = text_range.len().to_usize();
+    let len = text_range.len();

-    let node_len = node_range.len().to_usize();
+    let node_len = node_range.len();

-    let start = start.to_usize();
+    let start = start;

     // We want to cap our length
     let len = len.min(node_len);

     // Ensure our slice is inside the actual string
-    let end = if start + len < text.len() { start + len } else { text.len() - start };
+    let end = if start + len < TextUnit::of_str(&text) {
+        start + len
+    } else {
+        TextUnit::of_str(&text) - start
+    };

-    let text = &text[start..end];
+    let text = &text[TextRange::from_to(start, end)];

     // Remove possible extra string quotes from the start
     // and the end of the string
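
The point of this hunk is that the length capping and the bounds check can stay in TextUnit: comparison, addition, and subtraction are all defined on it, so nothing needs to round-trip through usize. A hypothetical helper sketching the same idea (it clamps the end to the string length rather than reproducing the exact else-branch arithmetic above):

use text_unit::{TextRange, TextUnit};

// Clamp a (start, len) pair so the resulting range stays inside `text`,
// doing all of the arithmetic on TextUnit values.
fn clamped_range(text: &str, start: TextUnit, len: TextUnit) -> TextRange {
    let text_len = TextUnit::of_str(text);
    let end = if start + len < text_len { start + len } else { text_len };
    TextRange::from_to(start, end)
}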

crates/ra_ide_db/src/line_index.rs

+7 -7

@@ -59,7 +59,7 @@ impl LineIndex {
         }

         let char_len = TextUnit::of_char(c);
-        if char_len.to_usize() > 1 {
+        if char_len > TextUnit::from_usize(1) {
             utf16_chars.push(Utf16Char { start: curr_col, end: curr_col + char_len });
         }

@@ -101,23 +101,23 @@ impl LineIndex {
            .filter(|it| !it.is_empty())
    }

-    fn utf8_to_utf16_col(&self, line: u32, mut col: TextUnit) -> usize {
+    fn utf8_to_utf16_col(&self, line: u32, col: TextUnit) -> usize {
        if let Some(utf16_chars) = self.utf16_lines.get(&line) {
-            let mut correction = TextUnit::from_usize(0);
+            let mut correction = 0;
            for c in utf16_chars {
                if col >= c.end {
-                    correction += c.len() - TextUnit::from_usize(1);
+                    correction += c.len().to_usize() - 1;
                } else {
                    // From here on, all utf16 characters come *after* the character we are mapping,
                    // so we don't need to take them into account
                    break;
                }
            }

-            col -= correction;
+            col.to_usize() - correction
+        } else {
+            col.to_usize()
        }
-
-        col.to_usize()
    }

    fn utf16_to_utf8_col(&self, line: u32, col: u32) -> TextUnit {
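
As a sanity check on what utf8_to_utf16_col computes: the column comes in as a UTF-8 offset, and each earlier multi-byte character shrinks it, since such a character occupies more UTF-8 bytes than UTF-16 units. The following self-contained sketch is a hypothetical helper showing the intended mapping via char::len_utf16 directly; it illustrates the result, not the incremental Utf16Char bookkeeping used above.

// Map a UTF-8 column on a single line to the corresponding UTF-16 column.
fn utf8_col_to_utf16_col(line_text: &str, utf8_col: usize) -> usize {
    line_text[..utf8_col].chars().map(char::len_utf16).sum()
}

fn main() {
    // 'é' is 2 bytes in UTF-8 but 1 unit in UTF-16, so the UTF-8 column of '!'
    // (3) maps to UTF-16 column 2.
    assert_eq!(utf8_col_to_utf16_col("aé!", 3), 2);
}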

crates/ra_ide_db/src/line_index_utils.rs

+1 -1

@@ -145,7 +145,7 @@ impl Iterator for OffsetStepIter<'_> {
             Some((next, next_offset))
         } else {
             let char_len = TextUnit::of_char(c);
-            if char_len.to_usize() > 1 {
+            if char_len > TextUnit::from_usize(1) {
                 let start = self.offset + TextUnit::from_usize(i);
                 let end = start + char_len;
                 let next = Step::Utf16Char(TextRange::from_to(start, end));

crates/ra_syntax/src/parsing/lexer.rs

+2 -2

@@ -65,7 +65,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
     lex_first_token(text)
-        .filter(|(token, _)| token.len.to_usize() == text.len())
+        .filter(|(token, _)| token.len == TextUnit::of_str(text))
         .map(|(token, error)| (token.kind, error))
 }

@@ -75,7 +75,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
     lex_first_token(text)
-        .filter(|(token, error)| !error.is_some() && token.len.to_usize() == text.len())
+        .filter(|(token, error)| !error.is_some() && token.len == TextUnit::of_str(text))
         .map(|(token, _error)| token.kind)
 }

crates/ra_syntax/src/tests.rs

+5 -5

@@ -5,7 +5,7 @@ use std::{

 use test_utils::{collect_tests, dir_tests, project_dir, read_text};

-use crate::{fuzz, tokenize, SourceFile, SyntaxError, Token};
+use crate::{fuzz, tokenize, SourceFile, SyntaxError, TextRange, TextUnit, Token};

 #[test]
 fn lexer_tests() {
@@ -120,11 +120,11 @@ fn assert_errors_are_absent(errors: &[SyntaxError], path: &Path) {

 fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String {
     let mut acc = String::new();
-    let mut offset = 0;
+    let mut offset = TextUnit::from_usize(0);
     for token in tokens {
-        let token_len = token.len.to_usize();
-        let token_text = &text[offset..offset + token_len];
-        offset += token_len;
+        let token_len = token.len;
+        let token_text = &text[TextRange::offset_len(offset, token.len)];
+        offset += token.len;
         writeln!(acc, "{:?} {} {:?}", token.kind, token_len, token_text).unwrap();
     }
     for err in errors {
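
The test helper now keeps its running offset as a TextUnit and slices each token's text via TextRange::offset_len. A stand-alone version of that loop is sketched below; the Token struct is a hypothetical stand-in for the lexer's token type, which stores its length as a TextUnit.

use text_unit::{TextRange, TextUnit};

struct Token {
    len: TextUnit,
}

// Collect the source text of each token, keeping all offset arithmetic in TextUnit.
fn token_texts<'a>(tokens: &[Token], text: &'a str) -> Vec<&'a str> {
    let mut offset = TextUnit::from_usize(0);
    let mut out = Vec::new();
    for token in tokens {
        out.push(&text[TextRange::offset_len(offset, token.len)]);
        offset += token.len;
    }
    out
}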

crates/ra_text_edit/src/text_edit.rs

+5 -5

@@ -63,12 +63,12 @@ impl TextEdit {
     }

     pub fn apply(&self, text: &str) -> String {
-        let mut total_len = text.len();
+        let mut total_len = TextUnit::of_str(text);
         for atom in self.atoms.iter() {
-            total_len += atom.insert.len();
-            total_len -= (atom.delete.end() - atom.delete.start()).to_usize();
+            total_len += TextUnit::of_str(&atom.insert);
+            total_len -= atom.delete.end() - atom.delete.start();
         }
-        let mut buf = String::with_capacity(total_len);
+        let mut buf = String::with_capacity(total_len.to_usize());
         let mut prev = 0;
         for atom in self.atoms.iter() {
             let start = atom.delete.start().to_usize();
@@ -80,7 +80,7 @@ impl TextEdit {
             prev = end;
         }
         buf.push_str(&text[prev..text.len()]);
-        assert_eq!(buf.len(), total_len);
+        assert_eq!(TextUnit::of_str(&buf), total_len);
         buf
     }
