@@ -1775,7 +1775,32 @@ impl<T, A: Allocator> Vec<T, A> {
             return;
         }
 
-        /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+        // Check if we ever want to remove anything.
+        // This allows us to use copy_nonoverlapping in the next cycle
+        // and avoids any memory writes if we don't need to remove anything.
+        let mut first_duplicate_idx: usize = 1;
+        let start = self.as_mut_ptr();
+        while first_duplicate_idx != len {
+            let found_duplicate = unsafe {
+                // SAFETY: first_duplicate_idx is always in the range [1..len).
+                // Note that we start iteration from 1, so we never overflow.
+                let prev = start.add(first_duplicate_idx.wrapping_sub(1));
+                let current = start.add(first_duplicate_idx);
+                // We explicitly say in the docs that the references are reversed.
+                same_bucket(&mut *current, &mut *prev)
+            };
+            if found_duplicate {
+                break;
+            }
+            first_duplicate_idx += 1;
+        }
+        // We don't need to remove anything.
+        // `first_duplicate_idx` cannot get bigger than `len`.
+        if first_duplicate_idx == len {
+            return;
+        }
+
+        /* INVARIANT: vec.len() > read > write > write-1 >= 0 */
         struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
             /* Offset of the element we want to check if it is duplicate */
             read: usize,
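
The pre-scan added above is the heart of the change: `dedup_by` now performs no writes at all until it has proven that at least one element must be removed, and afterwards `read` is always strictly ahead of `write`. A minimal safe-Rust sketch of the same scan (the `first_duplicate` helper is hypothetical, for illustration only, and uses `==` where the patch calls `same_bucket`):

```rust
/// Sketch of the pre-scan: returns the index of the first element that
/// duplicates its predecessor, or `items.len()` if there are no duplicates.
fn first_duplicate<T: PartialEq>(items: &[T]) -> usize {
    let mut idx = 1;
    while idx < items.len() {
        // Compare each element with its predecessor, mirroring the
        // `same_bucket(current, prev)` calls in the patch.
        if items[idx] == items[idx - 1] {
            return idx;
        }
        idx += 1;
    }
    items.len()
}

fn main() {
    assert_eq!(first_duplicate(&[1, 1, 2]), 1); // duplicate pair found at index 1
    assert_eq!(first_duplicate(&[1, 2, 3]), 3); // equals len: nothing to remove
}
```

If the scan runs off the end, the method returns early without touching memory; otherwise `first_duplicate_idx` seeds the `read`/`write` offsets used by the main loop below.
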
@@ -1821,31 +1846,39 @@ impl<T, A: Allocator> Vec<T, A> {
             }
         }
 
-        let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
-        let ptr = gap.vec.as_mut_ptr();
-
         /* Drop items while going through Vec, it should be more efficient than
          * doing slice partition_dedup + truncate */
 
+        // Construct the gap first and only then drop the item, to avoid memory corruption if `T::drop` panics.
+        let mut gap =
+            FillGapOnDrop { read: first_duplicate_idx + 1, write: first_duplicate_idx, vec: self };
+        unsafe {
+            // SAFETY: we checked above that first_duplicate_idx is in bounds.
+            // If the drop panics, `gap` removes this item without dropping it again.
+            ptr::drop_in_place(start.add(first_duplicate_idx));
+        }
+
        /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
         * are always in-bounds and read_ptr never aliases prev_ptr */
        unsafe {
            while gap.read < len {
-                let read_ptr = ptr.add(gap.read);
-                let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
+                let read_ptr = start.add(gap.read);
+                let prev_ptr = start.add(gap.write.wrapping_sub(1));
 
-                if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
+                // We explicitly say in the docs that the references are reversed.
+                let found_duplicate = same_bucket(&mut *read_ptr, &mut *prev_ptr);
+                if found_duplicate {
                    // Increase `gap.read` now since the drop may panic.
                    gap.read += 1;
                    /* We have found a duplicate, drop it in-place */
                    ptr::drop_in_place(read_ptr);
                } else {
-                    let write_ptr = ptr.add(gap.write);
+                    let write_ptr = start.add(gap.write);
 
-                    /* Because `read_ptr` can be equal to `write_ptr`, we either
-                     * have to use `copy` or conditional `copy_nonoverlapping`.
-                     * Looks like the first option is faster. */
-                    ptr::copy(read_ptr, write_ptr, 1);
+                    /* read_ptr cannot be equal to write_ptr, because at this
+                     * point we are guaranteed to have skipped at least one
+                     * element (before the loop starts). */
+                    ptr::copy_nonoverlapping(read_ptr, write_ptr, 1);
 
                    /* We have filled that place, so go further */
                    gap.write += 1;
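
For context on why the patch constructs `gap` before the first `drop_in_place`: `FillGapOnDrop` is a drop guard whose only job is to restore the `Vec` to a sound state if `same_bucket` or a destructor panics mid-compaction. A simplified sketch of that pattern, assuming a plain `Vec<T>` without the allocator parameter (not the stdlib's exact implementation):

```rust
use core::ptr;

/// Simplified drop guard in the spirit of `FillGapOnDrop`: if `same_bucket`
/// or `T::drop` panics, this `Drop` impl closes the gap of already-removed
/// elements and shrinks `len`, so no element is dropped twice or leaked.
struct FillGapOnDrop<'a, T> {
    read: usize,  // offset of the next element to examine
    write: usize, // offset of the first hole in the deduplicated prefix
    vec: &'a mut Vec<T>,
}

impl<T> Drop for FillGapOnDrop<'_, T> {
    fn drop(&mut self) {
        // SAFETY (assumed by this sketch): the loop keeps `read` and `write`
        // in-bounds with `read >= write`, so the copy stays inside the buffer
        // and the resulting length never exceeds the old one.
        unsafe {
            let ptr = self.vec.as_mut_ptr();
            let len = self.vec.len();
            // Shift the not-yet-examined tail `vec[read..]` down over the gap
            // at `vec[write..read]`; the ranges may overlap, hence `ptr::copy`.
            let items_left = len.wrapping_sub(self.read);
            ptr::copy(ptr.add(self.read), ptr.add(self.write), items_left);
            // Forget the removed slots: only `write + items_left` items remain.
            self.vec.set_len(self.write + items_left);
        }
    }
}
```

This is also why the main loop bumps `gap.read` *before* calling `drop_in_place`: at the moment a destructor can panic, the guard's bookkeeping must already describe the element as consumed.
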
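The two "references are reversed" comments point at a documented property of `Vec::dedup_by`: the closure receives the candidate element first and its predecessor second, and returning `true` removes the first argument. A usage example in the style of the `dedup_by` documentation:

```rust
fn main() {
    let mut vec = vec!["foo", "bar", "Bar", "baz", "bar"];
    // `a` is the later element, `b` its predecessor; returning `true`
    // removes `a` and keeps `b`, so "Bar" is dropped in favor of "bar".
    vec.dedup_by(|a, b| a.eq_ignore_ascii_case(b));
    assert_eq!(vec, ["foo", "bar", "baz", "bar"]);
}
```

The patch preserves that argument order in both the pre-scan (`same_bucket(&mut *current, &mut *prev)`) and the main loop, so observable behavior is unchanged.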