From e414242bead371b9dbb00e60b81ef548afb4acb0 Mon Sep 17 00:00:00 2001
From: Arthur Cohen <arthur.cohen@embecosm.com>
Date: Sun, 22 Dec 2024 15:59:27 +0000
Subject: [PATCH] ast: Add DesugarForLoops class

gcc/rust/ChangeLog:

	* ast/rust-desugar-for-loops.cc: New file.
	* ast/rust-desugar-for-loops.h: New file.
	* Make-lang.in: Compile it.

gcc/testsuite/ChangeLog:

	* rust/compile/for-loop1.rs: New test.
	* rust/compile/nr2/exclude: Add for-loop1.rs.
	* rust/execute/torture/for-loop1.rs: New test.
	* rust/execute/torture/for-loop2.rs: New test.
---
 gcc/rust/Make-lang.in                         |   1 +
 gcc/rust/ast/rust-desugar-for-loops.cc        | 198 +++++++
 gcc/rust/ast/rust-desugar-for-loops.h         | 103 ++++
 gcc/testsuite/rust/compile/for-loop1.rs       | 543 +++++++++++++++++
 gcc/testsuite/rust/compile/nr2/exclude        |   1 +
 .../rust/execute/torture/for-loop1.rs         | 545 ++++++++++++++++++
 .../rust/execute/torture/for-loop2.rs         | 544 +++++++++++++++++
 7 files changed, 1935 insertions(+)
 create mode 100644 gcc/rust/ast/rust-desugar-for-loops.cc
 create mode 100644 gcc/rust/ast/rust-desugar-for-loops.h
 create mode 100644 gcc/testsuite/rust/compile/for-loop1.rs
 create mode 100644 gcc/testsuite/rust/execute/torture/for-loop1.rs
 create mode 100644 gcc/testsuite/rust/execute/torture/for-loop2.rs

diff --git a/gcc/rust/Make-lang.in b/gcc/rust/Make-lang.in
index 751ae874def..bcb0f47c417 100644
--- a/gcc/rust/Make-lang.in
+++ b/gcc/rust/Make-lang.in
@@ -232,6 +232,7 @@ GRS_OBJS = \
     rust/rust-expand-format-args.o \
     rust/rust-lang-item.o \
     rust/rust-collect-lang-items.o \
+    rust/rust-desugar-for-loops.o \
     $(END)
 # removed object files from here
diff --git a/gcc/rust/ast/rust-desugar-for-loops.cc b/gcc/rust/ast/rust-desugar-for-loops.cc
new file mode 100644
index 00000000000..6a84b283539
--- /dev/null
+++ b/gcc/rust/ast/rust-desugar-for-loops.cc
@@ -0,0 +1,198 @@
+// Copyright (C) 2024 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-desugar-for-loops.h"
+#include "rust-ast-visitor.h"
+#include "rust-ast.h"
+#include "rust-hir-map.h"
+#include "rust-path.h"
+#include "rust-pattern.h"
+#include "rust-stmt.h"
+#include "rust-expr.h"
+#include "rust-ast-builder.h"
+
+namespace Rust {
+namespace AST {
+
+DesugarForLoops::DesugarForLoops () {}
+
+void
+DesugarForLoops::go (AST::Crate &crate)
+{
+  DefaultASTVisitor::visit (crate);
+}
+
+void
+replace_for_loop (std::unique_ptr<Expr> &for_loop,
+		  std::unique_ptr<Expr> &&expanded)
+{
+  for_loop = std::move (expanded);
+}
+
+MatchArm
+DesugarForLoops::DesugarCtx::make_match_arm (std::unique_ptr<Pattern> &&path)
+{
+  auto patterns = std::vector<std::unique_ptr<Pattern>> ();
+  patterns.emplace_back (std::move (path));
+
+  return MatchArm (std::move (patterns), loc);
+}
+
+MatchCase
+DesugarForLoops::DesugarCtx::make_break_arm ()
+{
+  auto arm = make_match_arm (std::unique_ptr<Pattern> (new PathInExpression (
+    builder.path_in_expression (LangItem::Kind::OPTION_NONE))));
+
+  auto break_expr = std::unique_ptr<Expr> (
+    new BreakExpr (Lifetime::error (), nullptr, {}, loc));
+
+  return MatchCase (std::move (arm), std::move (break_expr));
+}
+
+MatchCase
+DesugarForLoops::DesugarCtx::make_continue_arm ()
+{
+  auto val = builder.identifier_pattern ("val");
+
+  auto patterns = std::vector<std::unique_ptr<Pattern>> ();
+  patterns.emplace_back (std::move (val));
+
+  auto pattern_item = std::unique_ptr<TupleStructItems> (
+    new TupleStructItemsNoRange (std::move (patterns)));
+  auto pattern = std::unique_ptr<Pattern> (new TupleStructPattern (
+    builder.path_in_expression (LangItem::Kind::OPTION_SOME),
+    std::move (pattern_item)));
+
+  auto val_arm = make_match_arm (std::move (pattern));
+
+  auto next = builder.identifier ("__next");
+
+  auto assignment = std::unique_ptr<Expr> (
+    new AssignmentExpr (std::move (next), builder.identifier ("val"), {}, loc));
+
+  return MatchCase (std::move (val_arm), std::move (assignment));
+}
+
+std::unique_ptr<Stmt>
+DesugarForLoops::DesugarCtx::statementify (std::unique_ptr<Expr> &&expr)
+{
+  return std::unique_ptr<Stmt> (new ExprStmt (std::move (expr), loc, true));
+}
+
+std::unique_ptr<Expr>
+DesugarForLoops::desugar (AST::ForLoopExpr &expr)
+{
+  auto ctx = DesugarCtx (expr.get_locus ());
+
+  auto into_iter = std::make_unique<PathInExpression> (
+    ctx.builder.path_in_expression (LangItem::Kind::INTOITER_INTOITER));
+  auto next = std::make_unique<PathInExpression> (
+    ctx.builder.path_in_expression (LangItem::Kind::ITERATOR_NEXT));
+
+  // IntoIterator::into_iter(<head>)
+  auto into_iter_call
+    = ctx.builder.call (std::move (into_iter),
+			expr.get_iterator_expr ().clone_expr ());
+
+  // Iterator::next(iter)
+  auto next_call = ctx.builder.call (
+    std::move (next), ctx.builder.ref (ctx.builder.identifier ("iter"), true));
+
+  // None => break,
+  auto break_arm = ctx.make_break_arm ();
+  // Some(val) => { __next = val; },
+  auto continue_arm = ctx.make_continue_arm ();
+
+  // match <next_call> {
+  //     <continue_arm>
+  //     <break_arm>
+  // }
+  auto match_next
+    = ctx.builder.match (std::move (next_call),
+			 {std::move (continue_arm), std::move (break_arm)});
+
+  // let mut __next;
+  auto let_next
+    = ctx.builder.let (ctx.builder.identifier_pattern ("__next", true));
+  // let <pattern> = __next;
+  auto let_pat = ctx.builder.let (expr.get_pattern ().clone_pattern (), nullptr,
+				  ctx.builder.identifier ("__next"));
+
+  auto loop_stmts = std::vector<std::unique_ptr<Stmt>> ();
+  loop_stmts.emplace_back (std::move (let_next));
+  loop_stmts.emplace_back (ctx.statementify (std::move (match_next)));
+  loop_stmts.emplace_back (std::move (let_pat));
+  loop_stmts.emplace_back (
+    ctx.statementify (expr.get_loop_block ().clone_expr ()));
+
+  // loop {
+  //     <let_next>;
+  //     <match_next>;
+  //     <let_pat>;
+  //
+  //     <body>;
+  // }
+  auto loop = ctx.builder.loop (std::move (loop_stmts));
+
+  auto mut_iter_pattern = ctx.builder.identifier_pattern ("iter", true);
+  auto match_iter
+    = ctx.builder.match (std::move (into_iter_call),
+			 {ctx.builder.match_case (std::move (mut_iter_pattern),
+						  std::move (loop))});
+
+  auto let_result = ctx.builder.let (ctx.builder.identifier_pattern ("result"),
+				     nullptr, std::move (match_iter));
+  auto result_return = ctx.builder.identifier ("result");
+
+  return ctx.builder.block (std::move (let_result), std::move (result_return));
+}
+
+void
+DesugarForLoops::maybe_desugar_expr (std::unique_ptr<Expr> &expr)
+{
+  if (expr->get_expr_kind () == AST::Expr::Kind::Loop)
+    {
+      auto &loop = static_cast<AST::BaseLoopExpr &> (*expr);
+
+      if (loop.get_loop_kind () == AST::BaseLoopExpr::Kind::For)
+	{
+	  auto &for_loop = static_cast<AST::ForLoopExpr &> (loop);
+
+	  auto desugared = desugar (for_loop);
+
+	  replace_for_loop (expr, std::move (desugared));
+	}
+    }
+}
+
+void
+DesugarForLoops::visit (AST::BlockExpr &block)
+{
+  for (auto &stmt : block.get_statements ())
+    if (stmt->get_stmt_kind () == AST::Stmt::Kind::Expr)
+      maybe_desugar_expr (static_cast<AST::ExprStmt &> (*stmt).get_expr_ptr ());
+
+  if (block.has_tail_expr ())
+    maybe_desugar_expr (block.get_tail_expr_ptr ());
+
+  DefaultASTVisitor::visit (block);
+}
+
+} // namespace AST
+} // namespace Rust
diff --git a/gcc/rust/ast/rust-desugar-for-loops.h b/gcc/rust/ast/rust-desugar-for-loops.h
new file mode 100644
index 00000000000..9f396777493
--- /dev/null
+++ b/gcc/rust/ast/rust-desugar-for-loops.h
@@ -0,0 +1,103 @@
+// Copyright (C) 2024 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_DESUGAR_FOR_LOOPS
+#define RUST_DESUGAR_FOR_LOOPS
+
+#include "rust-ast-builder.h"
+#include "rust-ast-visitor.h"
+#include "rust-expr.h"
+
+namespace Rust {
+namespace AST {
+
+// Desugar for-loops into a set of other AST nodes. The desugar is of the
+// following form:
+//
+// ```
+// for <pattern> in <head> <body>
+// ```
+//
+// becomes:
+//
+// ```
+// {
+//     let result = match ::std::iter::IntoIterator::into_iter(<head>) {
+//         mut iter => {
+//             loop {
+//                 let mut __next;
+//                 match ::std::iter::Iterator::next(&mut iter) {
+//                     ::std::option::Option::Some(val) => __next = val,
+//                     ::std::option::Option::None => break
+//                 };
+//                 let <pattern> = __next;
+//
+//                 <body>;
+//             }
+//         }
+//     };
+//     result
+// }
+// ```
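+//
+// For example, a concrete loop such as `for i in 0..3 { f(i); }` would,
+// following the schema above, roughly become:
+//
+// ```
+// {
+//     let result = match ::std::iter::IntoIterator::into_iter(0..3) {
+//         mut iter => {
+//             loop {
+//                 let mut __next;
+//                 match ::std::iter::Iterator::next(&mut iter) {
+//                     ::std::option::Option::Some(val) => __next = val,
+//                     ::std::option::Option::None => break
+//                 };
+//                 let i = __next;
+//
+//                 f(i);
+//             }
+//         }
+//     };
+//     result
+// }
+// ```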
+//
+// NOTE: In a perfect world, this would be an immutable visitor which would take
+// ownership of the AST node and return a new one, instead of mutating this one
+// in place. Nevertheless, this isn't Rust, and doing immutable visitors in C++
+// sucks, and the world isn't perfect, so we are impure and sad.
+//
+// NOTE: This class could eventually be removed in favor of an HIR desugar.
+// This would avoid mutating the AST and would be cleaner. However, it requires
+// multiple changes in the way we do typechecking and name resolution, as this
+// desugar creates new bindings. Because of this, these new bindings need to be
+// inserted into the name-resolution context outside of the name resolution
+// pass, which is difficult. Those bindings are needed because of the way the
+// typechecker is currently structured, where it will fetch name resolution
+// information in order to typecheck paths - which technically isn't necessary.
+class DesugarForLoops : public DefaultASTVisitor
+{
+  using DefaultASTVisitor::visit;
+
+public:
+  DesugarForLoops ();
+  void go (AST::Crate &);
+
+private:
+  struct DesugarCtx
+  {
+    DesugarCtx (location_t loc) : builder (Builder (loc)), loc (loc) {}
+
+    Builder builder;
+    location_t loc;
+
+    MatchArm make_match_arm (std::unique_ptr<Pattern> &&pattern);
+    MatchCase make_break_arm ();
+    MatchCase make_continue_arm ();
+    std::unique_ptr<Stmt> statementify (std::unique_ptr<Expr> &&expr);
+  };
+
+  std::unique_ptr<Expr> desugar (AST::ForLoopExpr &expr);
+  void maybe_desugar_expr (std::unique_ptr<Expr> &expr);
+
+  void visit (AST::BlockExpr &) override;
+};
+
+} // namespace AST
+} // namespace Rust
+
+#endif // ! RUST_DESUGAR_FOR_LOOPS
diff --git a/gcc/testsuite/rust/compile/for-loop1.rs b/gcc/testsuite/rust/compile/for-loop1.rs
new file mode 100644
index 00000000000..1023ecde1c3
--- /dev/null
+++ b/gcc/testsuite/rust/compile/for-loop1.rs
@@ -0,0 +1,543 @@
+// { dg-output "loop\r*\nloop\r*\n" }
+#![feature(intrinsics)]
+
+pub use option::Option::{self, None, Some};
+pub use result::Result::{self, Err, Ok};
+
+extern "C" {
+    fn printf(s: *const i8, ...);
+    fn puts(s: *const i8);
+}
+
+mod option {
+    pub enum Option<T> {
+        #[lang = "None"]
+        None,
+        #[lang = "Some"]
+        Some(T),
+    }
+}
+
+mod result {
+    enum Result<T, E> {
+        Ok(T),
+        Err(E),
+    }
+}
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "clone"]
+pub trait Clone: Sized {
+    fn clone(&self) -> Self;
+
+    fn clone_from(&mut self, source: &Self) {
+        *self = source.clone()
+    }
+}
+
+mod impls {
+    use super::Clone;
+
+    macro_rules! impl_clone {
+        ($($t:ty)*) => {
+            $(
+                impl Clone for $t {
+                    fn clone(&self) -> Self {
+                        *self
+                    }
+                }
+            )*
+        }
+    }
+
+    impl_clone! {
+        usize u8 u16 u32 u64 // u128
+        isize i8 i16 i32 i64 // i128
+        f32 f64
+        bool char
+    }
+}
+
+#[lang = "copy"]
+pub trait Copy: Clone {
+    // Empty.
+}
+
+mod copy_impls {
+    use super::Copy;
+
+    macro_rules! impl_copy {
+        ($($t:ty)*) => {
+            $(
+                impl Copy for $t {}
+            )*
+        }
+    }
+
+    impl_copy! {
+        usize u8 u16 u32 u64 // u128
+        isize i8 i16 i32 i64 // i128
+        f32 f64
+        bool char
+    }
+}
+
+mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);
+        pub fn wrapping_add<T>(a: T, b: T) -> T;
+        pub fn wrapping_sub<T>(a: T, b: T) -> T;
+        pub fn rotate_left<T>(a: T, b: T) -> T;
+        pub fn rotate_right<T>(a: T, b: T) -> T;
+        pub fn offset<T>(ptr: *const T, count: isize) -> *const T;
+        pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+        pub fn move_val_init<T>(dst: *mut T, src: T);
+        pub fn uninit<T>() -> T;
+    }
+}
+
+mod ptr {
+    #[lang = "const_ptr"]
+    impl<T> *const T {
+        pub unsafe fn offset(self, count: isize) -> *const T {
+            intrinsics::offset(self, count)
+        }
+    }
+
+    #[lang = "mut_ptr"]
+    impl<T> *mut T {
+        pub unsafe fn offset(self, count: isize) -> *mut T {
+            intrinsics::offset(self, count) as *mut T
+        }
+    }
+
+    pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
+        let x = x as *mut u8;
+        let y = y as *mut u8;
+        let len = mem::size_of::<T>() * count;
+        swap_nonoverlapping_bytes(x, y, len)
+    }
+
+    pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
+        // For types smaller than the block optimization below,
+        // just swap directly to avoid pessimizing codegen.
+        if mem::size_of::<T>() < 32 {
+            let z = read(x);
+            intrinsics::copy_nonoverlapping(y, x, 1);
+            write(y, z);
+        } else {
+            swap_nonoverlapping(x, y, 1);
+        }
+    }
+
+    pub unsafe fn write<T>(dst: *mut T, src: T) {
+        intrinsics::move_val_init(&mut *dst, src)
+    }
+
+    pub unsafe fn read<T>(src: *const T) -> T {
+        let mut tmp: T = mem::uninitialized();
+        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        tmp
+    }
+
+    pub unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
+        struct Block(u64, u64, u64, u64);
+        struct UnalignedBlock(u64, u64, u64, u64);
+
+        let block_size = mem::size_of::<Block>();
+
+        // Loop through x & y, copying them `Block` at a time
+        // The optimizer should unroll the loop fully for most types
+        // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
+        let mut i: usize = 0;
+        while i + block_size <= len {
+            // Create some uninitialized memory as scratch space
+            // Declaring `t` here avoids aligning the stack when this loop is unused
+            let mut t: Block = mem::uninitialized();
+            let t = &mut t as *mut _ as *mut u8;
+            let x = x.offset(i as isize);
+            let y = y.offset(i as isize);
+
+            // Swap a block of bytes of x & y, using t as a temporary buffer
+            // This should be optimized into efficient SIMD operations where available
+            intrinsics::copy_nonoverlapping(x, t, block_size);
+            intrinsics::copy_nonoverlapping(y, x, block_size);
+            intrinsics::copy_nonoverlapping(t, y, block_size);
+            i += block_size;
+        }
+
+        if i < len {
+            // Swap any remaining bytes
+            let mut t: UnalignedBlock = mem::uninitialized();
+            let rem = len - i;
+
+            let t = &mut t as *mut _ as *mut u8;
+            let x = x.offset(i as isize);
+            let y = y.offset(i as isize);
+
+            intrinsics::copy_nonoverlapping(x, t, rem);
+            intrinsics::copy_nonoverlapping(y, x, rem);
+            intrinsics::copy_nonoverlapping(t, y, rem);
+        }
+    }
+}
+
+mod mem {
+    extern "rust-intrinsic" {
+        #[rustc_const_stable(feature = "const_transmute", since = "1.46.0")]
+        pub fn transmute<T, U>(_: T) -> U;
+        #[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
+        pub fn size_of<T>() -> usize;
+    }
+
+    pub fn swap<T>(x: &mut T, y: &mut T) {
+        unsafe {
+            ptr::swap_nonoverlapping_one(x, y);
+        }
+    }
+
+    pub fn replace<T>(dest: &mut T, mut src: T) -> T {
+        swap(dest, &mut src);
+        src
+    }
+
+    pub unsafe fn uninitialized<T>() -> T {
+        intrinsics::uninit()
+    }
+}
+
+macro_rules! impl_uint {
+    ($($ty:ident = $lang:literal),*) => {
+        $(
+            impl $ty {
+                pub fn wrapping_add(self, rhs: Self) -> Self {
+                    unsafe {
+                        intrinsics::wrapping_add(self, rhs)
+                    }
+                }
+
+                pub fn wrapping_sub(self, rhs: Self) -> Self {
+                    unsafe {
+                        intrinsics::wrapping_sub(self, rhs)
+                    }
+                }
+
+                pub fn rotate_left(self, n: u32) -> Self {
+                    unsafe {
+                        intrinsics::rotate_left(self, n as Self)
+                    }
+                }
+
+                pub fn rotate_right(self, n: u32) -> Self {
+                    unsafe {
+                        intrinsics::rotate_right(self, n as Self)
+                    }
+                }
+
+                pub fn to_le(self) -> Self {
+                    #[cfg(target_endian = "little")]
+                    {
+                        self
+                    }
+                }
+
+                pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+                    Self::from_le(Self::from_ne_bytes(bytes))
+                }
+
+                pub const fn from_le(x: Self) -> Self {
+                    #[cfg(target_endian = "little")]
+                    {
+                        x
+                    }
+                }
+
+                pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+                    unsafe { mem::transmute(bytes) }
+                }
+
+                pub fn checked_add(self, rhs: Self) -> Option<Self> {
+                    let (a, b) = self.overflowing_add(rhs);
+                    if b {
+                        Option::None
+                    } else {
+                        Option::Some(a)
+                    }
+                }
+
+                pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+                    let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+                    (a as Self, b)
+                }
+            }
+        )*
+    }
+}
+
+impl_uint!(
+    u8 = "u8",
+    u16 = "u16",
+    u32 = "u32",
+    u64 = "u64",
+    usize = "usize"
+);
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! add_impl {
+    ($($t:ty)*) => ($(
+        impl Add for $t {
+            type Output = $t;
+
+            fn add(self, other: $t) -> $t { self + other }
+        }
+    )*)
+}
+
+add_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+    type Output;
+
+    fn sub(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! sub_impl {
+    ($($t:ty)*) => ($(
+        impl Sub for $t {
+            type Output = $t;
+
+            fn sub(self, other: $t) -> $t { self - other }
+        }
+    )*)
+}
+
+sub_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "Range"]
+pub struct Range<Idx> {
+    pub start: Idx,
+    pub end: Idx,
+}
+
+pub trait TryFrom<T>: Sized {
+    /// The type returned in the event of a conversion error.
+    type Error;
+
+    /// Performs the conversion.
+    fn try_from(value: T) -> Result<Self, Self::Error>;
+}
+
+pub trait From<T>: Sized {
+    fn from(_: T) -> Self;
+}
+
+impl<T> From<T> for T {
+    fn from(t: T) -> T {
+        t
+    }
+}
+
+impl<T, U> TryFrom<U> for T
+where
+    T: From<U>,
+{
+    type Error = !;
+
+    fn try_from(value: U) -> Result<Self, Self::Error> {
+        Ok(T::from(value))
+    }
+}
+
+trait Step {
+    /// Returns the number of steps between two step objects. The count is
+    /// inclusive of `start` and exclusive of `end`.
+    ///
+    /// Returns `None` if it is not possible to calculate `steps_between`
+    /// without overflow.
+    fn steps_between(start: &Self, end: &Self) -> Option<usize>;
+
+    /// Replaces this step with `1`, returning itself
+    fn replace_one(&mut self) -> Self;
+
+    /// Replaces this step with `0`, returning itself
+    fn replace_zero(&mut self) -> Self;
+
+    /// Adds one to this step, returning the result
+    fn add_one(&self) -> Self;
+
+    /// Subtracts one from this step, returning the result
+    fn sub_one(&self) -> Self;
+
+    /// Adds a usize, returning None on overflow
+    fn add_usize(&self, n: usize) -> Option<Self>;
+}
+
+// These are still macro-generated because the integer literals resolve to different types.
+macro_rules! step_identical_methods {
+    () => {
+        #[inline]
+        fn replace_one(&mut self) -> Self {
+            mem::replace(self, 1)
+        }
+
+        #[inline]
+        fn replace_zero(&mut self) -> Self {
+            mem::replace(self, 0)
+        }
+
+        #[inline]
+        fn add_one(&self) -> Self {
+            Add::add(*self, 1)
+        }
+
+        #[inline]
+        fn sub_one(&self) -> Self {
+            Sub::sub(*self, 1)
+        }
+    };
+}
+
+macro_rules! step_impl_unsigned {
+    ($($t:ty)*) => ($(
+        impl Step for $t {
+            fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+                if *start < *end {
+                    // Note: We assume $t <= usize here
+                    Option::Some((*end - *start) as usize)
+                } else {
+                    Option::Some(0)
+                }
+            }
+
+            fn add_usize(&self, n: usize) -> Option<Self> {
+                match <$t>::try_from(n) {
+                    Result::Ok(n_as_t) => self.checked_add(n_as_t),
+                    Result::Err(_) => Option::None,
+                }
+            }
+
+            step_identical_methods!();
+        }
+    )*)
+}
+macro_rules! step_impl_signed {
+    ($( [$t:ty : $unsigned:ty] )*) => ($(
+        impl Step for $t {
+            #[inline]
+            #[allow(trivial_numeric_casts)]
+            fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+                if *start < *end {
+                    // Note: We assume $t <= isize here
+                    // Use .wrapping_sub and cast to usize to compute the
+                    // difference that may not fit inside the range of isize.
+                    Option::Some((*end as isize).wrapping_sub(*start as isize) as usize)
+                } else {
+                    Option::Some(0)
+                }
+            }
+
+            #[inline]
+            #[allow(unreachable_patterns)]
+            fn add_usize(&self, n: usize) -> Option<Self> {
+                match <$unsigned>::try_from(n) {
+                    Result::Ok(n_as_unsigned) => {
+                        // Wrapping in unsigned space handles cases like
+                        // `-120_i8.add_usize(200) == Option::Some(80_i8)`,
+                        // even though 200_usize is out of range for i8.
+                        let wrapped = (*self as $unsigned).wrapping_add(n_as_unsigned) as $t;
+                        if wrapped >= *self {
+                            Option::Some(wrapped)
+                        } else {
+                            Option::None // Addition overflowed
+                        }
+                    }
+                    Result::Err(_) => Option::None,
+                }
+            }
+
+            step_identical_methods!();
+        }
+    )*)
+}
+
+macro_rules! step_impl_no_between {
+    ($($t:ty)*) => ($(
+        impl Step for $t {
+            #[inline]
+            fn steps_between(_start: &Self, _end: &Self) -> Option<usize> {
+                Option::None
+            }
+
+            #[inline]
+            fn add_usize(&self, n: usize) -> Option<Self> {
+                self.checked_add(n as $t)
+            }
+
+            step_identical_methods!();
+        }
+    )*)
+}
+
+step_impl_unsigned!(usize);
+
+pub trait Iterator {
+    type Item;
+
+    #[lang = "next"]
+    fn next(&mut self) -> Option<Self::Item>;
+}
+
+impl<A: Step> Iterator for Range<A> {
+    type Item = A;
+
+    fn next(&mut self) -> Option<A> {
+        if self.start < self.end {
+            // We check for overflow here, even though it can't actually
+            // happen. Adding this check does however help llvm vectorize loops
+            // for some ranges that don't get vectorized otherwise,
+            // and this won't actually result in an extra check in an optimized build.
+            match self.start.add_usize(1) {
+                Option::Some(mut n) => {
+                    mem::swap(&mut n, &mut self.start);
+                    Option::Some(n)
+                }
+                Option::None => Option::None,
+            }
+        } else {
+            Option::None
+        }
+    }
+}
+
+pub trait IntoIterator {
+    type Item;
+
+    type IntoIter: Iterator<Item = Self::Item>;
+
+    #[lang = "into_iter"]
+    fn into_iter(self) -> Self::IntoIter;
+}
+
+impl<I: Iterator> IntoIterator for I {
+    type Item = I::Item;
+    type IntoIter = I;
+
+    fn into_iter(self) -> I {
+        self
+    }
+}
+
+pub fn main() {
+    let a = 1usize..3usize;
+
+    for i in a { // { dg-warning "unused name" }
+        unsafe { puts("loop\0" as *const str as *const i8); }
+    }
+}
diff --git a/gcc/testsuite/rust/compile/nr2/exclude b/gcc/testsuite/rust/compile/nr2/exclude
index c5c7326500d..878d1e5b0aa 100644
--- a/gcc/testsuite/rust/compile/nr2/exclude
+++ b/gcc/testsuite/rust/compile/nr2/exclude
@@ -150,4 +150,5 @@ issue-2953-1.rs
 issue-3030.rs
 traits12.rs
 try-trait.rs
+for-loop1.rs
 # please don't delete the trailing newline
diff --git a/gcc/testsuite/rust/execute/torture/for-loop1.rs b/gcc/testsuite/rust/execute/torture/for-loop1.rs
new file mode 100644
index 00000000000..5a6a70c37d6
--- /dev/null
+++ b/gcc/testsuite/rust/execute/torture/for-loop1.rs
@@ -0,0 +1,545 @@
+// { dg-output "loop\r*\nloop\r*\n" }
+#![feature(intrinsics)]
+
+pub use option::Option::{self, None, Some};
+pub use result::Result::{self, Err, Ok};
+
+extern "C" {
+    fn printf(s: *const i8, ...);
+    fn puts(s: *const i8);
+}
+
+mod option {
+    pub enum Option<T> {
+        #[lang = "None"]
+        None,
+        #[lang = "Some"]
+        Some(T),
+    }
+}
+
+mod result {
+    enum Result<T, E> {
+        Ok(T),
+        Err(E),
+    }
+}
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "clone"]
+pub trait Clone: Sized {
+    fn clone(&self) -> Self;
+
+    fn clone_from(&mut self, source: &Self) {
+        *self = source.clone()
+    }
+}
+
+mod impls {
+    use super::Clone;
+
+    macro_rules! impl_clone {
+        ($($t:ty)*) => {
+            $(
+                impl Clone for $t {
+                    fn clone(&self) -> Self {
+                        *self
+                    }
+                }
+            )*
+        }
+    }
+
+    impl_clone! {
+        usize u8 u16 u32 u64 // u128
+        isize i8 i16 i32 i64 // i128
+        f32 f64
+        bool char
+    }
+}
+
+#[lang = "copy"]
+pub trait Copy: Clone {
+    // Empty.
+}
+
+mod copy_impls {
+    use super::Copy;
+
+    macro_rules! impl_copy {
+        ($($t:ty)*) => {
+            $(
+                impl Copy for $t {}
+            )*
+        }
+    }
+
+    impl_copy! {
+        usize u8 u16 u32 u64 // u128
+        isize i8 i16 i32 i64 // i128
+        f32 f64
+        bool char
+    }
+}
+
+mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);
+        pub fn wrapping_add<T>(a: T, b: T) -> T;
+        pub fn wrapping_sub<T>(a: T, b: T) -> T;
+        pub fn rotate_left<T>(a: T, b: T) -> T;
+        pub fn rotate_right<T>(a: T, b: T) -> T;
+        pub fn offset<T>(ptr: *const T, count: isize) -> *const T;
+        pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+        pub fn move_val_init<T>(dst: *mut T, src: T);
+        pub fn uninit<T>() -> T;
+    }
+}
+
+mod ptr {
+    #[lang = "const_ptr"]
+    impl<T> *const T {
+        pub unsafe fn offset(self, count: isize) -> *const T {
+            intrinsics::offset(self, count)
+        }
+    }
+
+    #[lang = "mut_ptr"]
+    impl<T> *mut T {
+        pub unsafe fn offset(self, count: isize) -> *mut T {
+            intrinsics::offset(self, count) as *mut T
+        }
+    }
+
+    pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
+        let x = x as *mut u8;
+        let y = y as *mut u8;
+        let len = mem::size_of::<T>() * count;
+        swap_nonoverlapping_bytes(x, y, len)
+    }
+
+    pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
+        // For types smaller than the block optimization below,
+        // just swap directly to avoid pessimizing codegen.
+        if mem::size_of::<T>() < 32 {
+            let z = read(x);
+            intrinsics::copy_nonoverlapping(y, x, 1);
+            write(y, z);
+        } else {
+            swap_nonoverlapping(x, y, 1);
+        }
+    }
+
+    pub unsafe fn write<T>(dst: *mut T, src: T) {
+        intrinsics::move_val_init(&mut *dst, src)
+    }
+
+    pub unsafe fn read<T>(src: *const T) -> T {
+        let mut tmp: T = mem::uninitialized();
+        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        tmp
+    }
+
+    pub unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
+        struct Block(u64, u64, u64, u64);
+        struct UnalignedBlock(u64, u64, u64, u64);
+
+        let block_size = mem::size_of::<Block>();
+
+        // Loop through x & y, copying them `Block` at a time
+        // The optimizer should unroll the loop fully for most types
+        // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
+        let mut i: usize = 0;
+        while i + block_size <= len {
+            // Create some uninitialized memory as scratch space
+            // Declaring `t` here avoids aligning the stack when this loop is unused
+            let mut t: Block = mem::uninitialized();
+            let t = &mut t as *mut _ as *mut u8;
+            let x = x.offset(i as isize);
+            let y = y.offset(i as isize);
+
+            // Swap a block of bytes of x & y, using t as a temporary buffer
+            // This should be optimized into efficient SIMD operations where available
+            intrinsics::copy_nonoverlapping(x, t, block_size);
+            intrinsics::copy_nonoverlapping(y, x, block_size);
+            intrinsics::copy_nonoverlapping(t, y, block_size);
+            i += block_size;
+        }
+
+        if i < len {
+            // Swap any remaining bytes
+            let mut t: UnalignedBlock = mem::uninitialized();
+            let rem = len - i;
+
+            let t = &mut t as *mut _ as *mut u8;
+            let x = x.offset(i as isize);
+            let y = y.offset(i as isize);
+
+            intrinsics::copy_nonoverlapping(x, t, rem);
+            intrinsics::copy_nonoverlapping(y, x, rem);
+            intrinsics::copy_nonoverlapping(t, y, rem);
+        }
+    }
+}
+
+mod mem {
+    extern "rust-intrinsic" {
+        #[rustc_const_stable(feature = "const_transmute", since = "1.46.0")]
+        pub fn transmute<T, U>(_: T) -> U;
+        #[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
+        pub fn size_of<T>() -> usize;
+    }
+
+    pub fn swap<T>(x: &mut T, y: &mut T) {
+        unsafe {
+            ptr::swap_nonoverlapping_one(x, y);
+        }
+    }
+
+    pub fn replace<T>(dest: &mut T, mut src: T) -> T {
+        swap(dest, &mut src);
+        src
+    }
+
+    pub unsafe fn uninitialized<T>() -> T {
+        intrinsics::uninit()
+    }
+}
+
+macro_rules! impl_uint {
+    ($($ty:ident = $lang:literal),*) => {
+        $(
+            impl $ty {
+                pub fn wrapping_add(self, rhs: Self) -> Self {
+                    unsafe {
+                        intrinsics::wrapping_add(self, rhs)
+                    }
+                }
+
+                pub fn wrapping_sub(self, rhs: Self) -> Self {
+                    unsafe {
+                        intrinsics::wrapping_sub(self, rhs)
+                    }
+                }
+
+                pub fn rotate_left(self, n: u32) -> Self {
+                    unsafe {
+                        intrinsics::rotate_left(self, n as Self)
+                    }
+                }
+
+                pub fn rotate_right(self, n: u32) -> Self {
+                    unsafe {
+                        intrinsics::rotate_right(self, n as Self)
+                    }
+                }
+
+                pub fn to_le(self) -> Self {
+                    #[cfg(target_endian = "little")]
+                    {
+                        self
+                    }
+                }
+
+                pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+                    Self::from_le(Self::from_ne_bytes(bytes))
+                }
+
+                pub const fn from_le(x: Self) -> Self {
+                    #[cfg(target_endian = "little")]
+                    {
+                        x
+                    }
+                }
+
+                pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+                    unsafe { mem::transmute(bytes) }
+                }
+
+                pub fn checked_add(self, rhs: Self) -> Option<Self> {
+                    let (a, b) = self.overflowing_add(rhs);
+                    if b {
+                        Option::None
+                    } else {
+                        Option::Some(a)
+                    }
+                }
+
+                pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+                    let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+                    (a as Self, b)
+                }
+            }
+        )*
+    }
+}
+
+impl_uint!(
+    u8 = "u8",
+    u16 = "u16",
+    u32 = "u32",
+    u64 = "u64",
+    usize = "usize"
+);
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! add_impl {
+    ($($t:ty)*) => ($(
+        impl Add for $t {
+            type Output = $t;
+
+            fn add(self, other: $t) -> $t { self + other }
+        }
+    )*)
+}
+
+add_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+    type Output;
+
+    fn sub(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! sub_impl {
+    ($($t:ty)*) => ($(
+        impl Sub for $t {
+            type Output = $t;
+
+            fn sub(self, other: $t) -> $t { self - other }
+        }
+    )*)
+}
+
+sub_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "Range"]
+pub struct Range<Idx> {
+    pub start: Idx,
+    pub end: Idx,
+}
+
+pub trait TryFrom<T>: Sized {
+    /// The type returned in the event of a conversion error.
+    type Error;
+
+    /// Performs the conversion.
+    fn try_from(value: T) -> Result<Self, Self::Error>;
+}
+
+pub trait From<T>: Sized {
+    fn from(_: T) -> Self;
+}
+
+impl<T> From<T> for T {
+    fn from(t: T) -> T {
+        t
+    }
+}
+
+impl<T, U> TryFrom<U> for T
+where
+    T: From<U>,
+{
+    type Error = !;
+
+    fn try_from(value: U) -> Result<Self, Self::Error> {
+        Ok(T::from(value))
+    }
+}
+
+trait Step {
+    /// Returns the number of steps between two step objects. The count is
+    /// inclusive of `start` and exclusive of `end`.
+    ///
+    /// Returns `None` if it is not possible to calculate `steps_between`
+    /// without overflow.
+    fn steps_between(start: &Self, end: &Self) -> Option<usize>;
+
+    /// Replaces this step with `1`, returning itself
+    fn replace_one(&mut self) -> Self;
+
+    /// Replaces this step with `0`, returning itself
+    fn replace_zero(&mut self) -> Self;
+
+    /// Adds one to this step, returning the result
+    fn add_one(&self) -> Self;
+
+    /// Subtracts one from this step, returning the result
+    fn sub_one(&self) -> Self;
+
+    /// Adds a usize, returning None on overflow
+    fn add_usize(&self, n: usize) -> Option<Self>;
+}
+
+// These are still macro-generated because the integer literals resolve to different types.
+macro_rules! step_identical_methods {
+    () => {
+        #[inline]
+        fn replace_one(&mut self) -> Self {
+            mem::replace(self, 1)
+        }
+
+        #[inline]
+        fn replace_zero(&mut self) -> Self {
+            mem::replace(self, 0)
+        }
+
+        #[inline]
+        fn add_one(&self) -> Self {
+            Add::add(*self, 1)
+        }
+
+        #[inline]
+        fn sub_one(&self) -> Self {
+            Sub::sub(*self, 1)
+        }
+    };
+}
+
+macro_rules! step_impl_unsigned {
+    ($($t:ty)*) => ($(
+        impl Step for $t {
+            fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+                if *start < *end {
+                    // Note: We assume $t <= usize here
+                    Option::Some((*end - *start) as usize)
+                } else {
+                    Option::Some(0)
+                }
+            }
+
+            fn add_usize(&self, n: usize) -> Option<Self> {
+                match <$t>::try_from(n) {
+                    Result::Ok(n_as_t) => self.checked_add(n_as_t),
+                    Result::Err(_) => Option::None,
+                }
+            }
+
+            step_identical_methods!();
+        }
+    )*)
+}
+macro_rules! step_impl_signed {
+    ($( [$t:ty : $unsigned:ty] )*) => ($(
+        impl Step for $t {
+            #[inline]
+            #[allow(trivial_numeric_casts)]
+            fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+                if *start < *end {
+                    // Note: We assume $t <= isize here
+                    // Use .wrapping_sub and cast to usize to compute the
+                    // difference that may not fit inside the range of isize.
+                    Option::Some((*end as isize).wrapping_sub(*start as isize) as usize)
+                } else {
+                    Option::Some(0)
+                }
+            }
+
+            #[inline]
+            #[allow(unreachable_patterns)]
+            fn add_usize(&self, n: usize) -> Option<Self> {
+                match <$unsigned>::try_from(n) {
+                    Result::Ok(n_as_unsigned) => {
+                        // Wrapping in unsigned space handles cases like
+                        // `-120_i8.add_usize(200) == Option::Some(80_i8)`,
+                        // even though 200_usize is out of range for i8.
+                        let wrapped = (*self as $unsigned).wrapping_add(n_as_unsigned) as $t;
+                        if wrapped >= *self {
+                            Option::Some(wrapped)
+                        } else {
+                            Option::None // Addition overflowed
+                        }
+                    }
+                    Result::Err(_) => Option::None,
+                }
+            }
+
+            step_identical_methods!();
+        }
+    )*)
+}
+
+macro_rules! step_impl_no_between {
+    ($($t:ty)*) => ($(
+        impl Step for $t {
+            #[inline]
+            fn steps_between(_start: &Self, _end: &Self) -> Option<usize> {
+                Option::None
+            }
+
+            #[inline]
+            fn add_usize(&self, n: usize) -> Option<Self> {
+                self.checked_add(n as $t)
+            }
+
+            step_identical_methods!();
+        }
+    )*)
+}
+
+step_impl_unsigned!(usize);
+
+pub trait Iterator {
+    type Item;
+
+    #[lang = "next"]
+    fn next(&mut self) -> Option<Self::Item>;
+}
+
+impl<A: Step> Iterator for Range<A> {
+    type Item = A;
+
+    fn next(&mut self) -> Option<A> {
+        if self.start < self.end {
+            // We check for overflow here, even though it can't actually
+            // happen. Adding this check does however help llvm vectorize loops
+            // for some ranges that don't get vectorized otherwise,
+            // and this won't actually result in an extra check in an optimized build.
+            match self.start.add_usize(1) {
+                Option::Some(mut n) => {
+                    mem::swap(&mut n, &mut self.start);
+                    Option::Some(n)
+                }
+                Option::None => Option::None,
+            }
+        } else {
+            Option::None
+        }
+    }
+}
+
+pub trait IntoIterator {
+    type Item;
+
+    type IntoIter: Iterator<Item = Self::Item>;
+
+    #[lang = "into_iter"]
+    fn into_iter(self) -> Self::IntoIter;
+}
+
+impl<I: Iterator> IntoIterator for I {
+    type Item = I::Item;
+    type IntoIter = I;
+
+    fn into_iter(self) -> I {
+        self
+    }
+}
+
+pub fn main() -> i32 {
+    let a = 1usize..3usize;
+
+    for i in a { // { dg-warning "unused name" }
+        unsafe { puts("loop\0" as *const str as *const i8); }
+    }
+
+    0
+}
diff --git a/gcc/testsuite/rust/execute/torture/for-loop2.rs b/gcc/testsuite/rust/execute/torture/for-loop2.rs
new file mode 100644
index 00000000000..5ba2cd1351a
--- /dev/null
+++ b/gcc/testsuite/rust/execute/torture/for-loop2.rs
@@ -0,0 +1,544 @@
+// { dg-output "loop1\r*\nloop2\r*\n" }
+#![feature(intrinsics)]
+
+pub use option::Option::{self, None, Some};
+pub use result::Result::{self, Err, Ok};
+
+extern "C" {
+    fn printf(s: *const i8, ...);
+}
+
+mod option {
+    pub enum Option<T> {
+        #[lang = "None"]
+        None,
+        #[lang = "Some"]
+        Some(T),
+    }
+}
+
+mod result {
+    enum Result<T, E> {
+        Ok(T),
+        Err(E),
+    }
+}
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "clone"]
+pub trait Clone: Sized {
+    fn clone(&self) -> Self;
+
+    fn clone_from(&mut self, source: &Self) {
+        *self = source.clone()
+    }
+}
+
+mod impls {
+    use super::Clone;
+
+    macro_rules! impl_clone {
+        ($($t:ty)*) => {
+            $(
+                impl Clone for $t {
+                    fn clone(&self) -> Self {
+                        *self
+                    }
+                }
+            )*
+        }
+    }
+
+    impl_clone! {
+        usize u8 u16 u32 u64 // u128
+        isize i8 i16 i32 i64 // i128
+        f32 f64
+        bool char
+    }
+}
+
+#[lang = "copy"]
+pub trait Copy: Clone {
+    // Empty.
+}
+
+mod copy_impls {
+    use super::Copy;
+
+    macro_rules! impl_copy {
+        ($($t:ty)*) => {
+            $(
+                impl Copy for $t {}
+            )*
+        }
+    }
+
+    impl_copy! {
+        usize u8 u16 u32 u64 // u128
+        isize i8 i16 i32 i64 // i128
+        f32 f64
+        bool char
+    }
+}
+
+mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn add_with_overflow<T>(x: T, y: T) -> (T, bool);
+        pub fn wrapping_add<T>(a: T, b: T) -> T;
+        pub fn wrapping_sub<T>(a: T, b: T) -> T;
+        pub fn rotate_left<T>(a: T, b: T) -> T;
+        pub fn rotate_right<T>(a: T, b: T) -> T;
+        pub fn offset<T>(ptr: *const T, count: isize) -> *const T;
+        pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+        pub fn move_val_init<T>(dst: *mut T, src: T);
+        pub fn uninit<T>() -> T;
+    }
+}
+
+mod ptr {
+    #[lang = "const_ptr"]
+    impl<T> *const T {
+        pub unsafe fn offset(self, count: isize) -> *const T {
+            intrinsics::offset(self, count)
+        }
+    }
+
+    #[lang = "mut_ptr"]
+    impl<T> *mut T {
+        pub unsafe fn offset(self, count: isize) -> *mut T {
+            intrinsics::offset(self, count) as *mut T
+        }
+    }
+
+    pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
+        let x = x as *mut u8;
+        let y = y as *mut u8;
+        let len = mem::size_of::<T>() * count;
+        swap_nonoverlapping_bytes(x, y, len)
+    }
+
+    pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
+        // For types smaller than the block optimization below,
+        // just swap directly to avoid pessimizing codegen.
+        if mem::size_of::<T>() < 32 {
+            let z = read(x);
+            intrinsics::copy_nonoverlapping(y, x, 1);
+            write(y, z);
+        } else {
+            swap_nonoverlapping(x, y, 1);
+        }
+    }
+
+    pub unsafe fn write<T>(dst: *mut T, src: T) {
+        intrinsics::move_val_init(&mut *dst, src)
+    }
+
+    pub unsafe fn read<T>(src: *const T) -> T {
+        let mut tmp: T = mem::uninitialized();
+        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        tmp
+    }
+
+    pub unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
+        struct Block(u64, u64, u64, u64);
+        struct UnalignedBlock(u64, u64, u64, u64);
+
+        let block_size = mem::size_of::<Block>();
+
+        // Loop through x & y, copying them `Block` at a time
+        // The optimizer should unroll the loop fully for most types
+        // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
+        let mut i: usize = 0;
+        while i + block_size <= len {
+            // Create some uninitialized memory as scratch space
+            // Declaring `t` here avoids aligning the stack when this loop is unused
+            let mut t: Block = mem::uninitialized();
+            let t = &mut t as *mut _ as *mut u8;
+            let x = x.offset(i as isize);
+            let y = y.offset(i as isize);
+
+            // Swap a block of bytes of x & y, using t as a temporary buffer
+            // This should be optimized into efficient SIMD operations where available
+            intrinsics::copy_nonoverlapping(x, t, block_size);
+            intrinsics::copy_nonoverlapping(y, x, block_size);
+            intrinsics::copy_nonoverlapping(t, y, block_size);
+            i += block_size;
+        }
+
+        if i < len {
+            // Swap any remaining bytes
+            let mut t: UnalignedBlock = mem::uninitialized();
+            let rem = len - i;
+
+            let t = &mut t as *mut _ as *mut u8;
+            let x = x.offset(i as isize);
+            let y = y.offset(i as isize);
+
+            intrinsics::copy_nonoverlapping(x, t, rem);
+            intrinsics::copy_nonoverlapping(y, x, rem);
+            intrinsics::copy_nonoverlapping(t, y, rem);
+        }
+    }
+}
+
+mod mem {
+    extern "rust-intrinsic" {
+        #[rustc_const_stable(feature = "const_transmute", since = "1.46.0")]
+        pub fn transmute<T, U>(_: T) -> U;
+        #[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
+        pub fn size_of<T>() -> usize;
+    }
+
+    pub fn swap<T>(x: &mut T, y: &mut T) {
+        unsafe {
+            ptr::swap_nonoverlapping_one(x, y);
+        }
+    }
+
+    pub fn replace<T>(dest: &mut T, mut src: T) -> T {
+        swap(dest, &mut src);
+        src
+    }
+
+    pub unsafe fn uninitialized<T>() -> T {
+        intrinsics::uninit()
+    }
+}
+
+macro_rules! impl_uint {
+    ($($ty:ident = $lang:literal),*) => {
+        $(
+            impl $ty {
+                pub fn wrapping_add(self, rhs: Self) -> Self {
+                    unsafe {
+                        intrinsics::wrapping_add(self, rhs)
+                    }
+                }
+
+                pub fn wrapping_sub(self, rhs: Self) -> Self {
+                    unsafe {
+                        intrinsics::wrapping_sub(self, rhs)
+                    }
+                }
+
+                pub fn rotate_left(self, n: u32) -> Self {
+                    unsafe {
+                        intrinsics::rotate_left(self, n as Self)
+                    }
+                }
+
+                pub fn rotate_right(self, n: u32) -> Self {
+                    unsafe {
+                        intrinsics::rotate_right(self, n as Self)
+                    }
+                }
+
+                pub fn to_le(self) -> Self {
+                    #[cfg(target_endian = "little")]
+                    {
+                        self
+                    }
+                }
+
+                pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+                    Self::from_le(Self::from_ne_bytes(bytes))
+                }
+
+                pub const fn from_le(x: Self) -> Self {
+                    #[cfg(target_endian = "little")]
+                    {
+                        x
+                    }
+                }
+
+                pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+                    unsafe { mem::transmute(bytes) }
+                }
+
+                pub fn checked_add(self, rhs: Self) -> Option<Self> {
+                    let (a, b) = self.overflowing_add(rhs);
+                    if b {
+                        Option::None
+                    } else {
+                        Option::Some(a)
+                    }
+                }
+
+                pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+                    let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+                    (a as Self, b)
+                }
+            }
+        )*
+    }
+}
+
+impl_uint!(
+    u8 = "u8",
+    u16 = "u16",
+    u32 = "u32",
+    u64 = "u64",
+    usize = "usize"
+);
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! add_impl {
+    ($($t:ty)*) => ($(
+        impl Add for $t {
+            type Output = $t;
+
+            fn add(self, other: $t) -> $t { self + other }
+        }
+    )*)
+}
+
+add_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+    type Output;
+
+    fn sub(self, rhs: RHS) -> Self::Output;
+}
+macro_rules! sub_impl {
+    ($($t:ty)*) => ($(
+        impl Sub for $t {
+            type Output = $t;
+
+            fn sub(self, other: $t) -> $t { self - other }
+        }
+    )*)
+}
+
+sub_impl! { usize u8 u16 u32 u64 /*isize i8 i16 i32 i64*/ f32 f64 }
+
+#[lang = "Range"]
+pub struct Range<Idx> {
+    pub start: Idx,
+    pub end: Idx,
+}
+
+pub trait TryFrom<T>: Sized {
+    /// The type returned in the event of a conversion error.
+    type Error;
+
+    /// Performs the conversion.
+    fn try_from(value: T) -> Result<Self, Self::Error>;
+}
+
+pub trait From<T>: Sized {
+    fn from(_: T) -> Self;
+}
+
+impl<T> From<T> for T {
+    fn from(t: T) -> T {
+        t
+    }
+}
+
+impl<T, U> TryFrom<U> for T
+where
+    T: From<U>,
+{
+    type Error = !;
+
+    fn try_from(value: U) -> Result<Self, Self::Error> {
+        Ok(T::from(value))
+    }
+}
+
+trait Step {
+    /// Returns the number of steps between two step objects. The count is
+    /// inclusive of `start` and exclusive of `end`.
+    ///
+    /// Returns `None` if it is not possible to calculate `steps_between`
+    /// without overflow.
+    fn steps_between(start: &Self, end: &Self) -> Option<usize>;
+
+    /// Replaces this step with `1`, returning itself
+    fn replace_one(&mut self) -> Self;
+
+    /// Replaces this step with `0`, returning itself
+    fn replace_zero(&mut self) -> Self;
+
+    /// Adds one to this step, returning the result
+    fn add_one(&self) -> Self;
+
+    /// Subtracts one from this step, returning the result
+    fn sub_one(&self) -> Self;
+
+    /// Adds a usize, returning None on overflow
+    fn add_usize(&self, n: usize) -> Option<Self>;
+}
+
+// These are still macro-generated because the integer literals resolve to different types.
+macro_rules! step_identical_methods {
+    () => {
+        #[inline]
+        fn replace_one(&mut self) -> Self {
+            mem::replace(self, 1)
+        }
+
+        #[inline]
+        fn replace_zero(&mut self) -> Self {
+            mem::replace(self, 0)
+        }
+
+        #[inline]
+        fn add_one(&self) -> Self {
+            Add::add(*self, 1)
+        }
+
+        #[inline]
+        fn sub_one(&self) -> Self {
+            Sub::sub(*self, 1)
+        }
+    };
+}
+
+macro_rules! step_impl_unsigned {
+    ($($t:ty)*) => ($(
+        impl Step for $t {
+            fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+                if *start < *end {
+                    // Note: We assume $t <= usize here
+                    Option::Some((*end - *start) as usize)
+                } else {
+                    Option::Some(0)
+                }
+            }
+
+            fn add_usize(&self, n: usize) -> Option<Self> {
+                match <$t>::try_from(n) {
+                    Result::Ok(n_as_t) => self.checked_add(n_as_t),
+                    Result::Err(_) => Option::None,
+                }
+            }
+
+            step_identical_methods!();
+        }
+    )*)
+}
+macro_rules! step_impl_signed {
+    ($( [$t:ty : $unsigned:ty] )*) => ($(
+        impl Step for $t {
+            #[inline]
+            #[allow(trivial_numeric_casts)]
+            fn steps_between(start: &$t, end: &$t) -> Option<usize> {
+                if *start < *end {
+                    // Note: We assume $t <= isize here
+                    // Use .wrapping_sub and cast to usize to compute the
+                    // difference that may not fit inside the range of isize.
+                    Option::Some((*end as isize).wrapping_sub(*start as isize) as usize)
+                } else {
+                    Option::Some(0)
+                }
+            }
+
+            #[inline]
+            #[allow(unreachable_patterns)]
+            fn add_usize(&self, n: usize) -> Option<Self> {
+                match <$unsigned>::try_from(n) {
+                    Result::Ok(n_as_unsigned) => {
+                        // Wrapping in unsigned space handles cases like
+                        // `-120_i8.add_usize(200) == Option::Some(80_i8)`,
+                        // even though 200_usize is out of range for i8.
+                        let wrapped = (*self as $unsigned).wrapping_add(n_as_unsigned) as $t;
+                        if wrapped >= *self {
+                            Option::Some(wrapped)
+                        } else {
+                            Option::None // Addition overflowed
+                        }
+                    }
+                    Result::Err(_) => Option::None,
+                }
+            }
+
+            step_identical_methods!();
+        }
+    )*)
+}
+
+macro_rules! step_impl_no_between {
+    ($($t:ty)*) => ($(
+        impl Step for $t {
+            #[inline]
+            fn steps_between(_start: &Self, _end: &Self) -> Option<usize> {
+                Option::None
+            }
+
+            #[inline]
+            fn add_usize(&self, n: usize) -> Option<Self> {
+                self.checked_add(n as $t)
+            }
+
+            step_identical_methods!();
+        }
+    )*)
+}
+
+step_impl_unsigned!(usize);
+
+pub trait Iterator {
+    type Item;
+
+    #[lang = "next"]
+    fn next(&mut self) -> Option<Self::Item>;
+}
+
+impl<A: Step> Iterator for Range<A> {
+    type Item = A;
+
+    fn next(&mut self) -> Option<A> {
+        if self.start < self.end {
+            // We check for overflow here, even though it can't actually
+            // happen. Adding this check does however help llvm vectorize loops
+            // for some ranges that don't get vectorized otherwise,
+            // and this won't actually result in an extra check in an optimized build.
+            match self.start.add_usize(1) {
+                Option::Some(mut n) => {
+                    mem::swap(&mut n, &mut self.start);
+                    Option::Some(n)
+                }
+                Option::None => Option::None,
+            }
+        } else {
+            Option::None
+        }
+    }
+}
+
+pub trait IntoIterator {
+    type Item;
+
+    type IntoIter: Iterator<Item = Self::Item>;
+
+    #[lang = "into_iter"]
+    fn into_iter(self) -> Self::IntoIter;
+}
+
+impl<I: Iterator> IntoIterator for I {
+    type Item = I::Item;
+    type IntoIter = I;
+
+    fn into_iter(self) -> I {
+        self
+    }
+}
+
+pub fn main() -> i32 {
+    let a = 1usize..3usize;
+
+    for i in a {
+        unsafe { printf("loop%d\n\0" as *const str as *const i8, i); }
+    }
+
+    0
+}
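
The patch adds the visitor but does not wire it into the compilation
pipeline. A minimal driver sketch, assuming a parsed crate is available
(the desugar_crate wrapper below is hypothetical; only DesugarForLoops
and its go() entry point come from this patch):

    #include "rust-desugar-for-loops.h"

    // Run the desugar over a freshly parsed crate, before name
    // resolution: the expansion introduces new bindings (__next, iter,
    // result) that later passes must be able to see.
    static void
    desugar_crate (Rust::AST::Crate &crate)
    {
      Rust::AST::DesugarForLoops ().go (crate);
    }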