From 618aeec51f61fb6d80cd4fde9e5fc3b9714ca72b Mon Sep 17 00:00:00 2001
From: Alex Crichton
Date: Wed, 22 Jul 2020 14:51:12 -0700
Subject: [PATCH] Improve codegen for unchecked float casts on wasm

This commit improves codegen for unchecked float-to-int casts on
WebAssembly targets to use the single `iNN.trunc_fMM_{u,s}`
instructions. Previously rustc would emit a bare `fptosi` or `fptoui`
for these casts, but for WebAssembly targets the codegen for those
instructions is quite large. The large codegen comes from the fact that
LLVM is allowed to speculate these instructions, so the lowering has to
guard against the trapping behavior of WebAssembly's conversion
instructions in case they are speculated.

The change here updates the codegen for the unchecked cast intrinsics
with a wasm-specific case that calls the appropriate LLVM intrinsic to
generate the right wasm instruction directly. That intrinsic explicitly
opts in to undefined behavior for out-of-range inputs, so a trap on
such inputs on wasm should be acceptable.

cc #73591
---
 src/librustc_codegen_llvm/context.rs      |  8 +++
 src/librustc_codegen_llvm/intrinsic.rs    | 75 +++++++++++++++++------
 src/test/codegen/unchecked-float-casts.rs |  1 +
 src/test/codegen/wasm_casts_trapping.rs   | 28 +++------
 4 files changed, 74 insertions(+), 38 deletions(-)

diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs
index a07f6c64edcb1..33a3cdbfa9b44 100644
--- a/src/librustc_codegen_llvm/context.rs
+++ b/src/librustc_codegen_llvm/context.rs
@@ -510,6 +510,14 @@ impl CodegenCx<'b, 'tcx> {
         ifn!("llvm.wasm.trunc.saturate.signed.i32.f64", fn(t_f64) -> t_i32);
         ifn!("llvm.wasm.trunc.saturate.signed.i64.f32", fn(t_f32) -> t_i64);
         ifn!("llvm.wasm.trunc.saturate.signed.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);
         ifn!("llvm.trap", fn() -> void);
         ifn!("llvm.debugtrap", fn() -> void);
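For orientation, the eight new `ifn!` declarations register the trapping
WebAssembly truncation intrinsics alongside the saturating ones already
present; each name encodes the signedness of the conversion, the destination
integer width, and the source float width. The sketch below is plain Rust
rather than rustc code, and the function name is made up for illustration; it
spells out the same mapping that reappears as a `match` in the intrinsic.rs
hunk further down.

    fn wasm_trunc_intrinsic(int_width: u32, float_width: u32, signed: bool) -> Option<&'static str> {
        let name = match (int_width, float_width, signed) {
            (32, 32, true) => "llvm.wasm.trunc.signed.i32.f32",
            (32, 64, true) => "llvm.wasm.trunc.signed.i32.f64",
            (64, 32, true) => "llvm.wasm.trunc.signed.i64.f32",
            (64, 64, true) => "llvm.wasm.trunc.signed.i64.f64",
            (32, 32, false) => "llvm.wasm.trunc.unsigned.i32.f32",
            (32, 64, false) => "llvm.wasm.trunc.unsigned.i32.f64",
            (64, 32, false) => "llvm.wasm.trunc.unsigned.i64.f32",
            (64, 64, false) => "llvm.wasm.trunc.unsigned.i64.f64",
            // Other destination widths (i8, i16, i128, ...) keep the plain
            // `fptosi`/`fptoui` lowering in the patch below.
            _ => return None,
        };
        Some(name)
    }

    fn main() {
        assert_eq!(wasm_trunc_intrinsic(32, 64, true), Some("llvm.wasm.trunc.signed.i32.f64"));
        assert_eq!(wasm_trunc_intrinsic(8, 32, false), None);
    }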
diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs
index 2e5929a433de0..11b1c95c58b3e 100644
--- a/src/librustc_codegen_llvm/intrinsic.rs
+++ b/src/librustc_codegen_llvm/intrinsic.rs
@@ -629,27 +629,24 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             }
 
             sym::float_to_int_unchecked => {
-                if float_type_width(arg_tys[0]).is_none() {
-                    span_invalid_monomorphization_error(
-                        tcx.sess,
-                        span,
-                        &format!(
-                            "invalid monomorphization of `float_to_int_unchecked` \
+                let float_width = match float_type_width(arg_tys[0]) {
+                    Some(width) => width,
+                    None => {
+                        span_invalid_monomorphization_error(
+                            tcx.sess,
+                            span,
+                            &format!(
+                                "invalid monomorphization of `float_to_int_unchecked` \
                                   intrinsic: expected basic float type, \
                                   found `{}`",
-                            arg_tys[0]
-                        ),
-                    );
-                    return;
-                }
-                match int_type_width_signed(ret_ty, self.cx) {
-                    Some((width, signed)) => {
-                        if signed {
-                            self.fptosi(args[0].immediate(), self.cx.type_ix(width))
-                        } else {
-                            self.fptoui(args[0].immediate(), self.cx.type_ix(width))
-                        }
+                                arg_tys[0]
+                            ),
+                        );
+                        return;
                     }
+                };
+                let (width, signed) = match int_type_width_signed(ret_ty, self.cx) {
+                    Some(pair) => pair,
                     None => {
                         span_invalid_monomorphization_error(
                             tcx.sess,
@@ -663,7 +660,49 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                         );
                         return;
                     }
+                };
+
+                // The LLVM backend can reorder and speculate `fptosi` and
+                // `fptoui`, so on WebAssembly the codegen for this instruction
+                // is quite heavyweight. To avoid this heavyweight codegen we
+                // instead use the raw wasm intrinsics which will lower to one
+                // instruction in WebAssembly (`iNN.trunc_fMM_{s,u}`). This one
+                // instruction will trap if the operand is out of bounds, but
+                // that's ok since this intrinsic is UB if the operands are out
+                // of bounds, so the behavior can be different on WebAssembly
+                // than other targets.
+                //
+                // Note, however, that when the `nontrapping-fptoint` feature is
+                // enabled in LLVM then LLVM will lower `fptosi` to
+                // `iNN.trunc_sat_fMM_{s,u}`, so if that's the case we don't
+                // bother with intrinsics.
+                let mut result = None;
+                if self.sess().target.target.arch == "wasm32"
+                    && !self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
+                {
+                    let name = match (width, float_width, signed) {
+                        (32, 32, true) => Some("llvm.wasm.trunc.signed.i32.f32"),
+                        (32, 64, true) => Some("llvm.wasm.trunc.signed.i32.f64"),
+                        (64, 32, true) => Some("llvm.wasm.trunc.signed.i64.f32"),
+                        (64, 64, true) => Some("llvm.wasm.trunc.signed.i64.f64"),
+                        (32, 32, false) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
+                        (32, 64, false) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
+                        (64, 32, false) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
+                        (64, 64, false) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
+                        _ => None,
+                    };
+                    if let Some(name) = name {
+                        let intrinsic = self.get_intrinsic(name);
+                        result = Some(self.call(intrinsic, &[args[0].immediate()], None));
+                    }
                 }
+                result.unwrap_or_else(|| {
+                    if signed {
+                        self.fptosi(args[0].immediate(), self.cx.type_ix(width))
+                    } else {
+                        self.fptoui(args[0].immediate(), self.cx.type_ix(width))
+                    }
+                })
             }
 
             sym::discriminant_value => {
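The `sym::float_to_int_unchecked` intrinsic handled above is what the standard
library's `f32::to_int_unchecked` and `f64::to_int_unchecked` methods lower
to. A minimal sketch of a caller, assuming nothing beyond the stable standard
library API (the function name here is illustrative, not from the patch):

    /// Truncate `x` toward zero.
    ///
    /// Safety: `x` must be finite and its truncated value must fit in `i32`;
    /// anything else is undefined behavior (and, after this patch, traps on wasm).
    unsafe fn trunc_f32_to_i32(x: f32) -> i32 {
        x.to_int_unchecked::<i32>()
    }

    fn main() {
        // These inputs are in range, so the calls are sound.
        assert_eq!(unsafe { trunc_f32_to_i32(3.9) }, 3);
        assert_eq!(unsafe { trunc_f32_to_i32(-2.5) }, -2);
    }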
diff --git a/src/test/codegen/unchecked-float-casts.rs b/src/test/codegen/unchecked-float-casts.rs
index 789feea12d6d7..4e3bfcd439744 100644
--- a/src/test/codegen/unchecked-float-casts.rs
+++ b/src/test/codegen/unchecked-float-casts.rs
@@ -2,6 +2,7 @@
 // unchecked intrinsics.
 
 // compile-flags: -C opt-level=3
+// ignore-wasm32 the wasm target is tested in `wasm_casts_*`
 
 #![crate_type = "lib"]
 
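The `ignore-wasm32` directive above is needed because on wasm the unchecked
casts no longer lower to bare `fptosi`/`fptoui`, so that test's CHECK lines
would not match there. The wasm expectations move to `wasm_casts_trapping.rs`
below, where the `to_int_unchecked` functions now expect calls to the trapping
`llvm.wasm.trunc.{signed,unsigned}` intrinsics while the plain `as` casts keep
their existing expectations. A rough source-level picture of the two flavors
being tested; the names are illustrative, and the comments assume the
saturating semantics stable Rust uses for float-to-int `as` casts:

    pub fn cast_with_as(a: f64) -> i32 {
        // `as` casts saturate out-of-range values and send NaN to 0, so on wasm
        // they can keep using the saturating `llvm.wasm.trunc.saturate.*`
        // intrinsics that context.rs already registers.
        a as i32
    }

    pub unsafe fn cast_unchecked(a: f64) -> i32 {
        // Out-of-range or NaN inputs are undefined behavior here, which is why
        // a single trapping `i32.trunc_f64_s` is an acceptable lowering on wasm.
        a.to_int_unchecked()
    }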
diff --git a/src/test/codegen/wasm_casts_trapping.rs b/src/test/codegen/wasm_casts_trapping.rs
index 45b905190493a..b7f8522fdfb03 100644
--- a/src/test/codegen/wasm_casts_trapping.rs
+++ b/src/test/codegen/wasm_casts_trapping.rs
@@ -38,7 +38,6 @@ pub fn cast_f32_i32(a: f32) -> i32 {
     a as _
 }
 
-
 // CHECK-LABEL: @cast_f64_u64
 #[no_mangle]
 pub fn cast_f64_u64(a: f64) -> u64 {
@@ -84,13 +83,10 @@ pub fn cast_f32_u8(a: f32) -> u8 {
     a as _
 }
 
-
-
 // CHECK-LABEL: @cast_unchecked_f64_i64
 #[no_mangle]
 pub unsafe fn cast_unchecked_f64_i64(a: f64) -> i64 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptosi double {{.*}} to i64
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.signed.{{.*}}
     // CHECK-NEXT: ret i64 {{.*}}
     a.to_int_unchecked()
 }
@@ -98,8 +94,7 @@ pub unsafe fn cast_unchecked_f64_i64(a: f64) -> i64 {
 // CHECK-LABEL: @cast_unchecked_f64_i32
 #[no_mangle]
 pub unsafe fn cast_unchecked_f64_i32(a: f64) -> i32 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptosi double {{.*}} to i32
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.signed.{{.*}}
     // CHECK-NEXT: ret i32 {{.*}}
     a.to_int_unchecked()
 }
@@ -107,8 +102,7 @@ pub unsafe fn cast_unchecked_f64_i32(a: f64) -> i32 {
 // CHECK-LABEL: @cast_unchecked_f32_i64
 #[no_mangle]
 pub unsafe fn cast_unchecked_f32_i64(a: f32) -> i64 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptosi float {{.*}} to i64
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.signed.{{.*}}
     // CHECK-NEXT: ret i64 {{.*}}
     a.to_int_unchecked()
 }
@@ -116,18 +110,15 @@ pub unsafe fn cast_unchecked_f32_i64(a: f32) -> i64 {
 // CHECK-LABEL: @cast_unchecked_f32_i32
 #[no_mangle]
 pub unsafe fn cast_unchecked_f32_i32(a: f32) -> i32 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptosi float {{.*}} to i32
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.signed.{{.*}}
     // CHECK-NEXT: ret i32 {{.*}}
     a.to_int_unchecked()
 }
 
-
 // CHECK-LABEL: @cast_unchecked_f64_u64
 #[no_mangle]
 pub unsafe fn cast_unchecked_f64_u64(a: f64) -> u64 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptoui double {{.*}} to i64
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.unsigned.{{.*}}
     // CHECK-NEXT: ret i64 {{.*}}
     a.to_int_unchecked()
 }
@@ -135,8 +126,7 @@ pub unsafe fn cast_unchecked_f64_u64(a: f64) -> u64 {
 // CHECK-LABEL: @cast_unchecked_f64_u32
 #[no_mangle]
 pub unsafe fn cast_unchecked_f64_u32(a: f64) -> u32 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptoui double {{.*}} to i32
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.unsigned.{{.*}}
     // CHECK-NEXT: ret i32 {{.*}}
     a.to_int_unchecked()
 }
@@ -144,8 +134,7 @@ pub unsafe fn cast_unchecked_f64_u32(a: f64) -> u32 {
 // CHECK-LABEL: @cast_unchecked_f32_u64
 #[no_mangle]
 pub unsafe fn cast_unchecked_f32_u64(a: f32) -> u64 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptoui float {{.*}} to i64
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.unsigned.{{.*}}
     // CHECK-NEXT: ret i64 {{.*}}
     a.to_int_unchecked()
 }
@@ -153,8 +142,7 @@ pub unsafe fn cast_unchecked_f32_u64(a: f32) -> u64 {
 // CHECK-LABEL: @cast_unchecked_f32_u32
 #[no_mangle]
 pub unsafe fn cast_unchecked_f32_u32(a: f32) -> u32 {
-    // CHECK-NOT: {{.*}} call {{.*}} @llvm.wasm.trunc.{{.*}}
-    // CHECK: fptoui float {{.*}} to i32
+    // CHECK: {{.*}} call {{.*}} @llvm.wasm.trunc.unsigned.{{.*}}
     // CHECK-NEXT: ret i32 {{.*}}
     a.to_int_unchecked()
 }
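Since the new lowering traps exactly where the intrinsic was already undefined
behavior, callers keep the same obligation as before: only use
`to_int_unchecked` on values known to be in range. A hypothetical caller-side
guard (not part of this patch) that upholds that contract could look like this:

    fn trunc_to_i32(a: f64) -> Option<i32> {
        // Truncation toward zero fits in i32 exactly when -2^31 - 1 < a < 2^31;
        // NaN fails both comparisons and falls through to None.
        if a > (i32::MIN as f64) - 1.0 && a < (i32::MAX as f64) + 1.0 {
            // SAFETY: `a` is finite and its truncated value is representable as
            // i32, so the unchecked cast is defined (and the single wasm
            // `i32.trunc_f64_s` emitted after this patch cannot trap).
            Some(unsafe { a.to_int_unchecked() })
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(trunc_to_i32(-7.8), Some(-7));
        assert_eq!(trunc_to_i32(1e99), None);
        assert_eq!(trunc_to_i32(f64::NAN), None);
    }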