path: root/mlir
author     Frederik Gossen <frgossen@google.com>  2020-12-08 13:57:54 +0100
committer  Frederik Gossen <frgossen@google.com>  2020-12-08 14:33:58 +0100
commit     bb7d43e7d5f683fc3e7109072610dc8d176a3bf8 (patch)
tree       982d37699162ac0a01e843dae06a51fe04c575ec /mlir
parent     083e035c47f6c73084ecf5ab7f41cddca19ce332 (diff)
download   llvm-project-bb7d43e7d5f683fc3e7109072610dc8d176a3bf8.tar.gz
Add rsqrt lowering from standard to NVVM
Differential Revision: https://reviews.llvm.org/D92838
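
For illustration, a minimal sketch of what the new pattern does (the function and argument names below, and the pass invocation, are assumptions for this note, not part of the patch): the GPU-to-NVVM conversion rewrites std.rsqrt into calls to the CUDA libdevice functions, __nv_rsqrtf for f32 and __nv_rsqrt for f64, while f16 operands are extended to f32 and the result truncated back, as exercised by the test below.

    // Hypothetical input: Standard-dialect rsqrt inside a GPU module.
    gpu.module @example {
      func @rsqrt(%x : f32, %y : f64) -> (f32, f64) {
        %a = std.rsqrt %x : f32
        %b = std.rsqrt %y : f64
        std.return %a, %b : f32, f64
      }
    }

    // Approximate result after `mlir-opt -convert-gpu-to-nvvm`, using the
    // LLVM-dialect type spelling of this revision:
    //   %a = llvm.call @__nv_rsqrtf(%x) : (!llvm.float) -> !llvm.float
    //   %b = llvm.call @__nv_rsqrt(%y)  : (!llvm.double) -> !llvm.double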
Diffstat (limited to 'mlir')
-rw-r--r--  mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp   2
-rw-r--r--  mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir          20
2 files changed, 22 insertions, 0 deletions
diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
index 61f97d44e125..4b7bb6e193a4 100644
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -183,6 +183,8 @@ void mlir::populateGpuToNVVMConversionPatterns(
"__nv_log10");
patterns.insert<OpToFuncCallLowering<Log2Op>>(converter, "__nv_log2f",
"__nv_log2");
+ patterns.insert<OpToFuncCallLowering<RsqrtOp>>(converter, "__nv_rsqrtf",
+ "__nv_rsqrt");
patterns.insert<OpToFuncCallLowering<SinOp>>(converter, "__nv_sinf",
"__nv_sin");
patterns.insert<OpToFuncCallLowering<TanhOp>>(converter, "__nv_tanhf",
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
index 8e4e4515c638..347cc48daa20 100644
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -295,6 +295,26 @@ gpu.module @test_module {
// -----
+gpu.module @test_module {
+ // CHECK: llvm.func @__nv_rsqrtf(!llvm.float) -> !llvm.float
+ // CHECK: llvm.func @__nv_rsqrt(!llvm.double) -> !llvm.double
+ // CHECK-LABEL: func @gpu_rsqrt
+ func @gpu_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
+ -> (f16, f32, f64) {
+ %result16 = std.rsqrt %arg_f16 : f16
+ // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float
+ // CHECK-NEXT: llvm.call @__nv_rsqrtf(%{{.*}}) : (!llvm.float) -> !llvm.float
+ // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half
+ %result32 = std.rsqrt %arg_f32 : f32
+ // CHECK: llvm.call @__nv_rsqrtf(%{{.*}}) : (!llvm.float) -> !llvm.float
+ %result64 = std.rsqrt %arg_f64 : f64
+ // CHECK: llvm.call @__nv_rsqrt(%{{.*}}) : (!llvm.double) -> !llvm.double
+ std.return %result16, %result32, %result64 : f16, f32, f64
+ }
+}
+
+// -----
+
// Test that we handled properly operation with SymbolTable other than module op
gpu.module @test_module {
"test.symbol_scope"() ({