author     Slava Zakharin <szakharin@nvidia.com>  2024-04-30 17:40:36 -0700
committer  GitHub <noreply@github.com>            2024-04-30 17:40:36 -0700
commit     986f832cff9cfdd9fa6addfadcd93206636311ef (patch)
tree       9960d6f3d8c9b13cea39a87ad091da11f17e441d
parent     8cde1cfc60e36a1b4f632d00810983f0a7eb5462 (diff)
[flang] Added fir.dummy_scope operation to preserve dummy arguments association. (#90642)
The new operation provides an abstract handle that is attached to the [hl]fir.declare operations of the dummy arguments of a subroutine. Dummy arguments of the same subroutine refer to the same fir.dummy_scope, so they can be recognized as such by FIR AliasAnalysis.

Note that the fir.dummy_scope must be specific to the runtime instantiation of a subroutine, so any MLIR inlining/cloning should duplicate it and keep it unique rather than reuse the same fir.dummy_scope for different runtime instantiations. This is why it is modeled as an operation rather than an attribute.

The new operation carries a write effect on DebuggingResource, the same as [hl]fir.declare, so that it is not optimized away.
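For reference, a minimal sketch of the resulting FIR, adapted from the tests added in this patch (the function name and uniq_name values are illustrative):

```
func.func @_QPtest(%arg0: !fir.ref<f32>, %arg1: !fir.ref<f32>) {
  // One fir.dummy_scope per runtime instance of the subroutine.
  %scope = fir.dummy_scope : !fir.dscope
  // Both dummy arguments refer to the same scope, so FIR AliasAnalysis
  // can treat them as dummies of the same runtime instance.
  %x = fir.declare %arg0 dummy_scope %scope {uniq_name = "x"} : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
  %y = fir.declare %arg1 dummy_scope %scope {uniq_name = "y"} : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
  return
}
```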
-rw-r--r--  flang/include/flang/Optimizer/Dialect/FIROps.td        | 84
-rw-r--r--  flang/include/flang/Optimizer/Dialect/FIRTypes.td      | 11
-rw-r--r--  flang/include/flang/Optimizer/HLFIR/HLFIROps.td        |  4
-rw-r--r--  flang/lib/Optimizer/CodeGen/PreCGRewrite.cpp           | 18
-rw-r--r--  flang/lib/Optimizer/CodeGen/TypeConverter.cpp          |  5
-rw-r--r--  flang/lib/Optimizer/Dialect/FIRType.cpp                |  2
-rw-r--r--  flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp              |  3
-rw-r--r--  flang/lib/Optimizer/HLFIR/Transforms/ConvertToFIR.cpp  |  4
-rw-r--r--  flang/test/Fir/dummy-scope-codegen.fir                 |  9
-rw-r--r--  flang/test/Fir/dummy_scope.fir                         | 34
-rw-r--r--  flang/test/HLFIR/declare-codegen.fir                   | 10
-rw-r--r--  flang/test/HLFIR/dummy_scope.fir                       | 34
13 files changed, 216 insertions(+), 11 deletions(-)
diff --git a/flang/include/flang/Optimizer/Dialect/FIROps.td b/flang/include/flang/Optimizer/Dialect/FIROps.td
index 24950322c3ca..dc38e56d93c6 100644
--- a/flang/include/flang/Optimizer/Dialect/FIROps.td
+++ b/flang/include/flang/Optimizer/Dialect/FIROps.td
@@ -3074,6 +3074,7 @@ def fir_DeclareOp : fir_Op<"declare", [AttrSizedOperandSegments,
AnyRefOrBox:$memref,
Optional<AnyShapeOrShiftType>:$shape,
Variadic<AnyIntegerType>:$typeparams,
+ Optional<fir_DummyScopeType>:$dummy_scope,
Builtin_StringAttr:$uniq_name,
OptionalAttr<fir_FortranVariableFlagsAttr>:$fortran_attrs,
OptionalAttr<fir_CUDADataAttributeAttr>:$cuda_attr
@@ -3083,7 +3084,8 @@ def fir_DeclareOp : fir_Op<"declare", [AttrSizedOperandSegments,
let assemblyFormat = [{
$memref (`(` $shape^ `)`)? (`typeparams` $typeparams^)?
- attr-dict `:` functional-type(operands, results)
+ (`dummy_scope` $dummy_scope^)?
+ attr-dict `:` functional-type(operands, results)
}];
let hasVerifier = 1;
@@ -3247,6 +3249,86 @@ def fir_CUDADeallocateOp : fir_Op<"cuda_deallocate",
let hasVerifier = 1;
}
+def fir_DummyScopeOp : fir_Op<"dummy_scope",
+ [MemoryEffects<[MemWrite<DebuggingResource>]>]> {
+ let summary = "Define a scope for dummy arguments";
+
+ let description = [{
+ An abstract handle used to associate the dummy arguments of the same
+ subroutine with each other. During lowering, all [hl]fir.declare
+ operations representing declarations of dummy arguments of a subroutine
+ use the result of this operation. This allows recognizing references
+ to these dummy arguments as belonging to the same runtime instance
+ of the subroutine even after MLIR inlining. Thus, the Fortran aliasing
+ rules may be applied to those references based on the original
+ declarations of the dummy arguments.
+ For example:
+ ```
+ subroutine test(x, y)
+ real, target :: x, y
+ x = y ! may alias
+ call inner(x, y)
+ contains
+ subroutine inner(x, y)
+ real :: x, y
+ x = y ! may not alias
+ end subroutine inner
+ end subroutine test
+ ```
+ After MLIR inlining, the FIR may look like this:
+ ```
+ func.func @_QPtest(
+ %arg0: !fir.ref<f32> {fir.target},
+ %arg1: !fir.ref<f32> {fir.target}) {
+ %0 = fir.declare %arg0 {fortran_attrs = #fir.var_attrs<target>} :
+ (!fir.ref<f32>) -> !fir.ref<f32>
+ %1 = fir.declare %arg1 {fortran_attrs = #fir.var_attrs<target>} :
+ (!fir.ref<f32>) -> !fir.ref<f32>
+ %2 = fir.load %1 : !fir.ref<f32>
+ fir.store %2 to %0 : !fir.ref<f32>
+ %3 = fir.declare %0 : (!fir.ref<f32>) -> !fir.ref<f32>
+ %4 = fir.declare %1 : (!fir.ref<f32>) -> !fir.ref<f32>
+ %5 = fir.load %4 : !fir.ref<f32>
+ fir.store %5 to %3 : !fir.ref<f32>
+ return
+ }
+ ```
+ Without marking %3 and %4 as declarations of the dummy arguments
+ of the same runtime instance of the `inner` subroutine, FIR
+ AliasAnalysis cannot deduce non-aliasing for the second load/store pair.
+ This information may be preserved by using the fir.dummy_scope operation:
+ ```
+ func.func @_QPtest(
+ %arg0: !fir.ref<f32> {fir.target},
+ %arg1: !fir.ref<f32> {fir.target}) {
+ %h1 = fir.dummy_scope : !fir.dscope
+ %0 = fir.declare %arg0 dummy_scope %h1
+ {fortran_attrs = #fir.var_attrs<target>} :
+ (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+ %1 = fir.declare %arg1 dummy_scope %h1
+ {fortran_attrs = #fir.var_attrs<target>} :
+ (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+ %2 = fir.load %1 : !fir.ref<f32>
+ fir.store %2 to %0 : !fir.ref<f32>
+ %h2 = fir.dummy_scope : !fir.dscope
+ %3 = fir.declare %0 dummy_scope %h2 : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+ %4 = fir.declare %1 dummy_scope %h2 : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+ %5 = fir.load %4 : !fir.ref<f32>
+ fir.store %5 to %3 : !fir.ref<f32>
+ return
+ }
+ ```
+ Note that even when `inner` is called and inlined twice inside
+ `test`, the two inlined instances of `inner` must use two different
+ fir.dummy_scope operations for their fir.declare ops. These two
+ fir.dummy_scope operations must remain distinct throughout the
+ optimizations. This is guaranteed by the write memory effect on the
+ DebuggingResource.
+ }];
+
+ let results = (outs fir_DummyScopeType);
+ let assemblyFormat = "attr-dict `:` type(results)";
+}
+
def fir_CUDAAllocOp : fir_Op<"cuda_alloc", [AttrSizedOperandSegments,
MemoryEffects<[MemAlloc]>]> {
let summary = "Allocate an object on device";
diff --git a/flang/include/flang/Optimizer/Dialect/FIRTypes.td b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
index 7378ed93944c..ae984de63db4 100644
--- a/flang/include/flang/Optimizer/Dialect/FIRTypes.td
+++ b/flang/include/flang/Optimizer/Dialect/FIRTypes.td
@@ -576,6 +576,17 @@ def fir_VoidType : FIR_Type<"Void", "void"> {
let genStorageClass = 0;
}
+def fir_DummyScopeType : FIR_Type<"DummyScope", "dscope"> {
+ let summary = "Dummy scope type";
+
+ let description = [{
+ `fir.dscope` is the type of the value produced by the fir.dummy_scope
+ operation. It defines a unique identifier for a runtime instance
+ of a subroutine that is used by the [hl]fir.declare operations
+ representing the declarations of its dummy arguments.
+ }];
+}
+
// Whether a type is a BaseBoxType
def IsBaseBoxTypePred
: CPred<"mlir::isa<::fir::BaseBoxType>($_self)">;
diff --git a/flang/include/flang/Optimizer/HLFIR/HLFIROps.td b/flang/include/flang/Optimizer/HLFIR/HLFIROps.td
index 743a6c98ec1a..ee3c26800ae3 100644
--- a/flang/include/flang/Optimizer/HLFIR/HLFIROps.td
+++ b/flang/include/flang/Optimizer/HLFIR/HLFIROps.td
@@ -87,6 +87,7 @@ def hlfir_DeclareOp : hlfir_Op<"declare", [AttrSizedOperandSegments,
AnyRefOrBox:$memref,
Optional<AnyShapeOrShiftType>:$shape,
Variadic<AnyIntegerType>:$typeparams,
+ Optional<fir_DummyScopeType>:$dummy_scope,
Builtin_StringAttr:$uniq_name,
OptionalAttr<fir_FortranVariableFlagsAttr>:$fortran_attrs,
OptionalAttr<fir_CUDADataAttributeAttr>:$cuda_attr
@@ -96,7 +97,8 @@ def hlfir_DeclareOp : hlfir_Op<"declare", [AttrSizedOperandSegments,
let assemblyFormat = [{
$memref (`(` $shape^ `)`)? (`typeparams` $typeparams^)?
- attr-dict `:` functional-type(operands, results)
+ (`dummy_scope` $dummy_scope^)?
+ attr-dict `:` functional-type(operands, results)
}];
let builders = [
diff --git a/flang/lib/Optimizer/CodeGen/PreCGRewrite.cpp b/flang/lib/Optimizer/CodeGen/PreCGRewrite.cpp
index ce7ee22d5d77..5bd3ec8d1845 100644
--- a/flang/lib/Optimizer/CodeGen/PreCGRewrite.cpp
+++ b/flang/lib/Optimizer/CodeGen/PreCGRewrite.cpp
@@ -281,6 +281,20 @@ public:
}
};
+class DummyScopeOpConversion
+ : public mlir::OpRewritePattern<fir::DummyScopeOp> {
+public:
+ using OpRewritePattern::OpRewritePattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(fir::DummyScopeOp dummyScopeOp,
+ mlir::PatternRewriter &rewriter) const override {
+ rewriter.replaceOpWithNewOp<fir::UndefOp>(dummyScopeOp,
+ dummyScopeOp.getType());
+ return mlir::success();
+ }
+};
+
class CodeGenRewrite : public fir::impl::CodeGenRewriteBase<CodeGenRewrite> {
public:
void runOnOperation() override final {
@@ -293,6 +307,7 @@ public:
target.addIllegalOp<fir::ArrayCoorOp>();
target.addIllegalOp<fir::ReboxOp>();
target.addIllegalOp<fir::DeclareOp>();
+ target.addIllegalOp<fir::DummyScopeOp>();
target.addDynamicallyLegalOp<fir::EmboxOp>([](fir::EmboxOp embox) {
return !(embox.getShape() ||
mlir::isa<fir::SequenceType>(
@@ -321,5 +336,6 @@ std::unique_ptr<mlir::Pass> fir::createFirCodeGenRewritePass() {
void fir::populatePreCGRewritePatterns(mlir::RewritePatternSet &patterns) {
patterns.insert<EmboxConversion, ArrayCoorConversion, ReboxConversion,
- DeclareOpConversion>(patterns.getContext());
+ DeclareOpConversion, DummyScopeOpConversion>(
+ patterns.getContext());
}
diff --git a/flang/lib/Optimizer/CodeGen/TypeConverter.cpp b/flang/lib/Optimizer/CodeGen/TypeConverter.cpp
index fb2ec3f0b2f5..729ece6fc177 100644
--- a/flang/lib/Optimizer/CodeGen/TypeConverter.cpp
+++ b/flang/lib/Optimizer/CodeGen/TypeConverter.cpp
@@ -115,6 +115,11 @@ LLVMTypeConverter::LLVMTypeConverter(mlir::ModuleOp module, bool applyTBAA,
return mlir::LLVM::LLVMStructType::getLiteral(
none.getContext(), std::nullopt, /*isPacked=*/false);
});
+ addConversion([&](fir::DummyScopeType dscope) {
+ // DummyScopeType values must not have any uses after PreCGRewrite.
+ // Convert it here to i1 just in case it survives.
+ return mlir::IntegerType::get(&getContext(), 1);
+ });
// FIXME: https://reviews.llvm.org/D82831 introduced an automatic
// materialization of conversion around function calls that is not working
// well with fir lowering to llvm (incorrect llvm.mlir.cast are inserted).
diff --git a/flang/lib/Optimizer/Dialect/FIRType.cpp b/flang/lib/Optimizer/Dialect/FIRType.cpp
index d9c387ad950e..daa3ac905dad 100644
--- a/flang/lib/Optimizer/Dialect/FIRType.cpp
+++ b/flang/lib/Optimizer/Dialect/FIRType.cpp
@@ -1340,7 +1340,7 @@ void FIROpsDialect::registerTypes() {
fir::ComplexType, FieldType, HeapType, fir::IntegerType, LenType,
LogicalType, LLVMPointerType, PointerType, RealType, RecordType,
ReferenceType, SequenceType, ShapeType, ShapeShiftType, ShiftType,
- SliceType, TypeDescType, fir::VectorType>();
+ SliceType, TypeDescType, fir::VectorType, fir::DummyScopeType>();
fir::ReferenceType::attachInterface<
OpenMPPointerLikeModel<fir::ReferenceType>>(*getContext());
fir::ReferenceType::attachInterface<
diff --git a/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp b/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
index 0d62ca4954e6..4b586ad1d3a4 100644
--- a/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
+++ b/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
@@ -133,7 +133,8 @@ void hlfir::DeclareOp::build(mlir::OpBuilder &builder,
mlir::Type hlfirVariableType =
getHLFIRVariableType(inputType, hasExplicitLbs);
build(builder, result, {hlfirVariableType, inputType}, memref, shape,
- typeparams, nameAttr, fortran_attrs, cuda_attr);
+ typeparams, /*dummy_scope=*/nullptr, nameAttr, fortran_attrs,
+ cuda_attr);
}
mlir::LogicalResult hlfir::DeclareOp::verify() {
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/ConvertToFIR.cpp b/flang/lib/Optimizer/HLFIR/Transforms/ConvertToFIR.cpp
index 517285dce133..3570e0011ca7 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/ConvertToFIR.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/ConvertToFIR.cpp
@@ -328,8 +328,8 @@ public:
cudaAttr = fir::CUDADataAttributeAttr::get(rewriter.getContext(), *attr);
auto firDeclareOp = rewriter.create<fir::DeclareOp>(
loc, memref.getType(), memref, declareOp.getShape(),
- declareOp.getTypeparams(), declareOp.getUniqName(), fortranAttrs,
- cudaAttr);
+ declareOp.getTypeparams(), declareOp.getDummyScope(),
+ declareOp.getUniqName(), fortranAttrs, cudaAttr);
// Propagate other attributes from hlfir.declare to fir.declare.
// OpenACC's acc.declare is one example. Right now, the propagation
diff --git a/flang/test/Fir/dummy-scope-codegen.fir b/flang/test/Fir/dummy-scope-codegen.fir
new file mode 100644
index 000000000000..caef3c1b2578
--- /dev/null
+++ b/flang/test/Fir/dummy-scope-codegen.fir
@@ -0,0 +1,9 @@
+// RUN: fir-opt --cg-rewrite %s -o - | FileCheck %s
+
+func.func @dummy_scope(%arg0: !fir.ref<f32>) {
+ %scope = fir.dummy_scope : !fir.dscope
+ %0 = fir.declare %arg0 dummy_scope %scope {uniq_name = "x"} : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+ return
+}
+// CHECK-LABEL: func.func @dummy_scope(
+// CHECK-NEXT: return
diff --git a/flang/test/Fir/dummy_scope.fir b/flang/test/Fir/dummy_scope.fir
new file mode 100644
index 000000000000..58985923a8f4
--- /dev/null
+++ b/flang/test/Fir/dummy_scope.fir
@@ -0,0 +1,34 @@
+// RUN: fir-opt %s | fir-opt | FileCheck %s
+// RUN: fir-opt %s | fir-opt -cse | FileCheck %s
+
+// CHECK-LABEL: func.func @dummy_scope(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f32>) {
+// CHECK: %[[VAL_1:.*]] = fir.dummy_scope : !fir.dscope
+// CHECK: %[[VAL_2:.*]] = fir.declare %[[VAL_0]] dummy_scope %[[VAL_1]] {uniq_name = "x"} : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+// CHECK: return
+// CHECK: }
+func.func @dummy_scope(%arg0: !fir.ref<f32>) {
+ %scope = fir.dummy_scope : !fir.dscope
+ %0 = fir.declare %arg0 dummy_scope %scope {uniq_name = "x"} : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+ return
+}
+
+// CHECK-LABEL: func.func @dummy_scopes(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f32>) {
+// CHECK: %[[VAL_1:.*]] = fir.dummy_scope : !fir.dscope
+// CHECK: %[[VAL_2:.*]] = fir.declare %[[VAL_0]] dummy_scope %[[VAL_1]] {uniq_name = "x"} : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+// CHECK: %[[VAL_3:.*]] = fir.dummy_scope : !fir.dscope
+// CHECK: %[[VAL_4:.*]] = fir.declare %[[VAL_0]] dummy_scope %[[VAL_3]] {uniq_name = "innerEx"} : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+// CHECK: %[[VAL_5:.*]] = fir.dummy_scope : !fir.dscope
+// CHECK: %[[VAL_6:.*]] = fir.declare %[[VAL_0]] dummy_scope %[[VAL_5]] {uniq_name = "innerEx"} : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+// CHECK: return
+// CHECK: }
+func.func @dummy_scopes(%arg0: !fir.ref<f32>) {
+ %scope_out = fir.dummy_scope : !fir.dscope
+ %0 = fir.declare %arg0 dummy_scope %scope_out {uniq_name = "x"} : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+ %scope_in1 = fir.dummy_scope : !fir.dscope
+ %1 = fir.declare %arg0 dummy_scope %scope_in1 {uniq_name = "innerEx"} : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+ %scope_in2 = fir.dummy_scope : !fir.dscope
+ %2 = fir.declare %arg0 dummy_scope %scope_in2 {uniq_name = "innerEx"} : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
+ return
+}
diff --git a/flang/test/HLFIR/declare-codegen.fir b/flang/test/HLFIR/declare-codegen.fir
index 3e80a52be452..9f51d0fbc7af 100644
--- a/flang/test/HLFIR/declare-codegen.fir
+++ b/flang/test/HLFIR/declare-codegen.fir
@@ -200,3 +200,13 @@ func.func @test_optional_declare(%arg0: !fir.box<!fir.array<?xi32>>) {
// CHECK: %[[VAL_7:.*]] = fir.absent !fir.box<!fir.array<?xi32>>
// CHECK: fir.result %[[VAL_7]] : !fir.box<!fir.array<?xi32>>
// CHECK: }
+
+func.func @dummy_scope(%arg0: !fir.ref<f32>) {
+ %scope = fir.dummy_scope : !fir.dscope
+ %0:2 = hlfir.declare %arg0 dummy_scope %scope {uniq_name = "x"} : (!fir.ref<f32>, !fir.dscope) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+// CHECK-LABEL: func.func @dummy_scope(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f32>) {
+// CHECK: %[[SCOPE:.*]] = fir.dummy_scope : !fir.dscope
+// CHECK: %[[VAL_1:.*]] = fir.declare %[[VAL_0]] dummy_scope %[[SCOPE]] {uniq_name = "x"} : (!fir.ref<f32>, !fir.dscope) -> !fir.ref<f32>
diff --git a/flang/test/HLFIR/dummy_scope.fir b/flang/test/HLFIR/dummy_scope.fir
new file mode 100644
index 000000000000..6b5c61e21f1d
--- /dev/null
+++ b/flang/test/HLFIR/dummy_scope.fir
@@ -0,0 +1,34 @@
+// RUN: fir-opt %s | fir-opt | FileCheck %s
+// RUN: fir-opt %s | fir-opt -cse | FileCheck %s
+
+// CHECK-LABEL: func.func @dummy_scope(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f32>) {
+// CHECK: %[[VAL_1:.*]] = fir.dummy_scope : !fir.dscope
+// CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_0]] dummy_scope %[[VAL_1]] {uniq_name = "x"} : (!fir.ref<f32>, !fir.dscope) -> (!fir.ref<f32>, !fir.ref<f32>)
+// CHECK: return
+// CHECK: }
+func.func @dummy_scope(%arg0: !fir.ref<f32>) {
+ %scope = fir.dummy_scope : !fir.dscope
+ %0:2 = hlfir.declare %arg0 dummy_scope %scope {uniq_name = "x"} : (!fir.ref<f32>, !fir.dscope) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
+
+// CHECK-LABEL: func.func @dummy_scopes(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f32>) {
+// CHECK: %[[VAL_1:.*]] = fir.dummy_scope : !fir.dscope
+// CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_0]] dummy_scope %[[VAL_1]] {uniq_name = "x"} : (!fir.ref<f32>, !fir.dscope) -> (!fir.ref<f32>, !fir.ref<f32>)
+// CHECK: %[[VAL_3:.*]] = fir.dummy_scope : !fir.dscope
+// CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_0]] dummy_scope %[[VAL_3]] {uniq_name = "innerEx"} : (!fir.ref<f32>, !fir.dscope) -> (!fir.ref<f32>, !fir.ref<f32>)
+// CHECK: %[[VAL_5:.*]] = fir.dummy_scope : !fir.dscope
+// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_0]] dummy_scope %[[VAL_5]] {uniq_name = "innerEx"} : (!fir.ref<f32>, !fir.dscope) -> (!fir.ref<f32>, !fir.ref<f32>)
+// CHECK: return
+// CHECK: }
+func.func @dummy_scopes(%arg0: !fir.ref<f32>) {
+ %scope_out = fir.dummy_scope : !fir.dscope
+ %0:2 = hlfir.declare %arg0 dummy_scope %scope_out {uniq_name = "x"} : (!fir.ref<f32>, !fir.dscope) -> (!fir.ref<f32>, !fir.ref<f32>)
+ %scope_in1 = fir.dummy_scope : !fir.dscope
+ %1:2 = hlfir.declare %arg0 dummy_scope %scope_in1 {uniq_name = "innerEx"} : (!fir.ref<f32>, !fir.dscope) -> (!fir.ref<f32>, !fir.ref<f32>)
+ %scope_in2 = fir.dummy_scope : !fir.dscope
+ %2:2 = hlfir.declare %arg0 dummy_scope %scope_in2 {uniq_name = "innerEx"} : (!fir.ref<f32>, !fir.dscope) -> (!fir.ref<f32>, !fir.ref<f32>)
+ return
+}
diff --git a/flang/unittests/Optimizer/FortranVariableTest.cpp b/flang/unittests/Optimizer/FortranVariableTest.cpp
index 790f735a6cf2..f5f559ef887c 100644
--- a/flang/unittests/Optimizer/FortranVariableTest.cpp
+++ b/flang/unittests/Optimizer/FortranVariableTest.cpp
@@ -48,7 +48,8 @@ TEST_F(FortranVariableTest, SimpleScalar) {
mlir::Value addr = builder->create<fir::AllocaOp>(loc, eleType);
auto name = mlir::StringAttr::get(&context, "x");
auto declare = builder->create<fir::DeclareOp>(loc, addr.getType(), addr,
- /*shape=*/mlir::Value{}, /*typeParams=*/std::nullopt, name,
+ /*shape=*/mlir::Value{}, /*typeParams=*/std::nullopt,
+ /*dummy_scope=*/nullptr, name,
/*fortran_attrs=*/fir::FortranVariableFlagsAttr{},
/*cuda_attr=*/fir::CUDADataAttributeAttr{});
@@ -74,7 +75,7 @@ TEST_F(FortranVariableTest, CharacterScalar) {
loc, eleType, /*pinned=*/false, typeParams);
auto name = mlir::StringAttr::get(&context, "x");
auto declare = builder->create<fir::DeclareOp>(loc, addr.getType(), addr,
- /*shape=*/mlir::Value{}, typeParams, name,
+ /*shape=*/mlir::Value{}, typeParams, /*dummy_scope=*/nullptr, name,
/*fortran_attrs=*/fir::FortranVariableFlagsAttr{},
/*cuda_attr=*/fir::CUDADataAttributeAttr{});
@@ -105,7 +106,7 @@ TEST_F(FortranVariableTest, SimpleArray) {
mlir::Value shape = createShape(extents);
auto name = mlir::StringAttr::get(&context, "x");
auto declare = builder->create<fir::DeclareOp>(loc, addr.getType(), addr,
- shape, /*typeParams*/ std::nullopt, name,
+ shape, /*typeParams*/ std::nullopt, /*dummy_scope=*/nullptr, name,
/*fortran_attrs=*/fir::FortranVariableFlagsAttr{},
/*cuda_attr=*/fir::CUDADataAttributeAttr{});
@@ -136,7 +137,7 @@ TEST_F(FortranVariableTest, CharacterArray) {
mlir::Value shape = createShape(extents);
auto name = mlir::StringAttr::get(&context, "x");
auto declare = builder->create<fir::DeclareOp>(loc, addr.getType(), addr,
- shape, typeParams, name,
+ shape, typeParams, /*dummy_scope=*/nullptr, name,
/*fortran_attrs=*/fir::FortranVariableFlagsAttr{},
/*cuda_attr=*/fir::CUDADataAttributeAttr{});