author     Vladimir Marko <vmarko@google.com>    2022-04-21 19:49:23 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>    2022-04-21 19:49:23 +0000
commit     f08f2e3895aae42b601c4f2171e1cf019f4917b1 (patch)
tree       c7ebac1e626dd11e72767af1174b68f8fea9dfa9
parent     25fe661838228515aa268f88c52ddbb1e508ffa8 (diff)
parent     9d31daa0b3f8e748a19555870932bace11f2b199 (diff)
download   art-f08f2e3895aae42b601c4f2171e1cf019f4917b1.tar.gz
Avoid VarHandle checks for boot image field VarHandles. am: 9d31daa0b3
Original change: https://android-review.googlesource.com/c/platform/art/+/2069110

Change-Id: I26d010b317521f48b5920cbd685053cf63f9e7c7
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc    |  45
-rw-r--r--  compiler/optimizing/code_generator_arm64.h     |   1
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc |  39
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h  |   1
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc   |   8
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h    |   2
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc  | 125
-rw-r--r--  compiler/optimizing/intrinsics.h               |  11
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc        | 134
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc     | 144
-rw-r--r--  compiler/optimizing/intrinsics_utils.h         |  23
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc       | 134
-rw-r--r--  compiler/optimizing/sharpening.cc              |  13
-rw-r--r--  runtime/mirror/var_handle.h                    |   4
14 files changed, 485 insertions(+), 199 deletions(-)
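
The use pattern this change targets is a `VarHandle` stored in a static final field of an initialized boot image class, accessed with a compatible access mode and types (see the new comments in instruction_simplifier.cc and intrinsics.h below). A minimal Java sketch of such a use site, with an illustrative class that is not actually part of the boot image:

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    // Illustrative only: if a class like this lived in the boot image and were
    // visibly initialized at AOT compile time, the VarHandle checks for the
    // compareAndSet() call below could be resolved statically.
    final class Counter {
        private volatile int value;

        // Static final VarHandle for an instance field, i.e. a FieldVarHandle
        // with one coordinate (the Counter receiver).
        private static final VarHandle VALUE;
        static {
            try {
                VALUE = MethodHandles.lookup().findVarHandle(Counter.class, "value", int.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        boolean casValue(int expected, int desired) {
            return VALUE.compareAndSet(this, expected, desired);
        }
    }
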
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 76d2a6d3f2..2a0b481b2d 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4622,14 +4622,9 @@ void CodeGeneratorARM64::LoadMethod(MethodLoadKind load_kind, Location temp, HIn
break;
}
case MethodLoadKind::kBootImageRelRo: {
- // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
- uint32_t boot_image_offset = GetBootImageOffset(invoke);
- vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_offset);
- EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
- // Add LDR with its PC-relative .data.bimg.rel.ro patch.
- vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_offset, adrp_label);
// Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
- EmitLdrOffsetPlaceholder(ldr_label, WRegisterFrom(temp), XRegisterFrom(temp));
+ uint32_t boot_image_offset = GetBootImageOffset(invoke);
+ LoadBootImageRelRoEntry(WRegisterFrom(temp), boot_image_offset);
break;
}
case MethodLoadKind::kBssEntry: {
@@ -5035,6 +5030,17 @@ void CodeGeneratorARM64::EmitLdrOffsetPlaceholder(vixl::aarch64::Label* fixup_la
__ ldr(out, MemOperand(base, /* offset placeholder */ 0));
}
+void CodeGeneratorARM64::LoadBootImageRelRoEntry(vixl::aarch64::Register reg,
+ uint32_t boot_image_offset) {
+ DCHECK(reg.IsW());
+ // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
+ vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_offset);
+ EmitAdrpPlaceholder(adrp_label, reg.X());
+ // Add LDR with its PC-relative .data.bimg.rel.ro patch.
+ vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_offset, adrp_label);
+ EmitLdrOffsetPlaceholder(ldr_label, reg.W(), reg.X());
+}
+
void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
uint32_t boot_image_reference) {
if (GetCompilerOptions().IsBootImage()) {
@@ -5045,12 +5051,7 @@ void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
vixl::aarch64::Label* add_label = NewBootImageIntrinsicPatch(boot_image_reference, adrp_label);
EmitAddPlaceholder(add_label, reg.X(), reg.X());
} else if (GetCompilerOptions().GetCompilePic()) {
- // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
- vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_reference);
- EmitAdrpPlaceholder(adrp_label, reg.X());
- // Add LDR with its PC-relative .data.bimg.rel.ro patch.
- vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_reference, adrp_label);
- EmitLdrOffsetPlaceholder(ldr_label, reg.W(), reg.X());
+ LoadBootImageRelRoEntry(reg, boot_image_reference);
} else {
DCHECK(GetCompilerOptions().IsJitCompiler());
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -5063,7 +5064,7 @@ void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
void CodeGeneratorARM64::LoadTypeForBootImageIntrinsic(vixl::aarch64::Register reg,
TypeReference target_type) {
// Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
- DCHECK(GetCompilerOptions().IsBootImage());
+ DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
// Add ADRP with its PC-relative type patch.
vixl::aarch64::Label* adrp_label =
NewBootImageTypePatch(*target_type.dex_file, target_type.TypeIndex());
@@ -5387,13 +5388,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
uint32_t boot_image_offset = CodeGenerator::GetBootImageOffset(cls);
- // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
- vixl::aarch64::Label* adrp_label = codegen_->NewBootImageRelRoPatch(boot_image_offset);
- codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
- // Add LDR with its PC-relative .data.bimg.rel.ro patch.
- vixl::aarch64::Label* ldr_label =
- codegen_->NewBootImageRelRoPatch(boot_image_offset, adrp_label);
- codegen_->EmitLdrOffsetPlaceholder(ldr_label, out.W(), out.X());
+ codegen_->LoadBootImageRelRoEntry(out.W(), boot_image_offset);
break;
}
case HLoadClass::LoadKind::kBssEntry:
@@ -5561,14 +5556,8 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
}
case HLoadString::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
uint32_t boot_image_offset = CodeGenerator::GetBootImageOffset(load);
- vixl::aarch64::Label* adrp_label = codegen_->NewBootImageRelRoPatch(boot_image_offset);
- codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
- // Add LDR with its PC-relative .data.bimg.rel.ro patch.
- vixl::aarch64::Label* ldr_label =
- codegen_->NewBootImageRelRoPatch(boot_image_offset, adrp_label);
- codegen_->EmitLdrOffsetPlaceholder(ldr_label, out.W(), out.X());
+ codegen_->LoadBootImageRelRoEntry(out.W(), boot_image_offset);
return;
}
case HLoadString::LoadKind::kBssEntry: {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a3e153e5c3..f4d652c29c 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -816,6 +816,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
vixl::aarch64::Register out,
vixl::aarch64::Register base);
+ void LoadBootImageRelRoEntry(vixl::aarch64::Register reg, uint32_t boot_image_offset);
void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_reference);
void LoadTypeForBootImageIntrinsic(vixl::aarch64::Register reg, TypeReference type_reference);
void LoadIntrinsicDeclaringClass(vixl::aarch64::Register reg, HInvoke* invoke);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 1c243b635e..09fa598203 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -7527,10 +7527,8 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
}
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- codegen_->NewBootImageRelRoPatch(CodeGenerator::GetBootImageOffset(cls));
- codegen_->EmitMovwMovtPlaceholder(labels, out);
- __ Ldr(out, MemOperand(out, /* offset= */ 0));
+ uint32_t boot_image_offset = CodeGenerator::GetBootImageOffset(cls);
+ codegen_->LoadBootImageRelRoEntry(out, boot_image_offset);
break;
}
case HLoadClass::LoadKind::kBssEntry:
@@ -7539,7 +7537,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = codegen_->NewTypeBssEntryPatch(cls);
codegen_->EmitMovwMovtPlaceholder(labels, out);
// All aligned loads are implicitly atomic consume operations on ARM.
- codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /*offset=*/ 0, read_barrier_option);
generate_null_check = true;
break;
}
@@ -7555,7 +7553,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
cls->GetTypeIndex(),
cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
- codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /*offset=*/ 0, read_barrier_option);
break;
}
case HLoadClass::LoadKind::kRuntimeCall:
@@ -7752,10 +7750,8 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
}
case HLoadString::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- codegen_->NewBootImageRelRoPatch(CodeGenerator::GetBootImageOffset(load));
- codegen_->EmitMovwMovtPlaceholder(labels, out);
- __ Ldr(out, MemOperand(out, /* offset= */ 0));
+ uint32_t boot_image_offset = CodeGenerator::GetBootImageOffset(load);
+ codegen_->LoadBootImageRelRoEntry(out, boot_image_offset);
return;
}
case HLoadString::LoadKind::kBssEntry: {
@@ -7764,7 +7760,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
codegen_->EmitMovwMovtPlaceholder(labels, out);
// All aligned loads are implicitly atomic consume operations on ARM.
codegen_->GenerateGcRootFieldLoad(
- load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
+ load, out_loc, out, /*offset=*/ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
@@ -7785,7 +7781,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
codegen_->GenerateGcRootFieldLoad(
- load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
+ load, out_loc, out, /*offset=*/ 0, kCompilerReadBarrierOption);
return;
}
default:
@@ -9222,10 +9218,7 @@ void CodeGeneratorARMVIXL::LoadMethod(MethodLoadKind load_kind, Location temp, H
}
case MethodLoadKind::kBootImageRelRo: {
uint32_t boot_image_offset = GetBootImageOffset(invoke);
- PcRelativePatchInfo* labels = NewBootImageRelRoPatch(boot_image_offset);
- vixl32::Register temp_reg = RegisterFrom(temp);
- EmitMovwMovtPlaceholder(labels, temp_reg);
- GetAssembler()->LoadFromOffset(kLoadWord, temp_reg, temp_reg, /* offset*/ 0);
+ LoadBootImageRelRoEntry(RegisterFrom(temp), boot_image_offset);
break;
}
case MethodLoadKind::kBssEntry: {
@@ -9520,6 +9513,13 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFil
});
}
+void CodeGeneratorARMVIXL::LoadBootImageRelRoEntry(vixl32::Register reg,
+ uint32_t boot_image_offset) {
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = NewBootImageRelRoPatch(boot_image_offset);
+ EmitMovwMovtPlaceholder(labels, reg);
+ __ Ldr(reg, MemOperand(reg, /*offset=*/ 0));
+}
+
void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg,
uint32_t boot_image_reference) {
if (GetCompilerOptions().IsBootImage()) {
@@ -9527,10 +9527,7 @@ void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg,
NewBootImageIntrinsicPatch(boot_image_reference);
EmitMovwMovtPlaceholder(labels, reg);
} else if (GetCompilerOptions().GetCompilePic()) {
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- NewBootImageRelRoPatch(boot_image_reference);
- EmitMovwMovtPlaceholder(labels, reg);
- __ Ldr(reg, MemOperand(reg, /* offset= */ 0));
+ LoadBootImageRelRoEntry(reg, boot_image_reference);
} else {
DCHECK(GetCompilerOptions().IsJitCompiler());
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -9544,7 +9541,7 @@ void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg,
void CodeGeneratorARMVIXL::LoadTypeForBootImageIntrinsic(vixl::aarch32::Register reg,
TypeReference target_type) {
// Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
- DCHECK(GetCompilerOptions().IsBootImage());
+ DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
PcRelativePatchInfo* labels =
NewBootImageTypePatch(*target_type.dex_file, target_type.TypeIndex());
EmitMovwMovtPlaceholder(labels, reg);
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index f385b3473c..790ad0f8f7 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -639,6 +639,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
dex::TypeIndex type_index,
Handle<mirror::Class> handle);
+ void LoadBootImageRelRoEntry(vixl::aarch32::Register reg, uint32_t boot_image_offset);
void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_reference);
void LoadTypeForBootImageIntrinsic(vixl::aarch32::Register reg, TypeReference type_reference);
void LoadIntrinsicDeclaringClass(vixl::aarch32::Register reg, HInvoke* invoke);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index d919fa7c09..511917a735 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1266,9 +1266,9 @@ void CodeGeneratorX86_64::RecordMethodBssEntryPatch(HInvoke* invoke) {
__ Bind(&method_bss_entry_patches_.back().label);
}
-void CodeGeneratorX86_64::RecordBootImageTypePatch(HLoadClass* load_class) {
- boot_image_type_patches_.emplace_back(
- &load_class->GetDexFile(), load_class->GetTypeIndex().index_);
+void CodeGeneratorX86_64::RecordBootImageTypePatch(const DexFile& dex_file,
+ dex::TypeIndex type_index) {
+ boot_image_type_patches_.emplace_back(&dex_file, type_index.index_);
__ Bind(&boot_image_type_patches_.back().label);
}
@@ -6425,7 +6425,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ leal(out,
Address::Absolute(CodeGeneratorX86_64::kPlaceholder32BitOffset, /* no_rip= */ false));
- codegen_->RecordBootImageTypePatch(cls);
+ codegen_->RecordBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
break;
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index a1309943c7..39a72d8211 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -482,7 +482,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void RecordBootImageRelRoPatch(uint32_t boot_image_offset);
void RecordBootImageMethodPatch(HInvoke* invoke);
void RecordMethodBssEntryPatch(HInvoke* invoke);
- void RecordBootImageTypePatch(HLoadClass* load_class);
+ void RecordBootImageTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
void RecordBootImageStringPatch(HLoadString* load_string);
Label* NewStringBssEntryPatch(HLoadString* load_string);
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 788dc60b23..914e44a18d 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -20,6 +20,7 @@
#include "class_linker-inl.h"
#include "class_root-inl.h"
#include "data_type-inl.h"
+#include "driver/compiler_options.h"
#include "escape.h"
#include "intrinsics.h"
#include "intrinsics_utils.h"
@@ -124,6 +125,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
void SimplifyAllocationIntrinsic(HInvoke* invoke);
void SimplifyVarHandleIntrinsic(HInvoke* invoke);
+ bool CanUseKnownBootImageVarHandle(HInvoke* invoke);
static bool CanEnsureNotNullAt(HInstruction* input, HInstruction* at);
CodeGenerator* codegen_;
@@ -2884,7 +2886,7 @@ void InstructionSimplifierVisitor::SimplifyVarHandleIntrinsic(HInvoke* invoke) {
}
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
- if (expected_coordinates_count == 1u) {
+ if (expected_coordinates_count != 0u) {
HInstruction* object = invoke->InputAt(1);
// The following has been ensured by static checks in the instruction builder.
DCHECK(object->GetType() == DataType::Type::kReference);
@@ -2898,6 +2900,127 @@ void InstructionSimplifierVisitor::SimplifyVarHandleIntrinsic(HInvoke* invoke) {
optimizations.SetSkipObjectNullCheck();
}
}
+
+ if (CanUseKnownBootImageVarHandle(invoke)) {
+ optimizations.SetUseKnownBootImageVarHandle();
+ }
+}
+
+bool InstructionSimplifierVisitor::CanUseKnownBootImageVarHandle(HInvoke* invoke) {
+ // If the `VarHandle` comes from a static final field of an initialized class in
+ // the boot image, we can do the checks at compile time. We do this optimization only
+ // for AOT and only for field handles when we can avoid all checks. This avoids the
+ // possibility of the code concurrently messing with the `VarHandle` using reflection,
+ // we simply perform the operation with the `VarHandle` as seen at compile time.
+ // TODO: Extend this to arrays to support the `AtomicIntegerArray` class.
+ const CompilerOptions& compiler_options = codegen_->GetCompilerOptions();
+ if (!compiler_options.IsAotCompiler()) {
+ return false;
+ }
+ size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
+ if (expected_coordinates_count == 2u) {
+ return false;
+ }
+ HInstruction* var_handle_instruction = invoke->InputAt(0);
+ if (var_handle_instruction->IsNullCheck()) {
+ var_handle_instruction = var_handle_instruction->InputAt(0);
+ }
+ if (!var_handle_instruction->IsStaticFieldGet()) {
+ return false;
+ }
+ ArtField* field = var_handle_instruction->AsStaticFieldGet()->GetFieldInfo().GetField();
+ DCHECK(field->IsStatic());
+ if (!field->IsFinal()) {
+ return false;
+ }
+ ScopedObjectAccess soa(Thread::Current());
+ ObjPtr<mirror::Class> declaring_class = field->GetDeclaringClass();
+ if (!declaring_class->IsVisiblyInitialized()) {
+ // During AOT compilation, dex2oat ensures that initialized classes are visibly initialized.
+ DCHECK(!declaring_class->IsInitialized());
+ return false;
+ }
+ HLoadClass* load_class = var_handle_instruction->InputAt(0)->AsLoadClass();
+ if (kIsDebugBuild) {
+ bool is_in_boot_image = false;
+ if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(declaring_class)) {
+ is_in_boot_image = true;
+ } else if (compiler_options.IsBootImage() || compiler_options.IsBootImageExtension()) {
+ std::string storage;
+ const char* descriptor = declaring_class->GetDescriptor(&storage);
+ is_in_boot_image = compiler_options.IsImageClass(descriptor);
+ }
+ CHECK_EQ(is_in_boot_image, load_class->IsInBootImage());
+ }
+ if (!load_class->IsInBootImage()) {
+ return false;
+ }
+
+ // Get the `VarHandle` object and check its class.
+ ObjPtr<mirror::Class> expected_var_handle_class;
+ switch (expected_coordinates_count) {
+ case 0:
+ expected_var_handle_class = GetClassRoot<mirror::StaticFieldVarHandle>();
+ break;
+ default:
+ DCHECK_EQ(expected_coordinates_count, 1u);
+ expected_var_handle_class = GetClassRoot<mirror::FieldVarHandle>();
+ break;
+ }
+ ObjPtr<mirror::Object> var_handle_object = field->GetObject(declaring_class);
+ if (var_handle_object == nullptr || var_handle_object->GetClass() != expected_var_handle_class) {
+ return false;
+ }
+ ObjPtr<mirror::VarHandle> var_handle = ObjPtr<mirror::VarHandle>::DownCast(var_handle_object);
+
+ // Check access mode.
+ mirror::VarHandle::AccessMode access_mode =
+ mirror::VarHandle::GetAccessModeByIntrinsic(invoke->GetIntrinsic());
+ if (!var_handle->IsAccessModeSupported(access_mode)) {
+ return false;
+ }
+
+ // Check argument types.
+ ObjPtr<mirror::Class> var_type = var_handle->GetVarType();
+ mirror::VarHandle::AccessModeTemplate access_mode_template =
+ mirror::VarHandle::GetAccessModeTemplate(access_mode);
+ // Note: The data type of input arguments does not need to match the type from shorty
+ // due to implicit conversions or avoiding unnecessary conversions before narrow stores.
+ DataType::Type type = (access_mode_template == mirror::VarHandle::AccessModeTemplate::kGet)
+ ? invoke->GetType()
+ : GetDataTypeFromShorty(invoke, invoke->GetNumberOfArguments() - 1u);
+ if (type != DataTypeFromPrimitive(var_type->GetPrimitiveType())) {
+ return false;
+ }
+ if (type == DataType::Type::kReference) {
+ uint32_t arguments_start = /* VarHandle object */ 1u + expected_coordinates_count;
+ uint32_t number_of_arguments = invoke->GetNumberOfArguments();
+ for (size_t arg_index = arguments_start; arg_index != number_of_arguments; ++arg_index) {
+ HInstruction* arg = invoke->InputAt(arg_index);
+ DCHECK_EQ(arg->GetType(), DataType::Type::kReference);
+ if (!arg->IsNullConstant()) {
+ ReferenceTypeInfo arg_type_info = arg->GetReferenceTypeInfo();
+ if (!arg_type_info.IsValid() ||
+ !var_type->IsAssignableFrom(arg_type_info.GetTypeHandle().Get())) {
+ return false;
+ }
+ }
+ }
+ }
+
+ // Check the first coordinate.
+ if (expected_coordinates_count != 0u) {
+ ObjPtr<mirror::Class> coordinate0_type = var_handle->GetCoordinateType0();
+ DCHECK(coordinate0_type != nullptr);
+ ReferenceTypeInfo object_type_info = invoke->InputAt(1)->GetReferenceTypeInfo();
+ if (!object_type_info.IsValid() ||
+ !coordinate0_type->IsAssignableFrom(object_type_info.GetTypeHandle().Get())) {
+ return false;
+ }
+ }
+
+ // All required checks passed.
+ return true;
}
void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
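
For reference-typed values, `CanUseKnownBootImageVarHandle()` above additionally requires that each value argument's static reference type info be assignable to the handle's var type, and that the receiver be assignable to coordinateType0; otherwise the runtime checks are kept. A hedged Java illustration of that distinction (the class is made up for the example and is not claimed to be in the boot image):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    final class Holder {
        volatile CharSequence text;

        static final VarHandle TEXT;
        static {
            try {
                TEXT = MethodHandles.lookup().findVarHandle(Holder.class, "text", CharSequence.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        static void storeKnown(Holder h, String s) {
            // String is statically assignable to CharSequence, so the var type
            // check could be proven at compile time (given a boot image handle).
            TEXT.setRelease(h, s);
        }

        static void storeUnknown(Holder h, Object o) {
            // Object is not statically assignable to CharSequence, so the
            // compiler keeps the runtime var type check for this call.
            TEXT.setRelease(h, o);
        }
    }
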
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 738dac0c4a..5109882295 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -230,6 +230,17 @@ class VarHandleOptimizations : public IntrinsicOptimizations {
INTRINSIC_OPTIMIZATION(DoNotIntrinsify, 0); // One of the checks is statically known to fail.
INTRINSIC_OPTIMIZATION(SkipObjectNullCheck, 1); // Not applicable for static fields.
+
+ // Use known `VarHandle` from the boot image. To apply this optimization, the following
+ // `VarHandle` checks must pass based on static analysis:
+ // - `VarHandle` type check (must match the coordinate count),
+ // - access mode check,
+ // - var type check (including assignability for reference types),
+ // - object type check (except for static field VarHandles that do not take an object).
+ // Note that the object null check is controlled by the above flag `SkipObjectNullCheck`
+ // and arrays and byte array views (which always need a range check and sometimes also
+ // array type check) are currently unsupported.
+ INTRINSIC_OPTIMIZATION(UseKnownBootImageVarHandle, 2);
};
#undef INTRISIC_OPTIMIZATION
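
The new flag deliberately leaves array and byte array view handles on the existing check path, since those always need a range check and sometimes also an array type check. For contrast, the standard java.lang.invoke factories that produce such handles (shown only to make the exclusion concrete):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;
    import java.nio.ByteOrder;

    class ExcludedHandles {
        // Array element handle: every access needs an index range check and
        // sometimes an array type check, so UseKnownBootImageVarHandle is
        // never set for it.
        static final VarHandle INT_ELEMENTS =
                MethodHandles.arrayElementVarHandle(int[].class);

        // Byte array view handle: also needs range checks, so it is
        // likewise excluded.
        static final VarHandle INT_VIEW =
                MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.LITTLE_ENDIAN);
    }
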
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index c8fccfb7d5..646f4f2ea7 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -4422,24 +4422,26 @@ static void GenerateVarHandleInstanceFieldChecks(HInvoke* invoke,
__ Cbz(object, slow_path->GetEntryLabel());
}
- UseScratchRegisterScope temps(masm);
- Register temp = temps.AcquireW();
- Register temp2 = temps.AcquireW();
+ if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireW();
+ Register temp2 = temps.AcquireW();
- // Check that the VarHandle references an instance field by checking that
- // coordinateType1 == null. coordinateType0 should not be null, but this is handled by the
- // type compatibility check with the source object's type, which will fail for null.
- DCHECK_EQ(coordinate_type0_offset.Int32Value() + 4, coordinate_type1_offset.Int32Value());
- __ Ldp(temp, temp2, HeapOperand(varhandle, coordinate_type0_offset.Int32Value()));
- codegen->GetAssembler()->MaybeUnpoisonHeapReference(temp);
- // No need for read barrier or unpoisoning of coordinateType1 for comparison with null.
- __ Cbnz(temp2, slow_path->GetEntryLabel());
+ // Check that the VarHandle references an instance field by checking that
+ // coordinateType1 == null. coordinateType0 should not be null, but this is handled by the
+ // type compatibility check with the source object's type, which will fail for null.
+ DCHECK_EQ(coordinate_type0_offset.Int32Value() + 4, coordinate_type1_offset.Int32Value());
+ __ Ldp(temp, temp2, HeapOperand(varhandle, coordinate_type0_offset.Int32Value()));
+ codegen->GetAssembler()->MaybeUnpoisonHeapReference(temp);
+ // No need for read barrier or unpoisoning of coordinateType1 for comparison with null.
+ __ Cbnz(temp2, slow_path->GetEntryLabel());
- // Check that the object has the correct type.
- // We deliberately avoid the read barrier, letting the slow path handle the false negatives.
- temps.Release(temp2); // Needed by GenerateSubTypeObjectCheckNoReadBarrier().
- GenerateSubTypeObjectCheckNoReadBarrier(
- codegen, slow_path, object, temp, /*object_can_be_null=*/ false);
+ // Check that the object has the correct type.
+ // We deliberately avoid the read barrier, letting the slow path handle the false negatives.
+ temps.Release(temp2); // Needed by GenerateSubTypeObjectCheckNoReadBarrier().
+ GenerateSubTypeObjectCheckNoReadBarrier(
+ codegen, slow_path, object, temp, /*object_can_be_null=*/ false);
+ }
}
static void GenerateVarHandleArrayChecks(HInvoke* invoke,
@@ -4545,11 +4547,22 @@ static VarHandleSlowPathARM64* GenerateVarHandleChecks(HInvoke* invoke,
CodeGeneratorARM64* codegen,
std::memory_order order,
DataType::Type type) {
+ size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
+ VarHandleOptimizations optimizations(invoke);
+ if (optimizations.GetUseKnownBootImageVarHandle()) {
+ DCHECK_NE(expected_coordinates_count, 2u);
+ if (expected_coordinates_count == 0u || optimizations.GetSkipObjectNullCheck()) {
+ return nullptr;
+ }
+ }
+
VarHandleSlowPathARM64* slow_path =
new (codegen->GetScopedAllocator()) VarHandleSlowPathARM64(invoke, order);
codegen->AddSlowPath(slow_path);
- GenerateVarHandleAccessModeAndVarTypeChecks(invoke, codegen, slow_path, type);
+ if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ GenerateVarHandleAccessModeAndVarTypeChecks(invoke, codegen, slow_path, type);
+ }
GenerateVarHandleCoordinateChecks(invoke, codegen, slow_path);
return slow_path;
@@ -4582,25 +4595,42 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
if (expected_coordinates_count <= 1u) {
- // For static fields, we need to fill the `target.object` with the declaring class,
- // so we can use `target.object` as temporary for the `ArtMethod*`. For instance fields,
- // we do not need the declaring class, so we can forget the `ArtMethod*` when
- // we load the `target.offset`, so use the `target.offset` to hold the `ArtMethod*`.
- Register method = (expected_coordinates_count == 0) ? target.object : target.offset;
-
- const MemberOffset art_field_offset = mirror::FieldVarHandle::ArtFieldOffset();
- const MemberOffset offset_offset = ArtField::OffsetOffset();
-
- // Load the ArtField, the offset and, if needed, declaring class.
- __ Ldr(method.X(), HeapOperand(varhandle, art_field_offset.Int32Value()));
- __ Ldr(target.offset, MemOperand(method.X(), offset_offset.Int32Value()));
- if (expected_coordinates_count == 0u) {
- codegen->GenerateGcRootFieldLoad(invoke,
- LocationFrom(target.object),
- method.X(),
- ArtField::DeclaringClassOffset().Int32Value(),
- /*fixup_label=*/ nullptr,
- kCompilerReadBarrierOption);
+ if (VarHandleOptimizations(invoke).GetUseKnownBootImageVarHandle()) {
+ ScopedObjectAccess soa(Thread::Current());
+ ArtField* target_field = GetBootImageVarHandleField(invoke);
+ if (expected_coordinates_count == 0u) {
+ ObjPtr<mirror::Class> declaring_class = target_field->GetDeclaringClass();
+ if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(declaring_class)) {
+ uint32_t boot_image_offset = CodeGenerator::GetBootImageOffset(declaring_class);
+ codegen->LoadBootImageRelRoEntry(target.object, boot_image_offset);
+ } else {
+ codegen->LoadTypeForBootImageIntrinsic(
+ target.object,
+ TypeReference(&declaring_class->GetDexFile(), declaring_class->GetDexTypeIndex()));
+ }
+ }
+ __ Mov(target.offset, target_field->GetOffset().Uint32Value());
+ } else {
+ // For static fields, we need to fill the `target.object` with the declaring class,
+ // so we can use `target.object` as temporary for the `ArtMethod*`. For instance fields,
+ // we do not need the declaring class, so we can forget the `ArtMethod*` when
+ // we load the `target.offset`, so use the `target.offset` to hold the `ArtMethod*`.
+ Register method = (expected_coordinates_count == 0) ? target.object : target.offset;
+
+ const MemberOffset art_field_offset = mirror::FieldVarHandle::ArtFieldOffset();
+ const MemberOffset offset_offset = ArtField::OffsetOffset();
+
+ // Load the ArtField, the offset and, if needed, declaring class.
+ __ Ldr(method.X(), HeapOperand(varhandle, art_field_offset.Int32Value()));
+ __ Ldr(target.offset, MemOperand(method.X(), offset_offset.Int32Value()));
+ if (expected_coordinates_count == 0u) {
+ codegen->GenerateGcRootFieldLoad(invoke,
+ LocationFrom(target.object),
+ method.X(),
+ ArtField::DeclaringClassOffset().Int32Value(),
+ /*fixup_label=*/ nullptr,
+ kCompilerReadBarrierOption);
+ }
}
} else {
DCHECK_EQ(expected_coordinates_count, 2u);
@@ -4705,7 +4735,9 @@ static void GenerateVarHandleGet(HInvoke* invoke,
if (!byte_swap) {
slow_path = GenerateVarHandleChecks(invoke, codegen, order, type);
GenerateVarHandleTarget(invoke, target, codegen);
- __ Bind(slow_path->GetNativeByteOrderLabel());
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
}
// ARM64 load-acquire instructions are implicitly sequentially consistent.
@@ -4760,7 +4792,8 @@ static void GenerateVarHandleGet(HInvoke* invoke,
}
}
- if (!byte_swap) {
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
__ Bind(slow_path->GetExitLabel());
}
}
@@ -4821,7 +4854,9 @@ static void GenerateVarHandleSet(HInvoke* invoke,
if (!byte_swap) {
slow_path = GenerateVarHandleChecks(invoke, codegen, order, value_type);
GenerateVarHandleTarget(invoke, target, codegen);
- __ Bind(slow_path->GetNativeByteOrderLabel());
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
}
// ARM64 store-release instructions are implicitly sequentially consistent.
@@ -4866,7 +4901,8 @@ static void GenerateVarHandleSet(HInvoke* invoke,
codegen->MarkGCCard(target.object, Register(value), /*value_can_be_null=*/ true);
}
- if (!byte_swap) {
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
__ Bind(slow_path->GetExitLabel());
}
}
@@ -5014,9 +5050,11 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
VarHandleSlowPathARM64* slow_path = nullptr;
if (!byte_swap) {
slow_path = GenerateVarHandleChecks(invoke, codegen, order, value_type);
- slow_path->SetCompareAndSetOrExchangeArgs(return_success, strong);
GenerateVarHandleTarget(invoke, target, codegen);
- __ Bind(slow_path->GetNativeByteOrderLabel());
+ if (slow_path != nullptr) {
+ slow_path->SetCompareAndSetOrExchangeArgs(return_success, strong);
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
}
// This needs to be before the temp registers, as MarkGCCard also uses VIXL temps.
@@ -5173,7 +5211,8 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
__ Sxth(out.W(), old_value);
}
- if (!byte_swap) {
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
__ Bind(slow_path->GetExitLabel());
}
}
@@ -5314,9 +5353,11 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
VarHandleSlowPathARM64* slow_path = nullptr;
if (!byte_swap) {
slow_path = GenerateVarHandleChecks(invoke, codegen, order, value_type);
- slow_path->SetGetAndUpdateOp(get_and_update_op);
GenerateVarHandleTarget(invoke, target, codegen);
- __ Bind(slow_path->GetNativeByteOrderLabel());
+ if (slow_path != nullptr) {
+ slow_path->SetGetAndUpdateOp(get_and_update_op);
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
}
// This needs to be before the temp registers, as MarkGCCard also uses VIXL temps.
@@ -5423,7 +5464,8 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
}
}
- if (!byte_swap) {
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
__ Bind(slow_path->GetExitLabel());
}
}
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index f651518e9f..d850cadc2b 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -4139,28 +4139,30 @@ static void GenerateVarHandleInstanceFieldChecks(HInvoke* invoke,
__ B(eq, slow_path->GetEntryLabel());
}
- // Use the first temporary register, whether it's for the declaring class or the offset.
- // It is not used yet at this point.
- vixl32::Register temp = RegisterFrom(invoke->GetLocations()->GetTemp(0u));
+ if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ // Use the first temporary register, whether it's for the declaring class or the offset.
+ // It is not used yet at this point.
+ vixl32::Register temp = RegisterFrom(invoke->GetLocations()->GetTemp(0u));
- // Check that the VarHandle references an instance field by checking that
- // coordinateType1 == null. coordinateType0 should not be null, but this is handled by the
- // type compatibility check with the source object's type, which will fail for null.
- {
- UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
- vixl32::Register temp2 = temps.Acquire();
- DCHECK_EQ(coordinate_type0_offset.Int32Value() + 4, coordinate_type1_offset.Int32Value());
- __ Ldrd(temp, temp2, MemOperand(varhandle, coordinate_type0_offset.Int32Value()));
- assembler->MaybeUnpoisonHeapReference(temp);
- // No need for read barrier or unpoisoning of coordinateType1 for comparison with null.
- __ Cmp(temp2, 0);
- __ B(ne, slow_path->GetEntryLabel());
- }
+ // Check that the VarHandle references an instance field by checking that
+ // coordinateType1 == null. coordinateType0 should not be null, but this is handled by the
+ // type compatibility check with the source object's type, which will fail for null.
+ {
+ UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+ vixl32::Register temp2 = temps.Acquire();
+ DCHECK_EQ(coordinate_type0_offset.Int32Value() + 4, coordinate_type1_offset.Int32Value());
+ __ Ldrd(temp, temp2, MemOperand(varhandle, coordinate_type0_offset.Int32Value()));
+ assembler->MaybeUnpoisonHeapReference(temp);
+ // No need for read barrier or unpoisoning of coordinateType1 for comparison with null.
+ __ Cmp(temp2, 0);
+ __ B(ne, slow_path->GetEntryLabel());
+ }
- // Check that the object has the correct type.
- // We deliberately avoid the read barrier, letting the slow path handle the false negatives.
- GenerateSubTypeObjectCheckNoReadBarrier(
- codegen, slow_path, object, temp, /*object_can_be_null=*/ false);
+ // Check that the object has the correct type.
+ // We deliberately avoid the read barrier, letting the slow path handle the false negatives.
+ GenerateSubTypeObjectCheckNoReadBarrier(
+ codegen, slow_path, object, temp, /*object_can_be_null=*/ false);
+ }
}
static void GenerateVarHandleArrayChecks(HInvoke* invoke,
@@ -4268,11 +4270,22 @@ static VarHandleSlowPathARMVIXL* GenerateVarHandleChecks(HInvoke* invoke,
CodeGeneratorARMVIXL* codegen,
std::memory_order order,
DataType::Type type) {
+ size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
+ VarHandleOptimizations optimizations(invoke);
+ if (optimizations.GetUseKnownBootImageVarHandle()) {
+ DCHECK_NE(expected_coordinates_count, 2u);
+ if (expected_coordinates_count == 0u || optimizations.GetSkipObjectNullCheck()) {
+ return nullptr;
+ }
+ }
+
VarHandleSlowPathARMVIXL* slow_path =
new (codegen->GetScopedAllocator()) VarHandleSlowPathARMVIXL(invoke, order);
codegen->AddSlowPath(slow_path);
- GenerateVarHandleAccessModeAndVarTypeChecks(invoke, codegen, slow_path, type);
+ if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ GenerateVarHandleAccessModeAndVarTypeChecks(invoke, codegen, slow_path, type);
+ }
GenerateVarHandleCoordinateChecks(invoke, codegen, slow_path);
return slow_path;
@@ -4305,24 +4318,41 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
if (expected_coordinates_count <= 1u) {
- // For static fields, we need to fill the `target.object` with the declaring class,
- // so we can use `target.object` as temporary for the `ArtMethod*`. For instance fields,
- // we do not need the declaring class, so we can forget the `ArtMethod*` when
- // we load the `target.offset`, so use the `target.offset` to hold the `ArtMethod*`.
- vixl32::Register method = (expected_coordinates_count == 0) ? target.object : target.offset;
-
- const MemberOffset art_field_offset = mirror::FieldVarHandle::ArtFieldOffset();
- const MemberOffset offset_offset = ArtField::OffsetOffset();
-
- // Load the ArtField, the offset and, if needed, declaring class.
- __ Ldr(method, MemOperand(varhandle, art_field_offset.Int32Value()));
- __ Ldr(target.offset, MemOperand(method, offset_offset.Int32Value()));
- if (expected_coordinates_count == 0u) {
- codegen->GenerateGcRootFieldLoad(invoke,
- LocationFrom(target.object),
- method,
- ArtField::DeclaringClassOffset().Int32Value(),
- kCompilerReadBarrierOption);
+ if (VarHandleOptimizations(invoke).GetUseKnownBootImageVarHandle()) {
+ ScopedObjectAccess soa(Thread::Current());
+ ArtField* target_field = GetBootImageVarHandleField(invoke);
+ if (expected_coordinates_count == 0u) {
+ ObjPtr<mirror::Class> declaring_class = target_field->GetDeclaringClass();
+ if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(declaring_class)) {
+ uint32_t boot_image_offset = CodeGenerator::GetBootImageOffset(declaring_class);
+ codegen->LoadBootImageRelRoEntry(target.object, boot_image_offset);
+ } else {
+ codegen->LoadTypeForBootImageIntrinsic(
+ target.object,
+ TypeReference(&declaring_class->GetDexFile(), declaring_class->GetDexTypeIndex()));
+ }
+ }
+ __ Mov(target.offset, target_field->GetOffset().Uint32Value());
+ } else {
+ // For static fields, we need to fill the `target.object` with the declaring class,
+ // so we can use `target.object` as temporary for the `ArtMethod*`. For instance fields,
+ // we do not need the declaring class, so we can forget the `ArtMethod*` when
+ // we load the `target.offset`, so use the `target.offset` to hold the `ArtMethod*`.
+ vixl32::Register method = (expected_coordinates_count == 0) ? target.object : target.offset;
+
+ const MemberOffset art_field_offset = mirror::FieldVarHandle::ArtFieldOffset();
+ const MemberOffset offset_offset = ArtField::OffsetOffset();
+
+ // Load the ArtField, the offset and, if needed, declaring class.
+ __ Ldr(method, MemOperand(varhandle, art_field_offset.Int32Value()));
+ __ Ldr(target.offset, MemOperand(method, offset_offset.Int32Value()));
+ if (expected_coordinates_count == 0u) {
+ codegen->GenerateGcRootFieldLoad(invoke,
+ LocationFrom(target.object),
+ method,
+ ArtField::DeclaringClassOffset().Int32Value(),
+ kCompilerReadBarrierOption);
+ }
}
} else {
DCHECK_EQ(expected_coordinates_count, 2u);
@@ -4436,9 +4466,11 @@ static void GenerateVarHandleGet(HInvoke* invoke,
VarHandleSlowPathARMVIXL* slow_path = nullptr;
if (!byte_swap) {
slow_path = GenerateVarHandleChecks(invoke, codegen, order, type);
- slow_path->SetAtomic(atomic);
GenerateVarHandleTarget(invoke, target, codegen);
- __ Bind(slow_path->GetNativeByteOrderLabel());
+ if (slow_path != nullptr) {
+ slow_path->SetAtomic(atomic);
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
}
Location maybe_temp = Location::NoLocation();
@@ -4502,7 +4534,8 @@ static void GenerateVarHandleGet(HInvoke* invoke,
}
}
- if (!byte_swap) {
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
__ Bind(slow_path->GetExitLabel());
}
}
@@ -4591,9 +4624,11 @@ static void GenerateVarHandleSet(HInvoke* invoke,
VarHandleSlowPathARMVIXL* slow_path = nullptr;
if (!byte_swap) {
slow_path = GenerateVarHandleChecks(invoke, codegen, order, value_type);
- slow_path->SetAtomic(atomic);
GenerateVarHandleTarget(invoke, target, codegen);
- __ Bind(slow_path->GetNativeByteOrderLabel());
+ if (slow_path != nullptr) {
+ slow_path->SetAtomic(atomic);
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
}
Location maybe_temp = Location::NoLocation();
@@ -4667,7 +4702,8 @@ static void GenerateVarHandleSet(HInvoke* invoke,
codegen->MarkGCCard(temp, card, target.object, value_reg, /*value_can_be_null=*/ true);
}
- if (!byte_swap) {
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
__ Bind(slow_path->GetExitLabel());
}
}
@@ -4792,9 +4828,11 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
VarHandleSlowPathARMVIXL* slow_path = nullptr;
if (!byte_swap) {
slow_path = GenerateVarHandleChecks(invoke, codegen, order, value_type);
- slow_path->SetCompareAndSetOrExchangeArgs(return_success, strong);
GenerateVarHandleTarget(invoke, target, codegen);
- __ Bind(slow_path->GetNativeByteOrderLabel());
+ if (slow_path != nullptr) {
+ slow_path->SetCompareAndSetOrExchangeArgs(return_success, strong);
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
}
bool seq_cst_barrier = (order == std::memory_order_seq_cst);
@@ -4963,7 +5001,8 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
codegen->MarkGCCard(temp, card, target.object, RegisterFrom(new_value), new_value_can_be_null);
}
- if (!byte_swap) {
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
__ Bind(slow_path->GetExitLabel());
}
}
@@ -5116,9 +5155,11 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
VarHandleSlowPathARMVIXL* slow_path = nullptr;
if (!byte_swap) {
slow_path = GenerateVarHandleChecks(invoke, codegen, order, value_type);
- slow_path->SetGetAndUpdateOp(get_and_update_op);
GenerateVarHandleTarget(invoke, target, codegen);
- __ Bind(slow_path->GetNativeByteOrderLabel());
+ if (slow_path != nullptr) {
+ slow_path->SetGetAndUpdateOp(get_and_update_op);
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
}
bool seq_cst_barrier = (order == std::memory_order_seq_cst);
@@ -5279,7 +5320,8 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
codegen->MarkGCCard(temp, card, target.object, RegisterFrom(arg), new_value_can_be_null);
}
- if (!byte_swap) {
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
__ Bind(slow_path->GetExitLabel());
}
}
diff --git a/compiler/optimizing/intrinsics_utils.h b/compiler/optimizing/intrinsics_utils.h
index f24454786e..19f5e332a8 100644
--- a/compiler/optimizing/intrinsics_utils.h
+++ b/compiler/optimizing/intrinsics_utils.h
@@ -19,6 +19,7 @@
#include "base/casts.h"
#include "base/macros.h"
+#include "class_root-inl.h"
#include "code_generator.h"
#include "data_type-inl.h"
#include "dex/dex_file-inl.h"
@@ -196,6 +197,28 @@ static inline DataType::Type GetVarHandleExpectedValueType(HInvoke* invoke,
}
}
+static inline ArtField* GetBootImageVarHandleField(HInvoke* invoke)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK_LE(GetExpectedVarHandleCoordinatesCount(invoke), 1u);
+ DCHECK(VarHandleOptimizations(invoke).GetUseKnownBootImageVarHandle());
+ HInstruction* var_handle_instruction = invoke->InputAt(0);
+ if (var_handle_instruction->IsNullCheck()) {
+ var_handle_instruction = var_handle_instruction->InputAt(0);
+ }
+ DCHECK(var_handle_instruction->IsStaticFieldGet());
+ ArtField* field = var_handle_instruction->AsStaticFieldGet()->GetFieldInfo().GetField();
+ DCHECK(field->IsStatic());
+ DCHECK(field->IsFinal());
+ DCHECK(var_handle_instruction->InputAt(0)->AsLoadClass()->IsInBootImage());
+ ObjPtr<mirror::Object> var_handle = field->GetObject(field->GetDeclaringClass());
+ DCHECK(var_handle->GetClass() ==
+ (GetExpectedVarHandleCoordinatesCount(invoke) == 0u
+ ? GetClassRoot<mirror::StaticFieldVarHandle>()
+ : GetClassRoot<mirror::FieldVarHandle>()));
+ static_assert(std::is_base_of_v<mirror::FieldVarHandle, mirror::StaticFieldVarHandle>);
+ return ObjPtr<mirror::FieldVarHandle>::DownCast(var_handle)->GetArtField();
+}
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_INTRINSICS_UTILS_H_
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index ae535284eb..3c31374f67 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -3569,20 +3569,22 @@ static void GenerateVarHandleInstanceFieldChecks(HInvoke* invoke,
__ j(kZero, slow_path->GetEntryLabel());
}
- // Check that the VarHandle references an instance field by checking that
- // coordinateType1 == null. coordinateType0 should be not null, but this is handled by the
- // type compatibility check with the source object's type, which will fail for null.
- __ cmpl(Address(varhandle, coordinate_type1_offset), Immediate(0));
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ // Check that the VarHandle references an instance field by checking that
+ // coordinateType1 == null. coordinateType0 should be not null, but this is handled by the
+ // type compatibility check with the source object's type, which will fail for null.
+ __ cmpl(Address(varhandle, coordinate_type1_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
- // Check that the object has the correct type.
- // We deliberately avoid the read barrier, letting the slow path handle the false negatives.
- GenerateSubTypeObjectCheckNoReadBarrier(codegen,
- slow_path,
- object,
- temp,
- Address(varhandle, coordinate_type0_offset),
- /*object_can_be_null=*/ false);
+ // Check that the object has the correct type.
+ // We deliberately avoid the read barrier, letting the slow path handle the false negatives.
+ GenerateSubTypeObjectCheckNoReadBarrier(codegen,
+ slow_path,
+ object,
+ temp,
+ Address(varhandle, coordinate_type0_offset),
+ /*object_can_be_null=*/ false);
+ }
}
static void GenerateVarHandleArrayChecks(HInvoke* invoke,
@@ -3685,11 +3687,22 @@ static void GenerateVarHandleCoordinateChecks(HInvoke* invoke,
static VarHandleSlowPathX86_64* GenerateVarHandleChecks(HInvoke* invoke,
CodeGeneratorX86_64* codegen,
DataType::Type type) {
+ size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
+ VarHandleOptimizations optimizations(invoke);
+ if (optimizations.GetUseKnownBootImageVarHandle()) {
+ DCHECK_NE(expected_coordinates_count, 2u);
+ if (expected_coordinates_count == 0u || optimizations.GetSkipObjectNullCheck()) {
+ return nullptr;
+ }
+ }
+
VarHandleSlowPathX86_64* slow_path =
new (codegen->GetScopedAllocator()) VarHandleSlowPathX86_64(invoke);
codegen->AddSlowPath(slow_path);
- GenerateVarHandleAccessModeAndVarTypeChecks(invoke, codegen, slow_path, type);
+ if (!optimizations.GetUseKnownBootImageVarHandle()) {
+ GenerateVarHandleAccessModeAndVarTypeChecks(invoke, codegen, slow_path, type);
+ }
GenerateVarHandleCoordinateChecks(invoke, codegen, slow_path);
return slow_path;
@@ -3724,25 +3737,42 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
CpuRegister varhandle = locations->InAt(0).AsRegister<CpuRegister>();
if (expected_coordinates_count <= 1u) {
- // For static fields, we need to fill the `target.object` with the declaring class,
- // so we can use `target.object` as temporary for the `ArtMethod*`. For instance fields,
- // we do not need the declaring class, so we can forget the `ArtMethod*` when
- // we load the `target.offset`, so use the `target.offset` to hold the `ArtMethod*`.
- CpuRegister method((expected_coordinates_count == 0) ? target.object : target.offset);
-
- const MemberOffset art_field_offset = mirror::FieldVarHandle::ArtFieldOffset();
- const MemberOffset offset_offset = ArtField::OffsetOffset();
-
- // Load the ArtField, the offset and, if needed, declaring class.
- __ movq(method, Address(varhandle, art_field_offset));
- __ movl(CpuRegister(target.offset), Address(method, offset_offset));
- if (expected_coordinates_count == 0u) {
- InstructionCodeGeneratorX86_64* instr_codegen = codegen->GetInstructionCodegen();
- instr_codegen->GenerateGcRootFieldLoad(invoke,
- Location::RegisterLocation(target.object),
- Address(method, ArtField::DeclaringClassOffset()),
- /*fixup_label=*/ nullptr,
- kCompilerReadBarrierOption);
+ if (VarHandleOptimizations(invoke).GetUseKnownBootImageVarHandle()) {
+ ScopedObjectAccess soa(Thread::Current());
+ ArtField* target_field = GetBootImageVarHandleField(invoke);
+ if (expected_coordinates_count == 0u) {
+ ObjPtr<mirror::Class> declaring_class = target_field->GetDeclaringClass();
+ __ movl(CpuRegister(target.object),
+ Address::Absolute(CodeGeneratorX86_64::kPlaceholder32BitOffset, /*no_rip=*/ false));
+ if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(declaring_class)) {
+ codegen->RecordBootImageRelRoPatch(CodeGenerator::GetBootImageOffset(declaring_class));
+ } else {
+ codegen->RecordBootImageTypePatch(declaring_class->GetDexFile(),
+ declaring_class->GetDexTypeIndex());
+ }
+ }
+ __ movl(CpuRegister(target.offset), Immediate(target_field->GetOffset().Uint32Value()));
+ } else {
+ // For static fields, we need to fill the `target.object` with the declaring class,
+ // so we can use `target.object` as temporary for the `ArtMethod*`. For instance fields,
+ // we do not need the declaring class, so we can forget the `ArtMethod*` when
+ // we load the `target.offset`, so use the `target.offset` to hold the `ArtMethod*`.
+ CpuRegister method((expected_coordinates_count == 0) ? target.object : target.offset);
+
+ const MemberOffset art_field_offset = mirror::FieldVarHandle::ArtFieldOffset();
+ const MemberOffset offset_offset = ArtField::OffsetOffset();
+
+ // Load the ArtField, the offset and, if needed, declaring class.
+ __ movq(method, Address(varhandle, art_field_offset));
+ __ movl(CpuRegister(target.offset), Address(method, offset_offset));
+ if (expected_coordinates_count == 0u) {
+ InstructionCodeGeneratorX86_64* instr_codegen = codegen->GetInstructionCodegen();
+ instr_codegen->GenerateGcRootFieldLoad(invoke,
+ Location::RegisterLocation(target.object),
+ Address(method, ArtField::DeclaringClassOffset()),
+ /*fixup_label=*/ nullptr,
+ kCompilerReadBarrierOption);
+ }
}
} else {
DCHECK_EQ(expected_coordinates_count, 2u);
@@ -3836,7 +3866,9 @@ static void GenerateVarHandleGet(HInvoke* invoke,
if (!byte_swap) {
slow_path = GenerateVarHandleChecks(invoke, codegen, type);
GenerateVarHandleTarget(invoke, target, codegen);
- __ Bind(slow_path->GetNativeByteOrderLabel());
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
}
// Load the value from the field
@@ -3861,7 +3893,8 @@ static void GenerateVarHandleGet(HInvoke* invoke,
}
}
- if (!byte_swap) {
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
__ Bind(slow_path->GetExitLabel());
}
}
@@ -3929,10 +3962,12 @@ static void GenerateVarHandleSet(HInvoke* invoke,
VarHandleSlowPathX86_64* slow_path = nullptr;
if (!byte_swap) {
slow_path = GenerateVarHandleChecks(invoke, codegen, value_type);
- slow_path->SetVolatile(is_volatile);
- slow_path->SetAtomic(is_atomic);
GenerateVarHandleTarget(invoke, target, codegen);
- __ Bind(slow_path->GetNativeByteOrderLabel());
+ if (slow_path != nullptr) {
+ slow_path->SetVolatile(is_volatile);
+ slow_path->SetAtomic(is_atomic);
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
}
switch (invoke->GetIntrinsic()) {
@@ -3963,7 +3998,8 @@ static void GenerateVarHandleSet(HInvoke* invoke,
// setVolatile needs kAnyAny barrier, but HandleFieldSet takes care of that.
- if (!byte_swap) {
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
__ Bind(slow_path->GetExitLabel());
}
}
@@ -4064,7 +4100,9 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
if (!byte_swap) {
slow_path = GenerateVarHandleChecks(invoke, codegen, type);
GenerateVarHandleTarget(invoke, target, codegen);
- __ Bind(slow_path->GetNativeByteOrderLabel());
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
}
uint32_t temp_count = locations->GetTempCount();
@@ -4085,7 +4123,8 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
// We are using LOCK CMPXCHG in all cases because there is no CAS equivalent that has weak
// failure semantics. LOCK CMPXCHG has full barrier semantics, so we don't need barriers.
- if (!byte_swap) {
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
__ Bind(slow_path->GetExitLabel());
}
}
@@ -4621,11 +4660,13 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
VarHandleTarget target = GetVarHandleTarget(invoke);
if (!byte_swap) {
slow_path = GenerateVarHandleChecks(invoke, codegen, type);
- slow_path->SetGetAndUpdateOp(get_and_update_op);
- slow_path->SetNeedAnyStoreBarrier(need_any_store_barrier);
- slow_path->SetNeedAnyAnyBarrier(need_any_any_barrier);
GenerateVarHandleTarget(invoke, target, codegen);
- __ Bind(slow_path->GetNativeByteOrderLabel());
+ if (slow_path != nullptr) {
+ slow_path->SetGetAndUpdateOp(get_and_update_op);
+ slow_path->SetNeedAnyStoreBarrier(need_any_store_barrier);
+ slow_path->SetNeedAnyAnyBarrier(need_any_any_barrier);
+ __ Bind(slow_path->GetNativeByteOrderLabel());
+ }
}
CpuRegister ref(target.object);
@@ -4654,7 +4695,8 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
codegen->GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
- if (!byte_swap) {
+ if (slow_path != nullptr) {
+ DCHECK(!byte_swap);
__ Bind(slow_path->GetExitLabel());
}
}
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 8794d5a566..17cf3d3477 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -171,6 +171,11 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
dex::TypeIndex type_index = load_class->GetTypeIndex();
const CompilerOptions& compiler_options = codegen->GetCompilerOptions();
+ auto is_class_in_current_boot_image = [&]() {
+ return (compiler_options.IsBootImage() || compiler_options.IsBootImageExtension()) &&
+ compiler_options.IsImageClass(dex_file.StringByTypeIdx(type_index));
+ };
+
bool is_in_boot_image = false;
HLoadClass::LoadKind desired_load_kind = HLoadClass::LoadKind::kInvalid;
@@ -181,12 +186,17 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
// locations of target classes. The additional register pressure
// for using the ArtMethod* should be considered.
desired_load_kind = HLoadClass::LoadKind::kReferrersClass;
+ // Determine whether the referrer's class is in the boot image.
+ is_in_boot_image = is_class_in_current_boot_image();
} else if (load_class->NeedsAccessCheck()) {
DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
if (klass != nullptr) {
// Resolved class that needs access check must be really inaccessible
// and the access check is bound to fail. Just emit the runtime call.
desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
+ // Determine whether the class is in the boot image.
+ is_in_boot_image = Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass.Get()) ||
+ is_class_in_current_boot_image();
} else if (compiler_options.IsJitCompiler()) {
// Unresolved class while JITting means that either we never hit this
// instruction or it failed. Either way, just emit the runtime call.
@@ -222,6 +232,9 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
if (!compiler_options.GetCompilePic()) {
// Test configuration, do not sharpen.
desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
+ // Determine whether the class is in the boot image.
+ is_in_boot_image = Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass.Get()) ||
+ is_class_in_current_boot_image();
} else if (klass != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get())) {
DCHECK(compiler_options.IsBootImageExtension());
is_in_boot_image = true;
diff --git a/runtime/mirror/var_handle.h b/runtime/mirror/var_handle.h
index e5b807d6d5..18e0c3a482 100644
--- a/runtime/mirror/var_handle.h
+++ b/runtime/mirror/var_handle.h
@@ -150,6 +150,9 @@ class MANAGED VarHandle : public Object {
// Gets the variable type that is operated on by this VarHandle instance.
ObjPtr<Class> GetVarType() REQUIRES_SHARED(Locks::mutator_lock_);
+ // Gets the type of the object that this VarHandle operates on, null for StaticFieldVarHandle.
+ ObjPtr<Class> GetCoordinateType0() REQUIRES_SHARED(Locks::mutator_lock_);
+
// Gets the return type descriptor for a named accessor method,
// nullptr if accessor_method is not supported.
static const char* GetReturnTypeDescriptor(const char* accessor_method);
@@ -187,7 +190,6 @@ class MANAGED VarHandle : public Object {
}
private:
- ObjPtr<Class> GetCoordinateType0() REQUIRES_SHARED(Locks::mutator_lock_);
ObjPtr<Class> GetCoordinateType1() REQUIRES_SHARED(Locks::mutator_lock_);
int32_t GetAccessModesBitMask() REQUIRES_SHARED(Locks::mutator_lock_);
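
Making GetCoordinateType0() public lets the compiler query the receiver type of a field handle; it corresponds to the first entry of the Java-level VarHandle.coordinateTypes() list, which is empty for a static field handle. A small sketch against the public API (class name is illustrative):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    class CoordinateDemo {
        static int counter;
        int field;

        public static void main(String[] args) throws ReflectiveOperationException {
            VarHandle staticHandle = MethodHandles.lookup()
                    .findStaticVarHandle(CoordinateDemo.class, "counter", int.class);
            VarHandle fieldHandle = MethodHandles.lookup()
                    .findVarHandle(CoordinateDemo.class, "field", int.class);

            // No coordinates for a static field handle (coordinateType0 is null).
            System.out.println(staticHandle.coordinateTypes());  // []
            // One coordinate, the declaring class, for an instance field handle.
            System.out.println(fieldHandle.coordinateTypes());   // [class CoordinateDemo]
        }
    }
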