author     Nicolas Geoffray <ngeoffray@google.com>    2024-04-17 15:19:55 +0100
committer  Nicolas Geoffray <ngeoffray@google.com>    2024-04-18 10:25:36 +0000
commit     16567ac3d6925c0b84c819f2e475c2cadbde5dc3 (patch)
tree       75e41b7b4da978a857400410a34a06d2271322b2
parent     36aba6488cd335f651161be607ac0af240faddc8 (diff)
download   art-16567ac3d6925c0b84c819f2e475c2cadbde5dc3.tar.gz
Reland "Revamp JIT GC."
This reverts commit de8e6689cb3f829b73e921d843cfa38bbfe996bf.
Reason for revert:
- Adjust test for new behavior
- Enter MarkCompiledCodeOnThreadStacks with the mutator lock, as
expected by the checkpoint.
Change-Id: Ic9fca3e5fd04da62081701f4deccbc68bee56c2f
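For readers skimming the diff below: this change replaces the eager, blocking code cache collection (GarbageCollectCache) with lazy "zombie" tracking. Code whose entrypoint has been replaced is recorded as a zombie, and a collection task sweeps the zombies that no thread stack marked once enough accumulate. The stand-alone C++ sketch below illustrates that scheme under simplified assumptions; it is not ART code. ZombieTracker, kZombieThreshold, ScheduleCollection, and CollectUnmarked are invented names, and a plain std::mutex with a direct call stands in for Locks::jit_lock_ and the JitThreadPool task.

#include <cstddef>
#include <iostream>
#include <mutex>
#include <set>

class ZombieTracker {
 public:
  // Analogous to AddZombieCodeInternal: record the retired code pointer and
  // schedule a collection once enough zombies have accumulated.
  void AddZombieCode(const void* code_ptr) {
    std::lock_guard<std::mutex> lock(mutex_);
    zombie_code_.insert(code_ptr);
    if (zombie_code_.size() >= kZombieThreshold && !gc_scheduled_) {
      gc_scheduled_ = true;  // In ART, a JitGcTask is posted to the JIT pool.
      ScheduleCollection();
    }
  }

  // Analogous to RemoveUnmarkedCode: free every zombie that was not marked
  // by the thread-stack checkpoint.
  void CollectUnmarked(const std::set<const void*>& marked) {
    std::lock_guard<std::mutex> lock(mutex_);
    for (auto it = zombie_code_.begin(); it != zombie_code_.end();) {
      if (marked.count(*it) == 0) {
        std::cout << "freeing zombie code at " << *it << "\n";
        it = zombie_code_.erase(it);
      } else {
        ++it;  // Still on some stack; keep it for the next cycle.
      }
    }
    gc_scheduled_ = false;
  }

 private:
  void ScheduleCollection() {
    std::cout << "collection scheduled\n";  // Stand-in for pool->AddTask(...).
  }

  static constexpr std::size_t kZombieThreshold = 4;  // ART uses 1000 (1 in debug builds).
  std::mutex mutex_;
  std::set<const void*> zombie_code_;
  bool gc_scheduled_ = false;
};

int main() {
  ZombieTracker tracker;
  int code[8];  // Stand-ins for JIT-compiled code allocations.
  for (int& c : code) tracker.AddZombieCode(&c);
  tracker.CollectUnmarked({&code[0]});  // Only code[0] is "on a stack".
  return 0;
}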
-rw-r--r--  openjdkjvmti/ti_redefine.cc                   3
-rw-r--r--  runtime/art_method.cc                        26
-rw-r--r--  runtime/art_method.h                          8
-rw-r--r--  runtime/jit/jit_code_cache.cc               329
-rw-r--r--  runtime/jit/jit_code_cache.h                 47
-rw-r--r--  runtime/runtime_image.cc                      4
-rw-r--r--  test/667-jit-jni-stub/jit_jni_stub_test.cc    8
-rw-r--r--  test/667-jit-jni-stub/src/Main.java           7

8 files changed, 236 insertions, 196 deletions
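The core entrypoint hook may be easier to see in isolation before the per-file hunks: the entrypoint setter is moved out of line so that whenever a new entrypoint overwrites one that pointed into the JIT cache, the old code is retired into the zombie set. A minimal, hypothetical sketch of that pattern follows; g_jit_code, g_zombie_code, and SetEntryPoint are invented stand-ins, not ART APIs.

#include <iostream>
#include <set>

// Invented stand-ins for ART's code cache query and zombie list.
std::set<const void*> g_jit_code;     // PCs owned by the JIT code cache.
std::set<const void*> g_zombie_code;  // Retired code awaiting collection.

// Rough analogue of the new out-of-line
// ArtMethod::SetEntryPointFromQuickCompiledCodePtrSize: overwriting an
// entrypoint that pointed into the JIT cache retires the old code.
void SetEntryPoint(const void*& slot, const void* new_entry_point) {
  const void* current = slot;
  if (current == new_entry_point) {
    return;  // No change, so nothing to retire.
  }
  slot = new_entry_point;
  if (g_jit_code.count(current) != 0) {
    g_zombie_code.insert(current);  // Collectable unless a stack marks it.
  }
}

int main() {
  int jit_code = 0;            // Stand-in for a JIT-compiled code blob.
  int interpreter_bridge = 0;  // Stand-in for the interpreter entrypoint.
  g_jit_code.insert(&jit_code);
  const void* entry_point = &jit_code;
  SetEntryPoint(entry_point, &interpreter_bridge);
  std::cout << "zombies: " << g_zombie_code.size() << "\n";  // Prints 1.
  return 0;
}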
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 60e8193f5d..d0ce6a1f49 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -2550,6 +2550,9 @@ void Redefiner::ClassRedefinition::UpdateMethods(art::ObjPtr<art::mirror::Class>
   const art::DexFile& old_dex_file = mclass->GetDexFile();
   // Update methods.
   for (art::ArtMethod& method : mclass->GetDeclaredMethods(image_pointer_size)) {
+    // Reinitialize the method by calling `CopyFrom`. This ensures for example
+    // the entrypoint and the hotness are reset.
+    method.CopyFrom(&method, image_pointer_size);
     const art::dex::StringId* new_name_id = dex_file_->FindStringId(method.GetName());
     art::dex::TypeIndex method_return_idx =
         dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(method.GetReturnTypeDescriptor()));
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index c8b16990b2..0b6b883bab 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -795,16 +795,18 @@ void ArtMethod::CopyFrom(ArtMethod* src, PointerSize image_pointer_size) {
   const void* entry_point = GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size);
   if (runtime->UseJitCompilation()) {
     if (runtime->GetJit()->GetCodeCache()->ContainsPc(entry_point)) {
-      SetEntryPointFromQuickCompiledCodePtrSize(
-          src->IsNative() ? GetQuickGenericJniStub() : GetQuickToInterpreterBridge(),
-          image_pointer_size);
+      SetNativePointer(EntryPointFromQuickCompiledCodeOffset(image_pointer_size),
+                       src->IsNative() ? GetQuickGenericJniStub() : GetQuickToInterpreterBridge(),
+                       image_pointer_size);
     }
   }
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   if (interpreter::IsNterpSupported() && class_linker->IsNterpEntryPoint(entry_point)) {
     // If the entrypoint is nterp, it's too early to check if the new method
     // will support it. So for simplicity, use the interpreter bridge.
-    SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), image_pointer_size);
+    SetNativePointer(EntryPointFromQuickCompiledCodeOffset(image_pointer_size),
+                     GetQuickToInterpreterBridge(),
+                     image_pointer_size);
   }
   // Clear the data pointer, it will be set if needed by the caller.
@@ -922,4 +924,20 @@ ALWAYS_INLINE static inline void DoGetAccessFlagsHelper(ArtMethod* method)
          method->GetDeclaringClass<kReadBarrierOption>()->IsErroneous());
 }
 
+void ArtMethod::SetEntryPointFromQuickCompiledCodePtrSize(
+    const void* entry_point_from_quick_compiled_code, PointerSize pointer_size) {
+  const void* current_entry_point = GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
+  if (current_entry_point == entry_point_from_quick_compiled_code) {
+    return;
+  }
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  SetNativePointer(EntryPointFromQuickCompiledCodeOffset(pointer_size),
+                   entry_point_from_quick_compiled_code,
+                   pointer_size);
+  if (jit != nullptr &&
+      jit->GetCodeCache()->ContainsPc(current_entry_point)) {
+    jit->GetCodeCache()->AddZombieCode(this, current_entry_point);
+  }
+}
+
 }  // namespace art
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 9632694273..9e247f7ae7 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -766,11 +766,7 @@ class EXPORT ArtMethod final {
   }
 
   ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize(
       const void* entry_point_from_quick_compiled_code, PointerSize pointer_size)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    SetNativePointer(EntryPointFromQuickCompiledCodeOffset(pointer_size),
-                     entry_point_from_quick_compiled_code,
-                     pointer_size);
-  }
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   static constexpr MemberOffset DataOffset(PointerSize pointer_size) {
     return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
@@ -1192,6 +1188,8 @@ class EXPORT ArtMethod final {
   // Used by GetName and GetNameView to share common code.
   const char* GetRuntimeMethodName() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  friend class RuntimeImageHelper;  // For SetNativePointer.
+
   DISALLOW_COPY_AND_ASSIGN(ArtMethod);  // Need to use CopyFrom to deal with 32 vs 64 bits.
 };
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 7ea9efb0f9..01637008c1 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -342,6 +342,8 @@ const void* JitCodeCache::GetSavedEntryPointOfPreCompiledMethod(ArtMethod* metho
     auto it = saved_compiled_methods_map_.find(method);
     if (it != saved_compiled_methods_map_.end()) {
       code_ptr = it->second;
+      // Now that we're using the saved entrypoint, remove it from the saved map.
+      saved_compiled_methods_map_.erase(it);
     }
   }
   if (code_ptr != nullptr) {
@@ -489,33 +491,40 @@ void JitCodeCache::FreeAllMethodHeaders(
         ->RemoveDependentsWithMethodHeaders(method_headers);
   }
 
-  ScopedCodeCacheWrite scc(private_region_);
-  for (const OatQuickMethodHeader* method_header : method_headers) {
-    FreeCodeAndData(method_header->GetCode());
-  }
+  {
+    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+    ScopedCodeCacheWrite scc(private_region_);
+    for (const OatQuickMethodHeader* method_header : method_headers) {
+      FreeCodeAndData(method_header->GetCode());
+    }
 
-  // We have potentially removed a lot of debug info. Do maintenance pass to save space.
-  RepackNativeDebugInfoForJit();
+    // We have potentially removed a lot of debug info. Do maintenance pass to save space.
+    RepackNativeDebugInfoForJit();
+  }
 
   // Check that the set of compiled methods exactly matches native debug information.
   // Does not check zygote methods since they can change concurrently.
   if (kIsDebugBuild && !Runtime::Current()->IsZygote()) {
     std::map<const void*, ArtMethod*> compiled_methods;
-    VisitAllMethods([&](const void* addr, ArtMethod* method) {
-      if (!IsInZygoteExecSpace(addr)) {
-        CHECK(addr != nullptr && method != nullptr);
-        compiled_methods.emplace(addr, method);
-      }
-    });
     std::set<const void*> debug_info;
-    ForEachNativeDebugSymbol([&](const void* addr, size_t, const char* name) {
-      addr = AlignDown(addr, GetInstructionSetInstructionAlignment(kRuntimeISA));  // Thumb-bit.
-      CHECK(debug_info.emplace(addr).second) << "Duplicate debug info: " << addr << " " << name;
-      CHECK_EQ(compiled_methods.count(addr), 1u) << "Extra debug info: " << addr << " " << name;
-    });
+    {
+      MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+      VisitAllMethods([&](const void* addr, ArtMethod* method) {
+        if (!IsInZygoteExecSpace(addr)) {
+          CHECK(addr != nullptr && method != nullptr);
+          compiled_methods.emplace(addr, method);
+        }
+      });
+      ForEachNativeDebugSymbol([&](const void* addr, size_t, const char* name) {
+        addr = AlignDown(addr, GetInstructionSetInstructionAlignment(kRuntimeISA));  // Thumb-bit.
+        bool res = debug_info.emplace(addr).second;
+        CHECK(res) << "Duplicate debug info: " << addr << " " << name;
+        CHECK_EQ(compiled_methods.count(addr), 1u) << "Extra debug info: " << addr << " " << name;
+      });
+    }
     if (!debug_info.empty()) {  // If debug-info generation is enabled.
       for (auto it : compiled_methods) {
-        CHECK_EQ(debug_info.count(it.first), 1u) << "No debug info: " << it.second->PrettyMethod();
+        CHECK_EQ(debug_info.count(it.first), 1u) << "Missing debug info";
       }
       CHECK_EQ(compiled_methods.size(), debug_info.size());
     }
@@ -546,17 +555,26 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
         ++it;
       }
     }
+    for (auto it = zombie_jni_code_.begin(); it != zombie_jni_code_.end();) {
+      if (alloc.ContainsUnsafe(*it)) {
+        it = zombie_jni_code_.erase(it);
+      } else {
+        ++it;
+      }
+    }
     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
       if (alloc.ContainsUnsafe(it->second)) {
         method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
         VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
+        zombie_code_.erase(it->first);
         it = method_code_map_.erase(it);
       } else {
         ++it;
       }
     }
     for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
+      DCHECK(!ContainsElement(zombie_code_, it->second));
       if (alloc.ContainsUnsafe(it->first)) {
         // Note that the code has already been pushed to method_headers in the loop
         // above and is going to be removed in FreeCode() below.
@@ -574,8 +592,8 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
         ++it;
       }
     }
-    FreeAllMethodHeaders(method_headers);
   }
+  FreeAllMethodHeaders(method_headers);
 }
 
 bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
@@ -641,18 +659,6 @@ static void ClearMethodCounter(ArtMethod* method, bool was_warm)
   method->UpdateCounter(/* new_samples= */ 1);
 }
 
-void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
-  while (collection_in_progress_) {
-    Locks::jit_lock_->Unlock(self);
-    {
-      ScopedThreadSuspension sts(self, ThreadState::kSuspended);
-      MutexLock mu(self, *Locks::jit_lock_);
-      WaitForPotentialCollectionToComplete(self);
-    }
-    Locks::jit_lock_->Lock(self);
-  }
-}
-
 bool JitCodeCache::Commit(Thread* self,
                           JitMemoryRegion* region,
                           ArtMethod* method,
@@ -680,9 +686,6 @@ bool JitCodeCache::Commit(Thread* self,
   OatQuickMethodHeader* method_header = nullptr;
   {
     MutexLock mu(self, *Locks::jit_lock_);
-    // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
-    // finish.
-    WaitForPotentialCollectionToCompleteRunnable(self);
     const uint8_t* code_ptr = region->CommitCode(reserved_code, code, stack_map_data);
     if (code_ptr == nullptr) {
       return false;
@@ -782,11 +785,6 @@ bool JitCodeCache::Commit(Thread* self,
                                  method, method_header->GetEntryPoint());
     }
   }
-  if (collection_in_progress_) {
-    // We need to update the live bitmap if there is a GC to ensure it sees this new
-    // code.
-    GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
-  }
   VLOG(jit)
       << "JIT added (kind=" << compilation_kind << ") "
      << ArtMethod::PrettyMethod(method) << "@" << method
@@ -855,6 +853,7 @@ bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
       FreeCodeAndData(it->second.GetCode());
     }
     jni_stubs_map_.erase(it);
+    zombie_jni_code_.erase(method);
   } else {
     it->first.UpdateShorty(it->second.GetMethods().front());
   }
@@ -985,7 +984,6 @@ bool JitCodeCache::Reserve(Thread* self,
     {
       ScopedThreadSuspension sts(self, ThreadState::kSuspended);
       MutexLock mu(self, *Locks::jit_lock_);
-      WaitForPotentialCollectionToComplete(self);
       ScopedCodeCacheWrite ccw(*region);
       code = region->AllocateCode(code_size);
       data = region->AllocateData(data_size);
@@ -1002,8 +1000,8 @@ bool JitCodeCache::Reserve(Thread* self,
              << PrettySize(data_size);
       return false;
     }
-    // Run a code cache collection and try again.
-    GarbageCollectCache(self);
+    // Increase the capacity and try again.
+    IncreaseCodeCacheCapacity(self);
   }
 
   *reserved_code = ArrayRef<const uint8_t>(code, code_size);
@@ -1081,11 +1079,6 @@ class MarkCodeClosure final : public Closure {
   Barrier* const barrier_;
 };
 
-void JitCodeCache::NotifyCollectionDone(Thread* self) {
-  collection_in_progress_ = false;
-  lock_cond_.Broadcast(self);
-}
-
 void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
   Barrier barrier(0);
   size_t threads_running_checkpoint = 0;
@@ -1103,86 +1096,117 @@ bool JitCodeCache::IsAtMaxCapacity() const {
   return private_region_.GetCurrentCapacity() == private_region_.GetMaxCapacity();
 }
 
-void JitCodeCache::GarbageCollectCache(Thread* self) {
-  ScopedTrace trace(__FUNCTION__);
-  // Wait for an existing collection, or let everyone know we are starting one.
-  {
-    ScopedThreadSuspension sts(self, ThreadState::kSuspended);
-    MutexLock mu(self, *Locks::jit_lock_);
-    if (!garbage_collect_code_) {
-      private_region_.IncreaseCodeCacheCapacity();
-      return;
-    } else if (WaitForPotentialCollectionToComplete(self)) {
-      return;
-    } else {
-      number_of_collections_++;
-      live_bitmap_.reset(CodeCacheBitmap::Create(
-          "code-cache-bitmap",
-          reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
-          reinterpret_cast<uintptr_t>(
-              private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
-      collection_in_progress_ = true;
-    }
-  }
-
-  TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
-  {
-    TimingLogger::ScopedTiming st("Code cache collection", &logger);
-
-    VLOG(jit) << "Do code cache collection, code="
-              << PrettySize(CodeCacheSize())
-              << ", data=" << PrettySize(DataCacheSize());
-
-    DoCollection(self);
-
-    VLOG(jit) << "After code cache collection, code="
-              << PrettySize(CodeCacheSize())
-              << ", data=" << PrettySize(DataCacheSize());
-
-    {
-      MutexLock mu(self, *Locks::jit_lock_);
-      private_region_.IncreaseCodeCacheCapacity();
-      live_bitmap_.reset(nullptr);
-      NotifyCollectionDone(self);
-    }
-  }
-  Runtime::Current()->GetJit()->AddTimingLogger(logger);
+void JitCodeCache::IncreaseCodeCacheCapacity(Thread* self) {
+  MutexLock mu(self, *Locks::jit_lock_);
+  private_region_.IncreaseCodeCacheCapacity();
 }
 
 void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
   ScopedTrace trace(__FUNCTION__);
-  ScopedDebugDisallowReadBarriers sddrb(self);
   std::unordered_set<OatQuickMethodHeader*> method_headers;
+  ScopedDebugDisallowReadBarriers sddrb(self);
   {
     MutexLock mu(self, *Locks::jit_lock_);
-    // Iterate over all compiled code and remove entries that are not marked.
-    for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
-      JniStubData* data = &it->second;
-      if (IsInZygoteExecSpace(data->GetCode()) ||
-          !data->IsCompiled() ||
-          GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
-        ++it;
-      } else {
-        method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
-        for (ArtMethod* method : data->GetMethods()) {
-          VLOG(jit) << "JIT removed (JNI) " << method->PrettyMethod() << ": " << data->GetCode();
-        }
-        it = jni_stubs_map_.erase(it);
-      }
-    }
-    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
-      const void* code_ptr = it->first;
+    // Iterate over all zombie code and remove entries that are not marked.
+    for (auto it = zombie_code_.begin(); it != zombie_code_.end();) {
+      const void* code_ptr = *it;
       uintptr_t allocation = FromCodeToAllocation(code_ptr);
-      if (IsInZygoteExecSpace(code_ptr) || GetLiveBitmap()->Test(allocation)) {
+      DCHECK(!IsInZygoteExecSpace(code_ptr));
+      if (GetLiveBitmap()->Test(allocation)) {
         ++it;
       } else {
         OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
         method_headers.insert(header);
-        VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
-        it = method_code_map_.erase(it);
+        VLOG(jit) << "JIT removed " << *it;
+        it = zombie_code_.erase(it);
+        method_code_map_.erase(header->GetCode());
+      }
+    }
+    for (auto it = zombie_jni_code_.begin(); it != zombie_jni_code_.end();) {
+      ArtMethod* method = *it;
+      auto stub = jni_stubs_map_.find(JniStubKey(method));
+      CHECK(stub != jni_stubs_map_.end());
+      JniStubData& data = stub->second;
+      CHECK(data.IsCompiled());
+      CHECK(ContainsElement(data.GetMethods(), method));
+      uintptr_t allocation = FromCodeToAllocation(data.GetCode());
+      if (method->GetEntryPointFromQuickCompiledCode() ==
+              OatQuickMethodHeader::FromCodePointer(data.GetCode())->GetEntryPoint()) {
+        // The stub got reused for this method, remove ourselves from the zombie
+        // list.
+        it = zombie_jni_code_.erase(it);
+      } else if (!GetLiveBitmap()->Test(allocation)) {
+        if (data.GetMethods().empty()) {
+          OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
+          method_headers.insert(header);
+          CHECK(ContainsPc(header));
+          VLOG(jit) << "JIT removed native code of " << method->PrettyMethod();
+          jni_stubs_map_.erase(stub);
+        } else if (ContainsElement(data.GetMethods(), method)) {
+          if (data.GetMethods().size() == 1) {
+            OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
+            method_headers.insert(header);
+            CHECK(ContainsPc(header));
+            VLOG(jit) << "JIT removed native code of " << method->PrettyMethod();
+            jni_stubs_map_.erase(stub);
+          } else {
+            stub->second.RemoveMethod(method);
+            stub->first.UpdateShorty(stub->second.GetMethods().front());
+          }
+        }
+        it = zombie_jni_code_.erase(it);
+      } else {
+        ++it;
       }
     }
-    FreeAllMethodHeaders(method_headers);
+  }
+  FreeAllMethodHeaders(method_headers);
+}
+
+void JitCodeCache::AddZombieCode(ArtMethod* method, const void* entry_point) {
+  CHECK(method->IsNative() || (method->GetEntryPointFromQuickCompiledCode() != entry_point));
+  const void* code_ptr = OatQuickMethodHeader::FromEntryPoint(entry_point)->GetCode();
+  if (!IsInZygoteExecSpace(code_ptr)) {
+    Thread* self = Thread::Current();
+    if (Locks::jit_lock_->IsExclusiveHeld(self)) {
+      AddZombieCodeInternal(method, code_ptr);
+    } else {
+      MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+      AddZombieCodeInternal(method, code_ptr);
+    }
+  }
+}
+
+class JitGcTask final : public Task {
+ public:
+  JitGcTask() {}
+
+  void Run(Thread* self) override {
+    Runtime::Current()->GetJit()->GetCodeCache()->DoCollection(self);
+  }
+
+  void Finalize() override {
+    delete this;
+  }
+};
+
+void JitCodeCache::AddZombieCodeInternal(ArtMethod* method, const void* code_ptr) {
+  if (method->IsNative()) {
+    zombie_jni_code_.insert(method);
+  } else {
+    zombie_code_.insert(code_ptr);
+  }
+  // Arbitrary threshold of number of zombie code before doing a GC.
+  static constexpr size_t kNumberOfZombieCodeThreshold = kIsDebugBuild ? 1 : 1000;
+  size_t number_of_code_to_delete =
+      zombie_code_.size() + zombie_jni_code_.size() + osr_code_map_.size();
+  if (number_of_code_to_delete >= kNumberOfZombieCodeThreshold) {
+    JitThreadPool* pool = Runtime::Current()->GetJit()->GetThreadPool();
+    if (pool != nullptr && !gc_task_scheduled_) {
+      gc_task_scheduled_ = true;
+      pool->AddTask(Thread::Current(), new JitGcTask());
+    }
+  }
+}
 
@@ -1237,48 +1261,50 @@ void JitCodeCache::DoCollection(Thread* self) {
   {
     ScopedDebugDisallowReadBarriers sddrb(self);
     MutexLock mu(self, *Locks::jit_lock_);
-    // Mark compiled code that are entrypoints of ArtMethods. Compiled code that is not
-    // an entry point is either:
-    // - an osr compiled code, that will be removed if not in a thread call stack.
-    // - discarded compiled code, that will be removed if not in a thread call stack.
-    for (const auto& entry : jni_stubs_map_) {
-      const JniStubData& data = entry.second;
-      const void* code_ptr = data.GetCode();
-      if (IsInZygoteExecSpace(code_ptr)) {
-        continue;
-      }
-      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-      for (ArtMethod* method : data.GetMethods()) {
-        if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
-          GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
-          break;
-        }
-      }
-    }
-    for (const auto& it : method_code_map_) {
-      ArtMethod* method = it.second;
-      const void* code_ptr = it.first;
-      if (IsInZygoteExecSpace(code_ptr)) {
-        continue;
-      }
-      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-      if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
-        GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
-      }
+    if (!garbage_collect_code_) {
+      return;
+    } else if (WaitForPotentialCollectionToComplete(self)) {
+      return;
     }
-
+    collection_in_progress_ = true;
+    number_of_collections_++;
+    live_bitmap_.reset(CodeCacheBitmap::Create(
+        "code-cache-bitmap",
+        reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
+        reinterpret_cast<uintptr_t>(
+            private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
     // Empty osr method map, as osr compiled code will be deleted (except the ones
     // on thread stacks).
+    for (auto it = osr_code_map_.begin(); it != osr_code_map_.end(); ++it) {
+      zombie_code_.insert(it->second);
+    }
     osr_code_map_.clear();
   }
 
+  TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
+  {
+    TimingLogger::ScopedTiming st("Code cache collection", &logger);
+
+    {
+      ScopedObjectAccess soa(self);
+      // Run a checkpoint on all threads to mark the JIT compiled code they are running.
+      MarkCompiledCodeOnThreadStacks(self);
+
+      // Remove zombie code which hasn't been marked.
+      RemoveUnmarkedCode(self);
+    }
+
+    MutexLock mu(self, *Locks::jit_lock_);
+    live_bitmap_.reset(nullptr);
+    NotifyCollectionDone(self);
+  }
 
-  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
-  MarkCompiledCodeOnThreadStacks(self);
+  Runtime::Current()->GetJit()->AddTimingLogger(logger);
+}
 
-  // At this point, mutator threads are still running, and entrypoints of methods can
-  // change. We do know they cannot change to a code cache entry that is not marked,
-  // therefore we can safely remove those entries.
-  RemoveUnmarkedCode(self);
+void JitCodeCache::NotifyCollectionDone(Thread* self) {
+  collection_in_progress_ = false;
+  gc_task_scheduled_ = false;
+  lock_cond_.Broadcast(self);
 }
 
 OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
@@ -1382,7 +1408,7 @@ ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
   }
 
   if (info == nullptr) {
-    GarbageCollectCache(self);
+    IncreaseCodeCacheCapacity(self);
     MutexLock mu(self, *Locks::jit_lock_);
     info = AddProfilingInfoInternal(self, method, inline_cache_entries, branch_cache_entries);
   }
@@ -1610,11 +1636,6 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
       // as well change them back as this stub shall not be collected anyway and this
       // can avoid a few expensive GenericJNI calls.
       data->UpdateEntryPoints(entrypoint);
-      if (collection_in_progress_) {
-        if (!IsInZygoteExecSpace(data->GetCode())) {
-          GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
-        }
-      }
     }
     return new_compilation;
   } else {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 96fc7e2706..08572edd2d 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -285,11 +285,9 @@ class JitCodeCache {
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
   void FreeLocked(JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
-      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::jit_lock_);
 
-  // Perform a collection on the code cache.
-  EXPORT void GarbageCollectCache(Thread* self)
+  void IncreaseCodeCacheCapacity(Thread* self)
       REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -423,9 +421,20 @@ class JitCodeCache {
                               Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // NO_THREAD_SAFETY_ANALYSIS because we may be called with the JIT lock held
+  // or not. The implementation of this method handles the two cases.
+  void AddZombieCode(ArtMethod* method, const void* code_ptr) NO_THREAD_SAFETY_ANALYSIS;
+
+  EXPORT void DoCollection(Thread* self)
+      REQUIRES(!Locks::jit_lock_);
+
  private:
   JitCodeCache();
 
+  void AddZombieCodeInternal(ArtMethod* method, const void* code_ptr)
+      REQUIRES(Locks::jit_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                           ArtMethod* method,
                                           const std::vector<uint32_t>& inline_cache_entries,
@@ -433,21 +442,16 @@ class JitCodeCache {
       REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
-  // The non-mutator lock version should be used if possible. This method will release then
-  // re-acquire the mutator lock.
-  void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
-      REQUIRES(Locks::jit_lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
-
   // If a collection is in progress, wait for it to finish. Return
   // whether the thread actually waited.
   bool WaitForPotentialCollectionToComplete(Thread* self)
-      REQUIRES(Locks::jit_lock_) REQUIRES(!Locks::mutator_lock_);
+      REQUIRES(Locks::jit_lock_) REQUIRES_SHARED(!Locks::mutator_lock_);
+
+  // Notify all waiting threads that a collection is done.
+  void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);
 
   // Remove CHA dependents and underlying allocations for entries in `method_headers`.
   void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(Locks::jit_lock_)
       REQUIRES(!Locks::cha_lock_);
 
   // Removes method from the cache. The caller must ensure that all threads
@@ -462,8 +466,7 @@ class JitCodeCache {
 
   // Free code and data allocations for `code_ptr`.
   void FreeCodeAndData(const void* code_ptr)
-      REQUIRES(Locks::jit_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(Locks::jit_lock_);
 
   // Number of bytes allocated in the code cache.
   size_t CodeCacheSize() REQUIRES(!Locks::jit_lock_);
@@ -477,16 +480,9 @@ class JitCodeCache {
   // Number of bytes allocated in the data cache.
   size_t DataCacheSizeLocked() REQUIRES(Locks::jit_lock_);
 
-  // Notify all waiting threads that a collection is done.
-  void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);
-
   // Return whether the code cache's capacity is at its maximum.
   bool IsAtMaxCapacity() const REQUIRES(Locks::jit_lock_);
 
-  void DoCollection(Thread* self)
-      REQUIRES(!Locks::jit_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   void RemoveUnmarkedCode(Thread* self)
       REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -563,18 +559,25 @@ class JitCodeCache {
   // ProfilingInfo objects we have allocated.
   SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
 
+  // Zombie code and JNI methods to consider for collection.
+  std::set<const void*> zombie_code_ GUARDED_BY(Locks::jit_lock_);
+  std::set<ArtMethod*> zombie_jni_code_ GUARDED_BY(Locks::jit_lock_);
+
   // Methods that the zygote has compiled and can be shared across processes
   // forked from the zygote.
   ZygoteMap zygote_map_;
 
   // -------------- JIT GC related data structures ----------------------- //
 
-  // Condition to wait on during collection.
+  // Condition to wait on during collection and for accessing weak references in inline caches.
   ConditionVariable lock_cond_ GUARDED_BY(Locks::jit_lock_);
 
   // Whether there is a code cache collection in progress.
   bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);
 
+  // Whether a GC task is already scheduled.
+  bool gc_task_scheduled_ GUARDED_BY(Locks::jit_lock_);
+
   // Bitmap for collecting code and data.
   std::unique_ptr<CodeCacheBitmap> live_bitmap_;
diff --git a/runtime/runtime_image.cc b/runtime/runtime_image.cc
index 997ea2fde6..b134c79286 100644
--- a/runtime/runtime_image.cc
+++ b/runtime/runtime_image.cc
@@ -964,7 +964,9 @@ class RuntimeImageHelper {
         entrypoint = boot_jni_stub;
       }
     }
-    copy->SetEntryPointFromQuickCompiledCode(entrypoint);
+    copy->SetNativePointer(ArtMethod::EntryPointFromQuickCompiledCodeOffset(kRuntimePointerSize),
+                           entrypoint,
+                           kRuntimePointerSize);
 
     if (method->IsNative()) {
       StubType stub_type = method->IsCriticalNative()
diff --git a/test/667-jit-jni-stub/jit_jni_stub_test.cc b/test/667-jit-jni-stub/jit_jni_stub_test.cc
index 2b8e14c03d..b7946f39ed 100644
--- a/test/667-jit-jni-stub/jit_jni_stub_test.cc
+++ b/test/667-jit-jni-stub/jit_jni_stub_test.cc
@@ -39,9 +39,11 @@ extern "C" JNIEXPORT void Java_Main_jitGc(JNIEnv*, jclass) {
   CHECK(Runtime::Current()->GetJit() != nullptr);
   jit::JitCodeCache* cache = Runtime::Current()->GetJit()->GetCodeCache();
-  ScopedObjectAccess soa(Thread::Current());
-  cache->InvalidateAllCompiledCode();
-  cache->GarbageCollectCache(Thread::Current());
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    cache->InvalidateAllCompiledCode();
+  }
+  cache->DoCollection(Thread::Current());
 }
 
 extern "C" JNIEXPORT
diff --git a/test/667-jit-jni-stub/src/Main.java b/test/667-jit-jni-stub/src/Main.java
index c0829b5100..05039670d1 100644
--- a/test/667-jit-jni-stub/src/Main.java
+++ b/test/667-jit-jni-stub/src/Main.java
@@ -67,9 +67,6 @@ public class Main {
     assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
     assertFalse(hasJitCompiledCode(Main.class, "callThrough"));
     callThrough(Main.class, "testMixedFramesOnStackStage2");
-    // We have just returned through the JIT-compiled JNI stub, so it must still
-    // be compiled (though not necessarily with the entrypoint pointing to it).
-    assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
     // Though the callThrough() is on the stack, that frame is using the GenericJNI
     // and does not prevent the collection of the JNI stub.
     testStubCanBeCollected();
@@ -94,10 +91,6 @@ public class Main {
   }
 
   public static void testStubCanBeCollected() {
-    assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
-    doJitGcsUntilFullJitGcIsScheduled();
-    assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
-    assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
     jitGc();  // JIT GC without callThrough() on the stack should collect the callThrough() stub.
     assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
     assertFalse(hasJitCompiledCode(Main.class, "callThrough"));