author     anoll <none@none>  2013-09-27 10:50:55 +0200
committer  anoll <none@none>  2013-09-27 10:50:55 +0200
commit     63bc9198c28ccb65cdd30788a85bb63e00a8db88 (patch)
tree       7c0d4a26a691da47a783b356fc35f04441dec2f8
parent     c8bf142b3afa29d5c83a15fd24229950ec03ba77 (diff)
download   jdk8u_hotspot-63bc9198c28ccb65cdd30788a85bb63e00a8db88.tar.gz
8020151: PSR:PERF Large performance regressions when code cache is filled
Summary: Code cache sweeping based on method hotness; removed speculative disconnection of nmethods
Reviewed-by: kvn, iveresov
-rw-r--r--  src/share/vm/code/codeCache.cpp          |  86
-rw-r--r--  src/share/vm/code/codeCache.hpp          |   6
-rw-r--r--  src/share/vm/code/nmethod.cpp            |  29
-rw-r--r--  src/share/vm/code/nmethod.hpp            |  27
-rw-r--r--  src/share/vm/compiler/compileBroker.cpp  |  53
-rw-r--r--  src/share/vm/oops/method.cpp             |  10
-rw-r--r--  src/share/vm/runtime/arguments.cpp       |   7
-rw-r--r--  src/share/vm/runtime/globals.hpp         |  13
-rw-r--r--  src/share/vm/runtime/safepoint.cpp       |   4
-rw-r--r--  src/share/vm/runtime/sweeper.cpp         | 326
-rw-r--r--  src/share/vm/runtime/sweeper.hpp         |  75
-rw-r--r--  src/share/vm/runtime/vmStructs.cpp       |   4
-rw-r--r--  src/share/vm/runtime/vm_operations.cpp   |   4
-rw-r--r--  src/share/vm/runtime/vm_operations.hpp   |  11
-rw-r--r--  src/share/vm/trace/trace.xml             |   7
15 files changed, 242 insertions(+), 420 deletions(-)
diff --git a/src/share/vm/code/codeCache.cpp b/src/share/vm/code/codeCache.cpp
index 65c1e5f2e..fcdab3d5a 100644
--- a/src/share/vm/code/codeCache.cpp
+++ b/src/share/vm/code/codeCache.cpp
@@ -124,7 +124,6 @@ int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
-nmethod* CodeCache::_saved_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;
@@ -464,96 +463,11 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
}
#endif //PRODUCT
-/**
- * Remove and return nmethod from the saved code list in order to reanimate it.
- */
-nmethod* CodeCache::reanimate_saved_code(Method* m) {
- MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- nmethod* saved = _saved_nmethods;
- nmethod* prev = NULL;
- while (saved != NULL) {
- if (saved->is_in_use() && saved->method() == m) {
- if (prev != NULL) {
- prev->set_saved_nmethod_link(saved->saved_nmethod_link());
- } else {
- _saved_nmethods = saved->saved_nmethod_link();
- }
- assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
- saved->set_speculatively_disconnected(false);
- saved->set_saved_nmethod_link(NULL);
- if (PrintMethodFlushing) {
- saved->print_on(tty, " ### nmethod is reconnected");
- }
- if (LogCompilation && (xtty != NULL)) {
- ttyLocker ttyl;
- xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
- xtty->method(m);
- xtty->stamp();
- xtty->end_elem();
- }
- return saved;
- }
- prev = saved;
- saved = saved->saved_nmethod_link();
- }
- return NULL;
-}
-
-/**
- * Remove nmethod from the saved code list in order to discard it permanently
- */
-void CodeCache::remove_saved_code(nmethod* nm) {
- // For conc swpr this will be called with CodeCache_lock taken by caller
- assert_locked_or_safepoint(CodeCache_lock);
- assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
- nmethod* saved = _saved_nmethods;
- nmethod* prev = NULL;
- while (saved != NULL) {
- if (saved == nm) {
- if (prev != NULL) {
- prev->set_saved_nmethod_link(saved->saved_nmethod_link());
- } else {
- _saved_nmethods = saved->saved_nmethod_link();
- }
- if (LogCompilation && (xtty != NULL)) {
- ttyLocker ttyl;
- xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
- xtty->stamp();
- xtty->end_elem();
- }
- return;
- }
- prev = saved;
- saved = saved->saved_nmethod_link();
- }
- ShouldNotReachHere();
-}
-
-void CodeCache::speculatively_disconnect(nmethod* nm) {
- assert_locked_or_safepoint(CodeCache_lock);
- assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
- nm->set_saved_nmethod_link(_saved_nmethods);
- _saved_nmethods = nm;
- if (PrintMethodFlushing) {
- nm->print_on(tty, " ### nmethod is speculatively disconnected");
- }
- if (LogCompilation && (xtty != NULL)) {
- ttyLocker ttyl;
- xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
- xtty->method(nm->method());
- xtty->stamp();
- xtty->end_elem();
- }
- nm->method()->clear_code();
- nm->set_speculatively_disconnected(true);
-}
-
void CodeCache::gc_prologue() {
assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}
-
void CodeCache::gc_epilogue() {
assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_ALIVE_BLOBS(cb) {
diff --git a/src/share/vm/code/codeCache.hpp b/src/share/vm/code/codeCache.hpp
index 3e8eda6e2..e190b11ae 100644
--- a/src/share/vm/code/codeCache.hpp
+++ b/src/share/vm/code/codeCache.hpp
@@ -57,7 +57,6 @@ class CodeCache : AllStatic {
static int _number_of_nmethods_with_dependencies;
static bool _needs_cache_clean;
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
- static nmethod* _saved_nmethods; // Linked list of speculatively disconnected nmethods.
static void verify_if_often() PRODUCT_RETURN;
@@ -167,17 +166,12 @@ class CodeCache : AllStatic {
static size_t capacity() { return _heap->capacity(); }
static size_t max_capacity() { return _heap->max_capacity(); }
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
- static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
static double reverse_free_ratio();
static bool needs_cache_clean() { return _needs_cache_clean; }
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches
- static nmethod* reanimate_saved_code(Method* m);
- static void remove_saved_code(nmethod* nm);
- static void speculatively_disconnect(nmethod* nm);
-
// Deoptimization
static int mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
diff --git a/src/share/vm/code/nmethod.cpp b/src/share/vm/code/nmethod.cpp
index 4f8608f08..cfc2ddeab 100644
--- a/src/share/vm/code/nmethod.cpp
+++ b/src/share/vm/code/nmethod.cpp
@@ -462,7 +462,6 @@ void nmethod::init_defaults() {
_state = alive;
_marked_for_reclamation = 0;
_has_flushed_dependencies = 0;
- _speculatively_disconnected = 0;
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_lazy_critical_native = 0;
@@ -481,7 +480,6 @@ void nmethod::init_defaults() {
_osr_link = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
- _saved_nmethod_link = NULL;
_compiler = NULL;
#ifdef HAVE_DTRACE_H
@@ -686,6 +684,7 @@ nmethod::nmethod(
_osr_entry_point = NULL;
_exception_cache = NULL;
_pc_desc_cache.reset_to(NULL);
+ _hotness_counter = NMethodSweeper::hotness_counter_reset_val();
code_buffer->copy_values_to(this);
if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
@@ -770,6 +769,7 @@ nmethod::nmethod(
_osr_entry_point = NULL;
_exception_cache = NULL;
_pc_desc_cache.reset_to(NULL);
+ _hotness_counter = NMethodSweeper::hotness_counter_reset_val();
code_buffer->copy_values_to(this);
debug_only(verify_scavenge_root_oops());
@@ -842,6 +842,7 @@ nmethod::nmethod(
_comp_level = comp_level;
_compiler = compiler;
_orig_pc_offset = orig_pc_offset;
+ _hotness_counter = NMethodSweeper::hotness_counter_reset_val();
// Section offsets
_consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
@@ -1176,7 +1177,7 @@ void nmethod::cleanup_inline_caches() {
// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
- assert(is_not_entrant(), "must be a non-entrant method");
+ assert(is_alive(), "Must be an alive method");
// Set the traversal mark to ensure that the sweeper does 2
// cleaning passes before moving to zombie.
set_stack_traversal_mark(NMethodSweeper::traversal_count());
@@ -1261,7 +1262,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
set_osr_link(NULL);
//set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
- NMethodSweeper::notify(this);
+ NMethodSweeper::notify();
}
void nmethod::invalidate_osr_method() {
@@ -1351,6 +1352,15 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
nmethod_needs_unregister = true;
}
+ // Must happen before state change. Otherwise we have a race condition in
+ // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
+ // transition its state from 'not_entrant' to 'zombie' without having to wait
+ // for stack scanning.
+ if (state == not_entrant) {
+ mark_as_seen_on_stack();
+ OrderAccess::storestore();
+ }
+
// Change state
_state = state;
@@ -1369,11 +1379,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
HandleMark hm;
method()->clear_code();
}
-
- if (state == not_entrant) {
- mark_as_seen_on_stack();
- }
-
} // leave critical region under Patching_lock
// When the nmethod becomes zombie it is no longer alive so the
@@ -1416,7 +1421,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
}
// Make sweeper aware that there is a zombie method that needs to be removed
- NMethodSweeper::notify(this);
+ NMethodSweeper::notify();
return true;
}
@@ -1451,10 +1456,6 @@ void nmethod::flush() {
CodeCache::drop_scavenge_root_nmethod(this);
}
- if (is_speculatively_disconnected()) {
- CodeCache::remove_saved_code(this);
- }
-
#ifdef SHARK
((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
#endif // SHARK
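The reordering of mark_as_seen_on_stack() before the state change in make_not_entrant_or_zombie() above, together with the storestore barrier, is what lets the sweeper trust the traversal mark whenever it observes the not_entrant state. The following standalone analogy (using std::atomic rather than HotSpot's OrderAccess, with simplified placeholder fields, so a sketch rather than the actual implementation) shows why the publication order matters:

    #include <atomic>
    #include <cstdio>

    std::atomic<long> stack_traversal_mark{0};
    std::atomic<unsigned char> state{0};  // 0 = alive, 1 = not_entrant

    // Corresponds to mark_as_seen_on_stack() followed by OrderAccess::storestore():
    // the mark is published strictly before the state change.
    void make_not_entrant(long current_traversal) {
      stack_traversal_mark.store(current_traversal, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_release);  // storestore barrier
      state.store(1, std::memory_order_relaxed);
    }

    // A sweeper that sees state == not_entrant is guaranteed to also see the
    // mark written before the barrier, so it never converts a method to zombie
    // based on a stale traversal mark.
    bool mark_visible_after_state(long current_traversal) {
      if (state.load(std::memory_order_acquire) == 1) {
        return stack_traversal_mark.load(std::memory_order_relaxed) == current_traversal;
      }
      return false;
    }

    int main() {
      make_not_entrant(42);
      printf("%d\n", mark_visible_after_state(42));  // prints 1
      return 0;
    }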
diff --git a/src/share/vm/code/nmethod.hpp b/src/share/vm/code/nmethod.hpp
index 4929ea820..538bcf778 100644
--- a/src/share/vm/code/nmethod.hpp
+++ b/src/share/vm/code/nmethod.hpp
@@ -119,7 +119,6 @@ class nmethod : public CodeBlob {
// To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
- nmethod* _saved_nmethod_link; // from CodeCache::speculatively_disconnect
static nmethod* volatile _oops_do_mark_nmethods;
nmethod* volatile _oops_do_mark_link;
@@ -165,7 +164,6 @@ class nmethod : public CodeBlob {
// protected by CodeCache_lock
bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
- bool _speculatively_disconnected; // Marked for potential unload
bool _marked_for_reclamation; // Used by NMethodSweeper (set only by sweeper)
bool _marked_for_deoptimization; // Used for stack deoptimization
@@ -180,7 +178,7 @@ class nmethod : public CodeBlob {
unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints
// Protected by Patching_lock
- unsigned char _state; // {alive, not_entrant, zombie, unloaded}
+ volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded}
#ifdef ASSERT
bool _oops_are_stale; // indicates that it's no longer safe to access oops section
@@ -202,11 +200,18 @@ class nmethod : public CodeBlob {
// not_entrant method removal. Each mark_sweep pass will update
// this mark to current sweep invocation count if it is seen on the
- // stack. An not_entrant method can be removed when there is no
+ // stack. A not_entrant method can be removed when there are no
// more activations, i.e., when the _stack_traversal_mark is less than
// current sweep traversal index.
long _stack_traversal_mark;
+ // The _hotness_counter indicates the hotness of a method. The higher
+ // the value, the hotter the method. The hotness counter of an nmethod is
+ // reset to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
+ // is found to be active during stack scanning (mark_active_nmethods()). The
+ // hotness counter is decremented by 1 during each sweep.
+ int _hotness_counter;
+
ExceptionCache *_exception_cache;
PcDescCache _pc_desc_cache;
@@ -382,6 +387,10 @@ class nmethod : public CodeBlob {
int total_size () const;
+ void dec_hotness_counter() { _hotness_counter--; }
+ void set_hotness_counter(int val) { _hotness_counter = val; }
+ int hotness_counter() const { return _hotness_counter; }
+
// Containment
bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
@@ -408,8 +417,8 @@ class nmethod : public CodeBlob {
// alive. It is used when an uncommon trap happens. Returns true
// if this thread changed the state of the nmethod or false if
// another thread performed the transition.
- bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
- bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
+ bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
+ bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
// used by jvmti to track if the unload event has been reported
bool unload_reported() { return _unload_reported; }
@@ -437,9 +446,6 @@ class nmethod : public CodeBlob {
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
- bool is_speculatively_disconnected() const { return _speculatively_disconnected; }
- void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }
-
bool is_lazy_critical_native() const { return _lazy_critical_native; }
void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }
@@ -499,9 +505,6 @@ public:
nmethod* scavenge_root_link() const { return _scavenge_root_link; }
void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
- nmethod* saved_nmethod_link() const { return _saved_nmethod_link; }
- void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }
-
public:
// Sweeper support
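The reset formula quoted in the _hotness_counter comment above is easiest to see with concrete numbers. A minimal sketch, assuming a 48 MB ReservedCodeCacheSize (the size is an illustrative assumption, not taken from this patch):

    #include <cstdio>

    int main() {
      const long M = 1024 * 1024;
      const long reserved_code_cache_size = 48 * M;  // assume -XX:ReservedCodeCacheSize=48m
      // reset value = (ReservedCodeCacheSize / (1024 * 1024)) * 2
      const int reset_val = (int)(reserved_code_cache_size / M) * 2;  // 96
      // Each sweep decrements the counter by 1, so a method that is never seen
      // on any stack decays from 96 to 0 over 96 sweep invocations.
      printf("hotness reset value = %d\n", reset_val);
      return 0;
    }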
diff --git a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp
index e8f57b24e..594c58b5d 100644
--- a/src/share/vm/compiler/compileBroker.cpp
+++ b/src/share/vm/compiler/compileBroker.cpp
@@ -634,19 +634,36 @@ CompileTask* CompileQueue::get() {
NMethodSweeper::possibly_sweep();
MutexLocker locker(lock());
- // Wait for an available CompileTask.
+ // If _first is NULL we have no more compile jobs. There are two reasons for
+ // having no compile jobs: First, we compiled everything we wanted. Second,
+ // we ran out of code cache so compilation has been disabled. In the latter
+ // case we perform code cache sweeps to free memory such that we can re-enable
+ // compilation.
while (_first == NULL) {
- // There is no work to be done right now. Wait.
- if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
- // During the emergency sweeping periods, wake up and sweep occasionally
- bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
- if (timedout) {
+ if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
+ // Wait a certain amount of time to possibly do another sweep.
+ // We must wait until stack scanning has happened so that we can
+ // transition a method's state from 'not_entrant' to 'zombie'.
+ long wait_time = NmethodSweepCheckInterval * 1000;
+ if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
+ // Only one thread at a time can do sweeping. Scale the
+ // wait time according to the number of compiler threads.
+ // As a result, a sweep is likely to happen roughly every 100 ms,
+ // regardless of how many compiler threads take part in sweeping.
+ wait_time = 100 * CICompilerCount;
+ }
+ bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
+ if (timeout) {
MutexUnlocker ul(lock());
- // When otherwise not busy, run nmethod sweeping
NMethodSweeper::possibly_sweep();
}
} else {
- // During normal operation no need to wake up on timer
+ // If there are no compilation tasks and we can compile new jobs
+ // (i.e., there is enough free space in the code cache) there is
+ // no need to invoke the sweeper. As a result, the hotness of methods
+ // remains unchanged. This behavior is desired, since we want to keep
+ // the stable state, i.e., we do not want to evict methods from the
+ // code cache if it is unnecessary.
lock()->wait();
}
}
@@ -1227,16 +1244,9 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
return method_code;
}
}
- if (method->is_not_compilable(comp_level)) return NULL;
-
- if (UseCodeCacheFlushing) {
- nmethod* saved = CodeCache::reanimate_saved_code(method());
- if (saved != NULL) {
- method->set_code(method, saved);
- return saved;
- }
+ if (method->is_not_compilable(comp_level)) {
+ return NULL;
}
-
} else {
// osr compilation
#ifndef TIERED
@@ -1585,9 +1595,6 @@ void CompileBroker::compiler_thread_loop() {
if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
// the code cache is really full
handle_full_code_cache();
- } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
- // Attempt to start cleaning the code cache while there is still a little headroom
- NMethodSweeper::handle_full_code_cache(false);
}
CompileTask* task = queue->get();
@@ -1943,7 +1950,11 @@ void CompileBroker::handle_full_code_cache() {
}
#endif
if (UseCodeCacheFlushing) {
- NMethodSweeper::handle_full_code_cache(true);
+ // Since the code cache is full, immediately stop new compiles
+ if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
+ NMethodSweeper::log_sweep("disable_compiler");
+ NMethodSweeper::possibly_sweep();
+ }
} else {
UseCompiler = false;
AlwaysCompileLoopMethods = false;
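The wait-time scaling in CompileQueue::get() above keeps the aggregate sweep cadence roughly constant: each compiler thread waits 100 ms times the number of compiler threads, so with several threads waking in a staggered fashion some thread attempts a sweep about every 100 ms. A small sketch with assumed values:

    #include <cstdio>

    int main() {
      const long NmethodSweepCheckInterval = 5;  // seconds (flag default)
      const int  CICompilerCount = 4;            // assume 4 compiler threads
      const bool flag_is_default = true;         // flag not set on the command line

      long wait_time_ms = NmethodSweepCheckInterval * 1000;  // 5000 ms if set explicitly
      if (flag_is_default) {
        // Mirrors the patch: scale the per-thread wait by the thread count so
        // that, across all threads, a sweep is attempted roughly every 100 ms.
        wait_time_ms = 100 * CICompilerCount;
      }
      printf("per-thread wait: %ld ms\n", wait_time_ms);  // 400 ms
      return 0;
    }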
diff --git a/src/share/vm/oops/method.cpp b/src/share/vm/oops/method.cpp
index 30631fa3b..9d30c40f9 100644
--- a/src/share/vm/oops/method.cpp
+++ b/src/share/vm/oops/method.cpp
@@ -901,16 +901,6 @@ address Method::make_adapters(methodHandle mh, TRAPS) {
// This function must not hit a safepoint!
address Method::verified_code_entry() {
debug_only(No_Safepoint_Verifier nsv;)
- nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
- if (code == NULL && UseCodeCacheFlushing) {
- nmethod *saved_code = CodeCache::reanimate_saved_code(this);
- if (saved_code != NULL) {
- methodHandle method(this);
- assert( ! saved_code->is_osr_method(), "should not get here for osr" );
- set_code( method, saved_code );
- }
- }
-
assert(_from_compiled_entry != NULL, "must be set");
return _from_compiled_entry;
}
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index 53454e558..9a76b1b6a 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -1130,6 +1130,9 @@ void Arguments::set_tiered_flags() {
Tier3InvokeNotifyFreqLog = 0;
Tier4InvocationThreshold = 0;
}
+ if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
+ FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
+ }
}
#if INCLUDE_ALL_GCS
@@ -2333,6 +2336,10 @@ bool Arguments::check_vm_args_consistency() {
(2*G)/M);
status = false;
}
+
+ status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
+ status &= verify_interval(NmethodSweepActivity, 0, 2000, "NmethodSweepActivity");
+
return status;
}
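The new NmethodSweepFraction default set in set_tiered_flags() above sizes each sweep fraction at roughly 16 MB of code cache. A worked example with an assumed cache size:

    #include <cstdio>

    int main() {
      const long M = 1024 * 1024;
      const long reserved_code_cache_size = 240 * M;  // assume a 240 MB tiered code cache
      // FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M))
      const long sweep_fraction = 1 + reserved_code_cache_size / (16 * M);  // 16
      // A full sweep is split into 'sweep_fraction' invocations, so each
      // invocation of possibly_sweep() walks roughly 16 MB worth of nmethods.
      printf("NmethodSweepFraction = %ld\n", sweep_fraction);
      return 0;
    }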
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index 9d429cd76..394358d78 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -2868,6 +2868,10 @@ class CommandLineFlags {
product(intx, NmethodSweepCheckInterval, 5, \
"Compilers wake up every n seconds to possibly sweep nmethods") \
\
+ product(intx, NmethodSweepActivity, 10, \
+ "Removes cold nmethods from code cache if > 0. Higher values " \
+ "result in more aggressive sweeping") \
+ \
notproduct(bool, LogSweeper, false, \
"Keep a ring buffer of sweeper activity") \
\
@@ -3239,15 +3243,6 @@ class CommandLineFlags {
product(bool, UseCodeCacheFlushing, true, \
"Attempt to clean the code cache before shutting off compiler") \
\
- product(intx, MinCodeCacheFlushingInterval, 30, \
- "Min number of seconds between code cache cleaning sessions") \
- \
- product(uintx, CodeCacheFlushingMinimumFreeSpace, 1500*K, \
- "When less than X space left, start code cache cleaning") \
- \
- product(uintx, CodeCacheFlushingFraction, 2, \
- "Fraction of the code cache that is flushed when full") \
- \
/* interpreter debugging */ \
develop(intx, BinarySwitchThreshold, 5, \
"Minimal number of lookupswitch entries for rewriting to binary " \
diff --git a/src/share/vm/runtime/safepoint.cpp b/src/share/vm/runtime/safepoint.cpp
index f3191e524..95c395fad 100644
--- a/src/share/vm/runtime/safepoint.cpp
+++ b/src/share/vm/runtime/safepoint.cpp
@@ -519,8 +519,8 @@ void SafepointSynchronize::do_cleanup_tasks() {
}
{
- TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
- NMethodSweeper::scan_stacks();
+ TraceTime t4("mark nmethods", TraceSafepointCleanupTime);
+ NMethodSweeper::mark_active_nmethods();
}
if (SymbolTable::needs_rehashing()) {
diff --git a/src/share/vm/runtime/sweeper.cpp b/src/share/vm/runtime/sweeper.cpp
index 37315aec3..eab7636fa 100644
--- a/src/share/vm/runtime/sweeper.cpp
+++ b/src/share/vm/runtime/sweeper.cpp
@@ -127,64 +127,79 @@ void NMethodSweeper::record_sweep(nmethod* nm, int line) {
#define SWEEP(nm)
#endif
-
-long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
-nmethod* NMethodSweeper::_current = NULL; // Current nmethod
-int NMethodSweeper::_seen = 0 ; // No. of nmethods we have currently processed in current pass of CodeCache
-int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
-int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
-int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep
-
-volatile int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass
+nmethod* NMethodSweeper::_current = NULL; // Current nmethod
+long NMethodSweeper::_traversals = 0; // Nof. stack traversals performed
+int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache
+int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
+int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
+int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep
+
+volatile int NMethodSweeper::_invocations = 0; // Nof. invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
-jint NMethodSweeper::_locked_seen = 0;
+jint NMethodSweeper::_locked_seen = 0;
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
-bool NMethodSweeper::_resweep = false;
-jint NMethodSweeper::_flush_token = 0;
-jlong NMethodSweeper::_last_full_flush_time = 0;
-int NMethodSweeper::_highest_marked = 0;
-int NMethodSweeper::_dead_compile_ids = 0;
-long NMethodSweeper::_last_flush_traversal_id = 0;
-
-int NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
+bool NMethodSweeper::_request_mark_phase = false;
+
int NMethodSweeper::_total_nof_methods_reclaimed = 0;
-jlong NMethodSweeper::_total_time_sweeping = 0;
-jlong NMethodSweeper::_total_time_this_sweep = 0;
-jlong NMethodSweeper::_peak_sweep_time = 0;
-jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
-jlong NMethodSweeper::_total_disconnect_time = 0;
-jlong NMethodSweeper::_peak_disconnect_time = 0;
+jlong NMethodSweeper::_total_time_sweeping = 0;
+jlong NMethodSweeper::_total_time_this_sweep = 0;
+jlong NMethodSweeper::_peak_sweep_time = 0;
+jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
+int NMethodSweeper::_hotness_counter_reset_val = 0;
+
class MarkActivationClosure: public CodeBlobClosure {
public:
virtual void do_code_blob(CodeBlob* cb) {
- // If we see an activation belonging to a non_entrant nmethod, we mark it.
- if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
- ((nmethod*)cb)->mark_as_seen_on_stack();
+ if (cb->is_nmethod()) {
+ nmethod* nm = (nmethod*)cb;
+ nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
+ // If we see an activation belonging to a non_entrant nmethod, we mark it.
+ if (nm->is_not_entrant()) {
+ nm->mark_as_seen_on_stack();
+ }
}
}
};
static MarkActivationClosure mark_activation_closure;
+class SetHotnessClosure: public CodeBlobClosure {
+public:
+ virtual void do_code_blob(CodeBlob* cb) {
+ if (cb->is_nmethod()) {
+ nmethod* nm = (nmethod*)cb;
+ nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
+ }
+ }
+};
+static SetHotnessClosure set_hotness_closure;
+
+
+int NMethodSweeper::hotness_counter_reset_val() {
+ if (_hotness_counter_reset_val == 0) {
+ _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
+ }
+ return _hotness_counter_reset_val;
+}
bool NMethodSweeper::sweep_in_progress() {
return (_current != NULL);
}
-void NMethodSweeper::scan_stacks() {
+// Scans the stacks of all Java threads and marks activations of not-entrant methods.
+// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
+// safepoint.
+void NMethodSweeper::mark_active_nmethods() {
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
- if (!MethodFlushing) return;
-
- // No need to synchronize access, since this is always executed at a
- // safepoint.
-
- // Make sure CompiledIC_lock in unlocked, since we might update some
- // inline caches. If it is, we just bail-out and try later.
- if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
+ // If we do not want to reclaim not-entrant or zombie methods, there is
+ // no need to scan stacks.
+ if (!MethodFlushing) {
+ return;
+ }
// Check for restart
assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
- if (!sweep_in_progress() && _resweep) {
+ if (!sweep_in_progress() && need_marking_phase()) {
_seen = 0;
_invocations = NmethodSweepFraction;
_current = CodeCache::first_nmethod();
@@ -197,30 +212,22 @@ void NMethodSweeper::scan_stacks() {
Threads::nmethods_do(&mark_activation_closure);
// reset the flags since we started a scan from the beginning.
- _resweep = false;
+ reset_nmethod_marking();
_locked_seen = 0;
_not_entrant_seen_on_stack = 0;
+ } else {
+ // Only set hotness counter
+ Threads::nmethods_do(&set_hotness_closure);
}
- if (UseCodeCacheFlushing) {
- // only allow new flushes after the interval is complete.
- jlong now = os::javaTimeMillis();
- jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
- jlong curr_interval = now - _last_full_flush_time;
- if (curr_interval > max_interval) {
- _flush_token = 0;
- }
-
- if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
- CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
- log_sweep("restart_compiler");
- }
- }
+ OrderAccess::storestore();
}
void NMethodSweeper::possibly_sweep() {
assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
- if (!MethodFlushing || !sweep_in_progress()) return;
+ if (!MethodFlushing || !sweep_in_progress()) {
+ return;
+ }
if (_invocations > 0) {
// Only one thread at a time will sweep
@@ -258,8 +265,7 @@ void NMethodSweeper::sweep_code_cache() {
if (!CompileBroker::should_compile_new_jobs()) {
// If we have turned off compilations we might as well do full sweeps
// in order to reach the clean state faster. Otherwise the sleeping compiler
- // threads will slow down sweeping. After a few iterations the cache
- // will be clean and sweeping stops (_resweep will not be set)
+ // threads will slow down sweeping.
_invocations = 1;
}
@@ -271,9 +277,11 @@ void NMethodSweeper::sweep_code_cache() {
int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
int swept_count = 0;
+
assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
assert(!CodeCache_lock->owned_by_self(), "just checking");
+ int freed_memory = 0;
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@@ -299,7 +307,7 @@ void NMethodSweeper::sweep_code_cache() {
// Now ready to process nmethod and give up CodeCache_lock
{
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- process_nmethod(_current);
+ freed_memory += process_nmethod(_current);
}
_seen++;
_current = next;
@@ -308,11 +316,11 @@ void NMethodSweeper::sweep_code_cache() {
assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
- if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
+ if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
// we've completed a scan without making progress but there were
// nmethods we were unable to process either because they were
- // locked or were still on stack. We don't have to aggresively
- // clean them up so just stop scanning. We could scan once more
+ // locked or were still on stack. We don't have to aggressively
+ // clean them up so just stop scanning. We could scan once more
// but that complicates the control logic and it's unlikely to
// matter much.
if (PrintMethodFlushing) {
@@ -351,9 +359,16 @@ void NMethodSweeper::sweep_code_cache() {
log_sweep("finished");
}
- // Sweeper is the only case where memory is released,
- // check here if it is time to restart the compiler.
- if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
+ // Sweeper is the only case where memory is released, check here if it
+ // is time to restart the compiler. Only checking if there is a certain
+ // amount of free memory in the code cache might lead to re-enabling
+ // compilation although no memory has been released. For example, there are
+ // cases when compilation was disabled although there is 4MB (or more) free
+ // memory in the code cache. The reason is code cache fragmentation. Therefore,
+ // it only makes sense to re-enable compilation if we have actually freed memory.
+ // Note that typically several kB are released when 16MB of the code cache
+ // are swept. As a result, checking 'freed_memory' > 0 is sufficient to
+ // decide when to restart the compiler.
+ if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
log_sweep("restart_compiler");
}
@@ -367,8 +382,8 @@ class NMethodMarker: public StackObj {
_thread = CompilerThread::current();
if (!nm->is_zombie() && !nm->is_unloaded()) {
// Only expose live nmethods for scanning
- _thread->set_scanned_nmethod(nm);
- }
+ _thread->set_scanned_nmethod(nm);
+ }
}
~NMethodMarker() {
_thread->set_scanned_nmethod(NULL);
@@ -392,20 +407,20 @@ void NMethodSweeper::release_nmethod(nmethod *nm) {
nm->flush();
}
-void NMethodSweeper::process_nmethod(nmethod *nm) {
+int NMethodSweeper::process_nmethod(nmethod *nm) {
assert(!CodeCache_lock->owned_by_self(), "just checking");
+ int freed_memory = 0;
// Make sure this nmethod doesn't get unloaded during the scan,
- // since the locks acquired below might safepoint.
+ // since a safepoint may happen while the locks acquired below are held.
NMethodMarker nmm(nm);
-
SWEEP(nm);
// Skip methods that are currently referenced by the VM
if (nm->is_locked_by_vm()) {
// But still remember to clean-up inline caches for alive nmethods
if (nm->is_alive()) {
- // Clean-up all inline caches that points to zombie/non-reentrant methods
+ // Clean inline caches that point to zombie/non-entrant methods
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
@@ -413,18 +428,19 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
_locked_seen++;
SWEEP(nm);
}
- return;
+ return freed_memory;
}
if (nm->is_zombie()) {
- // If it is first time, we see nmethod then we mark it. Otherwise,
- // we reclame it. When we have seen a zombie method twice, we know that
+ // If it is the first time we see this nmethod, we mark it. Otherwise,
+ // we reclaim it. When we have seen a zombie method twice, we know that
// there are no inline caches that refer to it.
if (nm->is_marked_for_reclamation()) {
assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
}
+ freed_memory = nm->total_size();
release_nmethod(nm);
_flushed_count++;
} else {
@@ -432,19 +448,19 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
}
nm->mark_for_reclamation();
- _resweep = true;
+ request_nmethod_marking();
_marked_count++;
SWEEP(nm);
}
} else if (nm->is_not_entrant()) {
- // If there is no current activations of this method on the
+ // If there are no current activations of this method on the
// stack we can safely convert it to a zombie method
if (nm->can_not_entrant_be_converted()) {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
}
nm->make_zombie();
- _resweep = true;
+ request_nmethod_marking();
_zombified_count++;
SWEEP(nm);
} else {
@@ -459,159 +475,57 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
}
} else if (nm->is_unloaded()) {
// Unloaded code, just make it a zombie
- if (PrintMethodFlushing && Verbose)
+ if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
-
+ }
if (nm->is_osr_method()) {
SWEEP(nm);
// No inline caches will ever point to osr methods, so we can just remove it
+ freed_memory = nm->total_size();
release_nmethod(nm);
_flushed_count++;
} else {
nm->make_zombie();
- _resweep = true;
+ request_nmethod_marking();
_zombified_count++;
SWEEP(nm);
}
} else {
- assert(nm->is_alive(), "should be alive");
-
if (UseCodeCacheFlushing) {
- if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
- (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
- // This method has not been called since the forced cleanup happened
- nm->make_not_entrant();
+ if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
+ // Do not make native methods and OSR-methods not-entrant
+ nm->dec_hotness_counter();
+ // Get the initial value of the hotness counter. This value depends on the
+ // ReservedCodeCacheSize
+ int reset_val = hotness_counter_reset_val();
+ int time_since_reset = reset_val - nm->hotness_counter();
+ double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
+ // The less free space the code cache has, the greater reverse_free_ratio() is.
+ // I.e., 'threshold' increases with lower available space in the code cache and
+ // a higher NmethodSweepActivity. If the current hotness counter (which decreases
+ // from its initial value until it is reset by stack walking) is smaller than the
+ // computed threshold, the corresponding nmethod is considered for removal.
+ if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
+ // A method is marked as not-entrant if
+ // 1) it is 'old enough': nm->hotness_counter() < threshold, and
+ // 2) it was in use for a minimum amount of time: time_since_reset > 10.
+ // The second condition matters for very small code cache sizes (e.g., <10m)
+ // that are too small to hold all hot methods; it ensures that methods are
+ // not made not-entrant immediately after compilation.
+ nm->make_not_entrant();
+ request_nmethod_marking();
+ }
}
}
-
- // Clean-up all inline caches that points to zombie/non-reentrant methods
+ // Clean up all inline caches that point to zombie/non-entrant methods
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
}
+ return freed_memory;
}
-// Code cache unloading: when compilers notice the code cache is getting full,
-// they will call a vm op that comes here. This code attempts to speculatively
-// unload the oldest half of the nmethods (based on the compile job id) by
-// saving the old code in a list in the CodeCache. Then
-// execution resumes. If a method so marked is not called by the second sweeper
-// stack traversal after the current one, the nmethod will be marked non-entrant and
-// got rid of by normal sweeping. If the method is called, the Method*'s
-// _code field is restored and the Method*/nmethod
-// go back to their normal state.
-void NMethodSweeper::handle_full_code_cache(bool is_full) {
-
- if (is_full) {
- // Since code cache is full, immediately stop new compiles
- if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
- log_sweep("disable_compiler");
- }
- }
-
- // Make sure only one thread can flush
- // The token is reset after CodeCacheMinimumFlushInterval in scan stacks,
- // no need to check the timeout here.
- jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
- if (old != 0) {
- return;
- }
-
- VM_HandleFullCodeCache op(is_full);
- VMThread::execute(&op);
-
- // resweep again as soon as possible
- _resweep = true;
-}
-
-void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
- // If there was a race in detecting full code cache, only run
- // one vm op for it or keep the compiler shut off
-
- jlong disconnect_start_counter = os::elapsed_counter();
-
- // Traverse the code cache trying to dump the oldest nmethods
- int curr_max_comp_id = CompileBroker::get_compilation_id();
- int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
-
- log_sweep("start_cleaning");
-
- nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
- jint disconnected = 0;
- jint made_not_entrant = 0;
- jint nmethod_count = 0;
-
- while ((nm != NULL)){
- int curr_comp_id = nm->compile_id();
-
- // OSR methods cannot be flushed like this. Also, don't flush native methods
- // since they are part of the JDK in most cases
- if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
-
- // only count methods that can be speculatively disconnected
- nmethod_count++;
-
- if (nm->is_in_use() && (curr_comp_id < flush_target)) {
- if ((nm->method()->code() == nm)) {
- // This method has not been previously considered for
- // unloading or it was restored already
- CodeCache::speculatively_disconnect(nm);
- disconnected++;
- } else if (nm->is_speculatively_disconnected()) {
- // This method was previously considered for preemptive unloading and was not called since then
- CompilationPolicy::policy()->delay_compilation(nm->method());
- nm->make_not_entrant();
- made_not_entrant++;
- }
-
- if (curr_comp_id > _highest_marked) {
- _highest_marked = curr_comp_id;
- }
- }
- }
- nm = CodeCache::alive_nmethod(CodeCache::next(nm));
- }
-
- // remember how many compile_ids wheren't seen last flush.
- _dead_compile_ids = curr_max_comp_id - nmethod_count;
-
- log_sweep("stop_cleaning",
- "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
- disconnected, made_not_entrant);
-
- // Shut off compiler. Sweeper will start over with a new stack scan and
- // traversal cycle and turn it back on if it clears enough space.
- if (is_full) {
- _last_full_flush_time = os::javaTimeMillis();
- }
-
- jlong disconnect_end_counter = os::elapsed_counter();
- jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
- _total_disconnect_time += disconnect_time;
- _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
-
- EventCleanCodeCache event(UNTIMED);
- if (event.should_commit()) {
- event.set_starttime(disconnect_start_counter);
- event.set_endtime(disconnect_end_counter);
- event.set_disconnectedCount(disconnected);
- event.set_madeNonEntrantCount(made_not_entrant);
- event.commit();
- }
- _number_of_flushes++;
-
- // After two more traversals the sweeper will get rid of unrestored nmethods
- _last_flush_traversal_id = _traversals;
- _resweep = true;
-#ifdef ASSERT
-
- if(PrintMethodFlushing && Verbose) {
- tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
- }
-#endif
-}
-
-
// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
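The removal heuristic in process_nmethod() above boils down to one inequality per method. Below is a minimal standalone sketch with assumed example values; reverse_free_ratio stands in for CodeCache::reverse_free_ratio(), which grows as free space in the code cache shrinks:

    #include <cstdio>

    bool consider_for_removal(int hotness_counter, int reset_val,
                              double reverse_free_ratio, int sweep_activity) {
      int time_since_reset = reset_val - hotness_counter;
      double threshold = -reset_val + reverse_free_ratio * sweep_activity;
      return (sweep_activity > 0) && (hotness_counter < threshold) && (time_since_reset > 10);
    }

    int main() {
      const int reset_val = 96;  // 48 MB code cache: 48 * 2 (see hotness_counter_reset_val)
      // Nearly full cache: assume reverse_free_ratio() ~ 10, NmethodSweepActivity = 10,
      // so threshold = -96 + 100 = 4. Only almost fully decayed methods qualify.
      printf("cold method:        %d\n", consider_for_removal( 2, reset_val, 10.0, 10));  // 1
      printf("warm method:        %d\n", consider_for_removal(50, reset_val, 10.0, 10));  // 0
      // Half-empty cache: reverse_free_ratio() ~ 2, threshold = -76 < 0, so no
      // method is removed no matter how cold it has become.
      printf("cold, roomy cache:  %d\n", consider_for_removal( 2, reset_val,  2.0, 10));  // 0
      return 0;
    }

This is also why sweep_code_cache() only restarts the compiler once freed_memory > 0: fragmentation can leave megabytes of unusable free space, so compilation is re-enabled only after memory has actually been reclaimed.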
diff --git a/src/share/vm/runtime/sweeper.hpp b/src/share/vm/runtime/sweeper.hpp
index da4a13adc..bd3517607 100644
--- a/src/share/vm/runtime/sweeper.hpp
+++ b/src/share/vm/runtime/sweeper.hpp
@@ -27,8 +27,30 @@
// An NmethodSweeper is an incremental cleaner for:
// - cleanup inline caches
-// - reclamation of unreferences zombie nmethods
-//
+// - reclamation of nmethods
+// Removing nmethods from the code cache includes two operations:
+// 1) mark active nmethods
+// Is done in 'mark_active_nmethods()'. This function is called at a
+// safepoint and marks all nmethods that are active on a thread's stack.
+// 2) sweep nmethods
+// Is done in sweep_code_cache(). This function is the only place in the
+// sweeper where memory is reclaimed. Note that sweep_code_cache() is not
+// called at a safepoint. However, sweep_code_cache() stops executing if
+// another thread requests a safepoint. Consequently, 'mark_active_nmethods()'
+// and sweep_code_cache() cannot execute at the same time.
+// To reclaim memory, nmethods are first marked as 'not-entrant'. Methods can
+// be made not-entrant by (i) the sweeper, (ii) deoptimization, (iii) dependency
+// invalidation, and (iv) being replaced by a different method version (tiered
+// compilation). Not-entrant nmethods cannot be called by Java threads, but they
+// can still be active on the stack. To ensure that active nmethods are not reclaimed,
+// we have to wait until the next marking phase has completed. If a not-entrant
+// nmethod was NOT marked as active, it can be converted to the 'zombie' state. To
+// safely remove the nmethod, all inline caches (IC) that point to the nmethod must be
+// cleared. After that, the nmethod can be evicted from the code cache. Each nmethod's
+// state change happens during separate sweeps. It may take at least 3 sweeps before an
+// nmethod's space is freed. Sweeping is currently done by compiler threads between
+// compilations, or at least every 5 seconds (NmethodSweepCheckInterval) when the
+// code cache is full.
class NMethodSweeper : public AllStatic {
static long _traversals; // Stack scan count, also sweep ID.
@@ -41,46 +63,38 @@ class NMethodSweeper : public AllStatic {
static volatile int _invocations; // No. of invocations left until we are completed with this pass
static volatile int _sweep_started; // Flag to control conc sweeper
- //The following are reset in scan_stacks and synchronized by the safepoint
- static bool _resweep; // Indicates that a change has happend and we want another sweep,
- // always checked and reset at a safepoint so memory will be in sync.
- static int _locked_seen; // Number of locked nmethods encountered during the scan
+ // The following are reset in mark_active_nmethods and synchronized by the safepoint
+ static bool _request_mark_phase; // Indicates that a change has happened and we need another mark phase,
+ // always checked and reset at a safepoint so memory will be in sync.
+ static int _locked_seen; // Number of locked nmethods encountered during the scan
static int _not_entrant_seen_on_stack; // Number of not-entrant nmethods that are still on stack
- static jint _flush_token; // token that guards method flushing, making sure it is executed only once.
-
- // These are set during a flush, a VM-operation
- static long _last_flush_traversal_id; // trav number at last flush unloading
- static jlong _last_full_flush_time; // timestamp of last emergency unloading
-
- // These are synchronized by the _sweep_started token
- static int _highest_marked; // highest compile id dumped at last emergency unloading
- static int _dead_compile_ids; // number of compile ids that where not in the cache last flush
// Stat counters
- static int _number_of_flushes; // Total of full traversals caused by full cache
static int _total_nof_methods_reclaimed; // Accumulated nof methods flushed
static jlong _total_time_sweeping; // Accumulated time sweeping
static jlong _total_time_this_sweep; // Total time this sweep
static jlong _peak_sweep_time; // Peak time for a full sweep
static jlong _peak_sweep_fraction_time; // Peak time sweeping one fraction
- static jlong _total_disconnect_time; // Total time cleaning code mem
- static jlong _peak_disconnect_time; // Peak time cleaning code mem
- static void process_nmethod(nmethod *nm);
+ static int process_nmethod(nmethod *nm);
static void release_nmethod(nmethod* nm);
- static void log_sweep(const char* msg, const char* format = NULL, ...);
static bool sweep_in_progress();
+ static void sweep_code_cache();
+ static void request_nmethod_marking() { _request_mark_phase = true; }
+ static void reset_nmethod_marking() { _request_mark_phase = false; }
+ static bool need_marking_phase() { return _request_mark_phase; }
+
+ static int _hotness_counter_reset_val;
public:
static long traversal_count() { return _traversals; }
- static int number_of_flushes() { return _number_of_flushes; }
static int total_nof_methods_reclaimed() { return _total_nof_methods_reclaimed; }
static jlong total_time_sweeping() { return _total_time_sweeping; }
static jlong peak_sweep_time() { return _peak_sweep_time; }
static jlong peak_sweep_fraction_time() { return _peak_sweep_fraction_time; }
- static jlong total_disconnect_time() { return _total_disconnect_time; }
- static jlong peak_disconnect_time() { return _peak_disconnect_time; }
+ static void log_sweep(const char* msg, const char* format = NULL, ...);
+
#ifdef ASSERT
static bool is_sweeping(nmethod* which) { return _current == which; }
@@ -90,19 +104,18 @@ class NMethodSweeper : public AllStatic {
static void report_events();
#endif
- static void scan_stacks(); // Invoked at the end of each safepoint
- static void sweep_code_cache(); // Concurrent part of sweep job
- static void possibly_sweep(); // Compiler threads call this to sweep
+ static void mark_active_nmethods(); // Invoked at the end of each safepoint
+ static void possibly_sweep(); // Compiler threads call this to sweep
- static void notify(nmethod* nm) {
+ static int sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2);
+ static int hotness_counter_reset_val();
+
+ static void notify() {
// Request a new sweep of the code cache from the beginning. No
// need to synchronize the setting of this flag since it only
// changes to false at safepoint so we can never overwrite it with false.
- _resweep = true;
+ request_nmethod_marking();
}
-
- static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate
- static void speculative_disconnect_nmethods(bool was_full); // Called by vm op to deal with alloc failure
};
#endif // SHARE_VM_RUNTIME_SWEEPER_HPP
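The multi-sweep lifecycle described in the sweeper.hpp header comment above can be condensed into a small state machine. A hedged sketch (simplified types; the real transitions also depend on inline-cache cleaning and VM locking):

    #include <cstdio>

    enum State { alive, not_entrant, zombie, marked_for_reclamation, flushed };

    // One nmethod as the sweeper sees it on consecutive sweeps, following the
    // header comment: not-entrant -> zombie requires a completed marking phase,
    // and a zombie is flushed only the second time the sweeper sees it.
    State next_state(State s, bool active_on_stack, bool is_cold) {
      switch (s) {
        case alive:                  return is_cold ? not_entrant : alive;
        case not_entrant:            return active_on_stack ? not_entrant : zombie;
        case zombie:                 return marked_for_reclamation;  // ICs cleared next
        case marked_for_reclamation: return flushed;                 // memory reclaimed
        default:                     return flushed;
      }
    }

    int main() {
      State s = alive;
      for (int sweep = 1; s != flushed; ++sweep) {
        s = next_state(s, /*active_on_stack=*/false, /*is_cold=*/true);
        printf("after sweep %d: state %d\n", sweep, (int)s);
      }
      return 0;  // four sweeps from alive, i.e., at least three after not-entrant
    }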
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
index e526c7607..e274b9034 100644
--- a/src/share/vm/runtime/vmStructs.cpp
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -842,7 +842,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(nmethod, _osr_link, nmethod*) \
nonstatic_field(nmethod, _scavenge_root_link, nmethod*) \
nonstatic_field(nmethod, _scavenge_root_state, jbyte) \
- nonstatic_field(nmethod, _state, unsigned char) \
+ nonstatic_field(nmethod, _state, volatile unsigned char) \
nonstatic_field(nmethod, _exception_offset, int) \
nonstatic_field(nmethod, _deoptimize_offset, int) \
nonstatic_field(nmethod, _deoptimize_mh_offset, int) \
@@ -1360,6 +1360,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_integer_type(long) \
declare_integer_type(char) \
declare_unsigned_integer_type(unsigned char) \
+ declare_unsigned_integer_type(volatile unsigned char) \
declare_unsigned_integer_type(u_char) \
declare_unsigned_integer_type(unsigned int) \
declare_unsigned_integer_type(uint) \
@@ -1382,6 +1383,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_toplevel_type(char**) \
declare_toplevel_type(u_char*) \
declare_toplevel_type(unsigned char*) \
+ declare_toplevel_type(volatile unsigned char*) \
\
/*******************************************************************/ \
/* Types which it will be handy to have available over in the SA */ \
diff --git a/src/share/vm/runtime/vm_operations.cpp b/src/share/vm/runtime/vm_operations.cpp
index 5166cfdaa..e26c3938b 100644
--- a/src/share/vm/runtime/vm_operations.cpp
+++ b/src/share/vm/runtime/vm_operations.cpp
@@ -173,10 +173,6 @@ void VM_UnlinkSymbols::doit() {
SymbolTable::unlink();
}
-void VM_HandleFullCodeCache::doit() {
- NMethodSweeper::speculative_disconnect_nmethods(_is_full);
-}
-
void VM_Verify::doit() {
Universe::heap()->prepare_for_verify();
Universe::verify(_silent);
diff --git a/src/share/vm/runtime/vm_operations.hpp b/src/share/vm/runtime/vm_operations.hpp
index b6555b457..ca616a52c 100644
--- a/src/share/vm/runtime/vm_operations.hpp
+++ b/src/share/vm/runtime/vm_operations.hpp
@@ -51,7 +51,6 @@
template(DeoptimizeAll) \
template(ZombieAll) \
template(UnlinkSymbols) \
- template(HandleFullCodeCache) \
template(Verify) \
template(PrintJNI) \
template(HeapDumper) \
@@ -261,16 +260,6 @@ class VM_DeoptimizeFrame: public VM_Operation {
bool allow_nested_vm_operations() const { return true; }
};
-class VM_HandleFullCodeCache: public VM_Operation {
- private:
- bool _is_full;
- public:
- VM_HandleFullCodeCache(bool is_full) { _is_full = is_full; }
- VMOp_Type type() const { return VMOp_HandleFullCodeCache; }
- void doit();
- bool allow_nested_vm_operations() const { return true; }
-};
-
#ifndef PRODUCT
class VM_DeoptimizeAll: public VM_Operation {
private:
diff --git a/src/share/vm/trace/trace.xml b/src/share/vm/trace/trace.xml
index 6b1d9a885..a4286116a 100644
--- a/src/share/vm/trace/trace.xml
+++ b/src/share/vm/trace/trace.xml
@@ -313,13 +313,6 @@ Declares a structure type that can be used in other events.
<value type="UINT" field="zombifiedCount" label="Methods Zombified"/>
</event>
- <event id="CleanCodeCache" path="vm/code_sweeper/clean" label="Clean Code Cache"
- description="Clean code cache from oldest methods"
- has_thread="true" is_requestable="false" is_constant="false">
- <value type="UINT" field="disconnectedCount" label="Methods Disconnected"/>
- <value type="UINT" field="madeNonEntrantCount" label="Methods Made Non-Entrant"/>
- </event>
-
<!-- Code cache events -->
<event id="CodeCacheFull" path="vm/code_cache/full" label="Code Cache Full"