path: root/src/share/vm/runtime
author     duke <none@none>  2007-12-01 00:00:00 +0000
committer  duke <none@none>  2007-12-01 00:00:00 +0000
commit     fa88c88cef63eb7f9083dccb60548d78635a44b1 (patch)
tree       8376f6e5c41e70162b5867d9e1fea3f17f540473 /src/share/vm/runtime
Initial load
Diffstat (limited to 'src/share/vm/runtime')
-rw-r--r--  src/share/vm/runtime/aprofiler.cpp  172
-rw-r--r--  src/share/vm/runtime/aprofiler.hpp  60
-rw-r--r--  src/share/vm/runtime/arguments.cpp  2654
-rw-r--r--  src/share/vm/runtime/arguments.hpp  511
-rw-r--r--  src/share/vm/runtime/atomic.cpp  46
-rw-r--r--  src/share/vm/runtime/atomic.hpp  70
-rw-r--r--  src/share/vm/runtime/biasedLocking.cpp  752
-rw-r--r--  src/share/vm/runtime/biasedLocking.hpp  187
-rw-r--r--  src/share/vm/runtime/compilationPolicy.cpp  448
-rw-r--r--  src/share/vm/runtime/compilationPolicy.hpp  90
-rw-r--r--  src/share/vm/runtime/deoptimization.cpp  1789
-rw-r--r--  src/share/vm/runtime/deoptimization.hpp  348
-rw-r--r--  src/share/vm/runtime/extendedPC.hpp  36
-rw-r--r--  src/share/vm/runtime/fieldDescriptor.cpp  168
-rw-r--r--  src/share/vm/runtime/fieldDescriptor.hpp  92
-rw-r--r--  src/share/vm/runtime/fieldType.cpp  92
-rw-r--r--  src/share/vm/runtime/fieldType.hpp  52
-rw-r--r--  src/share/vm/runtime/fprofiler.cpp  1595
-rw-r--r--  src/share/vm/runtime/fprofiler.hpp  311
-rw-r--r--  src/share/vm/runtime/frame.cpp  1408
-rw-r--r--  src/share/vm/runtime/frame.hpp  469
-rw-r--r--  src/share/vm/runtime/frame.inline.hpp  55
-rw-r--r--  src/share/vm/runtime/globals.cpp  429
-rw-r--r--  src/share/vm/runtime/globals.hpp  3208
-rw-r--r--  src/share/vm/runtime/globals_extension.hpp  174
-rw-r--r--  src/share/vm/runtime/handles.cpp  185
-rw-r--r--  src/share/vm/runtime/handles.hpp  348
-rw-r--r--  src/share/vm/runtime/handles.inline.hpp  73
-rw-r--r--  src/share/vm/runtime/hpi.cpp  101
-rw-r--r--  src/share/vm/runtime/hpi.hpp  224
-rw-r--r--  src/share/vm/runtime/icache.cpp  105
-rw-r--r--  src/share/vm/runtime/icache.hpp  115
-rw-r--r--  src/share/vm/runtime/init.cpp  160
-rw-r--r--  src/share/vm/runtime/init.hpp  38
-rw-r--r--  src/share/vm/runtime/interfaceSupport.cpp  269
-rw-r--r--  src/share/vm/runtime/interfaceSupport.hpp  568
-rw-r--r--  src/share/vm/runtime/java.cpp  593
-rw-r--r--  src/share/vm/runtime/java.hpp  123
-rw-r--r--  src/share/vm/runtime/javaCalls.cpp  524
-rw-r--r--  src/share/vm/runtime/javaCalls.hpp  195
-rw-r--r--  src/share/vm/runtime/javaFrameAnchor.hpp  87
-rw-r--r--  src/share/vm/runtime/jfieldIDWorkaround.hpp  159
-rw-r--r--  src/share/vm/runtime/jniHandles.cpp  576
-rw-r--r--  src/share/vm/runtime/jniHandles.hpp  212
-rw-r--r--  src/share/vm/runtime/jniPeriodicChecker.cpp  75
-rw-r--r--  src/share/vm/runtime/jniPeriodicChecker.hpp  52
-rw-r--r--  src/share/vm/runtime/memprofiler.cpp  125
-rw-r--r--  src/share/vm/runtime/memprofiler.hpp  42
-rw-r--r--  src/share/vm/runtime/monitorChunk.cpp  44
-rw-r--r--  src/share/vm/runtime/monitorChunk.hpp  58
-rw-r--r--  src/share/vm/runtime/mutex.cpp  1356
-rw-r--r--  src/share/vm/runtime/mutex.hpp  318
-rw-r--r--  src/share/vm/runtime/mutexLocker.cpp  266
-rw-r--r--  src/share/vm/runtime/mutexLocker.hpp  325
-rw-r--r--  src/share/vm/runtime/objectMonitor.hpp  208
-rw-r--r--  src/share/vm/runtime/objectMonitor.inline.hpp  110
-rw-r--r--  src/share/vm/runtime/orderAccess.cpp  28
-rw-r--r--  src/share/vm/runtime/orderAccess.hpp  303
-rw-r--r--  src/share/vm/runtime/os.cpp  1108
-rw-r--r--  src/share/vm/runtime/os.hpp  596
-rw-r--r--  src/share/vm/runtime/osThread.cpp  55
-rw-r--r--  src/share/vm/runtime/osThread.hpp  131
-rw-r--r--  src/share/vm/runtime/perfData.cpp  594
-rw-r--r--  src/share/vm/runtime/perfData.hpp  955
-rw-r--r--  src/share/vm/runtime/perfMemory.cpp  248
-rw-r--r--  src/share/vm/runtime/perfMemory.hpp  167
-rw-r--r--  src/share/vm/runtime/prefetch.hpp  44
-rw-r--r--  src/share/vm/runtime/reflection.cpp  1586
-rw-r--r--  src/share/vm/runtime/reflection.hpp  162
-rw-r--r--  src/share/vm/runtime/reflectionCompat.hpp  42
-rw-r--r--  src/share/vm/runtime/reflectionUtils.cpp  84
-rw-r--r--  src/share/vm/runtime/reflectionUtils.hpp  211
-rw-r--r--  src/share/vm/runtime/registerMap.hpp  118
-rw-r--r--  src/share/vm/runtime/relocator.cpp  647
-rw-r--r--  src/share/vm/runtime/relocator.hpp  118
-rw-r--r--  src/share/vm/runtime/rframe.cpp  170
-rw-r--r--  src/share/vm/runtime/rframe.hpp  117
-rw-r--r--  src/share/vm/runtime/safepoint.cpp  1215
-rw-r--r--  src/share/vm/runtime/safepoint.hpp  234
-rw-r--r--  src/share/vm/runtime/sharedRuntime.cpp  2181
-rw-r--r--  src/share/vm/runtime/sharedRuntime.hpp  536
-rw-r--r--  src/share/vm/runtime/sharedRuntimeTrans.cpp  719
-rw-r--r--  src/share/vm/runtime/sharedRuntimeTrig.cpp  957
-rw-r--r--  src/share/vm/runtime/signature.cpp  432
-rw-r--r--  src/share/vm/runtime/signature.hpp  416
-rw-r--r--  src/share/vm/runtime/stackValue.cpp  180
-rw-r--r--  src/share/vm/runtime/stackValue.hpp  101
-rw-r--r--  src/share/vm/runtime/stackValueCollection.cpp  149
-rw-r--r--  src/share/vm/runtime/stackValueCollection.hpp  53
-rw-r--r--  src/share/vm/runtime/statSampler.cpp  359
-rw-r--r--  src/share/vm/runtime/statSampler.hpp  62
-rw-r--r--  src/share/vm/runtime/stubCodeGenerator.cpp  145
-rw-r--r--  src/share/vm/runtime/stubCodeGenerator.hpp  120
-rw-r--r--  src/share/vm/runtime/stubRoutines.cpp  277
-rw-r--r--  src/share/vm/runtime/stubRoutines.hpp  261
-rw-r--r--  src/share/vm/runtime/sweeper.cpp  161
-rw-r--r--  src/share/vm/runtime/sweeper.hpp  54
-rw-r--r--  src/share/vm/runtime/synchronizer.cpp  4716
-rw-r--r--  src/share/vm/runtime/synchronizer.hpp  216
-rw-r--r--  src/share/vm/runtime/task.cpp  131
-rw-r--r--  src/share/vm/runtime/task.hpp  125
-rw-r--r--  src/share/vm/runtime/thread.cpp  3972
-rw-r--r--  src/share/vm/runtime/thread.hpp  1757
-rw-r--r--  src/share/vm/runtime/threadCritical.hpp  53
-rw-r--r--  src/share/vm/runtime/threadLocalStorage.cpp  49
-rw-r--r--  src/share/vm/runtime/threadLocalStorage.hpp  62
-rw-r--r--  src/share/vm/runtime/timer.cpp  209
-rw-r--r--  src/share/vm/runtime/timer.hpp  121
-rw-r--r--  src/share/vm/runtime/unhandledOops.cpp  128
-rw-r--r--  src/share/vm/runtime/unhandledOops.hpp  83
-rw-r--r--  src/share/vm/runtime/vframe.cpp  636
-rw-r--r--  src/share/vm/runtime/vframe.hpp  447
-rw-r--r--  src/share/vm/runtime/vframeArray.cpp  585
-rw-r--r--  src/share/vm/runtime/vframeArray.hpp  201
-rw-r--r--  src/share/vm/runtime/vframe_hp.cpp  337
-rw-r--r--  src/share/vm/runtime/vframe_hp.hpp  135
-rw-r--r--  src/share/vm/runtime/virtualspace.cpp  704
-rw-r--r--  src/share/vm/runtime/virtualspace.hpp  190
-rw-r--r--  src/share/vm/runtime/vmStructs.cpp  2277
-rw-r--r--  src/share/vm/runtime/vmStructs.hpp  121
-rw-r--r--  src/share/vm/runtime/vmThread.cpp  655
-rw-r--r--  src/share/vm/runtime/vmThread.hpp  148
-rw-r--r--  src/share/vm/runtime/vm_operations.cpp  450
-rw-r--r--  src/share/vm/runtime/vm_operations.hpp  366
-rw-r--r--  src/share/vm/runtime/vm_version.cpp  212
-rw-r--r--  src/share/vm/runtime/vm_version.hpp  72
-rw-r--r--  src/share/vm/runtime/vtune.hpp  55
127 files changed, 58159 insertions, 0 deletions
diff --git a/src/share/vm/runtime/aprofiler.cpp b/src/share/vm/runtime/aprofiler.cpp
new file mode 100644
index 000000000..4787ba56f
--- /dev/null
+++ b/src/share/vm/runtime/aprofiler.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright 1997-2002 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_aprofiler.cpp.incl"
+
+
+bool AllocationProfiler::_active = false;
+GrowableArray<klassOop>* AllocationProfiler::_print_array = NULL;
+
+
+class AllocProfClosure : public ObjectClosure {
+ public:
+ void do_object(oop obj) {
+ Klass* k = obj->blueprint();
+ k->set_alloc_count(k->alloc_count() + 1);
+ k->set_alloc_size(k->alloc_size() + obj->size());
+ }
+};
+
+
+#ifndef PRODUCT
+
+class AllocProfResetClosure : public ObjectClosure {
+ public:
+ void do_object(oop obj) {
+ if (obj->is_klass()) {
+ Klass* k = Klass::cast(klassOop(obj));
+ k->set_alloc_count(0);
+ k->set_alloc_size(0);
+ }
+ }
+};
+
+#endif
+
+
+void AllocationProfiler::iterate_since_last_gc() {
+ if (is_active()) {
+ AllocProfClosure blk;
+ GenCollectedHeap* heap = GenCollectedHeap::heap();
+ heap->object_iterate_since_last_GC(&blk);
+ }
+}
+
+
+void AllocationProfiler::engage() {
+ _active = true;
+}
+
+
+void AllocationProfiler::disengage() {
+ _active = false;
+}
+
+
+void AllocationProfiler::add_class_to_array(klassOop k) {
+ _print_array->append(k);
+}
+
+
+void AllocationProfiler::add_classes_to_array(klassOop k) {
+ // Iterate over klass and all array klasses for klass
+ k->klass_part()->with_array_klasses_do(&AllocationProfiler::add_class_to_array);
+}
+
+
+int AllocationProfiler::compare_classes(klassOop* k1, klassOop* k2) {
+ // Sort by total allocation size
+ return (*k2)->klass_part()->alloc_size() - (*k1)->klass_part()->alloc_size();
+}
+
+
+int AllocationProfiler::average(size_t alloc_size, int alloc_count) {
+ return (int) ((double) (alloc_size * BytesPerWord) / MAX2(alloc_count, 1) + 0.5);
+}
+
+
+void AllocationProfiler::sort_and_print_array(size_t cutoff) {
+ _print_array->sort(&AllocationProfiler::compare_classes);
+ tty->print_cr("________________Size"
+ "__Instances"
+ "__Average"
+ "__Class________________");
+ size_t total_alloc_size = 0;
+ int total_alloc_count = 0;
+ for (int index = 0; index < _print_array->length(); index++) {
+ klassOop k = _print_array->at(index);
+ size_t alloc_size = k->klass_part()->alloc_size();
+ if (alloc_size > cutoff) {
+ int alloc_count = k->klass_part()->alloc_count();
+#ifdef PRODUCT
+ const char* name = k->klass_part()->external_name();
+#else
+ const char* name = k->klass_part()->internal_name();
+#endif
+ tty->print_cr("%20u %10u %8u %s",
+ alloc_size * BytesPerWord,
+ alloc_count,
+ average(alloc_size, alloc_count),
+ name);
+ total_alloc_size += alloc_size;
+ total_alloc_count += alloc_count;
+ }
+ }
+ tty->print_cr("%20u %10u %8u --total--",
+ total_alloc_size * BytesPerWord,
+ total_alloc_count,
+ average(total_alloc_size, total_alloc_count));
+ tty->cr();
+}
+
+
+void AllocationProfiler::print(size_t cutoff) {
+ ResourceMark rm;
+ assert(!is_active(), "AllocationProfiler cannot be active while printing profile");
+
+ tty->cr();
+ tty->print_cr("Allocation profile (sizes in bytes, cutoff = %ld bytes):", cutoff * BytesPerWord);
+ tty->cr();
+
+ // Print regular instance klasses and basic type array klasses
+ _print_array = new GrowableArray<klassOop>(SystemDictionary::number_of_classes()*2);
+ SystemDictionary::classes_do(&add_classes_to_array);
+ Universe::basic_type_classes_do(&add_classes_to_array);
+ sort_and_print_array(cutoff);
+
+ #ifndef PRODUCT
+ tty->print_cr("Allocation profile for system classes (sizes in bytes, cutoff = %d bytes):", cutoff * BytesPerWord);
+ tty->cr();
+
+ // Print system klasses (methods, symbols, constant pools, etc.)
+ _print_array = new GrowableArray<klassOop>(64);
+ Universe::system_classes_do(&add_classes_to_array);
+ sort_and_print_array(cutoff);
+
+ tty->print_cr("Permanent generation dump (sizes in bytes, cutoff = %d bytes):", cutoff * BytesPerWord);
+ tty->cr();
+
+ AllocProfResetClosure resetblk;
+ Universe::heap()->permanent_object_iterate(&resetblk);
+ AllocProfClosure blk;
+ Universe::heap()->permanent_object_iterate(&blk);
+
+ _print_array = new GrowableArray<klassOop>(SystemDictionary::number_of_classes()*2);
+ SystemDictionary::classes_do(&add_classes_to_array);
+ Universe::basic_type_classes_do(&add_classes_to_array);
+ Universe::system_classes_do(&add_classes_to_array);
+ sort_and_print_array(cutoff);
+ #endif
+}
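
For reference, sort_and_print_array() above orders classes by total allocated size (descending, via compare_classes()) and average() rounds bytes-per-instance to the nearest integer. Below is a minimal standalone sketch of that sort-and-average logic using a plain struct in place of klassOop; the class names, counts, and the 8-byte word size are illustrative assumptions, not values from the patch.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

struct ClassStats {              // stand-in for the per-Klass allocation counters
  const char* name;
  size_t      alloc_size;        // in words, as in the profiler
  int         alloc_count;
};

// Same rounding as AllocationProfiler::average(): nearest byte, guarding against a zero count.
static int average_bytes(size_t alloc_size_words, int alloc_count, int bytes_per_word) {
  return (int)((double)(alloc_size_words * bytes_per_word) / std::max(alloc_count, 1) + 0.5);
}

int main() {
  std::vector<ClassStats> stats = {
    {"java.lang.String", 3000, 1000},
    {"int[]",            9000,  500},
  };
  // compare_classes() sorts descending by total allocation size.
  std::sort(stats.begin(), stats.end(),
            [](const ClassStats& a, const ClassStats& b) { return a.alloc_size > b.alloc_size; });
  const int bytes_per_word = 8;  // assumed BytesPerWord on a 64-bit VM
  for (const ClassStats& s : stats) {
    std::printf("%20zu %10d %8d %s\n",
                s.alloc_size * bytes_per_word, s.alloc_count,
                average_bytes(s.alloc_size, s.alloc_count, bytes_per_word), s.name);
  }
  return 0;
}
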
diff --git a/src/share/vm/runtime/aprofiler.hpp b/src/share/vm/runtime/aprofiler.hpp
new file mode 100644
index 000000000..3a7b9b01b
--- /dev/null
+++ b/src/share/vm/runtime/aprofiler.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 1997-2003 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// A simple allocation profiler for Java. The profiler collects and prints
+// the number and total size of instances allocated per class, including
+// array classes.
+//
+// The profiler is currently global for all threads. It can be changed to a
+// per-thread profiler by keeping a more elaborate data structure and calling
+// iterate_since_last_gc at thread switches.
+
+
+class AllocationProfiler: AllStatic {
+ friend class GenCollectedHeap;
+ friend class MarkSweep;
+ private:
+ static bool _active; // tells whether profiler is active
+ static GrowableArray<klassOop>* _print_array; // temporary array for printing
+
+ // Utility printing functions
+ static void add_class_to_array(klassOop k);
+ static void add_classes_to_array(klassOop k);
+ static int compare_classes(klassOop* k1, klassOop* k2);
+ static int average(size_t alloc_size, int alloc_count);
+ static void sort_and_print_array(size_t cutoff);
+
+ // Call for collecting allocation information. Called at scavenge, mark-sweep and disengage.
+ static void iterate_since_last_gc();
+
+ public:
+ // Start profiler
+ static void engage();
+ // Stop profiler
+ static void disengage();
+ // Tells whether profiler is active
+ static bool is_active() { return _active; }
+ // Print profile
+ static void print(size_t cutoff); // Cutoff in total allocation size (in words)
+};
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
new file mode 100644
index 000000000..5d0329ad0
--- /dev/null
+++ b/src/share/vm/runtime/arguments.cpp
@@ -0,0 +1,2654 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_arguments.cpp.incl"
+
+#define DEFAULT_VENDOR_URL_BUG "http://java.sun.com/webapps/bugreport/crash.jsp"
+#define DEFAULT_JAVA_LAUNCHER "generic"
+
+char** Arguments::_jvm_flags_array = NULL;
+int Arguments::_num_jvm_flags = 0;
+char** Arguments::_jvm_args_array = NULL;
+int Arguments::_num_jvm_args = 0;
+char* Arguments::_java_command = NULL;
+SystemProperty* Arguments::_system_properties = NULL;
+const char* Arguments::_gc_log_filename = NULL;
+bool Arguments::_has_profile = false;
+bool Arguments::_has_alloc_profile = false;
+uintx Arguments::_initial_heap_size = 0;
+uintx Arguments::_min_heap_size = 0;
+Arguments::Mode Arguments::_mode = _mixed;
+bool Arguments::_java_compiler = false;
+bool Arguments::_xdebug_mode = false;
+const char* Arguments::_java_vendor_url_bug = DEFAULT_VENDOR_URL_BUG;
+const char* Arguments::_sun_java_launcher = DEFAULT_JAVA_LAUNCHER;
+int Arguments::_sun_java_launcher_pid = -1;
+
+// These parameters are reset in method parse_vm_init_args(JavaVMInitArgs*)
+bool Arguments::_AlwaysCompileLoopMethods = AlwaysCompileLoopMethods;
+bool Arguments::_UseOnStackReplacement = UseOnStackReplacement;
+bool Arguments::_BackgroundCompilation = BackgroundCompilation;
+bool Arguments::_ClipInlining = ClipInlining;
+intx Arguments::_Tier2CompileThreshold = Tier2CompileThreshold;
+
+char* Arguments::SharedArchivePath = NULL;
+
+AgentLibraryList Arguments::_libraryList;
+AgentLibraryList Arguments::_agentList;
+
+abort_hook_t Arguments::_abort_hook = NULL;
+exit_hook_t Arguments::_exit_hook = NULL;
+vfprintf_hook_t Arguments::_vfprintf_hook = NULL;
+
+
+SystemProperty *Arguments::_java_ext_dirs = NULL;
+SystemProperty *Arguments::_java_endorsed_dirs = NULL;
+SystemProperty *Arguments::_sun_boot_library_path = NULL;
+SystemProperty *Arguments::_java_library_path = NULL;
+SystemProperty *Arguments::_java_home = NULL;
+SystemProperty *Arguments::_java_class_path = NULL;
+SystemProperty *Arguments::_sun_boot_class_path = NULL;
+
+char* Arguments::_meta_index_path = NULL;
+char* Arguments::_meta_index_dir = NULL;
+
+static bool force_client_mode = false;
+
+// Check whether the head of 'option' matches 'name', and set 'tail' to the remaining part of the option string
+
+static bool match_option(const JavaVMOption *option, const char* name,
+ const char** tail) {
+ int len = (int)strlen(name);
+ if (strncmp(option->optionString, name, len) == 0) {
+ *tail = option->optionString + len;
+ return true;
+ } else {
+ return false;
+ }
+}
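
match_option() is a plain prefix match: when optionString begins with name, tail is pointed at the first character after the prefix, which is how the launcher properties below pull out their values. A small standalone illustration of the same idea (the option strings are hypothetical; only the C standard library is used):

#include <cstdio>
#include <cstring>

// Prefix match in the spirit of match_option(): on success, *tail points just past the prefix.
static bool starts_with(const char* option, const char* name, const char** tail) {
  size_t len = std::strlen(name);
  if (std::strncmp(option, name, len) == 0) {
    *tail = option + len;
    return true;
  }
  return false;
}

int main() {
  const char* tail = nullptr;
  if (starts_with("-Dsun.java.launcher.pid=1234", "-Dsun.java.launcher.pid=", &tail)) {
    std::printf("pid tail = %s\n", tail);   // prints "1234"
  }
  return 0;
}
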
+
+static void logOption(const char* opt) {
+ if (PrintVMOptions) {
+ jio_fprintf(defaultStream::output_stream(), "VM option '%s'\n", opt);
+ }
+}
+
+// Process java launcher properties.
+void Arguments::process_sun_java_launcher_properties(JavaVMInitArgs* args) {
+ // See if sun.java.launcher or sun.java.launcher.pid is defined.
+ // Must do this before setting up other system properties,
+ // as some of them may depend on launcher type.
+ for (int index = 0; index < args->nOptions; index++) {
+ const JavaVMOption* option = args->options + index;
+ const char* tail;
+
+ if (match_option(option, "-Dsun.java.launcher=", &tail)) {
+ process_java_launcher_argument(tail, option->extraInfo);
+ continue;
+ }
+ if (match_option(option, "-Dsun.java.launcher.pid=", &tail)) {
+ _sun_java_launcher_pid = atoi(tail);
+ continue;
+ }
+ }
+}
+
+// Initialize system properties key and value.
+void Arguments::init_system_properties() {
+
+ PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.version", "1.0", false));
+ PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.name",
+ "Java Virtual Machine Specification", false));
+ PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
+ "Sun Microsystems Inc.", false));
+ PropertyList_add(&_system_properties, new SystemProperty("java.vm.version", VM_Version::vm_release(), false));
+ PropertyList_add(&_system_properties, new SystemProperty("java.vm.name", VM_Version::vm_name(), false));
+ PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(), false));
+ PropertyList_add(&_system_properties, new SystemProperty("java.vm.info", VM_Version::vm_info_string(), true));
+
+ // The following are JVMTI agent writable properties.
+ // Their values are set to NULL here; because they are OS-specific,
+ // they are initialized in os::init_system_properties_values().
+ _java_ext_dirs = new SystemProperty("java.ext.dirs", NULL, true);
+ _java_endorsed_dirs = new SystemProperty("java.endorsed.dirs", NULL, true);
+ _sun_boot_library_path = new SystemProperty("sun.boot.library.path", NULL, true);
+ _java_library_path = new SystemProperty("java.library.path", NULL, true);
+ _java_home = new SystemProperty("java.home", NULL, true);
+ _sun_boot_class_path = new SystemProperty("sun.boot.class.path", NULL, true);
+
+ _java_class_path = new SystemProperty("java.class.path", "", true);
+
+ // Add to System Property list.
+ PropertyList_add(&_system_properties, _java_ext_dirs);
+ PropertyList_add(&_system_properties, _java_endorsed_dirs);
+ PropertyList_add(&_system_properties, _sun_boot_library_path);
+ PropertyList_add(&_system_properties, _java_library_path);
+ PropertyList_add(&_system_properties, _java_home);
+ PropertyList_add(&_system_properties, _java_class_path);
+ PropertyList_add(&_system_properties, _sun_boot_class_path);
+
+ // Set OS specific system properties values
+ os::init_system_properties_values();
+}
+
+// List of flags that will be ignored and cause a
+// warning to be issued instead of an error. These flags should be
+// accepted for 1.6 but not for 1.7; the list should be cleared at the
+// beginning of 1.7.
+static const char* obsolete_jvm_flags_1_5_0[] = {
+ "UseTrainGC",
+ "UseSpecialLargeObjectHandling",
+ "UseOversizedCarHandling",
+ "TraceCarAllocation",
+ "PrintTrainGCProcessingStats",
+ "LogOfCarSpaceSize",
+ "OversizedCarThreshold",
+ "MinTickInterval",
+ "DefaultTickInterval",
+ "MaxTickInterval",
+ "DelayTickAdjustment",
+ "ProcessingToTenuringRatio",
+ "MinTrainLength",
+ 0};
+
+bool Arguments::made_obsolete_in_1_5_0(const char *s) {
+ int i = 0;
+ while (obsolete_jvm_flags_1_5_0[i] != NULL) {
+ // <flag>=xxx form
+ // [-|+]<flag> form
+ if ((strncmp(obsolete_jvm_flags_1_5_0[i], s,
+ strlen(obsolete_jvm_flags_1_5_0[i])) == 0) ||
+ ((s[0] == '+' || s[0] == '-') &&
+ (strncmp(obsolete_jvm_flags_1_5_0[i], &s[1],
+ strlen(obsolete_jvm_flags_1_5_0[i])) == 0))) {
+ return true;
+ }
+ i++;
+ }
+ return false;
+}
+
+// Constructs the system class path (aka boot class path) from the following
+// components, in order:
+//
+// prefix // from -Xbootclasspath/p:...
+// endorsed // the expansion of -Djava.endorsed.dirs=...
+// base // from os::get_system_properties() or -Xbootclasspath=
+// suffix // from -Xbootclasspath/a:...
+//
+// java.endorsed.dirs is a list of directories; any jar or zip files in the
+// directories are added to the sysclasspath just before the base.
+//
+// This could be AllStatic, but it isn't needed after argument processing is
+// complete.
+class SysClassPath: public StackObj {
+public:
+ SysClassPath(const char* base);
+ ~SysClassPath();
+
+ inline void set_base(const char* base);
+ inline void add_prefix(const char* prefix);
+ inline void add_suffix(const char* suffix);
+ inline void reset_path(const char* base);
+
+ // Expand the jar/zip files in each directory listed by the java.endorsed.dirs
+ // property. Must be called after all command-line arguments have been
+ // processed (in particular, -Djava.endorsed.dirs=...) and before calling
+ // combined_path().
+ void expand_endorsed();
+
+ inline const char* get_base() const { return _items[_scp_base]; }
+ inline const char* get_prefix() const { return _items[_scp_prefix]; }
+ inline const char* get_suffix() const { return _items[_scp_suffix]; }
+ inline const char* get_endorsed() const { return _items[_scp_endorsed]; }
+
+ // Combine all the components into a single c-heap-allocated string; caller
+ // must free the string if/when no longer needed.
+ char* combined_path();
+
+private:
+ // Utility routines.
+ static char* add_to_path(const char* path, const char* str, bool prepend);
+ static char* add_jars_to_path(char* path, const char* directory);
+
+ inline void reset_item_at(int index);
+
+ // Array indices for the items that make up the sysclasspath. All except the
+ // base are allocated in the C heap and freed by this class.
+ enum {
+ _scp_prefix, // from -Xbootclasspath/p:...
+ _scp_endorsed, // the expansion of -Djava.endorsed.dirs=...
+ _scp_base, // the default sysclasspath
+ _scp_suffix, // from -Xbootclasspath/a:...
+ _scp_nitems // the number of items, must be last.
+ };
+
+ const char* _items[_scp_nitems];
+ DEBUG_ONLY(bool _expansion_done;)
+};
+
+SysClassPath::SysClassPath(const char* base) {
+ memset(_items, 0, sizeof(_items));
+ _items[_scp_base] = base;
+ DEBUG_ONLY(_expansion_done = false;)
+}
+
+SysClassPath::~SysClassPath() {
+ // Free everything except the base.
+ for (int i = 0; i < _scp_nitems; ++i) {
+ if (i != _scp_base) reset_item_at(i);
+ }
+ DEBUG_ONLY(_expansion_done = false;)
+}
+
+inline void SysClassPath::set_base(const char* base) {
+ _items[_scp_base] = base;
+}
+
+inline void SysClassPath::add_prefix(const char* prefix) {
+ _items[_scp_prefix] = add_to_path(_items[_scp_prefix], prefix, true);
+}
+
+inline void SysClassPath::add_suffix(const char* suffix) {
+ _items[_scp_suffix] = add_to_path(_items[_scp_suffix], suffix, false);
+}
+
+inline void SysClassPath::reset_item_at(int index) {
+ assert(index < _scp_nitems && index != _scp_base, "just checking");
+ if (_items[index] != NULL) {
+ FREE_C_HEAP_ARRAY(char, _items[index]);
+ _items[index] = NULL;
+ }
+}
+
+inline void SysClassPath::reset_path(const char* base) {
+ // Clear the prefix and suffix.
+ reset_item_at(_scp_prefix);
+ reset_item_at(_scp_suffix);
+ set_base(base);
+}
+
+//------------------------------------------------------------------------------
+
+void SysClassPath::expand_endorsed() {
+ assert(_items[_scp_endorsed] == NULL, "can only be called once.");
+
+ const char* path = Arguments::get_property("java.endorsed.dirs");
+ if (path == NULL) {
+ path = Arguments::get_endorsed_dir();
+ assert(path != NULL, "no default for java.endorsed.dirs");
+ }
+
+ char* expanded_path = NULL;
+ const char separator = *os::path_separator();
+ const char* const end = path + strlen(path);
+ while (path < end) {
+ const char* tmp_end = strchr(path, separator);
+ if (tmp_end == NULL) {
+ expanded_path = add_jars_to_path(expanded_path, path);
+ path = end;
+ } else {
+ char* dirpath = NEW_C_HEAP_ARRAY(char, tmp_end - path + 1);
+ memcpy(dirpath, path, tmp_end - path);
+ dirpath[tmp_end - path] = '\0';
+ expanded_path = add_jars_to_path(expanded_path, dirpath);
+ FREE_C_HEAP_ARRAY(char, dirpath);
+ path = tmp_end + 1;
+ }
+ }
+ _items[_scp_endorsed] = expanded_path;
+ DEBUG_ONLY(_expansion_done = true;)
+}
+
+// Combine the bootclasspath elements, some of which may be null, into a single
+// c-heap-allocated string.
+char* SysClassPath::combined_path() {
+ assert(_items[_scp_base] != NULL, "empty default sysclasspath");
+ assert(_expansion_done, "must call expand_endorsed() first.");
+
+ size_t lengths[_scp_nitems];
+ size_t total_len = 0;
+
+ const char separator = *os::path_separator();
+
+ // Get the lengths.
+ int i;
+ for (i = 0; i < _scp_nitems; ++i) {
+ if (_items[i] != NULL) {
+ lengths[i] = strlen(_items[i]);
+ // Include space for the separator char (or a NULL for the last item).
+ total_len += lengths[i] + 1;
+ }
+ }
+ assert(total_len > 0, "empty sysclasspath not allowed");
+
+ // Copy the _items to a single string.
+ char* cp = NEW_C_HEAP_ARRAY(char, total_len);
+ char* cp_tmp = cp;
+ for (i = 0; i < _scp_nitems; ++i) {
+ if (_items[i] != NULL) {
+ memcpy(cp_tmp, _items[i], lengths[i]);
+ cp_tmp += lengths[i];
+ *cp_tmp++ = separator;
+ }
+ }
+ *--cp_tmp = '\0'; // Replace the extra separator.
+ return cp;
+}
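
As the SysClassPath comment above describes, the final boot class path is the prefix, the endorsed expansion, the base, and the suffix joined in that order with the platform path separator, with empty components skipped. A standalone sketch of that join; the paths and the ':' separator are assumptions for illustration:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Order mirrors _scp_prefix, _scp_endorsed, _scp_base, _scp_suffix above.
  std::vector<std::string> items = {
    "/opt/patches/classes",        // -Xbootclasspath/p:...        (hypothetical path)
    "/jre/lib/endorsed/foo.jar",   // java.endorsed.dirs expansion (hypothetical path)
    "/jre/lib/rt.jar",             // the default sysclasspath base (hypothetical path)
    ""                             // no -Xbootclasspath/a: suffix given
  };
  const char separator = ':';      // assumed value of *os::path_separator()
  std::string combined;
  for (const std::string& item : items) {
    if (item.empty()) continue;    // NULL/empty components are skipped
    if (!combined.empty()) combined += separator;
    combined += item;
  }
  std::printf("%s\n", combined.c_str());
  return 0;
}
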
+
+// Note: path must be c-heap-allocated (or NULL); it is freed if non-null.
+char*
+SysClassPath::add_to_path(const char* path, const char* str, bool prepend) {
+ char *cp;
+
+ assert(str != NULL, "just checking");
+ if (path == NULL) {
+ size_t len = strlen(str) + 1;
+ cp = NEW_C_HEAP_ARRAY(char, len);
+ memcpy(cp, str, len); // copy the trailing null
+ } else {
+ const char separator = *os::path_separator();
+ size_t old_len = strlen(path);
+ size_t str_len = strlen(str);
+ size_t len = old_len + str_len + 2;
+
+ if (prepend) {
+ cp = NEW_C_HEAP_ARRAY(char, len);
+ char* cp_tmp = cp;
+ memcpy(cp_tmp, str, str_len);
+ cp_tmp += str_len;
+ *cp_tmp = separator;
+ memcpy(++cp_tmp, path, old_len + 1); // copy the trailing null
+ FREE_C_HEAP_ARRAY(char, path);
+ } else {
+ cp = REALLOC_C_HEAP_ARRAY(char, path, len);
+ char* cp_tmp = cp + old_len;
+ *cp_tmp = separator;
+ memcpy(++cp_tmp, str, str_len + 1); // copy the trailing null
+ }
+ }
+ return cp;
+}
+
+// Scan the directory and append any jar or zip files found to path.
+// Note: path must be c-heap-allocated (or NULL); it is freed if non-null.
+char* SysClassPath::add_jars_to_path(char* path, const char* directory) {
+ DIR* dir = os::opendir(directory);
+ if (dir == NULL) return path;
+
+ char dir_sep[2] = { '\0', '\0' };
+ size_t directory_len = strlen(directory);
+ const char fileSep = *os::file_separator();
+ if (directory[directory_len - 1] != fileSep) dir_sep[0] = fileSep;
+
+ /* Scan the directory for jars/zips, appending them to path. */
+ struct dirent *entry;
+ char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory));
+ while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
+ const char* name = entry->d_name;
+ const char* ext = name + strlen(name) - 4;
+ bool isJarOrZip = ext > name &&
+ (os::file_name_strcmp(ext, ".jar") == 0 ||
+ os::file_name_strcmp(ext, ".zip") == 0);
+ if (isJarOrZip) {
+ char* jarpath = NEW_C_HEAP_ARRAY(char, directory_len + 2 + strlen(name));
+ sprintf(jarpath, "%s%s%s", directory, dir_sep, name);
+ path = add_to_path(path, jarpath, false);
+ FREE_C_HEAP_ARRAY(char, jarpath);
+ }
+ }
+ FREE_C_HEAP_ARRAY(char, dbuf);
+ os::closedir(dir);
+ return path;
+}
+
+// Parses a memory size specification string.
+static bool atomll(const char *s, jlong* result) {
+ jlong n = 0;
+ int args_read = sscanf(s, os::jlong_format_specifier(), &n);
+ if (args_read != 1) {
+ return false;
+ }
+ while (*s != '\0' && isdigit(*s)) {
+ s++;
+ }
+ // 4705540: illegal if more characters are found after the first non-digit
+ if (strlen(s) > 1) {
+ return false;
+ }
+ switch (*s) {
+ case 'T': case 't':
+ *result = n * G * K;
+ return true;
+ case 'G': case 'g':
+ *result = n * G;
+ return true;
+ case 'M': case 'm':
+ *result = n * M;
+ return true;
+ case 'K': case 'k':
+ *result = n * K;
+ return true;
+ case '\0':
+ *result = n;
+ return true;
+ default:
+ return false;
+ }
+}
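
atomll() accepts a decimal number followed by at most one K/M/G/T scale character in either case; anything else after the digits is rejected (see the 4705540 note above). A standalone sketch of the same rule, with long long standing in for jlong (assumed to be a 64-bit signed integer):

#include <cctype>
#include <cstdio>
#include <cstring>

static bool parse_memory_size(const char* s, long long* result) {
  long long n = 0;
  if (std::sscanf(s, "%lld", &n) != 1) return false;
  while (*s != '\0' && std::isdigit((unsigned char)*s)) s++;   // skip the digits
  if (std::strlen(s) > 1) return false;                        // at most one character may follow
  const long long K = 1024, M = K * K, G = K * M, T = K * G;
  switch (*s) {
    case 'T': case 't': *result = n * T; return true;
    case 'G': case 'g': *result = n * G; return true;
    case 'M': case 'm': *result = n * M; return true;
    case 'K': case 'k': *result = n * K; return true;
    case '\0':          *result = n;     return true;
    default:            return false;
  }
}

int main() {
  long long v = 0;
  if (parse_memory_size("128m", &v)) std::printf("128m -> %lld\n", v);   // 134217728
  std::printf("2gb  -> %s\n", parse_memory_size("2gb", &v) ? "accepted" : "rejected");
  return 0;
}
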
+
+Arguments::ArgsRange Arguments::check_memory_size(jlong size, jlong min_size) {
+ if (size < min_size) return arg_too_small;
+ // Check that size will fit in a size_t (only relevant on 32-bit)
+ if ((julong) size > max_uintx) return arg_too_big;
+ return arg_in_range;
+}
+
+// Describe an argument out of range error
+void Arguments::describe_range_error(ArgsRange errcode) {
+ switch(errcode) {
+ case arg_too_big:
+ jio_fprintf(defaultStream::error_stream(),
+ "The specified size exceeds the maximum "
+ "representable size.\n");
+ break;
+ case arg_too_small:
+ case arg_unreadable:
+ case arg_in_range:
+ // do nothing for now
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+static bool set_bool_flag(char* name, bool value, FlagValueOrigin origin) {
+ return CommandLineFlags::boolAtPut(name, &value, origin);
+}
+
+
+static bool set_fp_numeric_flag(char* name, char* value, FlagValueOrigin origin) {
+ double v;
+ if (sscanf(value, "%lf", &v) != 1) {
+ return false;
+ }
+
+ if (CommandLineFlags::doubleAtPut(name, &v, origin)) {
+ return true;
+ }
+ return false;
+}
+
+
+static bool set_numeric_flag(char* name, char* value, FlagValueOrigin origin) {
+ jlong v;
+ intx intx_v;
+ bool is_neg = false;
+ // Check the sign first since atomll() parses only unsigned values.
+ if (*value == '-') {
+ if (!CommandLineFlags::intxAt(name, &intx_v)) {
+ return false;
+ }
+ value++;
+ is_neg = true;
+ }
+ if (!atomll(value, &v)) {
+ return false;
+ }
+ intx_v = (intx) v;
+ if (is_neg) {
+ intx_v = -intx_v;
+ }
+ if (CommandLineFlags::intxAtPut(name, &intx_v, origin)) {
+ return true;
+ }
+ uintx uintx_v = (uintx) v;
+ if (!is_neg && CommandLineFlags::uintxAtPut(name, &uintx_v, origin)) {
+ return true;
+ }
+ return false;
+}
+
+
+static bool set_string_flag(char* name, const char* value, FlagValueOrigin origin) {
+ if (!CommandLineFlags::ccstrAtPut(name, &value, origin)) return false;
+ // Contract: CommandLineFlags always returns a pointer that needs freeing.
+ FREE_C_HEAP_ARRAY(char, value);
+ return true;
+}
+
+static bool append_to_string_flag(char* name, const char* new_value, FlagValueOrigin origin) {
+ const char* old_value = "";
+ if (!CommandLineFlags::ccstrAt(name, &old_value)) return false;
+ size_t old_len = old_value != NULL ? strlen(old_value) : 0;
+ size_t new_len = strlen(new_value);
+ const char* value;
+ char* free_this_too = NULL;
+ if (old_len == 0) {
+ value = new_value;
+ } else if (new_len == 0) {
+ value = old_value;
+ } else {
+ char* buf = NEW_C_HEAP_ARRAY(char, old_len + 1 + new_len + 1);
+ // each new setting adds another LINE to the switch:
+ sprintf(buf, "%s\n%s", old_value, new_value);
+ value = buf;
+ free_this_too = buf;
+ }
+ (void) CommandLineFlags::ccstrAtPut(name, &value, origin);
+ // CommandLineFlags always returns a pointer that needs freeing.
+ FREE_C_HEAP_ARRAY(char, value);
+ if (free_this_too != NULL) {
+ // CommandLineFlags made its own copy, so I must delete my own temp. buffer.
+ FREE_C_HEAP_ARRAY(char, free_this_too);
+ }
+ return true;
+}
+
+
+bool Arguments::parse_argument(const char* arg, FlagValueOrigin origin) {
+
+ // range of acceptable characters spelled out for portability reasons
+#define NAME_RANGE "[abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_]"
+#define BUFLEN 255
+ char name[BUFLEN+1];
+ char dummy;
+
+ if (sscanf(arg, "-%" XSTR(BUFLEN) NAME_RANGE "%c", name, &dummy) == 1) {
+ return set_bool_flag(name, false, origin);
+ }
+ if (sscanf(arg, "+%" XSTR(BUFLEN) NAME_RANGE "%c", name, &dummy) == 1) {
+ return set_bool_flag(name, true, origin);
+ }
+
+ char punct;
+ if (sscanf(arg, "%" XSTR(BUFLEN) NAME_RANGE "%c", name, &punct) == 2 && punct == '=') {
+ const char* value = strchr(arg, '=') + 1;
+ Flag* flag = Flag::find_flag(name, strlen(name));
+ if (flag != NULL && flag->is_ccstr()) {
+ if (flag->ccstr_accumulates()) {
+ return append_to_string_flag(name, value, origin);
+ } else {
+ if (value[0] == '\0') {
+ value = NULL;
+ }
+ return set_string_flag(name, value, origin);
+ }
+ }
+ }
+
+ if (sscanf(arg, "%" XSTR(BUFLEN) NAME_RANGE ":%c", name, &punct) == 2 && punct == '=') {
+ const char* value = strchr(arg, '=') + 1;
+ // -XX:Foo:=xxx will reset the string flag to the given value.
+ if (value[0] == '\0') {
+ value = NULL;
+ }
+ return set_string_flag(name, value, origin);
+ }
+
+#define SIGNED_FP_NUMBER_RANGE "[-0123456789.]"
+#define SIGNED_NUMBER_RANGE "[-0123456789]"
+#define NUMBER_RANGE "[0123456789]"
+ char value[BUFLEN + 1];
+ char value2[BUFLEN + 1];
+ if (sscanf(arg, "%" XSTR(BUFLEN) NAME_RANGE "=" "%" XSTR(BUFLEN) SIGNED_NUMBER_RANGE "." "%" XSTR(BUFLEN) NUMBER_RANGE "%c", name, value, value2, &dummy) == 3) {
+ // Looks like a floating-point number -- try again with more lenient format string
+ if (sscanf(arg, "%" XSTR(BUFLEN) NAME_RANGE "=" "%" XSTR(BUFLEN) SIGNED_FP_NUMBER_RANGE "%c", name, value, &dummy) == 2) {
+ return set_fp_numeric_flag(name, value, origin);
+ }
+ }
+
+#define VALUE_RANGE "[-kmgtKMGT0123456789]"
+ if (sscanf(arg, "%" XSTR(BUFLEN) NAME_RANGE "=" "%" XSTR(BUFLEN) VALUE_RANGE "%c", name, value, &dummy) == 2) {
+ return set_numeric_flag(name, value, origin);
+ }
+
+ return false;
+}
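
parse_argument() recognizes four shapes once the -XX: prefix has been stripped: -Flag and +Flag for booleans, Flag=value for string, integral, and floating-point flags, and Flag:=value to reset a string flag. The sketch below is a simplified standalone classifier of those shapes only; it does not reproduce the sscanf character ranges above, and SomeCcstrFlag is a made-up flag name:

#include <cstddef>
#include <cstdio>
#include <string>

enum class FlagForm { BoolOff, BoolOn, Assign, Reset, Unknown };

// Simplified dispatch over the -XX flag shapes handled by parse_argument().
static FlagForm classify(const std::string& arg, std::string* name, std::string* value) {
  if (!arg.empty() && (arg[0] == '-' || arg[0] == '+')) {
    *name = arg.substr(1);
    return arg[0] == '-' ? FlagForm::BoolOff : FlagForm::BoolOn;
  }
  std::size_t eq = arg.find('=');
  if (eq == std::string::npos) return FlagForm::Unknown;
  bool reset = eq > 0 && arg[eq - 1] == ':';           // the "Flag:=value" form
  *name  = arg.substr(0, reset ? eq - 1 : eq);
  *value = arg.substr(eq + 1);
  return reset ? FlagForm::Reset : FlagForm::Assign;
}

int main() {
  std::string name, value;
  classify("+PrintGCDetails", &name, &value);           // BoolOn, name = "PrintGCDetails"
  classify("NewSize=64m", &name, &value);               // Assign, name = "NewSize", value = "64m"
  classify("SomeCcstrFlag:=new-value", &name, &value);  // Reset,  name = "SomeCcstrFlag"
  std::printf("last parsed: %s = %s\n", name.c_str(), value.c_str());
  return 0;
}
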
+
+
+void Arguments::add_string(char*** bldarray, int* count, const char* arg) {
+ assert(bldarray != NULL, "illegal argument");
+
+ if (arg == NULL) {
+ return;
+ }
+
+ int index = *count;
+
+ // expand the array and add arg to the last element
+ (*count)++;
+ if (*bldarray == NULL) {
+ *bldarray = NEW_C_HEAP_ARRAY(char*, *count);
+ } else {
+ *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, *count);
+ }
+ (*bldarray)[index] = strdup(arg);
+}
+
+void Arguments::build_jvm_args(const char* arg) {
+ add_string(&_jvm_args_array, &_num_jvm_args, arg);
+}
+
+void Arguments::build_jvm_flags(const char* arg) {
+ add_string(&_jvm_flags_array, &_num_jvm_flags, arg);
+}
+
+// utility function to return a string that concatenates all
+// strings in a given char** array
+const char* Arguments::build_resource_string(char** args, int count) {
+ if (args == NULL || count == 0) {
+ return NULL;
+ }
+ size_t length = strlen(args[0]) + 1; // add 1 for the null terminator
+ for (int i = 1; i < count; i++) {
+ length += strlen(args[i]) + 1; // add 1 for a space
+ }
+ char* s = NEW_RESOURCE_ARRAY(char, length);
+ strcpy(s, args[0]);
+ for (int j = 1; j < count; j++) {
+ strcat(s, " ");
+ strcat(s, args[j]);
+ }
+ return (const char*) s;
+}
+
+void Arguments::print_on(outputStream* st) {
+ st->print_cr("VM Arguments:");
+ if (num_jvm_flags() > 0) {
+ st->print("jvm_flags: "); print_jvm_flags_on(st);
+ }
+ if (num_jvm_args() > 0) {
+ st->print("jvm_args: "); print_jvm_args_on(st);
+ }
+ st->print_cr("java_command: %s", java_command() ? java_command() : "<unknown>");
+ st->print_cr("Launcher Type: %s", _sun_java_launcher);
+}
+
+void Arguments::print_jvm_flags_on(outputStream* st) {
+ if (_num_jvm_flags > 0) {
+ for (int i=0; i < _num_jvm_flags; i++) {
+ st->print("%s ", _jvm_flags_array[i]);
+ }
+ st->print_cr("");
+ }
+}
+
+void Arguments::print_jvm_args_on(outputStream* st) {
+ if (_num_jvm_args > 0) {
+ for (int i=0; i < _num_jvm_args; i++) {
+ st->print("%s ", _jvm_args_array[i]);
+ }
+ st->print_cr("");
+ }
+}
+
+bool Arguments::process_argument(const char* arg, jboolean ignore_unrecognized, FlagValueOrigin origin) {
+
+ if (parse_argument(arg, origin)) {
+ // do nothing
+ } else if (made_obsolete_in_1_5_0(arg)) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Warning: The flag %s has been EOL'd as of 1.5.0 and will"
+ " be ignored\n", arg);
+ } else {
+ if (!ignore_unrecognized) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Unrecognized VM option '%s'\n", arg);
+ // allow for commandline "commenting out" options like -XX:#+Verbose
+ if (strlen(arg) == 0 || arg[0] != '#') {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
+bool Arguments::process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized) {
+ FILE* stream = fopen(file_name, "rb");
+ if (stream == NULL) {
+ if (should_exist) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Could not open settings file %s\n", file_name);
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ char token[1024];
+ int pos = 0;
+
+ bool in_white_space = true;
+ bool in_comment = false;
+ bool in_quote = false;
+ char quote_c = 0;
+ bool result = true;
+
+ int c = getc(stream);
+ while(c != EOF) {
+ if (in_white_space) {
+ if (in_comment) {
+ if (c == '\n') in_comment = false;
+ } else {
+ if (c == '#') in_comment = true;
+ else if (!isspace(c)) {
+ in_white_space = false;
+ token[pos++] = c;
+ }
+ }
+ } else {
+ if (c == '\n' || (!in_quote && isspace(c))) {
+ // token ends at newline, or at unquoted whitespace
+ // this allows a way to include spaces in string-valued options
+ token[pos] = '\0';
+ logOption(token);
+ result &= process_argument(token, ignore_unrecognized, CONFIG_FILE);
+ build_jvm_flags(token);
+ pos = 0;
+ in_white_space = true;
+ in_quote = false;
+ } else if (!in_quote && (c == '\'' || c == '"')) {
+ in_quote = true;
+ quote_c = c;
+ } else if (in_quote && (c == quote_c)) {
+ in_quote = false;
+ } else {
+ token[pos++] = c;
+ }
+ }
+ c = getc(stream);
+ }
+ if (pos > 0) {
+ token[pos] = '\0';
+ result &= process_argument(token, ignore_unrecognized, CONFIG_FILE);
+ build_jvm_flags(token);
+ }
+ fclose(stream);
+ return result;
+}
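
process_settings_file() tokenizes a .hotspotrc-style file on whitespace: '#' between tokens starts a comment that runs to the end of the line, and single or double quotes inside a token allow embedded spaces. A standalone sketch of the same tokenizer over an in-memory string; the sample input and SomeCcstrFlag are illustrative:

#include <cctype>
#include <cstdio>
#include <string>
#include <vector>

// Tokenize settings text the way process_settings_file() does: '#' between tokens
// starts a comment running to end of line, quotes inside a token allow embedded
// whitespace, and a token ends at a newline or at unquoted whitespace.
static std::vector<std::string> tokenize_settings(const std::string& text) {
  std::vector<std::string> tokens;
  std::string token;
  bool in_token = false, in_comment = false, in_quote = false;
  char quote_c = 0;
  for (char c : text) {
    if (!in_token) {
      if (in_comment) { if (c == '\n') in_comment = false; }
      else if (c == '#') { in_comment = true; }
      else if (!std::isspace((unsigned char)c)) { in_token = true; token += c; }
    } else if (c == '\n' || (!in_quote && std::isspace((unsigned char)c))) {
      tokens.push_back(token); token.clear();
      in_token = false; in_quote = false;
    } else if (!in_quote && (c == '\'' || c == '"')) {
      in_quote = true; quote_c = c;
    } else if (in_quote && c == quote_c) {
      in_quote = false;
    } else {
      token += c;
    }
  }
  if (in_token) tokens.push_back(token);
  return tokens;
}

int main() {
  const std::string sample = "# GC settings\n+PrintGCDetails SomeCcstrFlag='two words'\n";
  for (const std::string& t : tokenize_settings(sample))
    std::printf("[%s]\n", t.c_str());    // [+PrintGCDetails] [SomeCcstrFlag=two words]
  return 0;
}
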
+
+//=============================================================================================================
+// Parsing of properties (-D)
+
+const char* Arguments::get_property(const char* key) {
+ return PropertyList_get_value(system_properties(), key);
+}
+
+bool Arguments::add_property(const char* prop) {
+ const char* eq = strchr(prop, '=');
+ char* key;
+ // ns must be static--its address may be stored in a SystemProperty object.
+ const static char ns[1] = {0};
+ char* value = (char *)ns;
+
+ size_t key_len = (eq == NULL) ? strlen(prop) : (eq - prop);
+ key = AllocateHeap(key_len + 1, "add_property");
+ strncpy(key, prop, key_len);
+ key[key_len] = '\0';
+
+ if (eq != NULL) {
+ size_t value_len = strlen(prop) - key_len - 1;
+ value = AllocateHeap(value_len + 1, "add_property");
+ strncpy(value, &prop[key_len + 1], value_len + 1);
+ }
+
+ if (strcmp(key, "java.compiler") == 0) {
+ process_java_compiler_argument(value);
+ FreeHeap(key);
+ if (eq != NULL) {
+ FreeHeap(value);
+ }
+ return true;
+ }
+ else if (strcmp(key, "sun.java.command") == 0) {
+
+ _java_command = value;
+
+ // don't add this property to the properties exposed to the java application
+ FreeHeap(key);
+ return true;
+ }
+ else if (strcmp(key, "sun.java.launcher.pid") == 0) {
+ // launcher.pid property is private and is processed
+ // in process_sun_java_launcher_properties();
+ // the sun.java.launcher property is passed on to the java application
+ FreeHeap(key);
+ if (eq != NULL) {
+ FreeHeap(value);
+ }
+ return true;
+ }
+ else if (strcmp(key, "java.vendor.url.bug") == 0) {
+ // save it in _java_vendor_url_bug, so JVM fatal error handler can access
+ // its value without going through the property list or making a Java call.
+ _java_vendor_url_bug = value;
+ }
+
+ // Create new property and add at the end of the list
+ PropertyList_unique_add(&_system_properties, key, value);
+ return true;
+}
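
add_property() splits a -D argument at the first '=', taking the key from the text before it and the value from the text after it; a missing '=' yields an empty value, and a few keys (java.compiler, sun.java.command, sun.java.launcher.pid, java.vendor.url.bug) are intercepted as shown above. A standalone sketch of just the split, using std::string instead of the C-heap helpers (the property strings are hypothetical):

#include <cstddef>
#include <cstdio>
#include <string>
#include <utility>

// Split "key=value" at the first '=' as add_property() does; no '=' means an empty value.
static std::pair<std::string, std::string> split_property(const std::string& prop) {
  std::size_t eq = prop.find('=');
  if (eq == std::string::npos) return {prop, ""};
  return {prop.substr(0, eq), prop.substr(eq + 1)};
}

int main() {
  std::pair<std::string, std::string> p1 = split_property("java.vendor.url.bug=http://example.invalid/report");
  std::pair<std::string, std::string> p2 = split_property("some.switch");
  std::printf("%s -> '%s'\n", p1.first.c_str(), p1.second.c_str());
  std::printf("%s -> '%s'\n", p2.first.c_str(), p2.second.c_str());  // some.switch -> ''
  return 0;
}
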
+
+//===========================================================================================================
+// Setting int/mixed/comp mode flags
+
+void Arguments::set_mode_flags(Mode mode) {
+ // Set up default values for all flags.
+ // If you add a flag to any of the branches below,
+ // add a default value for it here.
+ set_java_compiler(false);
+ _mode = mode;
+
+ // Ensure Agent_OnLoad has the correct initial values.
+ // This may not be the final mode; mode may change later in onload phase.
+ PropertyList_unique_add(&_system_properties, "java.vm.info",
+ (char*)Abstract_VM_Version::vm_info_string());
+
+ UseInterpreter = true;
+ UseCompiler = true;
+ UseLoopCounter = true;
+
+ // Default values may be platform/compiler dependent -
+ // use the saved values
+ ClipInlining = Arguments::_ClipInlining;
+ AlwaysCompileLoopMethods = Arguments::_AlwaysCompileLoopMethods;
+ UseOnStackReplacement = Arguments::_UseOnStackReplacement;
+ BackgroundCompilation = Arguments::_BackgroundCompilation;
+ Tier2CompileThreshold = Arguments::_Tier2CompileThreshold;
+
+ // Change from defaults based on mode
+ switch (mode) {
+ default:
+ ShouldNotReachHere();
+ break;
+ case _int:
+ UseCompiler = false;
+ UseLoopCounter = false;
+ AlwaysCompileLoopMethods = false;
+ UseOnStackReplacement = false;
+ break;
+ case _mixed:
+ // same as default
+ break;
+ case _comp:
+ UseInterpreter = false;
+ BackgroundCompilation = false;
+ ClipInlining = false;
+ break;
+ }
+}
+
+
+// Conflict: required to use shared spaces (-Xshare:on), but
+// incompatible command line options were chosen.
+
+static void no_shared_spaces() {
+ if (RequireSharedSpaces) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Class data sharing is inconsistent with other specified options.\n");
+ vm_exit_during_initialization("Unable to use shared archive.", NULL);
+ } else {
+ FLAG_SET_DEFAULT(UseSharedSpaces, false);
+ }
+}
+
+
+// If the user has chosen ParallelGCThreads > 0, we set UseParNewGC
+// unless it has been explicitly set or unset. If the user has chosen
+// UseParNewGC but has not explicitly set ParallelGCThreads, we
+// set it, unless this is a single-CPU machine.
+void Arguments::set_parnew_gc_flags() {
+ assert(!UseSerialGC && !UseParallelGC, "control point invariant");
+
+ if (FLAG_IS_DEFAULT(UseParNewGC) && ParallelGCThreads > 1) {
+ FLAG_SET_DEFAULT(UseParNewGC, true);
+ } else if (UseParNewGC && ParallelGCThreads == 0) {
+ FLAG_SET_DEFAULT(ParallelGCThreads, nof_parallel_gc_threads());
+ if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
+ FLAG_SET_DEFAULT(UseParNewGC, false);
+ }
+ }
+ if (!UseParNewGC) {
+ FLAG_SET_DEFAULT(ParallelGCThreads, 0);
+ } else {
+ no_shared_spaces();
+
+ // By default, YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively;
+ // those defaults are tuned for the Parallel Scavenger. For a ParNew+Tenured configuration
+ // we set both to 1024.
+ // See CR 6362902.
+ if (FLAG_IS_DEFAULT(YoungPLABSize)) {
+ FLAG_SET_DEFAULT(YoungPLABSize, (intx)1024);
+ }
+ if (FLAG_IS_DEFAULT(OldPLABSize)) {
+ FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
+ }
+
+ // The AlwaysTenure flag should make ParNew promote everything at the first collection.
+ // See CR 6362902.
+ if (AlwaysTenure) {
+ FLAG_SET_CMDLINE(intx, MaxTenuringThreshold, 0);
+ }
+ }
+}
+
+// CAUTION: this code is currently shared by UseParallelGC, UseParNewGC and
+// UseConcMarkSweepGC. Further tuning of individual collectors might
+// dictate refinement on a per-collector basis.
+int Arguments::nof_parallel_gc_threads() {
+ if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
+ // For very large machines, there are diminishing returns
+ // for large numbers of worker threads. Instead of
+ // hogging the whole system, use 5/8ths of a worker for every
+ // processor after the first 8. For example, on a 72 cpu
+ // machine use 8 + (72 - 8) * (5/8) == 48 worker threads.
+ // This is just a start and needs further tuning and study in
+ // Tiger.
+ int ncpus = os::active_processor_count();
+ return (ncpus <= 8) ? ncpus : 3 + ((ncpus * 5) / 8);
+ } else {
+ return ParallelGCThreads;
+ }
+}
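
The heuristic above uses one worker per CPU up to 8 CPUs and then roughly 5/8 of a worker per additional CPU; the integer form 3 + (ncpus * 5) / 8 reproduces the 72-CPU example in the comment (3 + 45 = 48). A standalone check of the formula:

#include <cstdio>

// Same default sizing as nof_parallel_gc_threads() when ParallelGCThreads is unset.
static int default_parallel_gc_threads(int ncpus) {
  return (ncpus <= 8) ? ncpus : 3 + ((ncpus * 5) / 8);
}

int main() {
  for (int ncpus : {1, 8, 16, 72}) {
    std::printf("%3d cpus -> %d workers\n", ncpus, default_parallel_gc_threads(ncpus));
  }
  return 0;   // 72 cpus -> 48 workers, matching the comment above
}
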
+
+// Adjust some sizes to suit CMS and/or ParNew needs; these work well on
+// sparc/solaris for certain applications, but would gain from
+// further optimization and tuning efforts, and would almost
+// certainly gain from analysis of platform and environment.
+void Arguments::set_cms_and_parnew_gc_flags() {
+ if (UseSerialGC || UseParallelGC) {
+ return;
+ }
+
+ // If we are using CMS, we prefer to also use ParNewGC,
+ // unless it is explicitly forbidden.
+ if (UseConcMarkSweepGC && !UseParNewGC && FLAG_IS_DEFAULT(UseParNewGC)) {
+ FLAG_SET_DEFAULT(UseParNewGC, true);
+ }
+
+ // Turn off AdaptiveSizePolicy by default for CMS until it is
+ // complete. Also turn it off in general if the
+ // ParNew collector has been selected.
+ if ((UseConcMarkSweepGC || UseParNewGC) &&
+ FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) {
+ FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
+ }
+
+ // In either case, adjust ParallelGCThreads and/or UseParNewGC
+ // as needed.
+ set_parnew_gc_flags();
+
+ if (!UseConcMarkSweepGC) {
+ return;
+ }
+
+ // Now make adjustments for CMS
+ size_t young_gen_per_worker;
+ intx new_ratio;
+ size_t min_new_default;
+ intx tenuring_default;
+ if (CMSUseOldDefaults) { // old defaults: "old" as of 6.0
+ if (FLAG_IS_DEFAULT(CMSYoungGenPerWorker)) {
+ FLAG_SET_DEFAULT(CMSYoungGenPerWorker, 4*M);
+ }
+ young_gen_per_worker = 4*M;
+ new_ratio = (intx)15;
+ min_new_default = 4*M;
+ tenuring_default = (intx)0;
+ } else { // new defaults: "new" as of 6.0
+ young_gen_per_worker = CMSYoungGenPerWorker;
+ new_ratio = (intx)7;
+ min_new_default = 16*M;
+ tenuring_default = (intx)4;
+ }
+
+ // Preferred young gen size for "short" pauses
+ const uintx parallel_gc_threads =
+ (ParallelGCThreads == 0 ? 1 : ParallelGCThreads);
+ const size_t preferred_max_new_size_unaligned =
+ ScaleForWordSize(young_gen_per_worker * parallel_gc_threads);
+ const size_t preferred_max_new_size =
+ align_size_up(preferred_max_new_size_unaligned, os::vm_page_size());
+
+ // Unless explicitly requested otherwise, size young gen
+ // for "short" pauses ~ 4M*ParallelGCThreads
+ if (FLAG_IS_DEFAULT(MaxNewSize)) { // MaxNewSize not set at command-line
+ if (!FLAG_IS_DEFAULT(NewSize)) { // NewSize explicitly set at command-line
+ FLAG_SET_DEFAULT(MaxNewSize, MAX2(NewSize, preferred_max_new_size));
+ } else {
+ FLAG_SET_DEFAULT(MaxNewSize, preferred_max_new_size);
+ }
+ }
+ // Unless explicitly requested otherwise, prefer a large
+ // Old to Young gen size so as to shift the collection load
+ // to the old generation concurrent collector
+ if (FLAG_IS_DEFAULT(NewRatio)) {
+ FLAG_SET_DEFAULT(NewRatio, MAX2(NewRatio, new_ratio));
+
+ size_t min_new = align_size_up(ScaleForWordSize(min_new_default), os::vm_page_size());
+ size_t prev_initial_size = initial_heap_size();
+ if (prev_initial_size != 0 && prev_initial_size < min_new+OldSize) {
+ set_initial_heap_size(min_new+OldSize);
+ // Currently minimum size and the initial heap sizes are the same.
+ set_min_heap_size(initial_heap_size());
+ if (PrintGCDetails && Verbose) {
+ warning("Initial heap size increased to " SIZE_FORMAT " M from "
+ SIZE_FORMAT " M; use -XX:NewSize=... for finer control.",
+ initial_heap_size()/M, prev_initial_size/M);
+ }
+ }
+ // MaxHeapSize is aligned down in collectorPolicy
+ size_t max_heap = align_size_down(MaxHeapSize,
+ CardTableRS::ct_max_alignment_constraint());
+
+ if (max_heap > min_new) {
+ // Unless explicitly requested otherwise, make young gen
+ // at least min_new, and at most preferred_max_new_size.
+ if (FLAG_IS_DEFAULT(NewSize)) {
+ FLAG_SET_DEFAULT(NewSize, MAX2(NewSize, min_new));
+ FLAG_SET_DEFAULT(NewSize, MIN2(preferred_max_new_size, NewSize));
+ }
+ // Unless explicitly requested otherwise, size old gen
+ // so that it's at least 3X of NewSize to begin with;
+ // later NewRatio will decide how it grows; see above.
+ if (FLAG_IS_DEFAULT(OldSize)) {
+ if (max_heap > NewSize) {
+ FLAG_SET_DEFAULT(OldSize, MIN2(3*NewSize, max_heap - NewSize));
+ }
+ }
+ }
+ }
+ // Unless explicitly requested otherwise, definitely
+ // promote all objects surviving "tenuring_default" scavenges.
+ if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
+ FLAG_IS_DEFAULT(SurvivorRatio)) {
+ FLAG_SET_DEFAULT(MaxTenuringThreshold, tenuring_default);
+ }
+ // If we decided above (or user explicitly requested)
+ // `promote all' (via MaxTenuringThreshold := 0),
+ // prefer minuscule survivor spaces so as not to waste
+ // space for (non-existent) survivors
+ if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
+ FLAG_SET_DEFAULT(SurvivorRatio, MAX2((intx)1024, SurvivorRatio));
+ }
+ // If OldPLABSize is set and CMSParPromoteBlocksToClaim is not,
+ // set CMSParPromoteBlocksToClaim equal to OldPLABSize.
+ // This is done so that the ParNew+CMS configuration works
+ // with the YoungPLABSize and OldPLABSize options.
+ // See CR 6362902.
+ if (!FLAG_IS_DEFAULT(OldPLABSize)) {
+ if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
+ FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
+ }
+ else {
+ // OldPLABSize and CMSParPromoteBlocksToClaim are both set.
+ // CMSParPromoteBlocksToClaim is a collector-specific flag, so
+ // we'll let it take precedence.
+ jio_fprintf(defaultStream::error_stream(),
+ "Both OldPLABSize and CMSParPromoteBlocksToClaim options are specified "
+ "for the CMS collector. CMSParPromoteBlocksToClaim will take precedence.\n");
+ }
+ }
+}
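
With the defaults chosen above, the preferred MaxNewSize comes out to roughly young_gen_per_worker bytes per parallel GC worker, rounded up to a page boundary. A standalone sketch of that calculation; the 16M per-worker value and 4K page size are assumptions, and the ScaleForWordSize() adjustment is omitted:

#include <cstddef>
#include <cstdio>

// Round 'size' up to a multiple of 'alignment' (a power of two), like align_size_up().
static size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t M = 1024 * 1024;
  const size_t young_gen_per_worker = 16 * M;   // assumed CMSYoungGenPerWorker value
  const size_t page_size = 4096;                // assumed os::vm_page_size()
  for (int workers : {1, 4, 8}) {
    size_t preferred_max_new = align_up(young_gen_per_worker * (size_t)workers, page_size);
    std::printf("%d workers -> preferred MaxNewSize %zu MB\n", workers, preferred_max_new / M);
  }
  return 0;
}
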
+
+bool Arguments::should_auto_select_low_pause_collector() {
+ if (UseAutoGCSelectPolicy &&
+ !FLAG_IS_DEFAULT(MaxGCPauseMillis) &&
+ (MaxGCPauseMillis <= AutoGCSelectPauseMillis)) {
+ if (PrintGCDetails) {
+ // Cannot use gclog_or_tty yet.
+ tty->print_cr("Automatic selection of the low pause collector"
+ " based on pause goal of %d (ms)", MaxGCPauseMillis);
+ }
+ return true;
+ }
+ return false;
+}
+
+void Arguments::set_ergonomics_flags() {
+ // Parallel GC is not compatible with sharing. If sharing is requested
+ // explicitly, do not set the ergonomics flags.
+ if (DumpSharedSpaces || ForceSharedSpaces) {
+ return;
+ }
+
+ if (os::is_server_class_machine() && !force_client_mode ) {
+ // If no other collector is requested explicitly,
+ // let the VM select the collector based on
+ // machine class and automatic selection policy.
+ if (!UseSerialGC &&
+ !UseConcMarkSweepGC &&
+ !UseParNewGC &&
+ !DumpSharedSpaces &&
+ FLAG_IS_DEFAULT(UseParallelGC)) {
+ if (should_auto_select_low_pause_collector()) {
+ FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
+ set_cms_and_parnew_gc_flags();
+ } else {
+ FLAG_SET_ERGO(bool, UseParallelGC, true);
+ }
+ no_shared_spaces();
+ }
+
+ // This is done here because the parallel collector could
+ // have been selected above, in which case this initialization
+ // still needs to be done.
+ set_parallel_gc_flags();
+ }
+}
+
+void Arguments::set_parallel_gc_flags() {
+ // If parallel old was requested, automatically enable parallel scavenge.
+ if (UseParallelOldGC && !UseParallelGC && FLAG_IS_DEFAULT(UseParallelGC)) {
+ FLAG_SET_DEFAULT(UseParallelGC, true);
+ }
+
+ // If no heap maximum was requested explicitly, use some reasonable fraction
+ // of the physical memory, up to a maximum of 1GB.
+ if (UseParallelGC) {
+ if (FLAG_IS_DEFAULT(MaxHeapSize)) {
+ const uint64_t reasonable_fraction =
+ os::physical_memory() / DefaultMaxRAMFraction;
+ const uint64_t maximum_size = (uint64_t) DefaultMaxRAM;
+ size_t reasonable_max =
+ (size_t) os::allocatable_physical_memory(reasonable_fraction);
+ if (reasonable_max > maximum_size) {
+ reasonable_max = maximum_size;
+ }
+ if (PrintGCDetails && Verbose) {
+ // Cannot use gclog_or_tty yet.
+ tty->print_cr(" Max heap size for server class platform "
+ SIZE_FORMAT, reasonable_max);
+ }
+ // If the initial_heap_size has not been set with -Xms,
+ // then set it as a fraction of the physical memory,
+ // respecting the maximum and minimum sizes of the heap.
+ if (initial_heap_size() == 0) {
+ const uint64_t reasonable_initial_fraction =
+ os::physical_memory() / DefaultInitialRAMFraction;
+ const size_t reasonable_initial =
+ (size_t) os::allocatable_physical_memory(reasonable_initial_fraction);
+ const size_t minimum_size = NewSize + OldSize;
+ set_initial_heap_size(MAX2(MIN2(reasonable_initial, reasonable_max),
+ minimum_size));
+ // Currently the minimum size and the initial heap sizes are the same.
+ set_min_heap_size(initial_heap_size());
+ if (PrintGCDetails && Verbose) {
+ // Cannot use gclog_or_tty yet.
+ tty->print_cr(" Initial heap size for server class platform "
+ SIZE_FORMAT, initial_heap_size());
+ }
+ } else {
+ // A minimum size was specified on the command line. Be sure
+ // that the maximum size is consistent.
+ if (initial_heap_size() > reasonable_max) {
+ reasonable_max = initial_heap_size();
+ }
+ }
+ FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx) reasonable_max);
+ }
+
+ // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the
+ // SurvivorRatio has been set, reset their default values to SurvivorRatio +
+ // 2. By doing this we make SurvivorRatio also work for Parallel Scavenger.
+ // See CR 6362902 for details.
+ if (!FLAG_IS_DEFAULT(SurvivorRatio)) {
+ if (FLAG_IS_DEFAULT(InitialSurvivorRatio)) {
+ FLAG_SET_DEFAULT(InitialSurvivorRatio, SurvivorRatio + 2);
+ }
+ if (FLAG_IS_DEFAULT(MinSurvivorRatio)) {
+ FLAG_SET_DEFAULT(MinSurvivorRatio, SurvivorRatio + 2);
+ }
+ }
+
+ if (UseParallelOldGC) {
+ // Par compact uses lower default values since they are treated as
+ // minimums.
+ if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
+ MarkSweepDeadRatio = 1;
+ }
+ if (FLAG_IS_DEFAULT(PermMarkSweepDeadRatio)) {
+ PermMarkSweepDeadRatio = 5;
+ }
+ }
+ }
+}
+
+// This must be called after ergonomics because we want bytecode rewriting
+// if the server compiler is used, or if UseSharedSpaces is disabled.
+void Arguments::set_bytecode_flags() {
+ // Better not attempt to store into a read-only space.
+ if (UseSharedSpaces) {
+ FLAG_SET_DEFAULT(RewriteBytecodes, false);
+ FLAG_SET_DEFAULT(RewriteFrequentPairs, false);
+ }
+
+ if (!RewriteBytecodes) {
+ FLAG_SET_DEFAULT(RewriteFrequentPairs, false);
+ }
+}
+
+// Aggressive optimization flags -XX:+AggressiveOpts
+void Arguments::set_aggressive_opts_flags() {
+ if (AggressiveOpts) {
+NOT_WINDOWS(
+ // No measured benefit on Windows
+ if (FLAG_IS_DEFAULT(CacheTimeMillis)) {
+ FLAG_SET_DEFAULT(CacheTimeMillis, true);
+ }
+)
+ }
+}
+
+//===========================================================================================================
+// Parsing of java.compiler property
+
+void Arguments::process_java_compiler_argument(char* arg) {
+ // For backwards compatibility, Djava.compiler=NONE or ""
+ // causes us to switch to -Xint mode UNLESS -Xdebug
+ // is also specified.
+ if (strlen(arg) == 0 || strcasecmp(arg, "NONE") == 0) {
+ set_java_compiler(true); // "-Djava.compiler[=...]" most recently seen.
+ }
+}
+
+void Arguments::process_java_launcher_argument(const char* launcher, void* extra_info) {
+ _sun_java_launcher = strdup(launcher);
+}
+
+bool Arguments::created_by_java_launcher() {
+ assert(_sun_java_launcher != NULL, "property must have value");
+ return strcmp(DEFAULT_JAVA_LAUNCHER, _sun_java_launcher) != 0;
+}
+
+//===========================================================================================================
+// Parsing of main arguments
+
+bool Arguments::verify_percentage(uintx value, const char* name) {
+ if (value <= 100) {
+ return true;
+ }
+ jio_fprintf(defaultStream::error_stream(),
+ "%s of " UINTX_FORMAT " is invalid; must be between 0 and 100\n",
+ name, value);
+ return false;
+}
+
+static void set_serial_gc_flags() {
+ FLAG_SET_DEFAULT(UseSerialGC, true);
+ FLAG_SET_DEFAULT(UseParNewGC, false);
+ FLAG_SET_DEFAULT(UseConcMarkSweepGC, false);
+ FLAG_SET_DEFAULT(UseParallelGC, false);
+ FLAG_SET_DEFAULT(UseParallelOldGC, false);
+}
+
+static bool verify_serial_gc_flags() {
+ return (UseSerialGC &&
+ !(UseParNewGC || UseConcMarkSweepGC || UseParallelGC ||
+ UseParallelOldGC));
+}
+
+// Check the consistency of vm_init_args
+bool Arguments::check_vm_args_consistency() {
+ // Method for adding checks for flag consistency.
+ // The intent is to warn the user of all possible conflicts,
+ // before returning an error.
+ // Note: Needs platform-dependent factoring.
+ bool status = true;
+
+#if (defined(COMPILER2) && defined(SPARC))
+ // NOTE: The call to VM_Version_init depends on the fact that VM_Version_init
+ // on sparc doesn't require generation of a stub as is the case on, e.g.,
+ // x86. Normally, VM_Version_init must be called from init_globals in
+ // init.cpp, which is called by the initial java thread *after* arguments
+ // have been parsed. VM_Version_init gets called twice on sparc.
+ extern void VM_Version_init();
+ VM_Version_init();
+ if (!VM_Version::has_v9()) {
+ jio_fprintf(defaultStream::error_stream(),
+ "V8 Machine detected, Server requires V9\n");
+ status = false;
+ }
+#endif /* COMPILER2 && SPARC */
+
+ // Allow both -XX:-UseStackBanging and -XX:-UseBoundThreads in non-product
+ // builds so the cost of stack banging can be measured.
+#if (defined(PRODUCT) && defined(SOLARIS))
+ if (!UseBoundThreads && !UseStackBanging) {
+ jio_fprintf(defaultStream::error_stream(),
+ "-UseStackBanging conflicts with -UseBoundThreads\n");
+
+ status = false;
+ }
+#endif
+
+ if (TLABRefillWasteFraction == 0) {
+ jio_fprintf(defaultStream::error_stream(),
+ "TLABRefillWasteFraction should be a denominator, "
+ "not " SIZE_FORMAT "\n",
+ TLABRefillWasteFraction);
+ status = false;
+ }
+
+ status &= verify_percentage(MaxLiveObjectEvacuationRatio,
+ "MaxLiveObjectEvacuationRatio");
+ status &= verify_percentage(AdaptiveSizePolicyWeight,
+ "AdaptiveSizePolicyWeight");
+ status &= verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight");
+ status &= verify_percentage(ThresholdTolerance, "ThresholdTolerance");
+ status &= verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio");
+ status &= verify_percentage(MaxHeapFreeRatio, "MaxHeapFreeRatio");
+
+ if (MinHeapFreeRatio > MaxHeapFreeRatio) {
+ jio_fprintf(defaultStream::error_stream(),
+ "MinHeapFreeRatio (" UINTX_FORMAT ") must be less than or "
+ "equal to MaxHeapFreeRatio (" UINTX_FORMAT ")\n",
+ MinHeapFreeRatio, MaxHeapFreeRatio);
+ status = false;
+ }
+ // Keeping the heap 100% free is hard ;-) so limit it to 99%.
+ MinHeapFreeRatio = MIN2(MinHeapFreeRatio, (uintx) 99);
+
+ if (FullGCALot && FLAG_IS_DEFAULT(MarkSweepAlwaysCompactCount)) {
+ MarkSweepAlwaysCompactCount = 1; // Move objects every gc.
+ }
+
+ status &= verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
+ status &= verify_percentage(GCTimeLimit, "GCTimeLimit");
+ if (GCTimeLimit == 100) {
+ // Turn off gc-overhead-limit-exceeded checks
+ FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
+ }
+
+ // Check user specified sharing option conflict with Parallel GC
+ bool cannot_share = (UseConcMarkSweepGC || UseParallelGC ||
+ UseParallelOldGC || UseParNewGC ||
+ SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages));
+
+ if (cannot_share) {
+ // Either force sharing on by forcing the other options off, or
+ // force sharing off.
+ if (DumpSharedSpaces || ForceSharedSpaces) {
+ set_serial_gc_flags();
+ FLAG_SET_DEFAULT(SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages), false);
+ } else {
+ no_shared_spaces();
+ }
+ }
+
+ // Ensure that the user has not selected conflicting sets
+ // of collectors. [Note: this check is merely a user convenience;
+ // collectors over-ride each other so that only a non-conflicting
+ // set is selected; however what the user gets is not what they
+ // may have expected from the combination they asked for. It's
+ // better to reduce user confusion by not allowing them to
+ // select conflicting combinations.]
+ uint i = 0;
+ if (UseSerialGC) i++;
+ if (UseConcMarkSweepGC || UseParNewGC) i++;
+ if (UseParallelGC || UseParallelOldGC) i++;
+ if (i > 1) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Conflicting collector combinations in option list; "
+ "please refer to the release notes for the combinations "
+ "allowed\n");
+ status = false;
+ }
+
+ if (_has_alloc_profile) {
+ if (UseParallelGC || UseParallelOldGC) {
+ jio_fprintf(defaultStream::error_stream(),
+ "error: invalid argument combination.\n"
+ "Allocation profiling (-Xaprof) cannot be used together with "
+ "Parallel GC (-XX:+UseParallelGC or -XX:+UseParallelOldGC).\n");
+ status = false;
+ }
+ if (UseConcMarkSweepGC) {
+ jio_fprintf(defaultStream::error_stream(),
+ "error: invalid argument combination.\n"
+ "Allocation profiling (-Xaprof) cannot be used together with "
+ "the CMS collector (-XX:+UseConcMarkSweepGC).\n");
+ status = false;
+ }
+ }
+
+ if (CMSIncrementalMode) {
+ if (!UseConcMarkSweepGC) {
+ jio_fprintf(defaultStream::error_stream(),
+ "error: invalid argument combination.\n"
+ "The CMS collector (-XX:+UseConcMarkSweepGC) must be "
+ "selected in order\nto use CMSIncrementalMode.\n");
+ status = false;
+ } else if (!UseTLAB) {
+ jio_fprintf(defaultStream::error_stream(),
+ "error: CMSIncrementalMode requires thread-local "
+ "allocation buffers\n(-XX:+UseTLAB).\n");
+ status = false;
+ } else {
+ status &= verify_percentage(CMSIncrementalDutyCycle,
+ "CMSIncrementalDutyCycle");
+ status &= verify_percentage(CMSIncrementalDutyCycleMin,
+ "CMSIncrementalDutyCycleMin");
+ status &= verify_percentage(CMSIncrementalSafetyFactor,
+ "CMSIncrementalSafetyFactor");
+ status &= verify_percentage(CMSIncrementalOffset,
+ "CMSIncrementalOffset");
+ status &= verify_percentage(CMSExpAvgFactor,
+ "CMSExpAvgFactor");
+ // If it was not set on the command line, set
+ // CMSInitiatingOccupancyFraction to 1 so icms can initiate cycles early.
+ if (CMSInitiatingOccupancyFraction < 0) {
+ FLAG_SET_DEFAULT(CMSInitiatingOccupancyFraction, 1);
+ }
+ }
+ }
+
+ if (UseNUMA && !UseTLAB) {
+ jio_fprintf(defaultStream::error_stream(),
+ "error: NUMA allocator (-XX:+UseNUMA) requires thread-local "
+ "allocation\nbuffers (-XX:+UseTLAB).\n");
+ status = false;
+ }
+
+ // CMS space iteration, which FLSVerifyAllHeapreferences entails,
+ // insists that we hold the requisite locks so that the iteration is
+ // MT-safe. For the verification at start-up and shut-down, we don't
+ // yet have a good way of acquiring and releasing these locks,
+ // which are not visible at the CollectedHeap level. We want to
+ // be able to acquire these locks and then do the iteration rather
+ // than just disable the lock verification. This will be fixed under
+ // bug 4788986.
+ if (UseConcMarkSweepGC && FLSVerifyAllHeapReferences) {
+ if (VerifyGCStartAt == 0) {
+ warning("Heap verification at start-up disabled "
+ "(due to current incompatibility with FLSVerifyAllHeapReferences)");
+ VerifyGCStartAt = 1; // Disable verification at start-up
+ }
+ if (VerifyBeforeExit) {
+ warning("Heap verification at shutdown disabled "
+ "(due to current incompatibility with FLSVerifyAllHeapReferences)");
+ VerifyBeforeExit = false; // Disable verification at shutdown
+ }
+ }
+
+ // Note: only executed in non-PRODUCT mode
+ if (!UseAsyncConcMarkSweepGC &&
+ (ExplicitGCInvokesConcurrent ||
+ ExplicitGCInvokesConcurrentAndUnloadsClasses)) {
+ jio_fprintf(defaultStream::error_stream(),
+ "error: +ExplictGCInvokesConcurrent[AndUnloadsClasses] conflicts"
+ " with -UseAsyncConcMarkSweepGC");
+ status = false;
+ }
+
+ return status;
+}
+
+bool Arguments::is_bad_option(const JavaVMOption* option, jboolean ignore,
+ const char* option_type) {
+ if (ignore) return false;
+
+ const char* spacer = " ";
+ if (option_type == NULL) {
+ option_type = ++spacer; // Set both to the empty string.
+ }
+
+ if (os::obsolete_option(option)) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Obsolete %s%soption: %s\n", option_type, spacer,
+ option->optionString);
+ return false;
+ } else {
+ jio_fprintf(defaultStream::error_stream(),
+ "Unrecognized %s%soption: %s\n", option_type, spacer,
+ option->optionString);
+ return true;
+ }
+}
+
+static const char* user_assertion_options[] = {
+ "-da", "-ea", "-disableassertions", "-enableassertions", 0
+};
+
+static const char* system_assertion_options[] = {
+ "-dsa", "-esa", "-disablesystemassertions", "-enablesystemassertions", 0
+};
+
+// Return true if any of the strings in null-terminated array 'names' matches.
+// If tail_allowed is true, then the tail must begin with a colon; otherwise,
+// the option must match exactly.
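+// For example, "-ea:com.example.Foo" matches "-ea" with tail ":com.example.Foo"
+// when tail_allowed is true, while "-esa" must match with an empty tail.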
+static bool match_option(const JavaVMOption* option, const char** names, const char** tail,
+ bool tail_allowed) {
+ for (/* empty */; *names != NULL; ++names) {
+ if (match_option(option, *names, tail)) {
+ if (**tail == '\0' || (tail_allowed && **tail == ':')) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
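+// Parse a memory-size argument such as "64m" into a byte count (e.g. 64*M,
+// assuming atomll accepts the usual k/m/g suffixes) and range-check it
+// against min_size.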
+Arguments::ArgsRange Arguments::parse_memory_size(const char* s,
+ jlong* long_arg,
+ jlong min_size) {
+ if (!atomll(s, long_arg)) return arg_unreadable;
+ return check_memory_size(*long_arg, min_size);
+}
+
+// Parse JavaVMInitArgs structure
+
+jint Arguments::parse_vm_init_args(const JavaVMInitArgs* args) {
+ // For components of the system classpath.
+ SysClassPath scp(Arguments::get_sysclasspath());
+ bool scp_assembly_required = false;
+
+ // Save default settings for some mode flags
+ Arguments::_AlwaysCompileLoopMethods = AlwaysCompileLoopMethods;
+ Arguments::_UseOnStackReplacement = UseOnStackReplacement;
+ Arguments::_ClipInlining = ClipInlining;
+ Arguments::_BackgroundCompilation = BackgroundCompilation;
+ Arguments::_Tier2CompileThreshold = Tier2CompileThreshold;
+
+ // Parse JAVA_TOOL_OPTIONS environment variable (if present)
+ jint result = parse_java_tool_options_environment_variable(&scp, &scp_assembly_required);
+ if (result != JNI_OK) {
+ return result;
+ }
+
+ // Parse JavaVMInitArgs structure passed in
+ result = parse_each_vm_init_arg(args, &scp, &scp_assembly_required, COMMAND_LINE);
+ if (result != JNI_OK) {
+ return result;
+ }
+
+ // Parse _JAVA_OPTIONS environment variable (if present) (mimics classic VM)
+ result = parse_java_options_environment_variable(&scp, &scp_assembly_required);
+ if (result != JNI_OK) {
+ return result;
+ }
+
+ // Do final processing now that all arguments have been parsed
+ result = finalize_vm_init_args(&scp, scp_assembly_required);
+ if (result != JNI_OK) {
+ return result;
+ }
+
+ return JNI_OK;
+}
+
+
+jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
+ SysClassPath* scp_p,
+ bool* scp_assembly_required_p,
+ FlagValueOrigin origin) {
+ // Remaining part of option string
+ const char* tail;
+
+ // iterate over arguments
+ for (int index = 0; index < args->nOptions; index++) {
+ bool is_absolute_path = false; // for -agentpath vs -agentlib
+
+ const JavaVMOption* option = args->options + index;
+
+ if (!match_option(option, "-Djava.class.path", &tail) &&
+ !match_option(option, "-Dsun.java.command", &tail) &&
+ !match_option(option, "-Dsun.java.launcher", &tail)) {
+
+ // Add all JVM options to the jvm_args string. This string
+ // is used later to set the java.vm.args PerfData string constant.
+ // The -Djava.class.path and -Dsun.java.command options are
+ // omitted from the jvm_args string as each has its own PerfData
+ // string constant object; -Dsun.java.launcher is excluded as well.
+ build_jvm_args(option->optionString);
+ }
+
+ // -verbose:[class/gc/jni]
+ if (match_option(option, "-verbose", &tail)) {
+ if (!strcmp(tail, ":class") || !strcmp(tail, "")) {
+ FLAG_SET_CMDLINE(bool, TraceClassLoading, true);
+ FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
+ } else if (!strcmp(tail, ":gc")) {
+ FLAG_SET_CMDLINE(bool, PrintGC, true);
+ FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
+ } else if (!strcmp(tail, ":jni")) {
+ FLAG_SET_CMDLINE(bool, PrintJNIResolving, true);
+ }
+ // -da / -ea / -disableassertions / -enableassertions
+ // These accept an optional class/package name separated by a colon, e.g.,
+ // -da:java.lang.Thread.
+ } else if (match_option(option, user_assertion_options, &tail, true)) {
+ bool enable = option->optionString[1] == 'e'; // char after '-' is 'e'
+ if (*tail == '\0') {
+ JavaAssertions::setUserClassDefault(enable);
+ } else {
+ assert(*tail == ':', "bogus match by match_option()");
+ JavaAssertions::addOption(tail + 1, enable);
+ }
+ // -dsa / -esa / -disablesystemassertions / -enablesystemassertions
+ } else if (match_option(option, system_assertion_options, &tail, false)) {
+ bool enable = option->optionString[1] == 'e'; // char after '-' is 'e'
+ JavaAssertions::setSystemClassDefault(enable);
+ // -bootclasspath:
+ } else if (match_option(option, "-Xbootclasspath:", &tail)) {
+ scp_p->reset_path(tail);
+ *scp_assembly_required_p = true;
+ // -bootclasspath/a:
+ } else if (match_option(option, "-Xbootclasspath/a:", &tail)) {
+ scp_p->add_suffix(tail);
+ *scp_assembly_required_p = true;
+ // -bootclasspath/p:
+ } else if (match_option(option, "-Xbootclasspath/p:", &tail)) {
+ scp_p->add_prefix(tail);
+ *scp_assembly_required_p = true;
+ // -Xrun
+ } else if (match_option(option, "-Xrun", &tail)) {
+ if(tail != NULL) {
+ const char* pos = strchr(tail, ':');
+ size_t len = (pos == NULL) ? strlen(tail) : pos - tail;
+ char* name = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len + 1), tail, len);
+ name[len] = '\0';
+
+ char *options = NULL;
+ if(pos != NULL) {
+ size_t len2 = strlen(pos+1) + 1; // options start after ':'. Final zero must be copied.
+ options = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len2), pos+1, len2);
+ }
+#ifdef JVMTI_KERNEL
+ if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
+ warning("profiling and debugging agents are not supported with Kernel VM");
+ } else
+#endif // JVMTI_KERNEL
+ add_init_library(name, options);
+ }
+ // -agentlib and -agentpath
+ } else if (match_option(option, "-agentlib:", &tail) ||
+ (is_absolute_path = match_option(option, "-agentpath:", &tail))) {
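+ // For example, -agentlib:jdwp=transport=dt_socket yields name "jdwp" and
+ // options "transport=dt_socket"; -agentpath additionally marks the name
+ // as an absolute library path.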
+ if(tail != NULL) {
+ const char* pos = strchr(tail, '=');
+ size_t len = (pos == NULL) ? strlen(tail) : pos - tail;
+ char* name = strncpy(NEW_C_HEAP_ARRAY(char, len + 1), tail, len);
+ name[len] = '\0';
+
+ char *options = NULL;
+ if(pos != NULL) {
+ options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(pos + 1) + 1), pos + 1);
+ }
+#ifdef JVMTI_KERNEL
+ if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
+ warning("profiling and debugging agents are not supported with Kernel VM");
+ } else
+#endif // JVMTI_KERNEL
+ add_init_agent(name, options, is_absolute_path);
+
+ }
+ // -javaagent
+ } else if (match_option(option, "-javaagent:", &tail)) {
+ if(tail != NULL) {
+ char *options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(tail) + 1), tail);
+ add_init_agent("instrument", options, false);
+ }
+ // -Xnoclassgc
+ } else if (match_option(option, "-Xnoclassgc", &tail)) {
+ FLAG_SET_CMDLINE(bool, ClassUnloading, false);
+ // -Xincgc: i-CMS
+ } else if (match_option(option, "-Xincgc", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true);
+ FLAG_SET_CMDLINE(bool, CMSIncrementalMode, true);
+ // -Xnoincgc: no i-CMS
+ } else if (match_option(option, "-Xnoincgc", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, false);
+ FLAG_SET_CMDLINE(bool, CMSIncrementalMode, false);
+ // -Xconcgc
+ } else if (match_option(option, "-Xconcgc", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true);
+ // -Xnoconcgc
+ } else if (match_option(option, "-Xnoconcgc", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, false);
+ // -Xbatch
+ } else if (match_option(option, "-Xbatch", &tail)) {
+ FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
+ // -Xmn for compatibility with other JVM vendors
+ } else if (match_option(option, "-Xmn", &tail)) {
+ jlong long_initial_eden_size = 0;
+ ArgsRange errcode = parse_memory_size(tail, &long_initial_eden_size, 1);
+ if (errcode != arg_in_range) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid initial eden size: %s\n", option->optionString);
+ describe_range_error(errcode);
+ return JNI_EINVAL;
+ }
+ FLAG_SET_CMDLINE(uintx, MaxNewSize, (size_t) long_initial_eden_size);
+ FLAG_SET_CMDLINE(uintx, NewSize, (size_t) long_initial_eden_size);
+ // -Xms
+ } else if (match_option(option, "-Xms", &tail)) {
+ jlong long_initial_heap_size = 0;
+ ArgsRange errcode = parse_memory_size(tail, &long_initial_heap_size, 1);
+ if (errcode != arg_in_range) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid initial heap size: %s\n", option->optionString);
+ describe_range_error(errcode);
+ return JNI_EINVAL;
+ }
+ set_initial_heap_size((size_t) long_initial_heap_size);
+ // Currently the minimum size and the initial heap sizes are the same.
+ set_min_heap_size(initial_heap_size());
+ // -Xmx
+ } else if (match_option(option, "-Xmx", &tail)) {
+ jlong long_max_heap_size = 0;
+ ArgsRange errcode = parse_memory_size(tail, &long_max_heap_size, 1);
+ if (errcode != arg_in_range) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid maximum heap size: %s\n", option->optionString);
+ describe_range_error(errcode);
+ return JNI_EINVAL;
+ }
+ FLAG_SET_CMDLINE(uintx, MaxHeapSize, (size_t) long_max_heap_size);
+ // -Xmaxf
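+ // For example, -Xmaxf0.5 sets MaxHeapFreeRatio to 50.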
+ } else if (match_option(option, "-Xmaxf", &tail)) {
+ int maxf = (int)(atof(tail) * 100);
+ if (maxf < 0 || maxf > 100) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Bad max heap free percentage size: %s\n",
+ option->optionString);
+ return JNI_EINVAL;
+ } else {
+ FLAG_SET_CMDLINE(uintx, MaxHeapFreeRatio, maxf);
+ }
+ // -Xminf
+ } else if (match_option(option, "-Xminf", &tail)) {
+ int minf = (int)(atof(tail) * 100);
+ if (minf < 0 || minf > 100) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Bad min heap free percentage size: %s\n",
+ option->optionString);
+ return JNI_EINVAL;
+ } else {
+ FLAG_SET_CMDLINE(uintx, MinHeapFreeRatio, minf);
+ }
+ // -Xss
+ } else if (match_option(option, "-Xss", &tail)) {
+ jlong long_ThreadStackSize = 0;
+ ArgsRange errcode = parse_memory_size(tail, &long_ThreadStackSize, 1000);
+ if (errcode != arg_in_range) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid thread stack size: %s\n", option->optionString);
+ describe_range_error(errcode);
+ return JNI_EINVAL;
+ }
+ // Internally track ThreadStackSize in units of 1024 bytes.
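+ // For example, -Xss512k results in ThreadStackSize == 512.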
+ FLAG_SET_CMDLINE(intx, ThreadStackSize,
+ round_to((int)long_ThreadStackSize, K) / K);
+ // -Xoss
+ } else if (match_option(option, "-Xoss", &tail)) {
+ // HotSpot does not have separate native and Java stacks, ignore silently for compatibility
+ // -Xmaxjitcodesize
+ } else if (match_option(option, "-Xmaxjitcodesize", &tail)) {
+ jlong long_ReservedCodeCacheSize = 0;
+ ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize,
+ InitialCodeCacheSize);
+ if (errcode != arg_in_range) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid maximum code cache size: %s\n",
+ option->optionString);
+ describe_range_error(errcode);
+ return JNI_EINVAL;
+ }
+ FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize);
+ // -green
+ } else if (match_option(option, "-green", &tail)) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Green threads support not available\n");
+ return JNI_EINVAL;
+ // -native
+ } else if (match_option(option, "-native", &tail)) {
+ // HotSpot always uses native threads, ignore silently for compatibility
+ // -Xsqnopause
+ } else if (match_option(option, "-Xsqnopause", &tail)) {
+ // EVM option, ignore silently for compatibility
+ // -Xrs
+ } else if (match_option(option, "-Xrs", &tail)) {
+ // Classic/EVM option, new functionality
+ FLAG_SET_CMDLINE(bool, ReduceSignalUsage, true);
+ } else if (match_option(option, "-Xusealtsigs", &tail)) {
+ // change default internal VM signals used - lower case for back compat
+ FLAG_SET_CMDLINE(bool, UseAltSigs, true);
+ // -Xoptimize
+ } else if (match_option(option, "-Xoptimize", &tail)) {
+ // EVM option, ignore silently for compatibility
+ // -Xprof
+ } else if (match_option(option, "-Xprof", &tail)) {
+#ifndef FPROF_KERNEL
+ _has_profile = true;
+#else // FPROF_KERNEL
+ // do we have to exit?
+ warning("Kernel VM does not support flat profiling.");
+#endif // FPROF_KERNEL
+ // -Xaprof
+ } else if (match_option(option, "-Xaprof", &tail)) {
+ _has_alloc_profile = true;
+ // -Xconcurrentio
+ } else if (match_option(option, "-Xconcurrentio", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseLWPSynchronization, true);
+ FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
+ FLAG_SET_CMDLINE(intx, DeferThrSuspendLoopCount, 1);
+ FLAG_SET_CMDLINE(bool, UseTLAB, false);
+ FLAG_SET_CMDLINE(uintx, NewSizeThreadIncrease, 16 * K); // 16K bytes per thread added to new generation
+
+ // -Xinternalversion
+ } else if (match_option(option, "-Xinternalversion", &tail)) {
+ jio_fprintf(defaultStream::output_stream(), "%s\n",
+ VM_Version::internal_vm_info_string());
+ vm_exit(0);
+#ifndef PRODUCT
+ // -Xprintflags
+ } else if (match_option(option, "-Xprintflags", &tail)) {
+ CommandLineFlags::printFlags();
+ vm_exit(0);
+#endif
+ // -D
+ } else if (match_option(option, "-D", &tail)) {
+ if (!add_property(tail)) {
+ return JNI_ENOMEM;
+ }
+ // Out of the box management support
+ if (match_option(option, "-Dcom.sun.management", &tail)) {
+ FLAG_SET_CMDLINE(bool, ManagementServer, true);
+ }
+ // -Xint
+ } else if (match_option(option, "-Xint", &tail)) {
+ set_mode_flags(_int);
+ // -Xmixed
+ } else if (match_option(option, "-Xmixed", &tail)) {
+ set_mode_flags(_mixed);
+ // -Xcomp
+ } else if (match_option(option, "-Xcomp", &tail)) {
+ // for testing the compiler; turn off all flags that inhibit compilation
+ set_mode_flags(_comp);
+
+ // -Xshare:dump
+ } else if (match_option(option, "-Xshare:dump", &tail)) {
+#ifdef TIERED
+ FLAG_SET_CMDLINE(bool, DumpSharedSpaces, true);
+ set_mode_flags(_int); // Prevent compilation, which creates objects
+#elif defined(COMPILER2)
+ vm_exit_during_initialization(
+ "Dumping a shared archive is not supported on the Server JVM.", NULL);
+#elif defined(KERNEL)
+ vm_exit_during_initialization(
+ "Dumping a shared archive is not supported on the Kernel JVM.", NULL);
+#else
+ FLAG_SET_CMDLINE(bool, DumpSharedSpaces, true);
+ set_mode_flags(_int); // Prevent compilation, which creates objects
+#endif
+ // -Xshare:on
+ } else if (match_option(option, "-Xshare:on", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseSharedSpaces, true);
+ FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true);
+#ifdef TIERED
+ FLAG_SET_CMDLINE(bool, ForceSharedSpaces, true);
+#endif // TIERED
+ // -Xshare:auto
+ } else if (match_option(option, "-Xshare:auto", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseSharedSpaces, true);
+ FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false);
+ // -Xshare:off
+ } else if (match_option(option, "-Xshare:off", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseSharedSpaces, false);
+ FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false);
+
+ // -Xverify
+ } else if (match_option(option, "-Xverify", &tail)) {
+ if (strcmp(tail, ":all") == 0 || strcmp(tail, "") == 0) {
+ FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, true);
+ FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, true);
+ } else if (strcmp(tail, ":remote") == 0) {
+ FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, false);
+ FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, true);
+ } else if (strcmp(tail, ":none") == 0) {
+ FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, false);
+ FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, false);
+ } else if (is_bad_option(option, args->ignoreUnrecognized, "verification")) {
+ return JNI_EINVAL;
+ }
+ // -Xdebug
+ } else if (match_option(option, "-Xdebug", &tail)) {
+ // Record that this flag has been seen, then otherwise ignore it.
+ set_xdebug_mode(true);
+ // -Xnoagent
+ } else if (match_option(option, "-Xnoagent", &tail)) {
+ // For compatibility with classic. HotSpot refuses to load the old style agent.dll.
+ } else if (match_option(option, "-Xboundthreads", &tail)) {
+ // Bind user level threads to kernel threads (Solaris only)
+ FLAG_SET_CMDLINE(bool, UseBoundThreads, true);
+ } else if (match_option(option, "-Xloggc:", &tail)) {
+ // Redirect GC output to the file. -Xloggc:<filename>
+ // ostream_init_log(), when called, will use this filename
+ // to initialize a fileStream.
+ _gc_log_filename = strdup(tail);
+ FLAG_SET_CMDLINE(bool, PrintGC, true);
+ FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true);
+ FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
+
+ // JNI hooks
+ } else if (match_option(option, "-Xcheck", &tail)) {
+ if (!strcmp(tail, ":jni")) {
+ CheckJNICalls = true;
+ } else if (is_bad_option(option, args->ignoreUnrecognized,
+ "check")) {
+ return JNI_EINVAL;
+ }
+ } else if (match_option(option, "vfprintf", &tail)) {
+ _vfprintf_hook = CAST_TO_FN_PTR(vfprintf_hook_t, option->extraInfo);
+ } else if (match_option(option, "exit", &tail)) {
+ _exit_hook = CAST_TO_FN_PTR(exit_hook_t, option->extraInfo);
+ } else if (match_option(option, "abort", &tail)) {
+ _abort_hook = CAST_TO_FN_PTR(abort_hook_t, option->extraInfo);
+ // -XX:+AggressiveHeap
+ } else if (match_option(option, "-XX:+AggressiveHeap", &tail)) {
+
+ // This option inspects the machine and attempts to set various
+ // parameters to be optimal for long-running, memory allocation
+ // intensive jobs. It is intended for machines with large
+ // amounts of cpu and memory.
+
+ // initHeapSize is needed since _initial_heap_size is 4 bytes on a 32-bit
+ // VM, but we may not be able to represent the total physical memory
+ // available (like having 8 GB of memory on a box but using a 32-bit VM).
+ // Thus, we need to make sure we're using a julong for intermediate
+ // calculations.
+ julong initHeapSize;
+ julong total_memory = os::physical_memory();
+
+ if (total_memory < (julong)256*M) {
+ jio_fprintf(defaultStream::error_stream(),
+ "You need at least 256mb of memory to use -XX:+AggressiveHeap\n");
+ vm_exit(1);
+ }
+
+ // The heap size is half of available memory, or (at most)
+ // all available memory less 160 MB (leaving room for the OS
+ // when using ISM). This is the maximum; because adaptive sizing
+ // is turned on below, the actual space used may be smaller.
+
+ initHeapSize = MIN2(total_memory / (julong)2,
+ total_memory - (julong)160*M);
+
+ // Make sure that if we have a lot of memory we cap the 32 bit
+ // process space. The 64bit VM version of this function is a nop.
+ initHeapSize = os::allocatable_physical_memory(initHeapSize);
+
+ // The perm gen is separate but contiguous with the
+ // object heap (and is reserved with it) so subtract it
+ // from the heap size.
+ if (initHeapSize > MaxPermSize) {
+ initHeapSize = initHeapSize - MaxPermSize;
+ } else {
+ warning("AggressiveHeap and MaxPermSize values may conflict");
+ }
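+
+ // Illustrative arithmetic (numbers assumed): with 4 GB of physical memory,
+ // initHeapSize starts as MIN2(2 GB, 4 GB - 160 MB) == 2 GB, may be capped
+ // by allocatable_physical_memory on a 32-bit VM, and then has MaxPermSize
+ // subtracted before becoming the default MaxHeapSize below.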
+
+ if (FLAG_IS_DEFAULT(MaxHeapSize)) {
+ FLAG_SET_CMDLINE(uintx, MaxHeapSize, initHeapSize);
+ set_initial_heap_size(MaxHeapSize);
+ // Currently the minimum size and the initial heap sizes are the same.
+ set_min_heap_size(initial_heap_size());
+ }
+ if (FLAG_IS_DEFAULT(NewSize)) {
+ // Make the young generation 3/8ths of the total heap.
+ FLAG_SET_CMDLINE(uintx, NewSize,
+ ((julong)MaxHeapSize / (julong)8) * (julong)3);
+ FLAG_SET_CMDLINE(uintx, MaxNewSize, NewSize);
+ }
+
+ FLAG_SET_DEFAULT(UseLargePages, true);
+
+ // Increase some data structure sizes for efficiency
+ FLAG_SET_CMDLINE(uintx, BaseFootPrintEstimate, MaxHeapSize);
+ FLAG_SET_CMDLINE(bool, ResizeTLAB, false);
+ FLAG_SET_CMDLINE(uintx, TLABSize, 256*K);
+
+ // See the OldPLABSize comment below, but replace 'after promotion'
+ // with 'after copying'. YoungPLABSize is the size of the survivor
+ // space per-gc-thread buffers. The default is 4kw.
+ FLAG_SET_CMDLINE(uintx, YoungPLABSize, 256*K); // Note: this is in words
+
+ // OldPLABSize is the size of the buffers in the old gen that
+ // UseParallelGC uses to promote live data that doesn't fit in the
+ // survivor spaces. At any given time, there's one for each gc thread.
+ // The default size is 1kw. These buffers are rarely used, since the
+ // survivor spaces are usually big enough. For specjbb, however, there
+ // are occasions when there's lots of live data in the young gen
+ // and we end up promoting some of it. We don't have a definite
+ // explanation for why bumping OldPLABSize helps, but the theory
+ // is that a bigger PLAB results in retaining something like the
+ // original allocation order after promotion, which improves mutator
+ // locality. A minor effect may be that larger PLABs reduce the
+ // number of PLAB allocation events during gc. The value of 8kw
+ // was arrived at by experimenting with specjbb.
+ FLAG_SET_CMDLINE(uintx, OldPLABSize, 8*K); // Note: this is in words
+
+ // CompilationPolicyChoice=0 causes the server compiler to adopt
+ // a more conservative which-method-do-I-compile policy when one
+ // of the counters maintained by the interpreter trips. The
+ // result is reduced startup time and improved specjbb and
+ // alacrity performance. Zero is the default, but we set it
+ // explicitly here in case the default changes.
+ // See runtime/compilationPolicy.*.
+ FLAG_SET_CMDLINE(intx, CompilationPolicyChoice, 0);
+
+ // Enable parallel GC and adaptive generation sizing
+ FLAG_SET_CMDLINE(bool, UseParallelGC, true);
+ FLAG_SET_DEFAULT(ParallelGCThreads, nof_parallel_gc_threads());
+
+ // Encourage steady state memory management
+ FLAG_SET_CMDLINE(uintx, ThresholdTolerance, 100);
+
+ // This appears to improve mutator locality
+ FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
+
+ // Get around early Solaris scheduling bug
+ // (affinity vs other jobs on system)
+ // but disallow DR and offlining (5008695).
+ FLAG_SET_CMDLINE(bool, BindGCTaskThreadsToCPUs, true);
+
+ } else if (match_option(option, "-XX:+NeverTenure", &tail)) {
+ // The last option must always win.
+ FLAG_SET_CMDLINE(bool, AlwaysTenure, false);
+ FLAG_SET_CMDLINE(bool, NeverTenure, true);
+ } else if (match_option(option, "-XX:+AlwaysTenure", &tail)) {
+ // The last option must always win.
+ FLAG_SET_CMDLINE(bool, NeverTenure, false);
+ FLAG_SET_CMDLINE(bool, AlwaysTenure, true);
+ } else if (match_option(option, "-XX:+CMSPermGenSweepingEnabled", &tail) ||
+ match_option(option, "-XX:-CMSPermGenSweepingEnabled", &tail)) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Please use CMSClassUnloadingEnabled in place of "
+ "CMSPermGenSweepingEnabled in the future\n");
+ } else if (match_option(option, "-XX:+UseGCTimeLimit", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, true);
+ jio_fprintf(defaultStream::error_stream(),
+ "Please use -XX:+UseGCOverheadLimit in place of "
+ "-XX:+UseGCTimeLimit in the future\n");
+ } else if (match_option(option, "-XX:-UseGCTimeLimit", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, false);
+ jio_fprintf(defaultStream::error_stream(),
+ "Please use -XX:-UseGCOverheadLimit in place of "
+ "-XX:-UseGCTimeLimit in the future\n");
+ // The TLE options are for compatibility with 1.3 and will be
+ // removed without notice in a future release. These options
+ // are not to be documented.
+ } else if (match_option(option, "-XX:MaxTLERatio=", &tail)) {
+ // No longer used.
+ } else if (match_option(option, "-XX:+ResizeTLE", &tail)) {
+ FLAG_SET_CMDLINE(bool, ResizeTLAB, true);
+ } else if (match_option(option, "-XX:-ResizeTLE", &tail)) {
+ FLAG_SET_CMDLINE(bool, ResizeTLAB, false);
+ } else if (match_option(option, "-XX:+PrintTLE", &tail)) {
+ FLAG_SET_CMDLINE(bool, PrintTLAB, true);
+ } else if (match_option(option, "-XX:-PrintTLE", &tail)) {
+ FLAG_SET_CMDLINE(bool, PrintTLAB, false);
+ } else if (match_option(option, "-XX:TLEFragmentationRatio=", &tail)) {
+ // No longer used.
+ } else if (match_option(option, "-XX:TLESize=", &tail)) {
+ jlong long_tlab_size = 0;
+ ArgsRange errcode = parse_memory_size(tail, &long_tlab_size, 1);
+ if (errcode != arg_in_range) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid TLAB size: %s\n", option->optionString);
+ describe_range_error(errcode);
+ return JNI_EINVAL;
+ }
+ FLAG_SET_CMDLINE(uintx, TLABSize, long_tlab_size);
+ } else if (match_option(option, "-XX:TLEThreadRatio=", &tail)) {
+ // No longer used.
+ } else if (match_option(option, "-XX:+UseTLE", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseTLAB, true);
+ } else if (match_option(option, "-XX:-UseTLE", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseTLAB, false);
+SOLARIS_ONLY(
+ } else if (match_option(option, "-XX:+UsePermISM", &tail)) {
+ warning("-XX:+UsePermISM is obsolete.");
+ FLAG_SET_CMDLINE(bool, UseISM, true);
+ } else if (match_option(option, "-XX:-UsePermISM", &tail)) {
+ FLAG_SET_CMDLINE(bool, UseISM, false);
+)
+ } else if (match_option(option, "-XX:+DisplayVMOutputToStderr", &tail)) {
+ FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, false);
+ FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, true);
+ } else if (match_option(option, "-XX:+DisplayVMOutputToStdout", &tail)) {
+ FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, false);
+ FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, true);
+ } else if (match_option(option, "-XX:+ExtendedDTraceProbes", &tail)) {
+#ifdef SOLARIS
+ FLAG_SET_CMDLINE(bool, ExtendedDTraceProbes, true);
+ FLAG_SET_CMDLINE(bool, DTraceMethodProbes, true);
+ FLAG_SET_CMDLINE(bool, DTraceAllocProbes, true);
+ FLAG_SET_CMDLINE(bool, DTraceMonitorProbes, true);
+#else // ndef SOLARIS
+ jio_fprintf(defaultStream::error_stream(),
+ "ExtendedDTraceProbes flag is only applicable on Solaris\n");
+ return JNI_EINVAL;
+#endif // ndef SOLARIS
+ } else
+#ifdef ASSERT
+ if (match_option(option, "-XX:+FullGCALot", &tail)) {
+ FLAG_SET_CMDLINE(bool, FullGCALot, true);
+ // disable scavenge before parallel mark-compact
+ FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
+ } else
+#endif
+ if (match_option(option, "-XX:ParCMSPromoteBlocksToClaim=", &tail)) {
+ julong cms_blocks_to_claim = (julong)atol(tail);
+ FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, cms_blocks_to_claim);
+ jio_fprintf(defaultStream::error_stream(),
+ "Please use -XX:CMSParPromoteBlocksToClaim in place of "
+ "-XX:ParCMSPromoteBlocksToClaim in the future\n");
+ } else
+ if (match_option(option, "-XX:ParallelGCOldGenAllocBufferSize=", &tail)) {
+ jlong old_plab_size = 0;
+ ArgsRange errcode = parse_memory_size(tail, &old_plab_size, 1);
+ if (errcode != arg_in_range) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid old PLAB size: %s\n", option->optionString);
+ describe_range_error(errcode);
+ return JNI_EINVAL;
+ }
+ FLAG_SET_CMDLINE(uintx, OldPLABSize, (julong)old_plab_size);
+ jio_fprintf(defaultStream::error_stream(),
+ "Please use -XX:OldPLABSize in place of "
+ "-XX:ParallelGCOldGenAllocBufferSize in the future\n");
+ } else
+ if (match_option(option, "-XX:ParallelGCToSpaceAllocBufferSize=", &tail)) {
+ jlong young_plab_size = 0;
+ ArgsRange errcode = parse_memory_size(tail, &young_plab_size, 1);
+ if (errcode != arg_in_range) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid young PLAB size: %s\n", option->optionString);
+ describe_range_error(errcode);
+ return JNI_EINVAL;
+ }
+ FLAG_SET_CMDLINE(uintx, YoungPLABSize, (julong)young_plab_size);
+ jio_fprintf(defaultStream::error_stream(),
+ "Please use -XX:YoungPLABSize in place of "
+ "-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
+ } else
+ if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
+ // Skip -XX:Flags= since that case has already been handled
+ if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) {
+ if (!process_argument(tail, args->ignoreUnrecognized, origin)) {
+ return JNI_EINVAL;
+ }
+ }
+ // Unknown option
+ } else if (is_bad_option(option, args->ignoreUnrecognized)) {
+ return JNI_ERR;
+ }
+ }
+
+ return JNI_OK;
+}
+
+jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_required) {
+ // This must be done after all -D arguments have been processed.
+ scp_p->expand_endorsed();
+
+ if (scp_assembly_required || scp_p->get_endorsed() != NULL) {
+ // Assemble the bootclasspath elements into the final path.
+ Arguments::set_sysclasspath(scp_p->combined_path());
+ }
+
+ // This must be done after all arguments have been processed.
+ // java_compiler() true means set to "NONE" or empty.
+ if (java_compiler() && !xdebug_mode()) {
+ // For backwards compatibility, we switch to interpreted mode if
+ // -Djava.compiler="NONE" or "" is specified AND "-Xdebug" was
+ // not specified.
+ set_mode_flags(_int);
+ }
+ if (CompileThreshold == 0) {
+ set_mode_flags(_int);
+ }
+
+#ifdef TIERED
+ // If we are using tiered compilation in the tiered vm then c1 will
+ // do the profiling and we don't want to waste that time in the
+ // interpreter.
+ if (TieredCompilation) {
+ ProfileInterpreter = false;
+ } else {
+ // Since we are running the vanilla server VM, we must adjust the compile
+ // threshold (unless the user has already adjusted it), because the default
+ // threshold assumes we will run tiered.
+
+ if (FLAG_IS_DEFAULT(CompileThreshold)) {
+ CompileThreshold = Tier2CompileThreshold;
+ }
+ }
+#endif // TIERED
+
+#ifndef COMPILER2
+ // Don't degrade server performance for footprint
+ if (FLAG_IS_DEFAULT(UseLargePages) &&
+ MaxHeapSize < LargePageHeapSizeThreshold) {
+ // No need for large granularity pages w/small heaps.
+ // Note that large pages are enabled/disabled for both the
+ // Java heap and the code cache.
+ FLAG_SET_DEFAULT(UseLargePages, false);
+ SOLARIS_ONLY(FLAG_SET_DEFAULT(UseMPSS, false));
+ SOLARIS_ONLY(FLAG_SET_DEFAULT(UseISM, false));
+ }
+#else
+ if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
+ FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
+ }
+#endif
+
+ if (!check_vm_args_consistency()) {
+ return JNI_ERR;
+ }
+
+ return JNI_OK;
+}
+
+jint Arguments::parse_java_options_environment_variable(SysClassPath* scp_p, bool* scp_assembly_required_p) {
+ return parse_options_environment_variable("_JAVA_OPTIONS", scp_p,
+ scp_assembly_required_p);
+}
+
+jint Arguments::parse_java_tool_options_environment_variable(SysClassPath* scp_p, bool* scp_assembly_required_p) {
+ return parse_options_environment_variable("JAVA_TOOL_OPTIONS", scp_p,
+ scp_assembly_required_p);
+}
+
+jint Arguments::parse_options_environment_variable(const char* name, SysClassPath* scp_p, bool* scp_assembly_required_p) {
+ const int N_MAX_OPTIONS = 64;
+ const int OPTION_BUFFER_SIZE = 1024;
+ char buffer[OPTION_BUFFER_SIZE];
+
+ // The variable will be ignored if it exceeds the length of the buffer.
+ // Don't check this variable if user has special privileges
+ // (e.g. unix su command).
+ if (os::getenv(name, buffer, sizeof(buffer)) &&
+ !os::have_special_privileges()) {
+ JavaVMOption options[N_MAX_OPTIONS]; // Construct option array
+ jio_fprintf(defaultStream::error_stream(),
+ "Picked up %s: %s\n", name, buffer);
+ char* rd = buffer; // pointer to the input string (rd)
+ int i;
+ for (i = 0; i < N_MAX_OPTIONS;) { // repeat for all options in the input string
+ while (isspace(*rd)) rd++; // skip whitespace
+ if (*rd == 0) break; // we're done when the input string is read completely
+
+ // The output, option string, overwrites the input string.
+ // Because of quoting, the pointer to the option string (wrt) may lag the pointer to
+ // input string (rd).
+ char* wrt = rd;
+
+ options[i++].optionString = wrt; // Fill in option
+ while (*rd != 0 && !isspace(*rd)) { // unquoted strings terminate with a space or NULL
+ if (*rd == '\'' || *rd == '"') { // handle a quoted string
+ int quote = *rd; // matching quote to look for
+ rd++; // don't copy open quote
+ while (*rd != quote) { // include everything (even spaces) up until quote
+ if (*rd == 0) { // string termination means unmatched string
+ jio_fprintf(defaultStream::error_stream(),
+ "Unmatched quote in %s\n", name);
+ return JNI_ERR;
+ }
+ *wrt++ = *rd++; // copy to option string
+ }
+ rd++; // don't copy close quote
+ } else {
+ *wrt++ = *rd++; // copy to option string
+ }
+ }
+ // Need to check if we're done before writing a NULL,
+ // because the write could be to the byte that rd is pointing to.
+ if (*rd++ == 0) {
+ *wrt = 0;
+ break;
+ }
+ *wrt = 0; // Zero terminate option
+ }
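+ // For example, _JAVA_OPTIONS set to: -Xmx256m "-Dgreeting=hello world"
+ // produces two option strings, with the quotes stripped and the embedded
+ // space preserved in the second one.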
+ // Construct JavaVMInitArgs structure and parse as if it was part of the command line
+ JavaVMInitArgs vm_args;
+ vm_args.version = JNI_VERSION_1_2;
+ vm_args.options = options;
+ vm_args.nOptions = i;
+ vm_args.ignoreUnrecognized = false;
+
+ if (PrintVMOptions) {
+ const char* tail;
+ for (int i = 0; i < vm_args.nOptions; i++) {
+ const JavaVMOption *option = vm_args.options + i;
+ if (match_option(option, "-XX:", &tail)) {
+ logOption(tail);
+ }
+ }
+ }
+
+ return(parse_each_vm_init_arg(&vm_args, scp_p, scp_assembly_required_p, ENVIRON_VAR));
+ }
+ return JNI_OK;
+}
+
+// Parse entry point called from JNI_CreateJavaVM
+
+jint Arguments::parse(const JavaVMInitArgs* args) {
+
+ // Sharing support
+ // Construct the path to the archive
+ char jvm_path[JVM_MAXPATHLEN];
+ os::jvm_path(jvm_path, sizeof(jvm_path));
+#ifdef TIERED
+ if (strstr(jvm_path, "client") != NULL) {
+ force_client_mode = true;
+ }
+#endif // TIERED
+ char *end = strrchr(jvm_path, *os::file_separator());
+ if (end != NULL) *end = '\0';
+ char *shared_archive_path = NEW_C_HEAP_ARRAY(char, strlen(jvm_path) +
+ strlen(os::file_separator()) + 20);
+ if (shared_archive_path == NULL) return JNI_ENOMEM;
+ strcpy(shared_archive_path, jvm_path);
+ strcat(shared_archive_path, os::file_separator());
+ strcat(shared_archive_path, "classes");
+ DEBUG_ONLY(strcat(shared_archive_path, "_g");)
+ strcat(shared_archive_path, ".jsa");
+ SharedArchivePath = shared_archive_path;
+
+ // Remaining part of option string
+ const char* tail;
+
+ // If flag "-XX:Flags=flags-file" is used it will be the first option to be processed.
+ bool settings_file_specified = false;
+ int index;
+ for (index = 0; index < args->nOptions; index++) {
+ const JavaVMOption *option = args->options + index;
+ if (match_option(option, "-XX:Flags=", &tail)) {
+ if (!process_settings_file(tail, true, args->ignoreUnrecognized)) {
+ return JNI_EINVAL;
+ }
+ settings_file_specified = true;
+ }
+ if (match_option(option, "-XX:+PrintVMOptions", &tail)) {
+ PrintVMOptions = true;
+ }
+ }
+
+ // Parse default .hotspotrc settings file
+ if (!settings_file_specified) {
+ if (!process_settings_file(".hotspotrc", false, args->ignoreUnrecognized)) {
+ return JNI_EINVAL;
+ }
+ }
+
+ if (PrintVMOptions) {
+ for (index = 0; index < args->nOptions; index++) {
+ const JavaVMOption *option = args->options + index;
+ if (match_option(option, "-XX:", &tail)) {
+ logOption(tail);
+ }
+ }
+ }
+
+ // Parse JavaVMInitArgs structure passed in, as well as JAVA_TOOL_OPTIONS and _JAVA_OPTIONS
+ jint result = parse_vm_init_args(args);
+ if (result != JNI_OK) {
+ return result;
+ }
+
+#ifndef PRODUCT
+ if (TraceBytecodesAt != 0) {
+ TraceBytecodes = true;
+ }
+ if (CountCompiledCalls) {
+ if (UseCounterDecay) {
+ warning("UseCounterDecay disabled because CountCalls is set");
+ UseCounterDecay = false;
+ }
+ }
+#endif // PRODUCT
+
+ if (PrintGCDetails) {
+ // Turn on -verbose:gc options as well
+ PrintGC = true;
+ if (FLAG_IS_DEFAULT(TraceClassUnloading)) {
+ TraceClassUnloading = true;
+ }
+ }
+
+#ifdef SERIALGC
+ set_serial_gc_flags();
+#endif // SERIALGC
+#ifdef KERNEL
+ no_shared_spaces();
+#endif // KERNEL
+
+ // Set some flags for ParallelGC if needed.
+ set_parallel_gc_flags();
+
+ // Set some flags for CMS and/or ParNew collectors, as needed.
+ set_cms_and_parnew_gc_flags();
+
+ // Set flags based on ergonomics.
+ set_ergonomics_flags();
+
+#ifdef SERIALGC
+ assert(verify_serial_gc_flags(), "SerialGC unset");
+#endif // SERIALGC
+
+ // Set bytecode rewriting flags
+ set_bytecode_flags();
+
+ // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled.
+ set_aggressive_opts_flags();
+
+#ifdef CC_INTERP
+ // Biased locking is not implemented with c++ interpreter
+ FLAG_SET_DEFAULT(UseBiasedLocking, false);
+#endif /* CC_INTERP */
+
+ if (PrintCommandLineFlags) {
+ CommandLineFlags::printSetFlags();
+ }
+
+ return JNI_OK;
+}
+
+int Arguments::PropertyList_count(SystemProperty* pl) {
+ int count = 0;
+ while(pl != NULL) {
+ count++;
+ pl = pl->next();
+ }
+ return count;
+}
+
+const char* Arguments::PropertyList_get_value(SystemProperty *pl, const char* key) {
+ assert(key != NULL, "just checking");
+ SystemProperty* prop;
+ for (prop = pl; prop != NULL; prop = prop->next()) {
+ if (strcmp(key, prop->key()) == 0) return prop->value();
+ }
+ return NULL;
+}
+
+const char* Arguments::PropertyList_get_key_at(SystemProperty *pl, int index) {
+ int count = 0;
+ const char* ret_val = NULL;
+
+ while(pl != NULL) {
+ if(count >= index) {
+ ret_val = pl->key();
+ break;
+ }
+ count++;
+ pl = pl->next();
+ }
+
+ return ret_val;
+}
+
+char* Arguments::PropertyList_get_value_at(SystemProperty* pl, int index) {
+ int count = 0;
+ char* ret_val = NULL;
+
+ while(pl != NULL) {
+ if(count >= index) {
+ ret_val = pl->value();
+ break;
+ }
+ count++;
+ pl = pl->next();
+ }
+
+ return ret_val;
+}
+
+void Arguments::PropertyList_add(SystemProperty** plist, SystemProperty *new_p) {
+ SystemProperty* p = *plist;
+ if (p == NULL) {
+ *plist = new_p;
+ } else {
+ while (p->next() != NULL) {
+ p = p->next();
+ }
+ p->set_next(new_p);
+ }
+}
+
+void Arguments::PropertyList_add(SystemProperty** plist, const char* k, char* v) {
+ if (plist == NULL)
+ return;
+
+ SystemProperty* new_p = new SystemProperty(k, v, true);
+ PropertyList_add(plist, new_p);
+}
+
+// This add maintains unique property key in the list.
+void Arguments::PropertyList_unique_add(SystemProperty** plist, const char* k, char* v) {
+ if (plist == NULL)
+ return;
+
+ // If property key exist then update with new value.
+ SystemProperty* prop;
+ for (prop = *plist; prop != NULL; prop = prop->next()) {
+ if (strcmp(k, prop->key()) == 0) {
+ prop->set_value(v);
+ return;
+ }
+ }
+
+ PropertyList_add(plist, k, v);
+}
+
+#ifdef KERNEL
+char *Arguments::get_kernel_properties() {
+ // Find properties starting with kernel and append them to string
+ // We need to find out how long they are first because the URL's that they
+ // might point to could get long.
+ int length = 0;
+ SystemProperty* prop;
+ for (prop = _system_properties; prop != NULL; prop = prop->next()) {
+ if (strncmp(prop->key(), "kernel.", 7 ) == 0) {
+ length += (strlen(prop->key()) + strlen(prop->value()) + 5); // "-D ="
+ }
+ }
+ // Add one for null terminator.
+ char *props = AllocateHeap(length + 1, "get_kernel_properties");
+ if (length != 0) {
+ int pos = 0;
+ for (prop = _system_properties; prop != NULL; prop = prop->next()) {
+ if (strncmp(prop->key(), "kernel.", 7 ) == 0) {
+ jio_snprintf(&props[pos], length-pos,
+ "-D%s=%s ", prop->key(), prop->value());
+ pos = strlen(props);
+ }
+ }
+ }
+ // Null terminate props in case no kernel properties were found (length == 0).
+ props[length] = '\0';
+ return props;
+}
+#endif // KERNEL
+
+// Copies src into buf, replacing "%%" with "%" and "%p" with pid
+// Returns true if all of the source pointed by src has been copied over to
+// the destination buffer pointed by buf. Otherwise, returns false.
+// Notes:
+// 1. If the length (buflen) of the destination buffer excluding the
+// NULL terminator character is not long enough for holding the expanded
+// pid characters, it also returns false instead of returning the partially
+// expanded one.
+// 2. The passed in "buflen" should be large enough to hold the null terminator.
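+// For example, src "hs_err_pid%p.log" with a current process id of 1234
+// expands to "hs_err_pid1234.log"; "%%" is copied as "%", and any other
+// "%x" sequence is copied through unchanged.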
+bool Arguments::copy_expand_pid(const char* src, size_t srclen,
+ char* buf, size_t buflen) {
+ const char* p = src;
+ char* b = buf;
+ const char* src_end = &src[srclen];
+ char* buf_end = &buf[buflen - 1];
+
+ while (p < src_end && b < buf_end) {
+ if (*p == '%') {
+ switch (*(++p)) {
+ case '%': // "%%" ==> "%"
+ *b++ = *p++;
+ break;
+ case 'p': { // "%p" ==> current process id
+ // buf_end points to the character before the last character so
+ // that we could write '\0' to the end of the buffer.
+ size_t buf_sz = buf_end - b + 1;
+ int ret = jio_snprintf(b, buf_sz, "%d", os::current_process_id());
+
+ // if jio_snprintf fails or the buffer is not long enough to hold
+ // the expanded pid, returns false.
+ if (ret < 0 || ret >= (int)buf_sz) {
+ return false;
+ } else {
+ b += ret;
+ assert(*b == '\0', "fail in copy_expand_pid");
+ if (p == src_end && b == buf_end + 1) {
+ // reach the end of the buffer.
+ return true;
+ }
+ }
+ p++;
+ break;
+ }
+ default :
+ *b++ = '%';
+ }
+ } else {
+ *b++ = *p++;
+ }
+ }
+ *b = '\0';
+ return (p == src_end); // return false if not all of the source was copied
+}
diff --git a/src/share/vm/runtime/arguments.hpp b/src/share/vm/runtime/arguments.hpp
new file mode 100644
index 000000000..fc373c41d
--- /dev/null
+++ b/src/share/vm/runtime/arguments.hpp
@@ -0,0 +1,511 @@
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Arguments parses the command line and recognizes options
+
+// Invocation API hook typedefs (these should really be defined in jni.hpp)
+extern "C" {
+ typedef void (JNICALL *abort_hook_t)(void);
+ typedef void (JNICALL *exit_hook_t)(jint code);
+ typedef jint (JNICALL *vfprintf_hook_t)(FILE *fp, const char *format, va_list args);
+}
+
+// Forward declarations
+
+class SysClassPath;
+
+// Element describing a system- or user-defined (-Dkey=value) property.
+
+class SystemProperty: public CHeapObj {
+ private:
+ char* _key;
+ char* _value;
+ SystemProperty* _next;
+ bool _writeable;
+ bool writeable() { return _writeable; }
+
+ public:
+ // Accessors
+ const char* key() const { return _key; }
+ char* value() const { return _value; }
+ SystemProperty* next() const { return _next; }
+ void set_next(SystemProperty* next) { _next = next; }
+ bool set_value(char *value) {
+ if (writeable()) {
+ if (_value != NULL) {
+ FreeHeap(_value);
+ }
+ _value = AllocateHeap(strlen(value)+1);
+ if (_value != NULL) {
+ strcpy(_value, value);
+ }
+ return true;
+ }
+ return false;
+ }
+
+ void append_value(const char *value) {
+ char *sp;
+ size_t len = 0;
+ if (value != NULL) {
+ len = strlen(value);
+ if (_value != NULL) {
+ len += strlen(_value);
+ }
+ sp = AllocateHeap(len+2);
+ if (sp != NULL) {
+ if (_value != NULL) {
+ strcpy(sp, _value);
+ strcat(sp, os::path_separator());
+ strcat(sp, value);
+ FreeHeap(_value);
+ } else {
+ strcpy(sp, value);
+ }
+ _value = sp;
+ }
+ }
+ }
+
+ // Constructor
+ SystemProperty(const char* key, const char* value, bool writeable) {
+ if (key == NULL) {
+ _key = NULL;
+ } else {
+ _key = AllocateHeap(strlen(key)+1);
+ strcpy(_key, key);
+ }
+ if (value == NULL) {
+ _value = NULL;
+ } else {
+ _value = AllocateHeap(strlen(value)+1);
+ strcpy(_value, value);
+ }
+ _next = NULL;
+ _writeable = writeable;
+ }
+};
+
+
+// For use by -agentlib, -agentpath and -Xrun
+class AgentLibrary : public CHeapObj {
+ friend class AgentLibraryList;
+ private:
+ char* _name;
+ char* _options;
+ void* _os_lib;
+ bool _is_absolute_path;
+ AgentLibrary* _next;
+
+ public:
+ // Accessors
+ const char* name() const { return _name; }
+ char* options() const { return _options; }
+ bool is_absolute_path() const { return _is_absolute_path; }
+ void* os_lib() const { return _os_lib; }
+ void set_os_lib(void* os_lib) { _os_lib = os_lib; }
+ AgentLibrary* next() const { return _next; }
+
+ // Constructor
+ AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib) {
+ _name = AllocateHeap(strlen(name)+1);
+ strcpy(_name, name);
+ if (options == NULL) {
+ _options = NULL;
+ } else {
+ _options = AllocateHeap(strlen(options)+1);
+ strcpy(_options, options);
+ }
+ _is_absolute_path = is_absolute_path;
+ _os_lib = os_lib;
+ _next = NULL;
+ }
+};
+
+// Maintains an order-of-entry list of AgentLibrary objects.
+class AgentLibraryList VALUE_OBJ_CLASS_SPEC {
+ private:
+ AgentLibrary* _first;
+ AgentLibrary* _last;
+ public:
+ bool is_empty() const { return _first == NULL; }
+ AgentLibrary* first() const { return _first; }
+
+ // add to the end of the list
+ void add(AgentLibrary* lib) {
+ if (is_empty()) {
+ _first = _last = lib;
+ } else {
+ _last->_next = lib;
+ _last = lib;
+ }
+ lib->_next = NULL;
+ }
+
+ // search for and remove a library known to be in the list
+ void remove(AgentLibrary* lib) {
+ AgentLibrary* curr;
+ AgentLibrary* prev = NULL;
+ for (curr = first(); curr != NULL; prev = curr, curr = curr->next()) {
+ if (curr == lib) {
+ break;
+ }
+ }
+ assert(curr != NULL, "always should be found");
+
+ if (curr != NULL) {
+ // it was found, by-pass this library
+ if (prev == NULL) {
+ _first = curr->_next;
+ } else {
+ prev->_next = curr->_next;
+ }
+ if (curr == _last) {
+ _last = prev;
+ }
+ curr->_next = NULL;
+ }
+ }
+
+ AgentLibraryList() {
+ _first = NULL;
+ _last = NULL;
+ }
+};
+
+
+class Arguments : AllStatic {
+ friend class VMStructs;
+ friend class JvmtiExport;
+ public:
+ // Operation modi
+ enum Mode {
+ _int, // corresponds to -Xint
+ _mixed, // corresponds to -Xmixed
+ _comp // corresponds to -Xcomp
+ };
+
+ enum ArgsRange {
+ arg_unreadable = -3,
+ arg_too_small = -2,
+ arg_too_big = -1,
+ arg_in_range = 0
+ };
+
+ private:
+
+ // an array containing all flags specified in the .hotspotrc file
+ static char** _jvm_flags_array;
+ static int _num_jvm_flags;
+ // an array containing all jvm arguments specified in the command line
+ static char** _jvm_args_array;
+ static int _num_jvm_args;
+ // string containing all java command (class/jarfile name and app args)
+ static char* _java_command;
+
+ // Property list
+ static SystemProperty* _system_properties;
+
+ // Quick accessor to System properties in the list:
+ static SystemProperty *_java_ext_dirs;
+ static SystemProperty *_java_endorsed_dirs;
+ static SystemProperty *_sun_boot_library_path;
+ static SystemProperty *_java_library_path;
+ static SystemProperty *_java_home;
+ static SystemProperty *_java_class_path;
+ static SystemProperty *_sun_boot_class_path;
+
+ // Meta-index for knowing what packages are in the boot class path
+ static char* _meta_index_path;
+ static char* _meta_index_dir;
+
+ // java.vendor.url.bug, bug reporting URL for fatal errors.
+ static const char* _java_vendor_url_bug;
+
+ // sun.java.launcher, private property to provide information about
+ // java/gamma launcher
+ static const char* _sun_java_launcher;
+
+ // sun.java.launcher.pid, private property
+ static int _sun_java_launcher_pid;
+
+ // Option flags
+ static bool _has_profile;
+ static bool _has_alloc_profile;
+ static const char* _gc_log_filename;
+ static uintx _initial_heap_size;
+ static uintx _min_heap_size;
+
+ // -Xrun arguments
+ static AgentLibraryList _libraryList;
+ static void add_init_library(const char* name, char* options)
+ { _libraryList.add(new AgentLibrary(name, options, false, NULL)); }
+
+ // -agentlib and -agentpath arguments
+ static AgentLibraryList _agentList;
+ static void add_init_agent(const char* name, char* options, bool absolute_path)
+ { _agentList.add(new AgentLibrary(name, options, absolute_path, NULL)); }
+
+ // Late-binding agents not started via arguments
+ static void add_loaded_agent(const char* name, char* options, bool absolute_path, void* os_lib)
+ { _agentList.add(new AgentLibrary(name, options, absolute_path, os_lib)); }
+
+ // Operation modi
+ static Mode _mode;
+ static void set_mode_flags(Mode mode);
+ static bool _java_compiler;
+ static void set_java_compiler(bool arg) { _java_compiler = arg; }
+ static bool java_compiler() { return _java_compiler; }
+
+ // -Xdebug flag
+ static bool _xdebug_mode;
+ static void set_xdebug_mode(bool arg) { _xdebug_mode = arg; }
+ static bool xdebug_mode() { return _xdebug_mode; }
+
+ // Used to save default settings
+ static bool _AlwaysCompileLoopMethods;
+ static bool _UseOnStackReplacement;
+ static bool _BackgroundCompilation;
+ static bool _ClipInlining;
+ static bool _CIDynamicCompilePriority;
+ static intx _Tier2CompileThreshold;
+
+ // GC processing
+ static int nof_parallel_gc_threads();
+ // CMS/ParNew garbage collectors
+ static void set_parnew_gc_flags();
+ static void set_cms_and_parnew_gc_flags();
+ // UseParallelGC
+ static void set_parallel_gc_flags();
+ // GC ergonomics
+ static void set_ergonomics_flags();
+  // Returns true if, based on automatic selection criteria, the
+  // low-pause collector should be used.
+ static bool should_auto_select_low_pause_collector();
+
+ // Bytecode rewriting
+ static void set_bytecode_flags();
+
+ // Invocation API hooks
+ static abort_hook_t _abort_hook;
+ static exit_hook_t _exit_hook;
+ static vfprintf_hook_t _vfprintf_hook;
+
+ // System properties
+ static bool add_property(const char* prop);
+
+ // Aggressive optimization flags.
+ static void set_aggressive_opts_flags();
+
+ // Argument parsing
+ static void do_pd_flag_adjustments();
+ static bool parse_argument(const char* arg, FlagValueOrigin origin);
+ static bool process_argument(const char* arg, jboolean ignore_unrecognized, FlagValueOrigin origin);
+ static void process_java_launcher_argument(const char*, void*);
+ static void process_java_compiler_argument(char* arg);
+ static jint parse_options_environment_variable(const char* name, SysClassPath* scp_p, bool* scp_assembly_required_p);
+ static jint parse_java_tool_options_environment_variable(SysClassPath* scp_p, bool* scp_assembly_required_p);
+ static jint parse_java_options_environment_variable(SysClassPath* scp_p, bool* scp_assembly_required_p);
+ static jint parse_vm_init_args(const JavaVMInitArgs* args);
+ static jint parse_each_vm_init_arg(const JavaVMInitArgs* args, SysClassPath* scp_p, bool* scp_assembly_required_p, FlagValueOrigin origin);
+ static jint finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_required);
+ static bool is_bad_option(const JavaVMOption* option, jboolean ignore,
+ const char* option_type);
+ static bool is_bad_option(const JavaVMOption* option, jboolean ignore) {
+ return is_bad_option(option, ignore, NULL);
+ }
+ static bool verify_percentage(uintx value, const char* name);
+ static void describe_range_error(ArgsRange errcode);
+ static ArgsRange check_memory_size(jlong size, jlong min_size);
+ static ArgsRange parse_memory_size(const char* s, jlong* long_arg,
+ jlong min_size);
+
+ // methods to build strings from individual args
+ static void build_jvm_args(const char* arg);
+ static void build_jvm_flags(const char* arg);
+ static void add_string(char*** bldarray, int* count, const char* arg);
+ static const char* build_resource_string(char** args, int count);
+
+ static bool methodExists(
+ char* className, char* methodName,
+ int classesNum, char** classes, bool* allMethods,
+ int methodsNum, char** methods, bool* allClasses
+ );
+
+ static void parseOnlyLine(
+ const char* line,
+ short* classesNum, short* classesMax, char*** classes, bool** allMethods,
+ short* methodsNum, short* methodsMax, char*** methods, bool** allClasses
+ );
+
+ // Returns true if the string s is in the list of
+ // flags made obsolete in 1.5.0.
+ static bool made_obsolete_in_1_5_0(const char* s);
+
+ static short CompileOnlyClassesNum;
+ static short CompileOnlyClassesMax;
+ static char** CompileOnlyClasses;
+ static bool* CompileOnlyAllMethods;
+
+ static short CompileOnlyMethodsNum;
+ static short CompileOnlyMethodsMax;
+ static char** CompileOnlyMethods;
+ static bool* CompileOnlyAllClasses;
+
+ static short InterpretOnlyClassesNum;
+ static short InterpretOnlyClassesMax;
+ static char** InterpretOnlyClasses;
+ static bool* InterpretOnlyAllMethods;
+
+ static bool CheckCompileOnly;
+
+ static char* SharedArchivePath;
+
+ public:
+ // Parses the arguments
+ static jint parse(const JavaVMInitArgs* args);
+  // Check consistency of VM argument settings
+ static bool check_vm_args_consistency();
+ // Used by os_solaris
+ static bool process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized);
+
+ // return a char* array containing all options
+ static char** jvm_flags_array() { return _jvm_flags_array; }
+ static char** jvm_args_array() { return _jvm_args_array; }
+ static int num_jvm_flags() { return _num_jvm_flags; }
+ static int num_jvm_args() { return _num_jvm_args; }
+ // return the arguments passed to the Java application
+ static const char* java_command() { return _java_command; }
+
+ // print jvm_flags, jvm_args and java_command
+ static void print_on(outputStream* st);
+
+  // convenience methods to obtain / print jvm_flags and jvm_args
+ static const char* jvm_flags() { return build_resource_string(_jvm_flags_array, _num_jvm_flags); }
+ static const char* jvm_args() { return build_resource_string(_jvm_args_array, _num_jvm_args); }
+ static void print_jvm_flags_on(outputStream* st);
+ static void print_jvm_args_on(outputStream* st);
+
+ // -Dkey=value flags
+ static SystemProperty* system_properties() { return _system_properties; }
+ static const char* get_property(const char* key);
+
+ // -Djava.vendor.url.bug
+ static const char* java_vendor_url_bug() { return _java_vendor_url_bug; }
+
+ // -Dsun.java.launcher
+ static const char* sun_java_launcher() { return _sun_java_launcher; }
+ // Was VM created by a Java launcher?
+ static bool created_by_java_launcher();
+ // -Dsun.java.launcher.pid
+ static int sun_java_launcher_pid() { return _sun_java_launcher_pid; }
+
+  // -Xloggc:<file>; NULL if not specified
+ static const char* gc_log_filename() { return _gc_log_filename; }
+
+ // -Xprof/-Xaprof
+ static bool has_profile() { return _has_profile; }
+ static bool has_alloc_profile() { return _has_alloc_profile; }
+
+ // -Xms , -Xmx
+ static uintx initial_heap_size() { return _initial_heap_size; }
+ static void set_initial_heap_size(uintx v) { _initial_heap_size = v; }
+ static uintx min_heap_size() { return _min_heap_size; }
+ static void set_min_heap_size(uintx v) { _min_heap_size = v; }
+
+ // -Xrun
+ static AgentLibrary* libraries() { return _libraryList.first(); }
+ static bool init_libraries_at_startup() { return !_libraryList.is_empty(); }
+ static void convert_library_to_agent(AgentLibrary* lib)
+ { _libraryList.remove(lib);
+ _agentList.add(lib); }
+
+ // -agentlib -agentpath
+ static AgentLibrary* agents() { return _agentList.first(); }
+ static bool init_agents_at_startup() { return !_agentList.is_empty(); }
+
+ // abort, exit, vfprintf hooks
+ static abort_hook_t abort_hook() { return _abort_hook; }
+ static exit_hook_t exit_hook() { return _exit_hook; }
+ static vfprintf_hook_t vfprintf_hook() { return _vfprintf_hook; }
+
+ static bool GetCheckCompileOnly () { return CheckCompileOnly; }
+
+ static const char* GetSharedArchivePath() { return SharedArchivePath; }
+
+ static bool CompileMethod(char* className, char* methodName) {
+ return
+ methodExists(
+ className, methodName,
+ CompileOnlyClassesNum, CompileOnlyClasses, CompileOnlyAllMethods,
+ CompileOnlyMethodsNum, CompileOnlyMethods, CompileOnlyAllClasses
+ );
+ }
+
+ // Java launcher properties
+ static void process_sun_java_launcher_properties(JavaVMInitArgs* args);
+
+ // System properties
+ static void init_system_properties();
+
+  // Property list manipulation
+ static void PropertyList_add(SystemProperty** plist, SystemProperty *element);
+ static void PropertyList_add(SystemProperty** plist, const char* k, char* v);
+ static void PropertyList_unique_add(SystemProperty** plist, const char* k, char* v);
+ static const char* PropertyList_get_value(SystemProperty* plist, const char* key);
+ static int PropertyList_count(SystemProperty* pl);
+ static const char* PropertyList_get_key_at(SystemProperty* pl,int index);
+ static char* PropertyList_get_value_at(SystemProperty* pl,int index);
+
+  // Miscellaneous system property value getters and setters.
+ static void set_dll_dir(char *value) { _sun_boot_library_path->set_value(value); }
+ static void set_java_home(char *value) { _java_home->set_value(value); }
+ static void set_library_path(char *value) { _java_library_path->set_value(value); }
+ static void set_ext_dirs(char *value) { _java_ext_dirs->set_value(value); }
+ static void set_endorsed_dirs(char *value) { _java_endorsed_dirs->set_value(value); }
+ static void set_sysclasspath(char *value) { _sun_boot_class_path->set_value(value); }
+ static void append_sysclasspath(const char *value) { _sun_boot_class_path->append_value(value); }
+ static void set_meta_index_path(char* meta_index_path, char* meta_index_dir) {
+ _meta_index_path = meta_index_path;
+ _meta_index_dir = meta_index_dir;
+ }
+
+ static char *get_java_home() { return _java_home->value(); }
+ static char *get_dll_dir() { return _sun_boot_library_path->value(); }
+ static char *get_endorsed_dir() { return _java_endorsed_dirs->value(); }
+ static char *get_sysclasspath() { return _sun_boot_class_path->value(); }
+ static char* get_meta_index_path() { return _meta_index_path; }
+ static char* get_meta_index_dir() { return _meta_index_dir; }
+
+ // Operation modi
+ static Mode mode() { return _mode; }
+
+ // Utility: copies src into buf, replacing "%%" with "%" and "%p" with pid.
+ static bool copy_expand_pid(const char* src, size_t srclen, char* buf, size_t buflen);
+
+#ifdef KERNEL
+ // For java kernel vm, return property string for kernel properties.
+ static char *get_kernel_properties();
+#endif // KERNEL
+};
diff --git a/src/share/vm/runtime/atomic.cpp b/src/share/vm/runtime/atomic.cpp
new file mode 100644
index 000000000..299d2b00a
--- /dev/null
+++ b/src/share/vm/runtime/atomic.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2001-2003 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_atomic.cpp.incl"
+
+jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+ assert(sizeof(jbyte) == 1, "assumption.");
+ uintptr_t dest_addr = (uintptr_t)dest;
+ uintptr_t offset = dest_addr % sizeof(jint);
+ volatile jint* dest_int = (volatile jint*)(dest_addr - offset);
+ jint cur = *dest_int;
+ jbyte* cur_as_bytes = (jbyte*)(&cur);
+ jint new_val = cur;
+ jbyte* new_val_as_bytes = (jbyte*)(&new_val);
+ new_val_as_bytes[offset] = exchange_value;
+ while (cur_as_bytes[offset] == compare_value) {
+ jint res = cmpxchg(new_val, dest_int, cur);
+ if (res == cur) break;
+ cur = res;
+ new_val = cur;
+ new_val_as_bytes[offset] = exchange_value;
+ }
+ return cur_as_bytes[offset];
+}
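+
+// Editor's illustrative sketch, not part of the original file: how a caller
+// might use the byte-wide cmpxchg above to claim a one-shot flag. The names
+// 'claimed' and 'try_claim' are hypothetical and exist only for this example.
+static volatile jbyte claimed = 0;
+
+static bool try_claim() {
+  // cmpxchg returns the prior value of *dest, so the claim succeeded only
+  // if the flag was still 0 when the emulated byte-wide CAS landed.
+  return Atomic::cmpxchg((jbyte) 1, &claimed, (jbyte) 0) == 0;
+}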
diff --git a/src/share/vm/runtime/atomic.hpp b/src/share/vm/runtime/atomic.hpp
new file mode 100644
index 000000000..cfbda4ba7
--- /dev/null
+++ b/src/share/vm/runtime/atomic.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright 1999-2003 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class Atomic : AllStatic {
+ public:
+ // Atomically store to a location
+ static void store (jbyte store_value, jbyte* dest);
+ static void store (jshort store_value, jshort* dest);
+ static void store (jint store_value, jint* dest);
+ static void store (jlong store_value, jlong* dest);
+ static void store_ptr(intptr_t store_value, intptr_t* dest);
+ static void store_ptr(void* store_value, void* dest);
+
+ static void store (jbyte store_value, volatile jbyte* dest);
+ static void store (jshort store_value, volatile jshort* dest);
+ static void store (jint store_value, volatile jint* dest);
+ static void store (jlong store_value, volatile jlong* dest);
+ static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
+ static void store_ptr(void* store_value, volatile void* dest);
+
+ // Atomically add to a location, return updated value
+ static jint add (jint add_value, volatile jint* dest);
+ static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
+ static void* add_ptr(intptr_t add_value, volatile void* dest);
+
+ // Atomically increment location
+ static void inc (volatile jint* dest);
+ static void inc_ptr(volatile intptr_t* dest);
+ static void inc_ptr(volatile void* dest);
+
+ // Atomically decrement a location
+ static void dec (volatile jint* dest);
+ static void dec_ptr(volatile intptr_t* dest);
+ static void dec_ptr(volatile void* dest);
+
+  // Performs atomic exchange of *dest with exchange_value. Returns the prior value of *dest.
+ static jint xchg (jint exchange_value, volatile jint* dest);
+ static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest);
+ static void* xchg_ptr(void* exchange_value, volatile void* dest);
+
+ // Performs atomic compare of *dest and compare_value, and exchanges *dest with exchange_value
+ // if the comparison succeeded. Returns prior value of *dest. Guarantees a two-way memory
+ // barrier across the cmpxchg. I.e., it's really a 'fence_cmpxchg_acquire'.
+ static jbyte cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);
+ static jint cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value);
+ static jlong cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value);
+ static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
+ static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value);
+};
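+
+// Editor's illustrative sketch, not part of the original file: minimal use of
+// the interface declared above. 'hit_count' and 'record_hit' are hypothetical
+// names introduced only for this example.
+//
+//   static volatile jint hit_count = 0;
+//
+//   static jint record_hit() {
+//     // add() returns the updated value, per the comment on the declaration above.
+//     return Atomic::add(1, &hit_count);
+//   }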
diff --git a/src/share/vm/runtime/biasedLocking.cpp b/src/share/vm/runtime/biasedLocking.cpp
new file mode 100644
index 000000000..bfb3b7904
--- /dev/null
+++ b/src/share/vm/runtime/biasedLocking.cpp
@@ -0,0 +1,752 @@
+
+/*
+ * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_biasedLocking.cpp.incl"
+
+static bool _biased_locking_enabled = false;
+BiasedLockingCounters BiasedLocking::_counters;
+
+static GrowableArray<Handle>* _preserved_oop_stack = NULL;
+static GrowableArray<markOop>* _preserved_mark_stack = NULL;
+
+static void enable_biased_locking(klassOop k) {
+ Klass::cast(k)->set_prototype_header(markOopDesc::biased_locking_prototype());
+}
+
+class VM_EnableBiasedLocking: public VM_Operation {
+ public:
+ VM_EnableBiasedLocking() {}
+ VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
+ void doit() {
+ // Iterate the system dictionary enabling biased locking for all
+ // currently loaded classes
+ SystemDictionary::classes_do(enable_biased_locking);
+ // Indicate that future instances should enable it as well
+ _biased_locking_enabled = true;
+
+ if (TraceBiasedLocking) {
+ tty->print_cr("Biased locking enabled");
+ }
+ }
+
+ bool allow_nested_vm_operations() const { return false; }
+};
+
+
+// One-shot PeriodicTask subclass for enabling biased locking
+class EnableBiasedLockingTask : public PeriodicTask {
+ public:
+ EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}
+
+ virtual void task() {
+ VM_EnableBiasedLocking op;
+ VMThread::execute(&op);
+
+ // Reclaim our storage and disenroll ourself
+ delete this;
+ }
+};
+
+
+void BiasedLocking::init() {
+ // If biased locking is enabled, schedule a task to fire a few
+ // seconds into the run which turns on biased locking for all
+ // currently loaded classes as well as future ones. This is a
+ // workaround for startup time regressions due to a large number of
+ // safepoints being taken during VM startup for bias revocation.
+ // Ideally we would have a lower cost for individual bias revocation
+ // and not need a mechanism like this.
+ if (UseBiasedLocking) {
+ if (BiasedLockingStartupDelay > 0) {
+ EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
+ task->enroll();
+ } else {
+ VM_EnableBiasedLocking op;
+ VMThread::execute(&op);
+ }
+ }
+}
+
+
+bool BiasedLocking::enabled() {
+ return _biased_locking_enabled;
+}
+
+// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
+static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
+ GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
+ if (info != NULL) {
+ return info;
+ }
+
+ info = new GrowableArray<MonitorInfo*>();
+
+ // It's possible for the thread to not have any Java frames on it,
+ // i.e., if it's the main thread and it's already returned from main()
+ if (thread->has_last_Java_frame()) {
+ RegisterMap rm(thread);
+ for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
+ GrowableArray<MonitorInfo*> *monitors = vf->monitors();
+ if (monitors != NULL) {
+ int len = monitors->length();
+ // Walk monitors youngest to oldest
+ for (int i = len - 1; i >= 0; i--) {
+ MonitorInfo* mon_info = monitors->at(i);
+ oop owner = mon_info->owner();
+ if (owner != NULL) {
+ info->append(mon_info);
+ }
+ }
+ }
+ }
+ }
+
+ thread->set_cached_monitor_info(info);
+ return info;
+}
+
+
+static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
+ markOop mark = obj->mark();
+ if (!mark->has_bias_pattern()) {
+ if (TraceBiasedLocking) {
+ ResourceMark rm;
+ tty->print_cr(" (Skipping revocation of object of type %s because it's no longer biased)",
+ Klass::cast(obj->klass())->external_name());
+ }
+ return BiasedLocking::NOT_BIASED;
+ }
+
+ int age = mark->age();
+ markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
+ markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
+
+ if (TraceBiasedLocking && (Verbose || !is_bulk)) {
+ ResourceMark rm;
+ tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
+ (intptr_t) obj, (intptr_t) mark, Klass::cast(obj->klass())->external_name(), (intptr_t) Klass::cast(obj->klass())->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
+ }
+
+ JavaThread* biased_thread = mark->biased_locker();
+ if (biased_thread == NULL) {
+ // Object is anonymously biased. We can get here if, for
+ // example, we revoke the bias due to an identity hash code
+ // being computed for an object.
+ if (!allow_rebias) {
+ obj->set_mark(unbiased_prototype);
+ }
+ if (TraceBiasedLocking && (Verbose || !is_bulk)) {
+ tty->print_cr(" Revoked bias of anonymously-biased object");
+ }
+ return BiasedLocking::BIAS_REVOKED;
+ }
+
+ // Handle case where the thread toward which the object was biased has exited
+ bool thread_is_alive = false;
+ if (requesting_thread == biased_thread) {
+ thread_is_alive = true;
+ } else {
+ for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
+ if (cur_thread == biased_thread) {
+ thread_is_alive = true;
+ break;
+ }
+ }
+ }
+ if (!thread_is_alive) {
+ if (allow_rebias) {
+ obj->set_mark(biased_prototype);
+ } else {
+ obj->set_mark(unbiased_prototype);
+ }
+ if (TraceBiasedLocking && (Verbose || !is_bulk)) {
+ tty->print_cr(" Revoked bias of object biased toward dead thread");
+ }
+ return BiasedLocking::BIAS_REVOKED;
+ }
+
+ // Thread owning bias is alive.
+ // Check to see whether it currently owns the lock and, if so,
+ // write down the needed displaced headers to the thread's stack.
+ // Otherwise, restore the object's header either to the unlocked
+ // or unbiased state.
+ GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
+ BasicLock* highest_lock = NULL;
+ for (int i = 0; i < cached_monitor_info->length(); i++) {
+ MonitorInfo* mon_info = cached_monitor_info->at(i);
+ if (mon_info->owner() == obj) {
+ if (TraceBiasedLocking && Verbose) {
+ tty->print_cr(" mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
+ (intptr_t) mon_info->owner(),
+ (intptr_t) obj);
+ }
+ // Assume recursive case and fix up highest lock later
+ markOop mark = markOopDesc::encode((BasicLock*) NULL);
+ highest_lock = mon_info->lock();
+ highest_lock->set_displaced_header(mark);
+ } else {
+ if (TraceBiasedLocking && Verbose) {
+ tty->print_cr(" mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
+ (intptr_t) mon_info->owner(),
+ (intptr_t) obj);
+ }
+ }
+ }
+ if (highest_lock != NULL) {
+ // Fix up highest lock to contain displaced header and point
+ // object at it
+ highest_lock->set_displaced_header(unbiased_prototype);
+ // Reset object header to point to displaced mark
+ obj->set_mark(markOopDesc::encode(highest_lock));
+ assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
+ if (TraceBiasedLocking && (Verbose || !is_bulk)) {
+ tty->print_cr(" Revoked bias of currently-locked object");
+ }
+ } else {
+ if (TraceBiasedLocking && (Verbose || !is_bulk)) {
+ tty->print_cr(" Revoked bias of currently-unlocked object");
+ }
+ if (allow_rebias) {
+ obj->set_mark(biased_prototype);
+ } else {
+ // Store the unlocked value into the object's header.
+ obj->set_mark(unbiased_prototype);
+ }
+ }
+
+ return BiasedLocking::BIAS_REVOKED;
+}
+
+
+enum HeuristicsResult {
+ HR_NOT_BIASED = 1,
+ HR_SINGLE_REVOKE = 2,
+ HR_BULK_REBIAS = 3,
+ HR_BULK_REVOKE = 4
+};
+
+
+static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
+ markOop mark = o->mark();
+ if (!mark->has_bias_pattern()) {
+ return HR_NOT_BIASED;
+ }
+
+ // Heuristics to attempt to throttle the number of revocations.
+ // Stages:
+ // 1. Revoke the biases of all objects in the heap of this type,
+ // but allow rebiasing of those objects if unlocked.
+ // 2. Revoke the biases of all objects in the heap of this type
+ // and don't allow rebiasing of these objects. Disable
+ // allocation of objects of that type with the bias bit set.
+ Klass* k = o->blueprint();
+ jlong cur_time = os::javaTimeMillis();
+ jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
+ int revocation_count = k->biased_lock_revocation_count();
+ if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
+ (revocation_count < BiasedLockingBulkRevokeThreshold) &&
+ (last_bulk_revocation_time != 0) &&
+ (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
+ // This is the first revocation we've seen in a while of an
+ // object of this type since the last time we performed a bulk
+ // rebiasing operation. The application is allocating objects in
+ // bulk which are biased toward a thread and then handing them
+ // off to another thread. We can cope with this allocation
+ // pattern via the bulk rebiasing mechanism so we reset the
+ // klass's revocation count rather than allow it to increase
+ // monotonically. If we see the need to perform another bulk
+ // rebias operation later, we will, and if subsequently we see
+ // many more revocation operations in a short period of time we
+ // will completely disable biasing for this type.
+ k->set_biased_lock_revocation_count(0);
+ revocation_count = 0;
+ }
+
+ // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
+ if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
+ revocation_count = k->atomic_incr_biased_lock_revocation_count();
+ }
+
+ if (revocation_count == BiasedLockingBulkRevokeThreshold) {
+ return HR_BULK_REVOKE;
+ }
+
+ if (revocation_count == BiasedLockingBulkRebiasThreshold) {
+ return HR_BULK_REBIAS;
+ }
+
+ return HR_SINGLE_REVOKE;
+}
+
+
+static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
+ bool bulk_rebias,
+ bool attempt_rebias_of_object,
+ JavaThread* requesting_thread) {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
+
+ if (TraceBiasedLocking) {
+ tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
+ INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+ (bulk_rebias ? "rebias" : "revoke"),
+ (intptr_t) o, (intptr_t) o->mark(), Klass::cast(o->klass())->external_name());
+ }
+
+ jlong cur_time = os::javaTimeMillis();
+ o->blueprint()->set_last_biased_lock_bulk_revocation_time(cur_time);
+
+
+ klassOop k_o = o->klass();
+ Klass* klass = Klass::cast(k_o);
+
+ if (bulk_rebias) {
+ // Use the epoch in the klass of the object to implicitly revoke
+ // all biases of objects of this data type and force them to be
+ // reacquired. However, we also need to walk the stacks of all
+ // threads and update the headers of lightweight locked objects
+ // with biases to have the current epoch.
+
+ // If the prototype header doesn't have the bias pattern, don't
+ // try to update the epoch -- assume another VM operation came in
+ // and reset the header to the unbiased state, which will
+ // implicitly cause all existing biases to be revoked
+ if (klass->prototype_header()->has_bias_pattern()) {
+ int prev_epoch = klass->prototype_header()->bias_epoch();
+ klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
+ int cur_epoch = klass->prototype_header()->bias_epoch();
+
+ // Now walk all threads' stacks and adjust epochs of any biased
+ // and locked objects of this data type we encounter
+ for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
+ GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
+ for (int i = 0; i < cached_monitor_info->length(); i++) {
+ MonitorInfo* mon_info = cached_monitor_info->at(i);
+ oop owner = mon_info->owner();
+ markOop mark = owner->mark();
+ if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
+ // We might have encountered this object already in the case of recursive locking
+ assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
+ owner->set_mark(mark->set_bias_epoch(cur_epoch));
+ }
+ }
+ }
+ }
+
+ // At this point we're done. All we have to do is potentially
+ // adjust the header of the given object to revoke its bias.
+ revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
+ } else {
+ if (TraceBiasedLocking) {
+ ResourceMark rm;
+ tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
+ }
+
+ // Disable biased locking for this data type. Not only will this
+ // cause future instances to not be biased, but existing biased
+ // instances will notice that this implicitly caused their biases
+ // to be revoked.
+ klass->set_prototype_header(markOopDesc::prototype());
+
+ // Now walk all threads' stacks and forcibly revoke the biases of
+ // any locked and biased objects of this data type we encounter.
+ for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
+ GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
+ for (int i = 0; i < cached_monitor_info->length(); i++) {
+ MonitorInfo* mon_info = cached_monitor_info->at(i);
+ oop owner = mon_info->owner();
+ markOop mark = owner->mark();
+ if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
+ revoke_bias(owner, false, true, requesting_thread);
+ }
+ }
+ }
+
+  // The bias of the passed object must be forcibly revoked as well
+  // to ensure the guarantees made to callers
+ revoke_bias(o, false, true, requesting_thread);
+ }
+
+ if (TraceBiasedLocking) {
+ tty->print_cr("* Ending bulk revocation");
+ }
+
+ BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
+
+ if (attempt_rebias_of_object &&
+ o->mark()->has_bias_pattern() &&
+ klass->prototype_header()->has_bias_pattern()) {
+ markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
+ klass->prototype_header()->bias_epoch());
+ o->set_mark(new_mark);
+ status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
+ if (TraceBiasedLocking) {
+ tty->print_cr(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
+ }
+ }
+
+ assert(!o->mark()->has_bias_pattern() ||
+ (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
+ "bug in bulk bias revocation");
+
+ return status_code;
+}
+
+
+static void clean_up_cached_monitor_info() {
+ // Walk the thread list clearing out the cached monitors
+ for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
+ thr->set_cached_monitor_info(NULL);
+ }
+}
+
+
+class VM_RevokeBias : public VM_Operation {
+protected:
+ Handle* _obj;
+ GrowableArray<Handle>* _objs;
+ JavaThread* _requesting_thread;
+ BiasedLocking::Condition _status_code;
+
+public:
+ VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
+ : _obj(obj)
+ , _objs(NULL)
+ , _requesting_thread(requesting_thread)
+ , _status_code(BiasedLocking::NOT_BIASED) {}
+
+ VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
+ : _obj(NULL)
+ , _objs(objs)
+ , _requesting_thread(requesting_thread)
+ , _status_code(BiasedLocking::NOT_BIASED) {}
+
+ virtual VMOp_Type type() const { return VMOp_RevokeBias; }
+
+ virtual bool doit_prologue() {
+ // Verify that there is actual work to do since the callers just
+ // give us locked object(s). If we don't find any biased objects
+ // there is nothing to do and we avoid a safepoint.
+ if (_obj != NULL) {
+ markOop mark = (*_obj)()->mark();
+ if (mark->has_bias_pattern()) {
+ return true;
+ }
+ } else {
+ for ( int i = 0 ; i < _objs->length(); i++ ) {
+ markOop mark = (_objs->at(i))()->mark();
+ if (mark->has_bias_pattern()) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ virtual void doit() {
+ if (_obj != NULL) {
+ if (TraceBiasedLocking) {
+ tty->print_cr("Revoking bias with potentially per-thread safepoint:");
+ }
+ _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
+ clean_up_cached_monitor_info();
+ return;
+ } else {
+ if (TraceBiasedLocking) {
+ tty->print_cr("Revoking bias with global safepoint:");
+ }
+ BiasedLocking::revoke_at_safepoint(_objs);
+ }
+ }
+
+ BiasedLocking::Condition status_code() const {
+ return _status_code;
+ }
+};
+
+
+class VM_BulkRevokeBias : public VM_RevokeBias {
+private:
+ bool _bulk_rebias;
+ bool _attempt_rebias_of_object;
+
+public:
+ VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
+ bool bulk_rebias,
+ bool attempt_rebias_of_object)
+ : VM_RevokeBias(obj, requesting_thread)
+ , _bulk_rebias(bulk_rebias)
+ , _attempt_rebias_of_object(attempt_rebias_of_object) {}
+
+ virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
+ virtual bool doit_prologue() { return true; }
+
+ virtual void doit() {
+ _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
+ clean_up_cached_monitor_info();
+ }
+};
+
+
+BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
+ assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
+
+ // We can revoke the biases of anonymously-biased objects
+ // efficiently enough that we should not cause these revocations to
+ // update the heuristics because doing so may cause unwanted bulk
+ // revocations (which are expensive) to occur.
+ markOop mark = obj->mark();
+ if (mark->is_biased_anonymously() && !attempt_rebias) {
+ // We are probably trying to revoke the bias of this object due to
+ // an identity hash code computation. Try to revoke the bias
+ // without a safepoint. This is possible if we can successfully
+ // compare-and-exchange an unbiased header into the mark word of
+ // the object, meaning that no other thread has raced to acquire
+ // the bias of the object.
+ markOop biased_value = mark;
+ markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+ markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
+ if (res_mark == biased_value) {
+ return BIAS_REVOKED;
+ }
+ } else if (mark->has_bias_pattern()) {
+ Klass* k = Klass::cast(obj->klass());
+ markOop prototype_header = k->prototype_header();
+ if (!prototype_header->has_bias_pattern()) {
+ // This object has a stale bias from before the bulk revocation
+ // for this data type occurred. It's pointless to update the
+ // heuristics at this point so simply update the header with a
+ // CAS. If we fail this race, the object's bias has been revoked
+ // by another thread so we simply return and let the caller deal
+ // with it.
+ markOop biased_value = mark;
+ markOop res_mark = (markOop) Atomic::cmpxchg_ptr(prototype_header, obj->mark_addr(), mark);
+ assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
+ return BIAS_REVOKED;
+ } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
+ // The epoch of this biasing has expired indicating that the
+ // object is effectively unbiased. Depending on whether we need
+ // to rebias or revoke the bias of this object we can do it
+ // efficiently enough with a CAS that we shouldn't update the
+ // heuristics. This is normally done in the assembly code but we
+ // can reach this point due to various points in the runtime
+ // needing to revoke biases.
+ if (attempt_rebias) {
+ assert(THREAD->is_Java_thread(), "");
+ markOop biased_value = mark;
+ markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
+ markOop res_mark = (markOop) Atomic::cmpxchg_ptr(rebiased_prototype, obj->mark_addr(), mark);
+ if (res_mark == biased_value) {
+ return BIAS_REVOKED_AND_REBIASED;
+ }
+ } else {
+ markOop biased_value = mark;
+ markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+ markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
+ if (res_mark == biased_value) {
+ return BIAS_REVOKED;
+ }
+ }
+ }
+ }
+
+ HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
+ if (heuristics == HR_NOT_BIASED) {
+ return NOT_BIASED;
+ } else if (heuristics == HR_SINGLE_REVOKE) {
+ if (mark->biased_locker() == THREAD) {
+ // A thread is trying to revoke the bias of an object biased
+ // toward it, again likely due to an identity hash code
+ // computation. We can again avoid a safepoint in this case
+ // since we are only going to walk our own stack. There are no
+ // races with revocations occurring in other threads because we
+ // reach no safepoints in the revocation path.
+ ResourceMark rm;
+ if (TraceBiasedLocking) {
+ tty->print_cr("Revoking bias by walking my own stack:");
+ }
+ BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
+ ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
+ assert(cond == BIAS_REVOKED, "why not?");
+ return cond;
+ } else {
+ VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
+ VMThread::execute(&revoke);
+ return revoke.status_code();
+ }
+ }
+
+ assert((heuristics == HR_BULK_REVOKE) ||
+ (heuristics == HR_BULK_REBIAS), "?");
+ VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
+ (heuristics == HR_BULK_REBIAS),
+ attempt_rebias);
+ VMThread::execute(&bulk_revoke);
+ return bulk_revoke.status_code();
+}
+
+
+void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
+ assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
+ if (objs->length() == 0) {
+ return;
+ }
+ VM_RevokeBias revoke(objs, JavaThread::current());
+ VMThread::execute(&revoke);
+}
+
+
+void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
+ assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
+ oop obj = h_obj();
+ HeuristicsResult heuristics = update_heuristics(obj, false);
+ if (heuristics == HR_SINGLE_REVOKE) {
+ revoke_bias(obj, false, false, NULL);
+ } else if ((heuristics == HR_BULK_REBIAS) ||
+ (heuristics == HR_BULK_REVOKE)) {
+ bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
+ }
+ clean_up_cached_monitor_info();
+}
+
+
+void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
+ assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
+ int len = objs->length();
+ for (int i = 0; i < len; i++) {
+ oop obj = (objs->at(i))();
+ HeuristicsResult heuristics = update_heuristics(obj, false);
+ if (heuristics == HR_SINGLE_REVOKE) {
+ revoke_bias(obj, false, false, NULL);
+ } else if ((heuristics == HR_BULK_REBIAS) ||
+ (heuristics == HR_BULK_REVOKE)) {
+ bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
+ }
+ }
+ clean_up_cached_monitor_info();
+}
+
+
+void BiasedLocking::preserve_marks() {
+ if (!UseBiasedLocking)
+ return;
+
+ assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
+
+ assert(_preserved_oop_stack == NULL, "double initialization");
+ assert(_preserved_mark_stack == NULL, "double initialization");
+
+ // In order to reduce the number of mark words preserved during GC
+ // due to the presence of biased locking, we reinitialize most mark
+ // words to the class's prototype during GC -- even those which have
+ // a currently valid bias owner. One important situation where we
+ // must not clobber a bias is when a biased object is currently
+ // locked. To handle this case we iterate over the currently-locked
+ // monitors in a prepass and, if they are biased, preserve their
+ // mark words here. This should be a relatively small set of objects
+ // especially compared to the number of objects in the heap.
+ _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(10, true);
+ _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<Handle>(10, true);
+
+ ResourceMark rm;
+ Thread* cur = Thread::current();
+ for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
+ if (thread->has_last_Java_frame()) {
+ RegisterMap rm(thread);
+ for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
+ GrowableArray<MonitorInfo*> *monitors = vf->monitors();
+ if (monitors != NULL) {
+ int len = monitors->length();
+ // Walk monitors youngest to oldest
+ for (int i = len - 1; i >= 0; i--) {
+ MonitorInfo* mon_info = monitors->at(i);
+ oop owner = mon_info->owner();
+ if (owner != NULL) {
+ markOop mark = owner->mark();
+ if (mark->has_bias_pattern()) {
+ _preserved_oop_stack->push(Handle(cur, owner));
+ _preserved_mark_stack->push(mark);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+
+void BiasedLocking::restore_marks() {
+ if (!UseBiasedLocking)
+ return;
+
+ assert(_preserved_oop_stack != NULL, "double free");
+ assert(_preserved_mark_stack != NULL, "double free");
+
+ int len = _preserved_oop_stack->length();
+ for (int i = 0; i < len; i++) {
+ Handle owner = _preserved_oop_stack->at(i);
+ markOop mark = _preserved_mark_stack->at(i);
+ owner->set_mark(mark);
+ }
+
+ delete _preserved_oop_stack;
+ _preserved_oop_stack = NULL;
+ delete _preserved_mark_stack;
+ _preserved_mark_stack = NULL;
+}
+
+
+int* BiasedLocking::total_entry_count_addr() { return _counters.total_entry_count_addr(); }
+int* BiasedLocking::biased_lock_entry_count_addr() { return _counters.biased_lock_entry_count_addr(); }
+int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
+int* BiasedLocking::rebiased_lock_entry_count_addr() { return _counters.rebiased_lock_entry_count_addr(); }
+int* BiasedLocking::revoked_lock_entry_count_addr() { return _counters.revoked_lock_entry_count_addr(); }
+int* BiasedLocking::fast_path_entry_count_addr() { return _counters.fast_path_entry_count_addr(); }
+int* BiasedLocking::slow_path_entry_count_addr() { return _counters.slow_path_entry_count_addr(); }
+
+
+// BiasedLockingCounters
+
+int BiasedLockingCounters::slow_path_entry_count() {
+ if (_slow_path_entry_count != 0) {
+ return _slow_path_entry_count;
+ }
+ int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count +
+ _rebiased_lock_entry_count + _revoked_lock_entry_count +
+ _fast_path_entry_count;
+
+ return _total_entry_count - sum;
+}
+
+void BiasedLockingCounters::print_on(outputStream* st) {
+ tty->print_cr("# total entries: %d", _total_entry_count);
+ tty->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
+ tty->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
+ tty->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
+ tty->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
+ tty->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
+ tty->print_cr("# slow path lock entries: %d", slow_path_entry_count());
+}
diff --git a/src/share/vm/runtime/biasedLocking.hpp b/src/share/vm/runtime/biasedLocking.hpp
new file mode 100644
index 000000000..49fe77300
--- /dev/null
+++ b/src/share/vm/runtime/biasedLocking.hpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// This class describes operations to implement Store-Free Biased
+// Locking. The high-level properties of the scheme are similar to
+// IBM's lock reservation, Dice-Moir-Scherer QR locks, and other biased
+// locking mechanisms. The principal difference is in the handling of
+// recursive locking which is how this technique achieves a more
+// efficient fast path than these other schemes.
+//
+// The basic observation is that in HotSpot's current fast locking
+// scheme, recursive locking (in the fast path) causes no update to
+// the object header. The recursion is described simply by stack
+// records containing a specific value (NULL). Only the last unlock by
+// a given thread causes an update to the object header.
+//
+// This observation, coupled with the fact that HotSpot only compiles
+// methods for which monitor matching is obeyed (and which therefore
+// can not throw IllegalMonitorStateException), implies that we can
+// completely eliminate modifications to the object header for
+// recursive locking in compiled code, and perform similar recursion
+// checks and throwing of IllegalMonitorStateException in the
+// interpreter with little or no impact on the performance of the fast
+// path.
+//
+// The basic algorithm is as follows (note, see below for more details
+// and information). A pattern in the low three bits is reserved in
+// the object header to indicate whether biasing of a given object's
+// lock is currently being done or is allowed at all. If the bias
+// pattern is present, the contents of the rest of the header are
+// either the JavaThread* of the thread to which the lock is biased,
+// or NULL, indicating that the lock is "anonymously biased". The
+// first thread which locks an anonymously biased object biases the
+// lock toward that thread. If another thread subsequently attempts to
+// lock the same object, the bias is revoked.
+//
+// Because there are no updates to the object header at all during
+// recursive locking while the lock is biased, the biased lock entry
+// code is simply a test of the object header's value. If this test
+// succeeds, the lock has been acquired by the thread. If this test
+// fails, a bit test is done to see whether the bias bit is still
+// set. If not, we fall back to HotSpot's original CAS-based locking
+// scheme. If it is set, we attempt to CAS in a bias toward this
+// thread. The latter operation is expected to be the rarest operation
+// performed on these locks. We optimistically expect the biased lock
+// entry to hit most of the time, and want the CAS-based fallthrough
+// to occur quickly in the situations where the bias has been revoked.
+//
+// Revocation of the lock's bias is fairly straightforward. We want to
+// restore the object's header and stack-based BasicObjectLocks and
+// BasicLocks to the state they would have been in had the object been
+// locked by HotSpot's usual fast locking scheme. To do this, we bring
+// the system to a safepoint and walk the stack of the thread toward
+// which the lock is biased. We find all of the lock records on the
+// stack corresponding to this object, in particular the first /
+// "highest" record. We fill in the highest lock record with the
+// object's displaced header (which is a well-known value given that
+// we don't maintain an identity hash nor age bits for the object
+// while it's in the biased state) and all other lock records with 0,
+// the value for recursive locks. When the safepoint is released, the
+// formerly-biased thread and all other threads revert back to
+// HotSpot's CAS-based locking.
+//
+// This scheme can not handle transfers of biases of single objects
+// from thread to thread efficiently, but it can handle bulk transfers
+// of such biases, which is a usage pattern showing up in some
+// applications and benchmarks. We implement "bulk rebias" and "bulk
+// revoke" operations using a "bias epoch" on a per-data-type basis.
+// If too many bias revocations are occurring for a particular data
+// type, the bias epoch for the data type is incremented at a
+// safepoint, effectively meaning that all previous biases are
+// invalid. The fast path locking case checks for an invalid epoch in
+// the object header and attempts to rebias the object with a CAS if
+// found, avoiding safepoints or bulk heap sweeps (the latter of which
+// was used in a prior version of this algorithm and did not scale
+// well). If too many bias revocations persist, biasing is completely
+// disabled for the data type by resetting the prototype header to the
+// unbiased markOop. The fast-path locking code checks to see whether
+// the instance's bias pattern differs from the prototype header's and
+// causes the bias to be revoked without reaching a safepoint or,
+// again, a bulk heap sweep.
+
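+// Editor's illustrative sketch, not part of the original file: the decision
+// structure of the biased lock entry path described above, in pseudo-C++ over
+// the markOop API used in biasedLocking.cpp. The real fast path is emitted by
+// the interpreter and compilers; 'biased_lock_enter_sketch' is a hypothetical
+// name used only here.
+//
+//   bool biased_lock_enter_sketch(oop obj, JavaThread* self) {
+//     markOop mark  = obj->mark();
+//     markOop proto = Klass::cast(obj->klass())->prototype_header();
+//     if (!mark->has_bias_pattern()) {
+//       return false;                      // fall back to CAS-based locking
+//     }
+//     if (mark->bias_epoch() == proto->bias_epoch() &&
+//         mark->biased_locker() == self) {
+//       return true;                       // biased to us: acquired with no store
+//     }
+//     if (mark->is_biased_anonymously() || mark->bias_epoch() != proto->bias_epoch()) {
+//       // Unowned or expired bias: try to CAS in a bias toward this thread.
+//       markOop rebiased = markOopDesc::encode(self, mark->age(), proto->bias_epoch());
+//       if ((markOop) Atomic::cmpxchg_ptr(rebiased, obj->mark_addr(), mark) == mark) {
+//         return true;
+//       }
+//     }
+//     return false;   // biased to another thread: revoke via the slow path below
+//   }
+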
+// Biased locking counters
+class BiasedLockingCounters VALUE_OBJ_CLASS_SPEC {
+ private:
+ int _total_entry_count;
+ int _biased_lock_entry_count;
+ int _anonymously_biased_lock_entry_count;
+ int _rebiased_lock_entry_count;
+ int _revoked_lock_entry_count;
+ int _fast_path_entry_count;
+ int _slow_path_entry_count;
+
+ public:
+ BiasedLockingCounters() :
+ _total_entry_count(0),
+ _biased_lock_entry_count(0),
+ _anonymously_biased_lock_entry_count(0),
+ _rebiased_lock_entry_count(0),
+ _revoked_lock_entry_count(0),
+ _fast_path_entry_count(0),
+ _slow_path_entry_count(0) {}
+
+ int slow_path_entry_count(); // Compute this field if necessary
+
+ int* total_entry_count_addr() { return &_total_entry_count; }
+ int* biased_lock_entry_count_addr() { return &_biased_lock_entry_count; }
+ int* anonymously_biased_lock_entry_count_addr() { return &_anonymously_biased_lock_entry_count; }
+ int* rebiased_lock_entry_count_addr() { return &_rebiased_lock_entry_count; }
+ int* revoked_lock_entry_count_addr() { return &_revoked_lock_entry_count; }
+ int* fast_path_entry_count_addr() { return &_fast_path_entry_count; }
+ int* slow_path_entry_count_addr() { return &_slow_path_entry_count; }
+
+ bool nonzero() { return _total_entry_count > 0; }
+
+ void print_on(outputStream* st);
+ void print() { print_on(tty); }
+};
+
+
+class BiasedLocking : AllStatic {
+private:
+ static BiasedLockingCounters _counters;
+
+public:
+ static int* total_entry_count_addr();
+ static int* biased_lock_entry_count_addr();
+ static int* anonymously_biased_lock_entry_count_addr();
+ static int* rebiased_lock_entry_count_addr();
+ static int* revoked_lock_entry_count_addr();
+ static int* fast_path_entry_count_addr();
+ static int* slow_path_entry_count_addr();
+
+ enum Condition {
+ NOT_BIASED = 1,
+ BIAS_REVOKED = 2,
+ BIAS_REVOKED_AND_REBIASED = 3
+ };
+
+ // This initialization routine should only be called once and
+ // schedules a PeriodicTask to turn on biased locking a few seconds
+ // into the VM run to avoid startup time regressions
+ static void init();
+
+ // This provides a global switch for leaving biased locking disabled
+ // for the first part of a run and enabling it later
+ static bool enabled();
+
+ // This should be called by JavaThreads to revoke the bias of an object
+ static Condition revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS);
+
+ // These do not allow rebiasing; they are used by deoptimization to
+ // ensure that monitors on the stack can be migrated
+ static void revoke(GrowableArray<Handle>* objs);
+ static void revoke_at_safepoint(Handle obj);
+ static void revoke_at_safepoint(GrowableArray<Handle>* objs);
+
+ static void print_counters() { _counters.print(); }
+ static BiasedLockingCounters* counters() { return &_counters; }
+
+ // These routines are GC-related and should not be called by end
+ // users. GCs which do not do preservation of mark words do not need
+ // to call these routines.
+ static void preserve_marks();
+ static void restore_marks();
+};
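+
+// Editor's illustrative sketch, not part of the original file: how a caller in
+// the synchronization runtime would typically use revoke_and_rebias() before
+// falling back to ordinary stack locking. 'slow_enter_sketch' is a hypothetical
+// name; only the BiasedLocking interface above is taken from this header.
+//
+//   void slow_enter_sketch(Handle obj, bool attempt_rebias, TRAPS) {
+//     if (UseBiasedLocking && !SafepointSynchronize::is_at_safepoint()) {
+//       BiasedLocking::Condition cond =
+//           BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
+//       if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
+//         return;    // the lock was acquired by rebiasing; nothing more to do
+//       }
+//     }
+//     // ... otherwise continue with the CAS-based stack-lock path ...
+//   }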
diff --git a/src/share/vm/runtime/compilationPolicy.cpp b/src/share/vm/runtime/compilationPolicy.cpp
new file mode 100644
index 000000000..ac870e0cc
--- /dev/null
+++ b/src/share/vm/runtime/compilationPolicy.cpp
@@ -0,0 +1,448 @@
+/*
+ * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_compilationPolicy.cpp.incl"
+
+CompilationPolicy* CompilationPolicy::_policy;
+elapsedTimer CompilationPolicy::_accumulated_time;
+bool CompilationPolicy::_in_vm_startup;
+
+// Determine compilation policy based on command line argument
+void compilationPolicy_init() {
+ CompilationPolicy::set_in_vm_startup(DelayCompilationDuringStartup);
+
+ switch(CompilationPolicyChoice) {
+ case 0:
+ CompilationPolicy::set_policy(new SimpleCompPolicy());
+ break;
+
+ case 1:
+#ifdef COMPILER2
+ CompilationPolicy::set_policy(new StackWalkCompPolicy());
+#else
+ Unimplemented();
+#endif
+ break;
+
+ default:
+ fatal("CompilationPolicyChoice must be in the range: [0-1]");
+ }
+}
+
+void CompilationPolicy::completed_vm_startup() {
+ if (TraceCompilationPolicy) {
+ tty->print("CompilationPolicy: completed vm startup.\n");
+ }
+ _in_vm_startup = false;
+}
+
+// Returns true if m must be compiled before executing it
+// This is intended to force compiles for methods (usually for
+// debugging) that would otherwise be interpreted for some reason.
+bool CompilationPolicy::mustBeCompiled(methodHandle m) {
+ if (m->has_compiled_code()) return false; // already compiled
+ if (!canBeCompiled(m)) return false;
+
+ return !UseInterpreter || // must compile all methods
+ (UseCompiler && AlwaysCompileLoopMethods && m->has_loops()); // eagerly compile loop methods
+}
+
+// Returns true if m is allowed to be compiled
+bool CompilationPolicy::canBeCompiled(methodHandle m) {
+ if (m->is_abstract()) return false;
+ if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
+
+ return !m->is_not_compilable();
+}
+
+#ifndef PRODUCT
+void CompilationPolicy::print_time() {
+ tty->print_cr ("Accumulated compilationPolicy times:");
+ tty->print_cr ("---------------------------");
+ tty->print_cr (" Total: %3.3f sec.", _accumulated_time.seconds());
+}
+
+static void trace_osr_completion(nmethod* osr_nm) {
+ if (TraceOnStackReplacement) {
+ if (osr_nm == NULL) tty->print_cr("compilation failed");
+ else tty->print_cr("nmethod " INTPTR_FORMAT, osr_nm);
+ }
+}
+#endif // !PRODUCT
+
+void CompilationPolicy::reset_counter_for_invocation_event(methodHandle m) {
+  // Make sure invocation and backedge counters don't overflow again right away
+  // as would be the case for native methods.
+
+ // BUT also make sure the method doesn't look like it was never executed.
+ // Set carry bit and reduce counter's value to min(count, CompileThreshold/2).
+ m->invocation_counter()->set_carry();
+ m->backedge_counter()->set_carry();
+
+ assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
+}
+
+void CompilationPolicy::reset_counter_for_back_branch_event(methodHandle m) {
+  // Delay next back-branch event but pump up invocation counter to trigger
+  // whole method compilation.
+ InvocationCounter* i = m->invocation_counter();
+ InvocationCounter* b = m->backedge_counter();
+
+  // Don't set invocation_counter's value too low, otherwise the method will
+  // look immature (ic < ~5300), which prevents inlining based on
+  // type profiling.
+ i->set(i->state(), CompileThreshold);
+ // Don't reset counter too low - it is used to check if OSR method is ready.
+ b->set(b->state(), CompileThreshold / 2);
+}
+
+// SimpleCompPolicy - compile current method
+
+void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) {
+ assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");
+
+ int hot_count = m->invocation_count();
+ reset_counter_for_invocation_event(m);
+ const char* comment = "count";
+
+ if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler) {
+ nmethod* nm = m->code();
+ if (nm == NULL ) {
+ const char* comment = "count";
+ CompileBroker::compile_method(m, InvocationEntryBci,
+ m, hot_count, comment, CHECK);
+ } else {
+#ifdef TIERED
+
+ if (nm->is_compiled_by_c1()) {
+ const char* comment = "tier1 overflow";
+ CompileBroker::compile_method(m, InvocationEntryBci,
+ m, hot_count, comment, CHECK);
+ }
+#endif // TIERED
+ }
+ }
+}
+
+void SimpleCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) {
+ assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");
+
+ int hot_count = m->backedge_count();
+ const char* comment = "backedge_count";
+
+ if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m)) {
+ CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
+
+ NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
+ }
+}
+
+int SimpleCompPolicy::compilation_level(methodHandle m, int branch_bci)
+{
+#ifdef TIERED
+ if (!TieredCompilation) {
+ return CompLevel_highest_tier;
+ }
+ if (/* m()->tier1_compile_done() && */
+ // QQQ HACK FIX ME set tier1_compile_done!!
+ !m()->is_native()) {
+ // Grab the nmethod so it doesn't go away while it's being queried
+ nmethod* code = m()->code();
+ if (code != NULL && code->is_compiled_by_c1()) {
+ return CompLevel_highest_tier;
+ }
+ }
+ return CompLevel_fast_compile;
+#else
+ return CompLevel_highest_tier;
+#endif // TIERED
+}
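+
+// Note: under TIERED the logic above returns CompLevel_fast_compile (tier1)
+// until a C1-generated nmethod exists for the method, after which it returns
+// CompLevel_highest_tier; native methods always get tier1 here, and without
+// TieredCompilation the highest tier is always requested.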
+
+// StackWalkCompPolicy - walk up stack to find a suitable method to compile
+
+#ifdef COMPILER2
+const char* StackWalkCompPolicy::_msg = NULL;
+
+
+// Consider m for compilation
+void StackWalkCompPolicy::method_invocation_event(methodHandle m, TRAPS) {
+ assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");
+
+ int hot_count = m->invocation_count();
+ reset_counter_for_invocation_event(m);
+ const char* comment = "count";
+
+ if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler) {
+ ResourceMark rm(THREAD);
+ JavaThread *thread = (JavaThread*)THREAD;
+ frame fr = thread->last_frame();
+ assert(fr.is_interpreted_frame(), "must be interpreted");
+ assert(fr.interpreter_frame_method() == m(), "bad method");
+
+ if (TraceCompilationPolicy) {
+ tty->print("method invocation trigger: ");
+ m->print_short_name(tty);
+ tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", (address)m(), m->code_size());
+ }
+ RegisterMap reg_map(thread, false);
+ javaVFrame* triggerVF = thread->last_java_vframe(&reg_map);
+ // triggerVF is the frame that triggered its counter
+ RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m);
+
+ if (first->top_method()->code() != NULL) {
+ // called obsolete method/nmethod -- no need to recompile
+ if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, first->top_method()->code());
+ } else if (compilation_level(m, InvocationEntryBci) == CompLevel_fast_compile) {
+      // Tier1 compilation policy avoids stack walking.
+ CompileBroker::compile_method(m, InvocationEntryBci,
+ m, hot_count, comment, CHECK);
+ } else {
+ if (TimeCompilationPolicy) accumulated_time()->start();
+ GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
+ stack->push(first);
+ RFrame* top = findTopInlinableFrame(stack);
+ if (TimeCompilationPolicy) accumulated_time()->stop();
+ assert(top != NULL, "findTopInlinableFrame returned null");
+ if (TraceCompilationPolicy) top->print();
+ CompileBroker::compile_method(top->top_method(), InvocationEntryBci,
+ m, hot_count, comment, CHECK);
+ }
+ }
+}
+
+void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) {
+ assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");
+
+ int hot_count = m->backedge_count();
+ const char* comment = "backedge_count";
+
+ if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m)) {
+ CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
+
+ NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
+ }
+}
+
+int StackWalkCompPolicy::compilation_level(methodHandle m, int osr_bci)
+{
+ int comp_level = CompLevel_full_optimization;
+ if (TieredCompilation && osr_bci == InvocationEntryBci) {
+ if (CompileTheWorld) {
+ // Under CTW, the first compile is tier1, the second tier2
+ if (m->highest_tier_compile() == CompLevel_none) {
+ comp_level = CompLevel_fast_compile;
+ }
+ } else if (!m->has_osr_nmethod()) {
+ // Before tier1 is done, use invocation_count + backedge_count to
+ // compare against the threshold. After that, the counters may/will
+ // be reset, so rely on the straight interpreter_invocation_count.
+ if (m->highest_tier_compile() == CompLevel_initial_compile) {
+ if (m->interpreter_invocation_count() < Tier2CompileThreshold) {
+ comp_level = CompLevel_fast_compile;
+ }
+ } else if (m->invocation_count() + m->backedge_count() <
+ Tier2CompileThreshold) {
+ comp_level = CompLevel_fast_compile;
+ }
+ }
+
+ }
+ return comp_level;
+}
+
+
+RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
+ // go up the stack until finding a frame that (probably) won't be inlined
+ // into its caller
+ RFrame* current = stack->at(0); // current choice for stopping
+ assert( current && !current->is_compiled(), "" );
+ const char* msg = NULL;
+
+ while (1) {
+
+ // before going up the stack further, check if doing so would get us into
+ // compiled code
+ RFrame* next = senderOf(current, stack);
+ if( !next ) // No next frame up the stack?
+ break; // Then compile with current frame
+
+ methodHandle m = current->top_method();
+ methodHandle next_m = next->top_method();
+
+ if (TraceCompilationPolicy && Verbose) {
+ tty->print("[caller: ");
+ next_m->print_short_name(tty);
+ tty->print("] ");
+ }
+
+ if( !Inline ) { // Inlining turned off
+ msg = "Inlining turned off";
+ break;
+ }
+    if (next_m->is_not_compilable()) { // Did we fail to compile this before?
+ msg = "caller not compilable";
+ break;
+ }
+ if (next->num() > MaxRecompilationSearchLength) {
+ // don't go up too high when searching for recompilees
+ msg = "don't go up any further: > MaxRecompilationSearchLength";
+ break;
+ }
+ if (next->distance() > MaxInterpretedSearchLength) {
+ // don't go up too high when searching for recompilees
+ msg = "don't go up any further: next > MaxInterpretedSearchLength";
+ break;
+ }
+ // Compiled frame above already decided not to inline;
+ // do not recompile him.
+ if (next->is_compiled()) {
+ msg = "not going up into optimized code";
+ break;
+ }
+
+ // Interpreted frame above us was already compiled. Do not force
+ // a recompile, although if the frame above us runs long enough an
+ // OSR might still happen.
+ if( current->is_interpreted() && next_m->has_compiled_code() ) {
+ msg = "not going up -- already compiled caller";
+ break;
+ }
+
+ // Compute how frequent this call site is. We have current method 'm'.
+ // We know next method 'next_m' is interpreted. Find the call site and
+ // check the various invocation counts.
+ int invcnt = 0; // Caller counts
+ if (ProfileInterpreter) {
+ invcnt = next_m->interpreter_invocation_count();
+ }
+ int cnt = 0; // Call site counts
+ if (ProfileInterpreter && next_m->method_data() != NULL) {
+ ResourceMark rm;
+ int bci = next->top_vframe()->bci();
+ ProfileData* data = next_m->method_data()->bci_to_data(bci);
+ if (data != NULL && data->is_CounterData())
+ cnt = data->as_CounterData()->count();
+ }
+
+    // Call-site count / caller invocation count; i.e. is this call site
+    // a hot call site for method next_m?
+    int freq = (invcnt) ? cnt/invcnt : cnt;
+
+ // Check size and frequency limits
+ if ((msg = shouldInline(m, freq, cnt)) != NULL) {
+ break;
+ }
+ // Check inlining negative tests
+ if ((msg = shouldNotInline(m)) != NULL) {
+ break;
+ }
+
+
+ // If the caller method is too big or something then we do not want to
+ // compile it just to inline a method
+ if (!canBeCompiled(next_m)) {
+ msg = "caller cannot be compiled";
+ break;
+ }
+
+ if( next_m->name() == vmSymbols::class_initializer_name() ) {
+ msg = "do not compile class initializer (OSR ok)";
+ break;
+ }
+
+ if (TraceCompilationPolicy && Verbose) {
+ tty->print("\n\t check caller: ");
+ next_m->print_short_name(tty);
+ tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", (address)next_m(), next_m->code_size());
+ }
+
+ current = next;
+ }
+
+ assert( !current || !current->is_compiled(), "" );
+
+ if (TraceCompilationPolicy && msg) tty->print("(%s)\n", msg);
+
+ return current;
+}
+
+RFrame* StackWalkCompPolicy::senderOf(RFrame* rf, GrowableArray<RFrame*>* stack) {
+ RFrame* sender = rf->caller();
+ if (sender && sender->num() == stack->length()) stack->push(sender);
+ return sender;
+}
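+
+// Note: senderOf() pushes the caller only when its frame number equals the
+// current length of 'stack'; this keeps a pushed RFrame's index equal to its
+// num() and prevents the same caller from being pushed twice while
+// findTopInlinableFrame() walks upward.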
+
+
+const char* StackWalkCompPolicy::shouldInline(methodHandle m, float freq, int cnt) {
+ // Allows targeted inlining
+ // positive filter: should send be inlined? returns NULL (--> yes)
+ // or rejection msg
+ int max_size = MaxInlineSize;
+ int cost = m->code_size();
+
+ // Check for too many throws (and not too huge)
+ if (m->interpreter_throwout_count() > InlineThrowCount && cost < InlineThrowMaxSize ) {
+ return NULL;
+ }
+
+ // bump the max size if the call is frequent
+ if ((freq >= InlineFrequencyRatio) || (cnt >= InlineFrequencyCount)) {
+ if (TraceFrequencyInlining) {
+ tty->print("(Inlined frequent method)\n");
+ m->print();
+ }
+ max_size = FreqInlineSize;
+ }
+ if (cost > max_size) {
+ return (_msg = "too big");
+ }
+ return NULL;
+}
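+
+// Usage note: shouldInline() is a pure size/frequency filter. The default cap
+// is MaxInlineSize, raised to FreqInlineSize for frequent call sites
+// (freq >= InlineFrequencyRatio or cnt >= InlineFrequencyCount); methods that
+// throw a lot are accepted regardless of the size caps as long as their code
+// size stays under InlineThrowMaxSize.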
+
+
+const char* StackWalkCompPolicy::shouldNotInline(methodHandle m) {
+ // negative filter: should send NOT be inlined? returns NULL (--> inline) or rejection msg
+ if (m->is_abstract()) return (_msg = "abstract method");
+ // note: we allow ik->is_abstract()
+ if (!instanceKlass::cast(m->method_holder())->is_initialized()) return (_msg = "method holder not initialized");
+ if (m->is_native()) return (_msg = "native method");
+ nmethod* m_code = m->code();
+ if( m_code != NULL && m_code->instructions_size() > InlineSmallCode )
+ return (_msg = "already compiled into a big method");
+
+ // use frequency-based objections only for non-trivial methods
+ if (m->code_size() <= MaxTrivialSize) return NULL;
+ if (UseInterpreter) { // don't use counts with -Xcomp
+ if ((m->code() == NULL) && m->was_never_executed()) return (_msg = "never executed");
+ if (!m->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return (_msg = "executed < MinInliningThreshold times");
+ }
+ if (methodOopDesc::has_unloaded_classes_in_signature(m, JavaThread::current())) return (_msg = "unloaded signature classes");
+
+ return NULL;
+}
+
+
+
+#endif // COMPILER2
diff --git a/src/share/vm/runtime/compilationPolicy.hpp b/src/share/vm/runtime/compilationPolicy.hpp
new file mode 100644
index 000000000..937215128
--- /dev/null
+++ b/src/share/vm/runtime/compilationPolicy.hpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// The CompilationPolicy selects which method (if any) should be compiled.
+// It also decides which methods must always be compiled (i.e., are never
+// interpreted).
+
+class CompilationPolicy : public CHeapObj {
+ private:
+ static CompilationPolicy* _policy;
+ // Accumulated time
+ static elapsedTimer _accumulated_time;
+
+ static bool _in_vm_startup;
+
+ public:
+ virtual void method_invocation_event(methodHandle m, TRAPS) = 0;
+ virtual void method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) = 0;
+ virtual int compilation_level(methodHandle m, int branch_bci) = 0;
+
+ void reset_counter_for_invocation_event(methodHandle method);
+ void reset_counter_for_back_branch_event(methodHandle method);
+
+ static void set_in_vm_startup(bool in_vm_startup) { _in_vm_startup = in_vm_startup; }
+ static void completed_vm_startup();
+ static bool delayCompilationDuringStartup() { return _in_vm_startup; }
+
+ static bool mustBeCompiled(methodHandle m); // m must be compiled before executing it
+ static bool canBeCompiled(methodHandle m); // m is allowed to be compiled
+
+ static void set_policy(CompilationPolicy* policy) { _policy = policy; }
+ static CompilationPolicy* policy() { return _policy; }
+
+ // Profiling
+ elapsedTimer* accumulated_time() { return &_accumulated_time; }
+ void print_time() PRODUCT_RETURN;
+};
+
+class SimpleCompPolicy : public CompilationPolicy {
+ public:
+ void method_invocation_event( methodHandle m, TRAPS);
+ void method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS);
+ int compilation_level(methodHandle m, int branch_bci);
+};
+
+// StackWalkCompPolicy - existing C2 policy
+
+#ifdef COMPILER2
+class StackWalkCompPolicy : public CompilationPolicy {
+ public:
+ void method_invocation_event(methodHandle m, TRAPS);
+ void method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS);
+ int compilation_level(methodHandle m, int branch_bci);
+
+ private:
+ RFrame* findTopInlinableFrame(GrowableArray<RFrame*>* stack);
+ RFrame* senderOf(RFrame* rf, GrowableArray<RFrame*>* stack);
+
+ // the following variables hold values computed by the last inlining decision
+ // they are used for performance debugging only (print better messages)
+ static const char* _msg; // reason for not inlining
+
+ static const char* shouldInline (methodHandle callee, float frequency, int cnt);
+ // positive filter: should send be inlined? returns NULL (--> yes) or rejection msg
+ static const char* shouldNotInline(methodHandle callee);
+ // negative filter: should send NOT be inlined? returns NULL (--> inline) or rejection msg
+
+};
+#endif
diff --git a/src/share/vm/runtime/deoptimization.cpp b/src/share/vm/runtime/deoptimization.cpp
new file mode 100644
index 000000000..a4c3a181a
--- /dev/null
+++ b/src/share/vm/runtime/deoptimization.cpp
@@ -0,0 +1,1789 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_deoptimization.cpp.incl"
+
+bool DeoptimizationMarker::_is_active = false;
+
+Deoptimization::UnrollBlock::UnrollBlock(int size_of_deoptimized_frame,
+ int caller_adjustment,
+ int number_of_frames,
+ intptr_t* frame_sizes,
+ address* frame_pcs,
+ BasicType return_type) {
+ _size_of_deoptimized_frame = size_of_deoptimized_frame;
+ _caller_adjustment = caller_adjustment;
+ _number_of_frames = number_of_frames;
+ _frame_sizes = frame_sizes;
+ _frame_pcs = frame_pcs;
+ _register_block = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2);
+ _return_type = return_type;
+ // PD (x86 only)
+ _counter_temp = 0;
+ _initial_fp = 0;
+ _unpack_kind = 0;
+ _sender_sp_temp = 0;
+
+ _total_frame_sizes = size_of_frames();
+}
+
+
+Deoptimization::UnrollBlock::~UnrollBlock() {
+ FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
+ FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
+ FREE_C_HEAP_ARRAY(intptr_t, _register_block);
+}
+
+
+intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
+ assert(register_number < RegisterMap::reg_count, "checking register number");
+ return &_register_block[register_number * 2];
+}
+
+
+
+int Deoptimization::UnrollBlock::size_of_frames() const {
+  // Account first for the adjustment of the initial frame
+ int result = _caller_adjustment;
+ for (int index = 0; index < number_of_frames(); index++) {
+ result += frame_sizes()[index];
+ }
+ return result;
+}
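+
+// Note: the sizes summed above are in bytes; fetch_unroll_info_helper()
+// scales both the per-frame sizes and the caller adjustment by BytesPerWord
+// before constructing the UnrollBlock.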
+
+
+void Deoptimization::UnrollBlock::print() {
+ ttyLocker ttyl;
+ tty->print_cr("UnrollBlock");
+ tty->print_cr(" size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
+ tty->print( " frame_sizes: ");
+ for (int index = 0; index < number_of_frames(); index++) {
+ tty->print("%d ", frame_sizes()[index]);
+ }
+ tty->cr();
+}
+
+
+// In order to make fetch_unroll_info work properly with escape
+// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
+// ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
+// of previously eliminated objects occurs in realloc_objects, which is
+// called from the method fetch_unroll_info_helper below.
+JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread))
+ // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
+ // but makes the entry a little slower. There is however a little dance we have to
+ // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
+
+ // fetch_unroll_info() is called at the beginning of the deoptimization
+ // handler. Note this fact before we start generating temporary frames
+ // that can confuse an asynchronous stack walker. This counter is
+ // decremented at the end of unpack_frames().
+ thread->inc_in_deopt_handler();
+
+ return fetch_unroll_info_helper(thread);
+JRT_END
+
+
+// This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
+Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread) {
+
+ // Note: there is a safepoint safety issue here. No matter whether we enter
+ // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
+ // the vframeArray is created.
+ //
+
+ // Allocate our special deoptimization ResourceMark
+ DeoptResourceMark* dmark = new DeoptResourceMark(thread);
+ assert(thread->deopt_mark() == NULL, "Pending deopt!");
+ thread->set_deopt_mark(dmark);
+
+ frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
+ RegisterMap map(thread, true);
+ RegisterMap dummy_map(thread, false);
+ // Now get the deoptee with a valid map
+ frame deoptee = stub_frame.sender(&map);
+
+ // Create a growable array of VFrames where each VFrame represents an inlined
+ // Java frame. This storage is allocated with the usual system arena.
+ assert(deoptee.is_compiled_frame(), "Wrong frame type");
+ GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
+ vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
+ while (!vf->is_top()) {
+ assert(vf->is_compiled_frame(), "Wrong frame type");
+ chunk->push(compiledVFrame::cast(vf));
+ vf = vf->sender();
+ }
+ assert(vf->is_compiled_frame(), "Wrong frame type");
+ chunk->push(compiledVFrame::cast(vf));
+
+#ifdef COMPILER2
+ // Reallocate the non-escaping objects and restore their fields. Then
+ // relock objects if synchronization on them was eliminated.
+ if (DoEscapeAnalysis && EliminateAllocations) {
+ GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
+ bool reallocated = false;
+ if (objects != NULL) {
+ JRT_BLOCK
+ reallocated = realloc_objects(thread, &deoptee, objects, THREAD);
+ JRT_END
+ }
+ if (reallocated) {
+ reassign_fields(&deoptee, &map, objects);
+#ifndef PRODUCT
+ if (TraceDeoptimization) {
+ ttyLocker ttyl;
+ tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
+ print_objects(objects);
+ }
+#endif
+ }
+ for (int i = 0; i < chunk->length(); i++) {
+ GrowableArray<MonitorValue*>* monitors = chunk->at(i)->scope()->monitors();
+ if (monitors != NULL) {
+ relock_objects(&deoptee, &map, monitors);
+#ifndef PRODUCT
+ if (TraceDeoptimization) {
+ ttyLocker ttyl;
+ tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
+          for (int j = 0; j < monitors->length(); j++) {
+            MonitorValue* mv = monitors->at(j);
+ if (mv->eliminated()) {
+ StackValue* owner = StackValue::create_stack_value(&deoptee, &map, mv->owner());
+ tty->print_cr(" object <" INTPTR_FORMAT "> locked", owner->get_obj()());
+ }
+ }
+ }
+#endif
+ }
+ }
+ }
+#endif // COMPILER2
+ // Ensure that no safepoint is taken after pointers have been stored
+ // in fields of rematerialized objects. If a safepoint occurs from here on
+ // out the java state residing in the vframeArray will be missed.
+ No_Safepoint_Verifier no_safepoint;
+
+ vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk);
+
+  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
+ thread->set_vframe_array_head(array);
+
+ // Now that the vframeArray has been created if we have any deferred local writes
+ // added by jvmti then we can free up that structure as the data is now in the
+ // vframeArray
+
+ if (thread->deferred_locals() != NULL) {
+ GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
+ int i = 0;
+ do {
+ // Because of inlining we could have multiple vframes for a single frame
+ // and several of the vframes could have deferred writes. Find them all.
+ if (list->at(i)->id() == array->original().id()) {
+ jvmtiDeferredLocalVariableSet* dlv = list->at(i);
+ list->remove_at(i);
+ // individual jvmtiDeferredLocalVariableSet are CHeapObj's
+ delete dlv;
+ } else {
+ i++;
+ }
+ } while ( i < list->length() );
+ if (list->length() == 0) {
+ thread->set_deferred_locals(NULL);
+ // free the list and elements back to C heap.
+ delete list;
+ }
+
+ }
+
+ // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
+ CodeBlob* cb = stub_frame.cb();
+ // Verify we have the right vframeArray
+ assert(cb->frame_size() >= 0, "Unexpected frame size");
+ intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
+
+#ifdef ASSERT
+ assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
+ Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp);
+#endif
+ // This is a guarantee instead of an assert because if vframe doesn't match
+ // we will unpack the wrong deoptimized frame and wind up in strange places
+ // where it will be very difficult to figure out what went wrong. Better
+ // to die an early death here than some very obscure death later when the
+ // trail is cold.
+ // Note: on ia64 this guarantee can be fooled by frames with no memory stack
+ // in that it will fail to detect a problem when there is one. This needs
+ // more work in tiger timeframe.
+ guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");
+
+ int number_of_frames = array->frames();
+
+ // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost
+ // virtual activation, which is the reverse of the elements in the vframes array.
+ intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames);
+ // +1 because we always have an interpreter return address for the final slot.
+ address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1);
+ int callee_parameters = 0;
+ int callee_locals = 0;
+ int popframe_extra_args = 0;
+ // Create an interpreter return address for the stub to use as its return
+ // address so the skeletal frames are perfectly walkable
+ frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);
+
+ // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
+ // activation be put back on the expression stack of the caller for reexecution
+ if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
+ popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
+ }
+
+ //
+ // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
+ // frame_sizes/frame_pcs[1] next oldest frame (int)
+ // frame_sizes/frame_pcs[n] youngest frame (int)
+ //
+ // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
+  // owns the space for the return address to its caller). Confusing ain't it.
+ //
+  // The vframe array can address vframes with indices running from
+  // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
+  // When we create the skeletal frames we need the oldest frame to be in the zero slot
+  // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk,
+  // which is why things look a little strange in this loop.
+ //
+ for (int index = 0; index < array->frames(); index++ ) {
+ // frame[number_of_frames - 1 ] = on_stack_size(youngest)
+ // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
+ // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
+ frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
+ callee_locals,
+ index == 0,
+ popframe_extra_args);
+    // This pc doesn't have to be perfect, just good enough to identify the frame
+    // as interpreted so the skeleton frame will be walkable.
+    // The correct pc will be set when the skeleton frame is completely filled out.
+    // The final pc we store in the loop is wrong and will be overwritten below.
+ frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;
+
+ callee_parameters = array->element(index)->method()->size_of_parameters();
+ callee_locals = array->element(index)->method()->max_locals();
+ popframe_extra_args = 0;
+ }
+
+ // Compute whether the root vframe returns a float or double value.
+ BasicType return_type;
+ {
+ HandleMark hm;
+ methodHandle method(thread, array->element(0)->method());
+ Bytecode_invoke* invoke = Bytecode_invoke_at_check(method, array->element(0)->bci());
+ return_type = (invoke != NULL) ? invoke->result_type(thread) : T_ILLEGAL;
+ }
+
+ // Compute information for handling adapters and adjusting the frame size of the caller.
+ int caller_adjustment = 0;
+
+ // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
+ // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
+ // than simply use array->sender.pc(). This requires us to walk the current set of frames
+ //
+ frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
+ deopt_sender = deopt_sender.sender(&dummy_map); // Now deoptee caller
+
+ // Compute the amount the oldest interpreter frame will have to adjust
+ // its caller's stack by. If the caller is a compiled frame then
+ // we pretend that the callee has no parameters so that the
+ // extension counts for the full amount of locals and not just
+ // locals-parms. This is because without a c2i adapter the parm
+ // area as created by the compiled frame will not be usable by
+ // the interpreter. (Depending on the calling convention there
+ // may not even be enough space).
+
+ // QQQ I'd rather see this pushed down into last_frame_adjust
+ // and have it take the sender (aka caller).
+
+ if (deopt_sender.is_compiled_frame()) {
+ caller_adjustment = last_frame_adjust(0, callee_locals);
+ } else if (callee_locals > callee_parameters) {
+ // The caller frame may need extending to accommodate
+ // non-parameter locals of the first unpacked interpreted frame.
+ // Compute that adjustment.
+ caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
+ }
+
+
+  // If the sender is deoptimized then we must retrieve the address of the handler
+ // since the frame will "magically" show the original pc before the deopt
+ // and we'd undo the deopt.
+
+ frame_pcs[0] = deopt_sender.raw_pc();
+
+ assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
+
+ UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
+ caller_adjustment * BytesPerWord,
+ number_of_frames,
+ frame_sizes,
+ frame_pcs,
+ return_type);
+#if defined(IA32) || defined(AMD64)
+ // We need a way to pass fp to the unpacking code so the skeletal frames
+ // come out correct. This is only needed for x86 because of c2 using ebp
+ // as an allocatable register. So this update is useless (and harmless)
+ // on the other platforms. It would be nice to do this in a different
+ // way but even the old style deoptimization had a problem with deriving
+ // this value. NEEDS_CLEANUP
+ // Note: now that c1 is using c2's deopt blob we must do this on all
+ // x86 based platforms
+ intptr_t** fp_addr = (intptr_t**) (((address)info) + info->initial_fp_offset_in_bytes());
+ *fp_addr = array->sender().fp(); // was adapter_caller
+#endif /* IA32 || AMD64 */
+
+ if (array->frames() > 1) {
+ if (VerifyStack && TraceDeoptimization) {
+ tty->print_cr("Deoptimizing method containing inlining");
+ }
+ }
+
+ array->set_unroll_block(info);
+ return info;
+}
+
+// Called to clean up deoptimization data structures in the normal case
+// after unpacking to the stack, and when a stack overflow error occurs
+void Deoptimization::cleanup_deopt_info(JavaThread *thread,
+ vframeArray *array) {
+
+ // Get array if coming from exception
+ if (array == NULL) {
+ array = thread->vframe_array_head();
+ }
+ thread->set_vframe_array_head(NULL);
+
+ // Free the previous UnrollBlock
+ vframeArray* old_array = thread->vframe_array_last();
+ thread->set_vframe_array_last(array);
+
+ if (old_array != NULL) {
+ UnrollBlock* old_info = old_array->unroll_block();
+ old_array->set_unroll_block(NULL);
+ delete old_info;
+ delete old_array;
+ }
+
+  // Deallocate any resources created in this routine and any ResourceObjs allocated
+ // inside the vframeArray (StackValueCollections)
+
+ delete thread->deopt_mark();
+ thread->set_deopt_mark(NULL);
+
+
+ if (JvmtiExport::can_pop_frame()) {
+#ifndef CC_INTERP
+ // Regardless of whether we entered this routine with the pending
+ // popframe condition bit set, we should always clear it now
+ thread->clear_popframe_condition();
+#else
+  // C++ interpreter will clear has_pending_popframe when it enters
+ // with method_resume. For deopt_resume2 we clear it now.
+ if (thread->popframe_forcing_deopt_reexecution())
+ thread->clear_popframe_condition();
+#endif /* CC_INTERP */
+ }
+
+ // unpack_frames() is called at the end of the deoptimization handler
+ // and (in C2) at the end of the uncommon trap handler. Note this fact
+ // so that an asynchronous stack walker can work again. This counter is
+ // incremented at the beginning of fetch_unroll_info() and (in C2) at
+ // the beginning of uncommon_trap().
+ thread->dec_in_deopt_handler();
+}
+
+
+// Return BasicType of value being returned
+JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
+
+  // We are already active in the special DeoptResourceMark; any ResourceObj's we
+  // allocate will be freed at the end of the routine.
+
+ // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
+ // but makes the entry a little slower. There is however a little dance we have to
+ // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
+ ResetNoHandleMark rnhm; // No-op in release/product versions
+ HandleMark hm;
+
+ frame stub_frame = thread->last_frame();
+
+ // Since the frame to unpack is the top frame of this thread, the vframe_array_head
+ // must point to the vframeArray for the unpack frame.
+ vframeArray* array = thread->vframe_array_head();
+
+#ifndef PRODUCT
+ if (TraceDeoptimization) {
+ tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", thread, array, exec_mode);
+ }
+#endif
+
+ UnrollBlock* info = array->unroll_block();
+
+ // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
+ array->unpack_to_stack(stub_frame, exec_mode);
+
+ BasicType bt = info->return_type();
+
+ // If we have an exception pending, claim that the return type is an oop
+ // so the deopt_blob does not overwrite the exception_oop.
+
+ if (exec_mode == Unpack_exception)
+ bt = T_OBJECT;
+
+ // Cleanup thread deopt data
+ cleanup_deopt_info(thread, array);
+
+#ifndef PRODUCT
+ if (VerifyStack) {
+ ResourceMark res_mark;
+
+ // Verify that the just-unpacked frames match the interpreter's
+ // notions of expression stack and locals
+ vframeArray* cur_array = thread->vframe_array_last();
+ RegisterMap rm(thread, false);
+ rm.set_include_argument_oops(false);
+ bool is_top_frame = true;
+ int callee_size_of_parameters = 0;
+ int callee_max_locals = 0;
+ for (int i = 0; i < cur_array->frames(); i++) {
+ vframeArrayElement* el = cur_array->element(i);
+ frame* iframe = el->iframe();
+ guarantee(iframe->is_interpreted_frame(), "Wrong frame type");
+
+ // Get the oop map for this bci
+ InterpreterOopMap mask;
+ int cur_invoke_parameter_size = 0;
+ bool try_next_mask = false;
+ int next_mask_expression_stack_size = -1;
+ int top_frame_expression_stack_adjustment = 0;
+ methodHandle mh(thread, iframe->interpreter_frame_method());
+ OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
+ BytecodeStream str(mh);
+ str.set_start(iframe->interpreter_frame_bci());
+ int max_bci = mh->code_size();
+ // Get to the next bytecode if possible
+ assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
+ // Check to see if we can grab the number of outgoing arguments
+ // at an uncommon trap for an invoke (where the compiler
+ // generates debug info before the invoke has executed)
+ Bytecodes::Code cur_code = str.next();
+ if (cur_code == Bytecodes::_invokevirtual ||
+ cur_code == Bytecodes::_invokespecial ||
+ cur_code == Bytecodes::_invokestatic ||
+ cur_code == Bytecodes::_invokeinterface) {
+ Bytecode_invoke* invoke = Bytecode_invoke_at(mh, iframe->interpreter_frame_bci());
+ symbolHandle signature(thread, invoke->signature());
+ ArgumentSizeComputer asc(signature);
+ cur_invoke_parameter_size = asc.size();
+ if (cur_code != Bytecodes::_invokestatic) {
+ // Add in receiver
+ ++cur_invoke_parameter_size;
+ }
+ }
+ if (str.bci() < max_bci) {
+ Bytecodes::Code bc = str.next();
+ if (bc >= 0) {
+ // The interpreter oop map generator reports results before
+ // the current bytecode has executed except in the case of
+ // calls. It seems to be hard to tell whether the compiler
+ // has emitted debug information matching the "state before"
+ // a given bytecode or the state after, so we try both
+ switch (cur_code) {
+ case Bytecodes::_invokevirtual:
+ case Bytecodes::_invokespecial:
+ case Bytecodes::_invokestatic:
+ case Bytecodes::_invokeinterface:
+ case Bytecodes::_athrow:
+ break;
+ default: {
+ InterpreterOopMap next_mask;
+ OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
+ next_mask_expression_stack_size = next_mask.expression_stack_size();
+ // Need to subtract off the size of the result type of
+ // the bytecode because this is not described in the
+ // debug info but returned to the interpreter in the TOS
+ // caching register
+ BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
+ if (bytecode_result_type != T_ILLEGAL) {
+ top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
+ }
+ assert(top_frame_expression_stack_adjustment >= 0, "");
+ try_next_mask = true;
+ break;
+ }
+ }
+ }
+ }
+
+ // Verify stack depth and oops in frame
+ // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
+ if (!(
+ /* SPARC */
+ (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
+ /* x86 */
+ (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
+ (try_next_mask &&
+ (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
+ top_frame_expression_stack_adjustment))) ||
+ (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
+ (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute) &&
+ (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
+ )) {
+ ttyLocker ttyl;
+
+ // Print out some information that will help us debug the problem
+ tty->print_cr("Wrong number of expression stack elements during deoptimization");
+ tty->print_cr(" Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
+ tty->print_cr(" Fabricated interpreter frame had %d expression stack elements",
+ iframe->interpreter_frame_expression_stack_size());
+ tty->print_cr(" Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
+ tty->print_cr(" try_next_mask = %d", try_next_mask);
+ tty->print_cr(" next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
+ tty->print_cr(" callee_size_of_parameters = %d", callee_size_of_parameters);
+ tty->print_cr(" callee_max_locals = %d", callee_max_locals);
+ tty->print_cr(" top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
+ tty->print_cr(" exec_mode = %d", exec_mode);
+ tty->print_cr(" cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
+ tty->print_cr(" Thread = " INTPTR_FORMAT ", thread ID = " UINTX_FORMAT, thread, thread->osthread()->thread_id());
+ tty->print_cr(" Interpreted frames:");
+ for (int k = 0; k < cur_array->frames(); k++) {
+ vframeArrayElement* el = cur_array->element(k);
+ tty->print_cr(" %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
+ }
+ cur_array->print_on_2(tty);
+ guarantee(false, "wrong number of expression stack elements during deopt");
+ }
+ VerifyOopClosure verify;
+ iframe->oops_interpreted_do(&verify, &rm, false);
+ callee_size_of_parameters = mh->size_of_parameters();
+ callee_max_locals = mh->max_locals();
+ is_top_frame = false;
+ }
+ }
+#endif /* !PRODUCT */
+
+
+ return bt;
+JRT_END
+
+
+int Deoptimization::deoptimize_dependents() {
+ Threads::deoptimized_wrt_marked_nmethods();
+ return 0;
+}
+
+
+#ifdef COMPILER2
+bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
+ Handle pending_exception(thread->pending_exception());
+ const char* exception_file = thread->exception_file();
+ int exception_line = thread->exception_line();
+ thread->clear_pending_exception();
+
+ for (int i = 0; i < objects->length(); i++) {
+ assert(objects->at(i)->is_object(), "invalid debug information");
+ ObjectValue* sv = (ObjectValue*) objects->at(i);
+
+ KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
+ oop obj = NULL;
+
+ if (k->oop_is_instance()) {
+ instanceKlass* ik = instanceKlass::cast(k());
+ obj = ik->allocate_instance(CHECK_(false));
+ } else if (k->oop_is_typeArray()) {
+ typeArrayKlass* ak = typeArrayKlass::cast(k());
+ assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
+ int len = sv->field_size() / type2size[ak->element_type()];
+ obj = ak->allocate(len, CHECK_(false));
+ } else if (k->oop_is_objArray()) {
+ objArrayKlass* ak = objArrayKlass::cast(k());
+ obj = ak->allocate(sv->field_size(), CHECK_(false));
+ }
+
+ assert(obj != NULL, "allocation failed");
+ assert(sv->value().is_null(), "redundant reallocation");
+ sv->set_value(obj);
+ }
+
+ if (pending_exception.not_null()) {
+ thread->set_pending_exception(pending_exception(), exception_file, exception_line);
+ }
+
+ return true;
+}
+
+// This assumes that the fields are stored in ObjectValue in the same order
+// they are yielded by do_nonstatic_fields.
+class FieldReassigner: public FieldClosure {
+ frame* _fr;
+ RegisterMap* _reg_map;
+ ObjectValue* _sv;
+ instanceKlass* _ik;
+ oop _obj;
+
+ int _i;
+public:
+ FieldReassigner(frame* fr, RegisterMap* reg_map, ObjectValue* sv, oop obj) :
+ _fr(fr), _reg_map(reg_map), _sv(sv), _obj(obj), _i(0) {}
+
+ int i() const { return _i; }
+
+
+ void do_field(fieldDescriptor* fd) {
+ StackValue* value =
+ StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(i()));
+ int offset = fd->offset();
+ switch (fd->field_type()) {
+ case T_OBJECT: case T_ARRAY:
+ assert(value->type() == T_OBJECT, "Agreement.");
+ _obj->obj_field_put(offset, value->get_obj()());
+ break;
+
+ case T_LONG: case T_DOUBLE: {
+ assert(value->type() == T_INT, "Agreement.");
+ StackValue* low =
+ StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(++_i));
+ jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
+ _obj->long_field_put(offset, res);
+ break;
+ }
+
+ case T_INT: case T_FLOAT: // 4 bytes.
+ assert(value->type() == T_INT, "Agreement.");
+ _obj->int_field_put(offset, (jint)value->get_int());
+ break;
+
+ case T_SHORT: case T_CHAR: // 2 bytes
+ assert(value->type() == T_INT, "Agreement.");
+ _obj->short_field_put(offset, (jshort)value->get_int());
+ break;
+
+ case T_BOOLEAN: // 1 byte
+ assert(value->type() == T_INT, "Agreement.");
+ _obj->bool_field_put(offset, (jboolean)value->get_int());
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+ _i++;
+ }
+};
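+
+// Note: a FieldReassigner is created per reallocated instance and handed to
+// instanceKlass::do_nonstatic_fields() from reassign_fields() below; _i tracks
+// the position in the ObjectValue's field list and is advanced an extra step
+// for longs/doubles, which occupy two stack-value slots.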
+
+// restore elements of an eliminated type array
+void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
+ StackValue* low;
+ jlong lval;
+ int index = 0;
+
+ for (int i = 0; i < sv->field_size(); i++) {
+ StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
+ switch(type) {
+ case T_BOOLEAN: obj->bool_at_put (index, (jboolean) value->get_int()); break;
+ case T_BYTE: obj->byte_at_put (index, (jbyte) value->get_int()); break;
+ case T_CHAR: obj->char_at_put (index, (jchar) value->get_int()); break;
+ case T_SHORT: obj->short_at_put(index, (jshort) value->get_int()); break;
+ case T_INT: obj->int_at_put (index, (jint) value->get_int()); break;
+ case T_FLOAT: obj->float_at_put(index, (jfloat) value->get_int()); break;
+ case T_LONG:
+ case T_DOUBLE:
+ low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
+ lval = jlong_from((jint)value->get_int(), (jint)low->get_int());
+        obj->long_at_put(index, lval);
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ index++;
+ }
+}
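+
+// Note: as in FieldReassigner::do_field() above, long and double elements
+// consume two consecutive stack values (high word first), so the loop index
+// is advanced an extra step for those element types.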
+
+
+// restore fields of an eliminated object array
+void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
+ for (int i = 0; i < sv->field_size(); i++) {
+ StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
+ assert(value->type() == T_OBJECT, "object element expected");
+ obj->obj_at_put(i, value->get_obj()());
+ }
+}
+
+
+// restore fields of all eliminated objects and arrays
+void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) {
+ for (int i = 0; i < objects->length(); i++) {
+ ObjectValue* sv = (ObjectValue*) objects->at(i);
+ KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
+ Handle obj = sv->value();
+ assert(obj.not_null(), "reallocation was missed");
+
+ if (k->oop_is_instance()) {
+ instanceKlass* ik = instanceKlass::cast(k());
+ FieldReassigner reassign(fr, reg_map, sv, obj());
+ ik->do_nonstatic_fields(&reassign);
+ } else if (k->oop_is_typeArray()) {
+ typeArrayKlass* ak = typeArrayKlass::cast(k());
+ reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
+ } else if (k->oop_is_objArray()) {
+ reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
+ }
+ }
+}
+
+
+// relock objects for which synchronization was eliminated
+void Deoptimization::relock_objects(frame* fr, RegisterMap* reg_map, GrowableArray<MonitorValue*>* monitors) {
+ for (int i = 0; i < monitors->length(); i++) {
+ MonitorValue* mv = monitors->at(i);
+ StackValue* owner = StackValue::create_stack_value(fr, reg_map, mv->owner());
+ if (mv->eliminated()) {
+ Handle obj = owner->get_obj();
+ assert(obj.not_null(), "reallocation was missed");
+ BasicLock* lock = StackValue::resolve_monitor_lock(fr, mv->basic_lock());
+ lock->set_displaced_header(obj->mark());
+ obj->set_mark((markOop) lock);
+ }
+ assert(owner->get_obj()->is_locked(), "object must be locked now");
+ }
+}
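+
+// Note: relock_objects() re-establishes a plain stack lock for each eliminated
+// monitor: the object's current mark word is saved as the BasicLock's displaced
+// header and the mark is then pointed at that lock, so the object ends up
+// stack-locked by the frame being deoptimized.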
+
+
+#ifndef PRODUCT
+// print information about reallocated objects
+void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
+ fieldDescriptor fd;
+
+ for (int i = 0; i < objects->length(); i++) {
+ ObjectValue* sv = (ObjectValue*) objects->at(i);
+ KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
+ Handle obj = sv->value();
+
+ tty->print(" object <" INTPTR_FORMAT "> of type ", sv->value()());
+ k->as_klassOop()->print_value();
+ tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
+ tty->cr();
+
+ if (Verbose) {
+ k->oop_print_on(obj(), tty);
+ }
+ }
+}
+#endif
+#endif // COMPILER2
+
+vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {
+
+#ifndef PRODUCT
+ if (TraceDeoptimization) {
+ ttyLocker ttyl;
+ tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", thread);
+ fr.print_on(tty);
+ tty->print_cr(" Virtual frames (innermost first):");
+ for (int index = 0; index < chunk->length(); index++) {
+ compiledVFrame* vf = chunk->at(index);
+ tty->print(" %2d - ", index);
+ vf->print_value();
+ int bci = chunk->at(index)->raw_bci();
+ const char* code_name;
+ if (bci == SynchronizationEntryBCI) {
+ code_name = "sync entry";
+ } else {
+ Bytecodes::Code code = Bytecodes::code_at(vf->method(), bci);
+ code_name = Bytecodes::name(code);
+ }
+ tty->print(" - %s", code_name);
+ tty->print_cr(" @ bci %d ", bci);
+ if (Verbose) {
+ vf->print();
+ tty->cr();
+ }
+ }
+ }
+#endif
+
+ // Register map for next frame (used for stack crawl). We capture
+ // the state of the deopt'ing frame's caller. Thus if we need to
+ // stuff a C2I adapter we can properly fill in the callee-save
+ // register locations.
+ frame caller = fr.sender(reg_map);
+ int frame_size = caller.sp() - fr.sp();
+
+ frame sender = caller;
+
+  // Since the Java thread being deoptimized will eventually adjust its own stack,
+ // the vframeArray containing the unpacking information is allocated in the C heap.
+ // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
+ vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr);
+
+ // Compare the vframeArray to the collected vframes
+ assert(array->structural_compare(thread, chunk), "just checking");
+ Events::log("# vframes = %d", (intptr_t)chunk->length());
+
+#ifndef PRODUCT
+ if (TraceDeoptimization) {
+ ttyLocker ttyl;
+ tty->print_cr(" Created vframeArray " INTPTR_FORMAT, array);
+ if (Verbose) {
+ int count = 0;
+ // this used to leak deoptimizedVFrame like it was going out of style!!!
+ for (int index = 0; index < array->frames(); index++ ) {
+ vframeArrayElement* e = array->element(index);
+ e->print(tty);
+
+ /*
+ No printing yet.
+ array->vframe_at(index)->print_activation(count++);
+ // better as...
+ array->print_activation_for(index, count++);
+ */
+ }
+ }
+ }
+#endif // PRODUCT
+
+ return array;
+}
+
+
+static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
+ GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
+ for (int i = 0; i < monitors->length(); i++) {
+ MonitorInfo* mon_info = monitors->at(i);
+ if (mon_info->owner() != NULL) {
+ objects_to_revoke->append(Handle(mon_info->owner()));
+ }
+ }
+}
+
+
+void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
+ if (!UseBiasedLocking) {
+ return;
+ }
+
+ GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
+
+ // Unfortunately we don't have a RegisterMap available in most of
+ // the places we want to call this routine so we need to walk the
+ // stack again to update the register map.
+ if (map == NULL || !map->update_map()) {
+ StackFrameStream sfs(thread, true);
+ bool found = false;
+ while (!found && !sfs.is_done()) {
+ frame* cur = sfs.current();
+ sfs.next();
+ found = cur->id() == fr.id();
+ }
+ assert(found, "frame to be deoptimized not found on target thread's stack");
+ map = sfs.register_map();
+ }
+
+ vframe* vf = vframe::new_vframe(&fr, map, thread);
+ compiledVFrame* cvf = compiledVFrame::cast(vf);
+ // Revoke monitors' biases in all scopes
+ while (!cvf->is_top()) {
+ collect_monitors(cvf, objects_to_revoke);
+ cvf = compiledVFrame::cast(cvf->sender());
+ }
+ collect_monitors(cvf, objects_to_revoke);
+
+ if (SafepointSynchronize::is_at_safepoint()) {
+ BiasedLocking::revoke_at_safepoint(objects_to_revoke);
+ } else {
+ BiasedLocking::revoke(objects_to_revoke);
+ }
+}
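+
+// Note: when the caller cannot supply an up-to-date RegisterMap, the code above
+// re-walks the target thread's stack solely to obtain one for the frame being
+// deoptimized; the actual revocation is delegated to BiasedLocking, using the
+// safepoint variant when we are already at a safepoint.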
+
+
+void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
+ if (!UseBiasedLocking) {
+ return;
+ }
+
+ assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
+ GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
+ for (JavaThread* jt = Threads::first(); jt != NULL ; jt = jt->next()) {
+ if (jt->has_last_Java_frame()) {
+ StackFrameStream sfs(jt, true);
+ while (!sfs.is_done()) {
+ frame* cur = sfs.current();
+ if (cb->contains(cur->pc())) {
+ vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt);
+ compiledVFrame* cvf = compiledVFrame::cast(vf);
+ // Revoke monitors' biases in all scopes
+ while (!cvf->is_top()) {
+ collect_monitors(cvf, objects_to_revoke);
+ cvf = compiledVFrame::cast(cvf->sender());
+ }
+ collect_monitors(cvf, objects_to_revoke);
+ }
+ sfs.next();
+ }
+ }
+ }
+ BiasedLocking::revoke_at_safepoint(objects_to_revoke);
+}
+
+
+void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr) {
+ assert(fr.can_be_deoptimized(), "checking frame type");
+
+ gather_statistics(Reason_constraint, Action_none, Bytecodes::_illegal);
+
+ EventMark m("Deoptimization (pc=" INTPTR_FORMAT ", sp=" INTPTR_FORMAT ")", fr.pc(), fr.id());
+
+ // Patch the nmethod so that when execution returns to it we will
+ // deopt the execution state and return to the interpreter.
+ fr.deoptimize(thread);
+}
+
+void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
+  // Deoptimize only if the frame comes from compiled code.
+ // Do not deoptimize the frame which is already patched
+ // during the execution of the loops below.
+ if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
+ return;
+ }
+ ResourceMark rm;
+ DeoptimizationMarker dm;
+ if (UseBiasedLocking) {
+ revoke_biases_of_monitors(thread, fr, map);
+ }
+ deoptimize_single_frame(thread, fr);
+
+}
+
+
+void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
+ // Compute frame and register map based on thread and sp.
+ RegisterMap reg_map(thread, UseBiasedLocking);
+ frame fr = thread->last_frame();
+ while (fr.id() != id) {
+ fr = fr.sender(&reg_map);
+ }
+ deoptimize(thread, fr, &reg_map);
+}
+
+
+// JVMTI PopFrame support
+JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
+{
+ thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
+}
+JRT_END
+
+
+#ifdef COMPILER2
+void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
+ // in case of an unresolved klass entry, load the class.
+ if (constant_pool->tag_at(index).is_unresolved_klass()) {
+ klassOop tk = constant_pool->klass_at(index, CHECK);
+ return;
+ }
+
+ if (!constant_pool->tag_at(index).is_symbol()) return;
+
+ Handle class_loader (THREAD, instanceKlass::cast(constant_pool->pool_holder())->class_loader());
+ symbolHandle symbol (THREAD, constant_pool->symbol_at(index));
+
+ // class name?
+ if (symbol->byte_at(0) != '(') {
+ Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
+ SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
+ return;
+ }
+
+ // then it must be a signature!
+ for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
+ if (ss.is_object()) {
+ symbolOop s = ss.as_symbol(CHECK);
+ symbolHandle class_name (THREAD, s);
+ Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
+ SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
+ }
+ }
+}
+
+
+void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index) {
+ EXCEPTION_MARK;
+ load_class_by_index(constant_pool, index, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+    // Exception happened during classloading. We ignore the exception here, since it
+    // is going to be rethrown anyway when the current activation is deoptimized and
+    // the interpreter re-executes the bytecode.
+ CLEAR_PENDING_EXCEPTION;
+ }
+}
+
+JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
+ HandleMark hm;
+
+ // uncommon_trap() is called at the beginning of the uncommon trap
+ // handler. Note this fact before we start generating temporary frames
+ // that can confuse an asynchronous stack walker. This counter is
+ // decremented at the end of unpack_frames().
+ thread->inc_in_deopt_handler();
+
+ // We need to update the map if we have biased locking.
+ RegisterMap reg_map(thread, UseBiasedLocking);
+ frame stub_frame = thread->last_frame();
+ frame fr = stub_frame.sender(&reg_map);
+ // Make sure the calling nmethod is not getting deoptimized and removed
+ // before we are done with it.
+ nmethodLocker nl(fr.pc());
+
+ {
+ ResourceMark rm;
+
+ // Revoke biases of any monitors in the frame to ensure we can migrate them
+ revoke_biases_of_monitors(thread, fr, &reg_map);
+
+ DeoptReason reason = trap_request_reason(trap_request);
+ DeoptAction action = trap_request_action(trap_request);
+ jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
+
+ Events::log("Uncommon trap occurred @" INTPTR_FORMAT " unloaded_class_index = %d", fr.pc(), (int) trap_request);
+ vframe* vf = vframe::new_vframe(&fr, &reg_map, thread);
+ compiledVFrame* cvf = compiledVFrame::cast(vf);
+
+ nmethod* nm = cvf->code();
+
+ ScopeDesc* trap_scope = cvf->scope();
+ methodHandle trap_method = trap_scope->method();
+ int trap_bci = trap_scope->bci();
+ Bytecodes::Code trap_bc = Bytecode_at(trap_method->bcp_from(trap_bci))->java_code();
+
+ // Record this event in the histogram.
+ gather_statistics(reason, action, trap_bc);
+
+ // Ensure that we can record deopt. history:
+ bool create_if_missing = ProfileTraps;
+
+ methodDataHandle trap_mdo
+ (THREAD, get_method_data(thread, trap_method, create_if_missing));
+
+ // Print a bunch of diagnostics, if requested.
+ if (TraceDeoptimization || LogCompilation) {
+ ResourceMark rm;
+ ttyLocker ttyl;
+ char buf[100];
+ if (xtty != NULL) {
+ xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT"' %s",
+ os::current_thread_id(),
+ format_trap_request(buf, sizeof(buf), trap_request));
+ nm->log_identity(xtty);
+ }
+ symbolHandle class_name;
+ bool unresolved = false;
+ if (unloaded_class_index >= 0) {
+ constantPoolHandle constants (THREAD, trap_method->constants());
+ if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
+ class_name = symbolHandle(THREAD,
+ constants->klass_name_at(unloaded_class_index));
+ unresolved = true;
+ if (xtty != NULL)
+ xtty->print(" unresolved='1'");
+ } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
+ class_name = symbolHandle(THREAD,
+ constants->symbol_at(unloaded_class_index));
+ }
+ if (xtty != NULL)
+ xtty->name(class_name);
+ }
+ if (xtty != NULL && trap_mdo.not_null()) {
+ // Dump the relevant MDO state.
+ // This is the deopt count for the current reason, any previous
+ // reasons or recompiles seen at this point.
+ int dcnt = trap_mdo->trap_count(reason);
+ if (dcnt != 0)
+ xtty->print(" count='%d'", dcnt);
+ ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
+ int dos = (pdata == NULL)? 0: pdata->trap_state();
+ if (dos != 0) {
+ xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
+ if (trap_state_is_recompiled(dos)) {
+ int recnt2 = trap_mdo->overflow_recompile_count();
+ if (recnt2 != 0)
+ xtty->print(" recompiles2='%d'", recnt2);
+ }
+ }
+ }
+ if (xtty != NULL) {
+ xtty->stamp();
+ xtty->end_head();
+ }
+ if (TraceDeoptimization) { // make noise on the tty
+ tty->print("Uncommon trap occurred in");
+ nm->method()->print_short_name(tty);
+ tty->print(" (@" INTPTR_FORMAT ") thread=%d reason=%s action=%s unloaded_class_index=%d",
+ fr.pc(),
+ (int) os::current_thread_id(),
+ trap_reason_name(reason),
+ trap_action_name(action),
+ unloaded_class_index);
+ if (class_name.not_null()) {
+ tty->print(unresolved ? " unresolved class: " : " symbol: ");
+ class_name->print_symbol_on(tty);
+ }
+ tty->cr();
+ }
+ if (xtty != NULL) {
+ // Log the precise location of the trap.
+ for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
+ xtty->begin_elem("jvms bci='%d'", sd->bci());
+ xtty->method(sd->method());
+ xtty->end_elem();
+ if (sd->is_top()) break;
+ }
+ xtty->tail("uncommon_trap");
+ }
+ }
+ // (End diagnostic printout.)
+
+ // Load class if necessary
+ if (unloaded_class_index >= 0) {
+ constantPoolHandle constants(THREAD, trap_method->constants());
+ load_class_by_index(constants, unloaded_class_index);
+ }
+
+ // Flush the nmethod if necessary and desirable.
+ //
+ // We need to avoid situations where we are re-flushing the nmethod
+ // because of a hot deoptimization site. Repeated flushes at the same
+ // point need to be detected by the compiler and avoided. If the compiler
+ // cannot avoid them (or has a bug and "refuses" to avoid them), this
+ // module must take measures to avoid an infinite cycle of recompilation
+ // and deoptimization. There are several such measures:
+ //
+ // 1. If a recompilation is ordered a second time at some site X
+ // and for the same reason R, the action is adjusted to 'reinterpret',
+ // to give the interpreter time to exercise the method more thoroughly.
+ // If this happens, the method's overflow_recompile_count is incremented.
+ //
+ // 2. If the compiler fails to reduce the deoptimization rate, then
+ // the method's overflow_recompile_count will begin to exceed the set
+ // limit PerBytecodeRecompilationCutoff. If this happens, the action
+ // is adjusted to 'make_not_compilable', and the method is abandoned
+ // to the interpreter. This is a performance hit for hot methods,
+ // but is better than a disastrous infinite cycle of recompilations.
+ // (Actually, only the method containing the site X is abandoned.)
+ //
+ // 3. In parallel with the previous measures, if the total number of
+ // recompilations of a method exceeds the much larger set limit
+ // PerMethodRecompilationCutoff, the method is abandoned.
+ // This should only happen if the method is very large and has
+ // many "lukewarm" deoptimizations. The code which enforces this
+ // limit is elsewhere (class nmethod, class methodOopDesc).
+ //
+ // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
+ // to recompile at each bytecode independently of the per-BCI cutoff.
+ //
+ // The decision to update code is up to the compiler, and is encoded
+ // in the Action_xxx code. If the compiler requests Action_none
+ // no trap state is changed, no compiled code is changed, and the
+ // computation suffers along in the interpreter.
+ //
+ // The other action codes specify various tactics for decompilation
+ // and recompilation. Action_maybe_recompile is the loosest, and
+ // allows the compiled code to stay around until enough traps are seen,
+ // and until the compiler gets around to recompiling the trapping method.
+ //
+ // The other actions cause immediate removal of the present code.
+
+ bool update_trap_state = true;
+ bool make_not_entrant = false;
+ bool make_not_compilable = false;
+ bool reset_counters = false;
+ switch (action) {
+ case Action_none:
+ // Keep the old code.
+ update_trap_state = false;
+ break;
+ case Action_maybe_recompile:
+ // We do not need to invalidate the present code, but we may
+ // initiate another compilation.
+ // Start the compiler without (necessarily) invalidating the nmethod.
+ // The system will tolerate the old code, but new code should be
+ // generated when possible.
+ break;
+ case Action_reinterpret:
+ // Go back into the interpreter for a while, and then consider
+ // recompiling from scratch.
+ make_not_entrant = true;
+ // Reset invocation counter for the outermost method.
+ // This will allow the interpreter to exercise the bytecodes
+ // for a while before recompiling.
+ // By contrast, Action_make_not_entrant is immediate.
+ //
+ // Note that the compiler will track null_check, null_assert,
+ // range_check, and class_check events and log them as if they
+ // had been traps taken from compiled code. This will update
+ // the MDO trap history so that the next compilation will
+ // properly detect hot trap sites.
+ reset_counters = true;
+ break;
+ case Action_make_not_entrant:
+ // Request immediate recompilation, and get rid of the old code.
+ // Make them not entrant, so next time they are called they get
+ // recompiled. Unloaded classes are loaded now, so the code can be
+ // recompiled before it is next called. Same for uninitialized classes.
+ // The interpreter will link the missing class, if any.
+ make_not_entrant = true;
+ break;
+ case Action_make_not_compilable:
+ // Give up on compiling this method at all.
+ make_not_entrant = true;
+ make_not_compilable = true;
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+
+ // Setting +ProfileTraps fixes the following, on all platforms:
+ // 4852688: ProfileInterpreter is off by default for ia64. The result is
+ // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
+ // recompile relies on a methodDataOop to record heroic opt failures.
+
+ // Whether the interpreter is producing MDO data or not, we also need
+ // to use the MDO to detect hot deoptimization points and control
+ // aggressive optimization.
+ if (ProfileTraps && update_trap_state && trap_mdo.not_null()) {
+ assert(trap_mdo() == get_method_data(thread, trap_method, false), "sanity");
+ uint this_trap_count = 0;
+ bool maybe_prior_trap = false;
+ bool maybe_prior_recompile = false;
+ ProfileData* pdata
+ = query_update_method_data(trap_mdo, trap_bci, reason,
+ //outputs:
+ this_trap_count,
+ maybe_prior_trap,
+ maybe_prior_recompile);
+ // Because the interpreter also counts null, div0, range, and class
+ // checks, these traps from compiled code are double-counted.
+ // This is harmless; it just means that the PerXTrapLimit values
+ // are in effect a little smaller than they look.
+
+ DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
+ if (per_bc_reason != Reason_none) {
+ // Now take action based on the partially known per-BCI history.
+ if (maybe_prior_trap
+ && this_trap_count >= (uint)PerBytecodeTrapLimit) {
+ // If there are too many traps at this BCI, force a recompile.
+ // This will allow the compiler to see the limit overflow, and
+ // take corrective action, if possible. The compiler generally
+ // does not use the exact PerBytecodeTrapLimit value, but instead
+ // changes its tactics if it sees any traps at all. This provides
+ // a little hysteresis, delaying a recompile until a trap happens
+ // several times.
+ //
+ // Actually, since there is only one bit of counter per BCI,
+ // the possible per-BCI counts are {0,1,(per-method count)}.
+ // This produces accurate results if in fact there is only
+ // one hot trap site, but begins to get fuzzy if there are
+ // many sites. For example, if there are ten sites each
+ // trapping two or more times, they each get the blame for
+ // all of their traps.
+ make_not_entrant = true;
+ }
+
+ // Detect repeated recompilation at the same BCI, and enforce a limit.
+ if (make_not_entrant && maybe_prior_recompile) {
+ // More than one recompile at this point.
+ trap_mdo->inc_overflow_recompile_count();
+ if (maybe_prior_trap
+ && ((uint)trap_mdo->overflow_recompile_count()
+ > (uint)PerBytecodeRecompilationCutoff)) {
+ // Give up on the method containing the bad BCI.
+ if (trap_method() == nm->method()) {
+ make_not_compilable = true;
+ } else {
+ trap_method->set_not_compilable();
+ // But give grace to the enclosing nm->method().
+ }
+ }
+ }
+ } else {
+ // For reasons which are not recorded per-bytecode, we simply
+ // force recompiles unconditionally.
+ // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
+ make_not_entrant = true;
+ }
+
+ // Go back to the compiler if there are too many traps in this method.
+ if (this_trap_count >= (uint)PerMethodTrapLimit) {
+ // If there are too many traps in this method, force a recompile.
+ // This will allow the compiler to see the limit overflow, and
+ // take corrective action, if possible.
+ // (This condition is an unlikely backstop only, because the
+ // PerBytecodeTrapLimit is more likely to take effect first,
+ // if it is applicable.)
+ make_not_entrant = true;
+ }
+
+ // Here's more hysteresis: If there has been a recompile at
+ // this trap point already, run the method in the interpreter
+ // for a while to exercise it more thoroughly.
+ if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
+ reset_counters = true;
+ }
+
+ if (make_not_entrant && pdata != NULL) {
+ // Record the recompilation event, if any.
+ int tstate0 = pdata->trap_state();
+ int tstate1 = trap_state_set_recompiled(tstate0, true);
+ if (tstate1 != tstate0)
+ pdata->set_trap_state(tstate1);
+ }
+ }
+
+ // Take requested actions on the method:
+
+ // Reset invocation counters
+ if (reset_counters) {
+ if (nm->is_osr_method())
+ reset_invocation_counter(trap_scope, CompileThreshold);
+ else
+ reset_invocation_counter(trap_scope);
+ }
+
+ // Recompile
+ if (make_not_entrant) {
+ nm->make_not_entrant();
+ }
+
+ // Give up compiling
+ if (make_not_compilable) {
+ assert(make_not_entrant, "consistent");
+ nm->method()->set_not_compilable();
+ }
+
+ } // Free marked resources
+
+}
+JRT_END
+
+methodDataOop
+Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
+ bool create_if_missing) {
+ Thread* THREAD = thread;
+ methodDataOop mdo = m()->method_data();
+ if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
+ // Build an MDO. Ignore errors like OutOfMemory;
+ // that simply means we won't have an MDO to update.
+ methodOopDesc::build_interpreter_method_data(m, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
+ CLEAR_PENDING_EXCEPTION;
+ }
+ mdo = m()->method_data();
+ }
+ return mdo;
+}
+
+ProfileData*
+Deoptimization::query_update_method_data(methodDataHandle trap_mdo,
+ int trap_bci,
+ Deoptimization::DeoptReason reason,
+ //outputs:
+ uint& ret_this_trap_count,
+ bool& ret_maybe_prior_trap,
+ bool& ret_maybe_prior_recompile) {
+ uint prior_trap_count = trap_mdo->trap_count(reason);
+ uint this_trap_count = trap_mdo->inc_trap_count(reason);
+
+ // If the runtime cannot find a place to store trap history,
+ // it is estimated based on the general condition of the method.
+ // If the method has ever been recompiled, or has ever incurred
+ // a trap with the present reason, then this BCI is assumed
+ // (pessimistically) to be the culprit.
+ bool maybe_prior_trap = (prior_trap_count != 0);
+ bool maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
+ ProfileData* pdata = NULL;
+
+
+ // For reasons which are recorded per bytecode, we check per-BCI data.
+ DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
+ if (per_bc_reason != Reason_none) {
+ // Find the profile data for this BCI. If there isn't one,
+ // try to allocate one from the MDO's set of spares.
+ // This will let us detect a repeated trap at this point.
+ pdata = trap_mdo->allocate_bci_to_data(trap_bci);
+
+ if (pdata != NULL) {
+ // Query the trap state of this profile datum.
+ int tstate0 = pdata->trap_state();
+ if (!trap_state_has_reason(tstate0, per_bc_reason))
+ maybe_prior_trap = false;
+ if (!trap_state_is_recompiled(tstate0))
+ maybe_prior_recompile = false;
+
+ // Update the trap state of this profile datum.
+ int tstate1 = tstate0;
+ // Record the reason.
+ tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
+ // Store the updated state on the MDO, for next time.
+ if (tstate1 != tstate0)
+ pdata->set_trap_state(tstate1);
+ } else {
+ if (LogCompilation && xtty != NULL)
+ // Missing MDP? Leave a small complaint in the log.
+ xtty->elem("missing_mdp bci='%d'", trap_bci);
+ }
+ }
+
+ // Return results:
+ ret_this_trap_count = this_trap_count;
+ ret_maybe_prior_trap = maybe_prior_trap;
+ ret_maybe_prior_recompile = maybe_prior_recompile;
+ return pdata;
+}
+
+void
+Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
+ ResourceMark rm;
+ // Ignored outputs:
+ uint ignore_this_trap_count;
+ bool ignore_maybe_prior_trap;
+ bool ignore_maybe_prior_recompile;
+ query_update_method_data(trap_mdo, trap_bci,
+ (DeoptReason)reason,
+ ignore_this_trap_count,
+ ignore_maybe_prior_trap,
+ ignore_maybe_prior_recompile);
+}
+
+void Deoptimization::reset_invocation_counter(ScopeDesc* trap_scope, jint top_count) {
+ ScopeDesc* sd = trap_scope;
+ for (; !sd->is_top(); sd = sd->sender()) {
+ // Reset invocation counters of inlined methods, since they can also trigger compilations.
+ sd->method()->invocation_counter()->reset();
+ }
+ InvocationCounter* c = sd->method()->invocation_counter();
+ if (top_count != _no_count) {
+ // It was an OSR method, so bump the count higher.
+ c->set(c->state(), top_count);
+ } else {
+ c->reset();
+ }
+ sd->method()->backedge_counter()->reset();
+}
+
+Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {
+
+ // Still in Java; no safepoints yet
+ {
+ // This enters VM and may safepoint
+ uncommon_trap_inner(thread, trap_request);
+ }
+ return fetch_unroll_info_helper(thread);
+}
+
+// Local derived constants.
+// Further breakdown of DataLayout::trap_state, as promised by DataLayout.
+const int DS_REASON_MASK = DataLayout::trap_mask >> 1;
+const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
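+// For instance, assuming DataLayout::trap_mask is 0xF (one recompile bit on
+// top of the 3 reason bits the asserts below rely on), this yields
+// DS_REASON_MASK == 0x7 (bits 0..2 hold the per-BCI reason) and
+// DS_RECOMPILE_BIT == 0x8 (bit 3 records that a recompile was requested).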
+
+//---------------------------trap_state_reason---------------------------------
+Deoptimization::DeoptReason
+Deoptimization::trap_state_reason(int trap_state) {
+ // This assert provides the link between the width of DataLayout::trap_bits
+ // and the encoding of "recorded" reasons. It ensures there are enough
+ // bits to store all needed reasons in the per-BCI MDO profile.
+ assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
+ int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
+ trap_state -= recompile_bit;
+ if (trap_state == DS_REASON_MASK) {
+ return Reason_many;
+ } else {
+ assert((int)Reason_none == 0, "state=0 => Reason_none");
+ return (DeoptReason)trap_state;
+ }
+}
+//-------------------------trap_state_has_reason-------------------------------
+int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
+ assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
+ assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
+ int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
+ trap_state -= recompile_bit;
+ if (trap_state == DS_REASON_MASK) {
+ return -1; // true, unspecifically (bottom of state lattice)
+ } else if (trap_state == reason) {
+ return 1; // true, definitely
+ } else if (trap_state == 0) {
+ return 0; // false, definitely (top of state lattice)
+ } else {
+ return 0; // false, definitely
+ }
+}
+//-------------------------trap_state_add_reason-------------------------------
+int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
+ assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
+ int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
+ trap_state -= recompile_bit;
+ if (trap_state == DS_REASON_MASK) {
+ return trap_state + recompile_bit; // already at state lattice bottom
+ } else if (trap_state == reason) {
+ return trap_state + recompile_bit; // the condition is already true
+ } else if (trap_state == 0) {
+ return reason + recompile_bit; // no condition has yet been true
+ } else {
+ return DS_REASON_MASK + recompile_bit; // fall to state lattice bottom
+ }
+}
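+// For example, adding Reason_class_check to an empty state records that
+// reason; adding a different recorded reason afterwards drops the state to
+// the lattice bottom DS_REASON_MASK, which trap_state_reason() above then
+// reports as Reason_many. The recompile bit is preserved in every case.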
+//-----------------------trap_state_is_recompiled------------------------------
+bool Deoptimization::trap_state_is_recompiled(int trap_state) {
+ return (trap_state & DS_RECOMPILE_BIT) != 0;
+}
+//-----------------------trap_state_set_recompiled-----------------------------
+int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
+ if (z) return trap_state | DS_RECOMPILE_BIT;
+ else return trap_state & ~DS_RECOMPILE_BIT;
+}
+//---------------------------format_trap_state---------------------------------
+// This is used for debugging and diagnostics, including hotspot.log output.
+const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
+ int trap_state) {
+ DeoptReason reason = trap_state_reason(trap_state);
+ bool recomp_flag = trap_state_is_recompiled(trap_state);
+ // Re-encode the state from its decoded components.
+ int decoded_state = 0;
+ if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
+ decoded_state = trap_state_add_reason(decoded_state, reason);
+ if (recomp_flag)
+ decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
+ // If the state re-encodes properly, format it symbolically.
+ // Because this routine is used for debugging and diagnostics,
+ // be robust even if the state is a strange value.
+ size_t len;
+ if (decoded_state != trap_state) {
+ // Random buggy state that doesn't decode??
+ len = jio_snprintf(buf, buflen, "#%d", trap_state);
+ } else {
+ len = jio_snprintf(buf, buflen, "%s%s",
+ trap_reason_name(reason),
+ recomp_flag ? " recompiled" : "");
+ }
+ if (len >= buflen)
+ buf[buflen-1] = '\0';
+ return buf;
+}
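+// For example, a state recording Reason_class_check with the recompile bit
+// set formats as "class_check recompiled", while a value that does not
+// re-encode cleanly falls back to the raw form "#<trap_state>".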
+
+
+//--------------------------------statics--------------------------------------
+Deoptimization::DeoptAction Deoptimization::_unloaded_action
+ = Deoptimization::Action_reinterpret;
+const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
+ // Note: Keep this in sync. with enum DeoptReason.
+ "none",
+ "null_check",
+ "null_assert",
+ "range_check",
+ "class_check",
+ "array_check",
+ "intrinsic",
+ "unloaded",
+ "uninitialized",
+ "unreached",
+ "unhandled",
+ "constraint",
+ "div0_check",
+ "age"
+};
+const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
+ // Note: Keep this in sync. with enum DeoptAction.
+ "none",
+ "maybe_recompile",
+ "reinterpret",
+ "make_not_entrant",
+ "make_not_compilable"
+};
+
+const char* Deoptimization::trap_reason_name(int reason) {
+ if (reason == Reason_many) return "many";
+ if ((uint)reason < Reason_LIMIT)
+ return _trap_reason_name[reason];
+ static char buf[20];
+ sprintf(buf, "reason%d", reason);
+ return buf;
+}
+const char* Deoptimization::trap_action_name(int action) {
+ if ((uint)action < Action_LIMIT)
+ return _trap_action_name[action];
+ static char buf[20];
+ sprintf(buf, "action%d", action);
+ return buf;
+}
+
+// This is used for debugging and diagnostics, including hotspot.log output.
+const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
+ int trap_request) {
+ jint unloaded_class_index = trap_request_index(trap_request);
+ const char* reason = trap_reason_name(trap_request_reason(trap_request));
+ const char* action = trap_action_name(trap_request_action(trap_request));
+ size_t len;
+ if (unloaded_class_index < 0) {
+ len = jio_snprintf(buf, buflen, "reason='%s' action='%s'",
+ reason, action);
+ } else {
+ len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'",
+ reason, action, unloaded_class_index);
+ }
+ if (len >= buflen)
+ buf[buflen-1] = '\0';
+ return buf;
+}
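+// For example, a negative trap_request encoding a null_check/make_not_entrant
+// pair formats as "reason='null_check' action='make_not_entrant'", while a
+// non-negative request (an unloaded constant pool index, say 42) formats as
+// "reason='unloaded' action='reinterpret' index='42'".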
+
+juint Deoptimization::_deoptimization_hist
+ [Deoptimization::Reason_LIMIT]
+ [1 + Deoptimization::Action_LIMIT]
+ [Deoptimization::BC_CASE_LIMIT]
+ = {0};
+
+enum {
+ LSB_BITS = 8,
+ LSB_MASK = right_n_bits(LSB_BITS)
+};
+
+void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
+ Bytecodes::Code bc) {
+ assert(reason >= 0 && reason < Reason_LIMIT, "oob");
+ assert(action >= 0 && action < Action_LIMIT, "oob");
+ _deoptimization_hist[Reason_none][0][0] += 1; // total
+ _deoptimization_hist[reason][0][0] += 1; // per-reason total
+ juint* cases = _deoptimization_hist[reason][1+action];
+ juint* bc_counter_addr = NULL;
+ juint bc_counter = 0;
+ // Look for an unused counter, or an exact match to this BC.
+ if (bc != Bytecodes::_illegal) {
+ for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
+ juint* counter_addr = &cases[bc_case];
+ juint counter = *counter_addr;
+ if ((counter == 0 && bc_counter_addr == NULL)
+ || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
+ // this counter is either free or is already devoted to this BC
+ bc_counter_addr = counter_addr;
+ bc_counter = counter | bc;
+ }
+ }
+ }
+ if (bc_counter_addr == NULL) {
+ // Overflow, or no given bytecode.
+ bc_counter_addr = &cases[BC_CASE_LIMIT-1];
+ bc_counter = (*bc_counter_addr & ~LSB_MASK); // clear LSB
+ }
+ *bc_counter_addr = bc_counter + (1 << LSB_BITS);
+}
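+// Each histogram cell packs the bytecode in its low LSB_BITS bits and the
+// trap count in the remaining bits. For example, three traps for the same
+// reason/action at the same bytecode leave the matching cell holding
+// (3 << LSB_BITS) | bc, while the overflow cell keeps its low bits cleared
+// and simply accumulates counts for bytecodes that did not find a slot.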
+
+jint Deoptimization::total_deoptimization_count() {
+ return _deoptimization_hist[Reason_none][0][0];
+}
+
+jint Deoptimization::deoptimization_count(DeoptReason reason) {
+ assert(reason >= 0 && reason < Reason_LIMIT, "oob");
+ return _deoptimization_hist[reason][0][0];
+}
+
+void Deoptimization::print_statistics() {
+ juint total = total_deoptimization_count();
+ juint account = total;
+ if (total != 0) {
+ ttyLocker ttyl;
+ if (xtty != NULL) xtty->head("statistics type='deoptimization'");
+ tty->print_cr("Deoptimization traps recorded:");
+ #define PRINT_STAT_LINE(name, r) \
+ tty->print_cr(" %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
+ PRINT_STAT_LINE("total", total);
+ // For each non-zero entry in the histogram, print the reason,
+ // the action, and (if specifically known) the type of bytecode.
+ for (int reason = 0; reason < Reason_LIMIT; reason++) {
+ for (int action = 0; action < Action_LIMIT; action++) {
+ juint* cases = _deoptimization_hist[reason][1+action];
+ for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
+ juint counter = cases[bc_case];
+ if (counter != 0) {
+ char name[1*K];
+ Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
+ if (bc_case == BC_CASE_LIMIT-1 && (int)bc == 0)
+ bc = Bytecodes::_illegal; // overflow slot: no specific bytecode recorded
+ sprintf(name, "%s/%s/%s",
+ trap_reason_name(reason),
+ trap_action_name(action),
+ Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
+ juint r = counter >> LSB_BITS;
+ tty->print_cr(" %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
+ account -= r;
+ }
+ }
+ }
+ }
+ if (account != 0) {
+ PRINT_STAT_LINE("unaccounted", account);
+ }
+ #undef PRINT_STAT_LINE
+ if (xtty != NULL) xtty->tail("statistics");
+ }
+}
+#else // COMPILER2
+
+
+// Stubs for a C1-only system.
+bool Deoptimization::trap_state_is_recompiled(int trap_state) {
+ return false;
+}
+
+const char* Deoptimization::trap_reason_name(int reason) {
+ return "unknown";
+}
+
+void Deoptimization::print_statistics() {
+ // no output
+}
+
+void
+Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
+ // no update
+}
+
+int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
+ return 0;
+}
+
+void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
+ Bytecodes::Code bc) {
+ // no update
+}
+
+const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
+ int trap_state) {
+ jio_snprintf(buf, buflen, "#%d", trap_state);
+ return buf;
+}
+
+#endif // COMPILER2
diff --git a/src/share/vm/runtime/deoptimization.hpp b/src/share/vm/runtime/deoptimization.hpp
new file mode 100644
index 000000000..63565704a
--- /dev/null
+++ b/src/share/vm/runtime/deoptimization.hpp
@@ -0,0 +1,348 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class ProfileData;
+class vframeArray;
+class MonitorValue;
+class ObjectValue;
+
+class Deoptimization : AllStatic {
+ public:
+ // What condition caused the deoptimization?
+ enum DeoptReason {
+ Reason_many = -1, // indicates presence of several reasons
+ Reason_none = 0, // indicates absence of a relevant deopt.
+ Reason_null_check, // saw unexpected null or zero divisor (@bci)
+ Reason_null_assert, // saw unexpected non-null or non-zero (@bci)
+ Reason_range_check, // saw unexpected array index (@bci)
+ Reason_class_check, // saw unexpected object class (@bci)
+ Reason_array_check, // saw unexpected array class (aastore @bci)
+ Reason_intrinsic, // saw unexpected operand to intrinsic (@bci)
+ Reason_unloaded, // unloaded class or constant pool entry
+ Reason_uninitialized, // bad class state (uninitialized)
+ Reason_unreached, // code is not reached, compiler
+ Reason_unhandled, // arbitrary compiler limitation
+ Reason_constraint, // arbitrary runtime constraint violated
+ Reason_div0_check, // a null_check due to division by zero
+ Reason_age, // nmethod too old; tier threshold reached
+ Reason_LIMIT,
+ // Note: Keep this enum in sync. with _trap_reason_name.
+ Reason_RECORDED_LIMIT = Reason_unloaded // some are not recorded per bc
+ // Note: Reason_RECORDED_LIMIT should be < 8 to fit into 3 bits of
+ // DataLayout::trap_bits. This dependency is enforced indirectly
+ // via asserts, to avoid excessive direct header-to-header dependencies.
+ // See Deoptimization::trap_state_reason and class DataLayout.
+ };
+
+ // What action must be taken by the runtime?
+ enum DeoptAction {
+ Action_none, // just interpret, do not invalidate nmethod
+ Action_maybe_recompile, // recompile the nmethod; need not invalidate
+ Action_reinterpret, // invalidate the nmethod, reset IC, maybe recompile
+ Action_make_not_entrant, // invalidate the nmethod, recompile (probably)
+ Action_make_not_compilable, // invalidate the nmethod and do not compile
+ Action_LIMIT
+ // Note: Keep this enum in sync. with _trap_action_name.
+ };
+
+ enum {
+ _action_bits = 3,
+ _reason_bits = 4,
+ _action_shift = 0,
+ _reason_shift = _action_shift+_action_bits,
+ BC_CASE_LIMIT = PRODUCT_ONLY(1) NOT_PRODUCT(4) // for _deoptimization_hist
+ };
+
+ enum UnpackType {
+ Unpack_deopt = 0, // normal deoptimization, use pc computed in unpack_vframe_on_stack
+ Unpack_exception = 1, // exception is pending
+ Unpack_uncommon_trap = 2, // redo last byte code (C2 only)
+ Unpack_reexecute = 3 // reexecute bytecode (C1 only)
+ };
+
+ // Checks all compiled methods. Invalid methods are deleted and
+ // corresponding activations are deoptimized.
+ static int deoptimize_dependents();
+
+ // Deoptimizes a frame lazily. The nmethod gets patched; the deopt happens on return to the frame.
+ static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map);
+
+ private:
+ // Does the actual work for deoptimizing a single frame
+ static void deoptimize_single_frame(JavaThread* thread, frame fr);
+
+ // Helper function to revoke biases of all monitors in frame if UseBiasedLocking
+ // is enabled
+ static void revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map);
+ // Helper function to revoke biases of all monitors in frames
+ // executing in a particular CodeBlob if UseBiasedLocking is enabled
+ static void revoke_biases_of_monitors(CodeBlob* cb);
+
+#ifdef COMPILER2
+ // Support for restoring non-escaping objects
+ static bool realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS);
+ static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type);
+ static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj);
+ static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects);
+ static void relock_objects(frame* fr, RegisterMap* reg_map, GrowableArray<MonitorValue*>* monitors);
+ NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects);)
+#endif // COMPILER2
+
+ public:
+ static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk);
+
+ // Interface used for unpacking deoptimized frames
+
+ // UnrollBlock is returned by fetch_unroll_info() to the deoptimization handler (blob).
+ // This is only a CHeapObj to ease debugging after a deopt failure
+ class UnrollBlock : public CHeapObj {
+ private:
+ int _size_of_deoptimized_frame; // Size, in bytes, of current deoptimized frame
+ int _caller_adjustment; // Adjustment, in bytes, to caller's SP by initial interpreted frame
+ int _number_of_frames; // Number of frames to unroll
+ int _total_frame_sizes; // Total size of the frames to unroll (sum of the frame sizes)
+ intptr_t* _frame_sizes; // Array of frame sizes, in bytes, for unrolling the stack
+ address* _frame_pcs; // Array of frame pcs for unrolling the stack
+ intptr_t* _register_block; // Block for storing callee-saved registers.
+ BasicType _return_type; // Tells if we have to restore double or long return value
+ // The following fields are used as temps during the unpacking phase
+ // (which is tight on registers, especially on x86). They really ought
+ // to be PD variables but that involves moving this class into its own
+ // file to use the pd include mechanism. Maybe in a later cleanup ...
+ intptr_t _counter_temp; // SHOULD BE PD VARIABLE (x86 frame count temp)
+ intptr_t _initial_fp; // SHOULD BE PD VARIABLE (x86/c2 initial ebp)
+ intptr_t _unpack_kind; // SHOULD BE PD VARIABLE (x86 unpack kind)
+ intptr_t _sender_sp_temp; // SHOULD BE PD VARIABLE (x86 sender_sp)
+ public:
+ // Constructor
+ UnrollBlock(int size_of_deoptimized_frame,
+ int caller_adjustment,
+ int number_of_frames,
+ intptr_t* frame_sizes,
+ address* frames_pcs,
+ BasicType return_type);
+ ~UnrollBlock();
+
+ // Returns where a register is located.
+ intptr_t* value_addr_at(int register_number) const;
+
+ // Accessors
+ intptr_t* frame_sizes() const { return _frame_sizes; }
+ int number_of_frames() const { return _number_of_frames; }
+ address* frame_pcs() const { return _frame_pcs ; }
+
+ // Returns the total size of frames
+ int size_of_frames() const;
+
+ // Accessors used by the code generator for the unpack stub.
+ static int size_of_deoptimized_frame_offset_in_bytes() { return offset_of(UnrollBlock, _size_of_deoptimized_frame); }
+ static int caller_adjustment_offset_in_bytes() { return offset_of(UnrollBlock, _caller_adjustment); }
+ static int number_of_frames_offset_in_bytes() { return offset_of(UnrollBlock, _number_of_frames); }
+ static int frame_sizes_offset_in_bytes() { return offset_of(UnrollBlock, _frame_sizes); }
+ static int total_frame_sizes_offset_in_bytes() { return offset_of(UnrollBlock, _total_frame_sizes); }
+ static int frame_pcs_offset_in_bytes() { return offset_of(UnrollBlock, _frame_pcs); }
+ static int register_block_offset_in_bytes() { return offset_of(UnrollBlock, _register_block); }
+ static int return_type_offset_in_bytes() { return offset_of(UnrollBlock, _return_type); }
+ static int counter_temp_offset_in_bytes() { return offset_of(UnrollBlock, _counter_temp); }
+ static int initial_fp_offset_in_bytes() { return offset_of(UnrollBlock, _initial_fp); }
+ static int unpack_kind_offset_in_bytes() { return offset_of(UnrollBlock, _unpack_kind); }
+ static int sender_sp_temp_offset_in_bytes() { return offset_of(UnrollBlock, _sender_sp_temp); }
+
+ BasicType return_type() const { return _return_type; }
+ void print();
+ };
+
+ //** Returns an UnrollBlock containing information on
+ // how to make room for the resulting interpreter frames.
+ // Called by assembly stub after execution has returned to
+ // deoptimized frame.
+ // @argument thread. Thread where stub_frame resides.
+ // @see OptoRuntime::deoptimization_fetch_unroll_info_C
+ static UnrollBlock* fetch_unroll_info(JavaThread* thread);
+
+ //** Unpacks vframeArray onto execution stack
+ // Called by assembly stub after execution has returned to
+ // deoptimized frame and after the stack unrolling.
+ // @argument thread. Thread where stub_frame resides.
+ // @argument exec_mode. Determines how execution should be continued in the top frame.
+ // 0 means continue after current byte code
+ // 1 means exception has happened, handle exception
+ // 2 means reexecute current bytecode (for uncommon traps).
+ // @see OptoRuntime::deoptimization_unpack_frames_C
+ // Return BasicType of call return type, if any
+ static BasicType unpack_frames(JavaThread* thread, int exec_mode);
+
+ // Cleans up deoptimization bits on thread after unpacking or in the
+ // case of an exception.
+ static void cleanup_deopt_info(JavaThread *thread,
+ vframeArray * array);
+
+ // Restores callee saved values from deoptimized frame into oldest interpreter frame
+ // so caller of the deoptimized frame will get back the values it expects.
+ static void unwind_callee_save_values(frame* f, vframeArray* vframe_array);
+
+ //** Performs an uncommon trap for compiled code.
+ // The topmost compiled frame is converted into interpreter frames
+ static UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
+ // Helper routine that enters the VM and may block
+ static void uncommon_trap_inner(JavaThread* thread, jint unloaded_class_index);
+
+ //** Deoptimizes the frame identified by id.
+ // Only called from VMDeoptimizeFrame
+ // @argument thread. Thread where stub_frame resides.
+ // @argument id. id of frame that should be deoptimized.
+ static void deoptimize_frame(JavaThread* thread, intptr_t* id);
+
+ // Statistics
+ static void gather_statistics(DeoptReason reason, DeoptAction action,
+ Bytecodes::Code bc = Bytecodes::_illegal);
+ static void print_statistics();
+
+ // How much room to adjust the last frame's SP by, to make space for
+ // the callee's interpreter frame (which expects locals to be next to
+ // incoming arguments)
+ static int last_frame_adjust(int callee_parameters, int callee_locals);
+
+ // trap_request codes
+ static DeoptReason trap_request_reason(int trap_request) {
+ if (trap_request < 0)
+ return (DeoptReason)
+ ((~(trap_request) >> _reason_shift) & right_n_bits(_reason_bits));
+ else
+ // standard reason for unloaded CP entry
+ return Reason_unloaded;
+ }
+ static DeoptAction trap_request_action(int trap_request) {
+ if (trap_request < 0)
+ return (DeoptAction)
+ ((~(trap_request) >> _action_shift) & right_n_bits(_action_bits));
+ else
+ // standard action for unloaded CP entry
+ return _unloaded_action;
+ }
+ static int trap_request_index(int trap_request) {
+ if (trap_request < 0)
+ return -1;
+ else
+ return trap_request;
+ }
+ static int make_trap_request(DeoptReason reason, DeoptAction action,
+ int index = -1) {
+ assert((1 << _reason_bits) >= Reason_LIMIT, "enough bits");
+ assert((1 << _action_bits) >= Action_LIMIT, "enough bits");
+ int trap_request;
+ if (index != -1)
+ trap_request = index;
+ else
+ trap_request = (~(((reason) << _reason_shift)
+ + ((action) << _action_shift)));
+ assert(reason == trap_request_reason(trap_request), "valid reason");
+ assert(action == trap_request_action(trap_request), "valid action");
+ assert(index == trap_request_index(trap_request), "valid index");
+ return trap_request;
+ }
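+ // Worked example: make_trap_request(Reason_null_check, Action_reinterpret)
+ // computes ~((1 << _reason_shift) + (2 << _action_shift)) == ~0xA == -11;
+ // decoding -11 yields reason null_check, action reinterpret and index -1.
+ // Any non-negative trap_request is instead treated as a constant pool
+ // index with reason Reason_unloaded and the standard _unloaded_action.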
+
+ // The trap_state stored in a MDO is decoded here.
+ // It records two items of information.
+ // reason: If a deoptimization happened here, what its reason was,
+ // or if there were multiple deopts with differing reasons.
+ // recompiled: If a deoptimization here triggered a recompilation.
+ // Note that not all reasons are recorded per-bci.
+ static DeoptReason trap_state_reason(int trap_state);
+ static int trap_state_has_reason(int trap_state, int reason);
+ static int trap_state_add_reason(int trap_state, int reason);
+ static bool trap_state_is_recompiled(int trap_state);
+ static int trap_state_set_recompiled(int trap_state, bool z);
+ static const char* format_trap_state(char* buf, size_t buflen,
+ int trap_state);
+
+ static bool reason_is_recorded_per_bytecode(DeoptReason reason) {
+ return reason > Reason_none && reason < Reason_RECORDED_LIMIT;
+ }
+
+ static DeoptReason reason_recorded_per_bytecode_if_any(DeoptReason reason) {
+ if (reason_is_recorded_per_bytecode(reason))
+ return reason;
+ else if (reason == Reason_div0_check) // null check due to divide-by-zero?
+ return Reason_null_check; // recorded per BCI as a null check
+ else
+ return Reason_none;
+ }
+
+ static const char* trap_reason_name(int reason);
+ static const char* trap_action_name(int action);
+ // Format like reason='foo' action='bar' index='123'.
+ // This is suitable both for XML and for tty output.
+ static const char* format_trap_request(char* buf, size_t buflen,
+ int trap_request);
+
+ static jint total_deoptimization_count();
+ static jint deoptimization_count(DeoptReason reason);
+
+ // JVMTI PopFrame support
+
+ // Preserves incoming arguments to the popped frame when it is
+ // returning to a deoptimized caller
+ static void popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address);
+
+ private:
+ enum {
+ _no_count = -1
+ };
+
+ static void reset_invocation_counter(ScopeDesc* trap_scope, jint count = _no_count);
+
+ static methodDataOop get_method_data(JavaThread* thread, methodHandle m, bool create_if_missing);
+ // Update the mdo's count and per-BCI reason bits, returning previous state:
+ static ProfileData* query_update_method_data(methodDataHandle trap_mdo,
+ int trap_bci,
+ DeoptReason reason,
+ //outputs:
+ uint& ret_this_trap_count,
+ bool& ret_maybe_prior_trap,
+ bool& ret_maybe_prior_recompile);
+ // class loading support for uncommon trap
+ static void load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS);
+ static void load_class_by_index(constantPoolHandle constant_pool, int index);
+
+ static UnrollBlock* fetch_unroll_info_helper(JavaThread* thread);
+
+ static DeoptAction _unloaded_action; // == Action_reinterpret;
+ static const char* _trap_reason_name[Reason_LIMIT];
+ static const char* _trap_action_name[Action_LIMIT];
+
+ static juint _deoptimization_hist[Reason_LIMIT][1+Action_LIMIT][BC_CASE_LIMIT];
+ // Note: Histogram array size is 1-2 Kb.
+
+ public:
+ static void update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason);
+};
+
+class DeoptimizationMarker : StackObj { // for profiling
+ static bool _is_active;
+public:
+ DeoptimizationMarker() { _is_active = true; }
+ ~DeoptimizationMarker() { _is_active = false; }
+ static bool is_active() { return _is_active; }
+};
diff --git a/src/share/vm/runtime/extendedPC.hpp b/src/share/vm/runtime/extendedPC.hpp
new file mode 100644
index 000000000..a2680851a
--- /dev/null
+++ b/src/share/vm/runtime/extendedPC.hpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright 1998-2004 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// An ExtendedPC contains the _pc from a signal handler in a
+// platform-independent way.
+
+class ExtendedPC VALUE_OBJ_CLASS_SPEC {
+ private:
+ address _pc;
+
+ public:
+ address pc() const { return _pc; }
+ ExtendedPC(address pc) { _pc = pc; }
+ ExtendedPC() { _pc = NULL; }
+};
diff --git a/src/share/vm/runtime/fieldDescriptor.cpp b/src/share/vm/runtime/fieldDescriptor.cpp
new file mode 100644
index 000000000..d750981a4
--- /dev/null
+++ b/src/share/vm/runtime/fieldDescriptor.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+#include "incls/_fieldDescriptor.cpp.incl"
+
+
+oop fieldDescriptor::loader() const {
+ return instanceKlass::cast(_cp->pool_holder())->class_loader();
+}
+
+typeArrayOop fieldDescriptor::annotations() const {
+ instanceKlass* ik = instanceKlass::cast(field_holder());
+ objArrayOop md = ik->fields_annotations();
+ if (md == NULL)
+ return NULL;
+ assert((index() % instanceKlass::next_offset) == 0, "");
+ return typeArrayOop(md->obj_at(index() / instanceKlass::next_offset));
+}
+
+constantTag fieldDescriptor::initial_value_tag() const {
+ return constants()->tag_at(_initial_value_index);
+}
+
+jint fieldDescriptor::int_initial_value() const {
+ return constants()->int_at(_initial_value_index);
+}
+
+jlong fieldDescriptor::long_initial_value() const {
+ return constants()->long_at(_initial_value_index);
+}
+
+jfloat fieldDescriptor::float_initial_value() const {
+ return constants()->float_at(_initial_value_index);
+}
+
+jdouble fieldDescriptor::double_initial_value() const {
+ return constants()->double_at(_initial_value_index);
+}
+
+oop fieldDescriptor::string_initial_value(TRAPS) const {
+ return constants()->string_at(_initial_value_index, CHECK_0);
+}
+
+void fieldDescriptor::initialize(klassOop k, int index) {
+ instanceKlass* ik = instanceKlass::cast(k);
+ _cp = ik->constants();
+ typeArrayOop fields = ik->fields();
+
+ assert(fields->length() % instanceKlass::next_offset == 0, "Illegal size of field array");
+ assert(fields->length() >= index + instanceKlass::next_offset, "Illegal size of field array");
+
+ _access_flags.set_field_flags(fields->ushort_at(index + instanceKlass::access_flags_offset));
+ _name_index = fields->ushort_at(index + instanceKlass::name_index_offset);
+ _signature_index = fields->ushort_at(index + instanceKlass::signature_index_offset);
+ _initial_value_index = fields->ushort_at(index + instanceKlass::initval_index_offset);
+ guarantee(_name_index != 0 && _signature_index != 0, "bad constant pool index for fieldDescriptor");
+ _offset = ik->offset_from_fields( index );
+ _generic_signature_index = fields->ushort_at(index + instanceKlass::generic_signature_offset);
+ _index = index;
+}
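+
+// Layout note: fields() is a flat typeArray in which each field occupies
+// instanceKlass::next_offset consecutive entries; the slots read above
+// (access flags, name index, signature index, initial value index, generic
+// signature index) are fixed offsets within one such group, which is why
+// annotations() divides the field index by next_offset.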
+
+#ifndef PRODUCT
+
+void fieldDescriptor::print_on(outputStream* st) const {
+ _access_flags.print_on(st);
+ constants()->symbol_at(_name_index)->print_value_on(st);
+ st->print(" ");
+ constants()->symbol_at(_signature_index)->print_value_on(st);
+ st->print(" @%d ", offset());
+ if (WizardMode && has_initial_value()) {
+ st->print("(initval ");
+ constantTag t = initial_value_tag();
+ if (t.is_int()) {
+ st->print("int %d)", int_initial_value());
+ } else if (t.is_long()){
+ st->print_jlong(long_initial_value());
+ } else if (t.is_float()){
+ st->print("float %f)", float_initial_value());
+ } else if (t.is_double()){
+ st->print("double %lf)", double_initial_value());
+ }
+ }
+}
+
+void fieldDescriptor::print_on_for(outputStream* st, oop obj) {
+ print_on(st);
+ BasicType ft = field_type();
+ jint as_int;
+ switch (ft) {
+ case T_BYTE:
+ as_int = (jint)obj->byte_field(offset());
+ st->print(" %d", obj->byte_field(offset()));
+ break;
+ case T_CHAR:
+ {
+ jchar c = obj->char_field(offset());
+ as_int = c;
+ st->print(" %c %d", isprint(c) ? c : ' ', c);
+ }
+ break;
+ case T_DOUBLE:
+ st->print(" %lf", obj->double_field(offset()));
+ break;
+ case T_FLOAT:
+ as_int = obj->int_field(offset());
+ st->print(" %f", obj->float_field(offset()));
+ break;
+ case T_INT:
+ st->print(" %d", obj->int_field(offset()));
+ break;
+ case T_LONG:
+ st->print(" ");
+ st->print_jlong(obj->long_field(offset()));
+ break;
+ case T_SHORT:
+ as_int = obj->short_field(offset());
+ st->print(" %d", obj->short_field(offset()));
+ break;
+ case T_BOOLEAN:
+ as_int = obj->bool_field(offset());
+ st->print(" %s", obj->bool_field(offset()) ? "true" : "false");
+ break;
+ case T_ARRAY:
+ st->print(" ");
+ as_int = obj->int_field(offset());
+ obj->obj_field(offset())->print_value_on(st);
+ break;
+ case T_OBJECT:
+ st->print(" ");
+ as_int = obj->int_field(offset());
+ obj->obj_field(offset())->print_value_on(st);
+ break;
+ default:
+ ShouldNotReachHere();
+ break;
+ }
+ // Print a hint as to the underlying integer representation. This can be wrong for
+ // pointers on an LP64 machine
+ if (ft == T_LONG || ft == T_DOUBLE) {
+ st->print(" (%x %x)", obj->int_field(offset()), obj->int_field(offset()+sizeof(jint)));
+ } else {
+ st->print(" (%x)", as_int);
+ }
+}
+
+#endif /* PRODUCT */
diff --git a/src/share/vm/runtime/fieldDescriptor.hpp b/src/share/vm/runtime/fieldDescriptor.hpp
new file mode 100644
index 000000000..00cd66299
--- /dev/null
+++ b/src/share/vm/runtime/fieldDescriptor.hpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// A fieldDescriptor describes the attributes of a single field (instance or class variable).
+// It needs the class constant pool to work (because it only holds indices into the pool
+// rather than the actual info).
+
+class fieldDescriptor VALUE_OBJ_CLASS_SPEC {
+ private:
+ AccessFlags _access_flags;
+ int _name_index;
+ int _signature_index;
+ int _initial_value_index;
+ int _offset;
+ int _generic_signature_index;
+ int _index; // index into fields() array
+ constantPoolHandle _cp;
+
+ public:
+ symbolOop name() const { return _cp->symbol_at(_name_index); }
+ symbolOop signature() const { return _cp->symbol_at(_signature_index); }
+ klassOop field_holder() const { return _cp->pool_holder(); }
+ constantPoolOop constants() const { return _cp(); }
+ AccessFlags access_flags() const { return _access_flags; }
+ oop loader() const;
+ // Offset (in words) of field from start of instanceOop / klassOop
+ int offset() const { return _offset; }
+ symbolOop generic_signature() const { return (_generic_signature_index > 0 ? _cp->symbol_at(_generic_signature_index) : (symbolOop)NULL); }
+ int index() const { return _index; }
+ typeArrayOop annotations() const;
+
+ // Initial field value
+ bool has_initial_value() const { return _initial_value_index != 0; }
+ constantTag initial_value_tag() const; // The tag will return true on one of is_int(), is_long(), is_single(), is_double()
+ jint int_initial_value() const;
+ jlong long_initial_value() const;
+ jfloat float_initial_value() const;
+ jdouble double_initial_value() const;
+ oop string_initial_value(TRAPS) const;
+
+ // Field signature type
+ BasicType field_type() const { return FieldType::basic_type(signature()); }
+
+ // Access flags
+ bool is_public() const { return _access_flags.is_public(); }
+ bool is_private() const { return _access_flags.is_private(); }
+ bool is_protected() const { return _access_flags.is_protected(); }
+ bool is_package_private() const { return !is_public() && !is_private() && !is_protected(); }
+
+ bool is_static() const { return _access_flags.is_static(); }
+ bool is_final() const { return _access_flags.is_final(); }
+ bool is_volatile() const { return _access_flags.is_volatile(); }
+ bool is_transient() const { return _access_flags.is_transient(); }
+
+ bool is_synthetic() const { return _access_flags.is_synthetic(); }
+
+ bool is_field_access_watched() const { return _access_flags.is_field_access_watched(); }
+ bool is_field_modification_watched() const
+ { return _access_flags.is_field_modification_watched(); }
+ void set_is_field_access_watched(const bool value)
+ { _access_flags.set_is_field_access_watched(value); }
+ void set_is_field_modification_watched(const bool value)
+ { _access_flags.set_is_field_modification_watched(value); }
+
+ // Initialization
+ void initialize(klassOop k, int index);
+
+ // Print
+ void print_on(outputStream* st) const PRODUCT_RETURN;
+ void print_on_for(outputStream* st, oop obj) PRODUCT_RETURN;
+};
diff --git a/src/share/vm/runtime/fieldType.cpp b/src/share/vm/runtime/fieldType.cpp
new file mode 100644
index 000000000..ab5840b80
--- /dev/null
+++ b/src/share/vm/runtime/fieldType.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright 1997-2000 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_fieldType.cpp.incl"
+
+void FieldType::skip_optional_size(symbolOop signature, int* index) {
+ jchar c = signature->byte_at(*index);
+ while (c >= '0' && c <= '9') {
+ *index = *index + 1;
+ c = signature->byte_at(*index);
+ }
+}
+
+BasicType FieldType::basic_type(symbolOop signature) {
+ return char2type(signature->byte_at(0));
+}
+
+// Check if it is a valid array signature
+bool FieldType::is_valid_array_signature(symbolOop sig) {
+ assert(sig->utf8_length() > 1, "this should already have been checked");
+ assert(sig->byte_at(0) == '[', "this should already have been checked");
+ // The first character is already checked
+ int i = 1;
+ int len = sig->utf8_length();
+ // First skip all '['s
+ while(i < len - 1 && sig->byte_at(i) == '[') i++;
+
+ // Check type
+ switch(sig->byte_at(i)) {
+ case 'B': // T_BYTE
+ case 'C': // T_CHAR
+ case 'D': // T_DOUBLE
+ case 'F': // T_FLOAT
+ case 'I': // T_INT
+ case 'J': // T_LONG
+ case 'S': // T_SHORT
+ case 'Z': // T_BOOLEAN
+ // If it is an array, the type is the last character
+ return (i + 1 == len);
+ case 'L':
+ // If it is an object, the last character must be a ';'
+ return sig->byte_at(len - 1) == ';';
+ }
+
+ return false;
+}
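+// For example, "[I" and "[[Ljava/lang/String;" are accepted, while "[X"
+// (unknown element type) and "[Ljava/lang/String" (missing the trailing
+// ';') are rejected.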
+
+
+BasicType FieldType::get_array_info(symbolOop signature, jint* dimension, symbolOop* object_key, TRAPS) {
+ assert(basic_type(signature) == T_ARRAY, "must be array");
+ int index = 1;
+ int dim = 1;
+ skip_optional_size(signature, &index);
+ while (signature->byte_at(index) == '[') {
+ index++;
+ dim++;
+ skip_optional_size(signature, &index);
+ }
+ ResourceMark rm;
+ symbolOop element = oopFactory::new_symbol(signature->as_C_string() + index, CHECK_(T_BYTE));
+ BasicType element_type = FieldType::basic_type(element);
+ if (element_type == T_OBJECT) {
+ char* object_type = element->as_C_string();
+ object_type[element->utf8_length() - 1] = '\0';
+ *object_key = oopFactory::new_symbol(object_type + 1, CHECK_(T_BYTE));
+ }
+ // Pass dimension back to caller
+ *dimension = dim;
+ return element_type;
+}
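+
+// For example, for the signature "[[Ljava/lang/String;" get_array_info
+// returns T_OBJECT with *dimension set to 2 and *object_key set to the
+// symbol "java/lang/String"; for "[[I" it returns T_INT with *dimension
+// set to 2 and *object_key left untouched.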
diff --git a/src/share/vm/runtime/fieldType.hpp b/src/share/vm/runtime/fieldType.hpp
new file mode 100644
index 000000000..9f271667c
--- /dev/null
+++ b/src/share/vm/runtime/fieldType.hpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 1997-2002 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Note: FieldType should be based on the SignatureIterator (or vice versa).
+// In any case, this structure should be re-thought at some point.
+
+// A FieldType is used to determine the type of a field from a signature string.
+
+class FieldType: public AllStatic {
+ private:
+ static void skip_optional_size(symbolOop signature, int* index);
+ static bool is_valid_array_signature(symbolOop signature);
+ public:
+
+ // Return basic type
+ static BasicType basic_type(symbolOop signature);
+
+ // Testing
+ static bool is_array(symbolOop signature) { return signature->utf8_length() > 1 && signature->byte_at(0) == '[' && is_valid_array_signature(signature); }
+
+ static bool is_obj(symbolOop signature) {
+ int sig_length = signature->utf8_length();
+ // Must start with 'L' and end with ';'
+ return (sig_length >= 2 &&
+ (signature->byte_at(0) == 'L') &&
+ (signature->byte_at(sig_length - 1) == ';'));
+ }
+
+ // Parse field and extract array information. Works for T_ARRAY only.
+ static BasicType get_array_info(symbolOop signature, jint* dimension, symbolOop *object_key, TRAPS);
+};
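+
+// Usage sketch (illustrative): for the signature "I", basic_type() returns
+// T_INT; for "Ljava/lang/String;", is_obj() is true and basic_type() returns
+// T_OBJECT; for "[[D", is_array() is true and get_array_info() reports a
+// dimension of 2 with element type T_DOUBLE.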
diff --git a/src/share/vm/runtime/fprofiler.cpp b/src/share/vm/runtime/fprofiler.cpp
new file mode 100644
index 000000000..89d9cbaa2
--- /dev/null
+++ b/src/share/vm/runtime/fprofiler.cpp
@@ -0,0 +1,1595 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_fprofiler.cpp.incl"
+
+// Static fields of FlatProfiler
+int FlatProfiler::received_gc_ticks = 0;
+int FlatProfiler::vm_operation_ticks = 0;
+int FlatProfiler::threads_lock_ticks = 0;
+int FlatProfiler::class_loader_ticks = 0;
+int FlatProfiler::extra_ticks = 0;
+int FlatProfiler::blocked_ticks = 0;
+int FlatProfiler::deopt_ticks = 0;
+int FlatProfiler::unknown_ticks = 0;
+int FlatProfiler::interpreter_ticks = 0;
+int FlatProfiler::compiler_ticks = 0;
+int FlatProfiler::received_ticks = 0;
+int FlatProfiler::delivered_ticks = 0;
+int* FlatProfiler::bytecode_ticks = NULL;
+int* FlatProfiler::bytecode_ticks_stub = NULL;
+int FlatProfiler::all_int_ticks = 0;
+int FlatProfiler::all_comp_ticks = 0;
+int FlatProfiler::all_ticks = 0;
+bool FlatProfiler::full_profile_flag = false;
+ThreadProfiler* FlatProfiler::thread_profiler = NULL;
+ThreadProfiler* FlatProfiler::vm_thread_profiler = NULL;
+FlatProfilerTask* FlatProfiler::task = NULL;
+elapsedTimer FlatProfiler::timer;
+int FlatProfiler::interval_ticks_previous = 0;
+IntervalData* FlatProfiler::interval_data = NULL;
+
+ThreadProfiler::ThreadProfiler() {
+ // Space for the ProfilerNodes
+ const int area_size = 1 * ProfilerNodeSize * 1024;
+ area_bottom = AllocateHeap(area_size, "fprofiler");
+ area_top = area_bottom;
+ area_limit = area_bottom + area_size;
+
+ // ProfilerNode pointer table
+ table = NEW_C_HEAP_ARRAY(ProfilerNode*, table_size);
+ initialize();
+ engaged = false;
+}
+
+ThreadProfiler::~ThreadProfiler() {
+ FreeHeap(area_bottom);
+ area_bottom = NULL;
+ area_top = NULL;
+ area_limit = NULL;
+ FreeHeap(table);
+ table = NULL;
+}
+
+// Statics for ThreadProfiler
+int ThreadProfiler::table_size = 1024;
+
+int ThreadProfiler::entry(int value) {
+ value = (value > 0) ? value : -value;
+ return value % table_size;
+}
+
+ThreadProfilerMark::ThreadProfilerMark(ThreadProfilerMark::Region r) {
+ _r = r;
+ _pp = NULL;
+ assert(((r > ThreadProfilerMark::noRegion) && (r < ThreadProfilerMark::maxRegion)), "ThreadProfilerMark::Region out of bounds");
+ Thread* tp = Thread::current();
+ if (tp != NULL && tp->is_Java_thread()) {
+ JavaThread* jtp = (JavaThread*) tp;
+ ThreadProfiler* pp = jtp->get_thread_profiler();
+ _pp = pp;
+ if (pp != NULL) {
+ pp->region_flag[r] = true;
+ }
+ }
+}
+
+ThreadProfilerMark::~ThreadProfilerMark() {
+ if (_pp != NULL) {
+ _pp->region_flag[_r] = false;
+ }
+ _pp = NULL;
+}
+
+// Random other statics
+static const int col1 = 2; // position of output column 1
+static const int col2 = 11; // position of output column 2
+static const int col3 = 25; // position of output column 3
+static const int col4 = 55; // position of output column 4
+
+
+// Used for detailed profiling of nmethods.
+class PCRecorder : AllStatic {
+ private:
+ static int* counters;
+ static address base;
+ enum {
+ bucket_size = 16
+ };
+ static int index_for(address pc) { return (pc - base)/bucket_size; }
+ static address pc_for(int index) { return base + (index * bucket_size); }
+ static int size() {
+ return ((int)CodeCache::max_capacity())/bucket_size * BytesPerWord;
+ }
+ public:
+ static address bucket_start_for(address pc) {
+ if (counters == NULL) return NULL;
+ return pc_for(index_for(pc));
+ }
+ static int bucket_count_for(address pc) { return counters[index_for(pc)]; }
+ static void init();
+ static void record(address pc);
+ static void print();
+ static void print_blobs(CodeBlob* cb);
+};
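+
+// For example, with bucket_size == 16 a pc at base + 37 maps to bucket index
+// 2 (37 / 16), so record() increments counters[2] and bucket_start_for()
+// returns base + 32 for any pc in [base + 32, base + 48).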
+
+int* PCRecorder::counters = NULL;
+address PCRecorder::base = NULL;
+
+void PCRecorder::init() {
+ MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ int s = size();
+ counters = NEW_C_HEAP_ARRAY(int, s);
+ for (int index = 0; index < s; index++) {
+ counters[index] = 0;
+ }
+ base = CodeCache::first_address();
+}
+
+void PCRecorder::record(address pc) {
+ if (counters == NULL) return;
+ assert(CodeCache::contains(pc), "must be in CodeCache");
+ counters[index_for(pc)]++;
+}
+
+
+address FlatProfiler::bucket_start_for(address pc) {
+ return PCRecorder::bucket_start_for(pc);
+}
+
+int FlatProfiler::bucket_count_for(address pc) {
+ return PCRecorder::bucket_count_for(pc);
+}
+
+void PCRecorder::print() {
+ if (counters == NULL) return;
+
+ tty->cr();
+ tty->print_cr("Printing compiled methods with PC buckets having more than %d ticks", ProfilerPCTickThreshold);
+ tty->print_cr("===================================================================");
+ tty->cr();
+
+ GrowableArray<CodeBlob*>* candidates = new GrowableArray<CodeBlob*>(20);
+
+
+ int s;
+ {
+ MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ s = size();
+ }
+
+ for (int index = 0; index < s; index++) {
+ int count = counters[index];
+ if (count > ProfilerPCTickThreshold) {
+ address pc = pc_for(index);
+ CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+ if (cb != NULL && candidates->find(cb) < 0) {
+ candidates->push(cb);
+ }
+ }
+ }
+ for (int i = 0; i < candidates->length(); i++) {
+ print_blobs(candidates->at(i));
+ }
+}
+
+void PCRecorder::print_blobs(CodeBlob* cb) {
+ if (cb != NULL) {
+ cb->print();
+ if (cb->is_nmethod()) {
+ ((nmethod*)cb)->print_code();
+ }
+ tty->cr();
+ } else {
+ tty->print_cr("stub code");
+ }
+}
+
+class tick_counter { // holds tick info for one node
+ public:
+ int ticks_in_code;
+ int ticks_in_native;
+
+ tick_counter() { ticks_in_code = ticks_in_native = 0; }
+ tick_counter(int code, int native) { ticks_in_code = code; ticks_in_native = native; }
+
+ int total() const {
+ return (ticks_in_code + ticks_in_native);
+ }
+
+ void add(tick_counter* a) {
+ ticks_in_code += a->ticks_in_code;
+ ticks_in_native += a->ticks_in_native;
+ }
+
+ void update(TickPosition where) {
+ switch(where) {
+ case tp_code: ticks_in_code++; break;
+ case tp_native: ticks_in_native++; break;
+ }
+ }
+
+ void print_code(outputStream* st, int total_ticks) {
+ st->print("%5.1f%% %5d ", total() * 100.0 / total_ticks, ticks_in_code);
+ }
+
+ void print_native(outputStream* st) {
+ st->print(" + %5d ", ticks_in_native);
+ }
+};
+
+class ProfilerNode {
+ private:
+ ProfilerNode* _next;
+ public:
+ tick_counter ticks;
+
+ public:
+
+ void* operator new(size_t size, ThreadProfiler* tp);
+ void operator delete(void* p);
+
+ ProfilerNode() {
+ _next = NULL;
+ }
+
+ virtual ~ProfilerNode() {
+ if (_next)
+ delete _next;
+ }
+
+ void set_next(ProfilerNode* n) { _next = n; }
+ ProfilerNode* next() { return _next; }
+
+ void update(TickPosition where) { ticks.update(where);}
+ int total_ticks() { return ticks.total(); }
+
+ virtual bool is_interpreted() const { return false; }
+ virtual bool is_compiled() const { return false; }
+ virtual bool is_stub() const { return false; }
+ virtual bool is_runtime_stub() const{ return false; }
+ virtual void oops_do(OopClosure* f) = 0;
+
+ virtual bool interpreted_match(methodOop m) const { return false; }
+ virtual bool compiled_match(methodOop m ) const { return false; }
+ virtual bool stub_match(methodOop m, const char* name) const { return false; }
+ virtual bool adapter_match() const { return false; }
+ virtual bool runtimeStub_match(const CodeBlob* stub, const char* name) const { return false; }
+ virtual bool unknown_compiled_match(const CodeBlob* cb) const { return false; }
+
+ static void print_title(outputStream* st) {
+ st->print(" + native");
+ st->fill_to(col3);
+ st->print("Method");
+ st->fill_to(col4);
+ st->cr();
+ }
+
+ static void print_total(outputStream* st, tick_counter* t, int total, const char* msg) {
+ t->print_code(st, total);
+ st->fill_to(col2);
+ t->print_native(st);
+ st->fill_to(col3);
+ st->print(msg);
+ st->cr();
+ }
+
+ virtual methodOop method() = 0;
+
+ virtual void print_method_on(outputStream* st) {
+ int limit;
+ int i;
+ methodOop m = method();
+ symbolOop k = m->klass_name();
+ // Print the class name with dots instead of slashes
+ limit = k->utf8_length();
+ for (i = 0 ; i < limit ; i += 1) {
+ char c = (char) k->byte_at(i);
+ if (c == '/') {
+ c = '.';
+ }
+ st->print("%c", c);
+ }
+ if (limit > 0) {
+ st->print(".");
+ }
+ symbolOop n = m->name();
+ limit = n->utf8_length();
+ for (i = 0 ; i < limit ; i += 1) {
+ char c = (char) n->byte_at(i);
+ st->print("%c", c);
+ }
+ if( Verbose ) {
+ // Disambiguate overloaded methods
+ symbolOop sig = m->signature();
+ sig->print_symbol_on(st);
+ }
+ }
+
+ virtual void print(outputStream* st, int total_ticks) {
+ ticks.print_code(st, total_ticks);
+ st->fill_to(col2);
+ ticks.print_native(st);
+ st->fill_to(col3);
+ print_method_on(st);
+ st->cr();
+ }
+
+ // for hashing into the table
+ static int hash(methodOop method) {
+ // The point here is to try to make something fairly unique
+ // out of the fields we can read without grabbing any locks
+ // since the method may be locked when we need the hash.
+ return (
+ method->code_size() ^
+ method->max_stack() ^
+ method->max_locals() ^
+ method->size_of_parameters());
+ }
+
+ // for sorting
+ static int compare(ProfilerNode** a, ProfilerNode** b) {
+ return (*b)->total_ticks() - (*a)->total_ticks();
+ }
+};
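+
+// Usage sketch (illustrative): a tick for a methodOop m lands in the chain at
+// table[entry(ProfilerNode::hash(m))]; if no node in that chain matches (via
+// interpreted_match(m), compiled_match(m), etc.), a new node is allocated
+// from the preallocated area with "new (this) ...Node(m, where)" and linked
+// in with set_next(); see the *_update methods below.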
+
+void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp){
+ void* result = (void*) tp->area_top;
+ tp->area_top += size;
+
+ if (tp->area_top > tp->area_limit) {
+ fatal("flat profiler buffer overflow");
+ }
+ return result;
+}
+
+void ProfilerNode::operator delete(void* p){
+}
+
+class interpretedNode : public ProfilerNode {
+ private:
+ methodOop _method;
+ public:
+ interpretedNode(methodOop method, TickPosition where) : ProfilerNode() {
+ _method = method;
+ update(where);
+ }
+
+ bool is_interpreted() const { return true; }
+
+ bool interpreted_match(methodOop m) const {
+ return _method == m;
+ }
+
+ void oops_do(OopClosure* f) {
+ f->do_oop((oop*)&_method);
+ }
+
+ methodOop method() { return _method; }
+
+ static void print_title(outputStream* st) {
+ st->fill_to(col1);
+ st->print("%11s", "Interpreted");
+ ProfilerNode::print_title(st);
+ }
+
+ void print(outputStream* st, int total_ticks) {
+ ProfilerNode::print(st, total_ticks);
+ }
+
+ void print_method_on(outputStream* st) {
+ ProfilerNode::print_method_on(st);
+ if (Verbose) method()->invocation_counter()->print_short();
+ }
+};
+
+class compiledNode : public ProfilerNode {
+ private:
+ methodOop _method;
+ public:
+ compiledNode(methodOop method, TickPosition where) : ProfilerNode() {
+ _method = method;
+ update(where);
+ }
+ bool is_compiled() const { return true; }
+
+ bool compiled_match(methodOop m) const {
+ return _method == m;
+ }
+
+ methodOop method() { return _method; }
+
+ void oops_do(OopClosure* f) {
+ f->do_oop((oop*)&_method);
+ }
+
+ static void print_title(outputStream* st) {
+ st->fill_to(col1);
+ st->print("%11s", "Compiled");
+ ProfilerNode::print_title(st);
+ }
+
+ void print(outputStream* st, int total_ticks) {
+ ProfilerNode::print(st, total_ticks);
+ }
+
+ void print_method_on(outputStream* st) {
+ ProfilerNode::print_method_on(st);
+ }
+};
+
+class stubNode : public ProfilerNode {
+ private:
+ methodOop _method;
+ const char* _symbol; // The name of the nearest VM symbol (for +ProfileVM). Points to a unique string
+ public:
+ stubNode(methodOop method, const char* name, TickPosition where) : ProfilerNode() {
+ _method = method;
+ _symbol = name;
+ update(where);
+ }
+
+ bool is_stub() const { return true; }
+
+ bool stub_match(methodOop m, const char* name) const {
+ return (_method == m) && (_symbol == name);
+ }
+
+ void oops_do(OopClosure* f) {
+ f->do_oop((oop*)&_method);
+ }
+
+ methodOop method() { return _method; }
+
+ static void print_title(outputStream* st) {
+ st->fill_to(col1);
+ st->print("%11s", "Stub");
+ ProfilerNode::print_title(st);
+ }
+
+ void print(outputStream* st, int total_ticks) {
+ ProfilerNode::print(st, total_ticks);
+ }
+
+ void print_method_on(outputStream* st) {
+ ProfilerNode::print_method_on(st);
+ print_symbol_on(st);
+ }
+
+ void print_symbol_on(outputStream* st) {
+ if(_symbol) {
+ st->print(" (%s)", _symbol);
+ }
+ }
+};
+
+class adapterNode : public ProfilerNode {
+ public:
+ adapterNode(TickPosition where) : ProfilerNode() {
+ update(where);
+ }
+ bool is_compiled() const { return true; }
+
+ bool adapter_match() const { return true; }
+
+ methodOop method() { return NULL; }
+
+ void oops_do(OopClosure* f) {
+ ;
+ }
+
+ void print(outputStream* st, int total_ticks) {
+ ProfilerNode::print(st, total_ticks);
+ }
+
+ void print_method_on(outputStream* st) {
+ st->print("%s", "adapters");
+ }
+};
+
+class runtimeStubNode : public ProfilerNode {
+ private:
+ const CodeBlob* _stub;
+ const char* _symbol; // The name of the nearest VM symbol when ProfileVM is on. Points to a unique string.
+ public:
+ runtimeStubNode(const CodeBlob* stub, const char* name, TickPosition where) : ProfilerNode(), _stub(stub), _symbol(name) {
+ assert(stub->is_runtime_stub(), "wrong code blob");
+ update(where);
+ }
+
+ bool is_runtime_stub() const { return true; }
+
+ bool runtimeStub_match(const CodeBlob* stub, const char* name) const {
+ assert(stub->is_runtime_stub(), "wrong code blob");
+ return ((RuntimeStub*)_stub)->entry_point() == ((RuntimeStub*)stub)->entry_point() &&
+ (_symbol == name);
+ }
+
+ methodOop method() { return NULL; }
+
+ static void print_title(outputStream* st) {
+ st->fill_to(col1);
+ st->print("%11s", "Runtime stub");
+ ProfilerNode::print_title(st);
+ }
+
+ void oops_do(OopClosure* f) {
+ ;
+ }
+
+ void print(outputStream* st, int total_ticks) {
+ ProfilerNode::print(st, total_ticks);
+ }
+
+ void print_method_on(outputStream* st) {
+ st->print("%s", ((RuntimeStub*)_stub)->name());
+ print_symbol_on(st);
+ }
+
+ void print_symbol_on(outputStream* st) {
+ if(_symbol) {
+ st->print(" (%s)", _symbol);
+ }
+ }
+};
+
+
+class unknown_compiledNode : public ProfilerNode {
+ const char *_name;
+ public:
+ unknown_compiledNode(const CodeBlob* cb, TickPosition where) : ProfilerNode() {
+ if ( cb->is_buffer_blob() )
+ _name = ((BufferBlob*)cb)->name();
+ else
+ _name = ((SingletonBlob*)cb)->name();
+ update(where);
+ }
+ bool is_compiled() const { return true; }
+
+ bool unknown_compiled_match(const CodeBlob* cb) const {
+ if ( cb->is_buffer_blob() )
+ return !strcmp(((BufferBlob*)cb)->name(), _name);
+ else
+ return !strcmp(((SingletonBlob*)cb)->name(), _name);
+ }
+
+ methodOop method() { return NULL; }
+
+ void oops_do(OopClosure* f) {
+ ;
+ }
+
+ void print(outputStream* st, int total_ticks) {
+ ProfilerNode::print(st, total_ticks);
+ }
+
+ void print_method_on(outputStream* st) {
+ st->print("%s", _name);
+ }
+};
+
+class vmNode : public ProfilerNode {
+ private:
+ const char* _name; // "optional" name obtained by os means such as dll lookup
+ public:
+ vmNode(const TickPosition where) : ProfilerNode() {
+ _name = NULL;
+ update(where);
+ }
+
+ vmNode(const char* name, const TickPosition where) : ProfilerNode() {
+ _name = name;
+ update(where);
+ }
+
+ const char *name() const { return _name; }
+ bool is_compiled() const { return true; }
+
+ bool vm_match(const char* name) const { return strcmp(name, _name) == 0; }
+
+ methodOop method() { return NULL; }
+
+ static int hash(const char* name){
+ // Compute a simple hash
+ const char* cp = name;
+ int h = 0;
+
+ if(name != NULL){
+ while(*cp != '\0'){
+ h = (h << 1) ^ *cp;
+ cp++;
+ }
+ }
+ return h;
+ }
+
+ void oops_do(OopClosure* f) {
+ ;
+ }
+
+ void print(outputStream* st, int total_ticks) {
+ ProfilerNode::print(st, total_ticks);
+ }
+
+ void print_method_on(outputStream* st) {
+ if(_name==NULL){
+ st->print("%s", "unknown code");
+ }
+ else {
+ st->print("%s", _name);
+ }
+ }
+};
+
+void ThreadProfiler::interpreted_update(methodOop method, TickPosition where) {
+ int index = entry(ProfilerNode::hash(method));
+ if (!table[index]) {
+ table[index] = new (this) interpretedNode(method, where);
+ } else {
+ ProfilerNode* prev = table[index];
+ for(ProfilerNode* node = prev; node; node = node->next()) {
+ if (node->interpreted_match(method)) {
+ node->update(where);
+ return;
+ }
+ prev = node;
+ }
+ prev->set_next(new (this) interpretedNode(method, where));
+ }
+}
+
+void ThreadProfiler::compiled_update(methodOop method, TickPosition where) {
+ int index = entry(ProfilerNode::hash(method));
+ if (!table[index]) {
+ table[index] = new (this) compiledNode(method, where);
+ } else {
+ ProfilerNode* prev = table[index];
+ for(ProfilerNode* node = prev; node; node = node->next()) {
+ if (node->compiled_match(method)) {
+ node->update(where);
+ return;
+ }
+ prev = node;
+ }
+ prev->set_next(new (this) compiledNode(method, where));
+ }
+}
+
+void ThreadProfiler::stub_update(methodOop method, const char* name, TickPosition where) {
+ int index = entry(ProfilerNode::hash(method));
+ if (!table[index]) {
+ table[index] = new (this) stubNode(method, name, where);
+ } else {
+ ProfilerNode* prev = table[index];
+ for(ProfilerNode* node = prev; node; node = node->next()) {
+ if (node->stub_match(method, name)) {
+ node->update(where);
+ return;
+ }
+ prev = node;
+ }
+ prev->set_next(new (this) stubNode(method, name, where));
+ }
+}
+
+void ThreadProfiler::adapter_update(TickPosition where) {
+ int index = 0;
+ if (!table[index]) {
+ table[index] = new (this) adapterNode(where);
+ } else {
+ ProfilerNode* prev = table[index];
+ for(ProfilerNode* node = prev; node; node = node->next()) {
+ if (node->adapter_match()) {
+ node->update(where);
+ return;
+ }
+ prev = node;
+ }
+ prev->set_next(new (this) adapterNode(where));
+ }
+}
+
+void ThreadProfiler::runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where) {
+ int index = 0;
+ if (!table[index]) {
+ table[index] = new (this) runtimeStubNode(stub, name, where);
+ } else {
+ ProfilerNode* prev = table[index];
+ for(ProfilerNode* node = prev; node; node = node->next()) {
+ if (node->runtimeStub_match(stub, name)) {
+ node->update(where);
+ return;
+ }
+ prev = node;
+ }
+ prev->set_next(new (this) runtimeStubNode(stub, name, where));
+ }
+}
+
+
+void ThreadProfiler::unknown_compiled_update(const CodeBlob* cb, TickPosition where) {
+ int index = 0;
+ if (!table[index]) {
+ table[index] = new (this) unknown_compiledNode(cb, where);
+ } else {
+ ProfilerNode* prev = table[index];
+ for(ProfilerNode* node = prev; node; node = node->next()) {
+ if (node->unknown_compiled_match(cb)) {
+ node->update(where);
+ return;
+ }
+ prev = node;
+ }
+ prev->set_next(new (this) unknown_compiledNode(cb, where));
+ }
+}
+
+void ThreadProfiler::vm_update(TickPosition where) {
+ vm_update(NULL, where);
+}
+
+void ThreadProfiler::vm_update(const char* name, TickPosition where) {
+ int index = entry(vmNode::hash(name));
+ assert(index >= 0, "Must be positive");
+ // Note that we call strdup below since the symbol may be resource allocated
+ if (!table[index]) {
+ table[index] = new (this) vmNode(os::strdup(name), where);
+ } else {
+ ProfilerNode* prev = table[index];
+ for(ProfilerNode* node = prev; node; node = node->next()) {
+ if (((vmNode *)node)->vm_match(name)) {
+ node->update(where);
+ return;
+ }
+ prev = node;
+ }
+ prev->set_next(new (this) vmNode(os::strdup(name), where));
+ }
+}
+
+
+class FlatProfilerTask : public PeriodicTask {
+public:
+ FlatProfilerTask(int interval_time) : PeriodicTask(interval_time) {}
+ void task();
+};
+
+void FlatProfiler::record_vm_operation() {
+ if (Universe::heap()->is_gc_active()) {
+ FlatProfiler::received_gc_ticks += 1;
+ return;
+ }
+
+ if (DeoptimizationMarker::is_active()) {
+ FlatProfiler::deopt_ticks += 1;
+ return;
+ }
+
+ FlatProfiler::vm_operation_ticks += 1;
+}
+
+void FlatProfiler::record_vm_tick() {
+  // Profile the VM Thread itself if needed.
+  // This is done without getting the Threads_lock, so we can be deep
+  // inside Safepoint code, etc.
+ if( ProfileVM ) {
+ ResourceMark rm;
+ ExtendedPC epc;
+ const char *name = NULL;
+ char buf[256];
+ buf[0] = '\0';
+
+ vm_thread_profiler->inc_thread_ticks();
+
+    // Get a snapshot of the current VMThread pc (and leave it running!)
+    // The call may fail if, for instance, the VM thread is interrupted while
+ // holding the Interrupt_lock or for other reasons.
+ epc = os::get_thread_pc(VMThread::vm_thread());
+ if(epc.pc() != NULL) {
+ if (os::dll_address_to_function_name(epc.pc(), buf, sizeof(buf), NULL)) {
+ name = buf;
+ }
+ }
+ if (name != NULL) {
+ vm_thread_profiler->vm_update(name, tp_native);
+ }
+ }
+}
+
+void FlatProfiler::record_thread_ticks() {
+
+ int maxthreads, suspendedthreadcount;
+ JavaThread** threadsList;
+ bool interval_expired = false;
+
+ if (ProfileIntervals &&
+ (FlatProfiler::received_ticks >= interval_ticks_previous + ProfileIntervalsTicks)) {
+ interval_expired = true;
+ interval_ticks_previous = FlatProfiler::received_ticks;
+ }
+
+ // Try not to wait for the Threads_lock
+ if (Threads_lock->try_lock()) {
+ { // Threads_lock scope
+ maxthreads = Threads::number_of_threads();
+ threadsList = NEW_C_HEAP_ARRAY(JavaThread *, maxthreads);
+ suspendedthreadcount = 0;
+ for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
+ if (tp->is_Compiler_thread()) {
+ // Only record ticks for active compiler threads
+ CompilerThread* cthread = (CompilerThread*)tp;
+ if (cthread->task() != NULL) {
+ // The compiler is active. If we need to access any of the fields
+ // of the compiler task we should suspend the CompilerThread first.
+ FlatProfiler::compiler_ticks += 1;
+ continue;
+ }
+ }
+
+ // First externally suspend all threads by marking each for
+ // external suspension - so it will stop at its next transition
+ // Then do a safepoint
+ ThreadProfiler* pp = tp->get_thread_profiler();
+ if (pp != NULL && pp->engaged) {
+ MutexLockerEx ml(tp->SR_lock(), Mutex::_no_safepoint_check_flag);
+ if (!tp->is_external_suspend() && !tp->is_exiting()) {
+ tp->set_external_suspend();
+ threadsList[suspendedthreadcount++] = tp;
+ }
+ }
+ }
+ Threads_lock->unlock();
+ }
+ // Suspend each thread. This call should just return
+ // for any threads that have already self-suspended
+ // Net result should be one safepoint
+ for (int j = 0; j < suspendedthreadcount; j++) {
+ JavaThread *tp = threadsList[j];
+ if (tp) {
+ tp->java_suspend();
+ }
+ }
+
+ // We are responsible for resuming any thread on this list
+ for (int i = 0; i < suspendedthreadcount; i++) {
+ JavaThread *tp = threadsList[i];
+ if (tp) {
+ ThreadProfiler* pp = tp->get_thread_profiler();
+ if (pp != NULL && pp->engaged) {
+ HandleMark hm;
+ FlatProfiler::delivered_ticks += 1;
+ if (interval_expired) {
+ FlatProfiler::interval_record_thread(pp);
+ }
+ // This is the place where we check to see if a user thread is
+ // blocked waiting for compilation.
+ if (tp->blocked_on_compilation()) {
+ pp->compiler_ticks += 1;
+ pp->interval_data_ref()->inc_compiling();
+ } else {
+ pp->record_tick(tp);
+ }
+ }
+ MutexLocker ml(Threads_lock);
+ tp->java_resume();
+ }
+ }
+ if (interval_expired) {
+ FlatProfiler::interval_print();
+ FlatProfiler::interval_reset();
+ }
+ } else {
+ // Couldn't get the threads lock, just record that rather than blocking
+ FlatProfiler::threads_lock_ticks += 1;
+ }
+
+}
+
+void FlatProfilerTask::task() {
+ FlatProfiler::received_ticks += 1;
+
+ if (ProfileVM) {
+ FlatProfiler::record_vm_tick();
+ }
+
+ VM_Operation* op = VMThread::vm_operation();
+ if (op != NULL) {
+ FlatProfiler::record_vm_operation();
+ if (SafepointSynchronize::is_at_safepoint()) {
+ return;
+ }
+ }
+ FlatProfiler::record_thread_ticks();
+}
+
+void ThreadProfiler::record_interpreted_tick(frame fr, TickPosition where, int* ticks) {
+ FlatProfiler::all_int_ticks++;
+ if (!FlatProfiler::full_profile()) {
+ return;
+ }
+
+ if (!fr.is_interpreted_frame_valid()) {
+ // tick came at a bad time
+ interpreter_ticks += 1;
+ FlatProfiler::interpreter_ticks += 1;
+ return;
+ }
+
+ methodOop method = NULL;
+ if (fr.fp() != NULL) {
+ method = *fr.interpreter_frame_method_addr();
+ }
+ if (!Universe::heap()->is_valid_method(method)) {
+ // tick came at a bad time, stack frame not initialized correctly
+ interpreter_ticks += 1;
+ FlatProfiler::interpreter_ticks += 1;
+ return;
+ }
+ interpreted_update(method, where);
+
+ // update byte code table
+ InterpreterCodelet* desc = Interpreter::codelet_containing(fr.pc());
+ if (desc != NULL && desc->bytecode() >= 0) {
+ ticks[desc->bytecode()]++;
+ }
+}
+
+void ThreadProfiler::record_compiled_tick(JavaThread* thread, frame fr, TickPosition where) {
+ const char *name = NULL;
+ TickPosition localwhere = where;
+
+ FlatProfiler::all_comp_ticks++;
+ if (!FlatProfiler::full_profile()) return;
+
+ CodeBlob* cb = fr.cb();
+
+  // For runtime stubs, record as native rather than as compiled
+ if (cb->is_runtime_stub()) {
+ RegisterMap map(thread, false);
+ fr = fr.sender(&map);
+ cb = fr.cb();
+ localwhere = tp_native;
+ }
+ methodOop method = (cb->is_nmethod()) ? ((nmethod *)cb)->method() :
+ (methodOop)NULL;
+
+ if (method == NULL) {
+ if (cb->is_runtime_stub())
+ runtime_stub_update(cb, name, localwhere);
+ else
+ unknown_compiled_update(cb, localwhere);
+ }
+ else {
+ if (method->is_native()) {
+ stub_update(method, name, localwhere);
+ } else {
+ compiled_update(method, localwhere);
+ }
+ }
+}
+
+extern "C" void find(int x);
+
+
+void ThreadProfiler::record_tick_for_running_frame(JavaThread* thread, frame fr) {
+  // The tick happened in real code -> non-VM code
+ if (fr.is_interpreted_frame()) {
+ interval_data_ref()->inc_interpreted();
+ record_interpreted_tick(fr, tp_code, FlatProfiler::bytecode_ticks);
+ return;
+ }
+
+ if (CodeCache::contains(fr.pc())) {
+ interval_data_ref()->inc_compiled();
+ PCRecorder::record(fr.pc());
+ record_compiled_tick(thread, fr, tp_code);
+ return;
+ }
+
+ if (VtableStubs::stub_containing(fr.pc()) != NULL) {
+ unknown_ticks_array[ut_vtable_stubs] += 1;
+ return;
+ }
+
+ frame caller = fr.profile_find_Java_sender_frame(thread);
+
+ if (caller.sp() != NULL && caller.pc() != NULL) {
+ record_tick_for_calling_frame(thread, caller);
+ return;
+ }
+
+ unknown_ticks_array[ut_running_frame] += 1;
+ FlatProfiler::unknown_ticks += 1;
+}
+
+void ThreadProfiler::record_tick_for_calling_frame(JavaThread* thread, frame fr) {
+  // The tick happened in VM code
+ interval_data_ref()->inc_native();
+ if (fr.is_interpreted_frame()) {
+ record_interpreted_tick(fr, tp_native, FlatProfiler::bytecode_ticks_stub);
+ return;
+ }
+ if (CodeCache::contains(fr.pc())) {
+ record_compiled_tick(thread, fr, tp_native);
+ return;
+ }
+
+ frame caller = fr.profile_find_Java_sender_frame(thread);
+
+ if (caller.sp() != NULL && caller.pc() != NULL) {
+ record_tick_for_calling_frame(thread, caller);
+ return;
+ }
+
+ unknown_ticks_array[ut_calling_frame] += 1;
+ FlatProfiler::unknown_ticks += 1;
+}
+
+void ThreadProfiler::record_tick(JavaThread* thread) {
+ FlatProfiler::all_ticks++;
+ thread_ticks += 1;
+
+ // Here's another way to track global state changes.
+  // When the class loader starts, it marks the ThreadProfiler to indicate that
+  // the thread is in the class loader, and we check that here.
+ // This is more direct, and more than one thread can be in the class loader at a time,
+ // but it does mean the class loader has to know about the profiler.
+ if (region_flag[ThreadProfilerMark::classLoaderRegion]) {
+ class_loader_ticks += 1;
+ FlatProfiler::class_loader_ticks += 1;
+ return;
+ } else if (region_flag[ThreadProfilerMark::extraRegion]) {
+ extra_ticks += 1;
+ FlatProfiler::extra_ticks += 1;
+ return;
+ }
+ // Note that the WatcherThread can now stop for safepoints
+ uint32_t debug_bits = 0;
+ if (!thread->wait_for_ext_suspend_completion(SuspendRetryCount,
+ SuspendRetryDelay, &debug_bits)) {
+ unknown_ticks_array[ut_unknown_thread_state] += 1;
+ FlatProfiler::unknown_ticks += 1;
+ return;
+ }
+
+ frame fr;
+
+ switch (thread->thread_state()) {
+ case _thread_in_native:
+ case _thread_in_native_trans:
+ case _thread_in_vm:
+ case _thread_in_vm_trans:
+ if (thread->profile_last_Java_frame(&fr)) {
+ if (fr.is_runtime_frame()) {
+ RegisterMap map(thread, false);
+ fr = fr.sender(&map);
+ }
+ record_tick_for_calling_frame(thread, fr);
+ } else {
+ unknown_ticks_array[ut_no_last_Java_frame] += 1;
+ FlatProfiler::unknown_ticks += 1;
+ }
+ break;
+ // handle_special_runtime_exit_condition self-suspends threads in Java
+ case _thread_in_Java:
+ case _thread_in_Java_trans:
+ if (thread->profile_last_Java_frame(&fr)) {
+ if (fr.is_safepoint_blob_frame()) {
+ RegisterMap map(thread, false);
+ fr = fr.sender(&map);
+ }
+ record_tick_for_running_frame(thread, fr);
+ } else {
+ unknown_ticks_array[ut_no_last_Java_frame] += 1;
+ FlatProfiler::unknown_ticks += 1;
+ }
+ break;
+ case _thread_blocked:
+ case _thread_blocked_trans:
+ if (thread->osthread() && thread->osthread()->get_state() == RUNNABLE) {
+ if (thread->profile_last_Java_frame(&fr)) {
+ if (fr.is_safepoint_blob_frame()) {
+ RegisterMap map(thread, false);
+ fr = fr.sender(&map);
+ record_tick_for_running_frame(thread, fr);
+ } else {
+ record_tick_for_calling_frame(thread, fr);
+ }
+ } else {
+ unknown_ticks_array[ut_no_last_Java_frame] += 1;
+ FlatProfiler::unknown_ticks += 1;
+ }
+ } else {
+ blocked_ticks += 1;
+ FlatProfiler::blocked_ticks += 1;
+ }
+ break;
+ case _thread_uninitialized:
+ case _thread_new:
+ // not used, included for completeness
+ case _thread_new_trans:
+ unknown_ticks_array[ut_no_last_Java_frame] += 1;
+ FlatProfiler::unknown_ticks += 1;
+ break;
+ default:
+ unknown_ticks_array[ut_unknown_thread_state] += 1;
+ FlatProfiler::unknown_ticks += 1;
+ break;
+ }
+ return;
+}
+
+void ThreadProfiler::engage() {
+ engaged = true;
+ timer.start();
+}
+
+void ThreadProfiler::disengage() {
+ engaged = false;
+ timer.stop();
+}
+
+void ThreadProfiler::initialize() {
+ for (int index = 0; index < table_size; index++) {
+ table[index] = NULL;
+ }
+ thread_ticks = 0;
+ blocked_ticks = 0;
+ compiler_ticks = 0;
+ interpreter_ticks = 0;
+ for (int ut = 0; ut < ut_end; ut += 1) {
+ unknown_ticks_array[ut] = 0;
+ }
+ region_flag[ThreadProfilerMark::classLoaderRegion] = false;
+ class_loader_ticks = 0;
+ region_flag[ThreadProfilerMark::extraRegion] = false;
+ extra_ticks = 0;
+ timer.start();
+ interval_data_ref()->reset();
+}
+
+void ThreadProfiler::reset() {
+ timer.stop();
+ if (table != NULL) {
+ for (int index = 0; index < table_size; index++) {
+ ProfilerNode* n = table[index];
+ if (n != NULL) {
+ delete n;
+ }
+ }
+ }
+ initialize();
+}
+
+void FlatProfiler::allocate_table() {
+ { // Bytecode table
+ bytecode_ticks = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes);
+ bytecode_ticks_stub = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes);
+ for(int index = 0; index < Bytecodes::number_of_codes; index++) {
+ bytecode_ticks[index] = 0;
+ bytecode_ticks_stub[index] = 0;
+ }
+ }
+
+ if (ProfilerRecordPC) PCRecorder::init();
+
+ interval_data = NEW_C_HEAP_ARRAY(IntervalData, interval_print_size);
+ FlatProfiler::interval_reset();
+}
+
+void FlatProfiler::engage(JavaThread* mainThread, bool fullProfile) {
+ full_profile_flag = fullProfile;
+ if (bytecode_ticks == NULL) {
+ allocate_table();
+ }
+ if(ProfileVM && (vm_thread_profiler == NULL)){
+ vm_thread_profiler = new ThreadProfiler();
+ }
+ if (task == NULL) {
+ task = new FlatProfilerTask(WatcherThread::delay_interval);
+ task->enroll();
+ }
+ timer.start();
+ if (mainThread != NULL) {
+ // When mainThread was created, it might not have a ThreadProfiler
+ ThreadProfiler* pp = mainThread->get_thread_profiler();
+ if (pp == NULL) {
+ mainThread->set_thread_profiler(new ThreadProfiler());
+ } else {
+ pp->reset();
+ }
+ mainThread->get_thread_profiler()->engage();
+ }
+ // This is where we would assign thread_profiler
+ // if we wanted only one thread_profiler for all threads.
+ thread_profiler = NULL;
+}
+
+void FlatProfiler::disengage() {
+ if (!task) {
+ return;
+ }
+ timer.stop();
+ task->disenroll();
+ delete task;
+ task = NULL;
+ if (thread_profiler != NULL) {
+ thread_profiler->disengage();
+ } else {
+ MutexLocker tl(Threads_lock);
+ for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
+ ThreadProfiler* pp = tp->get_thread_profiler();
+ if (pp != NULL) {
+ pp->disengage();
+ }
+ }
+ }
+}
+
+void FlatProfiler::reset() {
+ if (task) {
+ disengage();
+ }
+
+ class_loader_ticks = 0;
+ extra_ticks = 0;
+ received_gc_ticks = 0;
+ vm_operation_ticks = 0;
+ compiler_ticks = 0;
+ deopt_ticks = 0;
+ interpreter_ticks = 0;
+ blocked_ticks = 0;
+ unknown_ticks = 0;
+ received_ticks = 0;
+ delivered_ticks = 0;
+ timer.stop();
+}
+
+bool FlatProfiler::is_active() {
+ return task != NULL;
+}
+
+void FlatProfiler::print_byte_code_statistics() {
+ GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
+
+ tty->print_cr(" Bytecode ticks:");
+ for (int index = 0; index < Bytecodes::number_of_codes; index++) {
+ if (FlatProfiler::bytecode_ticks[index] > 0 || FlatProfiler::bytecode_ticks_stub[index] > 0) {
+ tty->print_cr(" %4d %4d = %s",
+ FlatProfiler::bytecode_ticks[index],
+ FlatProfiler::bytecode_ticks_stub[index],
+ Bytecodes::name( (Bytecodes::Code) index));
+ }
+ }
+ tty->cr();
+}
+
+void print_ticks(const char* title, int ticks, int total) {
+ if (ticks > 0) {
+ tty->print("%5.1f%% %5d", ticks * 100.0 / total, ticks);
+ tty->fill_to(col3);
+ tty->print("%s", title);
+ tty->cr();
+ }
+}
+
+void ThreadProfiler::print(const char* thread_name) {
+ ResourceMark rm;
+ MutexLocker ppl(ProfilePrint_lock);
+ int index = 0; // Declared outside for loops for portability
+
+ if (table == NULL) {
+ return;
+ }
+
+ if (thread_ticks <= 0) {
+ return;
+ }
+
+ const char* title = "too soon to tell";
+ double secs = timer.seconds();
+
+ GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
+ for(index = 0; index < table_size; index++) {
+ for(ProfilerNode* node = table[index]; node; node = node->next())
+ array->append(node);
+ }
+
+ array->sort(&ProfilerNode::compare);
+
+ // compute total (sanity check)
+ int active =
+ class_loader_ticks +
+ compiler_ticks +
+ interpreter_ticks +
+ unknown_ticks();
+ for (index = 0; index < array->length(); index++) {
+ active += array->at(index)->ticks.total();
+ }
+ int total = active + blocked_ticks;
+
+ tty->cr();
+ tty->print_cr("Flat profile of %3.2f secs (%d total ticks): %s", secs, total, thread_name);
+ if (total != thread_ticks) {
+ print_ticks("Lost ticks", thread_ticks-total, thread_ticks);
+ }
+ tty->cr();
+
+ // print interpreted methods
+ tick_counter interpreted_ticks;
+ bool has_interpreted_ticks = false;
+ int print_count = 0;
+ for (index = 0; index < array->length(); index++) {
+ ProfilerNode* n = array->at(index);
+ if (n->is_interpreted()) {
+ interpreted_ticks.add(&n->ticks);
+ if (!has_interpreted_ticks) {
+ interpretedNode::print_title(tty);
+ has_interpreted_ticks = true;
+ }
+ if (print_count++ < ProfilerNumberOfInterpretedMethods) {
+ n->print(tty, active);
+ }
+ }
+ }
+ if (has_interpreted_ticks) {
+ if (print_count <= ProfilerNumberOfInterpretedMethods) {
+ title = "Total interpreted";
+ } else {
+ title = "Total interpreted (including elided)";
+ }
+ interpretedNode::print_total(tty, &interpreted_ticks, active, title);
+ tty->cr();
+ }
+
+ // print compiled methods
+ tick_counter compiled_ticks;
+ bool has_compiled_ticks = false;
+ print_count = 0;
+ for (index = 0; index < array->length(); index++) {
+ ProfilerNode* n = array->at(index);
+ if (n->is_compiled()) {
+ compiled_ticks.add(&n->ticks);
+ if (!has_compiled_ticks) {
+ compiledNode::print_title(tty);
+ has_compiled_ticks = true;
+ }
+ if (print_count++ < ProfilerNumberOfCompiledMethods) {
+ n->print(tty, active);
+ }
+ }
+ }
+ if (has_compiled_ticks) {
+ if (print_count <= ProfilerNumberOfCompiledMethods) {
+ title = "Total compiled";
+ } else {
+ title = "Total compiled (including elided)";
+ }
+ compiledNode::print_total(tty, &compiled_ticks, active, title);
+ tty->cr();
+ }
+
+ // print stub methods
+ tick_counter stub_ticks;
+ bool has_stub_ticks = false;
+ print_count = 0;
+ for (index = 0; index < array->length(); index++) {
+ ProfilerNode* n = array->at(index);
+ if (n->is_stub()) {
+ stub_ticks.add(&n->ticks);
+ if (!has_stub_ticks) {
+ stubNode::print_title(tty);
+ has_stub_ticks = true;
+ }
+ if (print_count++ < ProfilerNumberOfStubMethods) {
+ n->print(tty, active);
+ }
+ }
+ }
+ if (has_stub_ticks) {
+ if (print_count <= ProfilerNumberOfStubMethods) {
+ title = "Total stub";
+ } else {
+ title = "Total stub (including elided)";
+ }
+ stubNode::print_total(tty, &stub_ticks, active, title);
+ tty->cr();
+ }
+
+ // print runtime stubs
+ tick_counter runtime_stub_ticks;
+ bool has_runtime_stub_ticks = false;
+ print_count = 0;
+ for (index = 0; index < array->length(); index++) {
+ ProfilerNode* n = array->at(index);
+ if (n->is_runtime_stub()) {
+ runtime_stub_ticks.add(&n->ticks);
+ if (!has_runtime_stub_ticks) {
+ runtimeStubNode::print_title(tty);
+ has_runtime_stub_ticks = true;
+ }
+ if (print_count++ < ProfilerNumberOfRuntimeStubNodes) {
+ n->print(tty, active);
+ }
+ }
+ }
+ if (has_runtime_stub_ticks) {
+ if (print_count <= ProfilerNumberOfRuntimeStubNodes) {
+ title = "Total runtime stubs";
+ } else {
+ title = "Total runtime stubs (including elided)";
+ }
+ runtimeStubNode::print_total(tty, &runtime_stub_ticks, active, title);
+ tty->cr();
+ }
+
+ if (blocked_ticks + class_loader_ticks + interpreter_ticks + compiler_ticks + unknown_ticks() != 0) {
+ tty->fill_to(col1);
+ tty->print_cr("Thread-local ticks:");
+ print_ticks("Blocked (of total)", blocked_ticks, total);
+ print_ticks("Class loader", class_loader_ticks, active);
+ print_ticks("Extra", extra_ticks, active);
+ print_ticks("Interpreter", interpreter_ticks, active);
+ print_ticks("Compilation", compiler_ticks, active);
+ print_ticks("Unknown: vtable stubs", unknown_ticks_array[ut_vtable_stubs], active);
+ print_ticks("Unknown: null method", unknown_ticks_array[ut_null_method], active);
+ print_ticks("Unknown: running frame", unknown_ticks_array[ut_running_frame], active);
+ print_ticks("Unknown: calling frame", unknown_ticks_array[ut_calling_frame], active);
+ print_ticks("Unknown: no pc", unknown_ticks_array[ut_no_pc], active);
+ print_ticks("Unknown: no last frame", unknown_ticks_array[ut_no_last_Java_frame], active);
+ print_ticks("Unknown: thread_state", unknown_ticks_array[ut_unknown_thread_state], active);
+ tty->cr();
+ }
+
+ if (WizardMode) {
+ tty->print_cr("Node area used: %dKb", (area_top - area_bottom) / 1024);
+ }
+ reset();
+}
+
+/*
+ThreadProfiler::print_unknown(){
+ if (table == NULL) {
+ return;
+ }
+
+ if (thread_ticks <= 0) {
+ return;
+ }
+} */
+
+void FlatProfiler::print(int unused) {
+ ResourceMark rm;
+ if (thread_profiler != NULL) {
+ thread_profiler->print("All threads");
+ } else {
+ MutexLocker tl(Threads_lock);
+ for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
+ ThreadProfiler* pp = tp->get_thread_profiler();
+ if (pp != NULL) {
+ pp->print(tp->get_thread_name());
+ }
+ }
+ }
+
+ if (ProfilerPrintByteCodeStatistics) {
+ print_byte_code_statistics();
+ }
+
+ if (non_method_ticks() > 0) {
+ tty->cr();
+ tty->print_cr("Global summary of %3.2f seconds:", timer.seconds());
+ print_ticks("Received ticks", received_ticks, received_ticks);
+ print_ticks("Received GC ticks", received_gc_ticks, received_ticks);
+ print_ticks("Compilation", compiler_ticks, received_ticks);
+ print_ticks("Deoptimization", deopt_ticks, received_ticks);
+ print_ticks("Other VM operations", vm_operation_ticks, received_ticks);
+#ifndef PRODUCT
+ print_ticks("Blocked ticks", blocked_ticks, received_ticks);
+ print_ticks("Threads_lock blocks", threads_lock_ticks, received_ticks);
+ print_ticks("Delivered ticks", delivered_ticks, received_ticks);
+ print_ticks("All ticks", all_ticks, received_ticks);
+#endif
+ print_ticks("Class loader", class_loader_ticks, received_ticks);
+ print_ticks("Extra ", extra_ticks, received_ticks);
+ print_ticks("Interpreter", interpreter_ticks, received_ticks);
+ print_ticks("Unknown code", unknown_ticks, received_ticks);
+ }
+
+ PCRecorder::print();
+
+ if(ProfileVM){
+ tty->cr();
+ vm_thread_profiler->print("VM Thread");
+ }
+}
+
+void IntervalData::print_header(outputStream* st) {
+ st->print("i/c/n/g");
+}
+
+void IntervalData::print_data(outputStream* st) {
+ st->print("%d/%d/%d/%d", interpreted(), compiled(), native(), compiling());
+}
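+
+// For example, an interval entry printed as "12/3/1/0" means that 12 ticks in
+// the interval were attributed to interpreted code, 3 to compiled code, 1 to
+// native/VM code and 0 to time blocked on compilation (the "i/c/n/g" header).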
+
+void FlatProfiler::interval_record_thread(ThreadProfiler* tp) {
+ IntervalData id = tp->interval_data();
+ int total = id.total();
+ tp->interval_data_ref()->reset();
+
+ // Insertion sort the data, if it's relevant.
+ for (int i = 0; i < interval_print_size; i += 1) {
+ if (total > interval_data[i].total()) {
+ for (int j = interval_print_size - 1; j > i; j -= 1) {
+ interval_data[j] = interval_data[j-1];
+ }
+ interval_data[i] = id;
+ break;
+ }
+ }
+}
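+
+// In other words, interval_data[] keeps the interval_print_size (10) busiest
+// per-thread records of the just-expired interval, sorted by total() in
+// descending order; a new record is inserted at its rank and the smallest
+// entry falls off the end.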
+
+void FlatProfiler::interval_print() {
+ if ((interval_data[0].total() > 0)) {
+ tty->stamp();
+ tty->print("\t");
+ IntervalData::print_header(tty);
+ for (int i = 0; i < interval_print_size; i += 1) {
+ if (interval_data[i].total() > 0) {
+ tty->print("\t");
+ interval_data[i].print_data(tty);
+ }
+ }
+ tty->cr();
+ }
+}
+
+void FlatProfiler::interval_reset() {
+ for (int i = 0; i < interval_print_size; i += 1) {
+ interval_data[i].reset();
+ }
+}
+
+void ThreadProfiler::oops_do(OopClosure* f) {
+ if (table == NULL) return;
+
+ for(int index = 0; index < table_size; index++) {
+ for(ProfilerNode* node = table[index]; node; node = node->next())
+ node->oops_do(f);
+ }
+}
+
+void FlatProfiler::oops_do(OopClosure* f) {
+ if (thread_profiler != NULL) {
+ thread_profiler->oops_do(f);
+ } else {
+ for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
+ ThreadProfiler* pp = tp->get_thread_profiler();
+ if (pp != NULL) {
+ pp->oops_do(f);
+ }
+ }
+ }
+}
diff --git a/src/share/vm/runtime/fprofiler.hpp b/src/share/vm/runtime/fprofiler.hpp
new file mode 100644
index 000000000..5c288d2e5
--- /dev/null
+++ b/src/share/vm/runtime/fprofiler.hpp
@@ -0,0 +1,311 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// A simple flat profiler for Java.
+
+
+// Forward declaration of classes defined in this header file
+class ThreadProfiler;
+class ThreadProfilerMark;
+class FlatProfiler;
+class IntervalData;
+
+// Declarations of classes defined only in the implementation.
+class ProfilerNode;
+class FlatProfilerTask;
+
+enum TickPosition {
+ tp_code,
+ tp_native
+};
+
+// One of these objects is constructed as we enter an interesting region
+// and destroyed as we exit it. While we are in the region,
+// ticks are allotted to the region.
+class ThreadProfilerMark: public StackObj {
+public:
+ // For now, the only thread-specific region is the class loader.
+ enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };
+
+ ThreadProfilerMark(Region) KERNEL_RETURN;
+ ~ThreadProfilerMark() KERNEL_RETURN;
+
+private:
+ ThreadProfiler* _pp;
+ Region _r;
+};
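+
+// Usage sketch (illustrative): ticks that arrive while a mark is live are
+// attributed to its region, e.g.
+//
+//   {
+//     ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
+//     // ... class loading work: ticks count as class_loader_ticks ...
+//   }   // the destructor clears the region flag again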
+
+#ifndef FPROF_KERNEL
+
+class IntervalData VALUE_OBJ_CLASS_SPEC {
+ // Just to keep these things all together
+private:
+ int _interpreted;
+ int _compiled;
+ int _native;
+ int _compiling;
+public:
+ int interpreted() {
+ return _interpreted;
+ }
+ int compiled() {
+ return _compiled;
+ }
+ int native() {
+ return _native;
+ }
+ int compiling() {
+ return _compiling;
+ }
+ int total() {
+ return (interpreted() + compiled() + native() + compiling());
+ }
+ void inc_interpreted() {
+ _interpreted += 1;
+ }
+ void inc_compiled() {
+ _compiled += 1;
+ }
+ void inc_native() {
+ _native += 1;
+ }
+ void inc_compiling() {
+ _compiling += 1;
+ }
+ void reset() {
+ _interpreted = 0;
+ _compiled = 0;
+ _native = 0;
+ _compiling = 0;
+ }
+ static void print_header(outputStream* st);
+ void print_data(outputStream* st);
+};
+#endif // FPROF_KERNEL
+
+class ThreadProfiler: public CHeapObj {
+public:
+ ThreadProfiler() KERNEL_RETURN;
+ ~ThreadProfiler() KERNEL_RETURN;
+
+ // Resets the profiler
+ void reset() KERNEL_RETURN;
+
+ // Activates the profiler for a certain thread
+ void engage() KERNEL_RETURN;
+
+ // Deactivates the profiler
+ void disengage() KERNEL_RETURN;
+
+ // Prints the collected profiling information
+ void print(const char* thread_name) KERNEL_RETURN;
+
+ // Garbage Collection Support
+ void oops_do(OopClosure* f) KERNEL_RETURN;
+
+#ifndef FPROF_KERNEL
+private:
+ // for recording ticks.
+ friend class ProfilerNode;
+ char* area_bottom; // preallocated area for pnodes
+ char* area_top;
+ char* area_limit;
+ static int table_size;
+ ProfilerNode** table;
+
+private:
+ void record_interpreted_tick(frame fr, TickPosition where, int* ticks);
+ void record_compiled_tick (JavaThread* thread, frame fr, TickPosition where);
+ void interpreted_update(methodOop method, TickPosition where);
+ void compiled_update (methodOop method, TickPosition where);
+ void stub_update (methodOop method, const char* name, TickPosition where);
+ void adapter_update (TickPosition where);
+
+ void runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where);
+ void unknown_compiled_update (const CodeBlob* cb, TickPosition where);
+
+ void vm_update (TickPosition where);
+ void vm_update (const char* name, TickPosition where);
+
+ void record_tick_for_running_frame(JavaThread* thread, frame fr);
+ void record_tick_for_calling_frame(JavaThread* thread, frame fr);
+
+ void initialize();
+
+ static int entry(int value);
+
+
+private:
+ friend class FlatProfiler;
+ void record_tick(JavaThread* thread);
+ bool engaged;
+ // so we can do percentages for this thread, and quick checks for activity
+ int thread_ticks;
+ int compiler_ticks;
+ int interpreter_ticks;
+
+public:
+ void inc_thread_ticks() { thread_ticks += 1; }
+
+private:
+ friend class ThreadProfilerMark;
+ // counters for thread-specific regions
+ bool region_flag[ThreadProfilerMark::maxRegion];
+ int class_loader_ticks;
+ int extra_ticks;
+
+private:
+ // other thread-specific regions
+ int blocked_ticks;
+ enum UnknownTickSites {
+ ut_null_method,
+ ut_vtable_stubs,
+ ut_running_frame,
+ ut_calling_frame,
+ ut_no_pc,
+ ut_no_last_Java_frame,
+ ut_unknown_thread_state,
+ ut_end
+ };
+ int unknown_ticks_array[ut_end];
+ int unknown_ticks() {
+ int result = 0;
+ for (int ut = 0; ut < ut_end; ut += 1) {
+ result += unknown_ticks_array[ut];
+ }
+ return result;
+ }
+
+ elapsedTimer timer;
+
+ // For interval timing
+private:
+ IntervalData _interval_data;
+ IntervalData interval_data() {
+ return _interval_data;
+ }
+ IntervalData* interval_data_ref() {
+ return &_interval_data;
+ }
+#endif // FPROF_KERNEL
+};
+
+class FlatProfiler: AllStatic {
+public:
+ static void reset() KERNEL_RETURN ;
+ static void engage(JavaThread* mainThread, bool fullProfile) KERNEL_RETURN ;
+ static void disengage() KERNEL_RETURN ;
+ static void print(int unused) KERNEL_RETURN ;
+ static bool is_active() KERNEL_RETURN_(return false;) ;
+
+ // This is NULL if each thread has its own thread profiler,
+ // else this is the single thread profiler used by all threads.
+ // In particular it makes a difference during garbage collection,
+ // where you only want to traverse each thread profiler once.
+ static ThreadProfiler* get_thread_profiler() KERNEL_RETURN_(return NULL;);
+
+ // Garbage Collection Support
+ static void oops_do(OopClosure* f) KERNEL_RETURN ;
+
+ // Support for disassembler to inspect the PCRecorder
+
+ // Returns the start address for a given pc
+ // NULL is returned if the PCRecorder is inactive
+ static address bucket_start_for(address pc) KERNEL_RETURN_(return NULL;);
+
+  enum { MillisecsPerTick = 10 };    // ms per profiling tick
+
+ // Returns the number of ticks recorded for the bucket
+ // pc belongs to.
+ static int bucket_count_for(address pc) KERNEL_RETURN_(return 0;);
+
+#ifndef FPROF_KERNEL
+
+ private:
+ static bool full_profile() {
+ return full_profile_flag;
+ }
+
+ friend class ThreadProfiler;
+  // the following group of ticks covers everything that's not attributed to individual Java methods
+ static int received_gc_ticks; // ticks during which gc was active
+ static int vm_operation_ticks; // total ticks in vm_operations other than GC
+ static int threads_lock_ticks; // the number of times we couldn't get the Threads_lock without blocking
+ static int blocked_ticks; // ticks when the thread was blocked.
+ static int class_loader_ticks; // total ticks in class loader
+  static int extra_ticks;              // total ticks in the extra (temporary measuring) region
+ static int compiler_ticks; // total ticks in compilation
+ static int interpreter_ticks; // ticks in unknown interpreted method
+ static int deopt_ticks; // ticks in deoptimization
+ static int unknown_ticks; // ticks that cannot be categorized
+ static int received_ticks; // ticks that were received by task
+ static int delivered_ticks; // ticks that were delivered by task
+ static int non_method_ticks() {
+ return
+ ( received_gc_ticks
+ + vm_operation_ticks
+ + deopt_ticks
+ + threads_lock_ticks
+ + blocked_ticks
+ + compiler_ticks
+ + interpreter_ticks
+ + unknown_ticks );
+ }
+ static elapsedTimer timer;
+
+ // Counts of each of the byte codes
+ static int* bytecode_ticks;
+ static int* bytecode_ticks_stub;
+ static void print_byte_code_statistics();
+
+ // the ticks below are for continuous profiling (to adjust recompilation, etc.)
+ static int all_ticks; // total count of ticks received so far
+ static int all_int_ticks; // ticks in interpreter
+ static int all_comp_ticks; // ticks in compiled code (+ native)
+ static bool full_profile_flag; // collecting full profile?
+
+ // to accumulate thread-specific data
+ // if we aren't profiling individual threads.
+ static ThreadProfiler* thread_profiler;
+ static ThreadProfiler* vm_thread_profiler;
+
+ static void allocate_table();
+
+ // The task that periodically interrupts things.
+ friend class FlatProfilerTask;
+ static FlatProfilerTask* task;
+ static void record_vm_operation();
+ static void record_vm_tick();
+ static void record_thread_ticks();
+
+ // For interval analysis
+ private:
+ static int interval_ticks_previous; // delivered_ticks from the last interval
+ static void interval_record_thread(ThreadProfiler* tp); // extract ticks from ThreadProfiler.
+ static void interval_print(); // print interval data.
+ static void interval_reset(); // reset interval data.
+ enum {interval_print_size = 10};
+ static IntervalData* interval_data;
+#endif // FPROF_KERNEL
+};
diff --git a/src/share/vm/runtime/frame.cpp b/src/share/vm/runtime/frame.cpp
new file mode 100644
index 000000000..efc74a361
--- /dev/null
+++ b/src/share/vm/runtime/frame.cpp
@@ -0,0 +1,1408 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_frame.cpp.incl"
+
+RegisterMap::RegisterMap(JavaThread *thread, bool update_map) {
+ _thread = thread;
+ _update_map = update_map;
+ clear();
+ debug_only(_update_for_id = NULL;)
+#ifndef PRODUCT
+ for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL;
+#endif /* PRODUCT */
+}
+
+RegisterMap::RegisterMap(const RegisterMap* map) {
+ assert(map != this, "bad initialization parameter");
+ assert(map != NULL, "RegisterMap must be present");
+ _thread = map->thread();
+ _update_map = map->update_map();
+ _include_argument_oops = map->include_argument_oops();
+ debug_only(_update_for_id = map->_update_for_id;)
+ pd_initialize_from(map);
+ if (update_map()) {
+ for(int i = 0; i < location_valid_size; i++) {
+ LocationValidType bits = !update_map() ? 0 : map->_location_valid[i];
+ _location_valid[i] = bits;
+ // for whichever bits are set, pull in the corresponding map->_location
+ int j = i*location_valid_type_size;
+ while (bits != 0) {
+ if ((bits & 1) != 0) {
+ assert(0 <= j && j < reg_count, "range check");
+ _location[j] = map->_location[j];
+ }
+ bits >>= 1;
+ j += 1;
+ }
+ }
+ }
+}
+
+void RegisterMap::clear() {
+ set_include_argument_oops(true);
+ if (_update_map) {
+ for(int i = 0; i < location_valid_size; i++) {
+ _location_valid[i] = 0;
+ }
+ pd_clear();
+ } else {
+ pd_initialize();
+ }
+}
+
+#ifndef PRODUCT
+
+void RegisterMap::print_on(outputStream* st) const {
+ st->print_cr("Register map");
+ for(int i = 0; i < reg_count; i++) {
+
+ VMReg r = VMRegImpl::as_VMReg(i);
+ intptr_t* src = (intptr_t*) location(r);
+ if (src != NULL) {
+
+ r->print();
+ st->print(" [" INTPTR_FORMAT "] = ", src);
+ if (((uintptr_t)src & (sizeof(*src)-1)) != 0) {
+ st->print_cr("<misaligned>");
+ } else {
+ st->print_cr(INTPTR_FORMAT, *src);
+ }
+ }
+ }
+}
+
+void RegisterMap::print() const {
+ print_on(tty);
+}
+
+#endif
+// This returns the pc you would see in a debugger, not the idealized value
+// in the frame object. It undoes the magic conversion that happens for
+// deoptimized frames and yields the value the hardware would expect to see
+// in the native frame. The only user (at this point) is deoptimization,
+// and it is unlikely anyone else should ever use it.
+
+address frame::raw_pc() const {
+ if (is_deoptimized_frame()) {
+ return ((nmethod*) cb())->deopt_handler_begin() - pc_return_offset;
+ } else {
+ return (pc() - pc_return_offset);
+ }
+}
+
+// Change the pc in a frame object. This does not change the actual pc in
+// the actual frame. To do that, use patch_pc.
+//
+void frame::set_pc(address newpc ) {
+#ifdef ASSERT
+ if (_cb != NULL && _cb->is_nmethod()) {
+ assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation");
+ }
+#endif // ASSERT
+
+ // Unsafe to use the is_deoptimized tester after changing pc
+ _deopt_state = unknown;
+ _pc = newpc;
+ _cb = CodeCache::find_blob_unsafe(_pc);
+
+}
+
+// type testers
+bool frame::is_deoptimized_frame() const {
+ assert(_deopt_state != unknown, "not answerable");
+ return _deopt_state == is_deoptimized;
+}
+
+bool frame::is_native_frame() const {
+ return (_cb != NULL &&
+ _cb->is_nmethod() &&
+ ((nmethod*)_cb)->is_native_method());
+}
+
+bool frame::is_java_frame() const {
+ if (is_interpreted_frame()) return true;
+ if (is_compiled_frame()) return true;
+ return false;
+}
+
+
+bool frame::is_compiled_frame() const {
+ if (_cb != NULL &&
+ _cb->is_nmethod() &&
+ ((nmethod*)_cb)->is_java_method()) {
+ return true;
+ }
+ return false;
+}
+
+
+bool frame::is_runtime_frame() const {
+ return (_cb != NULL && _cb->is_runtime_stub());
+}
+
+bool frame::is_safepoint_blob_frame() const {
+ return (_cb != NULL && _cb->is_safepoint_stub());
+}
+
+// testers
+
+bool frame::is_first_java_frame() const {
+ RegisterMap map(JavaThread::current(), false); // No update
+ frame s;
+ for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map));
+ return s.is_first_frame();
+}
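+
+// Example (illustrative sketch): walking every frame of a thread with a
+// RegisterMap, mirroring the loop above. 'thread' is assumed to be a
+// JavaThread that has a last Java frame.
+//
+//   RegisterMap map(thread, false);  // no update needed for a plain walk
+//   for (frame fr = thread->last_frame();
+//        !fr.is_first_frame();
+//        fr = fr.sender(&map)) {
+//     if (fr.is_java_frame()) {
+//       // interpreted or compiled Java activation
+//     }
+//   }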
+
+
+bool frame::entry_frame_is_first() const {
+ return entry_frame_call_wrapper()->anchor()->last_Java_sp() == NULL;
+}
+
+
+bool frame::should_be_deoptimized() const {
+ if (_deopt_state == is_deoptimized ||
+ !is_compiled_frame() ) return false;
+ assert(_cb != NULL && _cb->is_nmethod(), "must be an nmethod");
+ nmethod* nm = (nmethod *)_cb;
+ if (TraceDependencies) {
+ tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false");
+ nm->print_value_on(tty);
+ tty->cr();
+ }
+
+ if( !nm->is_marked_for_deoptimization() )
+ return false;
+
+ // If at the return point, then the frame has already been popped, and
+ // only the return needs to be executed. Don't deoptimize here.
+ return !nm->is_at_poll_return(pc());
+}
+
+bool frame::can_be_deoptimized() const {
+ if (!is_compiled_frame()) return false;
+ nmethod* nm = (nmethod*)_cb;
+
+ if( !nm->can_be_deoptimized() )
+ return false;
+
+ return !nm->is_at_poll_return(pc());
+}
+
+void frame::deoptimize(JavaThread* thread, bool thread_is_known_safe) {
+// Schedule deoptimization of an nmethod activation with this frame.
+
+ // Store the original pc before a patch (or request to self-deopt)
+ // in the published location of the frame.
+
+ assert(_cb != NULL && _cb->is_nmethod(), "must be");
+ nmethod* nm = (nmethod*)_cb;
+
+ // This is a fix for register window patching race
+ if (NeedsDeoptSuspend && !thread_is_known_safe) {
+
+ // It is possible, especially with DeoptimizeALot/DeoptimizeRandom, that
+ // we could see the frame again and ask for it to be deoptimized, since
+ // it might not move for a long time. That is harmless and we just ignore it.
+ if (id() == thread->must_deopt_id()) {
+ assert(thread->is_deopt_suspend(), "lost suspension");
+ return;
+ }
+
+ // We are at a safepoint so the target thread can only be
+ // in 4 states:
+ // blocked - no problem
+ // blocked_trans - no problem (i.e. could have woken up from blocked
+ // during a safepoint).
+ // native - register window pc patching race
+ // native_trans - momentary state
+ //
+ // We could just wait out a thread in native_trans to block.
+ // Then we'd have all the issues that the safepoint code has as to
+ // whether to spin or block. It isn't worth it. Just treat it like
+ // native and be done with it.
+ //
+ JavaThreadState state = thread->thread_state();
+ if (state == _thread_in_native || state == _thread_in_native_trans) {
+ // Since we are at a safepoint the target thread will stop itself
+ // before it can return to java as long as we remain at the safepoint.
+ // Therefore we can put in an additional request for the thread to stop
+ // no matter what (much like a suspend). This will cause the thread
+ // to notice it needs to do the deopt on its own once it leaves native.
+ //
+ // The only reason we must do this is because on machines with register
+ // windows we have a race with patching the return address and the
+ // window coming live as the thread returns to the Java code (but still
+ // in native mode) and then blocks. It is only this topmost frame
+ // that is at risk. So in truth we could add an additional check to
+ // see if this frame is one that is at risk.
+ RegisterMap map(thread, false);
+ frame at_risk = thread->last_frame().sender(&map);
+ if (id() == at_risk.id()) {
+ thread->set_must_deopt_id(id());
+ thread->set_deopt_suspend();
+ return;
+ }
+ }
+ } // NeedsDeoptSuspend
+
+
+ address deopt = nm->deopt_handler_begin();
+ // Save the original pc before we patch in the new one
+ nm->set_original_pc(this, pc());
+ patch_pc(thread, deopt);
+#ifdef ASSERT
+ {
+ RegisterMap map(thread, false);
+ frame check = thread->last_frame();
+ while (id() != check.id()) {
+ check = check.sender(&map);
+ }
+ assert(check.is_deoptimized_frame(), "missed deopt");
+ }
+#endif // ASSERT
+}
+
+frame frame::java_sender() const {
+ RegisterMap map(JavaThread::current(), false);
+ frame s;
+ for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map)) ;
+ guarantee(s.is_java_frame(), "tried to get caller of first java frame");
+ return s;
+}
+
+frame frame::real_sender(RegisterMap* map) const {
+ frame result = sender(map);
+ while (result.is_runtime_frame()) {
+ result = result.sender(map);
+ }
+ return result;
+}
+
+// Note: called by profiler - NOT for current thread
+frame frame::profile_find_Java_sender_frame(JavaThread *thread) {
+// If we don't recognize this frame, walk back up the stack until we do
+ RegisterMap map(thread, false);
+ frame first_java_frame = frame();
+
+ // Find the first Java frame on the stack starting with input frame
+ if (is_java_frame()) {
+ // top frame is compiled frame or deoptimized frame
+ first_java_frame = *this;
+ } else if (safe_for_sender(thread)) {
+ for (frame sender_frame = sender(&map);
+ sender_frame.safe_for_sender(thread) && !sender_frame.is_first_frame();
+ sender_frame = sender_frame.sender(&map)) {
+ if (sender_frame.is_java_frame()) {
+ first_java_frame = sender_frame;
+ break;
+ }
+ }
+ }
+ return first_java_frame;
+}
+
+// Interpreter frames
+
+
+void frame::interpreter_frame_set_locals(intptr_t* locs) {
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+ *interpreter_frame_locals_addr() = locs;
+}
+
+methodOop frame::interpreter_frame_method() const {
+ assert(is_interpreted_frame(), "interpreted frame expected");
+ methodOop m = *interpreter_frame_method_addr();
+ assert(m->is_perm(), "bad methodOop in interpreter frame");
+ assert(m->is_method(), "not a methodOop");
+ return m;
+}
+
+void frame::interpreter_frame_set_method(methodOop method) {
+ assert(is_interpreted_frame(), "interpreted frame expected");
+ *interpreter_frame_method_addr() = method;
+}
+
+void frame::interpreter_frame_set_bcx(intptr_t bcx) {
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+ if (ProfileInterpreter) {
+ bool formerly_bci = is_bci(interpreter_frame_bcx());
+ bool is_now_bci = is_bci(bcx);
+ *interpreter_frame_bcx_addr() = bcx;
+
+ intptr_t mdx = interpreter_frame_mdx();
+
+ if (mdx != 0) {
+ if (formerly_bci) {
+ if (!is_now_bci) {
+ // The bcx was just converted from bci to bcp.
+ // Convert the mdx in parallel.
+ methodDataOop mdo = interpreter_frame_method()->method_data();
+ assert(mdo != NULL, "");
+ int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one.
+ address mdp = mdo->di_to_dp(mdi);
+ interpreter_frame_set_mdx((intptr_t)mdp);
+ }
+ } else {
+ if (is_now_bci) {
+ // The bcx was just converted from bcp to bci.
+ // Convert the mdx in parallel.
+ methodDataOop mdo = interpreter_frame_method()->method_data();
+ assert(mdo != NULL, "");
+ int mdi = mdo->dp_to_di((address)mdx);
+ interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0.
+ }
+ }
+ }
+ } else {
+ *interpreter_frame_bcx_addr() = bcx;
+ }
+}
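+
+// Example (illustrative sketch): the bcx slot is made position-independent
+// around a GC, exactly as gc_prologue()/gc_epilogue() later in this file do:
+//
+//   fr.interpreter_frame_set_bcx(fr.interpreter_frame_bci());           // bcp -> bci before GC
+//   // ... GC may move the methodOop and therefore its bytecodes ...
+//   fr.interpreter_frame_set_bcx((intptr_t)fr.interpreter_frame_bcp()); // bci -> bcp after GC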
+
+jint frame::interpreter_frame_bci() const {
+ assert(is_interpreted_frame(), "interpreted frame expected");
+ intptr_t bcx = interpreter_frame_bcx();
+ return is_bci(bcx) ? bcx : interpreter_frame_method()->bci_from((address)bcx);
+}
+
+void frame::interpreter_frame_set_bci(jint bci) {
+ assert(is_interpreted_frame(), "interpreted frame expected");
+ assert(!is_bci(interpreter_frame_bcx()), "should not set bci during GC");
+ interpreter_frame_set_bcx((intptr_t)interpreter_frame_method()->bcp_from(bci));
+}
+
+address frame::interpreter_frame_bcp() const {
+ assert(is_interpreted_frame(), "interpreted frame expected");
+ intptr_t bcx = interpreter_frame_bcx();
+ return is_bci(bcx) ? interpreter_frame_method()->bcp_from(bcx) : (address)bcx;
+}
+
+void frame::interpreter_frame_set_bcp(address bcp) {
+ assert(is_interpreted_frame(), "interpreted frame expected");
+ assert(!is_bci(interpreter_frame_bcx()), "should not set bcp during GC");
+ interpreter_frame_set_bcx((intptr_t)bcp);
+}
+
+void frame::interpreter_frame_set_mdx(intptr_t mdx) {
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ *interpreter_frame_mdx_addr() = mdx;
+}
+
+address frame::interpreter_frame_mdp() const {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ assert(is_interpreted_frame(), "interpreted frame expected");
+ intptr_t bcx = interpreter_frame_bcx();
+ intptr_t mdx = interpreter_frame_mdx();
+
+ assert(!is_bci(bcx), "should not access mdp during GC");
+ return (address)mdx;
+}
+
+void frame::interpreter_frame_set_mdp(address mdp) {
+ assert(is_interpreted_frame(), "interpreted frame expected");
+ if (mdp == NULL) {
+ // Always allow the mdp to be cleared, even during GC.
+ interpreter_frame_set_mdx((intptr_t)mdp);
+ return;
+ }
+ intptr_t bcx = interpreter_frame_bcx();
+ assert(!is_bci(bcx), "should not set mdp during GC");
+ interpreter_frame_set_mdx((intptr_t)mdp);
+}
+
+BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const {
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+#ifdef ASSERT
+ interpreter_frame_verify_monitor(current);
+#endif
+ BasicObjectLock* next = (BasicObjectLock*) (((intptr_t*) current) + interpreter_frame_monitor_size());
+ return next;
+}
+
+BasicObjectLock* frame::previous_monitor_in_interpreter_frame(BasicObjectLock* current) const {
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+#ifdef ASSERT
+// // This verification needs to be checked before being enabled
+// interpreter_frame_verify_monitor(current);
+#endif
+ BasicObjectLock* previous = (BasicObjectLock*) (((intptr_t*) current) - interpreter_frame_monitor_size());
+ return previous;
+}
+
+// Interpreter locals and expression stack locations.
+
+intptr_t* frame::interpreter_frame_local_at(int index) const {
+ const int n = Interpreter::local_offset_in_bytes(index)/wordSize;
+ return &((*interpreter_frame_locals_addr())[n]);
+}
+
+frame::Tag frame::interpreter_frame_local_tag(int index) const {
+ const int n = Interpreter::local_tag_offset_in_bytes(index)/wordSize;
+ return (Tag)(*interpreter_frame_locals_addr()) [n];
+}
+
+void frame::interpreter_frame_set_local_tag(int index, Tag tag) const {
+ const int n = Interpreter::local_tag_offset_in_bytes(index)/wordSize;
+ (*interpreter_frame_locals_addr())[n] = (intptr_t)tag;
+}
+
+intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const {
+ const int i = offset * interpreter_frame_expression_stack_direction();
+ const int n = ((i * Interpreter::stackElementSize()) +
+ Interpreter::value_offset_in_bytes())/wordSize;
+ return &(interpreter_frame_expression_stack()[n]);
+}
+
+frame::Tag frame::interpreter_frame_expression_stack_tag(jint offset) const {
+ const int i = offset * interpreter_frame_expression_stack_direction();
+ const int n = ((i * Interpreter::stackElementSize()) +
+ Interpreter::tag_offset_in_bytes())/wordSize;
+ return (Tag)(interpreter_frame_expression_stack()[n]);
+}
+
+void frame::interpreter_frame_set_expression_stack_tag(jint offset,
+ Tag tag) const {
+ const int i = offset * interpreter_frame_expression_stack_direction();
+ const int n = ((i * Interpreter::stackElementSize()) +
+ Interpreter::tag_offset_in_bytes())/wordSize;
+ interpreter_frame_expression_stack()[n] = (intptr_t)tag;
+}
+
+jint frame::interpreter_frame_expression_stack_size() const {
+ // Number of elements on the interpreter expression stack
+ // Callers should span by stackElementWords
+ int element_size = Interpreter::stackElementWords();
+ if (frame::interpreter_frame_expression_stack_direction() < 0) {
+ return (interpreter_frame_expression_stack() -
+ interpreter_frame_tos_address() + 1)/element_size;
+ } else {
+ return (interpreter_frame_tos_address() -
+ interpreter_frame_expression_stack() + 1)/element_size;
+ }
+}
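+
+// Example (illustrative sketch): visiting every expression stack slot of an
+// interpreted frame 'fr', mirroring interpreter_frame_print_on() below:
+//
+//   for (jint j = fr.interpreter_frame_expression_stack_size() - 1; j >= 0; j--) {
+//     intptr_t value = *fr.interpreter_frame_expression_stack_at(j);
+//   }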
+
+
+// (frame::interpreter_frame_sender_sp accessor is in frame_<arch>.cpp)
+
+const char* frame::print_name() const {
+ if (is_native_frame()) return "Native";
+ if (is_interpreted_frame()) return "Interpreted";
+ if (is_compiled_frame()) {
+ if (is_deoptimized_frame()) return "Deoptimized";
+ return "Compiled";
+ }
+ if (sp() == NULL) return "Empty";
+ return "C";
+}
+
+void frame::print_value_on(outputStream* st, JavaThread *thread) const {
+ NOT_PRODUCT(address begin = pc()-40;)
+ NOT_PRODUCT(address end = NULL;)
+
+ st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), unextended_sp());
+ if (sp() != NULL)
+ st->print(", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), pc());
+
+ if (StubRoutines::contains(pc())) {
+ st->print_cr(")");
+ st->print("(");
+ StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
+ st->print("~Stub::%s", desc->name());
+ NOT_PRODUCT(begin = desc->begin(); end = desc->end();)
+ } else if (Interpreter::contains(pc())) {
+ st->print_cr(")");
+ st->print("(");
+ InterpreterCodelet* desc = Interpreter::codelet_containing(pc());
+ if (desc != NULL) {
+ st->print("~");
+ desc->print();
+ NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();)
+ } else {
+ st->print("~interpreter");
+ }
+ }
+ st->print_cr(")");
+
+ if (_cb != NULL) {
+ st->print(" ");
+ _cb->print_value_on(st);
+ st->cr();
+#ifndef PRODUCT
+ if (end == NULL) {
+ begin = _cb->instructions_begin();
+ end = _cb->instructions_end();
+ }
+#endif
+ }
+ NOT_PRODUCT(if (WizardMode && Verbose) Disassembler::decode(begin, end);)
+}
+
+
+void frame::print_on(outputStream* st) const {
+ print_value_on(st,NULL);
+ if (is_interpreted_frame()) {
+ interpreter_frame_print_on(st);
+ }
+}
+
+
+void frame::interpreter_frame_print_on(outputStream* st) const {
+#ifndef PRODUCT
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+ jint i;
+ for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) {
+ intptr_t x = *interpreter_frame_local_at(i);
+ st->print(" - local [" INTPTR_FORMAT "]", x);
+ if (TaggedStackInterpreter) {
+ Tag x = interpreter_frame_local_tag(i);
+ st->print(" - local tag [" INTPTR_FORMAT "]", x);
+ }
+ st->fill_to(23);
+ st->print_cr("; #%d", i);
+ }
+ for (i = interpreter_frame_expression_stack_size() - 1; i >= 0; --i ) {
+ intptr_t x = *interpreter_frame_expression_stack_at(i);
+ st->print(" - stack [" INTPTR_FORMAT "]", x);
+ if (TaggedStackInterpreter) {
+ Tag x = interpreter_frame_expression_stack_tag(i);
+ st->print(" - stack tag [" INTPTR_FORMAT "]", x);
+ }
+ st->fill_to(23);
+ st->print_cr("; #%d", i);
+ }
+ // locks for synchronization
+ for (BasicObjectLock* current = interpreter_frame_monitor_end();
+ current < interpreter_frame_monitor_begin();
+ current = next_monitor_in_interpreter_frame(current)) {
+ st->print_cr(" [ - obj ");
+ current->obj()->print_value_on(st);
+ st->cr();
+ st->print_cr(" - lock ");
+ current->lock()->print_on(st);
+ st->cr();
+ }
+ // monitor
+ st->print_cr(" - monitor[" INTPTR_FORMAT "]", interpreter_frame_monitor_begin());
+ // bcp
+ st->print(" - bcp [" INTPTR_FORMAT "]", interpreter_frame_bcp());
+ st->fill_to(23);
+ st->print_cr("; @%d", interpreter_frame_bci());
+ // locals
+ st->print_cr(" - locals [" INTPTR_FORMAT "]", interpreter_frame_local_at(0));
+ // method
+ st->print(" - method [" INTPTR_FORMAT "]", (address)interpreter_frame_method());
+ st->fill_to(23);
+ st->print("; ");
+ interpreter_frame_method()->print_name(st);
+ st->cr();
+#endif
+}
+
+// Print whether the frame is in the VM or the OS, indicating a HotSpot problem.
+// Otherwise, it's likely a bug in the native library that the Java code calls,
+// hopefully indicating where to submit bugs.
+static void print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
+ // C/C++ frame
+ bool in_vm = os::address_is_in_vm(pc);
+ st->print(in_vm ? "V" : "C");
+
+ int offset;
+ bool found;
+
+ // libname
+ found = os::dll_address_to_library_name(pc, buf, buflen, &offset);
+ if (found) {
+ // skip directory names
+ const char *p1, *p2;
+ p1 = buf;
+ int len = (int)strlen(os::file_separator());
+ while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
+ st->print(" [%s+0x%x]", p1, offset);
+ } else {
+ st->print(" " PTR_FORMAT, pc);
+ }
+
+ // function name - os::dll_address_to_function_name() may return confusing
+ // names if pc is within jvm.dll or libjvm.so, because JVM only has
+ // JVM_xxxx and a few other symbols in the dynamic symbol table. Do this
+ // only for native libraries.
+ if (!in_vm) {
+ found = os::dll_address_to_function_name(pc, buf, buflen, &offset);
+
+ if (found) {
+ st->print(" %s+0x%x", buf, offset);
+ }
+ }
+}
+
+// frame::print_on_error() is called by fatal error handler. Notice that we may
+// crash inside this function if stack frame is corrupted. The fatal error
+// handler can catch and handle the crash. Here we assume the frame is valid.
+//
+// First letter indicates type of the frame:
+// J: Java frame (compiled)
+// j: Java frame (interpreted)
+// V: VM frame (C/C++)
+// v: Other frames running VM generated code (e.g. stubs, adapters, etc.)
+// C: C/C++ frame
+//
+// We don't need frame types as detailed as those in frame::print_name(). "C"
+// suggests the problem is in a user library; everything else is likely a VM bug.
+
+void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose) const {
+ if (_cb != NULL) {
+ if (Interpreter::contains(pc())) {
+ methodOop m = this->interpreter_frame_method();
+ if (m != NULL) {
+ m->name_and_sig_as_C_string(buf, buflen);
+ st->print("j %s", buf);
+ st->print("+%d", this->interpreter_frame_bci());
+ } else {
+ st->print("j " PTR_FORMAT, pc());
+ }
+ } else if (StubRoutines::contains(pc())) {
+ StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
+ if (desc != NULL) {
+ st->print("v ~StubRoutines::%s", desc->name());
+ } else {
+ st->print("v ~StubRoutines::" PTR_FORMAT, pc());
+ }
+ } else if (_cb->is_buffer_blob()) {
+ st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
+ } else if (_cb->is_nmethod()) {
+ methodOop m = ((nmethod *)_cb)->method();
+ if (m != NULL) {
+ m->name_and_sig_as_C_string(buf, buflen);
+ st->print("J %s", buf);
+ } else {
+ st->print("J " PTR_FORMAT, pc());
+ }
+ } else if (_cb->is_runtime_stub()) {
+ st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name());
+ } else if (_cb->is_deoptimization_stub()) {
+ st->print("v ~DeoptimizationBlob");
+ } else if (_cb->is_exception_stub()) {
+ st->print("v ~ExceptionBlob");
+ } else if (_cb->is_safepoint_stub()) {
+ st->print("v ~SafepointBlob");
+ } else {
+ st->print("v blob " PTR_FORMAT, pc());
+ }
+ } else {
+ print_C_frame(st, buf, buflen, pc());
+ }
+}
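+
+// Example (illustrative only, with hypothetical method and library names):
+// the resulting one-letter stack lines in an hs_err log look roughly like
+//
+//   j com.example.Foo.bar()V+12
+//   J com.example.Foo.baz(I)I
+//   v ~StubRoutines::call_stub
+//   V [libjvm.so+0x123456]
+//   C [libexample.so+0x1a2b]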
+
+
+/*
+ The interpreter_frame_expression_stack_at method in the case of SPARC needs the
+ max_stack value of the method in order to compute the expression stack address.
+ It uses the methodOop in order to get the max_stack value but during GC this
+ methodOop value saved on the frame is changed by reverse_and_push and hence cannot
+ be used. So we save the max_stack value in the FrameClosure object and pass it
+ down to the interpreter_frame_expression_stack_at method
+*/
+class InterpreterFrameClosure : public OffsetClosure {
+ private:
+ frame* _fr;
+ OopClosure* _f;
+ int _max_locals;
+ int _max_stack;
+
+ public:
+ InterpreterFrameClosure(frame* fr, int max_locals, int max_stack,
+ OopClosure* f) {
+ _fr = fr;
+ _max_locals = max_locals;
+ _max_stack = max_stack;
+ _f = f;
+ }
+
+ void offset_do(int offset) {
+ oop* addr;
+ if (offset < _max_locals) {
+ addr = (oop*) _fr->interpreter_frame_local_at(offset);
+ assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
+ _f->do_oop(addr);
+ } else {
+ addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
+ // In case of exceptions, the expression stack is invalid and the esp will be reset to express
+ // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
+ bool in_stack;
+ if (frame::interpreter_frame_expression_stack_direction() > 0) {
+ in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
+ } else {
+ in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
+ }
+ if (in_stack) {
+ _f->do_oop(addr);
+ }
+ }
+ }
+
+ int max_locals() { return _max_locals; }
+ frame* fr() { return _fr; }
+};
+
+
+class InterpretedArgumentOopFinder: public SignatureInfo {
+ private:
+ OopClosure* _f; // Closure to invoke
+ int _offset; // TOS-relative offset, decremented with each argument
+ bool _is_static; // true if the callee is a static method
+ frame* _fr;
+
+ void set(int size, BasicType type) {
+ _offset -= size;
+ if (type == T_OBJECT || type == T_ARRAY) oop_offset_do();
+ }
+
+ void oop_offset_do() {
+ oop* addr;
+ addr = (oop*)_fr->interpreter_frame_tos_at(_offset);
+ _f->do_oop(addr);
+ }
+
+ public:
+ InterpretedArgumentOopFinder(symbolHandle signature, bool is_static, frame* fr, OopClosure* f) : SignatureInfo(signature) {
+ // compute size of arguments
+ int args_size = ArgumentSizeComputer(signature).size() + (is_static ? 0 : 1);
+ assert(!fr->is_interpreted_frame() ||
+ args_size <= fr->interpreter_frame_expression_stack_size(),
+ "args cannot be on stack anymore");
+ // initialize InterpretedArgumentOopFinder
+ _f = f;
+ _fr = fr;
+ _offset = args_size;
+ _is_static = is_static;
+ }
+
+ void oops_do() {
+ if (!_is_static) {
+ --_offset;
+ oop_offset_do();
+ }
+ iterate_parameters();
+ }
+};
+
+
+// Entry frame has following form (n arguments)
+// +-----------+
+// sp -> | last arg |
+// +-----------+
+// : ::: :
+// +-----------+
+// (sp+n)->| first arg|
+// +-----------+
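+
+// Example (illustrative sketch): for a call whose arguments occupy n words,
+// entry_frame_argument_at(0) is the last argument and entry_frame_argument_at(n-1)
+// the first; for a non-static call the receiver sits one slot further out, at
+// index n (see EntryFrameOopFinder below).
+//
+//   int n = ArgumentSizeComputer(signature).size();
+//   intptr_t* first_arg = fr->entry_frame_argument_at(n - 1);
+//   intptr_t* last_arg  = fr->entry_frame_argument_at(0);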
+
+
+
+// visits and GC's all the arguments in entry frame
+class EntryFrameOopFinder: public SignatureInfo {
+ private:
+ bool _is_static;
+ int _offset;
+ frame* _fr;
+ OopClosure* _f;
+
+ void set(int size, BasicType type) {
+ assert (_offset >= 0, "illegal offset");
+ if (type == T_OBJECT || type == T_ARRAY) oop_at_offset_do(_offset);
+ _offset -= size;
+ }
+
+ void oop_at_offset_do(int offset) {
+ assert (offset >= 0, "illegal offset");
+ oop* addr = (oop*) _fr->entry_frame_argument_at(offset);
+ _f->do_oop(addr);
+ }
+
+ public:
+ EntryFrameOopFinder(frame* frame, symbolHandle signature, bool is_static) : SignatureInfo(signature) {
+ _f = NULL; // will be set later
+ _fr = frame;
+ _is_static = is_static;
+ _offset = ArgumentSizeComputer(signature).size() - 1; // last parameter is at index 0
+ }
+
+ void arguments_do(OopClosure* f) {
+ _f = f;
+ if (!_is_static) oop_at_offset_do(_offset+1); // do the receiver
+ iterate_parameters();
+ }
+
+};
+
+oop* frame::interpreter_callee_receiver_addr(symbolHandle signature) {
+ ArgumentSizeComputer asc(signature);
+ int size = asc.size();
+ return (oop *)interpreter_frame_tos_at(size);
+}
+
+
+void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) {
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+ assert(map != NULL, "map must be set");
+ Thread *thread = Thread::current();
+ methodHandle m (thread, interpreter_frame_method());
+ jint bci = interpreter_frame_bci();
+
+ assert(Universe::heap()->is_in(m()), "must be valid oop");
+ assert(m->is_method(), "checking frame value");
+ assert((m->is_native() && bci == 0) || (!m->is_native() && bci >= 0 && bci < m->code_size()), "invalid bci value");
+
+ // Handle the monitor elements in the activation
+ for (
+ BasicObjectLock* current = interpreter_frame_monitor_end();
+ current < interpreter_frame_monitor_begin();
+ current = next_monitor_in_interpreter_frame(current)
+ ) {
+#ifdef ASSERT
+ interpreter_frame_verify_monitor(current);
+#endif
+ current->oops_do(f);
+ }
+
+ // process fixed part
+ f->do_oop((oop*)interpreter_frame_method_addr());
+ f->do_oop((oop*)interpreter_frame_cache_addr());
+
+ // Hmm what about the mdp?
+#ifdef CC_INTERP
+ // Interpreter frame in the midst of a call have a methodOop within the
+ // object.
+ interpreterState istate = get_interpreterState();
+ if (istate->msg() == BytecodeInterpreter::call_method) {
+ f->do_oop((oop*)&istate->_result._to_call._callee);
+ }
+
+#endif /* CC_INTERP */
+
+ if (m->is_native()) {
+#ifdef CC_INTERP
+ f->do_oop((oop*)&istate->_oop_temp);
+#else
+ f->do_oop((oop*)( fp() + interpreter_frame_oop_temp_offset ));
+#endif /* CC_INTERP */
+ }
+
+ int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
+
+ symbolHandle signature;
+ bool is_static = false;
+
+ // Process a callee's arguments if we are at a call site
+ // (i.e., if we are at an invoke bytecode)
+ // This is used sometimes for calling into the VM, not for another
+ // interpreted or compiled frame.
+ if (!m->is_native()) {
+ Bytecode_invoke *call = Bytecode_invoke_at_check(m, bci);
+ if (call != NULL) {
+ signature = symbolHandle(thread, call->signature());
+ is_static = call->is_invokestatic();
+ if (map->include_argument_oops() &&
+ interpreter_frame_expression_stack_size() > 0) {
+ ResourceMark rm(thread); // is this right ???
+ // we are at a call site & the expression stack is not empty
+ // => process callee's arguments
+ //
+ // Note: The expression stack can be empty if an exception
+ // occurred during method resolution/execution. In all
+ // cases we empty the expression stack completely before
+ // handling the exception (the exception handling code in
+ // the interpreter calls a blocking runtime routine which
+ // can cause this code to be executed).
+ // (was bug gri 7/27/98)
+ oops_interpreted_arguments_do(signature, is_static, f);
+ }
+ }
+ }
+
+ if (TaggedStackInterpreter) {
+ // process locals & expression stack
+ InterpreterOopMap *mask = NULL;
+#ifdef ASSERT
+ InterpreterOopMap oopmap_mask;
+ OopMapCache::compute_one_oop_map(m, bci, &oopmap_mask);
+ mask = &oopmap_mask;
+#endif // ASSERT
+ oops_interpreted_locals_do(f, max_locals, mask);
+ oops_interpreted_expressions_do(f, signature, is_static,
+ m->max_stack(),
+ max_locals, mask);
+ } else {
+ InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
+
+ // process locals & expression stack
+ InterpreterOopMap mask;
+ if (query_oop_map_cache) {
+ m->mask_for(bci, &mask);
+ } else {
+ OopMapCache::compute_one_oop_map(m, bci, &mask);
+ }
+ mask.iterate_oop(&blk);
+ }
+}
+
+
+void frame::oops_interpreted_locals_do(OopClosure *f,
+ int max_locals,
+ InterpreterOopMap *mask) {
+ // Process locals then interpreter expression stack
+ for (int i = 0; i < max_locals; i++ ) {
+ Tag tag = interpreter_frame_local_tag(i);
+ if (tag == TagReference) {
+ oop* addr = (oop*) interpreter_frame_local_at(i);
+ assert((intptr_t*)addr >= sp(), "must be inside the frame");
+ f->do_oop(addr);
+#ifdef ASSERT
+ } else {
+ assert(tag == TagValue, "bad tag value for locals");
+ oop* p = (oop*) interpreter_frame_local_at(i);
+ // Not always true - too bad. May have dead oops without tags in locals.
+ // assert(*p == NULL || !(*p)->is_oop(), "oop not tagged on interpreter locals");
+ assert(*p == NULL || !mask->is_oop(i), "local oop map mismatch");
+#endif // ASSERT
+ }
+ }
+}
+
+void frame::oops_interpreted_expressions_do(OopClosure *f,
+ symbolHandle signature,
+ bool is_static,
+ int max_stack,
+ int max_locals,
+ InterpreterOopMap *mask) {
+ // There is no expression stack no matter what the esp is pointing to (native
+ // methods might make it look like the expression stack is nonempty).
+ if (max_stack == 0) return;
+
+ // Point the top of the expression stack above the arguments to a call, so
+ // the arguments aren't gc'ed twice: once as stack values in this frame and
+ // again as the callee's locals.
+ int args_size = 0;
+ if (!signature.is_null()) {
+ args_size = ArgumentSizeComputer(signature).size() + (is_static ? 0 : 1);
+ }
+
+ intptr_t *tos_addr = interpreter_frame_tos_at(args_size);
+ assert(args_size != 0 || tos_addr == interpreter_frame_tos_address(), "these are same");
+ intptr_t *frst_expr = interpreter_frame_expression_stack_at(0);
+ // In case of exceptions, the expression stack is invalid and the esp
+ // will be reset to express this condition. Therefore, we call f only
+ // if addr is 'inside' the stack (i.e., addr >= esp for Intel).
+ bool in_stack;
+ if (interpreter_frame_expression_stack_direction() > 0) {
+ in_stack = (intptr_t*)frst_expr <= tos_addr;
+ } else {
+ in_stack = (intptr_t*)frst_expr >= tos_addr;
+ }
+ if (!in_stack) return;
+
+ jint stack_size = interpreter_frame_expression_stack_size() - args_size;
+ for (int j = 0; j < stack_size; j++) {
+ Tag tag = interpreter_frame_expression_stack_tag(j);
+ if (tag == TagReference) {
+ oop *addr = (oop*) interpreter_frame_expression_stack_at(j);
+ f->do_oop(addr);
+#ifdef ASSERT
+ } else {
+ assert(tag == TagValue, "bad tag value for stack element");
+ oop *p = (oop*) interpreter_frame_expression_stack_at((j));
+ assert(*p == NULL || !mask->is_oop(j+max_locals), "stack oop map mismatch");
+#endif // ASSERT
+ }
+ }
+}
+
+void frame::oops_interpreted_arguments_do(symbolHandle signature, bool is_static, OopClosure* f) {
+ InterpretedArgumentOopFinder finder(signature, is_static, this, f);
+ finder.oops_do();
+}
+
+void frame::oops_code_blob_do(OopClosure* f, const RegisterMap* reg_map) {
+ assert(_cb != NULL, "sanity check");
+ if (_cb->oop_maps() != NULL) {
+ OopMapSet::oops_do(this, reg_map, f);
+
+ // Preserve potential arguments for a callee. We handle this by dispatching
+ // on the codeblob; each blob type preserves its own callee argument oops.
+ if (reg_map->include_argument_oops()) {
+ _cb->preserve_callee_argument_oops(*this, reg_map, f);
+ }
+ }
+ // In cases where perm gen is collected, GC will want to mark
+ // oops referenced from nmethods active on thread stacks so as to
+ // prevent them from being collected. However, this visit should be
+ // restricted to certain phases of the collection only. The
+ // closure answers whether it wants nmethods to be traced.
+ // (All CodeBlob subtypes other than NMethod currently have
+ // an empty oops_do() method.)
+ if (f->do_nmethods()) {
+ _cb->oops_do(f);
+ }
+}
+
+void frame::nmethods_code_blob_do() {
+ assert(_cb != NULL, "sanity check");
+
+ // If we see an activation belonging to a non_entrant nmethod, we mark it.
+ if (_cb->is_nmethod() && ((nmethod *)_cb)->is_not_entrant()) {
+ ((nmethod*)_cb)->mark_as_seen_on_stack();
+ }
+}
+
+class CompiledArgumentOopFinder: public SignatureInfo {
+ protected:
+ OopClosure* _f;
+ int _offset; // the current offset, incremented with each argument
+ bool _is_static; // true if the callee is a static method
+ frame _fr;
+ RegisterMap* _reg_map;
+ int _arg_size;
+ VMRegPair* _regs; // VMReg list of arguments
+
+ void set(int size, BasicType type) {
+ if (type == T_OBJECT || type == T_ARRAY) handle_oop_offset();
+ _offset += size;
+ }
+
+ virtual void handle_oop_offset() {
+ // Extract low order register number from register array.
+ // In LP64-land, the high-order bits are valid but unhelpful.
+ VMReg reg = _regs[_offset].first();
+ oop *loc = _fr.oopmapreg_to_location(reg, _reg_map);
+ _f->do_oop(loc);
+ }
+
+ public:
+ CompiledArgumentOopFinder(symbolHandle signature, bool is_static, OopClosure* f, frame fr, const RegisterMap* reg_map)
+ : SignatureInfo(signature) {
+
+ // initialize CompiledArgumentOopFinder
+ _f = f;
+ _offset = 0;
+ _is_static = is_static;
+ _fr = fr;
+ _reg_map = (RegisterMap*)reg_map;
+ _arg_size = ArgumentSizeComputer(signature).size() + (is_static ? 0 : 1);
+
+ int arg_size;
+ _regs = SharedRuntime::find_callee_arguments(signature(), is_static, &arg_size);
+ assert(arg_size == _arg_size, "wrong arg size");
+ }
+
+ void oops_do() {
+ if (!_is_static) {
+ handle_oop_offset();
+ _offset++;
+ }
+ iterate_parameters();
+ }
+};
+
+void frame::oops_compiled_arguments_do(symbolHandle signature, bool is_static, const RegisterMap* reg_map, OopClosure* f) {
+ ResourceMark rm;
+ CompiledArgumentOopFinder finder(signature, is_static, f, *this, reg_map);
+ finder.oops_do();
+}
+
+
+// Get receiver out of the caller's frame, i.e. find parameter 0 in the caller's
+// frame. Consult ADLC for where parameter 0 is to be found. Then
+// check local reg_map for it being a callee-save register or argument
+// register, both of which are saved in the local frame. If not found
+// there, it must be an in-stack argument of the caller.
+// Note: caller.sp() points to callee-arguments
+oop frame::retrieve_receiver(RegisterMap* reg_map) {
+ frame caller = *this;
+
+ // First consult the ADLC on where it puts parameter 0 for this signature.
+ VMReg reg = SharedRuntime::name_for_receiver();
+ oop r = *caller.oopmapreg_to_location(reg, reg_map);
+ assert( Universe::heap()->is_in_or_null(r), "bad receiver" );
+ return r;
+}
+
+
+oop* frame::oopmapreg_to_location(VMReg reg, const RegisterMap* reg_map) const {
+ if(reg->is_reg()) {
+ // If it is passed in a register, it got spilled in the stub frame.
+ return (oop *)reg_map->location(reg);
+ } else {
+ int sp_offset_in_stack_slots = reg->reg2stack();
+ int sp_offset = sp_offset_in_stack_slots >> (LogBytesPerWord - LogBytesPerInt);
+ return (oop *)&unextended_sp()[sp_offset];
+ }
+}
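+
+// Example (illustrative sketch): VMReg stack offsets are counted in 32-bit
+// stack slots, while unextended_sp() is indexed in machine words. On an LP64
+// platform the shift above is LogBytesPerWord - LogBytesPerInt = 3 - 2 = 1,
+// so stack slot 6 maps to word 3, i.e. &unextended_sp()[3]; on a 32-bit
+// platform the shift is 0 and slots map to words one-to-one.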
+
+BasicLock* frame::compiled_synchronized_native_monitor(nmethod* nm) {
+ if (nm == NULL) {
+ // Assign from _cb first so the assert does not dereference a NULL nm.
+ nm = (nmethod*) _cb;
+ assert(_cb != NULL && _cb->is_nmethod() &&
+ nm->method()->is_native() &&
+ nm->method()->is_synchronized(),
+ "should not call this otherwise");
+ }
+ int byte_offset = in_bytes(nm->compiled_synchronized_native_basic_lock_sp_offset());
+ assert(byte_offset >= 0, "should not see invalid offset");
+ return (BasicLock*) &sp()[byte_offset / wordSize];
+}
+
+oop frame::compiled_synchronized_native_monitor_owner(nmethod* nm) {
+ if (nm == NULL) {
+ // Assign from _cb first so the assert does not dereference a NULL nm.
+ nm = (nmethod*) _cb;
+ assert(_cb != NULL && _cb->is_nmethod() &&
+ nm->method()->is_native() &&
+ nm->method()->is_synchronized(),
+ "should not call this otherwise");
+ }
+ int byte_offset = in_bytes(nm->compiled_synchronized_native_basic_lock_owner_sp_offset());
+ assert(byte_offset >= 0, "should not see invalid offset");
+ oop owner = ((oop*) sp())[byte_offset / wordSize];
+ assert( Universe::heap()->is_in(owner), "bad receiver" );
+ return owner;
+}
+
+void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) {
+ assert(map != NULL, "map must be set");
+ if (map->include_argument_oops()) {
+ // must collect argument oops, as nobody else is doing it
+ Thread *thread = Thread::current();
+ methodHandle m (thread, entry_frame_call_wrapper()->callee_method());
+ symbolHandle signature (thread, m->signature());
+ EntryFrameOopFinder finder(this, signature, m->is_static());
+ finder.arguments_do(f);
+ }
+ // Traverse the Handle Block saved in the entry frame
+ entry_frame_call_wrapper()->oops_do(f);
+}
+
+
+void frame::oops_do_internal(OopClosure* f, RegisterMap* map, bool use_interpreter_oop_map_cache) {
+ if (is_interpreted_frame()) { oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
+ } else if (is_entry_frame()) { oops_entry_do (f, map);
+ } else if (CodeCache::contains(pc())) { oops_code_blob_do (f, map);
+ } else {
+ ShouldNotReachHere();
+ }
+}
+
+void frame::nmethods_do() {
+ if (_cb != NULL && _cb->is_nmethod()) {
+ nmethods_code_blob_do();
+ }
+}
+
+
+void frame::gc_prologue() {
+ if (is_interpreted_frame()) {
+ // set bcx to bci to become methodOop position independent during GC
+ interpreter_frame_set_bcx(interpreter_frame_bci());
+ }
+}
+
+
+void frame::gc_epilogue() {
+ if (is_interpreted_frame()) {
+ // set bcx back to bcp for interpreter
+ interpreter_frame_set_bcx((intptr_t)interpreter_frame_bcp());
+ }
+ // call processor specific epilog function
+ pd_gc_epilog();
+}
+
+
+# ifdef ENABLE_ZAP_DEAD_LOCALS
+
+void frame::CheckValueClosure::do_oop(oop* p) {
+ if (CheckOopishValues && Universe::heap()->is_in_reserved(*p)) {
+ warning("value @ " INTPTR_FORMAT " looks oopish (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
+ }
+}
+frame::CheckValueClosure frame::_check_value;
+
+
+void frame::CheckOopClosure::do_oop(oop* p) {
+ if (*p != NULL && !(*p)->is_oop()) {
+ warning("value @ " INTPTR_FORMAT " should be an oop (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
+ }
+}
+frame::CheckOopClosure frame::_check_oop;
+
+void frame::check_derived_oop(oop* base, oop* derived) {
+ _check_oop.do_oop(base);
+}
+
+
+void frame::ZapDeadClosure::do_oop(oop* p) {
+ if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p);
+ // Need cast because on _LP64 the conversion to oop is ambiguous. Constant
+ // can be either long or int.
+ *p = (oop)(int)0xbabebabe;
+}
+frame::ZapDeadClosure frame::_zap_dead;
+
+void frame::zap_dead_locals(JavaThread* thread, const RegisterMap* map) {
+ assert(thread == Thread::current(), "need to synchronize to do this to another thread");
+ // Tracing - part 1
+ if (TraceZapDeadLocals) {
+ ResourceMark rm(thread);
+ tty->print_cr("--------------------------------------------------------------------------------");
+ tty->print("Zapping dead locals in ");
+ print_on(tty);
+ tty->cr();
+ }
+ // Zapping
+ if (is_entry_frame ()) zap_dead_entry_locals (thread, map);
+ else if (is_interpreted_frame()) zap_dead_interpreted_locals(thread, map);
+ else if (is_compiled_frame()) zap_dead_compiled_locals (thread, map);
+
+ else
+ // could be is_runtime_frame,
+ // so this is not an error: do nothing
+ ;
+ // Tracing - part 2
+ if (TraceZapDeadLocals) {
+ tty->cr();
+ }
+}
+
+
+void frame::zap_dead_interpreted_locals(JavaThread *thread, const RegisterMap* map) {
+ // get current interpreter 'pc'
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+ methodOop m = interpreter_frame_method();
+ int bci = interpreter_frame_bci();
+
+ int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
+
+ if (TaggedStackInterpreter) {
+ InterpreterOopMap *mask = NULL;
+#ifdef ASSERT
+ InterpreterOopMap oopmap_mask;
+ methodHandle method(thread, m);
+ OopMapCache::compute_one_oop_map(method, bci, &oopmap_mask);
+ mask = &oopmap_mask;
+#endif // ASSERT
+ oops_interpreted_locals_do(&_check_oop, max_locals, mask);
+ } else {
+ // process dynamic part
+ InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
+ &_check_value);
+ InterpreterFrameClosure oop_blk(this, max_locals, m->max_stack(),
+ &_check_oop );
+ InterpreterFrameClosure dead_blk(this, max_locals, m->max_stack(),
+ &_zap_dead );
+
+ // get frame map
+ InterpreterOopMap mask;
+ m->mask_for(bci, &mask);
+ mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
+ }
+}
+
+
+void frame::zap_dead_compiled_locals(JavaThread* thread, const RegisterMap* reg_map) {
+
+ ResourceMark rm(thread);
+ assert(_cb != NULL, "sanity check");
+ if (_cb->oop_maps() != NULL) {
+ OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop,
+ &_check_value, &_zap_dead);
+ }
+}
+
+
+void frame::zap_dead_entry_locals(JavaThread*, const RegisterMap*) {
+ if (TraceZapDeadLocals) warning("frame::zap_dead_entry_locals unimplemented");
+}
+
+
+void frame::zap_dead_deoptimized_locals(JavaThread*, const RegisterMap*) {
+ if (TraceZapDeadLocals) warning("frame::zap_dead_deoptimized_locals unimplemented");
+}
+
+# endif // ENABLE_ZAP_DEAD_LOCALS
+
+void frame::verify(const RegisterMap* map) {
+ // for now make sure receiver type is correct
+ if (is_interpreted_frame()) {
+ methodOop method = interpreter_frame_method();
+ guarantee(method->is_method(), "method is wrong in frame::verify");
+ if (!method->is_static()) {
+ // fetch the receiver
+ oop* p = (oop*) interpreter_frame_local_at(0);
+ // make sure we have the right receiver type
+ }
+ }
+ COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");)
+ oops_do_internal(&VerifyOopClosure::verify_oop, (RegisterMap*)map, false);
+}
+
+
+#ifdef ASSERT
+bool frame::verify_return_pc(address x) {
+ if (StubRoutines::returns_to_call_stub(x)) {
+ return true;
+ }
+ if (CodeCache::contains(x)) {
+ return true;
+ }
+ if (Interpreter::contains(x)) {
+ return true;
+ }
+ return false;
+}
+#endif
+
+
+#ifdef ASSERT
+void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+ // verify that the value is in the right part of the frame
+ address low_mark = (address) interpreter_frame_monitor_end();
+ address high_mark = (address) interpreter_frame_monitor_begin();
+ address current = (address) value;
+
+ const int monitor_size = frame::interpreter_frame_monitor_size();
+ guarantee((high_mark - current) % monitor_size == 0 , "Misaligned top of BasicObjectLock*");
+ guarantee( high_mark > current , "Current BasicObjectLock* higher than high_mark");
+
+ guarantee((current - low_mark) % monitor_size == 0 , "Misaligned bottom of BasicObjectLock*");
+ guarantee( current >= low_mark , "Current BasicObjectLock* below low_mark");
+}
+#endif
+
+
+//-----------------------------------------------------------------------------------
+// StackFrameStream implementation
+
+StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(thread, update) {
+ assert(thread->has_last_Java_frame(), "sanity check");
+ _fr = thread->last_frame();
+ _is_done = false;
+}
diff --git a/src/share/vm/runtime/frame.hpp b/src/share/vm/runtime/frame.hpp
new file mode 100644
index 000000000..5c6954570
--- /dev/null
+++ b/src/share/vm/runtime/frame.hpp
@@ -0,0 +1,469 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+typedef class BytecodeInterpreter* interpreterState;
+
+class CodeBlob;
+
+
+// A frame represents a physical stack frame (an activation). Frames
+// can be C or Java frames, and the Java frames can be interpreted or
+// compiled. In contrast, vframes represent source-level activations,
+// so that one physical frame can correspond to multiple source level
+// frames because of inlining.
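+//
+// Example (illustrative, hypothetical methods): if the compiler inlines bar()
+// into foo(), a single physical compiled frame for foo() corresponds to two
+// vframes, one for foo() and one for the inlined bar().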
+
+class frame VALUE_OBJ_CLASS_SPEC {
+ private:
+ // Instance variables:
+ intptr_t* _sp; // stack pointer (from Thread::last_Java_sp)
+ address _pc; // program counter (the next instruction after the call)
+
+ CodeBlob* _cb; // CodeBlob that "owns" pc
+ enum deopt_state {
+ not_deoptimized,
+ is_deoptimized,
+ unknown
+ };
+
+ deopt_state _deopt_state;
+
+ public:
+ // Constructors
+ frame();
+
+ // Accessors
+
+ // pc: Returns the pc at which this frame will continue normally.
+ // It must point at the beginning of the next instruction to execute.
+ address pc() const { return _pc; }
+
+ // This returns the pc you would see in a debugger, not the idealized value
+ // in the frame object. It undoes the magic conversion that happens for
+ // deoptimized frames and yields the value the hardware would expect to see
+ // in the native frame. The only user (at this point) is deoptimization,
+ // and it is unlikely anyone else should ever use it.
+ address raw_pc() const;
+
+ void set_pc( address newpc );
+
+ intptr_t* sp() const { return _sp; }
+ void set_sp( intptr_t* newsp ) { _sp = newsp; }
+
+
+ CodeBlob* cb() const { return _cb; }
+
+ // patching operations
+ void patch_pc(Thread* thread, address pc);
+
+ // Every frame needs to return a unique id which distinguishes it from all other frames.
+ // For sparc and ia32 use sp. ia64 can have memory frames that are empty so multiple frames
+ // will have identical sp values. For ia64 the bsp (fp) value will serve. No real frame
+ // should have an id() of NULL so it is a distinguishing value for an unmatchable frame.
+ // We also have relationals which allow comparing a frame to another frame's id(), letting
+ // us distinguish younger (more recent activation) from older (less recent) activations.
+ // A NULL id is only valid when comparing for equality.
+
+ intptr_t* id(void) const;
+ bool is_younger(intptr_t* id) const;
+ bool is_older(intptr_t* id) const;
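+
+ // Example (illustrative sketch): for a frame 'callee' called from 'caller',
+ // callee.is_younger(caller.id()) and caller.is_older(callee.id()) both hold;
+ // the relationals must not be used with a NULL id.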
+
+ // testers
+
+ // Compares for strict equality. Rarely used or needed.
+ // It can return a different result than f1.id() == f2.id()
+ bool equal(frame other) const;
+
+ // type testers
+ bool is_interpreted_frame() const;
+ bool is_java_frame() const;
+ bool is_entry_frame() const; // Java frame called from C?
+ bool is_native_frame() const;
+ bool is_runtime_frame() const;
+ bool is_compiled_frame() const;
+ bool is_safepoint_blob_frame() const;
+ bool is_deoptimized_frame() const;
+
+ // testers
+ bool is_first_frame() const; // oldest frame? (has no sender)
+ bool is_first_java_frame() const; // same for Java frame
+
+ bool is_interpreted_frame_valid() const; // performs sanity checks on interpreted frames.
+
+ // tells whether this frame is marked for deoptimization
+ bool should_be_deoptimized() const;
+
+ // tells whether this frame can be deoptimized
+ bool can_be_deoptimized() const;
+
+ // returns the frame size in stack slots
+ int frame_size() const;
+
+ // returns the sending frame
+ frame sender(RegisterMap* map) const;
+
+ // for Profiling - acting on another frame. walks sender frames
+ // if valid.
+ frame profile_find_Java_sender_frame(JavaThread *thread);
+ bool safe_for_sender(JavaThread *thread);
+
+ // returns the sender, but skips conversion frames
+ frame real_sender(RegisterMap* map) const;
+
+ // returns the sending Java frame, skipping any intermediate C frames
+ // NB: receiver must not be first frame
+ frame java_sender() const;
+
+ private:
+ // Helper methods for better factored code in frame::sender
+ frame sender_for_compiled_frame(RegisterMap* map) const;
+ frame sender_for_entry_frame(RegisterMap* map) const;
+ frame sender_for_interpreter_frame(RegisterMap* map) const;
+ frame sender_for_native_frame(RegisterMap* map) const;
+
+ // All frames:
+
+ // A low-level interface for vframes:
+
+ public:
+
+ intptr_t* addr_at(int index) const { return &fp()[index]; }
+ intptr_t at(int index) const { return *addr_at(index); }
+
+ // accessors for locals
+ oop obj_at(int offset) const { return *obj_at_addr(offset); }
+ void obj_at_put(int offset, oop value) { *obj_at_addr(offset) = value; }
+
+ jint int_at(int offset) const { return *int_at_addr(offset); }
+ void int_at_put(int offset, jint value) { *int_at_addr(offset) = value; }
+
+ oop* obj_at_addr(int offset) const { return (oop*) addr_at(offset); }
+
+ oop* adjusted_obj_at_addr(methodOop method, int index) { return obj_at_addr(adjust_offset(method, index)); }
+
+ private:
+ jint* int_at_addr(int offset) const { return (jint*) addr_at(offset); }
+
+ public:
+ // Link (i.e., the pointer to the previous frame)
+ intptr_t* link() const;
+ void set_link(intptr_t* addr);
+
+ // Return address
+ address sender_pc() const;
+
+ // Support for deoptimization
+ void deoptimize(JavaThread* thread, bool thread_is_known_safe = false);
+
+ // The frame's original SP, before any extension by an interpreted callee;
+ // used for packing debug info into vframeArray objects and vframeArray lookup.
+ intptr_t* unextended_sp() const;
+
+ // returns the stack pointer of the calling frame
+ intptr_t* sender_sp() const;
+
+
+ // Interpreter frames:
+
+ private:
+ intptr_t** interpreter_frame_locals_addr() const;
+ intptr_t* interpreter_frame_bcx_addr() const;
+ intptr_t* interpreter_frame_mdx_addr() const;
+
+ public:
+ // Tags for TaggedStackInterpreter
+ enum Tag {
+ TagValue = 0, // Important: must be zero to use G0 on sparc.
+ TagReference = 0x555, // Reference type - is an oop that needs gc.
+ TagCategory2 = 0x666 // Only used internally by interpreter
+ // and not written to the java stack.
+ // The values above are chosen so that misuse causes a crash
+ // with a recognizable value.
+ };
+
+ static Tag tag_for_basic_type(BasicType typ) {
+ return (typ == T_OBJECT ? TagReference : TagValue);
+ }
+
+ // Locals
+
+ // The _at version returns a pointer because the address is used for GC.
+ intptr_t* interpreter_frame_local_at(int index) const;
+ Tag interpreter_frame_local_tag(int index) const;
+ void interpreter_frame_set_local_tag(int index, Tag tag) const;
+
+ void interpreter_frame_set_locals(intptr_t* locs);
+
+ // byte code index/pointer (use these functions for unchecked frame access only!)
+ intptr_t interpreter_frame_bcx() const { return *interpreter_frame_bcx_addr(); }
+ void interpreter_frame_set_bcx(intptr_t bcx);
+
+ // byte code index
+ jint interpreter_frame_bci() const;
+ void interpreter_frame_set_bci(jint bci);
+
+ // byte code pointer
+ address interpreter_frame_bcp() const;
+ void interpreter_frame_set_bcp(address bcp);
+
+ // Unchecked access to the method data index/pointer.
+ // Only use this if you know what you are doing.
+ intptr_t interpreter_frame_mdx() const { return *interpreter_frame_mdx_addr(); }
+ void interpreter_frame_set_mdx(intptr_t mdx);
+
+ // method data pointer
+ address interpreter_frame_mdp() const;
+ void interpreter_frame_set_mdp(address dp);
+
+ // Find receiver out of caller's (compiled) argument list
+ oop retrieve_receiver(RegisterMap *reg_map);
+
+ // Return the monitor owner and BasicLock for compiled synchronized
+ // native methods so that biased locking can revoke the receiver's
+ // bias if necessary. Takes optional nmethod for this frame as
+ // argument to avoid performing repeated lookups in code cache.
+ BasicLock* compiled_synchronized_native_monitor (nmethod* nm = NULL);
+ oop compiled_synchronized_native_monitor_owner(nmethod* nm = NULL);
+
+ // Find receiver for an invoke when arguments are just pushed on the stack (i.e., the callee
+ // stack-frame is not yet set up)
+ oop interpreter_callee_receiver(symbolHandle signature) { return *interpreter_callee_receiver_addr(signature); }
+
+
+ oop *interpreter_callee_receiver_addr(symbolHandle signature);
+
+
+ // expression stack (may go up or down, direction == 1 or -1)
+ public:
+ intptr_t* interpreter_frame_expression_stack() const;
+ static jint interpreter_frame_expression_stack_direction();
+
+ // The _at version returns a pointer because the address is used for GC.
+ intptr_t* interpreter_frame_expression_stack_at(jint offset) const;
+ Tag interpreter_frame_expression_stack_tag(jint offset) const;
+ void interpreter_frame_set_expression_stack_tag(jint offset, Tag tag) const;
+
+ // top of expression stack
+ intptr_t* interpreter_frame_tos_at(jint offset) const;
+ intptr_t* interpreter_frame_tos_address() const;
+
+
+ jint interpreter_frame_expression_stack_size() const;
+
+ intptr_t* interpreter_frame_sender_sp() const;
+
+#ifndef CC_INTERP
+ // template based interpreter deoptimization support
+ void set_interpreter_frame_sender_sp(intptr_t* sender_sp);
+ void interpreter_frame_set_monitor_end(BasicObjectLock* value);
+#endif // CC_INTERP
+
+ // BasicObjectLocks:
+ //
+ // interpreter_frame_monitor_begin is higher in memory than interpreter_frame_monitor_end
+ // interpreter_frame_monitor_begin points to one element beyond the oldest one,
+ // interpreter_frame_monitor_end points to the youngest one, or if there are none,
+ // it points to one beyond where the first element will be.
+ // interpreter_frame_monitor_size reports the allocation size of a monitor in the interpreter stack;
+ // this value is >= BasicObjectLock::size(), and may be rounded up.
+
+ BasicObjectLock* interpreter_frame_monitor_begin() const;
+ BasicObjectLock* interpreter_frame_monitor_end() const;
+ BasicObjectLock* next_monitor_in_interpreter_frame(BasicObjectLock* current) const;
+ BasicObjectLock* previous_monitor_in_interpreter_frame(BasicObjectLock* current) const;
+ static int interpreter_frame_monitor_size();
+
+ void interpreter_frame_verify_monitor(BasicObjectLock* value) const;
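+
+ // Example (illustrative sketch): walking all monitors of an interpreted
+ // frame 'fr', youngest to oldest, as frame.cpp does it:
+ //
+ //   for (BasicObjectLock* current = fr.interpreter_frame_monitor_end();
+ //        current < fr.interpreter_frame_monitor_begin();
+ //        current = fr.next_monitor_in_interpreter_frame(current)) {
+ //     current->obj();   // the locked object
+ //     current->lock();  // the BasicLock
+ //   }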
+
+ // Tells whether the current interpreter frame's frame pointer
+ // corresponds to the old compiled/deoptimized fp.
+ // (The receiver used to be a top-level frame.)
+ bool interpreter_frame_equals_unpacked_fp(intptr_t* fp);
+
+ // Return/result value from this interpreter frame.
+ // If the method return type is T_OBJECT or T_ARRAY, oop_result is populated;
+ // for other (non-T_VOID) types the appropriate field in the jvalue is populated
+ // with the result value.
+ // Should only be called at method exit, when the method is not
+ // exiting due to an exception.
+ BasicType interpreter_frame_result(oop* oop_result, jvalue* value_result);
+
+ public:
+ // Method & constant pool cache
+ methodOop interpreter_frame_method() const;
+ void interpreter_frame_set_method(methodOop method);
+ methodOop* interpreter_frame_method_addr() const;
+ constantPoolCacheOop* interpreter_frame_cache_addr() const;
+
+ public:
+ // Entry frames
+ JavaCallWrapper* entry_frame_call_wrapper() const;
+ intptr_t* entry_frame_argument_at(int offset) const;
+
+ // tells whether there is another chunk of Java stack above
+ bool entry_frame_is_first() const;
+
+ // Compiled frames:
+
+ public:
+ // Given the index of a local, and the number of argument words
+ // in this stack frame, tell which word of the stack frame to find
+ // the local in. Arguments are stored above the ofp/rpc pair,
+ // while other locals are stored below it.
+ // Since monitors (BasicLock blocks) are also assigned indexes,
+ // but may have different storage requirements, their presence
+ // can also affect the calculation of offsets.
+ static int local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors);
+
+ // Given the index of a monitor, etc., tell which word of the
+ // stack frame contains the start of the BasicLock block.
+ // Note that the local index by convention is the __higher__
+ // of the two indexes allocated to the block.
+ static int monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors);
+
+ // Tell the smallest value that local_offset_for_compiler will attain.
+ // This is used to help determine how much stack frame to allocate.
+ static int min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors);
+
+ // Tells if this register must be spilled during a call.
+ // On Intel, all registers are smashed by calls.
+ static bool volatile_across_calls(Register reg);
+
+
+ // Safepoints
+
+ public:
+ oop saved_oop_result(RegisterMap* map) const;
+ void set_saved_oop_result(RegisterMap* map, oop obj);
+
+ // For debugging
+ private:
+ const char* print_name() const;
+
+ public:
+ void print_value() const { print_value_on(tty,NULL); }
+ void print_value_on(outputStream* st, JavaThread *thread) const;
+ void print_on(outputStream* st) const;
+ void interpreter_frame_print_on(outputStream* st) const;
+ void print_on_error(outputStream* st, char* buf, int buflen, bool verbose = false) const;
+
+  // Conversion from a VMReg to a physical stack location
+ oop* oopmapreg_to_location(VMReg reg, const RegisterMap* regmap) const;
+
+ // Oops-do's
+ void oops_compiled_arguments_do(symbolHandle signature, bool is_static, const RegisterMap* reg_map, OopClosure* f);
+ void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true);
+
+ private:
+ void oops_interpreted_locals_do(OopClosure *f,
+ int max_locals,
+ InterpreterOopMap *mask);
+ void oops_interpreted_expressions_do(OopClosure *f, symbolHandle signature,
+ bool is_static, int max_stack, int max_locals,
+ InterpreterOopMap *mask);
+ void oops_interpreted_arguments_do(symbolHandle signature, bool is_static, OopClosure* f);
+
+ // Iteration of oops
+ void oops_do_internal(OopClosure* f, RegisterMap* map, bool use_interpreter_oop_map_cache);
+ void oops_entry_do(OopClosure* f, const RegisterMap* map);
+ void oops_code_blob_do(OopClosure* f, const RegisterMap* map);
+ int adjust_offset(methodOop method, int index); // helper for above fn
+ // Iteration of nmethods
+ void nmethods_code_blob_do();
+ public:
+ // Memory management
+ void oops_do(OopClosure* f, RegisterMap* map) { oops_do_internal(f, map, true); }
+ void nmethods_do();
+
+ void gc_prologue();
+ void gc_epilogue();
+ void pd_gc_epilog();
+
+# ifdef ENABLE_ZAP_DEAD_LOCALS
+ private:
+ class CheckValueClosure: public OopClosure {
+ public: void do_oop(oop* p);
+ };
+ static CheckValueClosure _check_value;
+
+ class CheckOopClosure: public OopClosure {
+ public: void do_oop(oop* p);
+ };
+ static CheckOopClosure _check_oop;
+
+ static void check_derived_oop(oop* base, oop* derived);
+
+ class ZapDeadClosure: public OopClosure {
+ public: void do_oop(oop* p);
+ };
+ static ZapDeadClosure _zap_dead;
+
+ public:
+ // Zapping
+ void zap_dead_locals (JavaThread* thread, const RegisterMap* map);
+ void zap_dead_interpreted_locals(JavaThread* thread, const RegisterMap* map);
+ void zap_dead_compiled_locals (JavaThread* thread, const RegisterMap* map);
+ void zap_dead_entry_locals (JavaThread* thread, const RegisterMap* map);
+ void zap_dead_deoptimized_locals(JavaThread* thread, const RegisterMap* map);
+# endif
+ // Verification
+ void verify(const RegisterMap* map);
+ static bool verify_return_pc(address x);
+ static bool is_bci(intptr_t bcx);
+ // Usage:
+ // assert(frame::verify_return_pc(return_address), "must be a return pc");
+
+ int pd_oop_map_offset_adjustment() const;
+
+# include "incls/_frame_pd.hpp.incl"
+};
+
+
+//
+// StackFrameStream iterates through the frames of a thread starting from the
+// topmost frame. It automatically takes care of updating the location of
+// all (callee-saved) registers. Notice: If a thread is stopped at
+// a safepoint, all registers are saved, not only the callee-saved ones.
+//
+// Use:
+//
+// for(StackFrameStream fst(thread); !fst.is_done(); fst.next()) {
+// ...
+// }
+//
+class StackFrameStream : public StackObj {
+ private:
+ frame _fr;
+ RegisterMap _reg_map;
+ bool _is_done;
+ public:
+ StackFrameStream(JavaThread *thread, bool update = true);
+
+ // Iteration
+ bool is_done() { return (_is_done) ? true : (_is_done = _fr.is_first_frame(), false); }
+ void next() { if (!_is_done) _fr = _fr.sender(&_reg_map); }
+
+ // Query
+ frame *current() { return &_fr; }
+ RegisterMap* register_map() { return &_reg_map; }
+};
diff --git a/src/share/vm/runtime/frame.inline.hpp b/src/share/vm/runtime/frame.inline.hpp
new file mode 100644
index 000000000..3449ead76
--- /dev/null
+++ b/src/share/vm/runtime/frame.inline.hpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// This file holds platform-independent bodies of inline functions for frames.
+
+// Note: The bcx usually contains the bcp; however during GC it contains the bci
+// (changed by gc_prologue() and gc_epilogue()) to be methodOop position
+// independent. These accessors make sure the correct value is returned
+// by testing the range of the bcx value. bcp's are guaranteed to be above
+// max_method_code_size, since methods are always allocated in OldSpace and
+// Eden is allocated before OldSpace.
+//
+// The bcp is sometimes accessed during GC for ArgumentDescriptors; then
+// the correct translation has to be performed (this was once a bug).
+
+inline bool frame::is_bci(intptr_t bcx) {
+#ifdef _LP64
+ return ((uintptr_t) bcx) <= ((uintptr_t) max_method_code_size) ;
+#else
+ return 0 <= bcx && bcx <= max_method_code_size;
+#endif
+}
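+
+// Illustrative sketch (caller-side code, not part of this file): code that reads
+// the raw bcx slot of an interpreter frame can use is_bci() to decide how to
+// interpret the value it got; 'bcx' below is a hypothetical local variable.
+//
+//   intptr_t bcx = ...;                  // raw value taken from the frame
+//   if (frame::is_bci(bcx)) {
+//     int bci = (int) bcx;               // during GC: a method-relative index
+//   } else {
+//     address bcp = (address) bcx;       // otherwise: a real bytecode pointer
+//   }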
+
+inline bool frame::is_entry_frame() const {
+ return StubRoutines::returns_to_call_stub(pc());
+}
+
+inline bool frame::is_first_frame() const {
+ return is_entry_frame() && entry_frame_is_first();
+}
+
+// here are the platform-dependent bodies:
+
+# include "incls/_frame_pd.inline.hpp.incl"
diff --git a/src/share/vm/runtime/globals.cpp b/src/share/vm/runtime/globals.cpp
new file mode 100644
index 000000000..50bf7658b
--- /dev/null
+++ b/src/share/vm/runtime/globals.cpp
@@ -0,0 +1,429 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_globals.cpp.incl"
+
+
+RUNTIME_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \
+ MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \
+ MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG, \
+ MATERIALIZE_MANAGEABLE_FLAG, MATERIALIZE_PRODUCT_RW_FLAG)
+
+RUNTIME_OS_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \
+ MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \
+ MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+
+bool Flag::is_unlocker() const {
+ return strcmp(name, "UnlockDiagnosticVMOptions") == 0;
+}
+
+bool Flag::is_unlocked() const {
+ if (strcmp(kind, "{diagnostic}") == 0) {
+ return UnlockDiagnosticVMOptions;
+ } else {
+ return true;
+ }
+}
+
+bool Flag::is_writeable() const {
+ return (strcmp(kind, "{manageable}") == 0 || strcmp(kind, "{product rw}") == 0);
+}
+
+// All flags except "manageable" ones are assumed to be internal flags.
+// Long term, we need to define a mechanism to specify which flags
+// are external/stable and change this function accordingly.
+bool Flag::is_external() const {
+ return (strcmp(kind, "{manageable}") == 0);
+}
+
+// Length of format string (e.g. "%.1234s") for printing ccstr below
+#define FORMAT_BUFFER_LEN 16
+
+void Flag::print_on(outputStream* st) {
+ st->print("%5s %-35s %c= ", type, name, (origin != DEFAULT ? ':' : ' '));
+ if (is_bool()) st->print("%-16s", get_bool() ? "true" : "false");
+ if (is_intx()) st->print("%-16ld", get_intx());
+ if (is_uintx()) st->print("%-16lu", get_uintx());
+ if (is_ccstr()) {
+ const char* cp = get_ccstr();
+ const char* eol;
+ while ((eol = strchr(cp, '\n')) != NULL) {
+ char format_buffer[FORMAT_BUFFER_LEN];
+ size_t llen = pointer_delta(eol, cp, sizeof(char));
+ jio_snprintf(format_buffer, FORMAT_BUFFER_LEN,
+ "%%." SIZE_FORMAT "s", llen);
+ st->print(format_buffer, cp);
+ st->cr();
+ cp = eol+1;
+ st->print("%5s %-35s += ", "", name);
+ }
+ st->print("%-16s", cp);
+ }
+ st->print(" %s", kind);
+ st->cr();
+}
+
+void Flag::print_as_flag(outputStream* st) {
+ if (is_bool()) {
+ st->print("-XX:%s%s", get_bool() ? "+" : "-", name);
+ } else if (is_intx()) {
+ st->print("-XX:%s=" INTX_FORMAT, name, get_intx());
+ } else if (is_uintx()) {
+ st->print("-XX:%s=" UINTX_FORMAT, name, get_uintx());
+ } else if (is_ccstr()) {
+ st->print("-XX:%s=", name);
+ // Need to turn embedded '\n's back into separate arguments
+ // Not so efficient to print one character at a time,
+ // but the choice is to do the transformation to a buffer
+ // and print that. And this need not be efficient.
+ for (const char* cp = get_ccstr(); *cp != '\0'; cp += 1) {
+ switch (*cp) {
+ default:
+ st->print("%c", *cp);
+ break;
+ case '\n':
+ st->print(" -XX:%s=", name);
+ break;
+ }
+ }
+ } else {
+ ShouldNotReachHere();
+ }
+}
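+
+// Example (illustrative values, not from the original sources): for a ccstrlist
+// flag such as OnError holding "gcore %p\ndbx - %p", print_as_flag() emits
+// "-XX:OnError=gcore %p -XX:OnError=dbx - %p", i.e. each embedded '\n' starts a
+// new -XX:<name>= argument, mirroring how accumulated list values were supplied
+// on the command line.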
+
+// 4991491 do not "optimize out" the was_set false values: omitting them
+// tickles a Microsoft compiler bug causing flagTable to be malformed
+
+#define RUNTIME_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{product}", DEFAULT },
+#define RUNTIME_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, "{pd product}", DEFAULT },
+#define RUNTIME_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{diagnostic}", DEFAULT },
+#define RUNTIME_MANAGEABLE_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{manageable}", DEFAULT },
+#define RUNTIME_PRODUCT_RW_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{product rw}", DEFAULT },
+
+#ifdef PRODUCT
+ #define RUNTIME_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
+ #define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc) /* flag is constant */
+ #define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
+#else
+ #define RUNTIME_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "", DEFAULT },
+ #define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, "{pd}", DEFAULT },
+ #define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{notproduct}", DEFAULT },
+#endif
+
+#define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C1 product}", DEFAULT },
+#define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, "{C1 pd product}", DEFAULT },
+#ifdef PRODUCT
+ #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
+ #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc) /* flag is constant */
+ #define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
+#else
+ #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C1}", DEFAULT },
+ #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, "{C1 pd}", DEFAULT },
+ #define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C1 notproduct}", DEFAULT },
+#endif
+
+
+#define C2_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 product}", DEFAULT },
+#define C2_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, "{C2 pd product}", DEFAULT },
+#define C2_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 diagnostic}", DEFAULT },
+#ifdef PRODUCT
+ #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
+ #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc) /* flag is constant */
+ #define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
+#else
+ #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2}", DEFAULT },
+ #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, "{C2 pd}", DEFAULT },
+ #define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 notproduct}", DEFAULT },
+#endif
+
+
+static Flag flagTable[] = {
+ RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
+ RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT)
+#ifdef COMPILER1
+ C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
+#endif
+#ifdef COMPILER2
+ C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
+#endif
+ {0, NULL, NULL}
+};
+
+Flag* Flag::flags = flagTable;
+size_t Flag::numFlags = (sizeof(flagTable) / sizeof(Flag));
+
+inline bool str_equal(const char* s, char* q, size_t len) {
+ // s is null terminated, q is not!
+ if (strlen(s) != (unsigned int) len) return false;
+ return strncmp(s, q, len) == 0;
+}
+
+Flag* Flag::find_flag(char* name, size_t length) {
+ for (Flag* current = &flagTable[0]; current->name; current++) {
+ if (str_equal(current->name, name, length)) {
+ if (!(current->is_unlocked() || current->is_unlocker())) {
+ // disable use of diagnostic flags until they are unlocked
+ return NULL;
+ }
+ return current;
+ }
+ }
+ return NULL;
+}
+
+// Returns the address of the index'th element
+static Flag* address_of_flag(CommandLineFlagWithType flag) {
+ assert((size_t)flag < Flag::numFlags, "bad command line flag index");
+ return &Flag::flags[flag];
+}
+
+bool CommandLineFlagsEx::is_default(CommandLineFlag flag) {
+ assert((size_t)flag < Flag::numFlags, "bad command line flag index");
+ Flag* f = &Flag::flags[flag];
+ return (f->origin == DEFAULT);
+}
+
+bool CommandLineFlags::wasSetOnCmdline(const char* name, bool* value) {
+ Flag* result = Flag::find_flag((char*)name, strlen(name));
+ if (result == NULL) return false;
+ *value = (result->origin == COMMAND_LINE);
+ return true;
+}
+
+bool CommandLineFlags::boolAt(char* name, size_t len, bool* value) {
+ Flag* result = Flag::find_flag(name, len);
+ if (result == NULL) return false;
+ if (!result->is_bool()) return false;
+ *value = result->get_bool();
+ return true;
+}
+
+bool CommandLineFlags::boolAtPut(char* name, size_t len, bool* value, FlagValueOrigin origin) {
+ Flag* result = Flag::find_flag(name, len);
+ if (result == NULL) return false;
+ if (!result->is_bool()) return false;
+ bool old_value = result->get_bool();
+ result->set_bool(*value);
+ *value = old_value;
+ result->origin = origin;
+ return true;
+}
+
+void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, FlagValueOrigin origin) {
+ Flag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_bool(), "wrong flag type");
+ faddr->set_bool(value);
+ faddr->origin = origin;
+}
+
+bool CommandLineFlags::intxAt(char* name, size_t len, intx* value) {
+ Flag* result = Flag::find_flag(name, len);
+ if (result == NULL) return false;
+ if (!result->is_intx()) return false;
+ *value = result->get_intx();
+ return true;
+}
+
+bool CommandLineFlags::intxAtPut(char* name, size_t len, intx* value, FlagValueOrigin origin) {
+ Flag* result = Flag::find_flag(name, len);
+ if (result == NULL) return false;
+ if (!result->is_intx()) return false;
+ intx old_value = result->get_intx();
+ result->set_intx(*value);
+ *value = old_value;
+ result->origin = origin;
+ return true;
+}
+
+void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, FlagValueOrigin origin) {
+ Flag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_intx(), "wrong flag type");
+ faddr->set_intx(value);
+ faddr->origin = origin;
+}
+
+bool CommandLineFlags::uintxAt(char* name, size_t len, uintx* value) {
+ Flag* result = Flag::find_flag(name, len);
+ if (result == NULL) return false;
+ if (!result->is_uintx()) return false;
+ *value = result->get_uintx();
+ return true;
+}
+
+bool CommandLineFlags::uintxAtPut(char* name, size_t len, uintx* value, FlagValueOrigin origin) {
+ Flag* result = Flag::find_flag(name, len);
+ if (result == NULL) return false;
+ if (!result->is_uintx()) return false;
+ uintx old_value = result->get_uintx();
+ result->set_uintx(*value);
+ *value = old_value;
+ result->origin = origin;
+ return true;
+}
+
+void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, FlagValueOrigin origin) {
+ Flag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_uintx(), "wrong flag type");
+ faddr->set_uintx(value);
+ faddr->origin = origin;
+}
+
+bool CommandLineFlags::doubleAt(char* name, size_t len, double* value) {
+ Flag* result = Flag::find_flag(name, len);
+ if (result == NULL) return false;
+ if (!result->is_double()) return false;
+ *value = result->get_double();
+ return true;
+}
+
+bool CommandLineFlags::doubleAtPut(char* name, size_t len, double* value, FlagValueOrigin origin) {
+ Flag* result = Flag::find_flag(name, len);
+ if (result == NULL) return false;
+ if (!result->is_double()) return false;
+ double old_value = result->get_double();
+ result->set_double(*value);
+ *value = old_value;
+ result->origin = origin;
+ return true;
+}
+
+void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value, FlagValueOrigin origin) {
+ Flag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_double(), "wrong flag type");
+ faddr->set_double(value);
+ faddr->origin = origin;
+}
+
+bool CommandLineFlags::ccstrAt(char* name, size_t len, ccstr* value) {
+ Flag* result = Flag::find_flag(name, len);
+ if (result == NULL) return false;
+ if (!result->is_ccstr()) return false;
+ *value = result->get_ccstr();
+ return true;
+}
+
+// Contract: Flag will make a private copy of the incoming value.
+// The outgoing value is always malloc-ed, and the caller MUST free it.
+bool CommandLineFlags::ccstrAtPut(char* name, size_t len, ccstr* value, FlagValueOrigin origin) {
+ Flag* result = Flag::find_flag(name, len);
+ if (result == NULL) return false;
+ if (!result->is_ccstr()) return false;
+ ccstr old_value = result->get_ccstr();
+ char* new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1);
+ strcpy(new_value, *value);
+ result->set_ccstr(new_value);
+ if (result->origin == DEFAULT && old_value != NULL) {
+ // Prior value is NOT heap allocated, but was a literal constant.
+ char* old_value_to_free = NEW_C_HEAP_ARRAY(char, strlen(old_value)+1);
+ strcpy(old_value_to_free, old_value);
+ old_value = old_value_to_free;
+ }
+ *value = old_value;
+ result->origin = origin;
+ return true;
+}
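+
+// Illustrative sketch (caller-side code, not part of this file) of the contract
+// above: the previous value handed back through 'value' is a C-heap copy owned
+// by the caller.
+//
+//   ccstr value = "gcore %p";          // hypothetical new setting
+//   if (CommandLineFlags::ccstrAtPut((char*)"OnError", 7, &value, COMMAND_LINE)) {
+//     // 'value' now refers to the previous setting, allocated on the C heap.
+//     FREE_C_HEAP_ARRAY(char, value);
+//   }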
+
+// Contract: Flag will make a private copy of the incoming value.
+void CommandLineFlagsEx::ccstrAtPut(CommandLineFlagWithType flag, ccstr value, FlagValueOrigin origin) {
+ Flag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type");
+ ccstr old_value = faddr->get_ccstr();
+ char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1);
+ strcpy(new_value, value);
+ faddr->set_ccstr(new_value);
+ if (faddr->origin != DEFAULT && old_value != NULL) {
+ // Prior value is heap allocated so free it.
+ FREE_C_HEAP_ARRAY(char, old_value);
+ }
+ faddr->origin = origin;
+}
+
+extern "C" {
+ static int compare_flags(const void* void_a, const void* void_b) {
+ return strcmp((*((Flag**) void_a))->name, (*((Flag**) void_b))->name);
+ }
+}
+
+void CommandLineFlags::printSetFlags() {
+ // Print which flags were set on the command line
+ // note: this method is called before the thread structure is in place
+ // which means resource allocation cannot be used.
+
+ // Compute size
+  int length = 0;
+ while (flagTable[length].name != NULL) length++;
+
+ // Sort
+ Flag** array = NEW_C_HEAP_ARRAY(Flag*, length);
+ for (int index = 0; index < length; index++) {
+ array[index] = &flagTable[index];
+ }
+ qsort(array, length, sizeof(Flag*), compare_flags);
+
+ // Print
+ for (int i = 0; i < length; i++) {
+ if (array[i]->origin /* naked field! */) {
+ array[i]->print_as_flag(tty);
+ tty->print(" ");
+ }
+ }
+ tty->cr();
+ FREE_C_HEAP_ARRAY(Flag*, array);
+}
+
+#ifndef PRODUCT
+
+
+void CommandLineFlags::verify() {
+ assert(Arguments::check_vm_args_consistency(), "Some flag settings conflict");
+}
+
+void CommandLineFlags::printFlags() {
+ // Print the flags sorted by name
+ // note: this method is called before the thread structure is in place
+ // which means resource allocation cannot be used.
+
+ // Compute size
+  int length = 0;
+ while (flagTable[length].name != NULL) length++;
+
+ // Sort
+ Flag** array = NEW_C_HEAP_ARRAY(Flag*, length);
+ for (int index = 0; index < length; index++) {
+ array[index] = &flagTable[index];
+ }
+ qsort(array, length, sizeof(Flag*), compare_flags);
+
+ // Print
+ tty->print_cr("[Global flags]");
+ for (int i = 0; i < length; i++) {
+ if (array[i]->is_unlocked()) {
+ array[i]->print_on(tty);
+ }
+ }
+ FREE_C_HEAP_ARRAY(Flag*, array);
+}
+
+#endif
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
new file mode 100644
index 000000000..c1e8fefeb
--- /dev/null
+++ b/src/share/vm/runtime/globals.hpp
@@ -0,0 +1,3208 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#if !defined(COMPILER1) && !defined(COMPILER2)
+define_pd_global(bool, BackgroundCompilation, false);
+define_pd_global(bool, UseTLAB, false);
+define_pd_global(bool, CICompileOSR, false);
+define_pd_global(bool, UseTypeProfile, false);
+define_pd_global(bool, UseOnStackReplacement, false);
+define_pd_global(bool, InlineIntrinsics, false);
+define_pd_global(bool, PreferInterpreterNativeStubs, true);
+define_pd_global(bool, ProfileInterpreter, false);
+define_pd_global(bool, ProfileTraps, false);
+define_pd_global(bool, TieredCompilation, false);
+
+define_pd_global(intx, CompileThreshold, 0);
+define_pd_global(intx, Tier2CompileThreshold, 0);
+define_pd_global(intx, Tier3CompileThreshold, 0);
+define_pd_global(intx, Tier4CompileThreshold, 0);
+
+define_pd_global(intx, BackEdgeThreshold, 0);
+define_pd_global(intx, Tier2BackEdgeThreshold, 0);
+define_pd_global(intx, Tier3BackEdgeThreshold, 0);
+define_pd_global(intx, Tier4BackEdgeThreshold, 0);
+
+define_pd_global(intx, OnStackReplacePercentage, 0);
+define_pd_global(bool, ResizeTLAB, false);
+define_pd_global(intx, FreqInlineSize, 0);
+define_pd_global(intx, NewSizeThreadIncrease, 4*K);
+define_pd_global(intx, NewRatio, 4);
+define_pd_global(intx, InlineClassNatives, true);
+define_pd_global(intx, InlineUnsafeOps, true);
+define_pd_global(intx, InitialCodeCacheSize, 160*K);
+define_pd_global(intx, ReservedCodeCacheSize, 32*M);
+define_pd_global(intx, CodeCacheExpansionSize, 32*K);
+define_pd_global(intx, CodeCacheMinBlockLength, 1);
+define_pd_global(uintx,PermSize, ScaleForWordSize(4*M));
+define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
+define_pd_global(bool, NeverActAsServerClassMachine, true);
+define_pd_global(uintx, DefaultMaxRAM, 1*G);
+#define CI_COMPILER_COUNT 0
+#else
+
+#ifdef COMPILER2
+#define CI_COMPILER_COUNT 2
+#else
+#define CI_COMPILER_COUNT 1
+#endif // COMPILER2
+
+#endif // no compilers
+
+
+// string type aliases used only in this file
+typedef const char* ccstr;
+typedef const char* ccstrlist; // represents string arguments which accumulate
+
+enum FlagValueOrigin {
+ DEFAULT = 0,
+ COMMAND_LINE = 1,
+ ENVIRON_VAR = 2,
+ CONFIG_FILE = 3,
+ MANAGEMENT = 4,
+ ERGONOMIC = 5,
+ ATTACH_ON_DEMAND = 6,
+ INTERNAL = 99
+};
+
+struct Flag {
+ const char *type;
+ const char *name;
+ void* addr;
+ const char *kind;
+ FlagValueOrigin origin;
+
+ // points to all Flags static array
+ static Flag *flags;
+
+ // number of flags
+ static size_t numFlags;
+
+ static Flag* find_flag(char* name, size_t length);
+
+ bool is_bool() const { return strcmp(type, "bool") == 0; }
+ bool get_bool() const { return *((bool*) addr); }
+ void set_bool(bool value) { *((bool*) addr) = value; }
+
+ bool is_intx() const { return strcmp(type, "intx") == 0; }
+ intx get_intx() const { return *((intx*) addr); }
+ void set_intx(intx value) { *((intx*) addr) = value; }
+
+ bool is_uintx() const { return strcmp(type, "uintx") == 0; }
+ uintx get_uintx() const { return *((uintx*) addr); }
+ void set_uintx(uintx value) { *((uintx*) addr) = value; }
+
+ bool is_double() const { return strcmp(type, "double") == 0; }
+ double get_double() const { return *((double*) addr); }
+ void set_double(double value) { *((double*) addr) = value; }
+
+ bool is_ccstr() const { return strcmp(type, "ccstr") == 0 || strcmp(type, "ccstrlist") == 0; }
+ bool ccstr_accumulates() const { return strcmp(type, "ccstrlist") == 0; }
+ ccstr get_ccstr() const { return *((ccstr*) addr); }
+ void set_ccstr(ccstr value) { *((ccstr*) addr) = value; }
+
+ bool is_unlocker() const;
+ bool is_unlocked() const;
+ bool is_writeable() const;
+ bool is_external() const;
+
+ void print_on(outputStream* st);
+ void print_as_flag(outputStream* st);
+};
+
+// Debug flags control various aspects of the VM and are globally accessible.
+
+// Use FlagSetting to temporarily change some debug flag,
+// e.g. FlagSetting fs(DebugThisAndThat, true);
+// the flag is restored to its previous value upon leaving the scope.
+class FlagSetting {
+ bool val;
+ bool* flag;
+ public:
+ FlagSetting(bool& fl, bool newValue) { flag = &fl; val = fl; fl = newValue; }
+ ~FlagSetting() { *flag = val; }
+};
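+
+// Illustrative sketch (using the PrintCompilation flag declared later in this
+// file): the saved value is restored by the destructor, so the change is
+// limited to the enclosing scope.
+//
+//   {
+//     FlagSetting fs(PrintCompilation, true);  // flag forced on here
+//     ...                                      // code that runs with the flag set
+//   }                                          // flag reverts to its old value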
+
+
+class CounterSetting {
+ intx* counter;
+ public:
+ CounterSetting(intx* cnt) { counter = cnt; (*counter)++; }
+ ~CounterSetting() { (*counter)--; }
+};
+
+
+class IntFlagSetting {
+ intx val;
+ intx* flag;
+ public:
+ IntFlagSetting(intx& fl, intx newValue) { flag = &fl; val = fl; fl = newValue; }
+ ~IntFlagSetting() { *flag = val; }
+};
+
+
+class DoubleFlagSetting {
+ double val;
+ double* flag;
+ public:
+ DoubleFlagSetting(double& fl, double newValue) { flag = &fl; val = fl; fl = newValue; }
+ ~DoubleFlagSetting() { *flag = val; }
+};
+
+
+class CommandLineFlags {
+ public:
+ static bool boolAt(char* name, size_t len, bool* value);
+ static bool boolAt(char* name, bool* value) { return boolAt(name, strlen(name), value); }
+ static bool boolAtPut(char* name, size_t len, bool* value, FlagValueOrigin origin);
+ static bool boolAtPut(char* name, bool* value, FlagValueOrigin origin) { return boolAtPut(name, strlen(name), value, origin); }
+
+ static bool intxAt(char* name, size_t len, intx* value);
+ static bool intxAt(char* name, intx* value) { return intxAt(name, strlen(name), value); }
+ static bool intxAtPut(char* name, size_t len, intx* value, FlagValueOrigin origin);
+ static bool intxAtPut(char* name, intx* value, FlagValueOrigin origin) { return intxAtPut(name, strlen(name), value, origin); }
+
+ static bool uintxAt(char* name, size_t len, uintx* value);
+ static bool uintxAt(char* name, uintx* value) { return uintxAt(name, strlen(name), value); }
+ static bool uintxAtPut(char* name, size_t len, uintx* value, FlagValueOrigin origin);
+ static bool uintxAtPut(char* name, uintx* value, FlagValueOrigin origin) { return uintxAtPut(name, strlen(name), value, origin); }
+
+ static bool doubleAt(char* name, size_t len, double* value);
+ static bool doubleAt(char* name, double* value) { return doubleAt(name, strlen(name), value); }
+ static bool doubleAtPut(char* name, size_t len, double* value, FlagValueOrigin origin);
+ static bool doubleAtPut(char* name, double* value, FlagValueOrigin origin) { return doubleAtPut(name, strlen(name), value, origin); }
+
+ static bool ccstrAt(char* name, size_t len, ccstr* value);
+ static bool ccstrAt(char* name, ccstr* value) { return ccstrAt(name, strlen(name), value); }
+ static bool ccstrAtPut(char* name, size_t len, ccstr* value, FlagValueOrigin origin);
+ static bool ccstrAtPut(char* name, ccstr* value, FlagValueOrigin origin) { return ccstrAtPut(name, strlen(name), value, origin); }
+
+ // Returns false if name is not a command line flag.
+ static bool wasSetOnCmdline(const char* name, bool* value);
+ static void printSetFlags();
+
+ static void printFlags() PRODUCT_RETURN;
+
+ static void verify() PRODUCT_RETURN;
+};
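+
+// Illustrative sketch (caller-side code such as argument parsing, not part of
+// this class): flags can be read and written by name through the accessors
+// above; the *AtPut variants return the previous value through the same pointer.
+//
+//   bool value = true;
+//   if (CommandLineFlags::boolAtPut((char*)"PrintCompilation", &value, COMMAND_LINE)) {
+//     // success: PrintCompilation is now true and 'value' holds the old setting
+//   }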
+
+// use this for flags that are true by default in the debug version but
+// false in the optimized version, and vice versa
+#ifdef ASSERT
+#define trueInDebug true
+#define falseInDebug false
+#else
+#define trueInDebug false
+#define falseInDebug true
+#endif
+
+// use this for flags that are true per default in the product build
+// but false in development builds, and vice versa
+#ifdef PRODUCT
+#define trueInProduct true
+#define falseInProduct false
+#else
+#define trueInProduct false
+#define falseInProduct true
+#endif
+
+// use this for flags that are true per default in the tiered build
+// but false in non-tiered builds, and vice versa
+#ifdef TIERED
+#define trueInTiered true
+#define falseInTiered false
+#else
+#define trueInTiered false
+#define falseInTiered true
+#endif
+
+
+// develop flags are settable / visible only during development and are constant in the PRODUCT version
+// product flags are always settable / visible
+// notproduct flags are settable / visible only during development and are not declared in the PRODUCT version
+
+// A flag must be declared with one of the following types:
+// bool, intx, uintx, ccstr.
+// The type "ccstr" is an alias for "const char*" and is used
+// only in this file, because the macrology requires single-token type names.
+
+// Note: Diagnostic options are not meant for VM tuning or for product modes.
+// They are to be used for VM quality assurance or field diagnosis
+// of VM bugs. They are hidden so that users will not be encouraged to
+// try them as if they were ordinary VM execution options. However, they
+// are available in the product version of the VM. Under instruction
+// from support engineers, VM customers can turn them on to collect
+// diagnostic information about VM problems. To use a VM diagnostic
+// option, you must first specify +UnlockDiagnosticVMOptions.
+// (This master switch also affects the behavior of -Xprintflags.)
+
+// manageable flags are writeable external product flags.
+// They are dynamically writeable through the JDK management interface
+// (com.sun.management.HotSpotDiagnosticMXBean API) and also through JConsole.
+// These flags are an externally exported interface (see CCC). The list of
+// manageable flags can be queried programmatically through the management
+// interface.
+//
+// A flag can be made as "manageable" only if
+// - the flag is defined in a CCC as an external exported interface.
+// - the VM implementation supports dynamic setting of the flag.
+// This implies that the VM must *always* query the flag variable
+// and not reuse state related to the flag state at any given time.
+// - you want the flag to be queried programmatically by the customers.
+//
+// product_rw flags are writeable internal product flags.
+// They are like "manageable" flags but for internal/private use.
+// The product_rw flags are internal/private flags which
+// may be changed/removed in a future release. They can be read and set
+// through the management interface when the name of the flag is supplied.
+//
+// A flag can be made as "product_rw" only if
+// - the VM implementation supports dynamic setting of the flag.
+// This implies that the VM must *always* query the flag variable
+// and not reuse state related to the flag state at any given time.
+//
+// Note that if develop flags ever need to be writeable,
+// this can be done in the same way as product_rw.
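+
+// Illustrative sketch (MyNewTraceFlag is a hypothetical name, not a real flag):
+// an entry in the table below gives the flag kind, type, name, default value
+// (omitted for the _pd variants, whose defaults are per platform) and a
+// one-line description; in the actual macro every line ends with a
+// continuation backslash:
+//
+//   product(bool, MyNewTraceFlag, false,
+//           "One-line description printed by CommandLineFlags::printFlags()")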
+
+#define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, manageable, product_rw) \
+ \
+ /* UseMembar is theoretically a temp flag used for memory barrier \
+ * removal testing. It was supposed to be removed before FCS but has \
+ * been re-added (see 6401008) */ \
+ product(bool, UseMembar, false, \
+ "(Unstable) Issues membars on thread state transitions") \
+ \
+ product(bool, PrintCommandLineFlags, false, \
+ "Prints flags that appeared on the command line") \
+ \
+ diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug, \
+ "Enable processing of flags relating to field diagnostics") \
+ \
+ product(bool, JavaMonitorsInStackTrace, true, \
+ "Print info. about Java monitor locks when the stacks are dumped")\
+ \
+ product_pd(bool, UseLargePages, \
+ "Use large page memory") \
+ \
+ develop(bool, TracePageSizes, false, \
+ "Trace page size selection and usage.") \
+ \
+ product(bool, UseNUMA, false, \
+ "Use NUMA if available") \
+ \
+ product(intx, NUMAChunkResizeWeight, 20, \
+ "Percentage (0-100) used to weight the current sample when " \
+ "computing exponentially decaying average for " \
+ "AdaptiveNUMAChunkSizing") \
+ \
+ product(intx, NUMASpaceResizeRate, 1*G, \
+ "Do not reallocate more that this amount per collection") \
+ \
+ product(bool, UseAdaptiveNUMAChunkSizing, true, \
+ "Enable adaptive chunk sizing for NUMA") \
+ \
+ product(bool, NUMAStats, false, \
+ "Print NUMA stats in detailed heap information") \
+ \
+ product(intx, NUMAPageScanRate, 256, \
+ "Maximum number of pages to include in the page scan procedure") \
+ \
+ product_pd(bool, NeedsDeoptSuspend, \
+ "True for register window machines (sparc/ia64)") \
+ \
+ product(intx, UseSSE, 99, \
+ "Highest supported SSE instructions set on x86/x64") \
+ \
+ product(uintx, LargePageSizeInBytes, 0, \
+ "Large page size (0 to let VM choose the page size") \
+ \
+ product(uintx, LargePageHeapSizeThreshold, 128*M, \
+ "Use large pages if max heap is at least this big") \
+ \
+ product(bool, ForceTimeHighResolution, false, \
+ "Using high time resolution(For Win32 only)") \
+ \
+ product(bool, CacheTimeMillis, false, \
+ "Cache os::javaTimeMillis with CacheTimeMillisGranularity") \
+ \
+ diagnostic(uintx, CacheTimeMillisGranularity, 50, \
+ "Granularity for CacheTimeMillis") \
+ \
+ develop(bool, TraceItables, false, \
+ "Trace initialization and use of itables") \
+ \
+ develop(bool, TracePcPatching, false, \
+ "Trace usage of frame::patch_pc") \
+ \
+ develop(bool, TraceJumps, false, \
+ "Trace assembly jumps in thread ring buffer") \
+ \
+ develop(bool, TraceRelocator, false, \
+ "Trace the bytecode relocator") \
+ \
+ develop(bool, TraceLongCompiles, false, \
+ "Print out every time compilation is longer than " \
+ "a given threashold") \
+ \
+ develop(bool, SafepointALot, false, \
+ "Generates a lot of safepoints. Works with " \
+ "GuaranteedSafepointInterval") \
+ \
+ product_pd(bool, BackgroundCompilation, \
+ "A thread requesting compilation is not blocked during " \
+ "compilation") \
+ \
+ product(bool, PrintVMQWaitTime, false, \
+ "Prints out the waiting time in VM operation queue") \
+ \
+ develop(bool, BailoutToInterpreterForThrows, false, \
+ "Compiled methods which throws/catches exceptions will be " \
+ "deopt and intp.") \
+ \
+ develop(bool, NoYieldsInMicrolock, false, \
+ "Disable yields in microlock") \
+ \
+ develop(bool, TraceOopMapGeneration, false, \
+ "Shows oopmap generation") \
+ \
+ product(bool, MethodFlushing, true, \
+ "Reclamation of zombie and not-entrant methods") \
+ \
+ develop(bool, VerifyStack, false, \
+ "Verify stack of each thread when it is entering a runtime call") \
+ \
+ develop(bool, ForceUnreachable, false, \
+ "(amd64) Make all non code cache addresses to be unreachable with rip-rel forcing use of 64bit literal fixups") \
+ \
+ notproduct(bool, StressDerivedPointers, false, \
+ "Force scavenge when a derived pointers is detected on stack " \
+ "after rtm call") \
+ \
+ develop(bool, TraceDerivedPointers, false, \
+ "Trace traversal of derived pointers on stack") \
+ \
+ notproduct(bool, TraceCodeBlobStacks, false, \
+ "Trace stack-walk of codeblobs") \
+ \
+ product(bool, PrintJNIResolving, false, \
+ "Used to implement -v:jni") \
+ \
+ notproduct(bool, PrintRewrites, false, \
+ "Print methods that are being rewritten") \
+ \
+ product(bool, UseInlineCaches, true, \
+ "Use Inline Caches for virtual calls ") \
+ \
+ develop(bool, InlineArrayCopy, true, \
+ "inline arraycopy native that is known to be part of " \
+ "base library DLL") \
+ \
+ develop(bool, InlineObjectHash, true, \
+ "inline Object::hashCode() native that is known to be part " \
+ "of base library DLL") \
+ \
+ develop(bool, InlineObjectCopy, true, \
+ "inline Object.clone and Arrays.copyOf[Range] intrinsics") \
+ \
+ develop(bool, InlineNatives, true, \
+ "inline natives that are known to be part of base library DLL") \
+ \
+ develop(bool, InlineMathNatives, true, \
+ "inline SinD, CosD, etc.") \
+ \
+ develop(bool, InlineClassNatives, true, \
+ "inline Class.isInstance, etc") \
+ \
+ develop(bool, InlineAtomicLong, true, \
+ "inline sun.misc.AtomicLong") \
+ \
+ develop(bool, InlineThreadNatives, true, \
+ "inline Thread.currentThread, etc") \
+ \
+ develop(bool, InlineReflectionGetCallerClass, true, \
+ "inline sun.reflect.Reflection.getCallerClass(), known to be part "\
+ "of base library DLL") \
+ \
+ develop(bool, InlineUnsafeOps, true, \
+ "inline memory ops (native methods) from sun.misc.Unsafe") \
+ \
+ develop(bool, ConvertCmpD2CmpF, true, \
+ "Convert cmpD to cmpF when one input is constant in float range") \
+ \
+ develop(bool, ConvertFloat2IntClipping, true, \
+ "Convert float2int clipping idiom to integer clipping") \
+ \
+ develop(bool, SpecialStringCompareTo, true, \
+ "special version of string compareTo") \
+ \
+ develop(bool, SpecialStringIndexOf, true, \
+ "special version of string indexOf") \
+ \
+ develop(bool, TraceCallFixup, false, \
+ "traces all call fixups") \
+ \
+ develop(bool, DeoptimizeALot, false, \
+ "deoptimize at every exit from the runtime system") \
+ \
+ develop(ccstrlist, DeoptimizeOnlyAt, "", \
+ "a comma separated list of bcis to deoptimize at") \
+ \
+ product(bool, DeoptimizeRandom, false, \
+ "deoptimize random frames on random exit from the runtime system")\
+ \
+ notproduct(bool, ZombieALot, false, \
+ "creates zombies (non-entrant) at exit from the runt. system") \
+ \
+ notproduct(bool, WalkStackALot, false, \
+ "trace stack (no print) at every exit from the runtime system") \
+ \
+ develop(bool, Debugging, false, \
+ "set when executing debug methods in debug.ccp " \
+ "(to prevent triggering assertions)") \
+ \
+ notproduct(bool, StrictSafepointChecks, trueInDebug, \
+ "Enable strict checks that safepoints cannot happen for threads " \
+ "that used No_Safepoint_Verifier") \
+ \
+ notproduct(bool, VerifyLastFrame, false, \
+ "Verify oops on last frame on entry to VM") \
+ \
+ develop(bool, TraceHandleAllocation, false, \
+ "Prints out warnings when suspicious many handles are allocated") \
+ \
+ product(bool, UseCompilerSafepoints, true, \
+ "Stop at safepoints in compiled code") \
+ \
+ product(bool, UseSplitVerifier, true, \
+ "use split verifier with StackMapTable attributes") \
+ \
+ product(bool, FailOverToOldVerifier, true, \
+ "fail over to old verifier when split verifier fails") \
+ \
+ develop(bool, ShowSafepointMsgs, false, \
+ "Show msg. about safepoint synch.") \
+ \
+ product(bool, SafepointTimeout, false, \
+ "Time out and warn or fail after SafepointTimeoutDelay " \
+ "milliseconds if failed to reach safepoint") \
+ \
+ develop(bool, DieOnSafepointTimeout, false, \
+ "Die upon failure to reach safepoint (see SafepointTimeout)") \
+ \
+ /* 50 retries * (5 * current_retry_count) millis = ~6.375 seconds */ \
+ /* typically, at most a few retries are needed */ \
+ product(intx, SuspendRetryCount, 50, \
+ "Maximum retry count for an external suspend request") \
+ \
+ product(intx, SuspendRetryDelay, 5, \
+ "Milliseconds to delay per retry (* current_retry_count)") \
+ \
+ product(bool, AssertOnSuspendWaitFailure, false, \
+ "Assert/Guarantee on external suspend wait failure") \
+ \
+ product(bool, TraceSuspendWaitFailures, false, \
+ "Trace external suspend wait failures") \
+ \
+ product(bool, MaxFDLimit, true, \
+ "Bump the number of file descriptors to max in solaris.") \
+ \
+ notproduct(bool, LogEvents, trueInDebug, \
+ "Enable Event log") \
+ \
+ product(bool, BytecodeVerificationRemote, true, \
+ "Enables the Java bytecode verifier for remote classes") \
+ \
+ product(bool, BytecodeVerificationLocal, false, \
+ "Enables the Java bytecode verifier for local classes") \
+ \
+ develop(bool, ForceFloatExceptions, trueInDebug, \
+ "Force exceptions on FP stack under/overflow") \
+ \
+ develop(bool, SoftMatchFailure, trueInProduct, \
+ "If the DFA fails to match a node, print a message and bail out") \
+ \
+ develop(bool, VerifyStackAtCalls, false, \
+ "Verify that the stack pointer is unchanged after calls") \
+ \
+ develop(bool, TraceJavaAssertions, false, \
+ "Trace java language assertions") \
+ \
+ notproduct(bool, CheckAssertionStatusDirectives, false, \
+ "temporary - see javaClasses.cpp") \
+ \
+ notproduct(bool, PrintMallocFree, false, \
+ "Trace calls to C heap malloc/free allocation") \
+ \
+ notproduct(bool, PrintOopAddress, false, \
+ "Always print the location of the oop") \
+ \
+ notproduct(bool, VerifyCodeCacheOften, false, \
+ "Verify compiled-code cache often") \
+ \
+ develop(bool, ZapDeadCompiledLocals, false, \
+ "Zap dead locals in compiler frames") \
+ \
+ notproduct(bool, ZapDeadLocalsOld, false, \
+ "Zap dead locals (old version, zaps all frames when " \
+ "entering the VM") \
+ \
+ notproduct(bool, CheckOopishValues, false, \
+ "Warn if value contains oop ( requires ZapDeadLocals)") \
+ \
+ develop(bool, UseMallocOnly, false, \
+ "use only malloc/free for allocation (no resource area/arena)") \
+ \
+ develop(bool, PrintMalloc, false, \
+ "print all malloc/free calls") \
+ \
+ develop(bool, ZapResourceArea, trueInDebug, \
+ "Zap freed resource/arena space with 0xABABABAB") \
+ \
+ notproduct(bool, ZapVMHandleArea, trueInDebug, \
+ "Zap freed VM handle space with 0xBCBCBCBC") \
+ \
+ develop(bool, ZapJNIHandleArea, trueInDebug, \
+ "Zap freed JNI handle space with 0xFEFEFEFE") \
+ \
+ develop(bool, ZapUnusedHeapArea, trueInDebug, \
+ "Zap unused heap space with 0xBAADBABE") \
+ \
+ develop(bool, PrintVMMessages, true, \
+ "Print vm messages on console") \
+ \
+ product(bool, PrintGCApplicationConcurrentTime, false, \
+ "Print the time the application has been running") \
+ \
+ product(bool, PrintGCApplicationStoppedTime, false, \
+ "Print the time the application has been stopped") \
+ \
+ develop(bool, Verbose, false, \
+ "Prints additional debugging information from other modes") \
+ \
+ develop(bool, PrintMiscellaneous, false, \
+ "Prints uncategorized debugging information (requires +Verbose)") \
+ \
+ develop(bool, WizardMode, false, \
+ "Prints much more debugging information") \
+ \
+ product(bool, ShowMessageBoxOnError, false, \
+ "Keep process alive on VM fatal error") \
+ \
+ product_pd(bool, UseOSErrorReporting, \
+ "Let VM fatal error propagate to the OS (ie. WER on Windows)") \
+ \
+ product(bool, SuppressFatalErrorMessage, false, \
+ "Do NO Fatal Error report [Avoid deadlock]") \
+ \
+ product(ccstrlist, OnError, "", \
+ "Run user-defined commands on fatal error; see VMError.cpp " \
+ "for examples") \
+ \
+ product(ccstrlist, OnOutOfMemoryError, "", \
+ "Run user-defined commands on first java.lang.OutOfMemoryError") \
+ \
+ manageable(bool, HeapDumpOnOutOfMemoryError, false, \
+ "Dump heap to file when java.lang.OutOfMemoryError is thrown") \
+ \
+ manageable(ccstr, HeapDumpPath, NULL, \
+ "When HeapDumpOnOutOfMemoryError is on, the path (filename or" \
+ "directory) of the dump file (defaults to java_pid<pid>.hprof" \
+ "in the working directory)") \
+ \
+ develop(uintx, SegmentedHeapDumpThreshold, 2*G, \
+ "Generate a segmented heap dump (JAVA PROFILE 1.0.2 format) " \
+ "when the heap usage is larger than this") \
+ \
+ develop(uintx, HeapDumpSegmentSize, 1*G, \
+ "Approximate segment size when generating a segmented heap dump") \
+ \
+ develop(bool, BreakAtWarning, false, \
+ "Execute breakpoint upon encountering VM warning") \
+ \
+ product_pd(bool, UseVectoredExceptions, \
+ "Temp Flag - Use Vectored Exceptions rather than SEH (Windows Only)") \
+ \
+ develop(bool, TraceVMOperation, false, \
+ "Trace vm operations") \
+ \
+ develop(bool, UseFakeTimers, false, \
+ "Tells whether the VM should use system time or a fake timer") \
+ \
+ diagnostic(bool, LogCompilation, false, \
+ "Log compilation activity in detail to hotspot.log or LogFile") \
+ \
+ product(bool, PrintCompilation, false, \
+ "Print compilations") \
+ \
+ diagnostic(bool, TraceNMethodInstalls, false, \
+ "Trace nmethod intallation") \
+ \
+ diagnostic(bool, TraceOSRBreakpoint, false, \
+ "Trace OSR Breakpoint ") \
+ \
+ diagnostic(bool, TraceCompileTriggered, false, \
+ "Trace compile triggered") \
+ \
+ diagnostic(bool, TraceTriggers, false, \
+ "Trace triggers") \
+ \
+ product(bool, AlwaysRestoreFPU, false, \
+ "Restore the FPU control word after every JNI call (expensive)") \
+ \
+ notproduct(bool, PrintCompilation2, false, \
+ "Print additional statistics per compilation") \
+ \
+ notproduct(bool, PrintAdapterHandlers, false, \
+ "Print code generated for i2c/c2i adapters") \
+ \
+ develop(bool, PrintAssembly, false, \
+ "Print assembly code") \
+ \
+ develop(bool, PrintNMethods, false, \
+ "Print assembly code for nmethods when generated") \
+ \
+ develop(bool, PrintNativeNMethods, false, \
+ "Print assembly code for native nmethods when generated") \
+ \
+ develop(bool, PrintDebugInfo, false, \
+ "Print debug information for all nmethods when generated") \
+ \
+ develop(bool, PrintRelocations, false, \
+ "Print relocation information for all nmethods when generated") \
+ \
+ develop(bool, PrintDependencies, false, \
+ "Print dependency information for all nmethods when generated") \
+ \
+ develop(bool, PrintExceptionHandlers, false, \
+ "Print exception handler tables for all nmethods when generated") \
+ \
+ develop(bool, InterceptOSException, false, \
+ "Starts debugger when an implicit OS (e.g., NULL) " \
+ "exception happens") \
+ \
+ notproduct(bool, PrintCodeCache, false, \
+ "Print the compiled_code cache when exiting") \
+ \
+ develop(bool, PrintCodeCache2, false, \
+ "Print detailed info on the compiled_code cache when exiting") \
+ \
+ develop(bool, PrintStubCode, false, \
+ "Print generated stub code") \
+ \
+ product(bool, StackTraceInThrowable, true, \
+ "Collect backtrace in throwable when exception happens") \
+ \
+ product(bool, OmitStackTraceInFastThrow, true, \
+ "Omit backtraces for some 'hot' exceptions in optimized code") \
+ \
+ product(bool, ProfilerPrintByteCodeStatistics, false, \
+ "Prints byte code statictics when dumping profiler output") \
+ \
+ product(bool, ProfilerRecordPC, false, \
+ "Collects tick for each 16 byte interval of compiled code") \
+ \
+ product(bool, ProfileVM, false, \
+ "Profiles ticks that fall within VM (either in the VM Thread " \
+ "or VM code called through stubs)") \
+ \
+ product(bool, ProfileIntervals, false, \
+ "Prints profiles for each interval (see ProfileIntervalsTicks)") \
+ \
+ notproduct(bool, ProfilerCheckIntervals, false, \
+ "Collect and print info on spacing of profiler ticks") \
+ \
+ develop(bool, PrintJVMWarnings, false, \
+ "Prints warnings for unimplemented JVM functions") \
+ \
+ notproduct(uintx, WarnOnStalledSpinLock, 0, \
+ "Prints warnings for stalled SpinLocks") \
+ \
+ develop(bool, InitializeJavaLangSystem, true, \
+ "Initialize java.lang.System - turn off for individual " \
+ "method debugging") \
+ \
+ develop(bool, InitializeJavaLangString, true, \
+ "Initialize java.lang.String - turn off for individual " \
+ "method debugging") \
+ \
+ develop(bool, InitializeJavaLangExceptionsErrors, true, \
+ "Initialize various error and exception classes - turn off for " \
+ "individual method debugging") \
+ \
+ product(bool, RegisterFinalizersAtInit, true, \
+ "Register finalizable objects at end of Object.<init> or " \
+ "after allocation.") \
+ \
+ develop(bool, RegisterReferences, true, \
+ "Tells whether the VM should register soft/weak/final/phantom " \
+ "references") \
+ \
+ develop(bool, IgnoreRewrites, false, \
+ "Supress rewrites of bytecodes in the oopmap generator. " \
+ "This is unsafe!") \
+ \
+ develop(bool, PrintCodeCacheExtension, false, \
+ "Print extension of code cache") \
+ \
+ develop(bool, UsePrivilegedStack, true, \
+ "Enable the security JVM functions") \
+ \
+ develop(bool, IEEEPrecision, true, \
+ "Enables IEEE precision (for INTEL only)") \
+ \
+ develop(bool, ProtectionDomainVerification, true, \
+ "Verifies protection domain before resolution in system " \
+ "dictionary") \
+ \
+ product(bool, ClassUnloading, true, \
+ "Do unloading of classes") \
+ \
+ develop(bool, DisableStartThread, false, \
+ "Disable starting of additional Java threads " \
+ "(for debugging only)") \
+ \
+ develop(bool, MemProfiling, false, \
+ "Write memory usage profiling to log file") \
+ \
+ notproduct(bool, PrintSystemDictionaryAtExit, false, \
+ "Prints the system dictionary at exit") \
+ \
+ diagnostic(bool, UnsyncloadClass, false, \
+ "Unstable: VM calls loadClass unsynchronized. Custom classloader "\
+ "must call VM synchronized for findClass & defineClass") \
+ \
+ product_pd(bool, DontYieldALot, \
+ "Throw away obvious excess yield calls (for SOLARIS only)") \
+ \
+ product_pd(bool, ConvertSleepToYield, \
+ "Converts sleep(0) to thread yield " \
+ "(may be off for SOLARIS to improve GUI)") \
+ \
+ product(bool, ConvertYieldToSleep, false, \
+ "Converts yield to a sleep of MinSleepInterval to simulate Win32 "\
+ "behavior (SOLARIS only)") \
+ \
+ product(bool, UseBoundThreads, true, \
+ "Bind user level threads to kernel threads (for SOLARIS only)") \
+ \
+ develop(bool, UseDetachedThreads, true, \
+ "Use detached threads that are recycled upon termination " \
+ "(for SOLARIS only)") \
+ \
+ product(bool, UseLWPSynchronization, true, \
+ "Use LWP-based instead of libthread-based synchronization " \
+ "(SPARC only)") \
+ \
+ product(ccstr, SyncKnobs, "", \
+ "(Unstable) Various monitor synchronization tunables") \
+ \
+ product(intx, EmitSync, 0, \
+ "(Unsafe,Unstable) " \
+ " Controls emission of inline sync fast-path code") \
+ \
+ product(intx, AlwaysInflate, 0, "(Unstable) Force inflation") \
+ \
+ product(intx, Atomics, 0, \
+ "(Unsafe,Unstable) Diagnostic - Controls emission of atomics") \
+ \
+ product(intx, FenceInstruction, 0, \
+ "(Unsafe,Unstable) Experimental") \
+ \
+ product(intx, SyncFlags, 0, "(Unsafe,Unstable) Experimental Sync flags" ) \
+ \
+ product(intx, SyncVerbose, 0, "(Unstable)" ) \
+ \
+ product(intx, ClearFPUAtPark, 0, "(Unsafe,Unstable)" ) \
+ \
+ product(intx, hashCode, 0, \
+ "(Unstable) select hashCode generation algorithm" ) \
+ \
+ product(intx, WorkAroundNPTLTimedWaitHang, 1, \
+ "(Unstable, Linux-specific)" \
+ " avoid NPTL-FUTEX hang pthread_cond_timedwait" ) \
+ \
+ product(bool, FilterSpuriousWakeups , true, \
+ "Prevent spurious or premature wakeups from object.wait" \
+ "(Solaris only)") \
+ \
+ product(intx, NativeMonitorTimeout, -1, "(Unstable)" ) \
+ product(intx, NativeMonitorFlags, 0, "(Unstable)" ) \
+ product(intx, NativeMonitorSpinLimit, 20, "(Unstable)" ) \
+ \
+ develop(bool, UsePthreads, false, \
+ "Use pthread-based instead of libthread-based synchronization " \
+ "(SPARC only)") \
+ \
+ product(bool, AdjustConcurrency, false, \
+ "call thr_setconcurrency at thread create time to avoid " \
+ "LWP starvation on MP systems (For Solaris Only)") \
+ \
+ develop(bool, UpdateHotSpotCompilerFileOnError, true, \
+ "Should the system attempt to update the compiler file when " \
+ "an error occurs?") \
+ \
+ product(bool, ReduceSignalUsage, false, \
+ "Reduce the use of OS signals in Java and/or the VM") \
+ \
+ notproduct(bool, ValidateMarkSweep, false, \
+ "Do extra validation during MarkSweep collection") \
+ \
+ notproduct(bool, RecordMarkSweepCompaction, false, \
+ "Enable GC-to-GC recording and querying of compaction during " \
+ "MarkSweep") \
+ \
+ develop_pd(bool, ShareVtableStubs, \
+ "Share vtable stubs (smaller code but worse branch prediction") \
+ \
+ develop(bool, LoadLineNumberTables, true, \
+ "Tells whether the class file parser loads line number tables") \
+ \
+ develop(bool, LoadLocalVariableTables, true, \
+ "Tells whether the class file parser loads local variable tables")\
+ \
+ develop(bool, LoadLocalVariableTypeTables, true, \
+ "Tells whether the class file parser loads local variable type tables")\
+ \
+ product(bool, AllowUserSignalHandlers, false, \
+ "Do not complain if the application installs signal handlers " \
+ "(Solaris & Linux only)") \
+ \
+ product(bool, UseSignalChaining, true, \
+ "Use signal-chaining to invoke signal handlers installed " \
+ "by the application (Solaris & Linux only)") \
+ \
+ product(bool, UseAltSigs, false, \
+ "Use alternate signals instead of SIGUSR1 & SIGUSR2 for VM " \
+ "internal signals. (Solaris only)") \
+ \
+ product(bool, UseSpinning, false, \
+ "Use spinning in monitor inflation and before entry") \
+ \
+ product(bool, PreSpinYield, false, \
+ "Yield before inner spinning loop") \
+ \
+ product(bool, PostSpinYield, true, \
+ "Yield after inner spinning loop") \
+ \
+ product(bool, AllowJNIEnvProxy, false, \
+ "Allow JNIEnv proxies for jdbx") \
+ \
+ product(bool, JNIDetachReleasesMonitors, true, \
+ "JNI DetachCurrentThread releases monitors owned by thread") \
+ \
+ product(bool, RestoreMXCSROnJNICalls, false, \
+ "Restore MXCSR when returning from JNI calls") \
+ \
+ product(bool, CheckJNICalls, false, \
+ "Verify all arguments to JNI calls") \
+ \
+ product(bool, UseFastJNIAccessors, true, \
+ "Use optimized versions of Get<Primitive>Field") \
+ \
+ product(bool, EagerXrunInit, false, \
+ "Eagerly initialize -Xrun libraries; allows startup profiling, " \
+ " but not all -Xrun libraries may support the state of the VM at this time") \
+ \
+ product(bool, PreserveAllAnnotations, false, \
+ "Preserve RuntimeInvisibleAnnotations as well as RuntimeVisibleAnnotations") \
+ \
+ develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \
+ "Number of OutOfMemoryErrors preallocated with backtrace") \
+ \
+ product(bool, LazyBootClassLoader, true, \
+ "Enable/disable lazy opening of boot class path entries") \
+ \
+ diagnostic(bool, UseIncDec, true, \
+ "Use INC, DEC instructions on x86") \
+ \
+ product(bool, UseStoreImmI16, true, \
+ "Use store immediate 16-bits value instruction on x86") \
+ \
+ product(bool, UseAddressNop, false, \
+ "Use '0F 1F [addr]' NOP instructions on x86 cpus") \
+ \
+ product(bool, UseXmmLoadAndClearUpper, true, \
+ "Load low part of XMM register and clear upper part") \
+ \
+ product(bool, UseXmmRegToRegMoveAll, false, \
+ "Copy all XMM register bits when moving value between registers") \
+ \
+ product(intx, FieldsAllocationStyle, 1, \
+ "0 - type based with oops first, 1 - with oops last") \
+ \
+ product(bool, CompactFields, true, \
+ "Allocate nonstatic fields in gaps between previous fields") \
+ \
+ notproduct(bool, PrintCompactFieldsSavings, false, \
+ "Print how many words were saved with CompactFields") \
+ \
+ product(bool, UseBiasedLocking, true, \
+ "Enable biased locking in JVM") \
+ \
+ product(intx, BiasedLockingStartupDelay, 4000, \
+ "Number of milliseconds to wait before enabling biased locking") \
+ \
+ diagnostic(bool, PrintBiasedLockingStatistics, false, \
+ "Print statistics of biased locking in JVM") \
+ \
+ product(intx, BiasedLockingBulkRebiasThreshold, 20, \
+ "Threshold of number of revocations per type to try to " \
+ "rebias all objects in the heap of that type") \
+ \
+ product(intx, BiasedLockingBulkRevokeThreshold, 40, \
+ "Threshold of number of revocations per type to permanently " \
+ "revoke biases of all objects in the heap of that type") \
+ \
+ product(intx, BiasedLockingDecayTime, 25000, \
+ "Decay time (in milliseconds) to re-enable bulk rebiasing of a " \
+ "type after previous bulk rebias") \
+ \
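+ /* Illustrative note (not part of the original file): boolean flags */ \
+ /* declared here are toggled on the command line as -XX:+<Flag> or */ \
+ /* -XX:-<Flag>, while numeric flags take values, e.g. */ \
+ /* -XX:+UseBiasedLocking -XX:BiasedLockingStartupDelay=0 enables */ \
+ /* biased locking with no startup delay. */ \
+ \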
+ /* tracing */ \
+ \
+ notproduct(bool, TraceRuntimeCalls, false, \
+ "Trace run-time calls") \
+ \
+ develop(bool, TraceJNICalls, false, \
+ "Trace JNI calls") \
+ \
+ notproduct(bool, TraceJVMCalls, false, \
+ "Trace JVM calls") \
+ \
+ product(ccstr, TraceJVMTI, "", \
+ "Trace flags for JVMTI functions and events") \
+ \
+ /* This option can change an EMCP method into an obsolete method. */ \
+ /* This can affect tests that except specific methods to be EMCP. */ \
+ /* This option should be used with caution. */ \
+ product(bool, StressLdcRewrite, false, \
+ "Force ldc -> ldc_w rewrite during RedefineClasses") \
+ \
+ product(intx, TraceRedefineClasses, 0, \
+ "Trace level for JVMTI RedefineClasses") \
+ \
+ /* change to false by default sometime after Mustang */ \
+ product(bool, VerifyMergedCPBytecodes, true, \
+ "Verify bytecodes after RedefineClasses constant pool merging") \
+ \
+ develop(bool, TraceJNIHandleAllocation, false, \
+ "Trace allocation/deallocation of JNI handle blocks") \
+ \
+ develop(bool, TraceThreadEvents, false, \
+ "Trace all thread events") \
+ \
+ develop(bool, TraceBytecodes, false, \
+ "Trace bytecode execution") \
+ \
+ develop(bool, TraceClassInitialization, false, \
+ "Trace class initialization") \
+ \
+ develop(bool, TraceExceptions, false, \
+ "Trace exceptions") \
+ \
+ develop(bool, TraceICs, false, \
+ "Trace inline cache changes") \
+ \
+ notproduct(bool, TraceInvocationCounterOverflow, false, \
+ "Trace method invocation counter overflow") \
+ \
+ develop(bool, TraceInlineCacheClearing, false, \
+ "Trace clearing of inline caches in nmethods") \
+ \
+ develop(bool, TraceDependencies, false, \
+ "Trace dependencies") \
+ \
+ develop(bool, VerifyDependencies, trueInDebug, \
+ "Exercise and verify the compilation dependency mechanism") \
+ \
+ develop(bool, TraceNewOopMapGeneration, false, \
+ "Trace OopMapGeneration") \
+ \
+ develop(bool, TraceNewOopMapGenerationDetailed, false, \
+ "Trace OopMapGeneration: print detailed cell states") \
+ \
+ develop(bool, TimeOopMap, false, \
+ "Time calls to GenerateOopMap::compute_map() in sum") \
+ \
+ develop(bool, TimeOopMap2, false, \
+ "Time calls to GenerateOopMap::compute_map() individually") \
+ \
+ develop(bool, TraceMonitorMismatch, false, \
+ "Trace monitor matching failures during OopMapGeneration") \
+ \
+ develop(bool, TraceOopMapRewrites, false, \
+ "Trace rewritting of method oops during oop map generation") \
+ \
+ develop(bool, TraceSafepoint, false, \
+ "Trace safepoint operations") \
+ \
+ develop(bool, TraceICBuffer, false, \
+ "Trace usage of IC buffer") \
+ \
+ develop(bool, TraceCompiledIC, false, \
+ "Trace changes of compiled IC") \
+ \
+ notproduct(bool, TraceZapDeadLocals, false, \
+ "Trace zapping dead locals") \
+ \
+ develop(bool, TraceStartupTime, false, \
+ "Trace setup time") \
+ \
+ develop(bool, TraceHPI, false, \
+ "Trace Host Porting Interface (HPI)") \
+ \
+ product(ccstr, HPILibPath, NULL, \
+ "Specify alternate path to HPI library") \
+ \
+ develop(bool, TraceProtectionDomainVerification, false, \
+ "Trace protection domain verifcation") \
+ \
+ develop(bool, TraceClearedExceptions, false, \
+ "Prints when an exception is forcibly cleared") \
+ \
+ product(bool, TraceClassResolution, false, \
+ "Trace all constant pool resolutions (for debugging)") \
+ \
+ product(bool, TraceBiasedLocking, false, \
+ "Trace biased locking in JVM") \
+ \
+ product(bool, TraceMonitorInflation, false, \
+ "Trace monitor inflation in JVM") \
+ \
+ /* assembler */ \
+ product(bool, Use486InstrsOnly, false, \
+ "Use 80486 Compliant instruction subset") \
+ \
+ /* gc */ \
+ \
+ product(bool, UseSerialGC, false, \
+ "Tells whether the VM should use serial garbage collector") \
+ \
+ product(bool, UseParallelGC, false, \
+ "Use the Parallel Scavenge garbage collector") \
+ \
+ product(bool, UseParallelOldGC, false, \
+ "Use the Parallel Old garbage collector") \
+ \
+ product(bool, UseParallelOldGCCompacting, true, \
+ "In the Parallel Old garbage collector use parallel compaction") \
+ \
+ product(bool, UseParallelDensePrefixUpdate, true, \
+ "In the Parallel Old garbage collector use parallel dense" \
+ " prefix update") \
+ \
+ develop(bool, UseParallelOldGCChunkPointerCalc, true, \
+ "In the Parallel Old garbage collector use chucks to calculate" \
+ " new object locations") \
+ \
+ product(uintx, HeapMaximumCompactionInterval, 20, \
+ "How often should we maximally compact the heap (not allowing " \
+ "any dead space)") \
+ \
+ product(uintx, HeapFirstMaximumCompactionCount, 3, \
+ "The collection count for the first maximum compaction") \
+ \
+ product(bool, UseMaximumCompactionOnSystemGC, true, \
+ "In the Parallel Old garbage collector maximum compaction for " \
+ "a system GC") \
+ \
+ product(uintx, ParallelOldDeadWoodLimiterMean, 50, \
+ "The mean used by the par compact dead wood" \
+ "limiter (a number between 0-100).") \
+ \
+ product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \
+ "The standard deviation used by the par compact dead wood" \
+ "limiter (a number between 0-100).") \
+ \
+ product(bool, UseParallelOldGCDensePrefix, true, \
+ "Use a dense prefix with the Parallel Old garbage collector") \
+ \
+ product(uintx, ParallelGCThreads, 0, \
+ "Number of parallel threads parallel gc will use") \
+ \
+ product(uintx, ParallelCMSThreads, 0, \
+ "Max number of threads CMS will use for concurrent work") \
+ \
+ develop(bool, VerifyParallelOldWithMarkSweep, false, \
+ "Use the MarkSweep code to verify phases of Parallel Old") \
+ \
+ develop(uintx, VerifyParallelOldWithMarkSweepInterval, 1, \
+ "Interval at which the MarkSweep code is used to verify " \
+ "phases of Parallel Old") \
+ \
+ develop(bool, ParallelOldMTUnsafeMarkBitMap, false, \
+ "Use the Parallel Old MT unsafe in marking the bitmap") \
+ \
+ develop(bool, ParallelOldMTUnsafeUpdateLiveData, false, \
+ "Use the Parallel Old MT unsafe in update of live size") \
+ \
+ develop(bool, TraceChunkTasksQueuing, false, \
+ "Trace the queuing of the chunk tasks") \
+ \
+ product(uintx, YoungPLABSize, 4096, \
+ "Size of young gen promotion labs (in HeapWords)") \
+ \
+ product(uintx, OldPLABSize, 1024, \
+ "Size of old gen promotion labs (in HeapWords)") \
+ \
+ product(uintx, GCTaskTimeStampEntries, 200, \
+ "Number of time stamp entries per gc worker thread") \
+ \
+ product(bool, AlwaysTenure, false, \
+ "Always tenure objects in eden. (ParallelGC only)") \
+ \
+ product(bool, NeverTenure, false, \
+ "Never tenure objects in eden, May tenure on overflow" \
+ " (ParallelGC only)") \
+ \
+ product(bool, ScavengeBeforeFullGC, true, \
+ "Scavenge youngest generation before each full GC," \
+ " used with UseParallelGC") \
+ \
+ develop(bool, ScavengeWithObjectsInToSpace, false, \
+ "Allow scavenges to occur when to_space contains objects.") \
+ \
+ product(bool, UseConcMarkSweepGC, false, \
+ "Use Concurrent Mark-Sweep GC in the old generation") \
+ \
+ product(bool, ExplicitGCInvokesConcurrent, false, \
+ "A System.gc() request invokes a concurrent collection;" \
+ " (effective only when UseConcMarkSweepGC)") \
+ \
+ product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \
+ "A System.gc() request invokes a concurrent collection and" \
+ " also unloads classes during such a concurrent gc cycle " \
+ " (effective only when UseConcMarkSweepGC)") \
+ \
+ develop(bool, UseCMSAdaptiveFreeLists, true, \
+ "Use Adaptive Free Lists in the CMS generation") \
+ \
+ develop(bool, UseAsyncConcMarkSweepGC, true, \
+ "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\
+ \
+ develop(bool, RotateCMSCollectionTypes, false, \
+ "Rotate the CMS collections among concurrent and STW") \
+ \
+ product(bool, UseCMSBestFit, true, \
+ "Use CMS best fit allocation strategy") \
+ \
+ product(bool, UseCMSCollectionPassing, true, \
+ "Use passing of collection from background to foreground") \
+ \
+ product(bool, UseParNewGC, false, \
+ "Use parallel threads in the new generation.") \
+ \
+ product(bool, ParallelGCVerbose, false, \
+ "Verbose output for parallel GC.") \
+ \
+ product(intx, ParallelGCBufferWastePct, 10, \
+ "wasted fraction of parallel allocation buffer.") \
+ \
+ product(bool, ParallelGCRetainPLAB, true, \
+ "Retain parallel allocation buffers across scavenges.") \
+ \
+ product(intx, TargetPLABWastePct, 10, \
+ "target wasted space in last buffer as pct of overall allocation")\
+ \
+ product(uintx, PLABWeight, 75, \
+ "Percentage (0-100) used to weight the current sample when" \
+ "computing exponentially decaying average for ResizePLAB.") \
+ \
+ product(bool, ResizePLAB, true, \
+ "Dynamically resize (survivor space) promotion labs") \
+ \
+ product(bool, PrintPLAB, false, \
+ "Print (survivor space) promotion labs sizing decisions") \
+ \
+ product(intx, ParGCArrayScanChunk, 50, \
+ "Scan a subset and push remainder, if array is bigger than this") \
+ \
+ product(intx, ParGCDesiredObjsFromOverflowList, 20, \
+ "The desired number of objects to claim from the overflow list") \
+ \
+ product(uintx, CMSParPromoteBlocksToClaim, 50, \
+ "Number of blocks to attempt to claim when refilling CMS LAB for "\
+ "parallel GC.") \
+ \
+ product(bool, AlwaysPreTouch, false, \
+ "It forces all freshly committed pages to be pre-touched.") \
+ \
+ product(bool, CMSUseOldDefaults, false, \
+ "A flag temporarily introduced to allow reverting to some older" \
+ "default settings; older as of 6.0 ") \
+ \
+ product(intx, CMSYoungGenPerWorker, 16*M, \
+ "The amount of young gen chosen by default per GC worker " \
+ "thread available ") \
+ \
+ product(bool, CMSIncrementalMode, false, \
+ "Whether CMS GC should operate in \"incremental\" mode") \
+ \
+ product(uintx, CMSIncrementalDutyCycle, 10, \
+ "CMS incremental mode duty cycle (a percentage, 0-100). If" \
+ "CMSIncrementalPacing is enabled, then this is just the initial" \
+ "value") \
+ \
+ product(bool, CMSIncrementalPacing, true, \
+ "Whether the CMS incremental mode duty cycle should be " \
+ "automatically adjusted") \
+ \
+ product(uintx, CMSIncrementalDutyCycleMin, 0, \
+ "Lower bound on the duty cycle when CMSIncrementalPacing is" \
+ "enabled (a percentage, 0-100).") \
+ \
+ product(uintx, CMSIncrementalSafetyFactor, 10, \
+ "Percentage (0-100) used to add conservatism when computing the" \
+ "duty cycle.") \
+ \
+ product(uintx, CMSIncrementalOffset, 0, \
+ "Percentage (0-100) by which the CMS incremental mode duty cycle" \
+ "is shifted to the right within the period between young GCs") \
+ \
+ product(uintx, CMSExpAvgFactor, 25, \
+ "Percentage (0-100) used to weight the current sample when" \
+ "computing exponential averages for CMS statistics.") \
+ \
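+ /* Note (an interpretation, not stated in this file): the percentage */ \
+ /* "weight" flags here (e.g. PLABWeight, CMSExpAvgFactor, and the */ \
+ /* CMS FLS/sweep weights below) feed an exponentially decaying */ \
+ /* average of roughly the form: */ \
+ /* avg = (weight * sample + (100 - weight) * avg) / 100 */ \
+ \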
+ product(uintx, CMS_FLSWeight, 50, \
+ "Percentage (0-100) used to weight the current sample when" \
+ "computing exponentially decating averages for CMS FLS statistics.") \
+ \
+ product(uintx, CMS_FLSPadding, 2, \
+ "The multiple of deviation from mean to use for buffering" \
+ "against volatility in free list demand.") \
+ \
+ product(uintx, FLSCoalescePolicy, 2, \
+ "CMS: Aggression level for coalescing, increasing from 0 to 4") \
+ \
+ product(uintx, CMS_SweepWeight, 50, \
+ "Percentage (0-100) used to weight the current sample when" \
+ "computing exponentially decaying average for inter-sweep duration.") \
+ \
+ product(uintx, CMS_SweepPadding, 2, \
+ "The multiple of deviation from mean to use for buffering" \
+ "against volatility in inter-sweep duration.") \
+ \
+ product(uintx, CMS_SweepTimerThresholdMillis, 10, \
+ "Skip block flux-rate sampling for an epoch unless inter-sweep " \
+ " duration exceeds this threhold in milliseconds") \
+ \
+ develop(bool, CMSTraceIncrementalMode, false, \
+ "Trace CMS incremental mode") \
+ \
+ develop(bool, CMSTraceIncrementalPacing, false, \
+ "Trace CMS incremental mode pacing computation") \
+ \
+ develop(bool, CMSTraceThreadState, false, \
+ "Trace the CMS thread state (enable the trace_state() method)") \
+ \
+ product(bool, CMSClassUnloadingEnabled, false, \
+ "Whether class unloading enabled when using CMS GC") \
+ \
+ product(bool, CMSCompactWhenClearAllSoftRefs, true, \
+ "Compact when asked to collect CMS gen with clear_all_soft_refs") \
+ \
+ product(bool, UseCMSCompactAtFullCollection, true, \
+ "Use mark sweep compact at full collections") \
+ \
+ product(uintx, CMSFullGCsBeforeCompaction, 0, \
+ "Number of CMS full collection done before compaction if > 0") \
+ \
+ develop(intx, CMSDictionaryChoice, 0, \
+ "Use BinaryTreeDictionary as default in the CMS generation") \
+ \
+ product(uintx, CMSIndexedFreeListReplenish, 4, \
+ "Replenish and indexed free list with this number of chunks") \
+ \
+ product(bool, CMSLoopWarn, false, \
+ "Warn in case of excessive CMS looping") \
+ \
+ develop(bool, CMSOverflowEarlyRestoration, false, \
+ "Whether preserved marks should be restored early") \
+ \
+ product(uintx, CMSMarkStackSize, 32*K, \
+ "Size of CMS marking stack") \
+ \
+ product(uintx, CMSMarkStackSizeMax, 4*M, \
+ "Max size of CMS marking stack") \
+ \
+ notproduct(bool, CMSMarkStackOverflowALot, false, \
+ "Whether we should simulate frequent marking stack / work queue" \
+ " overflow") \
+ \
+ notproduct(intx, CMSMarkStackOverflowInterval, 1000, \
+ "A per-thread `interval' counter that determines how frequently" \
+ " we simulate overflow; a smaller number increases frequency") \
+ \
+ product(uintx, CMSMaxAbortablePrecleanLoops, 0, \
+ "(Temporary, subject to experimentation)" \
+ "Maximum number of abortable preclean iterations, if > 0") \
+ \
+ product(intx, CMSMaxAbortablePrecleanTime, 5000, \
+ "(Temporary, subject to experimentation)" \
+ "Maximum time in abortable preclean in ms") \
+ \
+ product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \
+ "(Temporary, subject to experimentation)" \
+ "Nominal minimum work per abortable preclean iteration") \
+ \
+ product(intx, CMSAbortablePrecleanWaitMillis, 100, \
+ "(Temporary, subject to experimentation)" \
+ " Time that we sleep between iterations when not given" \
+ " enough work per iteration") \
+ \
+ product(uintx, CMSRescanMultiple, 32, \
+ "Size (in cards) of CMS parallel rescan task") \
+ \
+ product(uintx, CMSConcMarkMultiple, 32, \
+ "Size (in cards) of CMS concurrent MT marking task") \
+ \
+ product(uintx, CMSRevisitStackSize, 1*M, \
+ "Size of CMS KlassKlass revisit stack") \
+ \
+ product(bool, CMSAbortSemantics, false, \
+ "Whether abort-on-overflow semantics is implemented") \
+ \
+ product(bool, CMSParallelRemarkEnabled, true, \
+ "Whether parallel remark enabled (only if ParNewGC)") \
+ \
+ product(bool, CMSParallelSurvivorRemarkEnabled, true, \
+ "Whether parallel remark of survivor space" \
+ " enabled (effective only if CMSParallelRemarkEnabled)") \
+ \
+ product(bool, CMSPLABRecordAlways, true, \
+ "Whether to always record survivor space PLAB bdries" \
+ " (effective only if CMSParallelSurvivorRemarkEnabled)") \
+ \
+ product(bool, CMSConcurrentMTEnabled, true, \
+ "Whether multi-threaded concurrent work enabled (if ParNewGC)") \
+ \
+ product(bool, CMSPermGenPrecleaningEnabled, true, \
+ "Whether concurrent precleaning enabled in perm gen" \
+ " (effective only when CMSPrecleaningEnabled is true)") \
+ \
+ product(bool, CMSPrecleaningEnabled, true, \
+ "Whether concurrent precleaning enabled") \
+ \
+ product(uintx, CMSPrecleanIter, 3, \
+ "Maximum number of precleaning iteration passes") \
+ \
+ product(uintx, CMSPrecleanNumerator, 2, \
+ "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \
+ " ratio") \
+ \
+ product(uintx, CMSPrecleanDenominator, 3, \
+ "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \
+ " ratio") \
+ \
+ product(bool, CMSPrecleanRefLists1, true, \
+ "Preclean ref lists during (initial) preclean phase") \
+ \
+ product(bool, CMSPrecleanRefLists2, false, \
+ "Preclean ref lists during abortable preclean phase") \
+ \
+ product(bool, CMSPrecleanSurvivors1, false, \
+ "Preclean survivors during (initial) preclean phase") \
+ \
+ product(bool, CMSPrecleanSurvivors2, true, \
+ "Preclean survivors during abortable preclean phase") \
+ \
+ product(uintx, CMSPrecleanThreshold, 1000, \
+ "Don't re-iterate if #dirty cards less than this") \
+ \
+ product(bool, CMSCleanOnEnter, true, \
+ "Clean-on-enter optimization for reducing number of dirty cards") \
+ \
+ product(uintx, CMSRemarkVerifyVariant, 1, \
+ "Choose variant (1,2) of verification following remark") \
+ \
+ product(uintx, CMSScheduleRemarkEdenSizeThreshold, 2*M, \
+ "If Eden used is below this value, don't try to schedule remark") \
+ \
+ product(uintx, CMSScheduleRemarkEdenPenetration, 50, \
+ "The Eden occupancy % at which to try and schedule remark pause") \
+ \
+ product(uintx, CMSScheduleRemarkSamplingRatio, 5, \
+ "Start sampling Eden top at least before yg occupancy reaches" \
+ " 1/<ratio> of the size at which we plan to schedule remark") \
+ \
+ product(uintx, CMSSamplingGrain, 16*K, \
+ "The minimum distance between eden samples for CMS (see above)") \
+ \
+ product(bool, CMSScavengeBeforeRemark, false, \
+ "Attempt scavenge before the CMS remark step") \
+ \
+ develop(bool, CMSTraceSweeper, false, \
+ "Trace some actions of the CMS sweeper") \
+ \
+ product(uintx, CMSWorkQueueDrainThreshold, 10, \
+ "Don't drain below this size per parallel worker/thief") \
+ \
+ product(intx, CMSWaitDuration, 2000, \
+ "Time in milliseconds that CMS thread waits for young GC") \
+ \
+ product(bool, CMSYield, true, \
+ "Yield between steps of concurrent mark & sweep") \
+ \
+ product(uintx, CMSBitMapYieldQuantum, 10*M, \
+ "Bitmap operations should process at most this many bits" \
+ "between yields") \
+ \
+ diagnostic(bool, FLSVerifyAllHeapReferences, false, \
+ "Verify that all refs across the FLS boundary " \
+ " are to valid objects") \
+ \
+ diagnostic(bool, FLSVerifyLists, false, \
+ "Do lots of (expensive) FreeListSpace verification") \
+ \
+ diagnostic(bool, FLSVerifyIndexTable, false, \
+ "Do lots of (expensive) FLS index table verification") \
+ \
+ develop(bool, FLSVerifyDictionary, false, \
+ "Do lots of (expensive) FLS dictionary verification") \
+ \
+ develop(bool, VerifyBlockOffsetArray, false, \
+ "Do (expensive!) block offset array verification") \
+ \
+ product(bool, BlockOffsetArrayUseUnallocatedBlock, trueInDebug, \
+ "Maintain _unallocated_block in BlockOffsetArray" \
+ " (currently applicable only to CMS collector)") \
+ \
+ develop(bool, TraceCMSState, false, \
+ "Trace the state of the CMS collection") \
+ \
+ product(intx, RefDiscoveryPolicy, 0, \
+ "Whether reference-based(0) or referent-based(1)") \
+ \
+ product(bool, ParallelRefProcEnabled, false, \
+ "Enable parallel reference processing whenever possible") \
+ \
+ product(bool, ParallelRefProcBalancingEnabled, true, \
+ "Enable balancing of reference processing queues") \
+ \
+ product(intx, CMSTriggerRatio, 80, \
+ "Percentage of MinHeapFreeRatio in CMS generation that is " \
+ " allocated before a CMS collection cycle commences") \
+ \
+ product(intx, CMSBootstrapOccupancy, 50, \
+ "Percentage CMS generation occupancy at which to " \
+ " initiate CMS collection for bootstrapping collection stats") \
+ \
+ product(intx, CMSInitiatingOccupancyFraction, -1, \
+ "Percentage CMS generation occupancy to start a CMS collection " \
+ " cycle (A negative value means that CMSTirggerRatio is used)") \
+ \
+ product(bool, UseCMSInitiatingOccupancyOnly, false, \
+ "Only use occupancy as a crierion for starting a CMS collection") \
+ \
+ develop(bool, CMSTestInFreeList, false, \
+ "Check if the coalesced range is already in the " \
+ "free lists as claimed.") \
+ \
+ notproduct(bool, CMSVerifyReturnedBytes, false, \
+ "Check that all the garbage collected was returned to the " \
+ "free lists.") \
+ \
+ notproduct(bool, ScavengeALot, false, \
+ "Force scavenge at every Nth exit from the runtime system " \
+ "(N=ScavengeALotInterval)") \
+ \
+ develop(bool, FullGCALot, false, \
+ "Force full gc at every Nth exit from the runtime system " \
+ "(N=FullGCALotInterval)") \
+ \
+ notproduct(bool, GCALotAtAllSafepoints, false, \
+ "Enforce ScavengeALot/GCALot at all potential safepoints") \
+ \
+ product(bool, HandlePromotionFailure, true, \
+ "The youngest generation collection does not require" \
+ " a guarantee of full promotion of all live objects.") \
+ \
+ notproduct(bool, PromotionFailureALot, false, \
+ "Use promotion failure handling on every youngest generation " \
+ "collection") \
+ \
+ develop(uintx, PromotionFailureALotCount, 1000, \
+ "Number of promotion failures occurring at ParGCAllocBuffer" \
+ "refill attempts (ParNew) or promotion attempts " \
+ "(other young collectors) ") \
+ \
+ develop(uintx, PromotionFailureALotInterval, 5, \
+ "Total collections between promotion failures alot") \
+ \
+ develop(intx, WorkStealingSleepMillis, 1, \
+ "Sleep time when sleep is used for yields") \
+ \
+ develop(uintx, WorkStealingYieldsBeforeSleep, 1000, \
+ "Number of yields before a sleep is done during workstealing") \
+ \
+ product(uintx, PreserveMarkStackSize, 40, \
+ "Size for stack used in promotion failure handling") \
+ \
+ product_pd(bool, UseTLAB, "Use thread-local object allocation") \
+ \
+ product_pd(bool, ResizeTLAB, \
+ "Dynamically resize tlab size for threads") \
+ \
+ product(bool, ZeroTLAB, false, \
+ "Zero out the newly created TLAB") \
+ \
+ product(bool, PrintTLAB, false, \
+ "Print various TLAB related information") \
+ \
+ product(bool, TLABStats, true, \
+ "Print various TLAB related information") \
+ \
+ product_pd(bool, NeverActAsServerClassMachine, \
+ "Never act like a server-class machine") \
+ \
+ product(bool, AlwaysActAsServerClassMachine, false, \
+ "Always act like a server-class machine") \
+ \
+ product_pd(uintx, DefaultMaxRAM, \
+ "Maximum real memory size for setting server class heap size") \
+ \
+ product(uintx, DefaultMaxRAMFraction, 4, \
+ "Fraction (1/n) of real memory used for server class max heap") \
+ \
+ product(uintx, DefaultInitialRAMFraction, 64, \
+ "Fraction (1/n) of real memory used for server class initial heap") \
+ \
+ product(bool, UseAutoGCSelectPolicy, false, \
+ "Use automatic collection selection policy") \
+ \
+ product(uintx, AutoGCSelectPauseMillis, 5000, \
+ "Automatic GC selection pause threshhold in ms") \
+ \
+ product(bool, UseAdaptiveSizePolicy, true, \
+ "Use adaptive generation sizing policies") \
+ \
+ product(bool, UsePSAdaptiveSurvivorSizePolicy, true, \
+ "Use adaptive survivor sizing policies") \
+ \
+ product(bool, UseAdaptiveGenerationSizePolicyAtMinorCollection, true, \
+ "Use adaptive young-old sizing policies at minor collections") \
+ \
+ product(bool, UseAdaptiveGenerationSizePolicyAtMajorCollection, true, \
+ "Use adaptive young-old sizing policies at major collections") \
+ \
+ product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \
+ "Use statistics from System.GC for adaptive size policy") \
+ \
+ product(bool, UseAdaptiveGCBoundary, false, \
+ "Allow young-old boundary to move") \
+ \
+ develop(bool, TraceAdaptiveGCBoundary, false, \
+ "Trace young-old boundary moves") \
+ \
+ develop(intx, PSAdaptiveSizePolicyResizeVirtualSpaceAlot, -1, \
+ "Resize the virtual spaces of the young or old generations") \
+ \
+ product(uintx, AdaptiveSizeThroughPutPolicy, 0, \
+ "Policy for changeing generation size for throughput goals") \
+ \
+ product(uintx, AdaptiveSizePausePolicy, 0, \
+ "Policy for changing generation size for pause goals") \
+ \
+ develop(bool, PSAdjustTenuredGenForMinorPause, false, \
+ "Adjust tenured generation to achive a minor pause goal") \
+ \
+ develop(bool, PSAdjustYoungGenForMajorPause, false, \
+ "Adjust young generation to achive a major pause goal") \
+ \
+ product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \
+ "Number of steps where heuristics is used before data is used") \
+ \
+ develop(uintx, AdaptiveSizePolicyReadyThreshold, 5, \
+ "Number of collections before the adaptive sizing is started") \
+ \
+ product(uintx, AdaptiveSizePolicyOutputInterval, 0, \
+ "Collecton interval for printing information, zero => never") \
+ \
+ product(bool, UseAdaptiveSizePolicyFootprintGoal, true, \
+ "Use adaptive minimum footprint as a goal") \
+ \
+ product(uintx, AdaptiveSizePolicyWeight, 10, \
+ "Weight given to exponential resizing, between 0 and 100") \
+ \
+ product(uintx, AdaptiveTimeWeight, 25, \
+ "Weight given to time in adaptive policy, between 0 and 100") \
+ \
+ product(uintx, PausePadding, 1, \
+ "How much buffer to keep for pause time") \
+ \
+ product(uintx, PromotedPadding, 3, \
+ "How much buffer to keep for promotion failure") \
+ \
+ product(uintx, SurvivorPadding, 3, \
+ "How much buffer to keep for survivor overflow") \
+ \
+ product(uintx, AdaptivePermSizeWeight, 20, \
+ "Weight for perm gen exponential resizing, between 0 and 100") \
+ \
+ product(uintx, PermGenPadding, 3, \
+ "How much buffer to keep for perm gen sizing") \
+ \
+ product(uintx, ThresholdTolerance, 10, \
+ "Allowed collection cost difference between generations") \
+ \
+ product(uintx, AdaptiveSizePolicyCollectionCostMargin, 50, \
+ "If collection costs are within margin, reduce both by full delta") \
+ \
+ product(uintx, YoungGenerationSizeIncrement, 20, \
+ "Adaptive size percentage change in young generation") \
+ \
+ product(uintx, YoungGenerationSizeSupplement, 80, \
+ "Supplement to YoungedGenerationSizeIncrement used at startup") \
+ \
+ product(uintx, YoungGenerationSizeSupplementDecay, 8, \
+ "Decay factor to YoungedGenerationSizeSupplement") \
+ \
+ product(uintx, TenuredGenerationSizeIncrement, 20, \
+ "Adaptive size percentage change in tenured generation") \
+ \
+ product(uintx, TenuredGenerationSizeSupplement, 80, \
+ "Supplement to TenuredGenerationSizeIncrement used at startup") \
+ \
+ product(uintx, TenuredGenerationSizeSupplementDecay, 2, \
+ "Decay factor to TenuredGenerationSizeIncrement") \
+ \
+ product(uintx, MaxGCPauseMillis, max_uintx, \
+ "Adaptive size policy maximum GC pause time goal in msec") \
+ \
+ product(uintx, MaxGCMinorPauseMillis, max_uintx, \
+ "Adaptive size policy maximum GC minor pause time goal in msec") \
+ \
+ product(uintx, GCTimeRatio, 99, \
+ "Adaptive size policy application time to GC time ratio") \
+ \
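+ /* Note (interpretation, not stated here): GCTimeRatio sets a GC */ \
+ /* time goal of roughly 1 / (1 + GCTimeRatio) of total time, so */ \
+ /* the default of 99 targets at most about 1% of time in GC. */ \
+ \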
+ product(uintx, AdaptiveSizeDecrementScaleFactor, 4, \
+ "Adaptive size scale down factor for shrinking") \
+ \
+ product(bool, UseAdaptiveSizeDecayMajorGCCost, true, \
+ "Adaptive size decays the major cost for long major intervals") \
+ \
+ product(uintx, AdaptiveSizeMajorGCDecayTimeScale, 10, \
+ "Time scale over which major costs decay") \
+ \
+ product(uintx, MinSurvivorRatio, 3, \
+ "Minimum ratio of young generation/survivor space size") \
+ \
+ product(uintx, InitialSurvivorRatio, 8, \
+ "Initial ratio of eden/survivor space size") \
+ \
+ product(uintx, BaseFootPrintEstimate, 256*M, \
+ "Estimate of footprint other than Java Heap") \
+ \
+ product(bool, UseGCOverheadLimit, true, \
+ "Use policy to limit of proportion of time spent in GC " \
+ "before an OutOfMemory error is thrown") \
+ \
+ product(uintx, GCTimeLimit, 98, \
+ "Limit of proportion of time spent in GC before an OutOfMemory" \
+ "error is thrown (used with GCHeapFreeLimit)") \
+ \
+ product(uintx, GCHeapFreeLimit, 2, \
+ "Minimum percentage of free space after a full GC before an " \
+ "OutOfMemoryError is thrown (used with GCTimeLimit)") \
+ \
+ develop(uintx, AdaptiveSizePolicyGCTimeLimitThreshold, 5, \
+ "Number of consecutive collections before gc time limit fires") \
+ \
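+ /* Illustrative reading of how these combine (an assumption, not */ \
+ /* spelled out here): with the defaults an OutOfMemoryError is */ \
+ /* raised only when GC consumes more than GCTimeLimit (98%) of */ \
+ /* total time AND less than GCHeapFreeLimit (2%) of the heap is */ \
+ /* free after a full GC, sustained for */ \
+ /* AdaptiveSizePolicyGCTimeLimitThreshold consecutive collections. */ \
+ \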
+ product(bool, PrintAdaptiveSizePolicy, false, \
+ "Print information about AdaptiveSizePolicy") \
+ \
+ product(intx, PrefetchCopyIntervalInBytes, -1, \
+ "How far ahead to prefetch destination area (<= 0 means off)") \
+ \
+ product(intx, PrefetchScanIntervalInBytes, -1, \
+ "How far ahead to prefetch scan area (<= 0 means off)") \
+ \
+ product(intx, PrefetchFieldsAhead, -1, \
+ "How many fields ahead to prefetch in oop scan (<= 0 means off)") \
+ \
+ develop(bool, UsePrefetchQueue, true, \
+ "Use the prefetch queue during PS promotion") \
+ \
+ diagnostic(bool, VerifyBeforeExit, trueInDebug, \
+ "Verify system before exiting") \
+ \
+ diagnostic(bool, VerifyBeforeGC, false, \
+ "Verify memory system before GC") \
+ \
+ diagnostic(bool, VerifyAfterGC, false, \
+ "Verify memory system after GC") \
+ \
+ diagnostic(bool, VerifyDuringGC, false, \
+ "Verify memory system during GC (between phases)") \
+ \
+ diagnostic(bool, VerifyRememberedSets, false, \
+ "Verify GC remembered sets") \
+ \
+ diagnostic(bool, VerifyObjectStartArray, true, \
+ "Verify GC object start array if verify before/after") \
+ \
+ product(bool, DisableExplicitGC, false, \
+ "Tells whether calling System.gc() does a full GC") \
+ \
+ notproduct(bool, CheckMemoryInitialization, false, \
+ "Checks memory initialization") \
+ \
+ product(bool, CollectGen0First, false, \
+ "Collect youngest generation before each full GC") \
+ \
+ diagnostic(bool, BindCMSThreadToCPU, false, \
+ "Bind CMS Thread to CPU if possible") \
+ \
+ diagnostic(uintx, CPUForCMSThread, 0, \
+ "When BindCMSThreadToCPU is true, the CPU to bind CMS thread to") \
+ \
+ product(bool, BindGCTaskThreadsToCPUs, false, \
+ "Bind GCTaskThreads to CPUs if possible") \
+ \
+ product(bool, UseGCTaskAffinity, false, \
+ "Use worker affinity when asking for GCTasks") \
+ \
+ product(uintx, ProcessDistributionStride, 4, \
+ "Stride through processors when distributing processes") \
+ \
+ product(uintx, CMSCoordinatorYieldSleepCount, 10, \
+ "number of times the coordinator GC thread will sleep while " \
+ "yielding before giving up and resuming GC") \
+ \
+ product(uintx, CMSYieldSleepCount, 0, \
+ "number of times a GC thread (minus the coordinator) " \
+ "will sleep while yielding before giving up and resuming GC") \
+ \
+ /* gc tracing */ \
+ manageable(bool, PrintGC, false, \
+ "Print message at garbage collect") \
+ \
+ manageable(bool, PrintGCDetails, false, \
+ "Print more details at garbage collect") \
+ \
+ manageable(bool, PrintGCDateStamps, false, \
+ "Print date stamps at garbage collect") \
+ \
+ manageable(bool, PrintGCTimeStamps, false, \
+ "Print timestamps at garbage collect") \
+ \
+ product(bool, PrintGCTaskTimeStamps, false, \
+ "Print timestamps for individual gc worker thread tasks") \
+ \
+ develop(intx, ConcGCYieldTimeout, 0, \
+ "If non-zero, assert that GC threads yield within this # of ms.") \
+ \
+ notproduct(bool, TraceMarkSweep, false, \
+ "Trace mark sweep") \
+ \
+ product(bool, PrintReferenceGC, false, \
+ "Print times spent handling reference objects during GC " \
+ " (enabled only when PrintGCDetails)") \
+ \
+ develop(bool, TraceReferenceGC, false, \
+ "Trace handling of soft/weak/final/phantom references") \
+ \
+ develop(bool, TraceFinalizerRegistration, false, \
+ "Trace registration of final references") \
+ \
+ notproduct(bool, TraceScavenge, false, \
+ "Trace scavenge") \
+ \
+ product_rw(bool, TraceClassLoading, false, \
+ "Trace all classes loaded") \
+ \
+ product(bool, TraceClassLoadingPreorder, false, \
+ "Trace all classes loaded in order referenced (not loaded)") \
+ \
+ product_rw(bool, TraceClassUnloading, false, \
+ "Trace unloading of classes") \
+ \
+ product_rw(bool, TraceLoaderConstraints, false, \
+ "Trace loader constraints") \
+ \
+ product(bool, TraceGen0Time, false, \
+ "Trace accumulated time for Gen 0 collection") \
+ \
+ product(bool, TraceGen1Time, false, \
+ "Trace accumulated time for Gen 1 collection") \
+ \
+ product(bool, PrintTenuringDistribution, false, \
+ "Print tenuring age information") \
+ \
+ product_rw(bool, PrintHeapAtGC, false, \
+ "Print heap layout before and after each GC") \
+ \
+ product(bool, PrintHeapAtSIGBREAK, true, \
+ "Print heap layout in response to SIGBREAK") \
+ \
+ manageable(bool, PrintClassHistogram, false, \
+ "Print a histogram of class instances") \
+ \
+ develop(bool, TraceWorkGang, false, \
+ "Trace activities of work gangs") \
+ \
+ product(bool, TraceParallelOldGCTasks, false, \
+ "Trace multithreaded GC activity") \
+ \
+ develop(bool, TraceBlockOffsetTable, false, \
+ "Print BlockOffsetTable maps") \
+ \
+ develop(bool, TraceCardTableModRefBS, false, \
+ "Print CardTableModRefBS maps") \
+ \
+ develop(bool, TraceGCTaskManager, false, \
+ "Trace actions of the GC task manager") \
+ \
+ develop(bool, TraceGCTaskQueue, false, \
+ "Trace actions of the GC task queues") \
+ \
+ develop(bool, TraceGCTaskThread, false, \
+ "Trace actions of the GC task threads") \
+ \
+ product(bool, PrintParallelOldGCPhaseTimes, false, \
+ "Print the time taken by each parallel old gc phase." \
+ "PrintGCDetails must also be enabled.") \
+ \
+ develop(bool, TraceParallelOldGCMarkingPhase, false, \
+ "Trace parallel old gc marking phase") \
+ \
+ develop(bool, TraceParallelOldGCSummaryPhase, false, \
+ "Trace parallel old gc summary phase") \
+ \
+ develop(bool, TraceParallelOldGCCompactionPhase, false, \
+ "Trace parallel old gc compaction phase") \
+ \
+ develop(bool, TraceParallelOldGCDensePrefix, false, \
+ "Trace parallel old gc dense prefix computation") \
+ \
+ develop(bool, IgnoreLibthreadGPFault, false, \
+ "Suppress workaround for libthread GP fault") \
+ \
+ /* JVMTI heap profiling */ \
+ \
+ diagnostic(bool, TraceJVMTIObjectTagging, false, \
+ "Trace JVMTI object tagging calls") \
+ \
+ diagnostic(bool, VerifyBeforeIteration, false, \
+ "Verify memory system before JVMTI iteration") \
+ \
+ /* compiler interface */ \
+ \
+ develop(bool, CIPrintCompilerName, false, \
+ "when CIPrint is active, print the name of the active compiler") \
+ \
+ develop(bool, CIPrintCompileQueue, false, \
+ "display the contents of the compile queue whenever a " \
+ "compilation is enqueued") \
+ \
+ develop(bool, CIPrintRequests, false, \
+ "display every request for compilation") \
+ \
+ product(bool, CITime, false, \
+ "collect timing information for compilation") \
+ \
+ develop(bool, CITimeEach, false, \
+ "display timing information after each successful compilation") \
+ \
+ develop(bool, CICountOSR, true, \
+ "use a separate counter when assigning ids to osr compilations") \
+ \
+ develop(bool, CICompileNatives, true, \
+ "compile native methods if supported by the compiler") \
+ \
+ develop_pd(bool, CICompileOSR, \
+ "compile on stack replacement methods if supported by the " \
+ "compiler") \
+ \
+ develop(bool, CIPrintMethodCodes, false, \
+ "print method bytecodes of the compiled code") \
+ \
+ develop(bool, CIPrintTypeFlow, false, \
+ "print the results of ciTypeFlow analysis") \
+ \
+ develop(bool, CITraceTypeFlow, false, \
+ "detailed per-bytecode tracing of ciTypeFlow analysis") \
+ \
+ develop(intx, CICloneLoopTestLimit, 100, \
+ "size limit for blocks heuristically cloned in ciTypeFlow") \
+ \
+ /* temp diagnostics */ \
+ \
+ diagnostic(bool, TraceRedundantCompiles, false, \
+ "Have compile broker print when a request already in the queue is"\
+ " requested again") \
+ \
+ diagnostic(bool, InitialCompileFast, false, \
+ "Initial compile at CompLevel_fast_compile") \
+ \
+ diagnostic(bool, InitialCompileReallyFast, false, \
+ "Initial compile at CompLevel_really_fast_compile (no profile)") \
+ \
+ diagnostic(bool, FullProfileOnReInterpret, true, \
+ "On re-interpret unc-trap compile next at CompLevel_fast_compile")\
+ \
+ /* compiler */ \
+ \
+ product(intx, CICompilerCount, CI_COMPILER_COUNT, \
+ "Number of compiler threads to run") \
+ \
+ product(intx, CompilationPolicyChoice, 0, \
+ "which compilation policy (0/1)") \
+ \
+ develop(bool, UseStackBanging, true, \
+ "use stack banging for stack overflow checks (required for " \
+ "proper StackOverflow handling; disable only to measure cost " \
+ "of stackbanging)") \
+ \
+ develop(bool, Use24BitFPMode, true, \
+ "Set 24-bit FPU mode on a per-compile basis ") \
+ \
+ develop(bool, Use24BitFP, true, \
+ "use FP instructions that produce 24-bit precise results") \
+ \
+ develop(bool, UseStrictFP, true, \
+ "use strict fp if modifier strictfp is set") \
+ \
+ develop(bool, GenerateSynchronizationCode, true, \
+ "generate locking/unlocking code for synchronized methods and " \
+ "monitors") \
+ \
+ develop(bool, GenerateCompilerNullChecks, true, \
+ "Generate explicit null checks for loads/stores/calls") \
+ \
+ develop(bool, GenerateRangeChecks, true, \
+ "Generate range checks for array accesses") \
+ \
+ develop_pd(bool, ImplicitNullChecks, \
+ "generate code for implicit null checks") \
+ \
+ product(bool, PrintSafepointStatistics, false, \
+ "print statistics about safepoint synchronization") \
+ \
+ product(intx, PrintSafepointStatisticsCount, 300, \
+ "total number of safepoint statistics collected " \
+ "before printing them out") \
+ \
+ product(intx, PrintSafepointStatisticsTimeout, -1, \
+ "print safepoint statistics only when safepoint takes" \
+ " more than PrintSafepointSatisticsTimeout in millis") \
+ \
+ develop(bool, InlineAccessors, true, \
+ "inline accessor methods (get/set)") \
+ \
+ product(bool, Inline, true, \
+ "enable inlining") \
+ \
+ product(bool, ClipInlining, true, \
+ "clip inlining if aggregate method exceeds DesiredMethodLimit") \
+ \
+ develop(bool, UseCHA, true, \
+ "enable CHA") \
+ \
+ product(bool, UseTypeProfile, true, \
+ "Check interpreter profile for historically monomorphic calls") \
+ \
+ product(intx, TypeProfileMajorReceiverPercent, 90, \
+ "% of major receiver type to all profiled receivers") \
+ \
+ notproduct(bool, TimeCompiler, false, \
+ "time the compiler") \
+ \
+ notproduct(bool, TimeCompiler2, false, \
+ "detailed time the compiler (requires +TimeCompiler)") \
+ \
+ diagnostic(bool, PrintInlining, false, \
+ "prints inlining optimizations") \
+ \
+ diagnostic(bool, PrintIntrinsics, false, \
+ "prints attempted and successful inlining of intrinsics") \
+ \
+ diagnostic(ccstrlist, DisableIntrinsic, "", \
+ "do not expand intrinsics whose (internal) names appear here") \
+ \
+ develop(bool, StressReflectiveCode, false, \
+ "Use inexact types at allocations, etc., to test reflection") \
+ \
+ develop(bool, EagerInitialization, false, \
+ "Eagerly initialize classes if possible") \
+ \
+ product(bool, Tier1UpdateMethodData, trueInTiered, \
+ "Update methodDataOops in Tier1-generated code") \
+ \
+ develop(bool, TraceMethodReplacement, false, \
+ "Print when methods are replaced do to recompilation") \
+ \
+ develop(bool, PrintMethodFlushing, false, \
+ "print the nmethods being flushed") \
+ \
+ notproduct(bool, LogMultipleMutexLocking, false, \
+ "log locking and unlocking of mutexes (only if multiple locks " \
+ "are held)") \
+ \
+ develop(bool, UseRelocIndex, false, \
+ "use an index to speed random access to relocations") \
+ \
+ develop(bool, StressCodeBuffers, false, \
+ "Exercise code buffer expansion and other rare state changes") \
+ \
+ diagnostic(bool, DebugNonSafepoints, trueInDebug, \
+ "Generate extra debugging info for non-safepoints in nmethods") \
+ \
+ diagnostic(bool, DebugInlinedCalls, true, \
+ "If false, restricts profiled locations to the root method only") \
+ \
+ product(bool, PrintVMOptions, trueInDebug, \
+ "print VM flag settings") \
+ \
+ diagnostic(bool, SerializeVMOutput, true, \
+ "Use a mutex to serialize output to tty and hotspot.log") \
+ \
+ diagnostic(bool, DisplayVMOutput, true, \
+ "Display all VM output on the tty, independently of LogVMOutput") \
+ \
+ diagnostic(bool, LogVMOutput, trueInDebug, \
+ "Save VM output to hotspot.log, or to LogFile") \
+ \
+ diagnostic(ccstr, LogFile, NULL, \
+ "If LogVMOutput is on, save VM output to this file [hotspot.log]") \
+ \
+ product(ccstr, ErrorFile, NULL, \
+ "If an error occurs, save the error data to this file " \
+ "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \
+ \
+ product(bool, DisplayVMOutputToStderr, false, \
+ "If DisplayVMOutput is true, display all VM output to stderr") \
+ \
+ product(bool, DisplayVMOutputToStdout, false, \
+ "If DisplayVMOutput is true, display all VM output to stdout") \
+ \
+ product(bool, UseHeavyMonitors, false, \
+ "use heavyweight instead of lightweight Java monitors") \
+ \
+ notproduct(bool, PrintSymbolTableSizeHistogram, false, \
+ "print histogram of the symbol table") \
+ \
+ notproduct(bool, ExitVMOnVerifyError, false, \
+ "standard exit from VM if bytecode verify error " \
+ "(only in debug mode)") \
+ \
+ notproduct(ccstr, AbortVMOnException, NULL, \
+ "Call fatal if this exception is thrown. Example: " \
+ "java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \
+ \
+ develop(bool, DebugVtables, false, \
+ "add debugging code to vtable dispatch") \
+ \
+ develop(bool, PrintVtables, false, \
+ "print vtables when printing klass") \
+ \
+ notproduct(bool, PrintVtableStats, false, \
+ "print vtables stats at end of run") \
+ \
+ develop(bool, TraceCreateZombies, false, \
+ "trace creation of zombie nmethods") \
+ \
+ notproduct(bool, IgnoreLockingAssertions, false, \
+ "disable locking assertions (for speed)") \
+ \
+ notproduct(bool, VerifyLoopOptimizations, false, \
+ "verify major loop optimizations") \
+ \
+ product(bool, RangeCheckElimination, true, \
+ "Split loop iterations to eliminate range checks") \
+ \
+ develop_pd(bool, UncommonNullCast, \
+ "track occurrences of null in casts; adjust compiler tactics") \
+ \
+ develop(bool, TypeProfileCasts, true, \
+ "treat casts like calls for purposes of type profiling") \
+ \
+ develop(bool, MonomorphicArrayCheck, true, \
+ "Uncommon-trap array store checks that require full type check") \
+ \
+ develop(bool, DelayCompilationDuringStartup, true, \
+ "Delay invoking the compiler until main application class is " \
+ "loaded") \
+ \
+ develop(bool, CompileTheWorld, false, \
+ "Compile all methods in all classes in bootstrap class path " \
+ "(stress test)") \
+ \
+ develop(bool, CompileTheWorldPreloadClasses, true, \
+ "Preload all classes used by a class before start loading") \
+ \
+ notproduct(bool, CompileTheWorldIgnoreInitErrors, false, \
+ "Compile all methods although class initializer failed") \
+ \
+ develop(bool, TraceIterativeGVN, false, \
+ "Print progress during Iterative Global Value Numbering") \
+ \
+ develop(bool, FillDelaySlots, true, \
+ "Fill delay slots (on SPARC only)") \
+ \
+ develop(bool, VerifyIterativeGVN, false, \
+ "Verify Def-Use modifications during sparse Iterative Global " \
+ "Value Numbering") \
+ \
+ notproduct(bool, TracePhaseCCP, false, \
+ "Print progress during Conditional Constant Propagation") \
+ \
+ develop(bool, TimeLivenessAnalysis, false, \
+ "Time computation of bytecode liveness analysis") \
+ \
+ develop(bool, TraceLivenessGen, false, \
+ "Trace the generation of liveness analysis information") \
+ \
+ notproduct(bool, TraceLivenessQuery, false, \
+ "Trace queries of liveness analysis information") \
+ \
+ notproduct(bool, CollectIndexSetStatistics, false, \
+ "Collect information about IndexSets") \
+ \
+ develop(bool, PrintDominators, false, \
+ "Print out dominator trees for GVN") \
+ \
+ develop(bool, UseLoopSafepoints, true, \
+ "Generate Safepoint nodes in every loop") \
+ \
+ notproduct(bool, TraceCISCSpill, false, \
+ "Trace allocators use of cisc spillable instructions") \
+ \
+ notproduct(bool, TraceSpilling, false, \
+ "Trace spilling") \
+ \
+ develop(bool, DeutschShiffmanExceptions, true, \
+ "Fast check to find exception handler for precisely typed " \
+ "exceptions") \
+ \
+ product(bool, SplitIfBlocks, true, \
+ "Clone compares and control flow through merge points to fold " \
+ "some branches") \
+ \
+ develop(intx, FastAllocateSizeLimit, 128*K, \
+ /* Note: This value is zero mod 1<<13 for a cheap sparc set. */ \
+ "Inline allocations larger than this in doublewords must go slow")\
+ \
+ product(bool, AggressiveOpts, false, \
+ "Enable aggressive optimizations - see arguments.cpp") \
+ \
+ /* statistics */ \
+ develop(bool, UseVTune, false, \
+ "enable support for Intel's VTune profiler") \
+ \
+ develop(bool, CountCompiledCalls, false, \
+ "counts method invocations") \
+ \
+ notproduct(bool, CountRuntimeCalls, false, \
+ "counts VM runtime calls") \
+ \
+ develop(bool, CountJNICalls, false, \
+ "counts jni method invocations") \
+ \
+ notproduct(bool, CountJVMCalls, false, \
+ "counts jvm method invocations") \
+ \
+ notproduct(bool, CountRemovableExceptions, false, \
+ "count exceptions that could be replaced by branches due to " \
+ "inlining") \
+ \
+ notproduct(bool, ICMissHistogram, false, \
+ "produce histogram of IC misses") \
+ \
+ notproduct(bool, PrintClassStatistics, false, \
+ "prints class statistics at end of run") \
+ \
+ notproduct(bool, PrintMethodStatistics, false, \
+ "prints method statistics at end of run") \
+ \
+ /* interpreter */ \
+ develop(bool, ClearInterpreterLocals, false, \
+ "Always clear local variables of interpreter activations upon " \
+ "entry") \
+ \
+ product_pd(bool, RewriteBytecodes, \
+ "Allow rewriting of bytecodes (bytecodes are not immutable)") \
+ \
+ product_pd(bool, RewriteFrequentPairs, \
+ "Rewrite frequently used bytecode pairs into a single bytecode") \
+ \
+ product(bool, PrintInterpreter, false, \
+ "Prints the generated interpreter code") \
+ \
+ product(bool, UseInterpreter, true, \
+ "Use interpreter for non-compiled methods") \
+ \
+ develop(bool, UseFastSignatureHandlers, true, \
+ "Use fast signature handlers for native calls") \
+ \
+ develop(bool, UseV8InstrsOnly, false, \
+ "Use SPARC-V8 Compliant instruction subset") \
+ \
+ product(bool, UseNiagaraInstrs, false, \
+ "Use Niagara-efficient instruction subset") \
+ \
+ develop(bool, UseCASForSwap, false, \
+ "Do not use swap instructions, but only CAS (in a loop) on SPARC")\
+ \
+ product(bool, UseLoopCounter, true, \
+ "Increment invocation counter on backward branch") \
+ \
+ product(bool, UseFastEmptyMethods, true, \
+ "Use fast method entry code for empty methods") \
+ \
+ product(bool, UseFastAccessorMethods, true, \
+ "Use fast method entry code for accessor methods") \
+ \
+ product_pd(bool, UseOnStackReplacement, \
+ "Use on stack replacement, calls runtime if invoc. counter " \
+ "overflows in loop") \
+ \
+ notproduct(bool, TraceOnStackReplacement, false, \
+ "Trace on stack replacement") \
+ \
+ develop(bool, PoisonOSREntry, true, \
+ "Detect abnormal calls to OSR code") \
+ \
+ product_pd(bool, PreferInterpreterNativeStubs, \
+ "Use always interpreter stubs for native methods invoked via " \
+ "interpreter") \
+ \
+ develop(bool, CountBytecodes, false, \
+ "Count number of bytecodes executed") \
+ \
+ develop(bool, PrintBytecodeHistogram, false, \
+ "Print histogram of the executed bytecodes") \
+ \
+ develop(bool, PrintBytecodePairHistogram, false, \
+ "Print histogram of the executed bytecode pairs") \
+ \
+ develop(bool, PrintSignatureHandlers, false, \
+ "Print code generated for native method signature handlers") \
+ \
+ develop(bool, VerifyOops, false, \
+ "Do plausibility checks for oops") \
+ \
+ develop(bool, CheckUnhandledOops, false, \
+ "Check for unhandled oops in VM code") \
+ \
+ develop(bool, VerifyJNIFields, trueInDebug, \
+ "Verify jfieldIDs for instance fields") \
+ \
+ notproduct(bool, VerifyJNIEnvThread, false, \
+ "Verify JNIEnv.thread == Thread::current() when entering VM " \
+ "from JNI") \
+ \
+ develop(bool, VerifyFPU, false, \
+ "Verify FPU state (check for NaN's, etc.)") \
+ \
+ develop(bool, VerifyThread, false, \
+ "Watch the thread register for corruption (SPARC only)") \
+ \
+ develop(bool, VerifyActivationFrameSize, false, \
+ "Verify that activation frame didn't become smaller than its " \
+ "minimal size") \
+ \
+ develop(bool, TraceFrequencyInlining, false, \
+ "Trace frequency based inlining") \
+ \
+ notproduct(bool, TraceTypeProfile, false, \
+ "Trace type profile") \
+ \
+ develop_pd(bool, InlineIntrinsics, \
+ "Inline intrinsics that can be statically resolved") \
+ \
+ product_pd(bool, ProfileInterpreter, \
+ "Profile at the bytecode level during interpretation") \
+ \
+ develop_pd(bool, ProfileTraps, \
+ "Profile deoptimization traps at the bytecode level") \
+ \
+ product(intx, ProfileMaturityPercentage, 20, \
+ "number of method invocations/branches (expressed as % of " \
+ "CompileThreshold) before using the method's profile") \
+ \
+ develop(bool, PrintMethodData, false, \
+ "Print the results of +ProfileInterpreter at end of run") \
+ \
+ develop(bool, VerifyDataPointer, trueInDebug, \
+ "Verify the method data pointer during interpreter profiling") \
+ \
+ develop(bool, VerifyCompiledCode, false, \
+ "Include miscellaneous runtime verifications in nmethod code; " \
+ "off by default because it disturbs nmethod size heuristics.") \
+ \
+ \
+ /* compilation */ \
+ product(bool, UseCompiler, true, \
+ "use compilation") \
+ \
+ develop(bool, TraceCompilationPolicy, false, \
+ "Trace compilation policy") \
+ \
+ develop(bool, TimeCompilationPolicy, false, \
+ "Time the compilation policy") \
+ \
+ product(bool, UseCounterDecay, true, \
+ "adjust recompilation counters") \
+ \
+ develop(intx, CounterHalfLifeTime, 30, \
+ "half-life time of invocation counters (in secs)") \
+ \
+ develop(intx, CounterDecayMinIntervalLength, 500, \
+ "Min. ms. between invocation of CounterDecay") \
+ \
+ product(bool, AlwaysCompileLoopMethods, false, \
+ "when using recompilation, never interpret methods " \
+ "containing loops") \
+ \
+ product(bool, DontCompileHugeMethods, true, \
+ "don't compile methods > HugeMethodLimit") \
+ \
+ /* Bytecode escape analysis estimation. */ \
+ product(bool, EstimateArgEscape, true, \
+ "Analyze bytecodes to estimate escape state of arguments") \
+ \
+ product(intx, BCEATraceLevel, 0, \
+ "How much tracing to do of bytecode escape analysis estimates") \
+ \
+ product(intx, MaxBCEAEstimateLevel, 5, \
+ "Maximum number of nested calls that are analyzed by BC EA.") \
+ \
+ product(intx, MaxBCEAEstimateSize, 150, \
+ "Maximum bytecode size of a method to be analyzed by BC EA.") \
+ \
+ product(intx, AllocatePrefetchStyle, 1, \
+ "0 = no prefetch, " \
+ "1 = prefetch instructions for each allocation, " \
+ "2 = use TLAB watermark to gate allocation prefetch") \
+ \
+ product(intx, AllocatePrefetchDistance, -1, \
+ "Distance to prefetch ahead of allocation pointer") \
+ \
+ product(intx, AllocatePrefetchLines, 1, \
+ "Number of lines to prefetch ahead of allocation pointer") \
+ \
+ product(intx, AllocatePrefetchStepSize, 16, \
+ "Step size in bytes of sequential prefetch instructions") \
+ \
+ product(intx, AllocatePrefetchInstr, 0, \
+ "Prefetch instruction to prefetch ahead of allocation pointer") \
+ \
+ product(intx, ReadPrefetchInstr, 0, \
+ "Prefetch instruction to prefetch ahead") \
+ \
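+ /* Sketch of how the allocation prefetch flags interact (an */ \
+ /* illustrative reading of the descriptions above): with style 1 */ \
+ /* the compiler emits AllocatePrefetchLines prefetches starting */ \
+ /* AllocatePrefetchDistance bytes past the allocation pointer, */ \
+ /* spaced AllocatePrefetchStepSize bytes apart; style 2 instead */ \
+ /* gates prefetching on a TLAB watermark. */ \
+ \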
+ /* deoptimization */ \
+ develop(bool, TraceDeoptimization, false, \
+ "Trace deoptimization") \
+ \
+ develop(bool, DebugDeoptimization, false, \
+ "Tracing various information while debugging deoptimization") \
+ \
+ product(intx, SelfDestructTimer, 0, \
+ "Will cause VM to terminate after a given time (in minutes) " \
+ "(0 means off)") \
+ \
+ product(intx, MaxJavaStackTraceDepth, 1024, \
+ "Max. no. of lines in the stack trace for Java exceptions " \
+ "(0 means all)") \
+ \
+ develop(intx, GuaranteedSafepointInterval, 1000, \
+ "Guarantee a safepoint (at least) every so many milliseconds " \
+ "(0 means none)") \
+ \
+ product(intx, SafepointTimeoutDelay, 10000, \
+ "Delay in milliseconds for option SafepointTimeout") \
+ \
+ product(intx, NmethodSweepFraction, 4, \
+ "Number of invocations of sweeper to cover all nmethods") \
+ \
+ notproduct(intx, MemProfilingInterval, 500, \
+ "Time between each invocation of the MemProfiler") \
+ \
+ develop(intx, MallocCatchPtr, -1, \
+ "Hit breakpoint when mallocing/freeing this pointer") \
+ \
+ notproduct(intx, AssertRepeat, 1, \
+ "number of times to evaluate expression in assert " \
+ "(to estimate overhead); only works with -DUSE_REPEATED_ASSERTS") \
+ \
+ notproduct(ccstrlist, SuppressErrorAt, "", \
+ "List of assertions (file:line) to muzzle") \
+ \
+ notproduct(uintx, HandleAllocationLimit, 1024, \
+ "Threshold for HandleMark allocation when +TraceHandleAllocation "\
+ "is used") \
+ \
+ develop(uintx, TotalHandleAllocationLimit, 1024, \
+ "Threshold for total handle allocation when " \
+ "+TraceHandleAllocation is used") \
+ \
+ develop(intx, StackPrintLimit, 100, \
+ "number of stack frames to print in VM-level stack dump") \
+ \
+ notproduct(intx, MaxElementPrintSize, 256, \
+ "maximum number of elements to print") \
+ \
+ notproduct(intx, MaxSubklassPrintSize, 4, \
+ "maximum number of subklasses to print when printing klass") \
+ \
+ develop(intx, MaxInlineLevel, 9, \
+ "maximum number of nested calls that are inlined") \
+ \
+ develop(intx, MaxRecursiveInlineLevel, 1, \
+ "maximum number of nested recursive calls that are inlined") \
+ \
+ develop(intx, InlineSmallCode, 1000, \
+ "Only inline already compiled methods if their code size is " \
+ "less than this") \
+ \
+ product(intx, MaxInlineSize, 35, \
+ "maximum bytecode size of a method to be inlined") \
+ \
+ product_pd(intx, FreqInlineSize, \
+ "maximum bytecode size of a frequent method to be inlined") \
+ \
+ develop(intx, MaxTrivialSize, 6, \
+ "maximum bytecode size of a trivial method to be inlined") \
+ \
+ develop(intx, MinInliningThreshold, 250, \
+ "min. invocation count a method needs to have to be inlined") \
+ \
+ develop(intx, AlignEntryCode, 4, \
+ "aligns entry code to specified value (in bytes)") \
+ \
+ develop(intx, MethodHistogramCutoff, 100, \
+ "cutoff value for method invoc. histogram (+CountCalls)") \
+ \
+ develop(intx, ProfilerNumberOfInterpretedMethods, 25, \
+ "# of interpreted methods to show in profile") \
+ \
+ develop(intx, ProfilerNumberOfCompiledMethods, 25, \
+ "# of compiled methods to show in profile") \
+ \
+ develop(intx, ProfilerNumberOfStubMethods, 25, \
+ "# of stub methods to show in profile") \
+ \
+ develop(intx, ProfilerNumberOfRuntimeStubNodes, 25, \
+ "# of runtime stub nodes to show in profile") \
+ \
+ product(intx, ProfileIntervalsTicks, 100, \
+ "# of ticks between printing of interval profile " \
+ "(+ProfileIntervals)") \
+ \
+ notproduct(intx, ScavengeALotInterval, 1, \
+ "Interval between which scavenge will occur with +ScavengeALot") \
+ \
+ notproduct(intx, FullGCALotInterval, 1, \
+ "Interval between which full gc will occur with +FullGCALot") \
+ \
+ notproduct(intx, FullGCALotStart, 0, \
+ "For which invocation to start FullGCAlot") \
+ \
+ notproduct(intx, FullGCALotDummies, 32*K, \
+ "Dummy object allocated with +FullGCALot, forcing all objects " \
+ "to move") \
+ \
+ develop(intx, DontYieldALotInterval, 10, \
+ "Interval between which yields will be dropped (milliseconds)") \
+ \
+ develop(intx, MinSleepInterval, 1, \
+ "Minimum sleep() interval (milliseconds) when " \
+ "ConvertSleepToYield is off (used for SOLARIS)") \
+ \
+ product(intx, EventLogLength, 2000, \
+          "maximum number of events in the event log")                      \
+ \
+ develop(intx, ProfilerPCTickThreshold, 15, \
+          "Number of ticks in a PC bucket to be a hotspot")                 \
+ \
+ notproduct(intx, DeoptimizeALotInterval, 5, \
+ "Number of exits until DeoptimizeALot kicks in") \
+ \
+ notproduct(intx, ZombieALotInterval, 5, \
+ "Number of exits until ZombieALot kicks in") \
+ \
+ develop(bool, StressNonEntrant, false, \
+ "Mark nmethods non-entrant at registration") \
+ \
+ diagnostic(intx, MallocVerifyInterval, 0, \
+ "if non-zero, verify C heap after every N calls to " \
+ "malloc/realloc/free") \
+ \
+ diagnostic(intx, MallocVerifyStart, 0, \
+ "if non-zero, start verifying C heap after Nth call to " \
+ "malloc/realloc/free") \
+ \
+ product(intx, TypeProfileWidth, 2, \
+ "number of receiver types to record in call/cast profile") \
+ \
+ develop(intx, BciProfileWidth, 2, \
+ "number of return bci's to record in ret profile") \
+ \
+ product(intx, PerMethodRecompilationCutoff, 400, \
+ "After recompiling N times, stay in the interpreter (-1=>'Inf')") \
+ \
+ product(intx, PerBytecodeRecompilationCutoff, 100, \
+ "Per-BCI limit on repeated recompilation (-1=>'Inf')") \
+ \
+ product(intx, PerMethodTrapLimit, 100, \
+ "Limit on traps (of one kind) in a method (includes inlines)") \
+ \
+ product(intx, PerBytecodeTrapLimit, 4, \
+ "Limit on traps (of one kind) at a particular BCI") \
+ \
+ develop(intx, FreqCountInvocations, 1, \
+ "Scaling factor for branch frequencies (deprecated)") \
+ \
+ develop(intx, InlineFrequencyRatio, 20, \
+ "Ratio of call site execution to caller method invocation") \
+ \
+ develop_pd(intx, InlineFrequencyCount, \
+ "Count of call site execution necessary to trigger frequent " \
+ "inlining") \
+ \
+ develop(intx, InlineThrowCount, 50, \
+ "Force inlining of interpreted methods that throw this often") \
+ \
+ develop(intx, InlineThrowMaxSize, 200, \
+ "Force inlining of throwing methods smaller than this") \
+ \
+ product(intx, AliasLevel, 3, \
+ "0 for no aliasing, 1 for oop/field/static/array split, " \
+ "2 for class split, 3 for unique instances") \
+ \
+ develop(bool, VerifyAliases, false, \
+ "perform extra checks on the results of alias analysis") \
+ \
+ develop(intx, ProfilerNodeSize, 1024, \
+ "Size in K to allocate for the Profile Nodes of each thread") \
+ \
+ develop(intx, V8AtomicOperationUnderLockSpinCount, 50, \
+ "Number of times to spin wait on a v8 atomic operation lock") \
+ \
+ product(intx, ReadSpinIterations, 100, \
+ "Number of read attempts before a yield (spin inner loop)") \
+ \
+ product_pd(intx, PreInflateSpin, \
+ "Number of times to spin wait before inflation") \
+ \
+ product(intx, PreBlockSpin, 10, \
+ "Number of times to spin in an inflated lock before going to " \
+ "an OS lock") \
+ \
+ /* gc parameters */ \
+ product(uintx, MaxHeapSize, ScaleForWordSize(64*M), \
+ "Default maximum size for object heap (in bytes)") \
+ \
+ product_pd(uintx, NewSize, \
+ "Default size of new generation (in bytes)") \
+ \
+ product(uintx, MaxNewSize, max_uintx, \
+ "Maximum size of new generation (in bytes)") \
+ \
+ product(uintx, PretenureSizeThreshold, 0, \
+ "Max size in bytes of objects allocated in DefNew generation") \
+ \
+ product_pd(uintx, TLABSize, \
+ "Default (or starting) size of TLAB (in bytes)") \
+ \
+ product(uintx, MinTLABSize, 2*K, \
+ "Minimum allowed TLAB size (in bytes)") \
+ \
+ product(uintx, TLABAllocationWeight, 35, \
+ "Allocation averaging weight") \
+ \
+ product(uintx, TLABWasteTargetPercent, 1, \
+ "Percentage of Eden that can be wasted") \
+ \
+ product(uintx, TLABRefillWasteFraction, 64, \
+ "Max TLAB waste at a refill (internal fragmentation)") \
+ \
+ product(uintx, TLABWasteIncrement, 4, \
+ "Increment allowed waste at slow allocation") \
+ \
+ product_pd(intx, SurvivorRatio, \
+ "Ratio of eden/survivor space size") \
+ \
+ product_pd(intx, NewRatio, \
+ "Ratio of new/old generation sizes") \
+ \
+ product(uintx, MaxLiveObjectEvacuationRatio, 100, \
+ "Max percent of eden objects that will be live at scavenge") \
+ \
+ product_pd(uintx, NewSizeThreadIncrease, \
+ "Additional size added to desired new generation size per " \
+ "non-daemon thread (in bytes)") \
+ \
+ product(uintx, OldSize, ScaleForWordSize(4096*K), \
+ "Default size of tenured generation (in bytes)") \
+ \
+ product_pd(uintx, PermSize, \
+ "Default size of permanent generation (in bytes)") \
+ \
+ product_pd(uintx, MaxPermSize, \
+ "Maximum size of permanent generation (in bytes)") \
+ \
+ product(uintx, MinHeapFreeRatio, 40, \
+ "Min percentage of heap free after GC to avoid expansion") \
+ \
+ product(uintx, MaxHeapFreeRatio, 70, \
+ "Max percentage of heap free after GC to avoid shrinking") \
+ \
+ product(intx, SoftRefLRUPolicyMSPerMB, 1000, \
+ "Number of milliseconds per MB of free space in the heap") \
+ \
+ product(uintx, MinHeapDeltaBytes, ScaleForWordSize(128*K), \
+ "Min change in heap space due to GC (in bytes)") \
+ \
+ product(uintx, MinPermHeapExpansion, ScaleForWordSize(256*K), \
+ "Min expansion of permanent heap (in bytes)") \
+ \
+ product(uintx, MaxPermHeapExpansion, ScaleForWordSize(4*M), \
+ "Max expansion of permanent heap without full GC (in bytes)") \
+ \
+ product(intx, QueuedAllocationWarningCount, 0, \
+ "Number of times an allocation that queues behind a GC " \
+ "will retry before printing a warning") \
+ \
+ diagnostic(uintx, VerifyGCStartAt, 0, \
+ "GC invoke count where +VerifyBefore/AfterGC kicks in") \
+ \
+ diagnostic(intx, VerifyGCLevel, 0, \
+ "Generation level at which to start +VerifyBefore/AfterGC") \
+ \
+ develop(uintx, ExitAfterGCNum, 0, \
+ "If non-zero, exit after this GC.") \
+ \
+ product(intx, MaxTenuringThreshold, 15, \
+ "Maximum value for tenuring threshold") \
+ \
+ product(intx, InitialTenuringThreshold, 7, \
+ "Initial value for tenuring threshold") \
+ \
+ product(intx, TargetSurvivorRatio, 50, \
+ "Desired percentage of survivor space used after scavenge") \
+ \
+ product(intx, MarkSweepDeadRatio, 5, \
+          "Percentage (0-100) of the old gen allowed as dead wood. "        \
+          "Serial mark sweep treats this as both the min and max value. "   \
+          "CMS uses this value only if it falls back to mark sweep. "       \
+          "Par compact uses a variable scale based on the density of the "  \
+          "generation and treats this as the max value when the heap is "   \
+          "either completely full or completely empty. Par compact also "   \
+          "has a smaller default value; see arguments.cpp.")                \
+ \
+ product(intx, PermMarkSweepDeadRatio, 20, \
+          "Percentage (0-100) of the perm gen allowed as dead wood. "       \
+ "See MarkSweepDeadRatio for collector-specific comments.") \
+ \
+ product(intx, MarkSweepAlwaysCompactCount, 4, \
+ "How often should we fully compact the heap (ignoring the dead " \
+ "space parameters)") \
+ \
+ product(intx, PrintCMSStatistics, 0, \
+ "Statistics for CMS") \
+ \
+ product(bool, PrintCMSInitiationStatistics, false, \
+ "Statistics for initiating a CMS collection") \
+ \
+ product(intx, PrintFLSStatistics, 0, \
+ "Statistics for CMS' FreeListSpace") \
+ \
+ product(intx, PrintFLSCensus, 0, \
+ "Census for CMS' FreeListSpace") \
+ \
+ develop(uintx, GCExpandToAllocateDelayMillis, 0, \
+ "Delay in ms between expansion and allocation") \
+ \
+ product(intx, DeferThrSuspendLoopCount, 4000, \
+ "(Unstable) Number of times to iterate in safepoint loop " \
+          "before blocking VM threads")                                     \
+ \
+ product(intx, DeferPollingPageLoopCount, -1, \
+ "(Unsafe,Unstable) Number of iterations in safepoint loop " \
+ "before changing safepoint polling page to RO ") \
+ \
+ product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \
+ \
+ product(bool, UseDepthFirstScavengeOrder, true, \
+ "true: the scavenge order will be depth-first, " \
+ "false: the scavenge order will be breadth-first") \
+ \
+ product(bool, PSChunkLargeArrays, true, \
+ "true: process large arrays in chunks") \
+ \
+ product(uintx, GCDrainStackTargetSize, 64, \
+ "how many entries we'll try to leave on the stack during " \
+ "parallel GC") \
+ \
+ /* stack parameters */ \
+ product_pd(intx, StackYellowPages, \
+ "Number of yellow zone (recoverable overflows) pages") \
+ \
+ product_pd(intx, StackRedPages, \
+ "Number of red zone (unrecoverable overflows) pages") \
+ \
+ product_pd(intx, StackShadowPages, \
+          "Number of shadow zone (for overflow checking) pages; "           \
+          "this should exceed the depth of the VM and native call stack")   \
+ \
+ product_pd(intx, ThreadStackSize, \
+ "Thread Stack Size (in Kbytes)") \
+ \
+ product_pd(intx, VMThreadStackSize, \
+ "Non-Java Thread Stack Size (in Kbytes)") \
+ \
+ product_pd(intx, CompilerThreadStackSize, \
+ "Compiler Thread Stack Size (in Kbytes)") \
+ \
+ develop_pd(uintx, JVMInvokeMethodSlack, \
+ "Stack space (bytes) required for JVM_InvokeMethod to complete") \
+ \
+ product(uintx, ThreadSafetyMargin, 50*M, \
+ "Thread safety margin is used on fixed-stack LinuxThreads (on " \
+ "Linux/x86 only) to prevent heap-stack collision. Set to 0 to " \
+ "disable this feature") \
+ \
+ /* code cache parameters */ \
+ develop(uintx, CodeCacheSegmentSize, 64, \
+ "Code cache segment size (in bytes) - smallest unit of " \
+ "allocation") \
+ \
+ develop_pd(intx, CodeEntryAlignment, \
+ "Code entry alignment for generated code (in bytes)") \
+ \
+ product_pd(uintx, InitialCodeCacheSize, \
+ "Initial code cache size (in bytes)") \
+ \
+ product_pd(uintx, ReservedCodeCacheSize, \
+ "Reserved code cache size (in bytes) - maximum code cache size") \
+ \
+ product(uintx, CodeCacheMinimumFreeSpace, 500*K, \
+          "When less than this much space is left, we stop compiling.")     \
+ \
+ product_pd(uintx, CodeCacheExpansionSize, \
+ "Code cache expansion size (in bytes)") \
+ \
+ develop_pd(uintx, CodeCacheMinBlockLength, \
+ "Minimum number of segments in a code cache block.") \
+ \
+ notproduct(bool, ExitOnFullCodeCache, false, \
+ "Exit the VM if we fill the code cache.") \
+ \
+ /* interpreter debugging */ \
+ develop(intx, BinarySwitchThreshold, 5, \
+          "Minimum number of lookupswitch entries for rewriting to binary " \
+ "switch") \
+ \
+ develop(intx, StopInterpreterAt, 0, \
+ "Stops interpreter execution at specified bytecode number") \
+ \
+ develop(intx, TraceBytecodesAt, 0, \
+ "Traces bytecodes starting with specified bytecode number") \
+ \
+ /* compiler interface */ \
+ develop(intx, CIStart, 0, \
+ "the id of the first compilation to permit") \
+ \
+ develop(intx, CIStop, -1, \
+ "the id of the last compilation to permit") \
+ \
+ develop(intx, CIStartOSR, 0, \
+ "the id of the first osr compilation to permit " \
+ "(CICountOSR must be on)") \
+ \
+ develop(intx, CIStopOSR, -1, \
+ "the id of the last osr compilation to permit " \
+ "(CICountOSR must be on)") \
+ \
+ develop(intx, CIBreakAtOSR, -1, \
+ "id of osr compilation to break at") \
+ \
+ develop(intx, CIBreakAt, -1, \
+ "id of compilation to break at") \
+ \
+ product(ccstrlist, CompileOnly, "", \
+ "List of methods (pkg/class.name) to restrict compilation to") \
+ \
+ product(ccstr, CompileCommandFile, NULL, \
+ "Read compiler commands from this file [.hotspot_compiler]") \
+ \
+ product(ccstrlist, CompileCommand, "", \
+ "Prepend to .hotspot_compiler; e.g. log,java/lang/String.<init>") \
+ \
+ product(bool, CICompilerCountPerCPU, false, \
+ "1 compiler thread for log(N CPUs)") \
+ \
+ develop(intx, CIFireOOMAt, -1, \
+ "Fire OutOfMemoryErrors throughout CI for testing the compiler " \
+ "(non-negative value throws OOM after this many CI accesses " \
+ "in each compile)") \
+ \
+ develop(intx, CIFireOOMAtDelay, -1, \
+ "Wait for this many CI accesses to occur in all compiles before " \
+ "beginning to throw OutOfMemoryErrors in each compile") \
+ \
+ /* Priorities */ \
+ product_pd(bool, UseThreadPriorities, "Use native thread priorities") \
+ \
+ product(intx, ThreadPriorityPolicy, 0, \
+ "0 : Normal. "\
+ " VM chooses priorities that are appropriate for normal "\
+ " applications. On Solaris NORM_PRIORITY and above are mapped "\
+ " to normal native priority. Java priorities below NORM_PRIORITY"\
+ " map to lower native priority values. On Windows applications"\
+ " are allowed to use higher native priorities. However, with "\
+ " ThreadPriorityPolicy=0, VM will not use the highest possible"\
+ " native priority, THREAD_PRIORITY_TIME_CRITICAL, as it may "\
+ " interfere with system threads. On Linux thread priorities "\
+ " are ignored because the OS does not support static priority "\
+ " in SCHED_OTHER scheduling class which is the only choice for"\
+ " non-root, non-realtime applications. "\
+ "1 : Aggressive. "\
+ " Java thread priorities map over to the entire range of "\
+ " native thread priorities. Higher Java thread priorities map "\
+ " to higher native thread priorities. This policy should be "\
+ " used with care, as sometimes it can cause performance "\
+ " degradation in the application and/or the entire system. On "\
+ " Linux this policy requires root privilege.") \
+ \
+ product(bool, ThreadPriorityVerbose, false, \
+ "print priority changes") \
+ \
+ product(intx, DefaultThreadPriority, -1, \
+ "what native priority threads run at if not specified elsewhere (-1 means no change)") \
+ \
+ product(intx, CompilerThreadPriority, -1, \
+ "what priority should compiler threads run at (-1 means no change)") \
+ \
+ product(intx, VMThreadPriority, -1, \
+ "what priority should VM threads run at (-1 means no change)") \
+ \
+ product(bool, CompilerThreadHintNoPreempt, true, \
+ "(Solaris only) Give compiler threads an extra quanta") \
+ \
+ product(bool, VMThreadHintNoPreempt, false, \
+ "(Solaris only) Give VM thread an extra quanta") \
+ \
+ product(intx, JavaPriority1_To_OSPriority, -1, "Map Java priorities to OS priorities") \
+ product(intx, JavaPriority2_To_OSPriority, -1, "Map Java priorities to OS priorities") \
+ product(intx, JavaPriority3_To_OSPriority, -1, "Map Java priorities to OS priorities") \
+ product(intx, JavaPriority4_To_OSPriority, -1, "Map Java priorities to OS priorities") \
+ product(intx, JavaPriority5_To_OSPriority, -1, "Map Java priorities to OS priorities") \
+ product(intx, JavaPriority6_To_OSPriority, -1, "Map Java priorities to OS priorities") \
+ product(intx, JavaPriority7_To_OSPriority, -1, "Map Java priorities to OS priorities") \
+ product(intx, JavaPriority8_To_OSPriority, -1, "Map Java priorities to OS priorities") \
+ product(intx, JavaPriority9_To_OSPriority, -1, "Map Java priorities to OS priorities") \
+ product(intx, JavaPriority10_To_OSPriority,-1, "Map Java priorities to OS priorities") \
+ \
+ /* compiler debugging */ \
+ notproduct(intx, CompileTheWorldStartAt, 1, \
+ "First class to consider when using +CompileTheWorld") \
+ \
+ notproduct(intx, CompileTheWorldStopAt, max_jint, \
+ "Last class to consider when using +CompileTheWorld") \
+ \
+ develop(intx, NewCodeParameter, 0, \
+ "Testing Only: Create a dedicated integer parameter before " \
+ "putback") \
+ \
+ /* new oopmap storage allocation */ \
+ develop(intx, MinOopMapAllocation, 8, \
+ "Minimum number of OopMap entries in an OopMapSet") \
+ \
+ /* Background Compilation */ \
+ develop(intx, LongCompileThreshold, 50, \
+ "Used with +TraceLongCompiles") \
+ \
+ product(intx, StarvationMonitorInterval, 200, \
+ "Pause between each check in ms") \
+ \
+ /* recompilation */ \
+ product_pd(intx, CompileThreshold, \
+ "number of interpreted method invocations before (re-)compiling") \
+ \
+ product_pd(intx, BackEdgeThreshold, \
+ "Interpreter Back edge threshold at which an OSR compilation is invoked")\
+ \
+ product(intx, Tier1BytecodeLimit, 10, \
+          "Must have at least this many bytecodes before tier1 "            \
+ "invocation counters are used") \
+ \
+ product_pd(intx, Tier2CompileThreshold, \
+ "threshold at which a tier 2 compilation is invoked") \
+ \
+ product_pd(intx, Tier2BackEdgeThreshold, \
+ "Back edge threshold at which a tier 2 compilation is invoked") \
+ \
+ product_pd(intx, Tier3CompileThreshold, \
+ "threshold at which a tier 3 compilation is invoked") \
+ \
+ product_pd(intx, Tier3BackEdgeThreshold, \
+ "Back edge threshold at which a tier 3 compilation is invoked") \
+ \
+ product_pd(intx, Tier4CompileThreshold, \
+ "threshold at which a tier 4 compilation is invoked") \
+ \
+ product_pd(intx, Tier4BackEdgeThreshold, \
+ "Back edge threshold at which a tier 4 compilation is invoked") \
+ \
+ product_pd(bool, TieredCompilation, \
+ "Enable two-tier compilation") \
+ \
+ product(bool, StressTieredRuntime, false, \
+ "Alternate client and server compiler on compile requests") \
+ \
+ product_pd(intx, OnStackReplacePercentage, \
+          "NON_TIERED number of method invocations/branches (expressed as % "\
+ "of CompileThreshold) before (re-)compiling OSR code") \
+ \
+ product(intx, InterpreterProfilePercentage, 33, \
+          "NON_TIERED number of method invocations/branches (expressed as % "\
+ "of CompileThreshold) before profiling in the interpreter") \
+ \
+ develop(intx, MaxRecompilationSearchLength, 10, \
+ "max. # frames to inspect searching for recompilee") \
+ \
+ develop(intx, MaxInterpretedSearchLength, 3, \
+ "max. # interp. frames to skip when searching for recompilee") \
+ \
+ develop(intx, DesiredMethodLimit, 8000, \
+ "desired max. method size (in bytecodes) after inlining") \
+ \
+ develop(intx, HugeMethodLimit, 8000, \
+ "don't compile methods larger than this if " \
+ "+DontCompileHugeMethods") \
+ \
+ /* New JDK 1.4 reflection implementation */ \
+ \
+ develop(bool, UseNewReflection, true, \
+ "Temporary flag for transition to reflection based on dynamic " \
+ "bytecode generation in 1.4; can no longer be turned off in 1.4 " \
+          "JDK, and is unneeded in 1.3 JDK, but marks most places where "   \
+          "VM changes were needed")                                         \
+ \
+ develop(bool, VerifyReflectionBytecodes, false, \
+ "Force verification of 1.4 reflection bytecodes. Does not work " \
+ "in situations like that described in 4486457 or for " \
+ "constructors generated for serialization, so can not be enabled "\
+ "in product.") \
+ \
+ product(bool, ReflectionWrapResolutionErrors, true, \
+ "Temporary flag for transition to AbstractMethodError wrapped " \
+ "in InvocationTargetException. See 6531596") \
+ \
+ \
+ develop(intx, FastSuperclassLimit, 8, \
+ "Depth of hardwired instanceof accelerator array") \
+ \
+ /* Properties for Java libraries */ \
+ \
+ product(intx, MaxDirectMemorySize, -1, \
+ "Maximum total size of NIO direct-buffer allocations") \
+ \
+ /* temporary developer defined flags */ \
+ \
+ diagnostic(bool, UseNewCode, false, \
+ "Testing Only: Use the new version while testing") \
+ \
+ diagnostic(bool, UseNewCode2, false, \
+ "Testing Only: Use the new version while testing") \
+ \
+ diagnostic(bool, UseNewCode3, false, \
+ "Testing Only: Use the new version while testing") \
+ \
+ /* flags for performance data collection */ \
+ \
+ product(bool, UsePerfData, true, \
+          "Flag to disable jvmstat instrumentation for performance testing "\
+ "and problem isolation purposes.") \
+ \
+ product(bool, PerfDataSaveToFile, false, \
+ "Save PerfData memory to hsperfdata_<pid> file on exit") \
+ \
+ product(ccstr, PerfDataSaveFile, NULL, \
+          "Save PerfData memory to the specified absolute pathname; "       \
+          "%p in the file name, if present, will be replaced by the pid")   \
+ \
+ product(intx, PerfDataSamplingInterval, 50 /*ms*/, \
+ "Data sampling interval in milliseconds") \
+ \
+ develop(bool, PerfTraceDataCreation, false, \
+ "Trace creation of Performance Data Entries") \
+ \
+ develop(bool, PerfTraceMemOps, false, \
+ "Trace PerfMemory create/attach/detach calls") \
+ \
+ product(bool, PerfDisableSharedMem, false, \
+ "Store performance data in standard memory") \
+ \
+ product(intx, PerfDataMemorySize, 32*K, \
+ "Size of performance data memory region. Will be rounded " \
+ "up to a multiple of the native os page size.") \
+ \
+ product(intx, PerfMaxStringConstLength, 1024, \
+ "Maximum PerfStringConstant string length before truncation") \
+ \
+ product(bool, PerfAllowAtExitRegistration, false, \
+ "Allow registration of atexit() methods") \
+ \
+ product(bool, PerfBypassFileSystemCheck, false, \
+ "Bypass Win32 file system criteria checks (Windows Only)") \
+ \
+ product(intx, UnguardOnExecutionViolation, 0, \
+          "Unguard page and retry on no-execute fault (Win32 only): "       \
+ "0=off, 1=conservative, 2=aggressive") \
+ \
+ /* Serviceability Support */ \
+ \
+ product(bool, ManagementServer, false, \
+ "Create JMX Management Server") \
+ \
+ product(bool, DisableAttachMechanism, false, \
+ "Disable mechanism that allows tools to attach to this VM") \
+ \
+ product(bool, StartAttachListener, false, \
+ "Always start Attach Listener at VM startup") \
+ \
+ manageable(bool, PrintConcurrentLocks, false, \
+ "Print java.util.concurrent locks in thread dump") \
+ \
+ /* Shared spaces */ \
+ \
+ product(bool, UseSharedSpaces, true, \
+ "Use shared spaces in the permanent generation") \
+ \
+ product(bool, RequireSharedSpaces, false, \
+ "Require shared spaces in the permanent generation") \
+ \
+ product(bool, ForceSharedSpaces, false, \
+ "Require shared spaces in the permanent generation") \
+ \
+ product(bool, DumpSharedSpaces, false, \
+ "Special mode: JVM reads a class list, loads classes, builds " \
+ "shared spaces, and dumps the shared spaces to a file to be " \
+ "used in future JVM runs.") \
+ \
+ product(bool, PrintSharedSpaces, false, \
+ "Print usage of shared spaces") \
+ \
+ product(uintx, SharedDummyBlockSize, 512*M, \
+ "Size of dummy block used to shift heap addresses (in bytes)") \
+ \
+ product(uintx, SharedReadWriteSize, 12*M, \
+ "Size of read-write space in permanent generation (in bytes)") \
+ \
+ product(uintx, SharedReadOnlySize, 8*M, \
+ "Size of read-only space in permanent generation (in bytes)") \
+ \
+ product(uintx, SharedMiscDataSize, 4*M, \
+ "Size of the shared data area adjacent to the heap (in bytes)") \
+ \
+ product(uintx, SharedMiscCodeSize, 4*M, \
+ "Size of the shared code area adjacent to the heap (in bytes)") \
+ \
+ diagnostic(bool, SharedOptimizeColdStart, true, \
+ "At dump time, order shared objects to achieve better " \
+ "cold startup time.") \
+ \
+ develop(intx, SharedOptimizeColdStartPolicy, 2, \
+ "Reordering policy for SharedOptimizeColdStart " \
+ "0=favor classload-time locality, 1=balanced, " \
+ "2=favor runtime locality") \
+ \
+ diagnostic(bool, SharedSkipVerify, false, \
+          "Skip assert() and verify() calls that page in unwanted shared "  \
+          "objects.")                                                       \
+ \
+ product(bool, TaggedStackInterpreter, false, \
+          "Insert tags in interpreter execution stack for oopmap generation")\
+ \
+ diagnostic(bool, PauseAtStartup, false, \
+ "Causes the VM to pause at startup time and wait for the pause " \
+ "file to be removed (default: ./vm.paused.<pid>)") \
+ \
+ diagnostic(ccstr, PauseAtStartupFile, NULL, \
+          "The file to create and whose removal to wait for when pausing "  \
+ "at startup. (default: ./vm.paused.<pid>)") \
+ \
+ product(bool, ExtendedDTraceProbes, false, \
+ "Enable performance-impacting dtrace probes") \
+ \
+ product(bool, DTraceMethodProbes, false, \
+ "Enable dtrace probes for method-entry and method-exit") \
+ \
+ product(bool, DTraceAllocProbes, false, \
+ "Enable dtrace probes for object allocation") \
+ \
+ product(bool, DTraceMonitorProbes, false, \
+ "Enable dtrace probes for monitor events") \
+ \
+ product(bool, RelaxAccessControlCheck, false, \
+ "Relax the access control checks in the verifier") \
+ \
+ product(bool, UseVMInterruptibleIO, true, \
+ "(Unstable, Solaris-specific) Thread interrupt before or with " \
+ "EINTR for I/O operations results in OS_INTRPT")
+
+
+/*
+ * Macros for factoring of globals
+ */
+
+// Interface macros
+#define DECLARE_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;
+#define DECLARE_PD_PRODUCT_FLAG(type, name, doc) extern "C" type name;
+#define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc) extern "C" type name;
+#define DECLARE_MANAGEABLE_FLAG(type, name, value, doc) extern "C" type name;
+#define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc) extern "C" type name;
+#ifdef PRODUCT
+#define DECLARE_DEVELOPER_FLAG(type, name, value, doc) const type name = value;
+#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) const type name = pd_##name;
+#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc)
+#else
+#define DECLARE_DEVELOPER_FLAG(type, name, value, doc) extern "C" type name;
+#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type name;
+#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type name;
+#endif
+
+// Implementation macros
+#define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc) type name = value;
+#define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc) type name = pd_##name;
+#define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc) type name = value;
+#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value;
+#define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value;
+#ifdef PRODUCT
+#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) /* flag name is constant */
+#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) /* flag name is constant */
+#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc)
+#else
+#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type name = value;
+#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type name = pd_##name;
+#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value;
+#endif
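+
+// Illustrative sketch (not part of the original header): for a develop flag
+// such as develop(intx, StackPrintLimit, 100, "..."), the macros above expand
+// roughly as follows in a non-PRODUCT build:
+//
+//   DECLARE_DEVELOPER_FLAG(intx, StackPrintLimit, 100, doc)
+//     -> extern "C" intx StackPrintLimit;
+//   MATERIALIZE_DEVELOPER_FLAG(intx, StackPrintLimit, 100, doc)
+//     -> intx StackPrintLimit = 100;
+//
+// In a PRODUCT build the same entry becomes a compile-time constant
+// (const intx StackPrintLimit = 100;) and nothing is materialized.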
+
+RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
+
+RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
diff --git a/src/share/vm/runtime/globals_extension.hpp b/src/share/vm/runtime/globals_extension.hpp
new file mode 100644
index 000000000..10d225370
--- /dev/null
+++ b/src/share/vm/runtime/globals_extension.hpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Construct enum of Flag_<cmdline-arg> constants.
+
+// Parens left off in the following for the enum decl below.
+#define FLAG_MEMBER(flag) Flag_##flag
+
+#define RUNTIME_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#define RUNTIME_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
+#define RUNTIME_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#define RUNTIME_MANAGEABLE_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#define RUNTIME_PRODUCT_RW_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#ifdef PRODUCT
+ #define RUNTIME_DEVELOP_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
+ #define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc) /* flag is constant */
+ #define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
+#else
+ #define RUNTIME_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+ #define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
+ #define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#endif
+
+#define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
+#ifdef PRODUCT
+ #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
+ #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc) /* flag is constant */
+ #define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
+#else
+ #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+ #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
+ #define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#endif
+
+
+#define C2_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#define C2_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
+#define C2_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#ifdef PRODUCT
+ #define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
+ #define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc) /* flag is constant */
+ #define C2_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
+#else
+ #define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+ #define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
+ #define C2_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#endif
+
+typedef enum {
+ RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER,
+ RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER,
+ RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
+ RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER,
+ RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER,
+ RUNTIME_NOTPRODUCT_FLAG_MEMBER)
+#ifdef COMPILER1
+ C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER,
+ C1_NOTPRODUCT_FLAG_MEMBER)
+#endif
+#ifdef COMPILER2
+ C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER,
+ C2_DIAGNOSTIC_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
+#endif
+ NUM_CommandLineFlag
+} CommandLineFlag;
+
+// Construct enum of Flag_<cmdline-arg>_<type> constants.
+
+#define FLAG_MEMBER_WITH_TYPE(flag,type) Flag_##flag##_##type
+
+#define RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#define RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#define RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#define RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#define RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#ifdef PRODUCT
+ #define RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
+ #define RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) /* flag is constant */
+ #define RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
+#else
+ #define RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+ #define RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+ #define RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#endif
+
+#define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#ifdef PRODUCT
+ #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
+ #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) /* flag is constant */
+ #define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
+#else
+ #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+ #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+ #define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#endif
+
+
+#define C2_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#define C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#define C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#ifdef PRODUCT
+ #define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
+ #define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) /* flag is constant */
+ #define C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
+#else
+ #define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+ #define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+ #define C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#endif
+
+typedef enum {
+ RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
+ RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+ RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
+ RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE,
+ RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE,
+ RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE)
+RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
+ RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+ RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
+ RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
+#ifdef COMPILER1
+ C1_FLAGS(C1_DEVELOP_FLAG_MEMBER_WITH_TYPE, C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, C1_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+ C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
+#endif
+#ifdef COMPILER2
+ C2_FLAGS(C2_DEVELOP_FLAG_MEMBER_WITH_TYPE, C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, C2_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+ C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE, C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
+#endif
+ NUM_CommandLineFlagWithType
+} CommandLineFlagWithType;
+
+#define FLAG_IS_DEFAULT(name) (CommandLineFlagsEx::is_default(FLAG_MEMBER(name)))
+
+#define FLAG_SET_DEFAULT(name, value) ((name) = (value))
+
+#define FLAG_SET_CMDLINE(type, name, value) (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name,type), (type)(value), COMMAND_LINE))
+#define FLAG_SET_ERGO(type, name, value) (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name,type), (type)(value), ERGONOMIC))
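+
+// Illustrative sketch (not part of the original header): for the flag
+// diagnostic(bool, UseNewCode, false, ...) defined in globals.hpp, the macros
+// above expand roughly as follows:
+//
+//   FLAG_IS_DEFAULT(UseNewCode)
+//     -> CommandLineFlagsEx::is_default(Flag_UseNewCode)
+//   FLAG_SET_CMDLINE(bool, UseNewCode, true)
+//     -> CommandLineFlagsEx::boolAtPut(Flag_UseNewCode_bool, (bool)(true), COMMAND_LINE)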
+
+// Can't put the following in CommandLineFlags because
+// of a circular dependency on the enum definition.
+class CommandLineFlagsEx : CommandLineFlags {
+ public:
+ static void boolAtPut(CommandLineFlagWithType flag, bool value, FlagValueOrigin origin);
+ static void intxAtPut(CommandLineFlagWithType flag, intx value, FlagValueOrigin origin);
+ static void uintxAtPut(CommandLineFlagWithType flag, uintx value, FlagValueOrigin origin);
+ static void doubleAtPut(CommandLineFlagWithType flag, double value, FlagValueOrigin origin);
+ static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, FlagValueOrigin origin);
+
+ static bool is_default(CommandLineFlag flag);
+};
diff --git a/src/share/vm/runtime/handles.cpp b/src/share/vm/runtime/handles.cpp
new file mode 100644
index 000000000..9282eab14
--- /dev/null
+++ b/src/share/vm/runtime/handles.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright 1997-2003 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_handles.cpp.incl"
+
+#ifdef ASSERT
+oop* HandleArea::allocate_handle(oop obj) {
+ assert(_handle_mark_nesting > 1, "memory leak: allocating handle outside HandleMark");
+ assert(_no_handle_mark_nesting == 0, "allocating handle inside NoHandleMark");
+ assert(SharedSkipVerify || obj->is_oop(), "sanity check");
+ return real_allocate_handle(obj);
+}
+
+Handle::Handle(Thread* thread, oop obj) {
+ assert(thread == Thread::current(), "sanity check");
+ if (obj == NULL) {
+ _handle = NULL;
+ } else {
+ _handle = thread->handle_area()->allocate_handle(obj);
+ }
+}
+
+#endif
+
+static uintx chunk_oops_do(OopClosure* f, Chunk* chunk, char* chunk_top) {
+ oop* bottom = (oop*) chunk->bottom();
+ oop* top = (oop*) chunk_top;
+ uintx handles_visited = top - bottom;
+ assert(top >= bottom && top <= (oop*) chunk->top(), "just checking");
+ // during GC phase 3, a handle may be a forward pointer that
+ // is not yet valid, so loosen the assertion
+ while (bottom < top) {
+// assert((*bottom)->is_oop(), "handle should point to oop");
+ assert(Universe::heap()->is_in(*bottom), "handle should be valid heap address");
+ f->do_oop(bottom++);
+ }
+ return handles_visited;
+}
+
+// Used for debugging handle allocation.
+NOT_PRODUCT(jint _nof_handlemarks = 0;)
+
+void HandleArea::oops_do(OopClosure* f) {
+ uintx handles_visited = 0;
+ // First handle the current chunk. It is filled to the high water mark.
+ handles_visited += chunk_oops_do(f, _chunk, _hwm);
+ // Then handle all previous chunks. They are completely filled.
+ Chunk* k = _first;
+ while(k != _chunk) {
+ handles_visited += chunk_oops_do(f, k, k->top());
+ k = k->next();
+ }
+
+ // The thread local handle areas should not get very large
+ if (TraceHandleAllocation && handles_visited > TotalHandleAllocationLimit) {
+#ifdef ASSERT
+ warning("%d: Visited in HandleMark : %d",
+ _nof_handlemarks, handles_visited);
+#else
+ warning("Visited in HandleMark : %d", handles_visited);
+#endif
+ }
+ if (_prev != NULL) _prev->oops_do(f);
+}
+
+void HandleMark::initialize(Thread* thread) {
+ _thread = thread;
+ // Save area
+ _area = thread->handle_area();
+ // Save current top
+ _chunk = _area->_chunk;
+ _hwm = _area->_hwm;
+ _max = _area->_max;
+ NOT_PRODUCT(_size_in_bytes = _area->_size_in_bytes;)
+ debug_only(_area->_handle_mark_nesting++);
+ assert(_area->_handle_mark_nesting > 0, "must stack allocate HandleMarks");
+ debug_only(Atomic::inc(&_nof_handlemarks);)
+
+ // Link this in the thread
+ set_previous_handle_mark(thread->last_handle_mark());
+ thread->set_last_handle_mark(this);
+}
+
+
+HandleMark::~HandleMark() {
+ HandleArea* area = _area; // help compilers with poor alias analysis
+ assert(area == _thread->handle_area(), "sanity check");
+ assert(area->_handle_mark_nesting > 0, "must stack allocate HandleMarks" );
+ debug_only(area->_handle_mark_nesting--);
+
+  // Debug code to trace the number of handles allocated per mark.
+#ifdef ASSERT
+ if (TraceHandleAllocation) {
+ size_t handles = 0;
+ Chunk *c = _chunk->next();
+ if (c == NULL) {
+ handles = area->_hwm - _hwm; // no new chunk allocated
+ } else {
+ handles = _max - _hwm; // add rest in first chunk
+ while(c != NULL) {
+ handles += c->length();
+ c = c->next();
+ }
+      handles -= area->_max - area->_hwm; // adjust for last chunk not full
+ }
+ handles /= sizeof(void *); // Adjust for size of a handle
+ if (handles > HandleAllocationLimit) {
+ // Note: _nof_handlemarks is only set in debug mode
+ warning("%d: Allocated in HandleMark : %d", _nof_handlemarks, handles);
+ }
+ }
+#endif
+
+ // Delete later chunks
+ if( _chunk->next() ) {
+ _chunk->next_chop();
+ }
+ // Roll back arena to saved top markers
+ area->_chunk = _chunk;
+ area->_hwm = _hwm;
+ area->_max = _max;
+ NOT_PRODUCT(area->set_size_in_bytes(_size_in_bytes);)
+#ifdef ASSERT
+ // clear out first chunk (to detect allocation bugs)
+ if (ZapVMHandleArea) {
+ memset(_hwm, badHandleValue, _max - _hwm);
+ }
+ Atomic::dec(&_nof_handlemarks);
+#endif
+
+ // Unlink this from the thread
+ _thread->set_last_handle_mark(previous_handle_mark());
+}
+
+#ifdef ASSERT
+
+NoHandleMark::NoHandleMark() {
+ HandleArea* area = Thread::current()->handle_area();
+ area->_no_handle_mark_nesting++;
+ assert(area->_no_handle_mark_nesting > 0, "must stack allocate NoHandleMark" );
+}
+
+
+NoHandleMark::~NoHandleMark() {
+ HandleArea* area = Thread::current()->handle_area();
+ assert(area->_no_handle_mark_nesting > 0, "must stack allocate NoHandleMark" );
+ area->_no_handle_mark_nesting--;
+}
+
+
+ResetNoHandleMark::ResetNoHandleMark() {
+ HandleArea* area = Thread::current()->handle_area();
+ _no_handle_mark_nesting = area->_no_handle_mark_nesting;
+ area->_no_handle_mark_nesting = 0;
+}
+
+
+ResetNoHandleMark::~ResetNoHandleMark() {
+ HandleArea* area = Thread::current()->handle_area();
+ area->_no_handle_mark_nesting = _no_handle_mark_nesting;
+}
+
+#endif
diff --git a/src/share/vm/runtime/handles.hpp b/src/share/vm/runtime/handles.hpp
new file mode 100644
index 000000000..55e9b41fa
--- /dev/null
+++ b/src/share/vm/runtime/handles.hpp
@@ -0,0 +1,348 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+//------------------------------------------------------------------------------------------------------------------------
+// In order to preserve oops during garbage collection, they should be
+// allocated and passed around via Handles within the VM. A handle is
+// simply an extra indirection allocated in a thread local handle area.
+//
+// A handle is a ValueObj, so it can be passed around as a value, can
+// be used as a parameter w/o using &-passing, and can be returned as a
+// return value.
+//
+// oop parameters and return types should be Handles whenever feasible.
+//
+// Handles are declared in a straightforward manner, e.g.
+//
+// oop obj = ...;
+// Handle h1(obj); // allocate new handle
+// Handle h2(thread, obj); // faster allocation when current thread is known
+// Handle h3; // declare handle only, no allocation occurs
+// ...
+// h3 = h1; // make h3 refer to same indirection as h1
+// oop obj2 = h2(); // get handle value
+// h1->print(); // invoking operation on oop
+//
+// Handles are specialized for different oop types to provide extra type
+// information and avoid unnecessary casting. For each oop type xxxOop
+// there is a corresponding handle called xxxHandle, e.g.
+//
+// oop Handle
+// methodOop methodHandle
+// instanceOop instanceHandle
+//
+// For klassOops, it is often useful to model the Klass hierarchy in order
+// to get access to the klass_part without casting. For each xxxKlass there
+// is a corresponding handle called xxxKlassHandle, e.g.
+//
+// klassOop Klass KlassHandle
+// klassOop methodKlass methodKlassHandle
+// klassOop instanceKlass instanceKlassHandle
+//
+
+//------------------------------------------------------------------------------------------------------------------------
+// Base class for all handles. Provides overloading of frequently
+// used operators for ease of use.
+
+class Handle VALUE_OBJ_CLASS_SPEC {
+ private:
+ oop* _handle;
+
+ protected:
+ oop obj() const { return _handle == NULL ? (oop)NULL : *_handle; }
+ oop non_null_obj() const { assert(_handle != NULL, "resolving NULL handle"); return *_handle; }
+
+ public:
+ // Constructors
+ Handle() { _handle = NULL; }
+ Handle(oop obj);
+#ifndef ASSERT
+ Handle(Thread* thread, oop obj);
+#else
+ // Don't inline body with assert for current thread
+ Handle(Thread* thread, oop obj);
+#endif // ASSERT
+
+ // General access
+ oop operator () () const { return obj(); }
+ oop operator -> () const { return non_null_obj(); }
+ bool operator == (oop o) const { return obj() == o; }
+ bool operator == (const Handle& h) const { return obj() == h.obj(); }
+
+ // Null checks
+ bool is_null() const { return _handle == NULL; }
+ bool not_null() const { return _handle != NULL; }
+
+ // Debugging
+ void print() { obj()->print(); }
+
+ // Direct interface, use very sparingly.
+  // Used by JavaCalls to quickly convert handles and to create handles for static data structures.
+ // Constructor takes a dummy argument to prevent unintentional type conversion in C++.
+ Handle(oop *handle, bool dummy) { _handle = handle; }
+
+ // Raw handle access. Allows easy duplication of Handles. This can be very unsafe
+  // since a duplicate is only valid as long as the original handle is alive.
+ oop* raw_value() { return _handle; }
+ static oop raw_resolve(oop *handle) { return handle == NULL ? (oop)NULL : *handle; }
+};
+
+
+//------------------------------------------------------------------------------------------------------------------------
+// Base class for Handles containing klassOops. Provides overloading of frequently
+// used operators for ease of use and typed access to the Klass part.
+class KlassHandle: public Handle {
+ protected:
+ klassOop obj() const { return (klassOop)Handle::obj(); }
+ klassOop non_null_obj() const { return (klassOop)Handle::non_null_obj(); }
+ Klass* as_klass() const { return non_null_obj()->klass_part(); }
+
+ public:
+ // Constructors
+ KlassHandle () : Handle() {}
+ KlassHandle (oop obj) : Handle(obj) {
+ assert(SharedSkipVerify || is_null() || obj->is_klass(), "not a klassOop");
+ }
+ KlassHandle (Klass* kl) : Handle(kl ? kl->as_klassOop() : (klassOop)NULL) {
+ assert(SharedSkipVerify || is_null() || obj()->is_klass(), "not a klassOop");
+ }
+
+ // Faster versions passing Thread
+ KlassHandle (Thread* thread, oop obj) : Handle(thread, obj) {
+ assert(SharedSkipVerify || is_null() || obj->is_klass(), "not a klassOop");
+ }
+ KlassHandle (Thread *thread, Klass* kl)
+ : Handle(thread, kl ? kl->as_klassOop() : (klassOop)NULL) {
+ assert(is_null() || obj()->is_klass(), "not a klassOop");
+ }
+
+ // General access
+ klassOop operator () () const { return obj(); }
+ Klass* operator -> () const { return as_klass(); }
+};
+
+
+//------------------------------------------------------------------------------------------------------------------------
+// Specific Handles for different oop types
+#define DEF_HANDLE(type, is_a) \
+ class type##Handle; \
+ class type##Handle: public Handle { \
+ protected: \
+ type##Oop obj() const { return (type##Oop)Handle::obj(); } \
+ type##Oop non_null_obj() const { return (type##Oop)Handle::non_null_obj(); } \
+ \
+ public: \
+ /* Constructors */ \
+ type##Handle () : Handle() {} \
+ type##Handle (type##Oop obj) : Handle((oop)obj) { \
+ assert(SharedSkipVerify || is_null() || ((oop)obj)->is_a(), \
+ "illegal type"); \
+ } \
+ type##Handle (Thread* thread, type##Oop obj) : Handle(thread, (oop)obj) { \
+ assert(SharedSkipVerify || is_null() || ((oop)obj)->is_a(), "illegal type"); \
+ } \
+ \
+ /* Special constructor, use sparingly */ \
+ type##Handle (type##Oop *handle, bool dummy) : Handle((oop*)handle, dummy) {} \
+ \
+ /* Operators for ease of use */ \
+ type##Oop operator () () const { return obj(); } \
+ type##Oop operator -> () const { return non_null_obj(); } \
+ };
+
+
+DEF_HANDLE(instance , is_instance )
+DEF_HANDLE(method , is_method )
+DEF_HANDLE(constMethod , is_constMethod )
+DEF_HANDLE(methodData , is_methodData )
+DEF_HANDLE(array , is_array )
+DEF_HANDLE(constantPool , is_constantPool )
+DEF_HANDLE(constantPoolCache, is_constantPoolCache)
+DEF_HANDLE(objArray , is_objArray )
+DEF_HANDLE(typeArray , is_typeArray )
+DEF_HANDLE(symbol , is_symbol )
+
+//------------------------------------------------------------------------------------------------------------------------
+// Specific KlassHandles for different Klass types
+
+#define DEF_KLASS_HANDLE(type, is_a) \
+ class type##Handle : public KlassHandle { \
+ public: \
+ /* Constructors */ \
+ type##Handle () : KlassHandle() {} \
+ type##Handle (klassOop obj) : KlassHandle(obj) { \
+ assert(SharedSkipVerify || is_null() || obj->klass_part()->is_a(), \
+ "illegal type"); \
+ } \
+ type##Handle (Thread* thread, klassOop obj) : KlassHandle(thread, obj) { \
+ assert(SharedSkipVerify || is_null() || obj->klass_part()->is_a(), \
+ "illegal type"); \
+ } \
+ \
+ /* Access to klass part */ \
+ type* operator -> () const { return (type*)obj()->klass_part(); } \
+ \
+ static type##Handle cast(KlassHandle h) { return type##Handle(h()); } \
+ \
+ };
+
+
+DEF_KLASS_HANDLE(instanceKlass , oop_is_instance_slow )
+DEF_KLASS_HANDLE(methodKlass , oop_is_method )
+DEF_KLASS_HANDLE(constMethodKlass , oop_is_constMethod )
+DEF_KLASS_HANDLE(klassKlass , oop_is_klass )
+DEF_KLASS_HANDLE(arrayKlassKlass , oop_is_arrayKlass )
+DEF_KLASS_HANDLE(objArrayKlassKlass , oop_is_objArrayKlass )
+DEF_KLASS_HANDLE(typeArrayKlassKlass , oop_is_typeArrayKlass)
+DEF_KLASS_HANDLE(arrayKlass , oop_is_array )
+DEF_KLASS_HANDLE(typeArrayKlass , oop_is_typeArray_slow)
+DEF_KLASS_HANDLE(objArrayKlass , oop_is_objArray_slow )
+DEF_KLASS_HANDLE(symbolKlass , oop_is_symbol )
+DEF_KLASS_HANDLE(constantPoolKlass , oop_is_constantPool )
+DEF_KLASS_HANDLE(constantPoolCacheKlass, oop_is_constantPool )
+
+
+//------------------------------------------------------------------------------------------------------------------------
+// Thread local handle area
+
+class HandleArea: public Arena {
+ friend class HandleMark;
+ friend class NoHandleMark;
+ friend class ResetNoHandleMark;
+#ifdef ASSERT
+ int _handle_mark_nesting;
+ int _no_handle_mark_nesting;
+#endif
+ HandleArea* _prev; // link to outer (older) area
+ public:
+ // Constructor
+ HandleArea(HandleArea* prev) {
+ debug_only(_handle_mark_nesting = 0);
+ debug_only(_no_handle_mark_nesting = 0);
+ _prev = prev;
+ }
+
+ // Handle allocation
+ private:
+ oop* real_allocate_handle(oop obj) {
+#ifdef ASSERT
+ oop* handle = (oop*) (UseMallocOnly ? internal_malloc_4(oopSize) : Amalloc_4(oopSize));
+#else
+ oop* handle = (oop*) Amalloc_4(oopSize);
+#endif
+ *handle = obj;
+ return handle;
+ }
+ public:
+#ifdef ASSERT
+ oop* allocate_handle(oop obj);
+#else
+ oop* allocate_handle(oop obj) { return real_allocate_handle(obj); }
+#endif
+
+ // Garbage collection support
+ void oops_do(OopClosure* f);
+
+ // Number of handles in use
+ size_t used() const { return Arena::used() / oopSize; }
+
+ debug_only(bool no_handle_mark_active() { return _no_handle_mark_nesting > 0; })
+};
+
+
+//------------------------------------------------------------------------------------------------------------------------
+// Handles are allocated in a (growable) thread local handle area. Deallocation
+// is managed using a HandleMark. It should normally not be necessary to use
+// HandleMarks manually.
+//
+// A HandleMark constructor will record the current handle area top, and the
+// destructor will reset the top, destroying all handles allocated in between.
+// The following code will therefore NOT work:
+//
+// Handle h;
+// {
+// HandleMark hm;
+// h = Handle(obj);
+// }
+// h()->print(); // WRONG, h destroyed by HandleMark destructor.
+//
+// If h has to be preserved, it can be converted to an oop or a local JNI handle
+// across the HandleMark boundary.
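+//
+// A correct pattern (illustrative sketch, not taken from the original code)
+// keeps the HandleMark alive for as long as the handle is needed:
+//
+//   { HandleMark hm;
+//     Handle h(obj);
+//     h->print();          // OK, hm is still live here
+//   }                      // handles allocated since hm was created are freed here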
+
+// The base class of HandleMark should have been StackObj but we also heap allocate
+// a HandleMark when a thread is created.
+
+class HandleMark {
+ private:
+ Thread *_thread; // thread that owns this mark
+ HandleArea *_area; // saved handle area
+ Chunk *_chunk; // saved arena chunk
+ char *_hwm, *_max; // saved arena info
+ NOT_PRODUCT(size_t _size_in_bytes;) // size of handle area
+ // Link to previous active HandleMark in thread
+ HandleMark* _previous_handle_mark;
+
+ void initialize(Thread* thread); // common code for constructors
+ void set_previous_handle_mark(HandleMark* mark) { _previous_handle_mark = mark; }
+ HandleMark* previous_handle_mark() const { return _previous_handle_mark; }
+
+ public:
+ HandleMark(); // see handles_inline.hpp
+ HandleMark(Thread* thread) { initialize(thread); }
+ ~HandleMark();
+
+ // Functions used by HandleMarkCleaner
+ // called in the constructor of HandleMarkCleaner
+ void push();
+ // called in the destructor of HandleMarkCleaner
+ void pop_and_restore();
+};
+
+//------------------------------------------------------------------------------------------------------------------------
+// A NoHandleMark stack object will verify that no handles are allocated
+// in its scope. Enabled in debug mode only.
+
+class NoHandleMark: public StackObj {
+ public:
+#ifdef ASSERT
+ NoHandleMark();
+ ~NoHandleMark();
+#else
+ NoHandleMark() {}
+ ~NoHandleMark() {}
+#endif
+};
+
+
+class ResetNoHandleMark: public StackObj {
+ int _no_handle_mark_nesting;
+ public:
+#ifdef ASSERT
+ ResetNoHandleMark();
+ ~ResetNoHandleMark();
+#else
+ ResetNoHandleMark() {}
+ ~ResetNoHandleMark() {}
+#endif
+};
diff --git a/src/share/vm/runtime/handles.inline.hpp b/src/share/vm/runtime/handles.inline.hpp
new file mode 100644
index 000000000..b4d4b3197
--- /dev/null
+++ b/src/share/vm/runtime/handles.inline.hpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 1998-2003 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// these inline functions are in a separate file to break an include cycle
+// between Thread and Handle
+
+inline Handle::Handle(oop obj) {
+ if (obj == NULL) {
+ _handle = NULL;
+ } else {
+ _handle = Thread::current()->handle_area()->allocate_handle(obj);
+ }
+}
+
+
+#ifndef ASSERT
+inline Handle::Handle(Thread* thread, oop obj) {
+ assert(thread == Thread::current(), "sanity check");
+ if (obj == NULL) {
+ _handle = NULL;
+ } else {
+ _handle = thread->handle_area()->allocate_handle(obj);
+ }
+}
+#endif // ASSERT
+
+
+inline HandleMark::HandleMark() {
+ initialize(Thread::current());
+}
+
+
+inline void HandleMark::push() {
+ // This is intentionally a NOP. pop_and_restore will reset
+ // values to the HandleMark further down the stack, typically
+ // in JavaCalls::call_helper.
+ debug_only(_area->_handle_mark_nesting++);
+}
+
+inline void HandleMark::pop_and_restore() {
+ HandleArea* area = _area; // help compilers with poor alias analysis
+ // Delete later chunks
+ if( _chunk->next() ) {
+ _chunk->next_chop();
+ }
+ // Roll back arena to saved top markers
+ area->_chunk = _chunk;
+ area->_hwm = _hwm;
+ area->_max = _max;
+ NOT_PRODUCT(area->set_size_in_bytes(_size_in_bytes);)
+ debug_only(area->_handle_mark_nesting--);
+}
diff --git a/src/share/vm/runtime/hpi.cpp b/src/share/vm/runtime/hpi.cpp
new file mode 100644
index 000000000..18e4e7976
--- /dev/null
+++ b/src/share/vm/runtime/hpi.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_hpi.cpp.incl"
+
+extern "C" {
+ static void unimplemented_panic(const char *fmt, ...) {
+ Unimplemented();
+ }
+
+ static void unimplemented_monitorRegister(sys_mon_t *mid, char *info_str) {
+ Unimplemented();
+ }
+}
+
+static vm_calls_t callbacks = {
+ jio_fprintf,
+ unimplemented_panic,
+ unimplemented_monitorRegister,
+
+ NULL, // unused
+ NULL, // unused
+ NULL // unused
+};
+
+GetInterfaceFunc hpi::_get_interface = NULL;
+HPI_FileInterface* hpi::_file = NULL;
+HPI_SocketInterface* hpi::_socket = NULL;
+HPI_LibraryInterface* hpi::_library = NULL;
+HPI_SystemInterface* hpi::_system = NULL;
+
+jint hpi::initialize()
+{
+ initialize_get_interface(&callbacks);
+ if (_get_interface == NULL)
+ return JNI_ERR;
+
+ jint result;
+
+ result = (*_get_interface)((void **)&_file, "File", 1);
+ if (result != 0) {
+ if (TraceHPI) tty->print_cr("Can't find HPI_FileInterface");
+ return JNI_ERR;
+ }
+
+
+ result = (*_get_interface)((void **)&_library, "Library", 1);
+ if (result != 0) {
+ if (TraceHPI) tty->print_cr("Can't find HPI_LibraryInterface");
+ return JNI_ERR;
+ }
+
+ result = (*_get_interface)((void **)&_system, "System", 1);
+ if (result != 0) {
+ if (TraceHPI) tty->print_cr("Can't find HPI_SystemInterface");
+ return JNI_ERR;
+ }
+
+ return JNI_OK;
+}
+
+jint hpi::initialize_socket_library()
+{
+ if (_get_interface == NULL) {
+ if (TraceHPI) {
+ tty->print_cr("Fatal HPI error: reached initialize_socket_library with NULL _get_interface");
+ }
+ return JNI_ERR;
+ }
+
+ jint result;
+ result = (*_get_interface)((void **)&_socket, "Socket", 1);
+ if (result != 0) {
+ if (TraceHPI) tty->print_cr("Can't find HPI_SocketInterface");
+ return JNI_ERR;
+ }
+
+ return JNI_OK;
+}
diff --git a/src/share/vm/runtime/hpi.hpp b/src/share/vm/runtime/hpi.hpp
new file mode 100644
index 000000000..00fc21c90
--- /dev/null
+++ b/src/share/vm/runtime/hpi.hpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+//
+// C++ wrapper to HPI.
+//
+
+class hpi : AllStatic {
+
+private:
+ static GetInterfaceFunc _get_interface;
+ static HPI_FileInterface* _file;
+ static HPI_SocketInterface* _socket;
+ static HPI_LibraryInterface* _library;
+ static HPI_SystemInterface* _system;
+
+private:
+ static void initialize_get_interface(vm_calls_t *callbacks);
+
+public:
+ // Load and initialize everything except sockets.
+ static jint initialize();
+
+ // Socket library needs to be lazily initialized because eagerly
+ // loading Winsock is known to cause "connect to your ISP"
+ // dialog to show up. Or so goes the legend.
+ static jint initialize_socket_library();
+
+ // HPI_FileInterface
+ static inline char* native_path(char *path);
+ static inline int file_type(const char *path);
+ static inline int open(const char *name, int mode, int perm);
+ static inline int close(int fd);
+ static inline jlong lseek(int fd, jlong off, int whence);
+ static inline int ftruncate(int fd, jlong length);
+ static inline int fsync(int fd);
+ static inline int available(int fd, jlong *bytes);
+ static inline size_t read(int fd, void *buf, unsigned int nBytes);
+ static inline size_t write(int fd, const void *buf, unsigned int nBytes);
+ static inline int fsize(int fd, jlong *size);
+
+ // HPI_SocketInterface
+ static inline int socket(int domain, int type, int protocol);
+ static inline int socket_close(int fd);
+ static inline int socket_shutdown(int fd, int howto);
+ static inline int recv(int fd, char *buf, int nBytes, int flags);
+ static inline int send(int fd, char *buf, int nBytes, int flags);
+ static inline int timeout(int fd, long timeout);
+ static inline int listen(int fd, int count);
+ static inline int connect(int fd, struct sockaddr *him, int len);
+ static inline int bind(int fd, struct sockaddr *him, int len);
+ static inline int accept(int fd, struct sockaddr *him, int *len);
+ static inline int recvfrom(int fd, char *buf, int nbytes, int flags,
+ struct sockaddr *from, int *fromlen);
+ static inline int get_sock_name(int fd, struct sockaddr *him, int *len);
+ static inline int sendto(int fd, char *buf, int len, int flags,
+ struct sockaddr *to, int tolen);
+ static inline int socket_available(int fd, jint *pbytes);
+
+ static inline int get_sock_opt(int fd, int level, int optname,
+ char *optval, int* optlen);
+ static inline int set_sock_opt(int fd, int level, int optname,
+ const char *optval, int optlen);
+ static inline int get_host_name(char* name, int namelen);
+ static inline struct hostent* get_host_by_addr(const char* name, int len, int type);
+ static inline struct hostent* get_host_by_name(char* name);
+ static inline struct protoent* get_proto_by_name(char* name);
+
+ // HPI_LibraryInterface
+ static inline void dll_build_name(char *buf, int buf_len, char* path,
+ const char *name);
+ static inline void* dll_load(const char *name, char *ebuf, int ebuflen);
+ static inline void dll_unload(void *lib);
+ static inline void* dll_lookup(void *lib, const char *name);
+
+ // HPI_SystemInterface
+ static inline int lasterror(char *buf, int len);
+};
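+
+// For illustration only (the caller and path are hypothetical): VM code goes
+// through these static wrappers rather than the HPI transfer vectors directly,
+// e.g. roughly
+//
+//   if (hpi::initialize() != JNI_OK) return JNI_ERR;  // once, early in startup
+//   int fd = hpi::open("/tmp/example", 0, 0);
+//   if (fd >= 0) {
+//     char buf[128];
+//     size_t n = hpi::read(fd, buf, sizeof(buf));
+//     hpi::close(fd);
+//   }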
+
+//
+// Macros that provide inline bodies for the functions.
+//
+
+#define HPIDECL(name, names, intf, func, ret_type, ret_fmt, arg_type, arg_print, arg) \
+ inline ret_type hpi::name arg_type { \
+ if (TraceHPI) { \
+ tty->print("hpi::" names "("); \
+ tty->print arg_print ; \
+ tty->print(") = "); \
+ } \
+ ret_type result = (*intf->func) arg ; \
+ if (TraceHPI) { \
+ tty->print_cr(ret_fmt, result); \
+ } \
+ return result; \
+ }
+
+// Macro to facilitate moving HPI functionality into the vm.
+// See bug 6348631. The only difference between this macro and
+// HPIDECL is that we call a vm method rather than use the HPI
+// transfer vector. Ultimately, we'll replace HPIDECL with
+// VM_HPIDECL for all hpi methods.
+#define VM_HPIDECL(name, names, func, ret_type, ret_fmt, arg_type,arg_print, arg) \
+ inline ret_type hpi::name arg_type { \
+ if (TraceHPI) { \
+ tty->print("hpi::" names "("); \
+ tty->print arg_print ; \
+ tty->print(") = "); \
+ } \
+ ret_type result = func arg ; \
+ if (TraceHPI) { \
+ tty->print_cr(ret_fmt, result); \
+ } \
+ return result; \
+ }
+
+
+
+#define HPIDECL_VOID(name, names, intf, func, arg_type, arg_print, arg) \
+ inline void hpi::name arg_type { \
+ if (TraceHPI) { \
+ tty->print("hpi::" names "("); \
+ tty->print arg_print ; \
+ tty->print_cr(") = void"); \
+ } \
+ (*intf->func) arg ; \
+ }
+
+
+// The macro calls below expand into
+//   inline char * hpi::native_path(...) { inlined_body; }
+// etc.
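+//
+// For example, the HPIDECL(open, ...) call below expands to roughly:
+//
+//   inline int hpi::open(const char *name, int mode, int perm) {
+//     if (TraceHPI) {
+//       tty->print("hpi::open(");
+//       tty->print("name = %s, mode = %d, perm = %d", name, mode, perm);
+//       tty->print(") = ");
+//     }
+//     int result = (*_file->Open)(name, mode, perm);
+//     if (TraceHPI) {
+//       tty->print_cr("%d", result);
+//     }
+//     return result;
+//   }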
+
+// HPI_FileInterface
+
+HPIDECL(native_path, "native_path", _file, NativePath, char *, "%s",
+ (char *path),
+ ("path = %s", path),
+ (path));
+
+HPIDECL(file_type, "file_type", _file, FileType, int, "%d",
+ (const char *path),
+ ("path = %s", path),
+ (path));
+
+HPIDECL(open, "open", _file, Open, int, "%d",
+ (const char *name, int mode, int perm),
+ ("name = %s, mode = %d, perm = %d", name, mode, perm),
+ (name, mode, perm));
+
+HPIDECL(lseek, "seek", _file, Seek, jlong, "(a jlong)",
+ (int fd, jlong off, int whence),
+ ("fd = %d, off = (a jlong), whence = %d", fd, /* off, */ whence),
+ (fd, off, whence));
+
+HPIDECL(ftruncate, "ftruncate", _file, SetLength, int, "%d",
+ (int fd, jlong length),
+ ("fd = %d, length = (a jlong)", fd /*, length */),
+ (fd, length));
+
+HPIDECL(fsync, "fsync", _file, Sync, int, "%d",
+ (int fd),
+ ("fd = %d", fd),
+ (fd));
+
+HPIDECL(available, "available", _file, Available, int, "%d",
+ (int fd, jlong *bytes),
+ ("fd = %d, bytes = %p", fd, bytes),
+ (fd, bytes));
+
+HPIDECL(fsize, "fsize", _file, FileSizeFD, int, "%d",
+ (int fd, jlong *size),
+ ("fd = %d, size = %p", fd, size),
+ (fd, size));
+
+// HPI_LibraryInterface
+HPIDECL_VOID(dll_build_name, "dll_build_name", _library, BuildLibName,
+ (char *buf, int buf_len, char *path, const char *name),
+ ("buf = %p, buflen = %d, path = %s, name = %s",
+ buf, buf_len, path, name),
+ (buf, buf_len, path, name));
+
+VM_HPIDECL(dll_load, "dll_load", os::dll_load,
+ void *, "(void *)%p",
+ (const char *name, char *ebuf, int ebuflen),
+ ("name = %s, ebuf = %p, ebuflen = %d", name, ebuf, ebuflen),
+ (name, ebuf, ebuflen));
+
+HPIDECL_VOID(dll_unload, "dll_unload", _library, UnloadLibrary,
+ (void *lib),
+ ("lib = %p", lib),
+ (lib));
+
+HPIDECL(dll_lookup, "dll_lookup", _library, FindLibraryEntry, void *, "%p",
+ (void *lib, const char *name),
+ ("lib = %p, name = %s", lib, name),
+ (lib, name));
+
+// HPI_SystemInterface
+HPIDECL(lasterror, "lasterror", _system, GetLastErrorString, int, "%d",
+ (char *buf, int len),
+ ("buf = %p, len = %d", buf, len),
+ (buf, len));
diff --git a/src/share/vm/runtime/icache.cpp b/src/share/vm/runtime/icache.cpp
new file mode 100644
index 000000000..bafc17b60
--- /dev/null
+++ b/src/share/vm/runtime/icache.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_icache.cpp.incl"
+
+// The flush stub function address
+AbstractICache::flush_icache_stub_t AbstractICache::_flush_icache_stub = NULL;
+
+void AbstractICache::initialize() {
+ // Making this stub must be FIRST use of assembler
+ ResourceMark rm;
+
+ BufferBlob* b = BufferBlob::create("flush_icache_stub", ICache::stub_size);
+ CodeBuffer c(b->instructions_begin(), b->instructions_size());
+
+ ICacheStubGenerator g(&c);
+ g.generate_icache_flush(&_flush_icache_stub);
+
+ // The first use of flush_icache_stub must apply it to itself.
+ // The StubCodeMark destructor in generate_icache_flush will
+ // call Assembler::flush, which in turn will call invalidate_range,
+ // which will in turn call the flush stub. Thus we don't need an
+ // explicit call to invalidate_range here. This assumption is
+ // checked in invalidate_range.
+}
+
+void AbstractICache::call_flush_stub(address start, int lines) {
+ // The business with the magic number is just a little security.
+ // We cannot call the flush stub when generating the flush stub
+ // because it isn't there yet. So, the stub also returns its third
+ // parameter. This is a cheap check that the stub was really executed.
+ static int magic = 0xbaadbabe;
+
+ int auto_magic = magic; // Make a local copy to avoid race condition
+ int r = (*_flush_icache_stub)(start, lines, auto_magic);
+ guarantee(r == auto_magic, "flush stub routine did not execute");
+ ++magic;
+}
+
+void AbstractICache::invalidate_word(address addr) {
+ // Because this is called for instruction patching on the fly, long after
+ // bootstrapping, we execute the stub directly. Account for a 4-byte word
+ // spanning two cache lines by computing a start line address by rounding
+ // addr down to a line_size boundary, and an end line address by adding
+ // the word size - 1 and rounding the result down to a line_size boundary.
+ // If we just added word size, we'd mistakenly flush the next cache line
+ // if the word to be flushed started in the last 4 bytes of the line.
+ // Doing that would segv if the next line weren't mapped.
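+ // For example (assuming a hypothetical line_size of 32): a word starting at
+ // address 0x103E gives start_line = 0x103E & ~31 = 0x1020 and
+ // end_line = (0x103E + 3) & ~31 = 0x1040, so two lines are flushed; a word
+ // starting at 0x1020 gives start_line == end_line and only one line is flushed.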
+
+ const int word_size_in_bytes = 4; // Always, regardless of platform
+
+ intptr_t start_line = ((intptr_t)addr + 0) & ~(ICache::line_size - 1);
+ intptr_t end_line = ((intptr_t)addr + word_size_in_bytes - 1)
+ & ~(ICache::line_size - 1);
+ (*_flush_icache_stub)((address)start_line, start_line == end_line ? 1 : 2, 0);
+}
+
+void AbstractICache::invalidate_range(address start, int nbytes) {
+ static bool firstTime = true;
+ if (firstTime) {
+ guarantee(start == CAST_FROM_FN_PTR(address, _flush_icache_stub),
+ "first flush should be for flush stub");
+ firstTime = false;
+ return;
+ }
+ if (nbytes == 0) {
+ return;
+ }
+ // Align start address to an icache line boundary and transform
+ // nbytes to an icache line count.
+ const uint line_offset = mask_address_bits(start, ICache::line_size-1);
+ if (line_offset != 0) {
+ start -= line_offset;
+ nbytes += line_offset;
+ }
+ call_flush_stub(start, round_to(nbytes, ICache::line_size) >>
+ ICache::log2_line_size);
+}
+
+// For init.cpp
+void icache_init() {
+ ICache::initialize();
+}
diff --git a/src/share/vm/runtime/icache.hpp b/src/share/vm/runtime/icache.hpp
new file mode 100644
index 000000000..13e6e73b9
--- /dev/null
+++ b/src/share/vm/runtime/icache.hpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright 1997-2004 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Interface for updating the instruction cache. Whenever the VM modifies
+// code, part of the processor instruction cache potentially has to be flushed.
+
+// Default implementation is in icache.cpp, and can be hidden per-platform.
+// Most platforms must provide only ICacheStubGenerator::generate_icache_flush().
+// Platforms that don't require icache flushing can just nullify the public
+// members of AbstractICache in their ICache class. AbstractICache should never
+// be referenced other than by deriving the ICache class from it.
+//
+// The code for the ICache class and for generate_icache_flush() must be in
+// architecture-specific files, i.e., icache_<arch>.hpp/.cpp
+
+class AbstractICache : AllStatic {
+ public:
+ // The flush stub signature
+ typedef int (*flush_icache_stub_t)(address addr, int lines, int magic);
+
+ protected:
+ // The flush stub function address
+ static flush_icache_stub_t _flush_icache_stub;
+
+ // Call the flush stub
+ static void call_flush_stub(address start, int lines);
+
+ public:
+ enum {
+ stub_size = 0, // Size of the icache flush stub in bytes
+ line_size = 0, // Icache line size in bytes
+ log2_line_size = 0 // log2(line_size)
+ };
+
+ static void initialize();
+ static void invalidate_word(address addr);
+ static void invalidate_range(address start, int nbytes);
+};
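+
+// For illustration only (a hypothetical icache_<arch>.hpp): a platform that
+// needs no flushing could nullify the public members roughly as follows.
+//
+//   class ICache : public AbstractICache {
+//    public:
+//     enum {
+//       stub_size      = 0,   // no stub is emitted
+//       line_size      = 0,
+//       log2_line_size = 0
+//     };
+//     static void initialize() {}
+//     static void invalidate_word(address addr) {}
+//     static void invalidate_range(address start, int nbytes) {}
+//   };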
+
+
+// Must be included before the definition of ICacheStubGenerator
+// because ICacheStubGenerator uses ICache definitions.
+
+#include "incls/_icache_pd.hpp.incl"
+
+
+class ICacheStubGenerator : public StubCodeGenerator {
+ public:
+ ICacheStubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
+
+ // Generate the icache flush stub.
+ //
+ // Since we cannot flush the cache when this stub is generated,
+ // it must be generated first, and just to be sure, we do extra
+ // work to allow a check that these instructions got executed.
+ //
+ // The flush stub has three parameters (see flush_icache_stub_t).
+ //
+ // addr - Start address, must be aligned at log2_line_size
+ // lines - Number of line_size icache lines to flush
+ // magic - Magic number copied to result register to make sure
+ // the stub executed properly
+ //
+ // A template for generate_icache_flush is
+ //
+ // #define __ _masm->
+ //
+ // void ICacheStubGenerator::generate_icache_flush(
+ // ICache::flush_icache_stub_t* flush_icache_stub
+ // ) {
+ // StubCodeMark mark(this, "ICache", "flush_icache_stub");
+ //
+ // address start = __ pc();
+ //
+ // // emit flush stub asm code
+ //
+ // // Must be set here so StubCodeMark destructor can call the flush stub.
+ // *flush_icache_stub = (ICache::flush_icache_stub_t)start;
+ // };
+ //
+ // #undef __
+ //
+ // The first use of flush_icache_stub must apply it to itself. The
+ // StubCodeMark destructor in generate_icache_flush will call Assembler::flush,
+ // which in turn will call invalidate_range (see asm/assembler.cpp), which
+ // in turn will call the flush stub *before* generate_icache_flush returns.
+ // The usual method of having generate_icache_flush return the address of the
+ // stub to its caller, which would then, e.g., store that address in
+ // flush_icache_stub, won't work. generate_icache_flush must itself set
+ // flush_icache_stub to the address of the stub it generates before
+ // the StubCodeMark destructor is invoked.
+
+ void generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub);
+};
diff --git a/src/share/vm/runtime/init.cpp b/src/share/vm/runtime/init.cpp
new file mode 100644
index 000000000..b93099ecb
--- /dev/null
+++ b/src/share/vm/runtime/init.cpp
@@ -0,0 +1,160 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_init.cpp.incl"
+
+// Initialization done by VM thread in vm_init_globals()
+void check_ThreadShadow();
+void check_basic_types();
+void eventlog_init();
+void mutex_init();
+void chunkpool_init();
+void perfMemory_init();
+
+// Initialization done by Java thread in init_globals()
+void management_init();
+void vtune_init();
+void bytecodes_init();
+void classLoader_init();
+void codeCache_init();
+void VM_Version_init();
+void JDK_Version_init();
+void stubRoutines_init1();
+jint universe_init(); // dependent on codeCache_init and stubRoutines_init
+void interpreter_init(); // before any methods loaded
+void invocationCounter_init(); // before any methods loaded
+void marksweep_init();
+void accessFlags_init();
+void templateTable_init();
+void InterfaceSupport_init();
+void universe2_init(); // dependent on codeCache_init and stubRoutines_init
+void referenceProcessor_init();
+void jni_handles_init();
+void vmStructs_init();
+
+void vtableStubs_init();
+void InlineCacheBuffer_init();
+void compilerOracle_init();
+void compilationPolicy_init();
+
+
+// Initialization after compiler initialization
+bool universe_post_init(); // must happen after compiler_init
+void javaClasses_init(); // must happen after vtable initialization
+void stubRoutines_init2(); // note: StubRoutines need 2-phase init
+
+// Do not disable thread-local-storage, as it is important for some
+// JNI/JVM/JVMTI functions and signal handlers to work properly
+// during VM shutdown
+void perfMemory_exit();
+void ostream_exit();
+
+void vm_init_globals() {
+ check_ThreadShadow();
+ check_basic_types();
+ eventlog_init();
+ mutex_init();
+ chunkpool_init();
+ perfMemory_init();
+}
+
+
+jint init_globals() {
+ HandleMark hm;
+ management_init();
+ vtune_init();
+ bytecodes_init();
+ classLoader_init();
+ codeCache_init();
+ VM_Version_init();
+ JDK_Version_init();
+ stubRoutines_init1();
+ jint status = universe_init(); // dependent on codeCache_init and stubRoutines_init
+ if (status != JNI_OK)
+ return status;
+
+ interpreter_init(); // before any methods loaded
+ invocationCounter_init(); // before any methods loaded
+ marksweep_init();
+ accessFlags_init();
+ templateTable_init();
+ InterfaceSupport_init();
+ SharedRuntime::generate_stubs();
+ universe2_init(); // dependent on codeCache_init and stubRoutines_init
+ referenceProcessor_init();
+ jni_handles_init();
+#ifndef VM_STRUCTS_KERNEL
+ vmStructs_init();
+#endif // VM_STRUCTS_KERNEL
+
+ vtableStubs_init();
+ InlineCacheBuffer_init();
+ compilerOracle_init();
+ compilationPolicy_init();
+ VMRegImpl::set_regName();
+
+ if (!universe_post_init()) {
+ return JNI_ERR;
+ }
+ javaClasses_init(); // must happen after vtable initialization
+ stubRoutines_init2(); // note: StubRoutines need 2-phase init
+
+ // Although we'd like to, we can't easily do a heap verify
+ // here because the main thread isn't yet a JavaThread, so
+ // its TLAB may not be made parseable from the usual interfaces.
+ if (VerifyBeforeGC && !UseTLAB &&
+ Universe::heap()->total_collections() >= VerifyGCStartAt) {
+ Universe::heap()->prepare_for_verify();
+ Universe::verify(); // make sure we're starting with a clean slate
+ }
+
+ return JNI_OK;
+}
+
+
+void exit_globals() {
+ static bool destructorsCalled = false;
+ if (!destructorsCalled) {
+ destructorsCalled = true;
+ perfMemory_exit();
+ if (PrintSafepointStatistics) {
+ // Print the collected safepoint statistics.
+ SafepointSynchronize::print_stat_on_exit();
+ }
+ ostream_exit();
+ }
+}
+
+
+static bool _init_completed = false;
+
+bool is_init_completed() {
+ return _init_completed;
+}
+
+
+void set_init_completed() {
+ _init_completed = true;
+}
diff --git a/src/share/vm/runtime/init.hpp b/src/share/vm/runtime/init.hpp
new file mode 100644
index 000000000..f76be7765
--- /dev/null
+++ b/src/share/vm/runtime/init.hpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright 1997-2004 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// init_globals replaces C++ global objects so we can use the standard linker
+// to link Delta (which is at least twice as fast as using the GNU C++ linker).
+// Also, init.cpp gives explicit control over the sequence of initialization.
+
+// Programming convention: instead of using a global object (e.g., "Foo foo;"),
+// use "Foo* foo;", create a function init_foo() in foo.cpp, and add a call
+// to init_foo in init.cpp.
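+//
+// For example (Foo and foo.cpp are hypothetical):
+//
+//   Foo* foo = NULL;                       // instead of "Foo foo;"
+//   void init_foo() { foo = new Foo(); }   // defined in foo.cpp
+//
+// and init.cpp calls init_foo() at the appropriate point in init_globals().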
+
+jint init_globals(); // call constructors at startup (main Java thread)
+void vm_init_globals(); // call constructors at startup (VM thread)
+void exit_globals(); // call destructors before exit
+
+bool is_init_completed(); // returns true when bootstrapping has completed
+void set_init_completed(); // set basic init to completed
diff --git a/src/share/vm/runtime/interfaceSupport.cpp b/src/share/vm/runtime/interfaceSupport.cpp
new file mode 100644
index 000000000..9a6267c4e
--- /dev/null
+++ b/src/share/vm/runtime/interfaceSupport.cpp
@@ -0,0 +1,269 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_interfaceSupport.cpp.incl"
+
+
+// Implementation of InterfaceSupport
+
+#ifdef ASSERT
+
+long InterfaceSupport::_number_of_calls = 0;
+long InterfaceSupport::_scavenge_alot_counter = 1;
+long InterfaceSupport::_fullgc_alot_counter = 1;
+long InterfaceSupport::_fullgc_alot_invocation = 0;
+
+Histogram* RuntimeHistogram;
+
+RuntimeHistogramElement::RuntimeHistogramElement(const char* elementName) {
+ static volatile jint RuntimeHistogram_lock = 0;
+ _name = elementName;
+ uintx count = 0;
+
+ while (Atomic::cmpxchg(1, &RuntimeHistogram_lock, 0) != 0) {
+ while (OrderAccess::load_acquire(&RuntimeHistogram_lock) != 0) {
+ count +=1;
+ if ( (WarnOnStalledSpinLock > 0)
+ && (count % WarnOnStalledSpinLock == 0)) {
+ warning("RuntimeHistogram_lock seems to be stalled");
+ }
+ }
+ }
+
+ if (RuntimeHistogram == NULL) {
+ RuntimeHistogram = new Histogram("VM Runtime Call Counts",200);
+ }
+
+ RuntimeHistogram->add_element(this);
+ Atomic::dec(&RuntimeHistogram_lock);
+}
+
+void InterfaceSupport::trace(const char* result_type, const char* header) {
+ tty->print_cr("%6d %s", _number_of_calls, header);
+}
+
+void InterfaceSupport::gc_alot() {
+ Thread *thread = Thread::current();
+ if (thread->is_VM_thread()) return; // Avoid concurrent calls
+ // Check for new, not quite initialized thread. A thread in new mode cannot initiate a GC.
+ JavaThread *current_thread = (JavaThread *)thread;
+ if (current_thread->active_handles() == NULL) return;
+
+ if (is_init_completed()) {
+
+ if (++_fullgc_alot_invocation < FullGCALotStart) {
+ return;
+ }
+
+ // Use this line if you want to block at a specific point,
+ // e.g. one number_of_calls/scavenge/gc before you get into problems
+ if (FullGCALot) _fullgc_alot_counter--;
+
+ // Check if we should force a full gc
+ if (_fullgc_alot_counter == 0) {
+ // Release dummy so objects are forced to move
+ if (!Universe::release_fullgc_alot_dummy()) {
+ warning("FullGCALot: Unable to release more dummies at bottom of heap");
+ }
+ HandleMark hm(thread);
+ Universe::heap()->collect(GCCause::_full_gc_alot);
+ unsigned int invocations = Universe::heap()->total_full_collections();
+ // Compute new interval
+ if (FullGCALotInterval > 1) {
+ _fullgc_alot_counter = 1+(long)((double)FullGCALotInterval*os::random()/(max_jint+1.0));
+ if (PrintGCDetails && Verbose) {
+ tty->print_cr("Full gc no: %u\tInterval: %d", invocations,
+ _fullgc_alot_counter);
+ }
+ } else {
+ _fullgc_alot_counter = 1;
+ }
+ // Print progress message
+ if (invocations % 100 == 0) {
+ if (PrintGCDetails && Verbose) tty->print_cr("Full gc no: %u", invocations);
+ }
+ } else {
+ if (ScavengeALot) _scavenge_alot_counter--;
+ // Check if we should force a scavenge
+ if (_scavenge_alot_counter == 0) {
+ HandleMark hm(thread);
+ Universe::heap()->collect(GCCause::_scavenge_alot);
+ unsigned int invocations = Universe::heap()->total_collections() - Universe::heap()->total_full_collections();
+ // Compute new interval
+ if (ScavengeALotInterval > 1) {
+ _scavenge_alot_counter = 1+(long)((double)ScavengeALotInterval*os::random()/(max_jint+1.0));
+ if (PrintGCDetails && Verbose) {
+ tty->print_cr("Scavenge no: %u\tInterval: %d", invocations,
+ _scavenge_alot_counter);
+ }
+ } else {
+ _scavenge_alot_counter = 1;
+ }
+ // Print progress message
+ if (invocations % 1000 == 0) {
+ if (PrintGCDetails && Verbose) tty->print_cr("Scavenge no: %u", invocations);
+ }
+ }
+ }
+ }
+}
+
+
+vframe* vframe_array[50];
+int walk_stack_counter = 0;
+
+void InterfaceSupport::walk_stack_from(vframe* start_vf) {
+ // walk
+ int i = 0;
+ for (vframe* f = start_vf; f; f = f->sender() ) {
+ if (i < 50) vframe_array[i++] = f;
+ }
+}
+
+
+void InterfaceSupport::walk_stack() {
+ JavaThread* thread = JavaThread::current();
+ walk_stack_counter++;
+ if (!thread->has_last_Java_frame()) return;
+ ResourceMark rm(thread);
+ RegisterMap reg_map(thread);
+ walk_stack_from(thread->last_java_vframe(&reg_map));
+}
+
+
+# ifdef ENABLE_ZAP_DEAD_LOCALS
+
+static int zap_traversals = 0;
+
+void InterfaceSupport::zap_dead_locals_old() {
+ JavaThread* thread = JavaThread::current();
+ if (zap_traversals == -1) // edit constant for debugging
+ warning("I am here");
+ int zap_frame_count = 0; // count frames to help debugging
+ for (StackFrameStream sfs(thread); !sfs.is_done(); sfs.next()) {
+ sfs.current()->zap_dead_locals(thread, sfs.register_map());
+ ++zap_frame_count;
+ }
+ ++zap_traversals;
+}
+
+# endif
+
+
+int deoptimizeAllCounter = 0;
+int zombieAllCounter = 0;
+
+
+void InterfaceSupport::zombieAll() {
+ if (is_init_completed() && zombieAllCounter > ZombieALotInterval) {
+ zombieAllCounter = 0;
+ VM_ZombieAll op;
+ VMThread::execute(&op);
+ } else {
+ zombieAllCounter++;
+ }
+}
+
+void InterfaceSupport::deoptimizeAll() {
+ if (is_init_completed() ) {
+ if (DeoptimizeALot && deoptimizeAllCounter > DeoptimizeALotInterval) {
+ deoptimizeAllCounter = 0;
+ VM_DeoptimizeAll op;
+ VMThread::execute(&op);
+ } else if (DeoptimizeRandom && (deoptimizeAllCounter & 0x1f) == (os::random() & 0x1f)) {
+ VM_DeoptimizeAll op;
+ VMThread::execute(&op);
+ }
+ }
+ deoptimizeAllCounter++;
+}
+
+
+void InterfaceSupport::stress_derived_pointers() {
+#ifdef COMPILER2
+ JavaThread *thread = JavaThread::current();
+ if (!is_init_completed()) return;
+ ResourceMark rm(thread);
+ bool found = false;
+ for (StackFrameStream sfs(thread); !sfs.is_done() && !found; sfs.next()) {
+ CodeBlob* cb = sfs.current()->cb();
+ if (cb != NULL && cb->oop_maps() ) {
+ // Find oopmap for current method
+ OopMap* map = cb->oop_map_for_return_address(sfs.current()->pc());
+ assert(map != NULL, "no oopmap found for pc");
+ found = map->has_derived_pointer();
+ }
+ }
+ if (found) {
+ // $$$ Not sure what to do here.
+ /*
+ Scavenge::invoke(0);
+ */
+ }
+#endif
+}
+
+
+void InterfaceSupport::verify_stack() {
+ JavaThread* thread = JavaThread::current();
+ ResourceMark rm(thread);
+ // disabled because it throws warnings that oop maps should only be accessed
+ // in VM thread or during debugging
+
+ if (!thread->has_pending_exception()) {
+ // verification does not work if there are pending exceptions
+ StackFrameStream sfs(thread);
+ CodeBlob* cb = sfs.current()->cb();
+ // In case of exceptions we might not have a runtime_stub on
+ // top of stack, hence, all callee-saved registers are not going
+ // to be setup correctly, hence, we cannot do stack verify
+ if (cb != NULL && !(cb->is_runtime_stub() || cb->is_uncommon_trap_stub())) return;
+
+ for (; !sfs.is_done(); sfs.next()) {
+ sfs.current()->verify(sfs.register_map());
+ }
+ }
+}
+
+
+void InterfaceSupport::verify_last_frame() {
+ JavaThread* thread = JavaThread::current();
+ ResourceMark rm(thread);
+ RegisterMap reg_map(thread);
+ frame fr = thread->last_frame();
+ fr.verify(&reg_map);
+}
+
+
+#endif // ASSERT
+
+
+void InterfaceSupport_init() {
+#ifdef ASSERT
+ if (ScavengeALot || FullGCALot) {
+ srand(ScavengeALotInterval * FullGCALotInterval);
+ }
+#endif
+}
diff --git a/src/share/vm/runtime/interfaceSupport.hpp b/src/share/vm/runtime/interfaceSupport.hpp
new file mode 100644
index 000000000..0d5db3039
--- /dev/null
+++ b/src/share/vm/runtime/interfaceSupport.hpp
@@ -0,0 +1,568 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Wrapper for all entry points to the virtual machine.
+// The HandleMarkCleaner is a faster version of HandleMark.
+// It relies on the fact that there is a HandleMark further
+// down the stack (in JavaCalls::call_helper), and just resets
+// to the saved values in that HandleMark.
+
+class HandleMarkCleaner: public StackObj {
+ private:
+ Thread* _thread;
+ public:
+ HandleMarkCleaner(Thread* thread) {
+ _thread = thread;
+ _thread->last_handle_mark()->push();
+ }
+ ~HandleMarkCleaner() {
+ _thread->last_handle_mark()->pop_and_restore();
+ }
+
+ private:
+ inline void* operator new(size_t size, void* ptr) {
+ return ptr;
+ }
+};
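+
+// For illustration only (a hypothetical VM-internal scope): the pattern is simply
+//
+//   {
+//     HandleMarkCleaner __hm(thread);  // push() on the thread's last HandleMark
+//     // ... allocate handles freely ...
+//   }                                  // pop_and_restore() releases them again
+//
+// See the __ENTRY macro below, which is where this class is normally used.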
+
+// InterfaceSupport provides functionality used by the __LEAF and __ENTRY
+// macros. These macros are used to guard entry points into the VM and
+// perform checks upon leave of the VM.
+
+
+class InterfaceSupport: AllStatic {
+# ifdef ASSERT
+ public:
+ static long _scavenge_alot_counter;
+ static long _fullgc_alot_counter;
+ static long _number_of_calls;
+ static long _fullgc_alot_invocation;
+
+ // tracing
+ static void trace(const char* result_type, const char* header);
+
+ // Helper methods used to implement +ScavengeALot and +FullGCALot
+ static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
+ static void gc_alot();
+
+ static void walk_stack_from(vframe* start_vf);
+ static void walk_stack();
+
+# ifdef ENABLE_ZAP_DEAD_LOCALS
+ static void zap_dead_locals_old();
+# endif
+
+ static void zombieAll();
+ static void deoptimizeAll();
+ static void stress_derived_pointers();
+ static void verify_stack();
+ static void verify_last_frame();
+# endif
+
+ public:
+ // OS dependent stuff
+ #include "incls/_interfaceSupport_pd.hpp.incl"
+};
+
+
+// Basic class for all thread transition classes.
+
+class ThreadStateTransition : public StackObj {
+ protected:
+ JavaThread* _thread;
+ public:
+ ThreadStateTransition(JavaThread *thread) {
+ _thread = thread;
+ assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
+ }
+
+ // Change threadstate in a manner, so safepoint can detect changes.
+ // Time-critical: called on exit from every runtime routine
+ static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
+ assert(from != _thread_in_Java, "use transition_from_java");
+ assert(from != _thread_in_native, "use transition_from_native");
+ assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
+ assert(thread->thread_state() == from, "coming from wrong thread state");
+ // Change to transition state (assumes total store ordering! -Urs)
+ thread->set_thread_state((JavaThreadState)(from + 1));
+
+ // Make sure new state is seen by VM thread
+ if (os::is_MP()) {
+ if (UseMembar) {
+ // Force a fence between the write above and read below
+ OrderAccess::fence();
+ } else {
+ // store to serialize page so VM thread can do pseudo remote membar
+ os::write_memory_serialize_page(thread);
+ }
+ }
+
+ if (SafepointSynchronize::do_call_back()) {
+ SafepointSynchronize::block(thread);
+ }
+ thread->set_thread_state(to);
+
+ CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
+ }
+
+ // transition_and_fence must be used on any thread state transition
+ // where there might not be a Java call stub on the stack, in
+ // particular on Windows where the Structured Exception Handler is
+ // set up in the call stub. os::write_memory_serialize_page() can
+ // fault and we can't recover from it on Windows without a SEH in
+ // place.
+ static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
+ assert(thread->thread_state() == from, "coming from wrong thread state");
+ assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
+ // Change to transition state (assumes total store ordering! -Urs)
+ thread->set_thread_state((JavaThreadState)(from + 1));
+
+ // Make sure new state is seen by VM thread
+ if (os::is_MP()) {
+ if (UseMembar) {
+ // Force a fence between the write above and read below
+ OrderAccess::fence();
+ } else {
+ // Must use this rather than serialization page in particular on Windows
+ InterfaceSupport::serialize_memory(thread);
+ }
+ }
+
+ if (SafepointSynchronize::do_call_back()) {
+ SafepointSynchronize::block(thread);
+ }
+ thread->set_thread_state(to);
+
+ CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
+ }
+
+ // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
+ // never block on entry to the VM here; blocking would break the code, since e.g.
+ // preserved arguments have not been set up.
+ static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
+ assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
+ thread->set_thread_state(to);
+ }
+
+ static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
+ assert((to & 1) == 0, "odd numbers are transition states");
+ assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
+ // Change to transition state (assumes total store ordering! -Urs)
+ thread->set_thread_state(_thread_in_native_trans);
+
+ // Make sure new state is seen by GC thread
+ if (os::is_MP()) {
+ if (UseMembar) {
+ // Force a fence between the write above and read below
+ OrderAccess::fence();
+ } else {
+ // Must use this rather than serialization page in particular on Windows
+ InterfaceSupport::serialize_memory(thread);
+ }
+ }
+
+ // We never install asynchronous exceptions when coming (back) in
+ // to the runtime from native code because the runtime is not set
+ // up to handle exceptions floating around at arbitrary points.
+ if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
+ JavaThread::check_safepoint_and_suspend_for_native_trans(thread);
+
+ // Clear unhandled oops anywhere where we could block, even if we don't.
+ CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
+ }
+
+ thread->set_thread_state(to);
+ }
+ protected:
+ void trans(JavaThreadState from, JavaThreadState to) { transition(_thread, from, to); }
+ void trans_from_java(JavaThreadState to) { transition_from_java(_thread, to); }
+ void trans_from_native(JavaThreadState to) { transition_from_native(_thread, to); }
+ void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
+};
+
+
+class ThreadInVMfromJava : public ThreadStateTransition {
+ public:
+ ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
+ trans_from_java(_thread_in_vm);
+ }
+ ~ThreadInVMfromJava() {
+ trans(_thread_in_vm, _thread_in_Java);
+ // Check for pending async. exceptions or suspends.
+ if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
+ }
+};
+
+
+class ThreadInVMfromUnknown {
+ private:
+ JavaThread* _thread;
+ public:
+ ThreadInVMfromUnknown() : _thread(NULL) {
+ Thread* t = Thread::current();
+ if (t->is_Java_thread()) {
+ JavaThread* t2 = (JavaThread*) t;
+ if (t2->thread_state() == _thread_in_native) {
+ _thread = t2;
+ ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
+ // Used to have a HandleMarkCleaner but that is dangerous as
+ // it could free a handle in our (indirect, nested) caller.
+ // We expect any handles will be short lived and figure we
+ // don't need an actual HandleMark.
+ }
+ }
+ }
+ ~ThreadInVMfromUnknown() {
+ if (_thread) {
+ ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
+ }
+ }
+};
+
+
+class ThreadInVMfromNative : public ThreadStateTransition {
+ public:
+ ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
+ trans_from_native(_thread_in_vm);
+ }
+ ~ThreadInVMfromNative() {
+ trans_and_fence(_thread_in_vm, _thread_in_native);
+ }
+};
+
+
+class ThreadToNativeFromVM : public ThreadStateTransition {
+ public:
+ ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
+ // We are leaving the VM at this point and going directly to native code.
+ // Block, if we are in the middle of a safepoint synchronization.
+ assert(!thread->owns_locks(), "must release all locks when leaving VM");
+ thread->frame_anchor()->make_walkable(thread);
+ trans_and_fence(_thread_in_vm, _thread_in_native);
+ // Check for pending async. exceptions or suspends.
+ if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
+ }
+
+ ~ThreadToNativeFromVM() {
+ trans_from_native(_thread_in_vm);
+ // We don't need to clear_walkable because it will happen automagically when we return to java
+ }
+};
+
+
+class ThreadBlockInVM : public ThreadStateTransition {
+ public:
+ ThreadBlockInVM(JavaThread *thread)
+ : ThreadStateTransition(thread) {
+ // Once we are blocked vm expects stack to be walkable
+ thread->frame_anchor()->make_walkable(thread);
+ trans_and_fence(_thread_in_vm, _thread_blocked);
+ }
+ ~ThreadBlockInVM() {
+ trans_and_fence(_thread_blocked, _thread_in_vm);
+ // We don't need to clear_walkable because it will happen automagically when we return to java
+ }
+};
+
+
+// This special transition class is only used to prevent asynchronous exceptions
+// from being installed on vm exit in situations where we can't tolerate them.
+// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
+class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
+ public:
+ ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
+ trans_from_java(_thread_in_vm);
+ }
+ ~ThreadInVMfromJavaNoAsyncException() {
+ trans(_thread_in_vm, _thread_in_Java);
+ // NOTE: We do not check for pending async. exceptions.
+ // If we did and moved the pending async exception over into the
+ // pending exception field, we would need to deopt (currently C2
+ // only). However, to do so would require that we transition back
+ // to the _thread_in_vm state. Instead we postpone the handling of
+ // the async exception.
+
+ // Check for pending suspends only.
+ if (_thread->has_special_runtime_exit_condition())
+ _thread->handle_special_runtime_exit_condition(false);
+ }
+};
+
+// Debug class instantiated in the JRT_ENTRY and IRT_ENTRY macros.
+// Can be used to verify properties on enter/exit of the VM.
+
+#ifdef ASSERT
+class VMEntryWrapper {
+ public:
+ VMEntryWrapper() {
+ if (VerifyLastFrame) {
+ InterfaceSupport::verify_last_frame();
+ }
+ }
+
+ ~VMEntryWrapper() {
+ InterfaceSupport::check_gc_alot();
+ if (WalkStackALot) {
+ InterfaceSupport::walk_stack();
+ }
+#ifdef ENABLE_ZAP_DEAD_LOCALS
+ if (ZapDeadLocalsOld) {
+ InterfaceSupport::zap_dead_locals_old();
+ }
+#endif
+#ifdef COMPILER2
+ // This option is not used by Compiler 1
+ if (StressDerivedPointers) {
+ InterfaceSupport::stress_derived_pointers();
+ }
+#endif
+ if (DeoptimizeALot || DeoptimizeRandom) {
+ InterfaceSupport::deoptimizeAll();
+ }
+ if (ZombieALot) {
+ InterfaceSupport::zombieAll();
+ }
+ // do verification AFTER potential deoptimization
+ if (VerifyStack) {
+ InterfaceSupport::verify_stack();
+ }
+
+ }
+};
+
+
+class VMNativeEntryWrapper {
+ public:
+ VMNativeEntryWrapper() {
+ if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
+ }
+
+ ~VMNativeEntryWrapper() {
+ if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
+ }
+};
+
+#endif
+
+
+// VM-internal runtime interface support
+
+#ifdef ASSERT
+
+class RuntimeHistogramElement : public HistogramElement {
+ public:
+ RuntimeHistogramElement(const char* name);
+};
+
+#define TRACE_CALL(result_type, header) \
+ InterfaceSupport::_number_of_calls++; \
+ if (TraceRuntimeCalls) \
+ InterfaceSupport::trace(#result_type, #header); \
+ if (CountRuntimeCalls) { \
+ static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
+ if (e != NULL) e->increment_count(); \
+ }
+#else
+#define TRACE_CALL(result_type, header) \
+ /* do nothing */
+#endif
+
+
+// LEAF routines do not lock, GC or throw exceptions
+
+#define __LEAF(result_type, header) \
+ TRACE_CALL(result_type, header) \
+ debug_only(NoHandleMark __hm;) \
+ /* begin of body */
+
+
+// ENTRY routines may lock, GC and throw exceptions
+
+#define __ENTRY(result_type, header, thread) \
+ TRACE_CALL(result_type, header) \
+ HandleMarkCleaner __hm(thread); \
+ Thread* THREAD = thread; \
+ /* begin of body */
+
+
+// QUICK_ENTRY routines behave like ENTRY but without a handle mark
+
+#define __QUICK_ENTRY(result_type, header, thread) \
+ TRACE_CALL(result_type, header) \
+ debug_only(NoHandleMark __hm;) \
+ Thread* THREAD = thread; \
+ /* begin of body */
+
+
+// Definitions for IRT (Interpreter Runtime)
+// (thread is an argument passed in to all these routines)
+
+#define IRT_ENTRY(result_type, header) \
+ result_type header { \
+ ThreadInVMfromJava __tiv(thread); \
+ __ENTRY(result_type, header, thread) \
+ debug_only(VMEntryWrapper __vew;)
+
+
+#define IRT_LEAF(result_type, header) \
+ result_type header { \
+ __LEAF(result_type, header) \
+ debug_only(No_Safepoint_Verifier __nspv(true);)
+
+
+#define IRT_ENTRY_NO_ASYNC(result_type, header) \
+ result_type header { \
+ ThreadInVMfromJavaNoAsyncException __tiv(thread); \
+ __ENTRY(result_type, header, thread) \
+ debug_only(VMEntryWrapper __vew;)
+
+// Another special case for nmethod_entry_point, so the nmethod that the
+// interpreter is about to branch to doesn't get flushed before we
+// branch to its interpreter_entry_point. Skip stress testing here too.
+// Also we don't allow async exceptions because it is just too painful.
+#define IRT_ENTRY_FOR_NMETHOD(result_type, header) \
+ result_type header { \
+ nmethodLocker _nmlock(nm); \
+ ThreadInVMfromJavaNoAsyncException __tiv(thread); \
+ __ENTRY(result_type, header, thread)
+
+#define IRT_END }
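+
+// For illustration only (Example::trace_bytecode is hypothetical): the macros
+// above are used to define a runtime entry roughly as
+//
+//   IRT_ENTRY(void, Example::trace_bytecode(JavaThread* thread))
+//     // body runs in _thread_in_vm, with THREAD bound and a HandleMarkCleaner
+//   IRT_END
+//
+// The header must take the current JavaThread as an argument named "thread",
+// since the macro body refers to it.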
+
+
+// Definitions for JRT (Java (Compiler/Shared) Runtime)
+
+#define JRT_ENTRY(result_type, header) \
+ result_type header { \
+ ThreadInVMfromJava __tiv(thread); \
+ __ENTRY(result_type, header, thread) \
+ debug_only(VMEntryWrapper __vew;)
+
+
+#define JRT_LEAF(result_type, header) \
+ result_type header { \
+ __LEAF(result_type, header) \
+ debug_only(JRT_Leaf_Verifier __jlv;)
+
+
+#define JRT_ENTRY_NO_ASYNC(result_type, header) \
+ result_type header { \
+ ThreadInVMfromJavaNoAsyncException __tiv(thread); \
+ __ENTRY(result_type, header, thread) \
+ debug_only(VMEntryWrapper __vew;)
+
+// Same as JRT Entry but allows for return value after the safepoint
+// to get back into Java from the VM
+#define JRT_BLOCK_ENTRY(result_type, header) \
+ result_type header { \
+ TRACE_CALL(result_type, header) \
+ HandleMarkCleaner __hm(thread);
+
+#define JRT_BLOCK \
+ { \
+ ThreadInVMfromJava __tiv(thread); \
+ Thread* THREAD = thread; \
+ debug_only(VMEntryWrapper __vew;)
+
+#define JRT_BLOCK_END }
+
+#define JRT_END }
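+
+// For illustration only (Example::resolve_helper is hypothetical): a blocked
+// entry that must compute its result before returning to Java looks roughly like
+//
+//   JRT_BLOCK_ENTRY(address, Example::resolve_helper(JavaThread* thread))
+//     address result;
+//     JRT_BLOCK
+//       // _thread_in_vm here: may safepoint, GC, or throw
+//       result = ...;  // compute while inside the VM
+//     JRT_BLOCK_END
+//     return result;   // executed after the transition block has been left
+//   JRT_END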
+
+// Definitions for JNI
+
+#define JNI_ENTRY(result_type, header) \
+ JNI_ENTRY_NO_PRESERVE(result_type, header) \
+ WeakPreserveExceptionMark __wem(thread);
+
+#define JNI_ENTRY_NO_PRESERVE(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+ assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
+ ThreadInVMfromNative __tiv(thread); \
+ debug_only(VMNativeEntryWrapper __vew;) \
+ __ENTRY(result_type, header, thread)
+
+
+// Ensure that the VMNativeEntryWrapper constructor, which can cause
+// a GC, is called outside the NoHandleMark (set via __QUICK_ENTRY).
+#define JNI_QUICK_ENTRY(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+ assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
+ ThreadInVMfromNative __tiv(thread); \
+ debug_only(VMNativeEntryWrapper __vew;) \
+ __QUICK_ENTRY(result_type, header, thread)
+
+
+#define JNI_LEAF(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+ assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
+ __LEAF(result_type, header)
+
+
+// Close the routine and the extern "C"
+#define JNI_END } }
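+
+// For illustration only (jni_ExampleGetVersion is hypothetical): a JNI
+// implementation function is written roughly as
+//
+//   JNI_ENTRY(jint, jni_ExampleGetVersion(JNIEnv *env))
+//     return JNI_VERSION_1_4;
+//   JNI_END
+//
+// The macro supplies the extern "C" linkage, the native-to-VM thread state
+// transition, and the HandleMarkCleaner; JNI_END closes both the function
+// body and the extern "C" block.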
+
+
+
+// Definitions for JVM
+
+#define JVM_ENTRY(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+ ThreadInVMfromNative __tiv(thread); \
+ debug_only(VMNativeEntryWrapper __vew;) \
+ __ENTRY(result_type, header, thread)
+
+
+#define JVM_ENTRY_NO_ENV(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); \
+ ThreadInVMfromNative __tiv(thread); \
+ debug_only(VMNativeEntryWrapper __vew;) \
+ __ENTRY(result_type, header, thread)
+
+
+#define JVM_QUICK_ENTRY(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+ ThreadInVMfromNative __tiv(thread); \
+ debug_only(VMNativeEntryWrapper __vew;) \
+ __QUICK_ENTRY(result_type, header, thread)
+
+
+#define JVM_LEAF(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ VM_Exit::block_if_vm_exited(); \
+ __LEAF(result_type, header)
+
+
+#define JVM_END } }
diff --git a/src/share/vm/runtime/java.cpp b/src/share/vm/runtime/java.cpp
new file mode 100644
index 000000000..f7ed8939e
--- /dev/null
+++ b/src/share/vm/runtime/java.cpp
@@ -0,0 +1,593 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_java.cpp.incl"
+
+HS_DTRACE_PROBE_DECL(hotspot, vm__shutdown);
+
+#ifndef PRODUCT
+
+// Statistics printing (method invocation histogram)
+
+GrowableArray<methodOop>* collected_invoked_methods;
+
+void collect_invoked_methods(methodOop m) {
+ if (m->invocation_count() + m->compiled_invocation_count() >= 1 ) {
+ collected_invoked_methods->push(m);
+ }
+}
+
+
+GrowableArray<methodOop>* collected_profiled_methods;
+
+void collect_profiled_methods(methodOop m) {
+ methodHandle mh(Thread::current(), m);
+ if ((m->method_data() != NULL) &&
+ (PrintMethodData || CompilerOracle::should_print(mh))) {
+ collected_profiled_methods->push(m);
+ }
+}
+
+
+int compare_methods(methodOop* a, methodOop* b) {
+ // %%% there can be 32-bit overflow here
+ return ((*b)->invocation_count() + (*b)->compiled_invocation_count())
+ - ((*a)->invocation_count() + (*a)->compiled_invocation_count());
+}
+
+
+void print_method_invocation_histogram() {
+ ResourceMark rm;
+ HandleMark hm;
+ collected_invoked_methods = new GrowableArray<methodOop>(1024);
+ SystemDictionary::methods_do(collect_invoked_methods);
+ collected_invoked_methods->sort(&compare_methods);
+ //
+ tty->cr();
+ tty->print_cr("Histogram Over MethodOop Invocation Counters (cutoff = %d):", MethodHistogramCutoff);
+ tty->cr();
+ tty->print_cr("____Count_(I+C)____Method________________________Module_________________");
+ unsigned total = 0, int_total = 0, comp_total = 0, static_total = 0, final_total = 0,
+ synch_total = 0, nativ_total = 0, acces_total = 0;
+ for (int index = 0; index < collected_invoked_methods->length(); index++) {
+ methodOop m = collected_invoked_methods->at(index);
+ int c = m->invocation_count() + m->compiled_invocation_count();
+ if (c >= MethodHistogramCutoff) m->print_invocation_count();
+ int_total += m->invocation_count();
+ comp_total += m->compiled_invocation_count();
+ if (m->is_final()) final_total += c;
+ if (m->is_static()) static_total += c;
+ if (m->is_synchronized()) synch_total += c;
+ if (m->is_native()) nativ_total += c;
+ if (m->is_accessor()) acces_total += c;
+ }
+ tty->cr();
+ total = int_total + comp_total;
+ tty->print_cr("Invocations summary:");
+ tty->print_cr("\t%9d (%4.1f%%) interpreted", int_total, 100.0 * int_total / total);
+ tty->print_cr("\t%9d (%4.1f%%) compiled", comp_total, 100.0 * comp_total / total);
+ tty->print_cr("\t%9d (100%%) total", total);
+ tty->print_cr("\t%9d (%4.1f%%) synchronized", synch_total, 100.0 * synch_total / total);
+ tty->print_cr("\t%9d (%4.1f%%) final", final_total, 100.0 * final_total / total);
+ tty->print_cr("\t%9d (%4.1f%%) static", static_total, 100.0 * static_total / total);
+ tty->print_cr("\t%9d (%4.1f%%) native", nativ_total, 100.0 * nativ_total / total);
+ tty->print_cr("\t%9d (%4.1f%%) accessor", acces_total, 100.0 * acces_total / total);
+ tty->cr();
+ SharedRuntime::print_call_statistics(comp_total);
+}
+
+void print_method_profiling_data() {
+ ResourceMark rm;
+ HandleMark hm;
+ collected_profiled_methods = new GrowableArray<methodOop>(1024);
+ SystemDictionary::methods_do(collect_profiled_methods);
+ collected_profiled_methods->sort(&compare_methods);
+
+ int count = collected_profiled_methods->length();
+ if (count > 0) {
+ for (int index = 0; index < count; index++) {
+ methodOop m = collected_profiled_methods->at(index);
+ ttyLocker ttyl;
+ tty->print_cr("------------------------------------------------------------------------");
+ //m->print_name(tty);
+ m->print_invocation_count();
+ tty->cr();
+ m->print_codes();
+ }
+ tty->print_cr("------------------------------------------------------------------------");
+ }
+}
+
+void print_bytecode_count() {
+ if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
+ tty->print_cr("[BytecodeCounter::counter_value = %d]", BytecodeCounter::counter_value());
+ }
+}
+
+AllocStats alloc_stats;
+
+
+
+// General statistics printing (profiling ...)
+
+void print_statistics() {
+
+#ifdef ASSERT
+
+ if (CountRuntimeCalls) {
+ extern Histogram *RuntimeHistogram;
+ RuntimeHistogram->print();
+ }
+
+ if (CountJNICalls) {
+ extern Histogram *JNIHistogram;
+ JNIHistogram->print();
+ }
+
+ if (CountJVMCalls) {
+ extern Histogram *JVMHistogram;
+ JVMHistogram->print();
+ }
+
+#endif
+
+ if (MemProfiling) {
+ MemProfiler::disengage();
+ }
+
+ if (CITime) {
+ CompileBroker::print_times();
+ }
+
+#ifdef COMPILER1
+ if ((PrintC1Statistics || LogVMOutput || LogCompilation) && UseCompiler) {
+ FlagSetting fs(DisplayVMOutput, DisplayVMOutput && PrintC1Statistics);
+ Runtime1::print_statistics();
+ Deoptimization::print_statistics();
+ nmethod::print_statistics();
+ }
+#endif /* COMPILER1 */
+
+#ifdef COMPILER2
+ if ((PrintOptoStatistics || LogVMOutput || LogCompilation) && UseCompiler) {
+ FlagSetting fs(DisplayVMOutput, DisplayVMOutput && PrintOptoStatistics);
+ Compile::print_statistics();
+#ifndef COMPILER1
+ Deoptimization::print_statistics();
+ nmethod::print_statistics();
+#endif //COMPILER1
+ SharedRuntime::print_statistics();
+ os::print_statistics();
+ }
+
+ if (PrintLockStatistics || PrintPreciseBiasedLockingStatistics) {
+ OptoRuntime::print_named_counters();
+ }
+
+ if (TimeLivenessAnalysis) {
+ MethodLiveness::print_times();
+ }
+#ifdef ASSERT
+ if (CollectIndexSetStatistics) {
+ IndexSet::print_statistics();
+ }
+#endif // ASSERT
+#endif // COMPILER2
+ if (CountCompiledCalls) {
+ print_method_invocation_histogram();
+ }
+ if (ProfileInterpreter || Tier1UpdateMethodData) {
+ print_method_profiling_data();
+ }
+ if (TimeCompiler) {
+ COMPILER2_PRESENT(Compile::print_timers();)
+ }
+ if (TimeCompilationPolicy) {
+ CompilationPolicy::policy()->print_time();
+ }
+ if (TimeOopMap) {
+ GenerateOopMap::print_time();
+ }
+ if (ProfilerCheckIntervals) {
+ PeriodicTask::print_intervals();
+ }
+ if (PrintSymbolTableSizeHistogram) {
+ SymbolTable::print_histogram();
+ }
+ if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
+ BytecodeCounter::print();
+ }
+ if (PrintBytecodePairHistogram) {
+ BytecodePairHistogram::print();
+ }
+
+ if (PrintCodeCache) {
+ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ CodeCache::print();
+ }
+
+ if (PrintCodeCache2) {
+ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ CodeCache::print_internals();
+ }
+
+ if (PrintClassStatistics) {
+ SystemDictionary::print_class_statistics();
+ }
+ if (PrintMethodStatistics) {
+ SystemDictionary::print_method_statistics();
+ }
+
+ if (PrintVtableStats) {
+ klassVtable::print_statistics();
+ klassItable::print_statistics();
+ }
+ if (VerifyOops) {
+ tty->print_cr("+VerifyOops count: %d", StubRoutines::verify_oop_count());
+ }
+
+ print_bytecode_count();
+ if (WizardMode) {
+ tty->print("allocation stats: ");
+ alloc_stats.print();
+ tty->cr();
+ }
+
+ if (PrintSystemDictionaryAtExit) {
+ SystemDictionary::print();
+ }
+
+ if (PrintBiasedLockingStatistics) {
+ BiasedLocking::print_counters();
+ }
+
+#ifdef ENABLE_ZAP_DEAD_LOCALS
+#ifdef COMPILER2
+ if (ZapDeadCompiledLocals) {
+ tty->print_cr("Compile::CompiledZap_count = %d", Compile::CompiledZap_count);
+ tty->print_cr("OptoRuntime::ZapDeadCompiledLocals_count = %d", OptoRuntime::ZapDeadCompiledLocals_count);
+ }
+#endif // COMPILER2
+#endif // ENABLE_ZAP_DEAD_LOCALS
+}
+
+#else // PRODUCT MODE STATISTICS
+
+void print_statistics() {
+
+ if (CITime) {
+ CompileBroker::print_times();
+ }
+#ifdef COMPILER2
+ if (PrintPreciseBiasedLockingStatistics) {
+ OptoRuntime::print_named_counters();
+ }
+#endif
+ if (PrintBiasedLockingStatistics) {
+ BiasedLocking::print_counters();
+ }
+}
+
+#endif
+
+
+// Helper class for registering on_exit calls through JVM_OnExit
+
+extern "C" {
+ typedef void (*__exit_proc)(void);
+}
+
+class ExitProc : public CHeapObj {
+ private:
+ __exit_proc _proc;
+ // void (*_proc)(void);
+ ExitProc* _next;
+ public:
+ // ExitProc(void (*proc)(void)) {
+ ExitProc(__exit_proc proc) {
+ _proc = proc;
+ _next = NULL;
+ }
+ void evaluate() { _proc(); }
+ ExitProc* next() const { return _next; }
+ void set_next(ExitProc* next) { _next = next; }
+};
+
+
+// Linked list of registered on_exit procedures
+
+static ExitProc* exit_procs = NULL;
+
+
+extern "C" {
+ void register_on_exit_function(void (*func)(void)) {
+ ExitProc *entry = new ExitProc(func);
+ // The classic VM does not throw an exception if the allocation fails, so check for NULL.
+ if (entry != NULL) {
+ entry->set_next(exit_procs);
+ exit_procs = entry;
+ }
+ }
+}
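For illustration, ExitProc entries form a plain singly linked list: registration prepends a node, and before_exit() later walks the list newest-first, running and freeing each node. A self-contained sketch of the same pattern with made-up names:

    #include <cstdio>

    typedef void (*exit_proc_t)(void);

    struct Node {                      // plays the role of ExitProc
      exit_proc_t proc;
      Node*       next;
    };

    static Node* g_exit_procs = 0;     // head of the list, newest first

    void register_proc(exit_proc_t p) {
      g_exit_procs = new Node{p, g_exit_procs};   // prepend
    }

    void run_and_free_procs() {
      for (Node* cur = g_exit_procs; cur != 0; ) {
        Node* next = cur->next;
        cur->proc();                   // evaluate, then free, then advance
        delete cur;
        cur = next;
      }
      g_exit_procs = 0;
    }

    static void hook_a() { std::puts("hook_a"); }
    static void hook_b() { std::puts("hook_b"); }

    int main() {
      register_proc(hook_a);
      register_proc(hook_b);
      run_and_free_procs();            // runs hook_b, then hook_a (LIFO)
      return 0;
    }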
+
+// Note: before_exit() can be executed only once. If more than one thread
+// tries to shut down the VM at the same time, only one thread
+// can run before_exit() and all other threads must wait.
+void before_exit(JavaThread * thread) {
+ #define BEFORE_EXIT_NOT_RUN 0
+ #define BEFORE_EXIT_RUNNING 1
+ #define BEFORE_EXIT_DONE 2
+ static jint volatile _before_exit_status = BEFORE_EXIT_NOT_RUN;
+
+ // Note: don't use a Mutex to guard the entire before_exit(), as
+ // JVMTI post_thread_end_event and post_vm_death_event will run native code.
+ // A CAS or OSMutex would work just fine but then we need to manipulate
+ // thread state for Safepoint. Here we use Monitor wait() and notify_all()
+ // for synchronization.
+ { MutexLocker ml(BeforeExit_lock);
+ switch (_before_exit_status) {
+ case BEFORE_EXIT_NOT_RUN:
+ _before_exit_status = BEFORE_EXIT_RUNNING;
+ break;
+ case BEFORE_EXIT_RUNNING:
+ while (_before_exit_status == BEFORE_EXIT_RUNNING) {
+ BeforeExit_lock->wait();
+ }
+ assert(_before_exit_status == BEFORE_EXIT_DONE, "invalid state");
+ return;
+ case BEFORE_EXIT_DONE:
+ return;
+ }
+ }
+
+ // The only difference between this and Win32's _onexit procs is that
+ // this version is invoked before any threads get killed.
+ ExitProc* current = exit_procs;
+ while (current != NULL) {
+ ExitProc* next = current->next();
+ current->evaluate();
+ delete current;
+ current = next;
+ }
+
+ // Hang forever on exit if we're reporting an error.
+ if (ShowMessageBoxOnError && is_error_reported()) {
+ os::infinite_sleep();
+ }
+
+ // Terminate the watcher thread - this must happen before disenrolling any periodic task
+ WatcherThread::stop();
+
+ // Print statistics gathered (profiling ...)
+ if (Arguments::has_profile()) {
+ FlatProfiler::disengage();
+ FlatProfiler::print(10);
+ }
+
+ // shut down the StatSampler task
+ StatSampler::disengage();
+ StatSampler::destroy();
+
+ // shut down the TimeMillisUpdateTask
+ if (CacheTimeMillis) {
+ TimeMillisUpdateTask::disengage();
+ }
+
+#ifndef SERIALGC
+ // stop CMS threads
+ if (UseConcMarkSweepGC) {
+ ConcurrentMarkSweepThread::stop();
+ }
+#endif // SERIALGC
+
+ // Print GC/heap related information.
+ if (PrintGCDetails) {
+ Universe::print();
+ AdaptiveSizePolicyOutput(0);
+ }
+
+
+ if (Arguments::has_alloc_profile()) {
+ HandleMark hm;
+ // Do one last collection to enumerate all the objects
+ // allocated since the last one.
+ Universe::heap()->collect(GCCause::_allocation_profiler);
+ AllocationProfiler::disengage();
+ AllocationProfiler::print(0);
+ }
+
+ if (PrintBytecodeHistogram) {
+ BytecodeHistogram::print();
+ }
+
+ if (JvmtiExport::should_post_thread_life()) {
+ JvmtiExport::post_thread_end(thread);
+ }
+ // Always call this even when there are no JVMTI environments yet, since environments
+ // may be attached late and JVMTI must track phases of VM execution
+ JvmtiExport::post_vm_death();
+ Threads::shutdown_vm_agents();
+
+ // Terminate the signal thread
+ // Note: we don't wait until it actually dies.
+ os::terminate_signal_thread();
+
+ print_statistics();
+ Universe::heap()->print_tracing_info();
+
+ VTune::exit();
+
+ { MutexLocker ml(BeforeExit_lock);
+ _before_exit_status = BEFORE_EXIT_DONE;
+ BeforeExit_lock->notify_all();
+ }
+
+ #undef BEFORE_EXIT_NOT_RUN
+ #undef BEFORE_EXIT_RUNNING
+ #undef BEFORE_EXIT_DONE
+}
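The NOT_RUN / RUNNING / DONE handshake above can be illustrated outside HotSpot with standard C++ primitives. This is only a sketch of the same run-once pattern, not VM code:

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    enum Status { NOT_RUN, RUNNING, DONE };

    static Status                  g_status = NOT_RUN;
    static std::mutex              g_lock;     // plays the role of BeforeExit_lock
    static std::condition_variable g_cv;

    void run_shutdown_work_once() {
      {
        std::unique_lock<std::mutex> ml(g_lock);
        if (g_status == DONE) return;                    // already done
        if (g_status == RUNNING) {                       // another thread is doing it
          g_cv.wait(ml, [] { return g_status == DONE; });
          return;
        }
        g_status = RUNNING;                              // we do the work
      }

      // ... shutdown actions would run here, outside the lock ...

      {
        std::lock_guard<std::mutex> ml(g_lock);
        g_status = DONE;
      }
      g_cv.notify_all();                                 // release the waiters
    }

    int main() {
      std::thread t1(run_shutdown_work_once);
      std::thread t2(run_shutdown_work_once);
      t1.join();
      t2.join();
      return g_status == DONE ? 0 : 1;
    }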
+
+void vm_exit(int code) {
+ Thread* thread = ThreadLocalStorage::thread_index() == -1 ? NULL
+ : ThreadLocalStorage::get_thread_slow();
+ if (thread == NULL) {
+ // we have serious problems -- just exit
+ vm_direct_exit(code);
+ }
+
+ if (VMThread::vm_thread() != NULL) {
+ // Fire off a VM_Exit operation to bring VM to a safepoint and exit
+ VM_Exit op(code);
+ if (thread->is_Java_thread())
+ ((JavaThread*)thread)->set_thread_state(_thread_in_vm);
+ VMThread::execute(&op);
+ // should never reach here; but just in case something is wrong with the VM thread, exit directly.
+ vm_direct_exit(code);
+ } else {
+ // VM thread is gone, just exit
+ vm_direct_exit(code);
+ }
+ ShouldNotReachHere();
+}
+
+void notify_vm_shutdown() {
+ // For now, just a dtrace probe.
+ HS_DTRACE_PROBE(hotspot, vm__shutdown);
+}
+
+void vm_direct_exit(int code) {
+ notify_vm_shutdown();
+ ::exit(code);
+}
+
+void vm_perform_shutdown_actions() {
+ // Warning: do not call 'exit_globals()' here. All threads are still running.
+ // Calling 'exit_globals()' will disable thread-local-storage and cause all
+ // kinds of assertions to trigger in debug mode.
+ if (is_init_completed()) {
+ Thread* thread = Thread::current();
+ if (thread->is_Java_thread()) {
+ // We are leaving the VM, set state to native (in case any OS exit
+ // handlers call back to the VM)
+ JavaThread* jt = (JavaThread*)thread;
+ // Must always be walkable or have no last_Java_frame when in
+ // thread_in_native
+ jt->frame_anchor()->make_walkable(jt);
+ jt->set_thread_state(_thread_in_native);
+ }
+ }
+ notify_vm_shutdown();
+}
+
+void vm_shutdown()
+{
+ vm_perform_shutdown_actions();
+ os::shutdown();
+}
+
+void vm_abort() {
+ vm_perform_shutdown_actions();
+ os::abort(PRODUCT_ONLY(false));
+ ShouldNotReachHere();
+}
+
+void vm_notify_during_shutdown(const char* error, const char* message) {
+ if (error != NULL) {
+ tty->print_cr("Error occurred during initialization of VM");
+ tty->print("%s", error);
+ if (message != NULL) {
+ tty->print_cr(": %s", message);
+ }
+ else {
+ tty->cr();
+ }
+ }
+ if (ShowMessageBoxOnError && WizardMode) {
+ fatal("Error occurred during initialization of VM");
+ }
+}
+
+void vm_exit_during_initialization(Handle exception) {
+ tty->print_cr("Error occurred during initialization of VM");
+ // If there is a pending exception on this thread it must be cleared
+ // here, before anything else. Any future call to EXCEPTION_MARK requires
+ // that no pending exception exists.
+ Thread *THREAD = Thread::current();
+ if (HAS_PENDING_EXCEPTION) {
+ CLEAR_PENDING_EXCEPTION;
+ }
+ java_lang_Throwable::print(exception, tty);
+ tty->cr();
+ java_lang_Throwable::print_stack_trace(exception(), tty);
+ tty->cr();
+ vm_notify_during_shutdown(NULL, NULL);
+ vm_abort();
+}
+
+void vm_exit_during_initialization(symbolHandle ex, const char* message) {
+ ResourceMark rm;
+ vm_notify_during_shutdown(ex->as_C_string(), message);
+ vm_abort();
+}
+
+void vm_exit_during_initialization(const char* error, const char* message) {
+ vm_notify_during_shutdown(error, message);
+ vm_abort();
+}
+
+void vm_shutdown_during_initialization(const char* error, const char* message) {
+ vm_notify_during_shutdown(error, message);
+ vm_shutdown();
+}
+
+jdk_version_info JDK_Version::_version_info = {0};
+bool JDK_Version::_pre_jdk16_version = false;
+int JDK_Version::_jdk_version = 0;
+
+void JDK_Version::initialize() {
+ void *lib_handle = os::native_java_library();
+ jdk_version_info_fn_t func =
+ CAST_TO_FN_PTR(jdk_version_info_fn_t, hpi::dll_lookup(lib_handle, "JDK_GetVersionInfo0"));
+
+ if (func == NULL) {
+ // JDK older than 1.6
+ _pre_jdk16_version = true;
+ return;
+ }
+
+ (*func)(&_version_info, sizeof(_version_info));
+
+ if (jdk_major_version() == 1) {
+ _jdk_version = jdk_minor_version();
+ } else {
+ // Handles a release version string of the form n.x.x (e.g. 7.0.0), should one appear in a future release
+ _jdk_version = jdk_major_version();
+ }
+}
+
+void JDK_Version_init() {
+ JDK_Version::initialize();
+}
diff --git a/src/share/vm/runtime/java.hpp b/src/share/vm/runtime/java.hpp
new file mode 100644
index 000000000..e3ce6d419
--- /dev/null
+++ b/src/share/vm/runtime/java.hpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Register function to be called by before_exit
+extern "C" { void register_on_exit_function(void (*func)(void)) ;}
+
+// Execute code before all handles are released and thread is killed; prologue to vm_exit
+extern void before_exit(JavaThread * thread);
+
+// Forced VM exit (i.e, internal error or JVM_Exit)
+extern void vm_exit(int code);
+
+// Wrapper for ::exit()
+extern void vm_direct_exit(int code);
+
+// Shutdown the VM but do not exit the process
+extern void vm_shutdown();
+// Shutdown the VM and abort the process
+extern void vm_abort();
+
+// Trigger any necessary notification of the VM being shutdown
+extern void notify_vm_shutdown();
+
+// VM exit if error occurs during initialization of VM
+extern void vm_exit_during_initialization(Handle exception);
+extern void vm_exit_during_initialization(symbolHandle exception_name, const char* message);
+extern void vm_exit_during_initialization(const char* error, const char* message = NULL);
+extern void vm_shutdown_during_initialization(const char* error, const char* message = NULL);
+
+class JDK_Version : AllStatic {
+ friend class VMStructs;
+ private:
+ static jdk_version_info _version_info;
+ static bool _pre_jdk16_version;
+ static int _jdk_version; // JDK version number representing the release
+ // i.e. n in 1.n.x (= jdk_minor_version())
+
+ public:
+ static void initialize();
+ static int jdk_major_version() { return JDK_VERSION_MAJOR(_version_info.jdk_version); }
+ static int jdk_minor_version() { return JDK_VERSION_MINOR(_version_info.jdk_version); }
+ static int jdk_micro_version() { return JDK_VERSION_MICRO(_version_info.jdk_version); }
+ static int jdk_build_number() { return JDK_VERSION_BUILD(_version_info.jdk_version); }
+
+ static bool is_pre_jdk16_version() { return _pre_jdk16_version; }
+ static bool is_jdk12x_version() { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 2; }
+ static bool is_jdk13x_version() { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 3; }
+ static bool is_jdk14x_version() { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 4; }
+ static bool is_jdk15x_version() { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 5; }
+ static bool is_jdk16x_version() { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 6; }
+ static bool is_jdk17x_version() { assert(is_jdk_version_initialized(), "must have been initialized"); return _jdk_version == 7; }
+
+ static bool supports_thread_park_blocker() { return _version_info.thread_park_blocker; }
+
+ static bool is_gte_jdk14x_version() {
+ // Keep the semantics: the version number is >= 1.4
+ assert(is_jdk_version_initialized(), "Not initialized");
+ return _jdk_version >= 4;
+ }
+ static bool is_gte_jdk15x_version() {
+ // Keep the semantics: the version number is >= 1.5
+ assert(is_jdk_version_initialized(), "Not initialized");
+ return _jdk_version >= 5;
+ }
+ static bool is_gte_jdk16x_version() {
+ // Keep the semantics: the version number is >= 1.6
+ assert(is_jdk_version_initialized(), "Not initialized");
+ return _jdk_version >= 6;
+ }
+
+ static bool is_gte_jdk17x_version() {
+ // Keep the semantics: the version number is >= 1.7
+ assert(is_jdk_version_initialized(), "Not initialized");
+ return _jdk_version >= 7;
+ }
+
+ static bool is_jdk_version_initialized() {
+ return _jdk_version > 0;
+ }
+
+ // These methods are defined to deal with pre JDK 1.6 versions
+ static void set_jdk12x_version() {
+ assert(_pre_jdk16_version && !is_jdk_version_initialized(), "must not initialize");
+ _jdk_version = 2;
+ _version_info.jdk_version = (1 << 24) | (2 << 16);
+ }
+ static void set_jdk13x_version() {
+ assert(_pre_jdk16_version && !is_jdk_version_initialized(), "must not initialize");
+ _jdk_version = 3;
+ _version_info.jdk_version = (1 << 24) | (3 << 16);
+ }
+ static void set_jdk14x_version() {
+ assert(_pre_jdk16_version && !is_jdk_version_initialized(), "must not initialize");
+ _jdk_version = 4;
+ _version_info.jdk_version = (1 << 24) | (4 << 16);
+ }
+ static void set_jdk15x_version() {
+ assert(_pre_jdk16_version && !is_jdk_version_initialized(), "must not initialize");
+ _jdk_version = 5;
+ _version_info.jdk_version = (1 << 24) | (5 << 16);
+ }
+};
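For context, the set_jdk1nx_version() helpers above synthesize the same packed word that JDK_GetVersionInfo0 reports, with the major number in the top byte and the minor number in the byte below it. The field widths in this standalone sketch are inferred from the (1 << 24) | (n << 16) pattern and are an assumption for illustration only:

    #include <cassert>
    #include <cstdint>

    // Pack/unpack a version word: major in bits 31..24, minor in 23..16,
    // micro in 15..8 (assumed layout, mirroring the JDK_VERSION_* accessors).
    inline std::uint32_t pack_version(unsigned major, unsigned minor, unsigned micro) {
      return (major << 24) | (minor << 16) | (micro << 8);
    }
    inline unsigned major_of(std::uint32_t v) { return (v >> 24) & 0xff; }
    inline unsigned minor_of(std::uint32_t v) { return (v >> 16) & 0xff; }
    inline unsigned micro_of(std::uint32_t v) { return (v >>  8) & 0xff; }

    int main() {
      std::uint32_t v = pack_version(1, 5, 0);   // what set_jdk15x_version() stores
      assert(major_of(v) == 1 && minor_of(v) == 5 && micro_of(v) == 0);
      return 0;
    }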
diff --git a/src/share/vm/runtime/javaCalls.cpp b/src/share/vm/runtime/javaCalls.cpp
new file mode 100644
index 000000000..444bc7f77
--- /dev/null
+++ b/src/share/vm/runtime/javaCalls.cpp
@@ -0,0 +1,524 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_javaCalls.cpp.incl"
+
+// -----------------------------------------------------
+// Implementation of JavaCallWrapper
+
+JavaCallWrapper::JavaCallWrapper(methodHandle callee_method, Handle receiver, JavaValue* result, TRAPS) {
+ JavaThread* thread = (JavaThread *)THREAD;
+ bool clear_pending_exception = true;
+
+ guarantee(thread->is_Java_thread(), "crucial check - the VM thread cannot and must not escape to Java code");
+ assert(!thread->owns_locks(), "must release all locks when leaving VM");
+ guarantee(!thread->is_Compiler_thread(), "cannot make java calls from the compiler");
+ _result = result;
+
+ // Make sure that the value of highest_lock is at least the same as the current stack pointer,
+ // since the Java code is highly likely to use locks.
+ // Use '(address)this' to guarantee that the highest_lock address is conservative and inside our thread
+ thread->update_highest_lock((address)this);
+
+ // Allocate handle block for Java code. This must be done before we change thread_state to _thread_in_Java_or_stub,
+ // since it can potentially block.
+ JNIHandleBlock* new_handles = JNIHandleBlock::allocate_block(thread);
+
+ // After this, we are officially in Java code. This needs to be done before we change any of the thread local
+ // info, since we cannot find oops before the new information is set up completely.
+ ThreadStateTransition::transition(thread, _thread_in_vm, _thread_in_Java);
+
+ // Make sure that we handle asynchronous stops and suspends _before_ we clear all thread state
+ // in JavaCallWrapper::JavaCallWrapper(). This way, we can decide if we need to do any pd actions
+ // to prepare for stop/suspend (flush register windows on sparcs, cache sp, or other state).
+ if (thread->has_special_runtime_exit_condition()) {
+ thread->handle_special_runtime_exit_condition();
+ if (HAS_PENDING_EXCEPTION) {
+ clear_pending_exception = false;
+ }
+ }
+
+
+ // Make sure to set the oops after the thread transition, since we can block there. No one is GC'ing
+ // the JavaCallWrapper before the entry frame is on the stack.
+ _callee_method = callee_method();
+ _receiver = receiver();
+
+#ifdef CHECK_UNHANDLED_OOPS
+ THREAD->allow_unhandled_oop(&_callee_method);
+ THREAD->allow_unhandled_oop(&_receiver);
+#endif // CHECK_UNHANDLED_OOPS
+
+ _thread = (JavaThread *)thread;
+ _handles = _thread->active_handles(); // save previous handle block & Java frame linkage
+
+ // For the profiler, the last_Java_frame information in thread must always be in
+ // legal state. We have no last Java frame if last_Java_sp == NULL so
+ // the valid transition is to clear _last_Java_sp and then reset the rest of
+ // the (platform specific) state.
+
+ _anchor.copy(_thread->frame_anchor());
+ _thread->frame_anchor()->clear();
+
+ debug_only(_thread->inc_java_call_counter());
+ _thread->set_active_handles(new_handles); // install new handle block and reset Java frame linkage
+
+ assert (_thread->thread_state() != _thread_in_native, "cannot set native pc to NULL");
+
+ // clear any pending exception in thread (native calls start with no exception pending)
+ if(clear_pending_exception) {
+ _thread->clear_pending_exception();
+ }
+
+ if (_anchor.last_Java_sp() == NULL) {
+ _thread->record_base_of_stack_pointer();
+ }
+}
+
+
+JavaCallWrapper::~JavaCallWrapper() {
+ assert(_thread == JavaThread::current(), "must still be the same thread");
+
+ // restore previous handle block & Java frame linkage
+ JNIHandleBlock *_old_handles = _thread->active_handles();
+ _thread->set_active_handles(_handles);
+
+ _thread->frame_anchor()->zap();
+
+ debug_only(_thread->dec_java_call_counter());
+
+ if (_anchor.last_Java_sp() == NULL) {
+ _thread->set_base_of_stack_pointer(NULL);
+ }
+
+
+ // Old thread-local info has been restored. We are now back in the VM.
+ ThreadStateTransition::transition_from_java(_thread, _thread_in_vm);
+
+ // State has been restored; now make the anchor frame visible for the profiler.
+ // Do this after the transition because it allows us to put an assert in
+ // the Java->vm transition which checks that the stack is not walkable
+ // on sparc/ia64, which will catch violations of the resetting of last_Java_frame
+ // invariants (i.e. _flags always cleared on return to Java)
+
+ _thread->frame_anchor()->copy(&_anchor);
+
+ // Release handles after we are marked as being inside the VM again, since this
+ // operation might block
+ JNIHandleBlock::release_block(_old_handles, _thread);
+}
+
+
+void JavaCallWrapper::oops_do(OopClosure* f) {
+ f->do_oop((oop*)&_callee_method);
+ f->do_oop((oop*)&_receiver);
+ handles()->oops_do(f);
+}
+
+
+// Helper methods
+static BasicType runtime_type_from(JavaValue* result) {
+ switch (result->get_type()) {
+ case T_BOOLEAN: // fall through
+ case T_CHAR : // fall through
+ case T_SHORT : // fall through
+ case T_INT : // fall through
+#ifndef _LP64
+ case T_OBJECT : // fall through
+ case T_ARRAY : // fall through
+#endif
+ case T_BYTE : // fall through
+ case T_VOID : return T_INT;
+ case T_LONG : return T_LONG;
+ case T_FLOAT : return T_FLOAT;
+ case T_DOUBLE : return T_DOUBLE;
+#ifdef _LP64
+ case T_ARRAY : // fall through
+ case T_OBJECT: return T_OBJECT;
+#endif
+ }
+ ShouldNotReachHere();
+ return T_ILLEGAL;
+}
+
+// ===== object constructor calls =====
+
+void JavaCalls::call_default_constructor(JavaThread* thread, methodHandle method, Handle receiver, TRAPS) {
+ assert(method->name() == vmSymbols::object_initializer_name(), "Should only be called for default constructor");
+ assert(method->signature() == vmSymbols::void_method_signature(), "Should only be called for default constructor");
+
+ instanceKlass* ik = instanceKlass::cast(method->method_holder());
+ if (ik->is_initialized() && ik->has_vanilla_constructor()) {
+ // safe to skip constructor call
+ } else {
+ static JavaValue result(T_VOID);
+ JavaCallArguments args(receiver);
+ call(&result, method, &args, CHECK);
+ }
+}
+
+// ============ Virtual calls ============
+
+void JavaCalls::call_virtual(JavaValue* result, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, JavaCallArguments* args, TRAPS) {
+ CallInfo callinfo;
+ Handle receiver = args->receiver();
+ KlassHandle recvrKlass(THREAD, receiver.is_null() ? (klassOop)NULL : receiver->klass());
+ LinkResolver::resolve_virtual_call(
+ callinfo, receiver, recvrKlass, spec_klass, name, signature,
+ KlassHandle(), false, true, CHECK);
+ methodHandle method = callinfo.selected_method();
+ assert(method.not_null(), "should have thrown exception");
+
+ // Invoke the method
+ JavaCalls::call(result, method, args, CHECK);
+}
+
+
+void JavaCalls::call_virtual(JavaValue* result, Handle receiver, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, TRAPS) {
+ JavaCallArguments args(receiver); // One oop argument
+ call_virtual(result, spec_klass, name, signature, &args, CHECK);
+}
+
+
+void JavaCalls::call_virtual(JavaValue* result, Handle receiver, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, Handle arg1, TRAPS) {
+ JavaCallArguments args(receiver); // One oop argument
+ args.push_oop(arg1);
+ call_virtual(result, spec_klass, name, signature, &args, CHECK);
+}
+
+
+
+void JavaCalls::call_virtual(JavaValue* result, Handle receiver, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, Handle arg1, Handle arg2, TRAPS) {
+ JavaCallArguments args(receiver); // One oop argument
+ args.push_oop(arg1);
+ args.push_oop(arg2);
+ call_virtual(result, spec_klass, name, signature, &args, CHECK);
+}
+
+
+// ============ Special calls ============
+
+void JavaCalls::call_special(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, JavaCallArguments* args, TRAPS) {
+ CallInfo callinfo;
+ LinkResolver::resolve_special_call(callinfo, klass, name, signature, KlassHandle(), false, CHECK);
+ methodHandle method = callinfo.selected_method();
+ assert(method.not_null(), "should have thrown exception");
+
+ // Invoke the method
+ JavaCalls::call(result, method, args, CHECK);
+}
+
+
+void JavaCalls::call_special(JavaValue* result, Handle receiver, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS) {
+ JavaCallArguments args(receiver); // One oop argument
+ call_special(result, klass, name, signature, &args, CHECK);
+}
+
+
+void JavaCalls::call_special(JavaValue* result, Handle receiver, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, TRAPS) {
+ JavaCallArguments args(receiver); // One oop argument
+ args.push_oop(arg1);
+ call_special(result, klass, name, signature, &args, CHECK);
+}
+
+
+void JavaCalls::call_special(JavaValue* result, Handle receiver, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, Handle arg2, TRAPS) {
+ JavaCallArguments args(receiver); // One oop argument
+ args.push_oop(arg1);
+ args.push_oop(arg2);
+ call_special(result, klass, name, signature, &args, CHECK);
+}
+
+
+// ============ Static calls ============
+
+void JavaCalls::call_static(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, JavaCallArguments* args, TRAPS) {
+ CallInfo callinfo;
+ LinkResolver::resolve_static_call(callinfo, klass, name, signature, KlassHandle(), false, true, CHECK);
+ methodHandle method = callinfo.selected_method();
+ assert(method.not_null(), "should have thrown exception");
+
+ // Invoke the method
+ JavaCalls::call(result, method, args, CHECK);
+}
+
+
+void JavaCalls::call_static(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS) {
+ JavaCallArguments args; // No argument
+ call_static(result, klass, name, signature, &args, CHECK);
+}
+
+
+void JavaCalls::call_static(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, TRAPS) {
+ JavaCallArguments args(arg1); // One oop argument
+ call_static(result, klass, name, signature, &args, CHECK);
+}
+
+
+void JavaCalls::call_static(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, Handle arg2, TRAPS) {
+ JavaCallArguments args; // Two oop arguments pushed below
+ args.push_oop(arg1);
+ args.push_oop(arg2);
+ call_static(result, klass, name, signature, &args, CHECK);
+}
+
+
+// -------------------------------------------------
+// Implementation of JavaCalls (low level)
+
+
+void JavaCalls::call(JavaValue* result, methodHandle method, JavaCallArguments* args, TRAPS) {
+ // Check if we need to wrap a potential OS exception handler around thread
+ // This is used for e.g. Win32 structured exception handlers
+ assert(THREAD->is_Java_thread(), "only JavaThreads can make JavaCalls");
+ // Need to wrap each and every time, since there might be native code down the
+ // stack that has installed its own exception handlers
+ os::os_exception_wrapper(call_helper, result, &method, args, THREAD);
+}
+
+void JavaCalls::call_helper(JavaValue* result, methodHandle* m, JavaCallArguments* args, TRAPS) {
+ methodHandle method = *m;
+ JavaThread* thread = (JavaThread*)THREAD;
+ assert(thread->is_Java_thread(), "must be called by a java thread");
+ assert(method.not_null(), "must have a method to call");
+ assert(!SafepointSynchronize::is_at_safepoint(), "call to Java code during VM operation");
+ assert(!thread->handle_area()->no_handle_mark_active(), "cannot call out to Java here");
+
+
+ CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
+
+ // Make sure that the arguments have the right type
+ debug_only(args->verify(method, result->get_type(), thread));
+
+ // Ignore call if method is empty
+ if (method->is_empty_method()) {
+ assert(result->get_type() == T_VOID, "an empty method must return a void value");
+ return;
+ }
+
+
+#ifdef ASSERT
+ { klassOop holder = method->method_holder();
+ // A klass might not be initialized since JavaCalls might be used during the execution of
+ // the <clinit>. For example, a Thread.start might start executing on an object that is
+ // not fully initialized! (bad Java programming style)
+ assert(instanceKlass::cast(holder)->is_linked(), "rewriting must have taken place");
+ }
+#endif
+
+
+ assert(!thread->is_Compiler_thread(), "cannot compile from the compiler");
+ if (CompilationPolicy::mustBeCompiled(method)) {
+ CompileBroker::compile_method(method, InvocationEntryBci,
+ methodHandle(), 0, "mustBeCompiled", CHECK);
+ }
+
+ // Since the call stub sets up like the interpreter we call the from_interpreted_entry
+ // so we can go compiled via an i2c adapter. Otherwise the initial entry method will always
+ // run interpreted.
+ address entry_point = method->from_interpreted_entry();
+ if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
+ entry_point = method->interpreter_entry();
+ }
+
+ // Figure out if the result value is an oop or not. (Note: This is a different value
+ // than result_type; result_type will be T_INT for oops - it is about size.)
+ BasicType result_type = runtime_type_from(result);
+ bool oop_result_flag = (result->get_type() == T_OBJECT || result->get_type() == T_ARRAY);
+
+ // NOTE: if we move the computation of the result_val_address inside
+ // the call to call_stub, the optimizer produces wrong code.
+ intptr_t* result_val_address = (intptr_t*)(result->get_value_addr());
+
+ // Find receiver
+ Handle receiver = (!method->is_static()) ? args->receiver() : Handle();
+
+ // When we reenter Java, we need to reenable the yellow zone which
+ // might already be disabled while we are in the VM.
+ if (thread->stack_yellow_zone_disabled()) {
+ thread->reguard_stack();
+ }
+
+ // Check that there are shadow pages available before changing thread state
+ // to Java
+ if (!os::stack_shadow_pages_available(THREAD, method)) {
+ // Throw stack overflow exception with preinitialized exception.
+ Exceptions::throw_stack_overflow_exception(THREAD, __FILE__, __LINE__);
+ return;
+ } else {
+ // Touch the checked pages if the OS needs them to be touched to be mapped.
+ os::bang_stack_shadow_pages();
+ }
+
+ // do call
+ { JavaCallWrapper link(method, receiver, result, CHECK);
+ { HandleMark hm(thread); // HandleMark used by HandleMarkCleaner
+
+ StubRoutines::call_stub()(
+ (address)&link,
+ // (intptr_t*)&(result->_value), // see NOTE above (compiler problem)
+ result_val_address, // see NOTE above (compiler problem)
+ result_type,
+ method(),
+ entry_point,
+ args->parameters(),
+ args->size_of_parameters(),
+ CHECK
+ );
+
+ result = link.result(); // circumvent MS C++ 5.0 compiler bug (result is clobbered across call)
+ // Preserve oop return value across possible gc points
+ if (oop_result_flag) {
+ thread->set_vm_result((oop) result->get_jobject());
+ }
+ }
+ } // Exit JavaCallWrapper (can block - potential return oop must be preserved)
+
+ // Check if a thread stop or suspend should be executed
+ // The following assert was not realistic. Thread.stop can set that bit at any moment.
+ //assert(!thread->has_special_runtime_exit_condition(), "no async. exceptions should be installed");
+
+ // Restore possible oop return
+ if (oop_result_flag) {
+ result->set_jobject((jobject)thread->vm_result());
+ thread->set_vm_result(NULL);
+ }
+}
+
+
+//--------------------------------------------------------------------------------------
+// Implementation of JavaCallArguments
+
+intptr_t* JavaCallArguments::parameters() {
+ // First convert all handles to oops
+ for(int i = 0; i < _size; i++) {
+ if (_is_oop[i]) {
+ // Handle conversion
+ _value[i] = (intptr_t)Handle::raw_resolve((oop *)_value[i]);
+ }
+ // The parameters are moved to the parameters array to include the tags.
+ if (TaggedStackInterpreter) {
+ // Tags are interspersed with arguments. Tags are first.
+ int tagged_index = i*2;
+ _parameters[tagged_index] = _is_oop[i] ? frame::TagReference :
+ frame::TagValue;
+ _parameters[tagged_index+1] = _value[i];
+ }
+ }
+ // Return argument vector
+ return TaggedStackInterpreter ? _parameters : _value;
+}
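A small standalone sketch of the interleaved layout that parameters() builds when TaggedStackInterpreter is enabled: slot 2*i carries the tag and slot 2*i+1 the value. The tag constants below are placeholders, not the real frame::TagValue / frame::TagReference definitions:

    #include <cassert>
    #include <cstdint>

    const std::intptr_t kTagValue     = 0;   // illustrative tag values only
    const std::intptr_t kTagReference = 1;

    void interleave(const std::intptr_t* values, const bool* is_oop, int n,
                    std::intptr_t* out /* capacity 2*n */) {
      for (int i = 0; i < n; i++) {
        out[2*i]     = is_oop[i] ? kTagReference : kTagValue;  // tag first
        out[2*i + 1] = values[i];                              // then the value
      }
    }

    int main() {
      std::intptr_t vals[] = { 7, 42 };
      bool          oops[] = { true, false };
      std::intptr_t out[4];
      interleave(vals, oops, 2, out);
      assert(out[0] == kTagReference && out[1] == 7);
      assert(out[2] == kTagValue     && out[3] == 42);
      return 0;
    }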
+
+//--------------------------------------------------------------------------------------
+// Non-Product code
+#ifndef PRODUCT
+
+class SignatureChekker : public SignatureIterator {
+ private:
+ bool *_is_oop;
+ int _pos;
+ BasicType _return_type;
+
+ public:
+ bool _is_return;
+
+ SignatureChekker(symbolHandle signature, BasicType return_type, bool is_static, bool* is_oop) : SignatureIterator(signature) {
+ _is_oop = is_oop;
+ _is_return = false;
+ _return_type = return_type;
+ _pos = 0;
+ if (!is_static) {
+ check_value(true); // Receiver must be an oop
+ }
+ }
+
+ void check_value(bool type) {
+ guarantee(_is_oop[_pos++] == type, "signature does not match pushed arguments");
+ }
+
+ void check_doing_return(bool state) { _is_return = state; }
+
+ void check_return_type(BasicType t) {
+ guarantee(_is_return && t == _return_type, "return type does not match");
+ }
+
+ void check_int(BasicType t) {
+ if (_is_return) {
+ check_return_type(t);
+ return;
+ }
+ check_value(false);
+ }
+
+ void check_double(BasicType t) { check_long(t); }
+
+ void check_long(BasicType t) {
+ if (_is_return) {
+ check_return_type(t);
+ return;
+ }
+
+ check_value(false);
+ check_value(false);
+ }
+
+ void check_obj(BasicType t) {
+ if (_is_return) {
+ check_return_type(t);
+ return;
+ }
+ check_value(true);
+ }
+
+ void do_bool() { check_int(T_BOOLEAN); }
+ void do_char() { check_int(T_CHAR); }
+ void do_float() { check_int(T_FLOAT); }
+ void do_double() { check_double(T_DOUBLE); }
+ void do_byte() { check_int(T_BYTE); }
+ void do_short() { check_int(T_SHORT); }
+ void do_int() { check_int(T_INT); }
+ void do_long() { check_long(T_LONG); }
+ void do_void() { check_return_type(T_VOID); }
+ void do_object(int begin, int end) { check_obj(T_OBJECT); }
+ void do_array(int begin, int end) { check_obj(T_OBJECT); }
+};
+
+void JavaCallArguments::verify(methodHandle method, BasicType return_type,
+ Thread *thread) {
+ guarantee(method->size_of_parameters() == size_of_parameters(), "wrong no. of arguments pushed");
+
+ // Treat T_OBJECT and T_ARRAY as the same
+ if (return_type == T_ARRAY) return_type = T_OBJECT;
+
+ // Check that oop information is correct
+ symbolHandle signature (thread, method->signature());
+
+ SignatureChekker sc(signature, return_type, method->is_static(),_is_oop);
+ sc.iterate_parameters();
+ sc.check_doing_return(true);
+ sc.iterate_returntype();
+}
+
+#endif // PRODUCT
diff --git a/src/share/vm/runtime/javaCalls.hpp b/src/share/vm/runtime/javaCalls.hpp
new file mode 100644
index 000000000..5923430a5
--- /dev/null
+++ b/src/share/vm/runtime/javaCalls.hpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// A JavaCallWrapper is constructed before each JavaCall and destructed after the call.
+// Its purpose is to allocate/deallocate a new handle block and to save/restore the last
+// Java fp/sp. A pointer to the JavaCallWrapper is stored on the stack.
+
+class JavaCallWrapper: StackObj {
+ friend class VMStructs;
+ private:
+ JavaThread* _thread; // the thread to which this call belongs
+ JNIHandleBlock* _handles; // the saved handle block
+ methodOop _callee_method; // to be able to collect arguments if entry frame is top frame
+ oop _receiver; // the receiver of the call (if a non-static call)
+
+ JavaFrameAnchor _anchor; // last thread anchor state that we must restore
+
+ JavaValue* _result; // result value
+
+ public:
+ // Construction/destruction
+ JavaCallWrapper(methodHandle callee_method, Handle receiver, JavaValue* result, TRAPS);
+ ~JavaCallWrapper();
+
+ // Accessors
+ JavaThread* thread() const { return _thread; }
+ JNIHandleBlock* handles() const { return _handles; }
+
+ JavaFrameAnchor* anchor(void) { return &_anchor; }
+
+ JavaValue* result() const { return _result; }
+ // GC support
+ methodOop callee_method() { return _callee_method; }
+ oop receiver() { return _receiver; }
+ void oops_do(OopClosure* f);
+
+};
+
+
+// Encapsulates arguments to a JavaCall (faster, safer, and more convenient than using var-args)
+class JavaCallArguments : public StackObj {
+ private:
+ enum Constants {
+ _default_size = 8 // Must be at least # of arguments in JavaCalls methods
+ };
+
+ intptr_t _value_buffer [_default_size + 1];
+ intptr_t _parameter_buffer [_default_size*2 + 1];
+ bool _is_oop_buffer[_default_size + 1];
+
+ intptr_t* _value;
+ intptr_t* _parameters;
+ bool* _is_oop;
+ int _size;
+ int _max_size;
+ bool _start_at_zero; // Support late setting of receiver
+
+ void initialize() {
+ // Starts at first element to support set_receiver.
+ _value = &_value_buffer[1];
+ _is_oop = &_is_oop_buffer[1];
+
+ _parameters = &_parameter_buffer[0];
+ _max_size = _default_size;
+ _size = 0;
+ _start_at_zero = false;
+ }
+
+ public:
+ JavaCallArguments() { initialize(); }
+
+ JavaCallArguments(Handle receiver) {
+ initialize();
+ push_oop(receiver);
+ }
+
+ JavaCallArguments(int max_size) {
+ if (max_size > _default_size) {
+ _value = NEW_RESOURCE_ARRAY(intptr_t, max_size + 1);
+ _is_oop = NEW_RESOURCE_ARRAY(bool, max_size + 1);
+ if (TaggedStackInterpreter) {
+ _parameters = NEW_RESOURCE_ARRAY(intptr_t, max_size*2 + 1);
+ }
+ // Reserve room for potential receiver in value and is_oop
+ _value++; _is_oop++;
+ _max_size = max_size;
+ _size = 0;
+ _start_at_zero = false;
+ } else {
+ initialize();
+ }
+ }
+
+ inline void push_oop(Handle h) { _is_oop[_size] = true;
+ JNITypes::put_obj((oop)h.raw_value(), _value, _size); }
+
+ inline void push_int(int i) { _is_oop[_size] = false;
+ JNITypes::put_int(i, _value, _size); }
+
+ inline void push_double(double d) { _is_oop[_size] = false; _is_oop[_size + 1] = false;
+ JNITypes::put_double(d, _value, _size); }
+
+ inline void push_long(jlong l) { _is_oop[_size] = false; _is_oop[_size + 1] = false;
+ JNITypes::put_long(l, _value, _size); }
+
+ inline void push_float(float f) { _is_oop[_size] = false;
+ JNITypes::put_float(f, _value, _size); }
+
+ // receiver
+ Handle receiver() {
+ assert(_size > 0, "must be at least one argument");
+ assert(_is_oop[0], "first argument must be an oop");
+ assert(_value[0] != 0, "receiver must be not-null");
+ return Handle((oop*)_value[0], false);
+ }
+
+ void set_receiver(Handle h) {
+ assert(_start_at_zero == false, "can only be called once");
+ _start_at_zero = true;
+ _is_oop--;
+ _value--;
+ _size++;
+ _is_oop[0] = true;
+ _value[0] = (intptr_t)h.raw_value();
+ }
+
+ // Converts all Handles to oops, and returns a reference to parameter vector
+ intptr_t* parameters() ;
+ int size_of_parameters() const { return _size; }
+
+ // Verify that pushed arguments fits a given method
+ void verify(methodHandle method, BasicType return_type, Thread *thread) PRODUCT_RETURN;
+};
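The buffers above reserve one extra slot in front of _value and _is_oop so that set_receiver() can later step the pointers back one slot instead of shifting every argument already pushed. A self-contained toy version of that trick, with illustrative names and plain longs instead of tagged slots:

    #include <cassert>

    class ArgBuffer {
      long  _buffer[8 + 1];    // slot 0 is reserved for a late receiver
      long* _value;
      int   _size;
      bool  _start_at_zero;
     public:
      ArgBuffer() : _value(&_buffer[1]), _size(0), _start_at_zero(false) {}
      void push(long v) { _value[_size++] = v; }
      void set_receiver(long r) {
        assert(!_start_at_zero);
        _start_at_zero = true;
        _value--;              // expose the reserved slot
        _size++;
        _value[0] = r;         // receiver becomes argument 0
      }
      long at(int i) const { return _value[i]; }
      int  size()    const { return _size; }
    };

    int main() {
      ArgBuffer args;
      args.push(10);
      args.push(20);
      args.set_receiver(99);   // arguments already pushed do not move
      assert(args.size() == 3);
      assert(args.at(0) == 99 && args.at(1) == 10 && args.at(2) == 20);
      return 0;
    }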
+
+// All calls to Java have to go via JavaCalls. Sets up the stack frame
+// and makes sure that the last_Java_frame pointers are chained correctly.
+//
+
+class JavaCalls: AllStatic {
+ static void call_helper(JavaValue* result, methodHandle* method, JavaCallArguments* args, TRAPS);
+ public:
+ // Optimized constructor call
+ static void call_default_constructor(JavaThread* thread, methodHandle method, Handle receiver, TRAPS);
+
+ // call_special
+ // ------------
+ // The receiver must be first oop in argument list
+ static void call_special(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, JavaCallArguments* args, TRAPS);
+
+ static void call_special(JavaValue* result, Handle receiver, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS); // No args
+ static void call_special(JavaValue* result, Handle receiver, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, TRAPS);
+ static void call_special(JavaValue* result, Handle receiver, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, Handle arg2, TRAPS);
+
+ // virtual call
+ // ------------
+
+ // The receiver must be first oop in argument list
+ static void call_virtual(JavaValue* result, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, JavaCallArguments* args, TRAPS);
+
+ static void call_virtual(JavaValue* result, Handle receiver, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, TRAPS); // No args
+ static void call_virtual(JavaValue* result, Handle receiver, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, Handle arg1, TRAPS);
+ static void call_virtual(JavaValue* result, Handle receiver, KlassHandle spec_klass, symbolHandle name, symbolHandle signature, Handle arg1, Handle arg2, TRAPS);
+
+ // Static call
+ // -----------
+ static void call_static(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, JavaCallArguments* args, TRAPS);
+
+ static void call_static(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
+ static void call_static(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, TRAPS);
+ static void call_static(JavaValue* result, KlassHandle klass, symbolHandle name, symbolHandle signature, Handle arg1, Handle arg2, TRAPS);
+
+ // Low-level interface
+ static void call(JavaValue* result, methodHandle method, JavaCallArguments* args, TRAPS);
+};
diff --git a/src/share/vm/runtime/javaFrameAnchor.hpp b/src/share/vm/runtime/javaFrameAnchor.hpp
new file mode 100644
index 000000000..1320eb126
--- /dev/null
+++ b/src/share/vm/runtime/javaFrameAnchor.hpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2002-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+//
+// An object for encapsulating the machine/os dependent part of a JavaThread frame state
+//
+class JavaThread;
+
+class JavaFrameAnchor VALUE_OBJ_CLASS_SPEC {
+// Too many friends...
+friend class CallNativeDirectNode;
+friend class OptoRuntime;
+friend class Runtime1;
+friend class StubAssembler;
+friend class CallRuntimeDirectNode;
+friend class MacroAssembler;
+friend class InterpreterGenerator;
+friend class LIR_Assembler;
+friend class GraphKit;
+friend class StubGenerator;
+friend class JavaThread;
+friend class frame;
+friend class VMStructs;
+friend class BytecodeInterpreter;
+friend class JavaCallWrapper;
+
+ private:
+ //
+ // Whenever _last_Java_sp != NULL other anchor fields MUST be valid!
+ // The stack may not be walkable [check with walkable() ] but the values must be valid.
+ // The profiler apparently depends on this.
+ //
+ intptr_t* volatile _last_Java_sp;
+
+ // Whenever we call from Java to native we can not be assured that the return
+ // address that composes the last_Java_frame will be in an accessible location
+ // so calls from Java to native store that pc (or one good enough to locate
+ // the oopmap) in the frame anchor. Since the frames that call from Java to
+ // native are never deoptimized we never need to patch the pc and so this
+ // is acceptable.
+ volatile address _last_Java_pc;
+
+ // tells whether the last Java frame is set
+ // It is important that when last_Java_sp != NULL that the rest of the frame
+ // anchor (including platform specific) all be valid.
+
+ bool has_last_Java_frame() const { return _last_Java_sp != NULL; }
+ // This is very dangerous unless sp == NULL
+ // Invalidate the anchor so that has_last_frame is false
+ // and no one should look at the other fields.
+ void zap(void) { _last_Java_sp = NULL; }
+
+#include "incls/_javaFrameAnchor_pd.hpp.incl"
+
+public:
+ JavaFrameAnchor() { clear(); }
+ JavaFrameAnchor(JavaFrameAnchor *src) { copy(src); }
+
+ address last_Java_pc(void) { return _last_Java_pc; }
+ void set_last_Java_pc(address pc) { _last_Java_pc = pc; }
+
+ // Assembly stub generation helpers
+
+ static ByteSize last_Java_sp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_sp); }
+ static ByteSize last_Java_pc_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_pc); }
+
+};
diff --git a/src/share/vm/runtime/jfieldIDWorkaround.hpp b/src/share/vm/runtime/jfieldIDWorkaround.hpp
new file mode 100644
index 000000000..ce1c34bb3
--- /dev/null
+++ b/src/share/vm/runtime/jfieldIDWorkaround.hpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class jfieldIDWorkaround: AllStatic {
+ // This workaround is because JVMTI doesn't have distinct entry points
+ // for methods that use static jfieldIDs and instance jfieldIDs.
+ // The workaround is to steal a low-order bit:
+ // a 1 means the jfieldID is an instance jfieldID,
+ // and the rest of the word is the offset of the field.
+ // a 0 means the jfieldID is a static jfieldID,
+ // and the rest of the word is the JNIid*.
+ //
+ // Another low-order bit is used to mark if an instance field
+ // is accompanied by an indication of which class it applies to.
+ //
+ // Bit-format of a jfieldID (most significant first):
+ // address:30 instance=0:1 checked=0:1
+ // offset:30 instance=1:1 checked=0:1
+ // klass:23 offset:7 instance=1:1 checked=1:1
+ //
+ // If the offset does not fit in 7 bits, or if the fieldID is
+ // not checked, then the checked bit is zero and the rest of
+ // the word (30 bits) contains only the offset.
+ //
+ private:
+ enum {
+ checked_bits = 1,
+ instance_bits = 1,
+ address_bits = BitsPerWord - checked_bits - instance_bits,
+
+ large_offset_bits = address_bits, // unioned with address
+ small_offset_bits = 7,
+ klass_bits = address_bits - small_offset_bits,
+
+ checked_shift = 0,
+ instance_shift = checked_shift + checked_bits,
+ address_shift = instance_shift + instance_bits,
+
+ offset_shift = address_shift, // unioned with address
+ klass_shift = offset_shift + small_offset_bits,
+
+ checked_mask_in_place = right_n_bits(checked_bits) << checked_shift,
+ instance_mask_in_place = right_n_bits(instance_bits) << instance_shift,
+#ifndef _WIN64
+ large_offset_mask = right_n_bits(large_offset_bits),
+ small_offset_mask = right_n_bits(small_offset_bits),
+ klass_mask = right_n_bits(klass_bits)
+#endif
+ };
+
+#ifdef _WIN64
+ // These values are too big for Win64
+ const static uintptr_t large_offset_mask = right_n_bits(large_offset_bits);
+ const static uintptr_t small_offset_mask = right_n_bits(small_offset_bits);
+ const static uintptr_t klass_mask = right_n_bits(klass_bits);
+#endif
+
+ // helper routines:
+ static bool is_checked_jfieldID(jfieldID id) {
+ uintptr_t as_uint = (uintptr_t) id;
+ return ((as_uint & checked_mask_in_place) != 0);
+ }
+ static intptr_t raw_instance_offset(jfieldID id) {
+ uintptr_t result = (uintptr_t) id >> address_shift;
+ if (VerifyJNIFields && is_checked_jfieldID(id)) {
+ result &= small_offset_mask; // cut off the hash bits
+ }
+ return (intptr_t)result;
+ }
+ static intptr_t encode_klass_hash(klassOop k, intptr_t offset);
+ static bool klass_hash_ok(klassOop k, jfieldID id);
+ static void verify_instance_jfieldID(klassOop k, jfieldID id);
+
+ public:
+ static bool is_valid_jfieldID(klassOop k, jfieldID id);
+
+ static bool is_instance_jfieldID(klassOop k, jfieldID id) {
+ uintptr_t as_uint = (uintptr_t) id;
+ return ((as_uint & instance_mask_in_place) != 0);
+ }
+ static bool is_static_jfieldID(jfieldID id) {
+ uintptr_t as_uint = (uintptr_t) id;
+ return ((as_uint & instance_mask_in_place) == 0);
+ }
+
+ static jfieldID to_instance_jfieldID(klassOop k, int offset) {
+ intptr_t as_uint = ((offset & large_offset_mask) << offset_shift) | instance_mask_in_place;
+ if (VerifyJNIFields) {
+ as_uint |= encode_klass_hash(k, offset);
+ }
+ jfieldID result = (jfieldID) as_uint;
+#ifndef ASSERT
+ // always verify in debug mode; switchable in anything else
+ if (VerifyJNIFields)
+#endif // ASSERT
+ {
+ verify_instance_jfieldID(k, result);
+ }
+ assert(raw_instance_offset(result) == (offset & large_offset_mask), "extract right offset");
+ return result;
+ }
+
+ static intptr_t from_instance_jfieldID(klassOop k, jfieldID id) {
+#ifndef ASSERT
+ // always verify in debug mode; switchable in anything else
+ if (VerifyJNIFields)
+#endif // ASSERT
+ {
+ verify_instance_jfieldID(k, id);
+ }
+ return raw_instance_offset(id);
+ }
+
+ static jfieldID to_static_jfieldID(JNIid* id) {
+ assert(id->is_static_field_id(), "from_JNIid, but not static field id");
+ jfieldID result = (jfieldID) id;
+ assert(from_static_jfieldID(result) == id, "must produce the same static id");
+ return result;
+ }
+
+ static JNIid* from_static_jfieldID(jfieldID id) {
+ assert(jfieldIDWorkaround::is_static_jfieldID(id),
+ "to_JNIid, but not static jfieldID");
+ JNIid* result = (JNIid*) id;
+ assert(result->is_static_field_id(), "to_JNIid, but not static field id");
+ return result;
+ }
+
+ static jfieldID to_jfieldID(instanceKlassHandle k, int offset, bool is_static) {
+ if (is_static) {
+ JNIid *id = k->jni_id_for(offset);
+ debug_only(id->set_is_static_field_id());
+ return jfieldIDWorkaround::to_static_jfieldID(id);
+ } else {
+ return jfieldIDWorkaround::to_instance_jfieldID(k(), offset);
+ }
+ }
+};
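+
+// A worked example of the packing above, assuming the unchecked path
+// (address_shift == 2, instance_mask_in_place == 0x2):
+//
+//   to_instance_jfieldID(k, 12)         -> (12 << 2) | 0x2 == 0x32
+//   raw_instance_offset((jfieldID)0x32) -> 0x32 >> 2       == 12
+//
+// With VerifyJNIFields enabled and an offset that fits in small_offset_bits,
+// encode_klass_hash() folds a klass hash (and the checked bit) into the id,
+// and raw_instance_offset() strips those bits again via small_offset_mask.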
diff --git a/src/share/vm/runtime/jniHandles.cpp b/src/share/vm/runtime/jniHandles.cpp
new file mode 100644
index 000000000..05078c559
--- /dev/null
+++ b/src/share/vm/runtime/jniHandles.cpp
@@ -0,0 +1,576 @@
+/*
+ * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_jniHandles.cpp.incl"
+
+
+JNIHandleBlock* JNIHandles::_global_handles = NULL;
+JNIHandleBlock* JNIHandles::_weak_global_handles = NULL;
+oop JNIHandles::_deleted_handle = NULL;
+
+
+jobject JNIHandles::make_local(oop obj) {
+ if (obj == NULL) {
+ return NULL; // ignore null handles
+ } else {
+ Thread* thread = Thread::current();
+ assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+ return thread->active_handles()->allocate_handle(obj);
+ }
+}
+
+
+// optimized versions
+
+jobject JNIHandles::make_local(Thread* thread, oop obj) {
+ if (obj == NULL) {
+ return NULL; // ignore null handles
+ } else {
+ assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+ return thread->active_handles()->allocate_handle(obj);
+ }
+}
+
+
+jobject JNIHandles::make_local(JNIEnv* env, oop obj) {
+ if (obj == NULL) {
+ return NULL; // ignore null handles
+ } else {
+ JavaThread* thread = JavaThread::thread_from_jni_environment(env);
+ assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+ return thread->active_handles()->allocate_handle(obj);
+ }
+}
+
+
+jobject JNIHandles::make_global(Handle obj) {
+ jobject res = NULL;
+ if (!obj.is_null()) {
+ // ignore null handles
+ MutexLocker ml(JNIGlobalHandle_lock);
+ assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
+ res = _global_handles->allocate_handle(obj());
+ } else {
+ CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+ }
+
+ return res;
+}
+
+
+jobject JNIHandles::make_weak_global(Handle obj) {
+ jobject res = NULL;
+ if (!obj.is_null()) {
+ // ignore null handles
+ MutexLocker ml(JNIGlobalHandle_lock);
+ assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
+ res = _weak_global_handles->allocate_handle(obj());
+ } else {
+ CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
+ }
+ return res;
+}
+
+jmethodID JNIHandles::make_jmethod_id(methodHandle mh) {
+ return (jmethodID) make_weak_global(mh);
+}
+
+
+
+void JNIHandles::change_method_associated_with_jmethod_id(jmethodID jmid, methodHandle mh) {
+ MutexLocker ml(JNIGlobalHandle_lock); // Is this necessary?
+ Handle obj = (Handle)mh;
+ oop* jobj = (oop*)jmid;
+ *jobj = obj();
+}
+
+
+void JNIHandles::destroy_global(jobject handle) {
+ if (handle != NULL) {
+ assert(is_global_handle(handle), "Invalid delete of global JNI handle");
+ *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it
+ }
+}
+
+
+void JNIHandles::destroy_weak_global(jobject handle) {
+ if (handle != NULL) {
+ assert(!CheckJNICalls || is_weak_global_handle(handle), "Invalid delete of weak global JNI handle");
+ *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it
+ }
+}
+
+void JNIHandles::destroy_jmethod_id(jmethodID mid) {
+ destroy_weak_global((jobject)mid);
+}
+
+
+void JNIHandles::oops_do(OopClosure* f) {
+ f->do_oop(&_deleted_handle);
+ _global_handles->oops_do(f);
+}
+
+
+void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+ _weak_global_handles->weak_oops_do(is_alive, f);
+}
+
+
+void JNIHandles::initialize() {
+ _global_handles = JNIHandleBlock::allocate_block();
+ _weak_global_handles = JNIHandleBlock::allocate_block();
+ EXCEPTION_MARK;
+ // We will never reach the CATCH below since Exceptions::_throw will cause
+ // the VM to exit if an exception is thrown during initialization
+ klassOop k = SystemDictionary::object_klass();
+ _deleted_handle = instanceKlass::cast(k)->allocate_permanent_instance(CATCH);
+}
+
+
+bool JNIHandles::is_local_handle(Thread* thread, jobject handle) {
+ JNIHandleBlock* block = thread->active_handles();
+
+ // Look back past possible native calls to jni_PushLocalFrame.
+ while (block != NULL) {
+ if (block->chain_contains(handle)) {
+ return true;
+ }
+ block = block->pop_frame_link();
+ }
+ return false;
+}
+
+
+// Determine if the handle is somewhere in the current thread's stack.
+// We can't easily isolate any particular stack frame the handle might
+// come from, so we'll check the whole stack.
+
+bool JNIHandles::is_frame_handle(JavaThread* thr, jobject obj) {
+ // If there is no java frame, then this must be top level code, such
+ // as the java command executable, in which case, this type of handle
+ // is not permitted.
+ return (thr->has_last_Java_frame() &&
+ (void*)obj < (void*)thr->stack_base() &&
+ (void*)obj >= (void*)thr->last_Java_sp());
+}
+
+
+bool JNIHandles::is_global_handle(jobject handle) {
+ return _global_handles->chain_contains(handle);
+}
+
+
+bool JNIHandles::is_weak_global_handle(jobject handle) {
+ return _weak_global_handles->chain_contains(handle);
+}
+
+long JNIHandles::global_handle_memory_usage() {
+ return _global_handles->memory_usage();
+}
+
+long JNIHandles::weak_global_handle_memory_usage() {
+ return _weak_global_handles->memory_usage();
+}
+
+
+class AlwaysAliveClosure: public BoolObjectClosure {
+public:
+ bool do_object_b(oop obj) { return true; }
+ void do_object(oop obj) { assert(false, "Don't call"); }
+};
+
+class CountHandleClosure: public OopClosure {
+private:
+ int _count;
+public:
+ CountHandleClosure(): _count(0) {}
+ void do_oop(oop* unused) {
+ _count++;
+ }
+ int count() { return _count; }
+};
+
+// We assume this is called at a safepoint: no lock is needed.
+void JNIHandles::print_on(outputStream* st) {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+ assert(_global_handles != NULL && _weak_global_handles != NULL,
+ "JNIHandles not initialized");
+
+ CountHandleClosure global_handle_count;
+ AlwaysAliveClosure always_alive;
+ oops_do(&global_handle_count);
+ weak_oops_do(&always_alive, &global_handle_count);
+
+ st->print_cr("JNI global references: %d", global_handle_count.count());
+ st->cr();
+ st->flush();
+}
+
+class VerifyHandleClosure: public OopClosure {
+public:
+ void do_oop(oop* root) {
+ (*root)->verify();
+ }
+};
+
+void JNIHandles::verify() {
+ VerifyHandleClosure verify_handle;
+ AlwaysAliveClosure always_alive;
+
+ oops_do(&verify_handle);
+ weak_oops_do(&always_alive, &verify_handle);
+}
+
+
+
+void jni_handles_init() {
+ JNIHandles::initialize();
+}
+
+
+int JNIHandleBlock::_blocks_allocated = 0;
+JNIHandleBlock* JNIHandleBlock::_block_free_list = NULL;
+#ifndef PRODUCT
+JNIHandleBlock* JNIHandleBlock::_block_list = NULL;
+#endif
+
+
+void JNIHandleBlock::zap() {
+ // Zap block values
+ _top = 0;
+ for (int index = 0; index < block_size_in_oops; index++) {
+ _handles[index] = badJNIHandle;
+ }
+}
+
+JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread) {
+ assert(thread == NULL || thread == Thread::current(), "sanity check");
+ JNIHandleBlock* block;
+ // Check the thread-local free list for a block so we don't
+ // have to acquire a mutex.
+ if (thread != NULL && thread->free_handle_block() != NULL) {
+ block = thread->free_handle_block();
+ thread->set_free_handle_block(block->_next);
+ }
+ else {
+ // locking with safepoint checking introduces a potential deadlock:
+ // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
+ // - another would hold Threads_lock (jni_AttachCurrentThread) and then
+ // JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
+ MutexLockerEx ml(JNIHandleBlockFreeList_lock,
+ Mutex::_no_safepoint_check_flag);
+ if (_block_free_list == NULL) {
+ // Allocate new block
+ block = new JNIHandleBlock();
+ _blocks_allocated++;
+ if (TraceJNIHandleAllocation) {
+ tty->print_cr("JNIHandleBlock " INTPTR_FORMAT " allocated (%d total blocks)",
+ block, _blocks_allocated);
+ }
+ if (ZapJNIHandleArea) block->zap();
+ #ifndef PRODUCT
+ // Link new block to list of all allocated blocks
+ block->_block_list_link = _block_list;
+ _block_list = block;
+ #endif
+ } else {
+ // Get block from free list
+ block = _block_free_list;
+ _block_free_list = _block_free_list->_next;
+ }
+ }
+ block->_top = 0;
+ block->_next = NULL;
+ block->_pop_frame_link = NULL;
+ // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
+ debug_only(block->_last = NULL);
+ debug_only(block->_free_list = NULL);
+ debug_only(block->_allocate_before_rebuild = -1);
+ return block;
+}
+
+
+void JNIHandleBlock::release_block(JNIHandleBlock* block, Thread* thread) {
+ assert(thread == NULL || thread == Thread::current(), "sanity check");
+ JNIHandleBlock* pop_frame_link = block->pop_frame_link();
+ // Put returned block at the beginning of the thread-local free list.
+ // Note that if thread == NULL, we use it as an implicit argument that
+ // we _don't_ want the block to be kept on the free_handle_block.
+ // See for instance JavaThread::exit().
+ if (thread != NULL ) {
+ if (ZapJNIHandleArea) block->zap();
+ JNIHandleBlock* freelist = thread->free_handle_block();
+ block->_pop_frame_link = NULL;
+ thread->set_free_handle_block(block);
+
+ // Add original freelist to end of chain
+ if ( freelist != NULL ) {
+ while ( block->_next != NULL ) block = block->_next;
+ block->_next = freelist;
+ }
+ block = NULL;
+ }
+ if (block != NULL) {
+ // Return blocks to free list
+ // locking with safepoint checking introduces a potential deadlock:
+ // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
+ // - another would hold Threads_lock (jni_AttachCurrentThread) and then
+ // JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
+ MutexLockerEx ml(JNIHandleBlockFreeList_lock,
+ Mutex::_no_safepoint_check_flag);
+ while (block != NULL) {
+ if (ZapJNIHandleArea) block->zap();
+ JNIHandleBlock* next = block->_next;
+ block->_next = _block_free_list;
+ _block_free_list = block;
+ block = next;
+ }
+ }
+ if (pop_frame_link != NULL) {
+ // As a sanity check we release blocks pointed to by the pop_frame_link.
+ // This should never happen (only if PopLocalFrame is not called the
+ // correct number of times).
+ release_block(pop_frame_link, thread);
+ }
+}
+
+
+void JNIHandleBlock::oops_do(OopClosure* f) {
+ JNIHandleBlock* current_chain = this;
+ // Iterate over chain of blocks, followed by chains linked through the
+ // pop frame links.
+ while (current_chain != NULL) {
+ for (JNIHandleBlock* current = current_chain; current != NULL;
+ current = current->_next) {
+ assert(current == current_chain || current->pop_frame_link() == NULL,
+ "only blocks first in chain should have pop frame link set");
+ for (int index = 0; index < current->_top; index++) {
+ oop* root = &(current->_handles)[index];
+ oop value = *root;
+ // traverse heap pointers only, not deleted handles or free list
+ // pointers
+ if (value != NULL && Universe::heap()->is_in_reserved(value)) {
+ f->do_oop(root);
+ }
+ }
+ // the next handle block is valid only if current block is full
+ if (current->_top < block_size_in_oops) {
+ break;
+ }
+ }
+ current_chain = current_chain->pop_frame_link();
+ }
+}
+
+
+void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
+ OopClosure* f) {
+ for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
+ assert(current->pop_frame_link() == NULL,
+ "blocks holding weak global JNI handles should not have pop frame link set");
+ for (int index = 0; index < current->_top; index++) {
+ oop* root = &(current->_handles)[index];
+ oop value = *root;
+ // traverse heap pointers only, not deleted handles or free list pointers
+ if (value != NULL && Universe::heap()->is_in_reserved(value)) {
+ if (is_alive->do_object_b(value)) {
+ // The weakly referenced object is alive, update pointer
+ f->do_oop(root);
+ } else {
+ // The weakly referenced object is not alive, clear the reference by storing NULL
+ if (TraceReferenceGC) {
+ tty->print_cr("Clearing JNI weak reference (" INTPTR_FORMAT ")", root);
+ }
+ *root = NULL;
+ }
+ }
+ }
+ // the next handle block is valid only if current block is full
+ if (current->_top < block_size_in_oops) {
+ break;
+ }
+ }
+}
+
+
+jobject JNIHandleBlock::allocate_handle(oop obj) {
+ assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+ if (_top == 0) {
+ // This is the first allocation or the initial block got zapped when
+ // entering a native function. If we have any following blocks they are
+ // not valid anymore.
+ for (JNIHandleBlock* current = _next; current != NULL;
+ current = current->_next) {
+ assert(current->_last == NULL, "only first block should have _last set");
+ assert(current->_free_list == NULL,
+ "only first block should have _free_list set");
+ current->_top = 0;
+ if (ZapJNIHandleArea) current->zap();
+ }
+ // Clear initial block
+ _free_list = NULL;
+ _allocate_before_rebuild = 0;
+ _last = this;
+ if (ZapJNIHandleArea) zap();
+ }
+
+ // Try last block
+ if (_last->_top < block_size_in_oops) {
+ oop* handle = &(_last->_handles)[_last->_top++];
+ *handle = obj;
+ return (jobject) handle;
+ }
+
+ // Try free list
+ if (_free_list != NULL) {
+ oop* handle = _free_list;
+ _free_list = (oop*) *_free_list;
+ *handle = obj;
+ return (jobject) handle;
+ }
+  // Check if an unused block follows the last block
+ if (_last->_next != NULL) {
+ // update last and retry
+ _last = _last->_next;
+ return allocate_handle(obj);
+ }
+
+ // No space available, we have to rebuild free list or expand
+ if (_allocate_before_rebuild == 0) {
+ rebuild_free_list(); // updates _allocate_before_rebuild counter
+ } else {
+ // Append new block
+ Thread* thread = Thread::current();
+ Handle obj_handle(thread, obj);
+    // This can block, so we need to preserve obj across the call.
+ _last->_next = JNIHandleBlock::allocate_block(thread);
+ _last = _last->_next;
+ _allocate_before_rebuild--;
+ obj = obj_handle();
+ }
+ return allocate_handle(obj); // retry
+}
+
+
+void JNIHandleBlock::rebuild_free_list() {
+ assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
+ int free = 0;
+ int blocks = 0;
+ for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
+ for (int index = 0; index < current->_top; index++) {
+ oop* handle = &(current->_handles)[index];
+ if (*handle == JNIHandles::deleted_handle()) {
+ // this handle was cleared out by a delete call, reuse it
+ *handle = (oop) _free_list;
+ _free_list = handle;
+ free++;
+ }
+ }
+ // we should not rebuild free list if there are unused handles at the end
+ assert(current->_top == block_size_in_oops, "just checking");
+ blocks++;
+ }
+ // Heuristic: if more than half of the handles are free we rebuild next time
+ // as well, otherwise we append a corresponding number of new blocks before
+ // attempting a free list rebuild again.
+ int total = blocks * block_size_in_oops;
+ int extra = total - 2*free;
+ if (extra > 0) {
+ // Not as many free handles as we would like - compute number of new blocks to append
+ _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops;
+ }
+ if (TraceJNIHandleAllocation) {
+ tty->print_cr("Rebuild free list JNIHandleBlock " INTPTR_FORMAT " blocks=%d used=%d free=%d add=%d",
+ this, blocks, total-free, free, _allocate_before_rebuild);
+ }
+}
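+
+// A worked example of the heuristic above, assuming block_size_in_oops == 32
+// and a chain of 4 blocks (total == 128 handles):
+//   - free == 40:  extra == 128 - 80 == 48, so _allocate_before_rebuild
+//                  becomes (48 + 31) / 32 == 2 and two blocks are appended
+//                  before the free list is rebuilt again.
+//   - free == 70:  more than half the handles are free, extra is negative,
+//                  _allocate_before_rebuild stays 0, and the next overflow
+//                  triggers another rebuild instead of appending blocks.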
+
+
+bool JNIHandleBlock::contains(jobject handle) const {
+ return ((jobject)&_handles[0] <= handle && handle<(jobject)&_handles[_top]);
+}
+
+
+bool JNIHandleBlock::chain_contains(jobject handle) const {
+ for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != NULL; current = current->_next) {
+ if (current->contains(handle)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+int JNIHandleBlock::length() const {
+ int result = 1;
+ for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) {
+ result++;
+ }
+ return result;
+}
+
+// This method is not thread-safe, i.e., it must be called while holding a lock on the
+// structure.
+long JNIHandleBlock::memory_usage() const {
+ return length() * sizeof(JNIHandleBlock);
+}
+
+
+#ifndef PRODUCT
+
+bool JNIHandleBlock::any_contains(jobject handle) {
+ for (JNIHandleBlock* current = _block_list; current != NULL; current = current->_block_list_link) {
+ if (current->contains(handle)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void JNIHandleBlock::print_statistics() {
+ int used_blocks = 0;
+ int free_blocks = 0;
+ int used_handles = 0;
+ int free_handles = 0;
+ JNIHandleBlock* block = _block_list;
+ while (block != NULL) {
+ if (block->_top > 0) {
+ used_blocks++;
+ } else {
+ free_blocks++;
+ }
+ used_handles += block->_top;
+ free_handles += (block_size_in_oops - block->_top);
+ block = block->_block_list_link;
+ }
+ tty->print_cr("JNIHandleBlocks statistics");
+ tty->print_cr("- blocks allocated: %d", used_blocks + free_blocks);
+ tty->print_cr("- blocks in use: %d", used_blocks);
+ tty->print_cr("- blocks free: %d", free_blocks);
+ tty->print_cr("- handles in use: %d", used_handles);
+ tty->print_cr("- handles free: %d", free_handles);
+}
+
+#endif
diff --git a/src/share/vm/runtime/jniHandles.hpp b/src/share/vm/runtime/jniHandles.hpp
new file mode 100644
index 000000000..88cb71559
--- /dev/null
+++ b/src/share/vm/runtime/jniHandles.hpp
@@ -0,0 +1,212 @@
+/*
+ * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class JNIHandleBlock;
+
+
+// Interface for creating and resolving local/global JNI handles
+
+class JNIHandles : AllStatic {
+ friend class VMStructs;
+ private:
+ static JNIHandleBlock* _global_handles; // First global handle block
+ static JNIHandleBlock* _weak_global_handles; // First weak global handle block
+ static oop _deleted_handle; // Sentinel marking deleted handles
+
+ public:
+ // Resolve handle into oop
+ inline static oop resolve(jobject handle);
+ // Resolve externally provided handle into oop with some guards
+ inline static oop resolve_external_guard(jobject handle);
+ // Resolve handle into oop, result guaranteed not to be null
+ inline static oop resolve_non_null(jobject handle);
+
+ // Local handles
+ static jobject make_local(oop obj);
+ static jobject make_local(JNIEnv* env, oop obj); // Fast version when env is known
+ static jobject make_local(Thread* thread, oop obj); // Even faster version when current thread is known
+ inline static void destroy_local(jobject handle);
+
+ // Global handles
+ static jobject make_global(Handle obj);
+ static void destroy_global(jobject handle);
+
+ // Weak global handles
+ static jobject make_weak_global(Handle obj);
+ static void destroy_weak_global(jobject handle);
+
+ // jmethodID handling (as Weak global handles).
+ // Because the useful life-span of a jmethodID cannot be determined, once created they are
+ // never reclaimed. The methods to which they refer, however, can be GC'ed away if the class
+ // is unloaded or if the method is made obsolete or deleted -- in these cases, the jmethodID
+ // refers to NULL (as is the case for any weak reference).
+ static jmethodID make_jmethod_id(methodHandle mh);
+ static void destroy_jmethod_id(jmethodID mid);
+ inline static methodOop resolve_jmethod_id(jmethodID mid);
+ inline static methodOop checked_resolve_jmethod_id(jmethodID mid); // NULL on invalid jmethodID
+ static void change_method_associated_with_jmethod_id(jmethodID jmid, methodHandle mh);
+
+  // Sentinel marking deleted handles in a block. Note that we cannot store NULL as
+  // the sentinel, since clearing of weak global JNI refs is done by storing NULL in
+ // the handle. The handle may not be reused before destroy_weak_global is called.
+ static oop deleted_handle() { return _deleted_handle; }
+
+ // Initialization
+ static void initialize();
+
+ // Debugging
+ static void print_on(outputStream* st);
+ static void print() { print_on(tty); }
+ static void verify();
+ static bool is_local_handle(Thread* thread, jobject handle);
+ static bool is_frame_handle(JavaThread* thr, jobject obj);
+ static bool is_global_handle(jobject handle);
+ static bool is_weak_global_handle(jobject handle);
+ static long global_handle_memory_usage();
+ static long weak_global_handle_memory_usage();
+
+  // Garbage collection support (global handles only; local handles are traversed from the thread)
+ // Traversal of regular global handles
+ static void oops_do(OopClosure* f);
+ // Traversal of weak global handles. Unreachable oops are cleared.
+ static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+};
+
+
+
+// JNI handle blocks holding local/global JNI handles
+
+class JNIHandleBlock : public CHeapObj {
+ friend class VMStructs;
+ private:
+ enum SomeConstants {
+ block_size_in_oops = 32 // Number of handles per handle block
+ };
+
+ oop _handles[block_size_in_oops]; // The handles
+ int _top; // Index of next unused handle
+ JNIHandleBlock* _next; // Link to next block
+
+ // The following instance variables are only used by the first block in a chain.
+  // Having two types of blocks complicates the code, and the space overhead is negligible.
+ JNIHandleBlock* _last; // Last block in use
+ JNIHandleBlock* _pop_frame_link; // Block to restore on PopLocalFrame call
+ oop* _free_list; // Handle free list
+ int _allocate_before_rebuild; // Number of blocks to allocate before rebuilding free list
+
+ #ifndef PRODUCT
+ JNIHandleBlock* _block_list_link; // Link for list below
+ static JNIHandleBlock* _block_list; // List of all allocated blocks (for debugging only)
+ #endif
+
+ static JNIHandleBlock* _block_free_list; // Free list of currently unused blocks
+ static int _blocks_allocated; // For debugging/printing
+
+ // Fill block with bad_handle values
+ void zap();
+
+  // No more handles in both the current and following blocks
+ void clear() { _top = 0; }
+
+ // Free list computation
+ void rebuild_free_list();
+
+ public:
+ // Handle allocation
+ jobject allocate_handle(oop obj);
+
+ // Block allocation and block free list management
+ static JNIHandleBlock* allocate_block(Thread* thread = NULL);
+ static void release_block(JNIHandleBlock* block, Thread* thread = NULL);
+
+ // JNI PushLocalFrame/PopLocalFrame support
+ JNIHandleBlock* pop_frame_link() const { return _pop_frame_link; }
+ void set_pop_frame_link(JNIHandleBlock* block) { _pop_frame_link = block; }
+
+ // Stub generator support
+ static int top_offset_in_bytes() { return offset_of(JNIHandleBlock, _top); }
+
+ // Garbage collection support
+ // Traversal of regular handles
+ void oops_do(OopClosure* f);
+ // Traversal of weak handles. Unreachable oops are cleared.
+ void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+
+ // Debugging
+ bool chain_contains(jobject handle) const; // Does this block or following blocks contain handle
+ bool contains(jobject handle) const; // Does this block contain handle
+ int length() const; // Length of chain starting with this block
+ long memory_usage() const;
+ #ifndef PRODUCT
+ static bool any_contains(jobject handle); // Does any block currently in use contain handle
+ static void print_statistics();
+ #endif
+};
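+
+// Rough sketch of a block chain; only the first block carries the chain-wide
+// fields (_last, _pop_frame_link, _free_list, _allocate_before_rebuild):
+//
+//   first block --_next--> block --_next--> block   (<- _last: being filled)
+//
+// Within a block, _handles[0 .. _top) are in use; a following block is only
+// considered valid when the current block is full (_top == block_size_in_oops).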
+
+
+inline oop JNIHandles::resolve(jobject handle) {
+ oop result = (handle == NULL ? (oop)NULL : *(oop*)handle);
+ assert(result != NULL || (handle == NULL || !CheckJNICalls || is_weak_global_handle(handle)), "Invalid value read from jni handle");
+ assert(result != badJNIHandle, "Pointing to zapped jni handle area");
+ return result;
+};
+
+
+inline oop JNIHandles::resolve_external_guard(jobject handle) {
+ if (handle == NULL) return NULL;
+ oop result = *(oop*)handle;
+ if (result == NULL || result == badJNIHandle) return NULL;
+ return result;
+};
+
+
+inline oop JNIHandles::resolve_non_null(jobject handle) {
+ assert(handle != NULL, "JNI handle should not be null");
+ oop result = *(oop*)handle;
+ assert(result != NULL, "Invalid value read from jni handle");
+ assert(result != badJNIHandle, "Pointing to zapped jni handle area");
+ // Don't let that private _deleted_handle object escape into the wild.
+ assert(result != deleted_handle(), "Used a deleted global handle.");
+ return result;
+};
+
+inline methodOop JNIHandles::resolve_jmethod_id(jmethodID mid) {
+ return (methodOop) resolve_non_null((jobject)mid);
+};
+
+inline methodOop JNIHandles::checked_resolve_jmethod_id(jmethodID mid) {
+ jobject handle = (jobject)mid;
+ if (is_weak_global_handle(handle)) {
+ return (methodOop) resolve_non_null(handle);
+ } else {
+ return (methodOop) NULL;
+ }
+};
+
+
+inline void JNIHandles::destroy_local(jobject handle) {
+ if (handle != NULL) {
+ *((oop*)handle) = deleted_handle(); // Mark the handle as deleted, allocate will reuse it
+ }
+}
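+
+// A hypothetical usage sketch of the API above, as seen from a native entry
+// point (the locals 'env', 'thread' and 'obj' are illustrative only):
+//
+//   oop obj = ...;                                    // some heap object
+//   jobject lref = JNIHandles::make_local(env, obj);  // dies with the frame
+//   Handle h(thread, obj);
+//   jobject gref = JNIHandles::make_global(h);        // survives across calls
+//   oop o = JNIHandles::resolve(gref);
+//   JNIHandles::destroy_global(gref);                 // slot marked deleted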
diff --git a/src/share/vm/runtime/jniPeriodicChecker.cpp b/src/share/vm/runtime/jniPeriodicChecker.cpp
new file mode 100644
index 000000000..7632e28f0
--- /dev/null
+++ b/src/share/vm/runtime/jniPeriodicChecker.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_jniPeriodicChecker.cpp.incl"
+
+
+// --------------------------------------------------------
+// Class to aid in periodic checking under CheckJNICalls
+class JniPeriodicCheckerTask : public PeriodicTask {
+ public:
+ JniPeriodicCheckerTask(int interval_time) : PeriodicTask(interval_time) {}
+ void task() { os::run_periodic_checks(); }
+ static void engage();
+ static void disengage();
+};
+
+
+//----------------------------------------------------------
+// Implementation of JniPeriodicChecker
+
+JniPeriodicCheckerTask* JniPeriodicChecker::_task = NULL;
+
+/*
+ * The engage() method is called at initialization time via
+ * Thread::create_vm() to initialize the JniPeriodicChecker and
+ * register it with the WatcherThread as a periodic task.
+ */
+void JniPeriodicChecker::engage() {
+ if (CheckJNICalls && !is_active()) {
+ // start up the periodic task
+ _task = new JniPeriodicCheckerTask(10);
+ _task->enroll();
+ }
+}
+
+
+/*
+ * The disengage() method is responsible for deactivating the periodic
+ * task. This method is called from before_exit() in java.cpp and is only called
+ * after the WatcherThread has been stopped.
+ */
+void JniPeriodicChecker::disengage() {
+ if (CheckJNICalls && is_active()) {
+ // remove JniPeriodicChecker
+ _task->disenroll();
+ delete _task;
+ _task = NULL;
+ }
+}
+
+void jniPeriodicChecker_exit() {
+ if (!CheckJNICalls) return;
+}
diff --git a/src/share/vm/runtime/jniPeriodicChecker.hpp b/src/share/vm/runtime/jniPeriodicChecker.hpp
new file mode 100644
index 000000000..9914b1881
--- /dev/null
+++ b/src/share/vm/runtime/jniPeriodicChecker.hpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class JniPeriodicCheckerTask;
+
+/*
+ * This gets activated under Xcheck:jni (CheckJNICalls), and is typically
+ * used to detect errors caused by JNI applications, such as signal handler
+ * hijacking, or hijacking of VA 0x0 either by mmap or an OS error.
+ */
+
+
+class JniPeriodicChecker : AllStatic {
+
+ friend class JniPeriodicCheckerTask;
+
+ private:
+ static JniPeriodicCheckerTask* _task;
+
+ public:
+ // Start/stop task
+ static void engage();
+ static void disengage();
+
+ static bool is_active() { return _task != NULL; }
+
+ static void initialize();
+ static void destroy();
+};
+
+void jniPeriodicChecker_exit();
diff --git a/src/share/vm/runtime/memprofiler.cpp b/src/share/vm/runtime/memprofiler.cpp
new file mode 100644
index 000000000..4d5ad533d
--- /dev/null
+++ b/src/share/vm/runtime/memprofiler.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_memprofiler.cpp.incl"
+
+#ifndef PRODUCT
+
+// --------------------------------------------------------
+// MemProfilerTask
+
+class MemProfilerTask : public PeriodicTask {
+ public:
+ MemProfilerTask(int interval_time) : PeriodicTask(interval_time) {}
+ void task();
+};
+
+
+void MemProfilerTask::task() {
+ // Get thread lock to provide mutual exclusion, and so we can iterate safely over the thread list.
+ MutexLocker mu(Threads_lock);
+ MemProfiler::do_trace();
+}
+
+
+//----------------------------------------------------------
+// Implementation of MemProfiler
+
+MemProfilerTask* MemProfiler::_task = NULL;
+FILE* MemProfiler::_log_fp = NULL;
+
+
+bool MemProfiler::is_active() {
+ return _task != NULL;
+}
+
+
+void MemProfiler::engage() {
+ const char *log_name = "mprofile.log";
+ if (!is_active()) {
+ // Create log file
+ _log_fp = fopen(log_name , "w+");
+ if (_log_fp == NULL) {
+ fatal1("MemProfiler: Cannot create log file: %s", log_name);
+ }
+ fprintf(_log_fp, "MemProfiler: sizes are in Kb, time is in seconds since startup\n\n");
+ fprintf(_log_fp, " time, #thr, #cls, heap, heap, perm, perm, code, hndls, rescs, oopmp\n");
+ fprintf(_log_fp, " used, total, used, total, total, total, total, total\n");
+ fprintf(_log_fp, "--------------------------------------------------------------------------\n");
+
+ _task = new MemProfilerTask(MemProfilingInterval);
+ _task->enroll();
+ }
+}
+
+
+void MemProfiler::disengage() {
+ if (!is_active()) return;
+ // Do one last trace at disengage time
+ do_trace();
+
+ // Close logfile
+ fprintf(_log_fp, "MemProfiler detached\n");
+ fclose(_log_fp);
+
+ // remove MemProfilerTask
+ assert(_task != NULL, "sanity check");
+ _task->disenroll();
+ delete _task;
+ _task = NULL;
+}
+
+
+void MemProfiler::do_trace() {
+ // Calculate thread local sizes
+ size_t handles_memory_usage = VMThread::vm_thread()->handle_area()->size_in_bytes();
+ size_t resource_memory_usage = VMThread::vm_thread()->resource_area()->size_in_bytes();
+ JavaThread *cur = Threads::first();
+ while (cur != NULL) {
+ handles_memory_usage += cur->handle_area()->size_in_bytes();
+ resource_memory_usage += cur->resource_area()->size_in_bytes();
+ cur = cur->next();
+ }
+
+ // Print trace line in log
+ fprintf(_log_fp, "%6.1f,%5d,%5d,%6ld,%6ld,%6ld,%6ld,",
+ os::elapsedTime(),
+ Threads::number_of_threads(),
+ SystemDictionary::number_of_classes(),
+ Universe::heap()->used() / K,
+ Universe::heap()->capacity() / K,
+ Universe::heap()->permanent_used() / HWperKB,
+ Universe::heap()->permanent_capacity() / HWperKB);
+
+ fprintf(_log_fp, "%6ld,", CodeCache::capacity() / K);
+
+ fprintf(_log_fp, "%6ld,%6ld,%6ld\n",
+ handles_memory_usage / K,
+ resource_memory_usage / K,
+ OopMapCache::memory_usage() / K);
+ fflush(_log_fp);
+}
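+
+// An illustrative trace line in the format written above (made-up values):
+//
+//     time, #thr, #cls,  heap,  heap,  perm,  perm,  code, hndls, rescs, oopmp
+//     12.3,    9,  452,  1024,  4096,   512,  2048,  1200,    64,   128,    16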
+
+#endif
diff --git a/src/share/vm/runtime/memprofiler.hpp b/src/share/vm/runtime/memprofiler.hpp
new file mode 100644
index 000000000..e6025f064
--- /dev/null
+++ b/src/share/vm/runtime/memprofiler.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 1998 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Prints a periodic memory usage trace of the HotSpot VM
+
+class MemProfilerTask;
+
+class MemProfiler : AllStatic {
+ friend class MemProfilerTask;
+ private:
+ static MemProfilerTask* _task;
+ static FILE* _log_fp;
+ // Do trace (callback from MemProfilerTask and from disengage)
+ static void do_trace() PRODUCT_RETURN;
+ public:
+ // Start/stop the profiler
+ static void engage() PRODUCT_RETURN;
+ static void disengage() PRODUCT_RETURN;
+ // Tester
+ static bool is_active() PRODUCT_RETURN0;
+};
diff --git a/src/share/vm/runtime/monitorChunk.cpp b/src/share/vm/runtime/monitorChunk.cpp
new file mode 100644
index 000000000..dd9ddf401
--- /dev/null
+++ b/src/share/vm/runtime/monitorChunk.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright 1997-2000 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_monitorChunk.cpp.incl"
+
+MonitorChunk::MonitorChunk(int number_of_monitors) {
+  _number_of_monitors = number_of_monitors;
+  _monitors = NEW_C_HEAP_ARRAY(BasicObjectLock, number_of_monitors);
+ _next = NULL;
+}
+
+
+MonitorChunk::~MonitorChunk() {
+ FreeHeap(monitors());
+}
+
+
+void MonitorChunk::oops_do(OopClosure* f) {
+ for (int index = 0; index < number_of_monitors(); index++) {
+ at(index)->oops_do(f);
+ }
+}
diff --git a/src/share/vm/runtime/monitorChunk.hpp b/src/share/vm/runtime/monitorChunk.hpp
new file mode 100644
index 000000000..5a7eef64e
--- /dev/null
+++ b/src/share/vm/runtime/monitorChunk.hpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 1997-2000 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Data structure for holding monitors for one activation during
+// deoptimization.
+
+class MonitorChunk: public CHeapObj {
+ private:
+ int _number_of_monitors;
+ BasicObjectLock* _monitors;
+ BasicObjectLock* monitors() const { return _monitors; }
+ MonitorChunk* _next;
+ public:
+ // Constructor
+  MonitorChunk(int number_of_monitors);
+ ~MonitorChunk();
+
+ // link operations
+ MonitorChunk* next() const { return _next; }
+ void set_next(MonitorChunk* next) { _next = next; }
+
+ // Tells whether the monitor chunk is linked into the JavaThread
+ bool is_linked() const { return next() != NULL; }
+
+ // Returns the number of monitors
+ int number_of_monitors() const { return _number_of_monitors; }
+
+ // Returns the index'th monitor
+ BasicObjectLock* at(int index) { assert(index >= 0 && index < number_of_monitors(), "out of bounds check"); return &monitors()[index]; }
+
+
+ // Memory management
+ void oops_do(OopClosure* f);
+
+  // Tells whether addr points into the monitors.
+ bool contains(void* addr) const { return (addr >= (void*) monitors()) && (addr < (void*) (monitors() + number_of_monitors())); }
+};
diff --git a/src/share/vm/runtime/mutex.cpp b/src/share/vm/runtime/mutex.cpp
new file mode 100644
index 000000000..a4d341b96
--- /dev/null
+++ b/src/share/vm/runtime/mutex.cpp
@@ -0,0 +1,1356 @@
+
+/*
+ * Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_mutex.cpp.incl"
+
+// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
+//
+// Native Monitor-Mutex locking - theory of operations
+//
+// * Native Monitors are completely unrelated to Java-level monitors,
+// although the "back-end" slow-path implementations share a common lineage.
+// See objectMonitor:: in synchronizer.cpp.
+// Native Monitors do *not* support nesting or recursion but otherwise
+// they're basically Hoare-flavor monitors.
+//
+// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
+// in the _LockWord from zero to non-zero. Note that the _Owner field
+// is advisory and is used only to verify that the thread calling unlock()
+// is indeed the last thread to have acquired the lock.
+//
+// * Contending threads "push" themselves onto the front of the contention
+// queue -- called the cxq -- with CAS and then spin/park.
+// The _LockWord contains the LockByte as well as the pointer to the head
+// of the cxq. Colocating the LockByte with the cxq precludes certain races.
+//
+// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
+// idioms. We currently use MEMBAR in the uncontended unlock() path, as
+// MEMBAR often has less latency than CAS. If warranted, we could switch to
+// a CAS:0 mode, using timers to close the resultant race, as is done
+// with Java Monitors in synchronizer.cpp.
+//
+// See the following for a discussion of the relative cost of atomics (CAS)
+// MEMBAR, and ways to eliminate such instructions from the common-case paths:
+// -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
+// -- http://blogs.sun.com/dave/resource/MustangSync.pdf
+// -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
+// -- synchronizer.cpp
+//
+// * Overall goals - desiderata
+// 1. Minimize context switching
+// 2. Minimize lock migration
+// 3. Minimize CPI -- affinity and locality
+// 4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
+// 5. Minimize outer lock hold times
+// 6. Behave gracefully on a loaded system
+//
+// * Thread flow and list residency:
+//
+// Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
+// [..resident on monitor list..]
+// [...........contending..................]
+//
+// -- The contention queue (cxq) contains recently-arrived threads (RATs).
+// Threads on the cxq eventually drain into the EntryList.
+// -- Invariant: a thread appears on at most one list -- cxq, EntryList
+// or WaitSet -- at any one time.
+// -- For a given monitor there can be at most one "OnDeck" thread at any
+//   given time, but if need be this particular invariant could be relaxed.
+//
+// * The WaitSet and EntryList linked lists are composed of ParkEvents.
+// I use ParkEvent instead of threads as ParkEvents are immortal and
+// type-stable, meaning we can safely unpark() a possibly stale
+// list element in the unlock()-path. (That's benign).
+//
+// * Succession policy - providing for progress:
+//
+// As necessary, the unlock()ing thread identifies, unlinks, and unparks
+// an "heir presumptive" tentative successor thread from the EntryList.
+// This becomes the so-called "OnDeck" thread, of which there can be only
+// one at any given time for a given monitor. The wakee will recontend
+// for ownership of monitor.
+//
+// Succession is provided for by a policy of competitive handoff.
+// The exiting thread does _not_ grant or pass ownership to the
+//   successor thread. (This is also referred to as "handoff" succession.)
+// Instead the exiting thread releases ownership and possibly wakes
+// a successor, so the successor can (re)compete for ownership of the lock.
+//
+// Competitive handoff provides excellent overall throughput at the expense
+// of short-term fairness. If fairness is a concern then one remedy might
+// be to add an AcquireCounter field to the monitor. After a thread acquires
+// the lock it will decrement the AcquireCounter field. When the count
+// reaches 0 the thread would reset the AcquireCounter variable, abdicate
+// the lock directly to some thread on the EntryList, and then move itself to the
+// tail of the EntryList.
+//
+// But in practice most threads engage or otherwise participate in resource
+// bounded producer-consumer relationships, so lock domination is not usually
+// a practical concern. Recall too, that in general it's easier to construct
+// a fair lock from a fast lock, but not vice-versa.
+//
+// * The cxq can have multiple concurrent "pushers" but only one concurrent
+// detaching thread. This mechanism is immune from the ABA corruption.
+// More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
+// We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
+// thread constraint.
+//
+// * Taken together, the cxq and the EntryList constitute or form a
+// single logical queue of threads stalled trying to acquire the lock.
+// We use two distinct lists to reduce heat on the list ends.
+// Threads in lock() enqueue onto cxq while threads in unlock() will
+// dequeue from the EntryList. (c.f. Michael Scott's "2Q" algorithm).
+// A key desideratum is to minimize queue & monitor metadata manipulation
+// that occurs while holding the "outer" monitor lock -- that is, we want to
+// minimize monitor lock holds times.
+//
+// The EntryList is ordered by the prevailing queue discipline and
+// can be organized in any convenient fashion, such as a doubly-linked list or
+// a circular doubly-linked list. If we need a priority queue then something akin
+// to Solaris' sleepq would work nicely. Viz.,
+// -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
+// -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
+// Queue discipline is enforced at ::unlock() time, when the unlocking thread
+// drains the cxq into the EntryList, and orders or reorders the threads on the
+// EntryList accordingly.
+//
+// Barring "lock barging", this mechanism provides fair cyclic ordering,
+// somewhat similar to an elevator-scan.
+//
+// * OnDeck
+// -- For a given monitor there can be at most one OnDeck thread at any given
+// instant. The OnDeck thread is contending for the lock, but has been
+// unlinked from the EntryList and cxq by some previous unlock() operations.
+// Once a thread has been designated the OnDeck thread it will remain so
+// until it manages to acquire the lock -- being OnDeck is a stable property.
+//   -- Threads on the EntryList or cxq are _not_ allowed to attempt lock acquisition.
+// -- OnDeck also serves as an "inner lock" as follows. Threads in unlock() will, after
+// having cleared the LockByte and dropped the outer lock, attempt to "trylock"
+// OnDeck by CASing the field from null to non-null. If successful, that thread
+// is then responsible for progress and succession and can use CAS to detach and
+// drain the cxq into the EntryList. By convention, only this thread, the holder of
+// the OnDeck inner lock, can manipulate the EntryList or detach and drain the
+// RATs on the cxq into the EntryList. This avoids ABA corruption on the cxq as
+// we allow multiple concurrent "push" operations but restrict detach concurrency
+// to at most one thread. Having selected and detached a successor, the thread then
+// changes the OnDeck to refer to that successor, and then unparks the successor.
+// That successor will eventually acquire the lock and clear OnDeck. Beware
+// that the OnDeck usage as a lock is asymmetric. A thread in unlock() transiently
+// "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
+// and then the successor eventually "drops" OnDeck. Note that there's never
+// any sense of contention on the inner lock, however. Threads never contend
+// or wait for the inner lock.
+//   -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
+//      http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
+// In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
+// TState fields found in Java-level objectMonitors. (See synchronizer.cpp).
+//
+// * Waiting threads reside on the WaitSet list -- wait() puts
+// the caller onto the WaitSet. Notify() or notifyAll() simply
+// transfers threads from the WaitSet to either the EntryList or cxq.
+// Subsequent unlock() operations will eventually unpark the notifyee.
+//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
+// it's likely the notifyee would simply impale itself on the lock held
+// by the notifier.
+//
+// * The mechanism is obstruction-free in that if the holder of the transient
+// OnDeck lock in unlock() is preempted or otherwise stalls, other threads
+// can still acquire and release the outer lock and continue to make progress.
+// At worst, waking of already blocked contending threads may be delayed,
+// but nothing worse. (We only use "trylock" operations on the inner OnDeck
+// lock).
+//
+// * Note that thread-local storage must be initialized before a thread
+// uses Native monitors or mutexes. The native monitor-mutex subsystem
+// depends on Thread::current().
+//
+// * The monitor synchronization subsystem avoids the use of native
+// synchronization primitives except for the narrow platform-specific
+// park-unpark abstraction. See the comments in os_solaris.cpp regarding
+// the semantics of park-unpark. Put another way, this monitor implementation
+// depends only on atomic operations and park-unpark. The monitor subsystem
+// manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
+// underlying OS manages the READY<->RUN transitions.
+//
+// * The memory consistency model provided by lock()-unlock() is at least as
+//   strong as, or stronger than, the Java Memory Model defined by JSR-133.
+// That is, we guarantee at least entry consistency, if not stronger.
+// See http://g.oswego.edu/dl/jmm/cookbook.html.
+//
+// * Thread:: currently contains a set of purpose-specific ParkEvents:
+// _MutexEvent, _ParkEvent, etc. A better approach might be to do away with
+// the purpose-specific ParkEvents and instead implement a general per-thread
+// stack of available ParkEvents which we could provision on-demand. The
+// stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
+// and ::Release(). A thread would simply pop an element from the local stack before it
+// enqueued or park()ed. When the contention was over the thread would
+// push the no-longer-needed ParkEvent back onto its stack.
+//
+// * A slightly reduced form of ILock() and IUnlock() has been partially
+// model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
+// It'd be interesting to see if TLA/TLC could be useful as well.
+//
+// * Mutex-Monitor is a low-level "leaf" subsystem. That is, the monitor
+// code should never call other code in the JVM that might itself need to
+// acquire monitors or mutexes. That's true *except* in the case of the
+// ThreadBlockInVM state transition wrappers. The ThreadBlockInVM DTOR handles
+// mutator reentry (ingress) by checking for a pending safepoint in which case it will
+// call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
+// In that particular case a call to lock() for a given Monitor can end up recursively
+// calling lock() on another monitor. While distasteful, this is largely benign
+//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
+//
+// It's unfortunate that native mutexes and thread state transitions were convolved.
+// They're really separate concerns and should have remained that way. Melding
+// them together was facile -- a bit too facile. The current implementation badly
+// conflates the two concerns.
+//
+// * TODO-FIXME:
+//
+// -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
+// We should also add DTRACE probes in the ParkEvent subsystem for
+// Park-entry, Park-exit, and Unpark.
+//
+// -- We have an excess of mutex-like constructs in the JVM, namely:
+// 1. objectMonitors for Java-level synchronization (synchronizer.cpp)
+// 2. low-level muxAcquire and muxRelease
+// 3. low-level spinAcquire and spinRelease
+// 4. native Mutex:: and Monitor::
+// 5. jvm_raw_lock() and _unlock()
+// 6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
+// similar name.
+//
+// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
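+
+// Condensed pseudo-code sketch of the fast paths described above (the real
+// implementations follow, e.g. TryLock/TrySpin and ILock/IUnlock):
+//
+//   lock():   if CAS(LockByte, 0, 1) succeeds, return           // uncontended
+//             otherwise push self onto cxq with CAS, spin/park, and retry
+//
+//   unlock(): clear LockByte (ST;MEMBAR in the uncontended path)
+//             if (cxq or EntryList non-empty and no OnDeck thread)
+//               trylock OnDeck (CAS NULL -> self), drain cxq into EntryList,
+//               designate a successor as OnDeck and unpark it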
+
+
+// CASPTR() uses the canonical argument order that dominates in the literature.
+// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.
+
+#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
+#define UNS(x) (uintptr_t(x))
+#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}
+
+// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
+// Bijective except for the trailing mask operation.
+// Useful for spin loops as the compiler can't optimize it away.
+
+static inline jint MarsagliaXORV (jint x) {
+ if (x == 0) x = 1|os::random() ;
+ x ^= x << 6;
+ x ^= ((unsigned)x) >> 21;
+ x ^= x << 7 ;
+ return x & 0x7FFFFFFF ;
+}
+
+static inline jint MarsagliaXOR (jint * const a) {
+ jint x = *a ;
+ if (x == 0) x = UNS(a)|1 ;
+ x ^= x << 6;
+ x ^= ((unsigned)x) >> 21;
+ x ^= x << 7 ;
+ *a = x ;
+ return x & 0x7FFFFFFF ;
+}
+
+static int Stall (int its) {
+ static volatile jint rv = 1 ;
+ volatile int OnFrame = 0 ;
+ jint v = rv ^ UNS(OnFrame) ;
+ while (--its >= 0) {
+ v = MarsagliaXORV (v) ;
+ }
+ // Make this impossible for the compiler to optimize away,
+ // but (mostly) avoid W coherency sharing on MP systems.
+ if (v == 0x12345) rv = v ;
+ return v ;
+}
+
+int Monitor::TryLock () {
+ intptr_t v = _LockWord.FullWord ;
+ for (;;) {
+ if ((v & _LBIT) != 0) return 0 ;
+ const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
+ if (v == u) return 1 ;
+ v = u ;
+ }
+}
+
+int Monitor::TryFast () {
+ // Optimistic fast-path form ...
+ // Fast-path attempt for the common uncontended case.
+ // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
+ intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ; // agro ...
+ if (v == 0) return 1 ;
+
+ for (;;) {
+ if ((v & _LBIT) != 0) return 0 ;
+ const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
+ if (v == u) return 1 ;
+ v = u ;
+ }
+}
+
+int Monitor::ILocked () {
+ const intptr_t w = _LockWord.FullWord & 0xFF ;
+ assert (w == 0 || w == _LBIT, "invariant") ;
+ return w == _LBIT ;
+}
+
+// Polite TATAS spinlock with exponential backoff - bounded spin.
+// Ideally we'd use processor cycles, time or vtime to control
+// the loop, but we currently use iterations.
+// All the constants within were derived empirically but work
+// over the spectrum of J2SE reference platforms.
+// On Niagara-class systems the back-off is unnecessary but
+// is relatively harmless. (At worst it'll slightly retard
+// acquisition times). The back-off is critical for older SMP systems
+// where constant fetching of the LockWord would otherwise impair
+// scalability.
+//
+// Clamp spinning at approximately 1/2 of a context-switch round-trip.
+// See synchronizer.cpp for details and rationale.
+
+int Monitor::TrySpin (Thread * const Self) {
+ if (TryLock()) return 1 ;
+ if (!os::is_MP()) return 0 ;
+
+ int Probes = 0 ;
+ int Delay = 0 ;
+ int Steps = 0 ;
+ int SpinMax = NativeMonitorSpinLimit ;
+ int flgs = NativeMonitorFlags ;
+ for (;;) {
+ intptr_t v = _LockWord.FullWord;
+ if ((v & _LBIT) == 0) {
+ if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
+ return 1 ;
+ }
+ continue ;
+ }
+
+ if ((flgs & 8) == 0) {
+ SpinPause () ;
+ }
+
+ // Periodically increase Delay -- variable Delay form
+ // conceptually: delay *= 1 + 1/Exponent
+ ++ Probes;
+ if (Probes > SpinMax) return 0 ;
+
+ if ((Probes & 0x7) == 0) {
+ Delay = ((Delay << 1)|1) & 0x7FF ;
+ // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
+ }
+
+ if (flgs & 2) continue ;
+
+ // Consider checking _owner's schedctl state, if OFFPROC abort spin.
+ // If the owner is OFFPROC then it's unlikely that the lock will be dropped
+ // in a timely fashion, which suggests that spinning would not be fruitful
+ // or profitable.
+
+ // Stall for "Delay" time units - iterations in the current implementation.
+ // Avoid generating coherency traffic while stalled.
+ // Possible ways to delay:
+ // PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
+ // wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
+ // Note that on Niagara-class systems we want to minimize STs in the
+ // spin loop. N1 and brethren write-around the L1$ over the xbar into the L2$.
+ // Furthermore, they don't have a W$ like traditional SPARC processors.
+ // We currently use a Marsaglia Shift-Xor RNG loop.
+ Steps += Delay ;
+ if (Self != NULL) {
+ jint rv = Self->rng[0] ;
+ for (int k = Delay ; --k >= 0; ) {
+ rv = MarsagliaXORV (rv) ;
+ if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
+ }
+ Self->rng[0] = rv ;
+ } else {
+ Stall (Delay) ;
+ }
+ }
+}
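+
+// Editorial note (illustration, not from the original source): for readability,
+// a summary of how the NativeMonitorFlags bits appear to be consumed in this
+// file, derived from their uses in TrySpin(), ILock(), IWait() and notify().
+// Treat it as descriptive rather than normative:
+//   & 1  - IWait() leaves its park loop after park() returns, without requiring Notified
+//   & 2  - TrySpin() skips the backoff stall between probes
+//   & 4  - TrySpin() keeps spinning even when a safepoint is pending
+//   & 8  - TrySpin() omits the SpinPause() hint
+//   & 16 - notify() unparks the notified thread immediately
+//   & 32 - ILock() attempts to barge directly onto the inner (OnDeck) lock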
+
+static int ParkCommon (ParkEvent * ev, jlong timo) {
+ // Diagnostic support - periodically unwedge blocked threads
+ intx nmt = NativeMonitorTimeout ;
+ if (nmt > 0 && (nmt < timo || timo <= 0)) {
+ timo = nmt ;
+ }
+ int err = OS_OK ;
+ if (0 == timo) {
+ ev->park() ;
+ } else {
+ err = ev->park(timo) ;
+ }
+ return err ;
+}
+
+inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
+ intptr_t v = _LockWord.FullWord ;
+ for (;;) {
+ if ((v & _LBIT) == 0) {
+ const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
+ if (u == v) return 1 ; // indicate acquired
+ v = u ;
+ } else {
+ // Anticipate success ...
+ ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
+ const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
+ if (u == v) return 0 ; // indicate pushed onto cxq
+ v = u ;
+ }
+ // Interference - LockWord change - just retry
+ }
+}
+
+// ILock and IWait are the lowest level primitive internal blocking
+// synchronization functions. The callers of IWait and ILock must have
+// performed any needed state transitions beforehand.
+// IWait and ILock may directly call park() without any concern for thread state.
+// Note that ILock and IWait do *not* access _owner.
+// _owner is a higher-level logical concept.
+
+void Monitor::ILock (Thread * Self) {
+ assert (_OnDeck != Self->_MutexEvent, "invariant") ;
+
+ if (TryFast()) {
+ Exeunt:
+ assert (ILocked(), "invariant") ;
+ return ;
+ }
+
+ ParkEvent * const ESelf = Self->_MutexEvent ;
+ assert (_OnDeck != ESelf, "invariant") ;
+
+ // As an optimization, spinners could conditionally try to set ONDECK to _LBIT
+ // Synchronizer.cpp uses a similar optimization.
+ if (TrySpin (Self)) goto Exeunt ;
+
+ // Slow-path - the lock is contended.
+ // Either Enqueue Self on cxq or acquire the outer lock.
+ // LockWord encoding = (cxq,LOCKBYTE)
+ ESelf->reset() ;
+ OrderAccess::fence() ;
+
+ // Optional optimization ... try barging on the inner lock
+ if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) {
+ goto OnDeck_LOOP ;
+ }
+
+ if (AcquireOrPush (ESelf)) goto Exeunt ;
+
+ // At any given time there is at most one ondeck thread.
+ // ondeck implies not resident on cxq and not resident on EntryList
+ // Only the OnDeck thread can try to acquire -- contended for -- the lock.
+ // CONSIDER: use Self->OnDeck instead of m->OnDeck.
+ // Deschedule Self so that others may run.
+ while (_OnDeck != ESelf) {
+ ParkCommon (ESelf, 0) ;
+ }
+
+ // Self is now in the ONDECK position and will remain so until it
+ // manages to acquire the lock.
+ OnDeck_LOOP:
+ for (;;) {
+ assert (_OnDeck == ESelf, "invariant") ;
+ if (TrySpin (Self)) break ;
+ // CONSIDER: if ESelf->TryPark() && TryLock() break ...
+ // It's probably wise to spin only if we *actually* blocked
+ // CONSIDER: check the lockbyte, if it remains set then
+ // preemptively drain the cxq into the EntryList.
+ // The best place and time to perform queue operations -- lock metadata --
+ // is _before having acquired the outer lock, while waiting for the lock to drop.
+ ParkCommon (ESelf, 0) ;
+ }
+
+ assert (_OnDeck == ESelf, "invariant") ;
+ _OnDeck = NULL ;
+
+ // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
+ // epilog immediately after having acquired the outer lock.
+ // But instead we could consider the following optimizations:
+ // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
+ // This might avoid potential reacquisition of the inner lock in IUnlock().
+ // B. While still holding the inner lock, attempt to opportunistically select
+ // and unlink the next ONDECK thread from the EntryList.
+ // If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
+ // It's critical that the select-and-unlink operation run in constant-time as
+ // it executes when holding the outer lock and may artificially increase the
+ // effective length of the critical section.
+ // Note that (A) and (B) are tantamount to succession by direct handoff for
+ // the inner lock.
+ goto Exeunt ;
+}
+
+void Monitor::IUnlock (bool RelaxAssert) {
+ assert (ILocked(), "invariant") ;
+ _LockWord.Bytes[_LSBINDEX] = 0 ; // drop outer lock
+ OrderAccess::storeload ();
+ ParkEvent * const w = _OnDeck ;
+ assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
+ if (w != NULL) {
+ // Either we have a valid ondeck thread or ondeck is transiently "locked"
+ // by some exiting thread as it arranges for succession. The LSBit of
+ // OnDeck allows us to discriminate two cases. If the latter, the
+ // responsibility for progress and succession lies with that other thread.
+ // For good performance, we also depend on the fact that redundant unpark()
+ // operations are cheap. That is, repeated Unpark()ing of the ONDECK thread
+ // is inexpensive. This approach provides implicit futile wakeup throttling.
+ // Note that the referent "w" might be stale with respect to the lock.
+ // In that case the following unpark() is harmless and the worst that'll happen
+ // is a spurious return from a park() operation. Critically, if "w" _is stale,
+ // then progress is known to have occurred as that means the thread associated
+ // with "w" acquired the lock. In that case this thread need take no further
+ // action to guarantee progress.
+ if ((UNS(w) & _LBIT) == 0) w->unpark() ;
+ return ;
+ }
+
+ intptr_t cxq = _LockWord.FullWord ;
+ if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
+ return ; // normal fast-path exit - cxq and EntryList both empty
+ }
+ if (cxq & _LBIT) {
+ // Optional optimization ...
+ // Some other thread acquired the lock in the window since this
+ // thread released it. Succession is now that thread's responsibility.
+ return ;
+ }
+
+ Succession:
+ // Slow-path exit - this thread must ensure succession and progress.
+ // OnDeck serves as lock to protect cxq and EntryList.
+ // Only the holder of OnDeck can manipulate EntryList or detach the RATs (recently-arrived threads) from cxq.
+ // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
+ // but only one concurrent consumer (detacher of RATs).
+ // Consider protecting this critical section with schedctl on Solaris.
+ // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
+ // picks a successor and marks that thread as OnDeck. That successor
+ // thread will then clear OnDeck once it eventually acquires the outer lock.
+ if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
+ return ;
+ }
+
+ ParkEvent * List = _EntryList ;
+ if (List != NULL) {
+ // Transfer the head of the EntryList to the OnDeck position.
+ // Once OnDeck, a thread stays OnDeck until it acquires the lock.
+ // For a given lock there is at most one OnDeck thread at any one instant.
+ WakeOne:
+ assert (List == _EntryList, "invariant") ;
+ ParkEvent * const w = List ;
+ assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
+ _EntryList = w->ListNext ;
+ // as a diagnostic measure consider setting w->_ListNext = BAD
+ assert (UNS(_OnDeck) == _LBIT, "invariant") ;
+ _OnDeck = w ; // pass OnDeck to w.
+ // w will clear OnDeck once it acquires the outer lock
+
+ // Another optional optimization ...
+ // For heavily contended locks it's not uncommon that some other
+ // thread acquired the lock while this thread was arranging succession.
+ // Try to defer the unpark() operation - Delegate the responsibility
+ // for unpark()ing the OnDeck thread to the current or subsequent owners
+ // That is, the new owner is responsible for unparking the OnDeck thread.
+ OrderAccess::storeload() ;
+ cxq = _LockWord.FullWord ;
+ if (cxq & _LBIT) return ;
+
+ w->unpark() ;
+ return ;
+ }
+
+ cxq = _LockWord.FullWord ;
+ if ((cxq & ~_LBIT) != 0) {
+ // The EntryList is empty but the cxq is populated.
+ // drain RATs from cxq into EntryList
+ // Detach RATs segment with CAS and then merge into EntryList
+ for (;;) {
+ // optional optimization - if locked, the owner is responsible for succession
+ if (cxq & _LBIT) goto Punt ;
+ const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
+ if (vfy == cxq) break ;
+ cxq = vfy ;
+ // Interference - LockWord changed - Just retry
+ // We can see concurrent interference from contending threads
+ // pushing themselves onto the cxq or from lock-unlock operations.
+ // From the perspective of this thread, EntryList is stable and
+ // the cxq is prepend-only -- the head is volatile but the interior
+ // of the cxq is stable. In theory if we encounter interference from threads
+ // pushing onto cxq we could simply break off the original cxq suffix and
+ // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
+ // on the high-traffic LockWord variable. For instance, let's say the cxq is "ABCD"
+ // when we first fetch cxq above. Between the fetch -- where we observed "A"
+ // -- and CAS -- where we attempt to CAS null over A -- "PQR" arrive,
+ // yielding cxq = "PQRABCD". In this case we could simply set A.ListNext to
+ // null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
+ // Note too, that it's safe for this thread to traverse the cxq
+ // without taking any special concurrency precautions.
+ }
+
+ // We don't currently reorder the cxq segment as we move it onto
+ // the EntryList, but it might make sense to reverse the order
+ // or perhaps sort by thread priority. See the comments in
+ // synchronizer.cpp objectMonitor::exit().
+ assert (_EntryList == NULL, "invariant") ;
+ _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
+ assert (List != NULL, "invariant") ;
+ goto WakeOne ;
+ }
+
+ // cxq|EntryList is empty.
+ // w == NULL implies that cxq|EntryList == NULL in the past.
+ // Possible race - rare inopportune interleaving.
+ // A thread could have added itself to cxq since this thread previously checked.
+ // Detect and recover by refetching cxq.
+ Punt:
+ assert (UNS(_OnDeck) == _LBIT, "invariant") ;
+ _OnDeck = NULL ; // Release inner lock.
+ OrderAccess::storeload(); // Dekker duality - pivot point
+
+ // Resample LockWord/cxq to recover from possible race.
+ // For instance, while this thread T1 held OnDeck, some other thread T2 might
+ // acquire the outer lock. Another thread T3 might try to acquire the outer
+ // lock, but encounter contention and enqueue itself on cxq. T2 then drops the
+ // outer lock, but skips succession as this thread T1 still holds OnDeck.
+ // T1 is and remains responsible for ensuring succession of T3.
+ //
+ // Note that we don't need to recheck EntryList, just cxq.
+ // If threads moved onto EntryList since we dropped OnDeck
+ // that implies some other thread forced succession.
+ cxq = _LockWord.FullWord ;
+ if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
+ goto Succession ; // potential race -- re-run succession
+ }
+ return ;
+}
+
+bool Monitor::notify() {
+ assert (_owner == Thread::current(), "invariant") ;
+ assert (ILocked(), "invariant") ;
+ if (_WaitSet == NULL) return true ;
+ NotifyCount ++ ;
+
+ // Transfer one thread from the WaitSet to the EntryList or cxq.
+ // Currently we just unlink the head of the WaitSet and prepend to the cxq.
+ // And of course we could just unlink it and unpark it, too, but
+ // in that case it'd likely impale itself on the reentry.
+ Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
+ ParkEvent * nfy = _WaitSet ;
+ if (nfy != NULL) { // DCL idiom
+ _WaitSet = nfy->ListNext ;
+ assert (nfy->Notified == 0, "invariant") ;
+ // push nfy onto the cxq
+ for (;;) {
+ const intptr_t v = _LockWord.FullWord ;
+ assert ((v & 0xFF) == _LBIT, "invariant") ;
+ nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
+ if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
+ // interference - _LockWord changed -- just retry
+ }
+ // Note that setting Notified before pushing nfy onto the cxq is
+ // also legal and safe, but the safety properties are much more
+ // subtle, so for the sake of code stewardship ...
+ OrderAccess::fence() ;
+ nfy->Notified = 1;
+ }
+ Thread::muxRelease (_WaitLock) ;
+ if (nfy != NULL && (NativeMonitorFlags & 16)) {
+ // Experimental code ... light up the wakee in the hope that this thread (the owner)
+ // will drop the lock just about the time the wakee comes ONPROC.
+ nfy->unpark() ;
+ }
+ assert (ILocked(), "invariant") ;
+ return true ;
+}
+
+// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
+// to the cxq. This could be done more efficiently with a single bulk en-masse transfer,
+// but in practice notifyAll() for large #s of threads is rare and not time-critical.
+// Beware too, that we invert the order of the waiters. Let's say that the
+// waitset is "ABCD" and the cxq is "XYZ". After a notifyAll() the waitset
+// will be empty and the cxq will be "DCBAXYZ". This is benign, of course.
+
+bool Monitor::notify_all() {
+ assert (_owner == Thread::current(), "invariant") ;
+ assert (ILocked(), "invariant") ;
+ while (_WaitSet != NULL) notify() ;
+ return true ;
+}
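+
+// Editorial illustration (not from the original source): a minimal sketch of the
+// intended wait()/notify() usage on a native Monitor. "Foo_lock" and "ready" are
+// hypothetical; "ready" is assumed to be mutated only while Foo_lock is held.
+//
+//   // consumer
+//   Foo_lock->lock();
+//   while (!ready) {
+//     Foo_lock->wait();       // releases Foo_lock, parks, then reacquires it
+//   }
+//   Foo_lock->unlock();
+//
+//   // producer
+//   Foo_lock->lock();
+//   ready = true;
+//   Foo_lock->notify();       // or notify_all()
+//   Foo_lock->unlock();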
+
+int Monitor::IWait (Thread * Self, jlong timo) {
+ assert (ILocked(), "invariant") ;
+
+ // Phases:
+ // 1. Enqueue Self on WaitSet - currently prepend
+ // 2. unlock - drop the outer lock
+ // 3. wait for either notification or timeout
+ // 4. lock - reentry - reacquire the outer lock
+
+ ParkEvent * const ESelf = Self->_MutexEvent ;
+ ESelf->Notified = 0 ;
+ ESelf->reset() ;
+ OrderAccess::fence() ;
+
+ // Add Self to WaitSet
+ // Ideally only the holder of the outer lock would manipulate the WaitSet -
+ // That is, the outer lock would implicitly protect the WaitSet.
+ // But if a thread in wait() encounters a timeout it will need to dequeue itself
+ // from the WaitSet _before it becomes the owner of the lock. We need to dequeue
+ // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
+ // on both the WaitSet and the EntryList|cxq at the same time. That is, a thread
+ // on the WaitSet can't be allowed to compete for the lock until it has managed to
+ // unlink its ParkEvent from WaitSet. Thus the need for WaitLock.
+ // Contention on the WaitLock is minimal.
+ //
+ // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
+ // thread class. The WaitSet would be composed of WaitEvents. Only the
+ // owner of the outer lock would manipulate the WaitSet. A thread in wait()
+ // could then compete for the outer lock, and then, if necessary, unlink itself
+ // from the WaitSet only after having acquired the outer lock. More precisely,
+ // there would be no WaitLock. A thread in wait() would enqueue its WaitEvent
+ // on the WaitSet; release the outer lock; wait for either notification or timeout;
+ // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
+ //
+ // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
+ // One set would be for the WaitSet and one for the EntryList.
+ // We could also deconstruct the ParkEvent into a "pure" event and add a
+ // new immortal/TSM "ListElement" class that referred to ParkEvents.
+ // In that case we could have one ListElement on the WaitSet and another
+ // on the EntryList, with both referring to the same pure Event.
+
+ Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
+ ESelf->ListNext = _WaitSet ;
+ _WaitSet = ESelf ;
+ Thread::muxRelease (_WaitLock) ;
+
+ // Release the outer lock
+ // We call IUnlock (RelaxAssert=true) as a thread T1 might
+ // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
+ // and then stall before it can attempt to wake a successor.
+ // Some other thread T2 acquires the lock, and calls notify(), moving
+ // T1 from the WaitSet to the cxq. T2 then drops the lock. T1 resumes,
+ // and then finds *itself* on the cxq. During the course of a normal
+ // IUnlock() call a thread should _never find itself on the EntryList
+ // or cxq, but in the case of wait() it's possible.
+ // See synchronizer.cpp objectMonitor::wait().
+ IUnlock (true) ;
+
+ // Wait for either notification or timeout
+ // Beware that in some circumstances we might propagate
+ // spurious wakeups back to the caller.
+
+ for (;;) {
+ if (ESelf->Notified) break ;
+ int err = ParkCommon (ESelf, timo) ;
+ if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
+ }
+
+ // Prepare for reentry - if necessary, remove ESelf from WaitSet
+ // ESelf can be:
+ // 1. Still on the WaitSet. This can happen if we exited the loop by timeout.
+ // 2. On the cxq or EntryList
+ // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.
+
+ OrderAccess::fence() ;
+ int WasOnWaitSet = 0 ;
+ if (ESelf->Notified == 0) {
+ Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
+ if (ESelf->Notified == 0) { // DCL idiom
+ assert (_OnDeck != ESelf, "invariant") ; // can't be both OnDeck and on WaitSet
+ // ESelf is resident on the WaitSet -- unlink it.
+ // A doubly-linked list would be better here so we can unlink in constant-time.
+ // We have to unlink before we potentially recontend as ESelf might otherwise
+ // end up on the cxq|EntryList -- it can't be on two lists at once.
+ ParkEvent * p = _WaitSet ;
+ ParkEvent * q = NULL ; // classic q chases p
+ while (p != NULL && p != ESelf) {
+ q = p ;
+ p = p->ListNext ;
+ }
+ assert (p == ESelf, "invariant") ;
+ if (p == _WaitSet) { // found at head
+ assert (q == NULL, "invariant") ;
+ _WaitSet = p->ListNext ;
+ } else { // found in interior
+ assert (q->ListNext == p, "invariant") ;
+ q->ListNext = p->ListNext ;
+ }
+ WasOnWaitSet = 1 ; // We were *not* notified but instead encountered timeout
+ }
+ Thread::muxRelease (_WaitLock) ;
+ }
+
+ // Reentry phase - reacquire the lock
+ if (WasOnWaitSet) {
+ // ESelf was previously on the WaitSet but we just unlinked it above
+ // because of a timeout. ESelf is not resident on any list and is not OnDeck
+ assert (_OnDeck != ESelf, "invariant") ;
+ ILock (Self) ;
+ } else {
+ // A prior notify() operation moved ESelf from the WaitSet to the cxq.
+ // ESelf is now on the cxq, EntryList or at the OnDeck position.
+ // The following fragment is extracted from Monitor::ILock()
+ for (;;) {
+ if (_OnDeck == ESelf && TrySpin(Self)) break ;
+ ParkCommon (ESelf, 0) ;
+ }
+ assert (_OnDeck == ESelf, "invariant") ;
+ _OnDeck = NULL ;
+ }
+
+ assert (ILocked(), "invariant") ;
+ return WasOnWaitSet != 0 ; // return true IFF timeout
+}
+
+
+// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
+// In particular, there are certain types of global lock that may be held
+// by a Java thread while it is blocked at a safepoint but before it has
+// written the _owner field. These locks may be sneakily acquired by the
+// VM thread during a safepoint to avoid deadlocks. Alternatively, one should
+// identify all such locks, and ensure that Java threads never block at
+// safepoints while holding them (_no_safepoint_check_flag). While it
+// seems as though this could increase the time to reach a safepoint
+// (or at least increase the mean, if not the variance), the latter
+// approach might make for a cleaner, more maintainable JVM design.
+//
+// Sneaking is vile and reprehensible and should be excised at the 1st
+// opportunity. It's possible that the need for sneaking could be obviated
+// as follows. Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
+// or ILock() thus acquiring the "physical" lock underlying Monitor/Mutex.
+// (b) stall at the TBIVM exit point as a safepoint is in effect. Critically,
+// it'll stall at the TBIVM reentry state transition after having acquired the
+// underlying lock, but before having set _owner and having entered the actual
+// critical section. The lock-sneaking facility leverages that fact and allows the
+// VM thread to logically acquire locks that have already been physically locked by mutators
+// but where the mutators are known to be blocked by the reentry thread state transition.
+//
+// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
+// wrapped calls to park(), then we could likely do away with sneaking. We'd
+// decouple lock acquisition and parking. The critical invariant to eliminating
+// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
+// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
+// One difficulty with this approach is that the TBIVM wrapper could recurse and
+// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
+// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
+//
+// But of course the proper ultimate approach is to avoid schemes that require explicit
+// sneaking or dependence on any clever invariants or subtle implementation properties
+// of Mutex-Monitor and instead directly address the underlying design flaw.
+
+void Monitor::lock (Thread * Self) {
+#ifdef CHECK_UNHANDLED_OOPS
+ // Clear unhandled oops so we get a crash right away. Only clear for non-vm
+ // or GC threads.
+ if (Self->is_Java_thread()) {
+ Self->clear_unhandled_oops();
+ }
+#endif // CHECK_UNHANDLED_OOPS
+
+ debug_only(check_prelock_state(Self));
+ assert (_owner != Self , "invariant") ;
+ assert (_OnDeck != Self->_MutexEvent, "invariant") ;
+
+ if (TryFast()) {
+ Exeunt:
+ assert (ILocked(), "invariant") ;
+ assert (owner() == NULL, "invariant");
+ set_owner (Self);
+ return ;
+ }
+
+ // The lock is contended ...
+
+ bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
+ if (can_sneak && _owner == NULL) {
+ // A Java thread has locked the lock but has not entered the
+ // critical region -- let's just pretend we've locked the lock
+ // and go on. We note this with _snuck so we can also
+ // pretend to unlock when the time comes.
+ _snuck = true;
+ goto Exeunt ;
+ }
+
+ // Try a brief spin to avoid passing thru thread state transition ...
+ if (TrySpin (Self)) goto Exeunt ;
+
+ check_block_state(Self);
+ if (Self->is_Java_thread()) {
+ // Horribile dictu - we suffer through a state transition
+ assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
+ ThreadBlockInVM tbivm ((JavaThread *) Self) ;
+ ILock (Self) ;
+ } else {
+ // Mirabile dictu
+ ILock (Self) ;
+ }
+ goto Exeunt ;
+}
+
+void Monitor::lock() {
+ this->lock(Thread::current());
+}
+
+// Lock without safepoint check - a degenerate variant of lock().
+// Should ONLY be used by safepoint code and other code
+// that is guaranteed not to block while running inside the VM. If this is called with
+// thread state set to be in VM, the safepoint synchronization code will deadlock!
+
+void Monitor::lock_without_safepoint_check (Thread * Self) {
+ assert (_owner != Self, "invariant") ;
+ ILock (Self) ;
+ assert (_owner == NULL, "invariant");
+ set_owner (Self);
+}
+
+void Monitor::lock_without_safepoint_check () {
+ lock_without_safepoint_check (Thread::current()) ;
+}
+
+
+// Returns true if the thread succeeds in grabbing the lock, otherwise false.
+
+bool Monitor::try_lock() {
+ Thread * const Self = Thread::current();
+ debug_only(check_prelock_state(Self));
+ // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");
+
+ // Special case, where all Java threads are stopped.
+ // The lock may have been acquired but _owner is not yet set.
+ // In that case the VM thread can safely grab the lock.
+ // It strikes me this should appear _after the TryLock() fails, below.
+ bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
+ if (can_sneak && _owner == NULL) {
+ set_owner(Self); // Do not need to be atomic, since we are at a safepoint
+ _snuck = true;
+ return true;
+ }
+
+ if (TryLock()) {
+ // We got the lock
+ assert (_owner == NULL, "invariant");
+ set_owner (Self);
+ return true;
+ }
+ return false;
+}
+
+void Monitor::unlock() {
+ assert (_owner == Thread::current(), "invariant") ;
+ assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
+ set_owner (NULL) ;
+ if (_snuck) {
+ assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
+ _snuck = false;
+ return ;
+ }
+ IUnlock (false) ;
+}
+
+// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
+// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
+//
+// There's no expectation that JVM_RawMonitors will interoperate properly with the native
+// Mutex-Monitor constructs. We happen to implement JVM_RawMonitors in terms of
+// native Mutex-Monitors simply as a matter of convenience. A simple abstraction layer
+// over a pthread_mutex_t would work equally well, but would require more platform-specific
+// code -- a "PlatformMutex". Alternatively, a simple layer over muxAcquire-muxRelease
+// would work too.
+//
+// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
+// instance available. Instead, we transiently allocate a ParkEvent on-demand if
+// we encounter contention. That ParkEvent remains associated with the thread
+// until it manages to acquire the lock, at which time we return the ParkEvent
+// to the global ParkEvent free list. This is correct and suffices for our purposes.
+//
+// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
+// jvm_raw_lock() didn't have the corresponding test. I suspect that's an
+// oversight, but I've replicated the original suspect logic in the new code ...
+
+void Monitor::jvm_raw_lock() {
+ assert(rank() == native, "invariant");
+
+ if (TryLock()) {
+ Exeunt:
+ assert (ILocked(), "invariant") ;
+ assert (_owner == NULL, "invariant");
+ // This can potentially be called by non-Java threads. Thus, the ThreadLocalStorage
+ // might return NULL. Don't call set_owner since it will break on a NULL owner.
+ // Consider installing a non-null "ANON" distinguished value instead of just NULL.
+ _owner = ThreadLocalStorage::thread();
+ return ;
+ }
+
+ if (TrySpin(NULL)) goto Exeunt ;
+
+ // slow-path - apparent contention
+ // Allocate a ParkEvent for transient use.
+ // The ParkEvent remains associated with this thread until
+ // the time the thread manages to acquire the lock.
+ ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
+ ESelf->reset() ;
+ OrderAccess::storeload() ;
+
+ // Either Enqueue Self on cxq or acquire the outer lock.
+ if (AcquireOrPush (ESelf)) {
+ ParkEvent::Release (ESelf) ; // surrender the ParkEvent
+ goto Exeunt ;
+ }
+
+ // At any given time there is at most one ondeck thread.
+ // ondeck implies not resident on cxq and not resident on EntryList
+ // Only the OnDeck thread can try to acquire -- contended for -- the lock.
+ // CONSIDER: use Self->OnDeck instead of m->OnDeck.
+ for (;;) {
+ if (_OnDeck == ESelf && TrySpin(NULL)) break ;
+ ParkCommon (ESelf, 0) ;
+ }
+
+ assert (_OnDeck == ESelf, "invariant") ;
+ _OnDeck = NULL ;
+ ParkEvent::Release (ESelf) ; // surrender the ParkEvent
+ goto Exeunt ;
+}
+
+void Monitor::jvm_raw_unlock() {
+ // Nearly the same as Monitor::unlock() ...
+ // directly set _owner instead of using set_owner(null)
+ _owner = NULL ;
+ if (_snuck) { // ???
+ assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
+ _snuck = false;
+ return ;
+ }
+ IUnlock(false) ;
+}
+
+bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
+ Thread * const Self = Thread::current() ;
+ assert (_owner == Self, "invariant") ;
+ assert (ILocked(), "invariant") ;
+
+ // as_suspend_equivalent logically implies !no_safepoint_check
+ guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
+ // !no_safepoint_check logically implies java_thread
+ guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;
+
+ #ifdef ASSERT
+ Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
+ assert(least != this, "Specification of get_least_... call above");
+ if (least != NULL && least->rank() <= special) {
+ tty->print("Attempting to wait on monitor %s/%d while holding"
+ " lock %s/%d -- possible deadlock",
+ name(), rank(), least->name(), least->rank());
+ assert(false, "Shouldn't block(wait) while holding a lock of rank special");
+ }
+ #endif // ASSERT
+
+ int wait_status ;
+ // conceptually set the owner to NULL in anticipation of
+ // abdicating the lock in wait
+ set_owner(NULL);
+ if (no_safepoint_check) {
+ wait_status = IWait (Self, timeout) ;
+ } else {
+ assert (Self->is_Java_thread(), "invariant") ;
+ JavaThread *jt = (JavaThread *)Self;
+
+ // Enter safepoint region - ornate and Rococo ...
+ ThreadBlockInVM tbivm(jt);
+ OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);
+
+ if (as_suspend_equivalent) {
+ jt->set_suspend_equivalent();
+ // cleared by handle_special_suspend_equivalent_condition() or
+ // java_suspend_self()
+ }
+
+ wait_status = IWait (Self, timeout) ;
+
+ // were we externally suspended while we were waiting?
+ if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
+ // Our event wait has finished and we own the lock, but
+ // while we were waiting another thread suspended us. We don't
+ // want to hold the lock while suspended because that
+ // would surprise the thread that suspended us.
+ assert (ILocked(), "invariant") ;
+ IUnlock (true) ;
+ jt->java_suspend_self();
+ ILock (Self) ;
+ assert (ILocked(), "invariant") ;
+ }
+ }
+
+ // Conceptually reestablish ownership of the lock.
+ // The "real" lock -- the LockByte -- was reacquired by IWait().
+ assert (ILocked(), "invariant") ;
+ assert (_owner == NULL, "invariant") ;
+ set_owner (Self) ;
+ return wait_status != 0 ; // return true IFF timeout
+}
+
+Monitor::~Monitor() {
+ assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
+}
+
+void Monitor::ClearMonitor (Monitor * m) {
+ m->_owner = NULL ;
+ m->_snuck = false ;
+ m->_name = "UNKNOWN" ;
+ m->_LockWord.FullWord = 0 ;
+ m->_EntryList = NULL ;
+ m->_OnDeck = NULL ;
+ m->_WaitSet = NULL ;
+ m->_WaitLock[0] = 0 ;
+}
+
+Monitor::Monitor() { ClearMonitor(this); }
+
+Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
+ ClearMonitor (this) ;
+#ifdef ASSERT
+ _allow_vm_block = allow_vm_block;
+ _rank = Rank ;
+#endif
+}
+
+Mutex::~Mutex() {
+ assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
+}
+
+Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
+ ClearMonitor ((Monitor *) this) ;
+#ifdef ASSERT
+ _allow_vm_block = allow_vm_block;
+ _rank = Rank ;
+#endif
+}
+
+bool Monitor::owned_by_self() const {
+ bool ret = _owner == Thread::current();
+ assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
+ return ret;
+}
+
+void Monitor::print_on_error(outputStream* st) const {
+ st->print("[" PTR_FORMAT, this);
+ st->print("] %s", _name);
+ st->print(" - owner thread: " PTR_FORMAT, _owner);
+}
+
+
+
+
+// ----------------------------------------------------------------------------------
+// Non-product code
+
+#ifndef PRODUCT
+void Monitor::print_on(outputStream* st) const {
+ st->print_cr("Mutex: [0x%lx/0x%lx] %s - owner: 0x%lx", this, _LockWord.FullWord, _name, _owner);
+}
+#endif
+
+#ifndef PRODUCT
+#ifdef ASSERT
+Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
+ Monitor *res, *tmp;
+ for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
+ if (tmp->rank() < res->rank()) {
+ res = tmp;
+ }
+ }
+ if (!SafepointSynchronize::is_at_safepoint()) {
+ // In this case, we expect the held locks to be
+ // in increasing rank order (modulo any native ranks)
+ for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
+ if (tmp->next() != NULL) {
+ assert(tmp->rank() == Mutex::native ||
+ tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
+ }
+ }
+ }
+ return res;
+}
+
+Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
+ Monitor *res, *tmp;
+ for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
+ if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
+ res = tmp;
+ }
+ }
+ if (!SafepointSynchronize::is_at_safepoint()) {
+ // In this case, we expect the held locks to be
+ // in increasing rank order (modulo any native ranks)
+ for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
+ if (tmp->next() != NULL) {
+ assert(tmp->rank() == Mutex::native ||
+ tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
+ }
+ }
+ }
+ return res;
+}
+
+
+bool Monitor::contains(Monitor* locks, Monitor * lock) {
+ for (; locks != NULL; locks = locks->next()) {
+ if (locks == lock)
+ return true;
+ }
+ return false;
+}
+#endif
+
+// Called immediately after lock acquisition or release as a diagnostic
+// to track the lock-set of the thread and test for rank violations that
+// might indicate exposure to deadlock.
+// Rather like an EventListener for _owner (:>).
+
+void Monitor::set_owner_implementation(Thread *new_owner) {
+ // This function is solely responsible for maintaining
+ // and checking the invariant that threads and locks
+ // are in a 1/N relation, with some locks unowned.
+ // It uses the Mutex::_owner, Mutex::_next, and
+ // Thread::_owned_locks fields, and no other function
+ // changes those fields.
+ // It is illegal to set the mutex from one non-NULL
+ // owner to another--it must be owned by NULL as an
+ // intermediate state.
+
+ if (new_owner != NULL) {
+ // the thread is acquiring this lock
+
+ assert(new_owner == Thread::current(), "Should I be doing this?");
+ assert(_owner == NULL, "setting the owner thread of an already owned mutex");
+ _owner = new_owner; // set the owner
+
+ // link "this" into the owned locks list
+
+ #ifdef ASSERT // Thread::_owned_locks is under the same ifdef
+ Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
+ // Mutex::set_owner_implementation is a friend of Thread
+
+ assert(this->rank() >= 0, "bad lock rank");
+
+ if (LogMultipleMutexLocking && locks != NULL) {
+ Events::log("thread " INTPTR_FORMAT " locks %s, already owns %s", new_owner, name(), locks->name());
+ }
+
+ // Deadlock avoidance rules require us to acquire Mutexes only in
+ // a global total order. For example, if m1 is the lowest ranked mutex
+ // that the thread holds and m2 is the mutex the thread is trying
+ // to acquire, then deadlock avoidance rules require that the rank
+ // of m2 be less than the rank of m1.
+ // The rank Mutex::native is an exception in that it is not subject
+ // to the verification rules.
+ // Here are some further notes relating to mutex acquisition anomalies:
+ // . under Solaris, the interrupt lock gets acquired when doing
+ // profiling, so any lock could be held.
+ // . it is also ok to acquire Safepoint_lock at the very end while we
+ // already hold Terminator_lock - may happen because of periodic safepoints
+ if (this->rank() != Mutex::native &&
+ this->rank() != Mutex::suspend_resume &&
+ locks != NULL && locks->rank() <= this->rank() &&
+ !SafepointSynchronize::is_at_safepoint() &&
+ this != Interrupt_lock && this != ProfileVM_lock &&
+ !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
+ SafepointSynchronize::is_synchronizing())) {
+ new_owner->print_owned_locks();
+ fatal4("acquiring lock %s/%d out of order with lock %s/%d -- possible deadlock",
+ this->name(), this->rank(), locks->name(), locks->rank());
+ }
+
+ this->_next = new_owner->_owned_locks;
+ new_owner->_owned_locks = this;
+ #endif
+
+ } else {
+ // the thread is releasing this lock
+
+ Thread* old_owner = _owner;
+ debug_only(_last_owner = old_owner);
+
+ assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
+ assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");
+
+ _owner = NULL; // set the owner
+
+ #ifdef ASSERT
+ Monitor *locks = old_owner->owned_locks();
+
+ if (LogMultipleMutexLocking && locks != this) {
+ Events::log("thread " INTPTR_FORMAT " unlocks %s, still owns %s", old_owner, this->name(), locks->name());
+ }
+
+ // remove "this" from the owned locks list
+
+ Monitor *prev = NULL;
+ bool found = false;
+ for (; locks != NULL; prev = locks, locks = locks->next()) {
+ if (locks == this) {
+ found = true;
+ break;
+ }
+ }
+ assert(found, "Removing a lock not owned");
+ if (prev == NULL) {
+ old_owner->_owned_locks = _next;
+ } else {
+ prev->_next = _next;
+ }
+ _next = NULL;
+ #endif
+ }
+}
+
+
+// Factored out common sanity checks for locking mutexes. Used by lock() and try_lock().
+void Monitor::check_prelock_state(Thread *thread) {
+ assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
+ || rank() == Mutex::special, "wrong thread state for using locks");
+ if (StrictSafepointChecks) {
+ if (thread->is_VM_thread() && !allow_vm_block()) {
+ fatal1("VM thread using lock %s (not allowed to block on)", name());
+ }
+ debug_only(if (rank() != Mutex::special) \
+ thread->check_for_valid_safepoint_state(false);)
+ }
+}
+
+void Monitor::check_block_state(Thread *thread) {
+ if (!_allow_vm_block && thread->is_VM_thread()) {
+ warning("VM thread blocked on lock");
+ print();
+ BREAKPOINT;
+ }
+ assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
+}
+
+#endif // PRODUCT
diff --git a/src/share/vm/runtime/mutex.hpp b/src/share/vm/runtime/mutex.hpp
new file mode 100644
index 000000000..761cebb2b
--- /dev/null
+++ b/src/share/vm/runtime/mutex.hpp
@@ -0,0 +1,318 @@
+/*
+ * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// The SplitWord construct allows us to colocate the contention queue
+// (cxq) with the lock-byte. The queue elements are ParkEvents, which are
+// always aligned on 256-byte addresses - the least significant byte of
+// a ParkEvent is always 0. Colocating the lock-byte with the queue
+// allows us to easily avoid what would otherwise be a race in lock()
+// if we were to use two completely separate fields for the contention queue
+// and the lock indicator. Specifically, colocation renders us immune
+// from the race where a thread might enqueue itself in the lock() slow-path
+// immediately after the lock holder drops the outer lock in the unlock()
+// fast-path.
+//
+// Colocation allows us to use a fast-path unlock() form that uses
+// a MEMBAR instead of a CAS. MEMBAR has lower local latency than CAS
+// on many platforms.
+//
+// See:
+// + http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
+// + http://blogs.sun.com/dave/resource/synchronization-public2.pdf
+//
+// Note that we're *not* using word-tearing in the classic sense.
+// The lock() fast-path will CAS the lockword and the unlock()
+// fast-path will store into the lock-byte colocated within the lockword.
+// We depend on the fact that all our reference platforms have
+// coherent and atomic byte accesses. More precisely, byte stores
+// interoperate in a safe, sane, and expected manner with respect to
+// CAS, ST and LDs to the full-word containing the byte.
+// If you're porting HotSpot to a platform where that isn't the case
+// then you'll want to change the unlock() fast path from:
+// STB;MEMBAR #storeload; LDN
+// to a full-word CAS of the lockword.
+
+
+union SplitWord { // full-word with separately addressable LSB
+ volatile intptr_t FullWord ;
+ volatile void * Address ;
+ volatile jbyte Bytes [sizeof(intptr_t)] ;
+} ;
+
+// Endian-ness ... index of least-significant byte in SplitWord.Bytes[]
+#ifdef AMD64 // little
+ #define _LSBINDEX 0
+#else
+#if IA32 // little
+ #define _LSBINDEX 0
+#else
+#ifdef SPARC // big
+ #define _LSBINDEX (sizeof(intptr_t)-1)
+#else
+ #error "unknown architecture"
+#endif
+#endif
+#endif
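+
+// Editorial illustration (not from the original source): because ParkEvents are
+// 256-byte aligned (see above), the low-order byte of _LockWord can serve as the
+// lock byte while the remaining bits hold the head of the cxq. The lock()/unlock()
+// paths in mutex.cpp decode the word along these lines (_LBIT, declared below, is 1):
+//
+//   intptr_t v            = _LockWord.FullWord ;
+//   bool locked           = (v & _LBIT) != 0 ;            // lock byte
+//   ParkEvent * cxq_head  = (ParkEvent *) (v & ~_LBIT) ;  // contention-queue head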
+
+class ParkEvent ;
+
+// See orderAccess.hpp. We assume throughout the VM that mutex lock and
+// try_lock do fence-lock-acquire, and that unlock does a release-unlock,
+// *in that order*. If their implementations change such that these
+// assumptions are violated, a whole lot of code will break.
+
+class Monitor : public CHeapObj {
+
+ public:
+ // A special lock is a lock where you are guaranteed not to block while you are
+ // holding it, i.e., no VM operation can happen, taking other locks, etc.
+ // NOTE: It is critical that the rank 'special' be the lowest (earliest)
+ // (except for "event"?) for the deadlock detection to work correctly.
+ // The rank native is only for use in Mutex's created by JVM_RawMonitorCreate,
+ // which being external to the VM are not subject to deadlock detection.
+ // The rank safepoint is used only for synchronization in reaching a
+ // safepoint and leaving a safepoint. It is only used for the Safepoint_lock
+ // currently. While at a safepoint no mutexes of rank safepoint are held
+ // by any thread.
+ // The rank named "leaf" is probably historical (and should
+ // be changed) -- mutexes of this rank aren't really leaf mutexes
+ // at all.
+ enum lock_types {
+ event,
+ special,
+ suspend_resume,
+ leaf = suspend_resume + 2,
+ safepoint = leaf + 10,
+ barrier = safepoint + 1,
+ nonleaf = barrier + 1,
+ max_nonleaf = nonleaf + 900,
+ native = max_nonleaf + 1
+ };
+
+ // The WaitSet and EntryList linked lists are composed of ParkEvents.
+ // I use ParkEvent instead of threads as ParkEvents are immortal and
+ // type-stable, meaning we can safely unpark() a possibly stale
+ // list element in the unlock()-path.
+
+ protected: // Monitor-Mutex metadata
+ SplitWord _LockWord ; // Contention queue (cxq) colocated with Lock-byte
+ enum LockWordBits { _LBIT=1 } ;
+ Thread * volatile _owner; // The owner of the lock
+ // Consider sequestering _owner on its own $line
+ // to aid future synchronization mechanisms.
+ ParkEvent * volatile _EntryList ; // List of threads waiting for entry
+ ParkEvent * volatile _OnDeck ; // heir-presumptive
+ volatile intptr_t _WaitLock [1] ; // Protects _WaitSet
+ ParkEvent * volatile _WaitSet ; // LL of ParkEvents
+ volatile bool _snuck; // Used for sneaky locking (evil).
+ const char * _name; // Name of mutex
+ int NotifyCount ; // diagnostic assist
+ double pad [8] ; // avoid false sharing
+
+ // Debugging fields for naming, deadlock detection, etc. (some only used in debug mode)
+#ifndef PRODUCT
+ bool _allow_vm_block;
+ debug_only(int _rank;) // rank (to avoid/detect potential deadlocks)
+ debug_only(Monitor * _next;) // Used by a Thread to link up owned locks
+ debug_only(Thread* _last_owner;) // the last thread to own the lock
+ debug_only(static bool contains(Monitor * locks, Monitor * lock);)
+ debug_only(static Monitor * get_least_ranked_lock(Monitor * locks);)
+ debug_only(Monitor * get_least_ranked_lock_besides_this(Monitor * locks);)
+#endif
+
+ void set_owner_implementation(Thread* owner) PRODUCT_RETURN;
+ void check_prelock_state (Thread* thread) PRODUCT_RETURN;
+ void check_block_state (Thread* thread) PRODUCT_RETURN;
+
+ // platform-dependent support code can go here (in os_<os_family>.cpp)
+ public:
+ enum {
+ _no_safepoint_check_flag = true,
+ _allow_vm_block_flag = true,
+ _as_suspend_equivalent_flag = true
+ };
+
+ enum WaitResults {
+ CONDVAR_EVENT, // Wait returned because of condition variable notification
+ INTERRUPT_EVENT, // Wait returned because waiting thread was interrupted
+ NUMBER_WAIT_RESULTS
+ };
+
+ private:
+ int TrySpin (Thread * Self) ;
+ int TryLock () ;
+ int TryFast () ;
+ int AcquireOrPush (ParkEvent * ev) ;
+ void IUnlock (bool RelaxAssert) ;
+ void ILock (Thread * Self) ;
+ int IWait (Thread * Self, jlong timo);
+ int ILocked () ;
+
+ protected:
+ static void ClearMonitor (Monitor * m) ;
+ Monitor() ;
+
+ public:
+ Monitor(int rank, const char *name, bool allow_vm_block=false);
+ ~Monitor();
+
+ // Wait until monitor is notified (or times out).
+ // Defaults are to make safepoint checks, wait time is forever (i.e.,
+ // zero), and not a suspend-equivalent condition. Returns true if wait
+ // times out; otherwise returns false.
+ bool wait(bool no_safepoint_check = !_no_safepoint_check_flag,
+ long timeout = 0,
+ bool as_suspend_equivalent = !_as_suspend_equivalent_flag);
+ bool notify();
+ bool notify_all();
+
+
+ void lock(); // prints out warning if VM thread blocks
+ void lock(Thread *thread); // overloaded with current thread
+ void unlock();
+ bool is_locked() const { return _owner != NULL; }
+
+ bool try_lock(); // Like lock(), but non-blocking: returns false instead of blocking.
+
+ // Lock without safepoint check. Should ONLY be used by safepoint code and other code
+ // that is guaranteed not to block while running inside the VM.
+ void lock_without_safepoint_check();
+ void lock_without_safepoint_check (Thread * Self) ;
+
+ // Current owner - not MT-safe. Can only be used to guarantee that
+ // the current running thread owns the lock
+ Thread* owner() const { return _owner; }
+ bool owned_by_self() const;
+
+ // Support for JVM_RawMonitorEnter & JVM_RawMonitorExit. These can be called by
+ // non-Java threads. (We should really have a RawMonitor abstraction)
+ void jvm_raw_lock();
+ void jvm_raw_unlock();
+ const char *name() const { return _name; }
+
+ void print_on_error(outputStream* st) const;
+
+ #ifndef PRODUCT
+ void print_on(outputStream* st) const;
+ void print() const { print_on(tty); }
+ debug_only(int rank() const { return _rank; })
+ bool allow_vm_block() { return _allow_vm_block; }
+
+ debug_only(Monitor *next() const { return _next; })
+ debug_only(void set_next(Monitor *next) { _next = next; })
+ #endif
+
+ void set_owner(Thread* owner) {
+ #ifndef PRODUCT
+ set_owner_implementation(owner);
+ debug_only(void verify_Monitor(Thread* thr));
+ #else
+ _owner = owner;
+ #endif
+ }
+
+};
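+
+// Editorial illustration (not from the original source): the boolean flag enums
+// above let call sites name their boolean arguments instead of passing bare
+// true/false. A hedged sketch of a caller that must avoid the safepoint check,
+// using a hypothetical monitor "Foo_lock":
+//
+//   Foo_lock->lock_without_safepoint_check();
+//   bool timed_out = Foo_lock->wait(Monitor::_no_safepoint_check_flag, 50);
+//   Foo_lock->unlock();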
+
+// Normally we'd expect Monitor to extend Mutex in the sense that a monitor
+// constructed from pthreads primitives might extend a mutex by adding
+// a condvar and some extra metadata. In fact this was the case until J2SE7.
+//
+// Currently, however, the base object is a monitor. Monitor contains all the
+// logic for wait(), notify(), etc. Mutex extends monitor and restricts the
+// visibility of wait(), notify(), and notify_all().
+//
+// Another viable alternative would have been to have Monitor extend Mutex and
+// implement all the normal mutex and wait()-notify() logic in Mutex base class.
+// The wait()-notify() facility would be exposed via special protected member functions
+// (e.g., _Wait() and _Notify()) in Mutex. Monitor would extend Mutex and expose wait()
+// as a call to _Wait(). That is, the public wait() would be a wrapper for the protected
+// _Wait().
+//
+// An even better alternative is to simply eliminate Mutex:: and use Monitor:: instead.
+// After all, monitors are sufficient for Java-level synchronization. At one point in time
+// there may have been some benefit to having distinct mutexes and monitors, but that time
+// has passed.
+//
+// The Mutex/Monitor design parallels that of Java-monitors, being based on
+// platform-specific, per-thread park-unpark primitives.
+
+
+class Mutex : public Monitor { // degenerate Monitor
+ public:
+ Mutex (int rank, const char *name, bool allow_vm_block=false);
+ ~Mutex () ;
+ private:
+ bool notify () { ShouldNotReachHere(); return false; }
+ bool notify_all() { ShouldNotReachHere(); return false; }
+ bool wait (bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
+ ShouldNotReachHere() ;
+ return false ;
+ }
+};
+
+/*
+ * Per-thread blocking support for JSR166. See the Java-level
+ * Documentation for rationale. Basically, park acts like wait, unpark
+ * like notify.
+ *
+ * 6271289 --
+ * To avoid errors where an os thread expires but the JavaThread still
+ * exists, Parkers are immortal (type-stable) and are recycled across
+ * new threads. This parallels the ParkEvent implementation.
+ * Because park-unpark allow spurious wakeups it is harmless if an
+ * unpark call unparks a new thread using the old Parker reference.
+ *
+ * In the future we'll want to think about eliminating Parker and using
+ * ParkEvent instead. There's considerable duplication between the two
+ * services.
+ *
+ */
+
+class Parker : public os::PlatformParker {
+private:
+ volatile int _counter ;
+ Parker * FreeNext ;
+ JavaThread * AssociatedWith ; // Current association
+
+public:
+ Parker() : PlatformParker() {
+ _counter = 0 ;
+ FreeNext = NULL ;
+ AssociatedWith = NULL ;
+ }
+protected:
+ ~Parker() { ShouldNotReachHere(); }
+public:
+ // For simplicity of interface with Java, all forms of park (indefinite,
+ // relative, and absolute) are multiplexed into one call.
+ void park(bool isAbsolute, jlong time);
+ void unpark();
+
+ // Lifecycle operators
+ static Parker * Allocate (JavaThread * t) ;
+ static void Release (Parker * e) ;
+private:
+ static Parker * volatile FreeList ;
+ static volatile int ListLock ;
+};
diff --git a/src/share/vm/runtime/mutexLocker.cpp b/src/share/vm/runtime/mutexLocker.cpp
new file mode 100644
index 000000000..317e24ee5
--- /dev/null
+++ b/src/share/vm/runtime/mutexLocker.cpp
@@ -0,0 +1,266 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_mutexLocker.cpp.incl"
+
+// Mutexes used in the VM (see comment in mutexLocker.hpp):
+//
+// Note that the following pointers are effectively final -- after having been
+// set at JVM startup-time, they should never be subsequently mutated.
+// Instead of using pointers to malloc()ed monitors and mutexes we should consider
+// eliminating the indirection and using instances instead.
+// Consider using GCC's __read_mostly.
+
+Mutex* Patching_lock = NULL;
+Monitor* SystemDictionary_lock = NULL;
+Mutex* PackageTable_lock = NULL;
+Mutex* CompiledIC_lock = NULL;
+Mutex* InlineCacheBuffer_lock = NULL;
+Mutex* VMStatistic_lock = NULL;
+Mutex* JNIGlobalHandle_lock = NULL;
+Mutex* JNIHandleBlockFreeList_lock = NULL;
+Mutex* JNICachedItableIndex_lock = NULL;
+Mutex* JmethodIdCreation_lock = NULL;
+Mutex* JfieldIdCreation_lock = NULL;
+Monitor* JNICritical_lock = NULL;
+Mutex* JvmtiThreadState_lock = NULL;
+Monitor* JvmtiPendingEvent_lock = NULL;
+Mutex* Heap_lock = NULL;
+Mutex* ExpandHeap_lock = NULL;
+Mutex* AdapterHandlerLibrary_lock = NULL;
+Mutex* SignatureHandlerLibrary_lock = NULL;
+Mutex* VtableStubs_lock = NULL;
+Mutex* SymbolTable_lock = NULL;
+Mutex* StringTable_lock = NULL;
+Mutex* CodeCache_lock = NULL;
+Mutex* MethodData_lock = NULL;
+Mutex* RetData_lock = NULL;
+Monitor* VMOperationQueue_lock = NULL;
+Monitor* VMOperationRequest_lock = NULL;
+Monitor* Safepoint_lock = NULL;
+Monitor* SerializePage_lock = NULL;
+Monitor* Threads_lock = NULL;
+Monitor* CGC_lock = NULL;
+Mutex* STS_init_lock = NULL;
+Monitor* SLT_lock = NULL;
+Monitor* iCMS_lock = NULL;
+Monitor* FullGCCount_lock = NULL;
+Mutex* ParGCRareEvent_lock = NULL;
+Mutex* DerivedPointerTableGC_lock = NULL;
+Mutex* Compile_lock = NULL;
+Monitor* MethodCompileQueue_lock = NULL;
+#ifdef TIERED
+Monitor* C1_lock = NULL;
+#endif // TIERED
+Monitor* CompileThread_lock = NULL;
+Mutex* CompileTaskAlloc_lock = NULL;
+Mutex* CompileStatistics_lock = NULL;
+Mutex* MultiArray_lock = NULL;
+Monitor* Terminator_lock = NULL;
+Monitor* BeforeExit_lock = NULL;
+Monitor* Notify_lock = NULL;
+Monitor* Interrupt_lock = NULL;
+Monitor* ProfileVM_lock = NULL;
+Mutex* ProfilePrint_lock = NULL;
+Mutex* ExceptionCache_lock = NULL;
+Monitor* ObjAllocPost_lock = NULL;
+Mutex* OsrList_lock = NULL;
+#ifndef PRODUCT
+Mutex* FullGCALot_lock = NULL;
+#endif
+
+Mutex* Debug1_lock = NULL;
+Mutex* Debug2_lock = NULL;
+Mutex* Debug3_lock = NULL;
+
+Mutex* tty_lock = NULL;
+
+Mutex* RawMonitor_lock = NULL;
+Mutex* PerfDataMemAlloc_lock = NULL;
+Mutex* PerfDataManager_lock = NULL;
+Mutex* OopMapCacheAlloc_lock = NULL;
+
+Monitor* GCTaskManager_lock = NULL;
+
+Mutex* Management_lock = NULL;
+Monitor* LowMemory_lock = NULL;
+
+#define MAX_NUM_MUTEX 128
+static Monitor * _mutex_array[MAX_NUM_MUTEX];
+static int _num_mutex;
+
+#ifdef ASSERT
+void assert_locked_or_safepoint(const Monitor * lock) {
+ // check if this thread owns the lock (common case)
+ if (IgnoreLockingAssertions) return;
+ assert(lock != NULL, "Need non-NULL lock");
+ if (lock->owned_by_self()) return;
+ if (SafepointSynchronize::is_at_safepoint()) return;
+ if (!Universe::is_fully_initialized()) return;
+ // see if invoker of VM operation owns it
+ VM_Operation* op = VMThread::vm_operation();
+ if (op != NULL && op->calling_thread() == lock->owner()) return;
+ fatal1("must own lock %s", lock->name());
+}
+
+// a stronger assertion than the above
+void assert_lock_strong(const Monitor * lock) {
+ if (IgnoreLockingAssertions) return;
+ assert(lock != NULL, "Need non-NULL lock");
+ if (lock->owned_by_self()) return;
+ fatal1("must own lock %s", lock->name());
+}
+#endif
+
+#define def(var, type, pri, vm_block) { \
+ var = new type(Mutex::pri, #var, vm_block); \
+ assert(_num_mutex < MAX_NUM_MUTEX, \
+ "increase MAX_NUM_MUTEX"); \
+ _mutex_array[_num_mutex++] = var; \
+}
+
+void mutex_init() {
+ def(tty_lock , Mutex , event, true ); // allow to lock in VM
+
+ def(CGC_lock , Monitor, special, true ); // coordinate between fore- and background GC
+ def(STS_init_lock , Mutex, leaf, true );
+ if (UseConcMarkSweepGC) {
+ def(iCMS_lock , Monitor, special, true ); // CMS incremental mode start/stop notification
+ def(FullGCCount_lock , Monitor, leaf, true ); // in support of ExplicitGCInvokesConcurrent
+ }
+ def(ParGCRareEvent_lock , Mutex , leaf , true );
+ def(DerivedPointerTableGC_lock , Mutex, leaf, true );
+ def(CodeCache_lock , Mutex , special, true );
+ def(Interrupt_lock , Monitor, special, true ); // used for interrupt processing
+ def(RawMonitor_lock , Mutex, special, true );
+ def(OopMapCacheAlloc_lock , Mutex, leaf, true ); // used for oop_map_cache allocation.
+
+ def(Patching_lock , Mutex , special, true ); // used for safepointing and code patching.
+ def(ObjAllocPost_lock , Monitor, special, false);
+ def(LowMemory_lock , Monitor, special, true ); // used for low memory detection
+ def(JmethodIdCreation_lock , Mutex , leaf, true ); // used for creating jmethodIDs.
+
+ def(SystemDictionary_lock , Monitor, leaf, true ); // lookups done by VM thread
+ def(PackageTable_lock , Mutex , leaf, false);
+ def(InlineCacheBuffer_lock , Mutex , leaf, true );
+ def(VMStatistic_lock , Mutex , leaf, false);
+ def(ExpandHeap_lock , Mutex , leaf, true ); // Used during compilation by VM thread
+ def(JNIHandleBlockFreeList_lock , Mutex , leaf, true ); // handles are used by VM thread
+ def(SignatureHandlerLibrary_lock , Mutex , leaf, false);
+ def(SymbolTable_lock , Mutex , leaf, true );
+ def(StringTable_lock , Mutex , leaf, true );
+ def(ProfilePrint_lock , Mutex , leaf, false); // serial profile printing
+ def(ExceptionCache_lock , Mutex , leaf, false); // synchronize exception cache updates
+ def(OsrList_lock , Mutex , leaf, true );
+ def(Debug1_lock , Mutex , leaf, true );
+#ifndef PRODUCT
+ def(FullGCALot_lock , Mutex , leaf, false); // a lock to make FullGCALot MT safe
+#endif
+ def(BeforeExit_lock , Monitor, leaf, true );
+ def(PerfDataMemAlloc_lock , Mutex , leaf, true ); // used for allocating PerfData memory for performance data
+ def(PerfDataManager_lock , Mutex , leaf, true ); // used for synchronized access to PerfDataManager resources
+
+ // CMS_modUnionTable_lock leaf
+ // CMS_bitMap_lock leaf + 1
+ // CMS_freeList_lock leaf + 2
+
+ def(Safepoint_lock , Monitor, safepoint, true ); // locks SnippetCache_lock/Threads_lock
+
+ if (!UseMembar) {
+ def(SerializePage_lock , Monitor, leaf, true );
+ }
+
+ def(Threads_lock , Monitor, barrier, true );
+
+ def(VMOperationQueue_lock , Monitor, nonleaf, true ); // VM_thread allowed to block on these
+ def(VMOperationRequest_lock , Monitor, nonleaf, true );
+ def(RetData_lock , Mutex , nonleaf, false);
+ def(Terminator_lock , Monitor, nonleaf, true );
+ def(VtableStubs_lock , Mutex , nonleaf, true );
+ def(Notify_lock , Monitor, nonleaf, true );
+ def(JNIGlobalHandle_lock , Mutex , nonleaf, true ); // locks JNIHandleBlockFreeList_lock
+ def(JNICritical_lock , Monitor, nonleaf, true ); // used for JNI critical regions
+ def(AdapterHandlerLibrary_lock , Mutex , nonleaf, true);
+ if (UseConcMarkSweepGC) {
+ def(SLT_lock , Monitor, nonleaf, false );
+ // used in CMS GC for locking PLL lock
+ }
+ def(Heap_lock , Mutex , nonleaf+1, false);
+ def(JfieldIdCreation_lock , Mutex , nonleaf+1, true ); // jfieldID, Used in VM_Operation
+ def(JNICachedItableIndex_lock , Mutex , nonleaf+1, false); // Used to cache an itable index during JNI invoke
+
+ def(CompiledIC_lock , Mutex , nonleaf+2, false); // locks VtableStubs_lock, InlineCacheBuffer_lock
+ def(CompileTaskAlloc_lock , Mutex , nonleaf+2, true );
+ def(CompileStatistics_lock , Mutex , nonleaf+2, false);
+ def(MultiArray_lock , Mutex , nonleaf+2, false); // locks SymbolTable_lock
+
+ def(JvmtiThreadState_lock , Mutex , nonleaf+2, false); // Used by JvmtiThreadState/JvmtiEventController
+ def(JvmtiPendingEvent_lock , Monitor, nonleaf, false); // Used by JvmtiCodeBlobEvents
+ def(Management_lock , Mutex , nonleaf+2, false); // used for JVM management
+
+ def(Compile_lock , Mutex , nonleaf+3, true );
+ def(MethodData_lock , Mutex , nonleaf+3, false);
+
+ def(MethodCompileQueue_lock , Monitor, nonleaf+4, true );
+ def(Debug2_lock , Mutex , nonleaf+4, true );
+ def(Debug3_lock , Mutex , nonleaf+4, true );
+ def(ProfileVM_lock , Monitor, nonleaf+4, false); // used for profiling of the VMThread
+ def(CompileThread_lock , Monitor, nonleaf+5, false );
+#ifdef TIERED
+ def(C1_lock , Monitor, nonleaf+5, false );
+#endif // TIERED
+
+
+}
+
+GCMutexLocker::GCMutexLocker(Monitor * mutex) {
+ if (SafepointSynchronize::is_at_safepoint()) {
+ _locked = false;
+ } else {
+ _mutex = mutex;
+ _locked = true;
+ _mutex->lock();
+ }
+}
+
+// Print all mutexes/monitors that are currently owned by a thread; called
+// by fatal error handler.
+void print_owned_locks_on_error(outputStream* st) {
+ st->print("VM Mutex/Monitor currently owned by a thread: ");
+ bool none = true;
+ for (int i = 0; i < _num_mutex; i++) {
+ // see if it has an owner
+ if (_mutex_array[i]->owner() != NULL) {
+ if (none) {
+ // print format used by Mutex::print_on_error()
+ st->print_cr(" ([mutex/lock_event])");
+ none = false;
+ }
+ _mutex_array[i]->print_on_error(st);
+ st->cr();
+ }
+ }
+ if (none) st->print_cr("None");
+}
diff --git a/src/share/vm/runtime/mutexLocker.hpp b/src/share/vm/runtime/mutexLocker.hpp
new file mode 100644
index 000000000..59f145fcf
--- /dev/null
+++ b/src/share/vm/runtime/mutexLocker.hpp
@@ -0,0 +1,325 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Mutexes used in the VM.
+
+extern Mutex* Patching_lock; // a lock used to guard code patching of compiled code
+extern Monitor* SystemDictionary_lock; // a lock on the system dictionary
+extern Mutex* PackageTable_lock; // a lock on the class loader package table
+extern Mutex* CompiledIC_lock; // a lock used to guard compiled IC patching and access
+extern Mutex* InlineCacheBuffer_lock; // a lock used to guard the InlineCacheBuffer
+extern Mutex* VMStatistic_lock; // a lock used to guard statistics count increment
+extern Mutex* JNIGlobalHandle_lock; // a lock on creating JNI global handles
+extern Mutex* JNIHandleBlockFreeList_lock; // a lock on the JNI handle block free list
+extern Mutex* JNICachedItableIndex_lock; // a lock on caching an itable index during JNI invoke
+extern Mutex* JmethodIdCreation_lock; // a lock on creating JNI method identifiers
+extern Mutex* JfieldIdCreation_lock; // a lock on creating JNI static field identifiers
+extern Monitor* JNICritical_lock; // a lock used while entering and exiting JNI critical regions, allows GC to sometimes get in
+extern Mutex* JvmtiThreadState_lock; // a lock on modification of JVMTI thread data
+extern Monitor* JvmtiPendingEvent_lock; // a lock on the JVMTI pending events list
+extern Mutex* Heap_lock; // a lock on the heap
+extern Mutex* ExpandHeap_lock; // a lock on expanding the heap
+extern Mutex* AdapterHandlerLibrary_lock; // a lock on the AdapterHandlerLibrary
+extern Mutex* SignatureHandlerLibrary_lock; // a lock on the SignatureHandlerLibrary
+extern Mutex* VtableStubs_lock; // a lock on the VtableStubs
+extern Mutex* SymbolTable_lock; // a lock on the symbol table
+extern Mutex* StringTable_lock; // a lock on the interned string table
+extern Mutex* CodeCache_lock; // a lock on the CodeCache, rank is special, use MutexLockerEx
+extern Mutex* MethodData_lock; // a lock on installation of method data
+extern Mutex* RetData_lock; // a lock on installation of RetData inside method data
+extern Mutex* DerivedPointerTableGC_lock; // a lock to protect the derived pointer table
+extern Monitor* VMOperationQueue_lock; // a lock on queue of vm_operations waiting to execute
+extern Monitor* VMOperationRequest_lock; // a lock on Threads waiting for a vm_operation to terminate
+extern Monitor* Safepoint_lock; // a lock used by the safepoint abstraction
+extern Monitor* SerializePage_lock; // a lock used when the VMThread changes the serialize memory page permission during a safepoint
+extern Monitor* Threads_lock; // a lock on the Threads table of active Java threads
+ // (also used by Safepoints to block thread creation/destruction)
+extern Monitor* CGC_lock; // used for coordination between
+ // fore- & background GC threads.
+extern Mutex* STS_init_lock; // coordinate initialization of SuspendibleThreadSets.
+extern Monitor* SLT_lock; // used in CMS GC for acquiring PLL
+extern Monitor* iCMS_lock; // CMS incremental mode start/stop notification
+extern Monitor* FullGCCount_lock; // in support of "concurrent" full gc
+ // (see option ExplicitGCInvokesConcurrent)
+extern Mutex* ParGCRareEvent_lock; // Synchronizes various (rare) parallel GC ops.
+extern Mutex* Compile_lock; // a lock held when Compilation is updating code (used to block CodeCache traversal, CHA updates, etc)
+extern Monitor* MethodCompileQueue_lock; // a lock held when method compilations are enqueued, dequeued
+#ifdef TIERED
+extern Monitor* C1_lock; // a lock to ensure only a single C1 compile is ever active
+#endif // TIERED
+extern Monitor* CompileThread_lock; // a lock held by compile threads during compilation system initialization
+extern Mutex* CompileTaskAlloc_lock; // a lock held when CompileTasks are allocated
+extern Mutex* CompileStatistics_lock; // a lock held when updating compilation statistics
+extern Mutex* MultiArray_lock; // a lock used to guard allocation of multi-dim arrays
+extern Monitor* Terminator_lock; // a lock used to guard termination of the vm
+extern Monitor* BeforeExit_lock; // a lock used to guard cleanups and shutdown hooks
+extern Monitor* Notify_lock; // a lock used to synchronize the start-up of the vm
+extern Monitor* Interrupt_lock; // a lock used for condition variable mediated interrupt processing
+extern Monitor* ProfileVM_lock; // a lock used for profiling the VMThread
+extern Mutex* ProfilePrint_lock; // a lock used to serialize the printing of profiles
+extern Mutex* ExceptionCache_lock; // a lock used to synchronize exception cache updates
+extern Mutex* OsrList_lock; // a lock used to serialize access to OSR queues
+
+#ifndef PRODUCT
+extern Mutex* FullGCALot_lock; // a lock to make FullGCALot MT safe
+#endif
+extern Mutex* Debug1_lock; // A bunch of pre-allocated locks that can be used for tracing
+extern Mutex* Debug2_lock; // down synchronization related bugs!
+extern Mutex* Debug3_lock;
+
+extern Mutex* RawMonitor_lock;
+extern Mutex* PerfDataMemAlloc_lock; // a lock on the allocator for PerfData memory for performance data
+extern Mutex* PerfDataManager_lock; // a lock on access to PerfDataManager resources
+extern Mutex* ParkerFreeList_lock;
+extern Mutex* OopMapCacheAlloc_lock; // protects allocation of oop_map caches
+
+extern Mutex* Management_lock; // a lock used to serialize JVM management
+extern Monitor* LowMemory_lock; // a lock used for low memory detection
+
+// A MutexLocker provides mutual exclusion with respect to a given mutex
+// for the scope which contains the locker. The lock is an OS lock, not
+// an object lock, and the two do not interoperate. Do not use Mutex-based
+// locks to lock on Java objects, because they will not be respected if
+// that object is locked using the Java locking mechanism.
+//
+// NOTE WELL!!
+//
+// See orderAccess.hpp. We assume throughout the VM that MutexLocker's
+// and friends constructors do a fence, a lock and an acquire *in that
+// order*. And that their destructors do a release and unlock, in *that*
+// order. If their implementations change such that these assumptions
+// are violated, a whole lot of code will break.
+
+// Print all mutexes/monitors that are currently owned by a thread; called
+// by fatal error handler.
+void print_owned_locks_on_error(outputStream* st);
+
+char *lock_name(Mutex *mutex);
+
+class MutexLocker: StackObj {
+ private:
+ Monitor * _mutex;
+ public:
+ MutexLocker(Monitor * mutex) {
+ assert(mutex->rank() != Mutex::special,
+ "Special ranked mutex should only use MutexLockerEx");
+ _mutex = mutex;
+ _mutex->lock();
+ }
+
+ // Overloaded constructor passing current thread
+ MutexLocker(Monitor * mutex, Thread *thread) {
+ assert(mutex->rank() != Mutex::special,
+ "Special ranked mutex should only use MutexLockerEx");
+ _mutex = mutex;
+ _mutex->lock(thread);
+ }
+
+ ~MutexLocker() {
+ _mutex->unlock();
+ }
+
+};
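+
+// Example (sketch): the usual scoped-locking pattern. The lock is acquired in
+// the constructor and released when the scope exits; update_statistic() and
+// _stat_counter are hypothetical names used only for illustration.
+//
+//   void update_statistic() {
+//     MutexLocker ml(VMStatistic_lock);   // fence, lock, acquire
+//     _stat_counter++;                    // protected region
+//   }                                     // destructor: release, unlock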
+
+// for debugging: check that we're already owning this lock (or are at a safepoint)
+#ifdef ASSERT
+void assert_locked_or_safepoint(const Monitor * lock);
+void assert_lock_strong(const Monitor * lock);
+#else
+#define assert_locked_or_safepoint(lock)
+#define assert_lock_strong(lock)
+#endif
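+
+// Example (sketch): a routine that requires its caller to already hold a lock
+// can assert that fact on entry; remove_entry() is a hypothetical function.
+//
+//   void remove_entry() {
+//     assert_locked_or_safepoint(SystemDictionary_lock);
+//     // ... mutate the shared data structure ...
+//   }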
+
+// A MutexLockerEx behaves like a MutexLocker when its constructor is
+// called with a Mutex. Unlike a MutexLocker, its constructor can also be
+// called with NULL, in which case the MutexLockerEx is a no-op. There
+// is also a corresponding MutexUnlockerEx. We want to keep the
+// basic MutexLocker as fast as possible. MutexLockerEx can also lock
+// without safepoint check.
+
+class MutexLockerEx: public StackObj {
+ private:
+ Monitor * _mutex;
+ public:
+ MutexLockerEx(Monitor * mutex, bool no_safepoint_check = !Mutex::_no_safepoint_check_flag) {
+ _mutex = mutex;
+ if (_mutex != NULL) {
+ assert(mutex->rank() > Mutex::special || no_safepoint_check,
+ "Mutexes with rank special or lower should not do safepoint checks");
+ if (no_safepoint_check)
+ _mutex->lock_without_safepoint_check();
+ else
+ _mutex->lock();
+ }
+ }
+
+ ~MutexLockerEx() {
+ if (_mutex != NULL) {
+ _mutex->unlock();
+ }
+ }
+};
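+
+// Example (sketch): locking a special-ranked lock, which must be taken without
+// a safepoint check; the body of the block is hypothetical.
+//
+//   {
+//     MutexLockerEx ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+//     // ... inspect or patch code cache entries ...
+//   }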
+
+// A MonitorLockerEx is like a MutexLockerEx above, except it takes
+// a possibly null Monitor, and allows wait/notify as well which are
+// delegated to the underlying Monitor.
+
+class MonitorLockerEx: public MutexLockerEx {
+ private:
+ Monitor * _monitor;
+ public:
+ MonitorLockerEx(Monitor* monitor,
+ bool no_safepoint_check = !Mutex::_no_safepoint_check_flag):
+ MutexLockerEx(monitor, no_safepoint_check),
+ _monitor(monitor) {
+ // Superclass constructor did locking
+ }
+
+ ~MonitorLockerEx() {
+ #ifdef ASSERT
+ if (_monitor != NULL) {
+ assert_lock_strong(_monitor);
+ }
+ #endif // ASSERT
+ // Superclass destructor will do unlocking
+ }
+
+ bool wait(bool no_safepoint_check = !Mutex::_no_safepoint_check_flag,
+ long timeout = 0,
+ bool as_suspend_equivalent = !Mutex::_as_suspend_equivalent_flag) {
+ if (_monitor != NULL) {
+ return _monitor->wait(no_safepoint_check, timeout, as_suspend_equivalent);
+ }
+ return false;
+ }
+
+ bool notify_all() {
+ if (_monitor != NULL) {
+ return _monitor->notify_all();
+ }
+ return true;
+ }
+
+ bool notify() {
+ if (_monitor != NULL) {
+ return _monitor->notify();
+ }
+ return true;
+ }
+};
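+
+// Example (sketch): a simple wait/notify hand-off on a Monitor. The predicate
+// queue_is_empty() and the helper enqueue_task() are hypothetical.
+//
+//   // consumer side
+//   {
+//     MonitorLockerEx ml(MethodCompileQueue_lock);
+//     while (queue_is_empty()) {
+//       ml.wait();                 // drops the lock while waiting
+//     }
+//   }
+//
+//   // producer side
+//   {
+//     MonitorLockerEx ml(MethodCompileQueue_lock);
+//     enqueue_task();
+//     ml.notify_all();             // wake up waiting consumers
+//   }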
+
+
+
+// A GCMutexLocker is usually initialized with a mutex that is
+// automatically acquired in order to do GC. The function that
+// synchronizes using a GCMutexLocker may be called both during and between
+// GC's. Thus, it must acquire the mutex if GC is not in progress, but not
+// if GC is in progress (since the mutex is already held on its behalf.)
+
+class GCMutexLocker: public StackObj {
+private:
+ Monitor * _mutex;
+ bool _locked;
+public:
+ GCMutexLocker(Monitor * mutex);
+ ~GCMutexLocker() { if (_locked) _mutex->unlock(); }
+};
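+
+// Example (sketch): an operation that may run either inside a GC pause (the
+// lock is then held on the caller's behalf) or outside of one; the function
+// name expand_heap_by() is hypothetical.
+//
+//   void expand_heap_by(size_t bytes) {
+//     GCMutexLocker gcml(ExpandHeap_lock);   // no-op at a safepoint
+//     // ... perform the expansion ...
+//   }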
+
+
+
+// A MutexUnlocker temporarily exits a previously
+// entered mutex for the scope which contains the unlocker.
+
+class MutexUnlocker: StackObj {
+ private:
+ Monitor * _mutex;
+
+ public:
+ MutexUnlocker(Monitor * mutex) {
+ _mutex = mutex;
+ _mutex->unlock();
+ }
+
+ ~MutexUnlocker() {
+ _mutex->lock();
+ }
+};
+
+// A MutexUnlockerEx temporarily exits a previously
+// entered mutex for the scope which contains the unlocker.
+
+class MutexUnlockerEx: StackObj {
+ private:
+ Monitor * _mutex;
+ bool _no_safepoint_check;
+
+ public:
+ MutexUnlockerEx(Monitor * mutex, bool no_safepoint_check = !Mutex::_no_safepoint_check_flag) {
+ _mutex = mutex;
+ _no_safepoint_check = no_safepoint_check;
+ _mutex->unlock();
+ }
+
+ ~MutexUnlockerEx() {
+ if (_no_safepoint_check == Mutex::_no_safepoint_check_flag) {
+ _mutex->lock_without_safepoint_check();
+ } else {
+ _mutex->lock();
+ }
+ }
+};
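+
+// Example (sketch): temporarily dropping a lock around work that must not be
+// performed while holding it; the surrounding code is hypothetical.
+//
+//   {
+//     MutexLocker ml(Heap_lock);
+//     // ... work that requires Heap_lock ...
+//     {
+//       MutexUnlocker mul(Heap_lock);   // unlocks here
+//       // ... work that must not hold Heap_lock ...
+//     }                                 // re-locks here
+//   }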
+
+#ifndef PRODUCT
+//
+// A special MutexLocker that allows:
+// - reentrant locking
+// - locking out of order
+//
+// Only to be used for verify code, where we can relax our dead-lock
+// detection code a bit (unsafe, but probably ok). This code is NEVER to
+// be included in a product version.
+//
+class VerifyMutexLocker: StackObj {
+ private:
+ Monitor * _mutex;
+ bool _reentrant;
+ public:
+ VerifyMutexLocker(Monitor * mutex) {
+ _mutex = mutex;
+ _reentrant = mutex->owned_by_self();
+ if (!_reentrant) {
+ // We temporarily disable strict safepoint checking while we acquire the lock
+ FlagSetting fs(StrictSafepointChecks, false);
+ _mutex->lock();
+ }
+ }
+
+ ~VerifyMutexLocker() {
+ if (!_reentrant) {
+ _mutex->unlock();
+ }
+ }
+};
+
+#endif
diff --git a/src/share/vm/runtime/objectMonitor.hpp b/src/share/vm/runtime/objectMonitor.hpp
new file mode 100644
index 000000000..2a1384c53
--- /dev/null
+++ b/src/share/vm/runtime/objectMonitor.hpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// WARNING:
+// This is a very sensitive and fragile class. DO NOT make any
+// change unless you are fully aware of the underlying semantics.
+
+// This class can not inherit from any other class, because I have
+// to let the displaced header be the very first word. Otherwise I
+// have to let markOop include this file, which would export the
+// monitor data structure to everywhere.
+//
+// The ObjectMonitor class is used to implement JavaMonitors which have
+// been transformed from the lightweight structure on the thread stack to a
+// heavyweight lock due to contention.
+//
+// It is also used as the RawMonitor by JVMTI.
+
+
+class ObjectWaiter;
+
+class ObjectMonitor {
+ public:
+ enum {
+ OM_OK, // no error
+ OM_SYSTEM_ERROR, // operating system error
+ OM_ILLEGAL_MONITOR_STATE, // IllegalMonitorStateException
+ OM_INTERRUPTED, // Thread.interrupt()
+ OM_TIMED_OUT // Object.wait() timed out
+ };
+
+ public:
+ // TODO-FIXME: the "offset" routines should return a type of off_t instead of int ...
+ // ByteSize would also be an appropriate type.
+ static int header_offset_in_bytes() { return offset_of(ObjectMonitor, _header); }
+ static int object_offset_in_bytes() { return offset_of(ObjectMonitor, _object); }
+ static int owner_offset_in_bytes() { return offset_of(ObjectMonitor, _owner); }
+ static int count_offset_in_bytes() { return offset_of(ObjectMonitor, _count); }
+ static int recursions_offset_in_bytes() { return offset_of(ObjectMonitor, _recursions); }
+ static int cxq_offset_in_bytes() { return offset_of(ObjectMonitor, _cxq) ; }
+ static int succ_offset_in_bytes() { return offset_of(ObjectMonitor, _succ) ; }
+ static int EntryList_offset_in_bytes() { return offset_of(ObjectMonitor, _EntryList); }
+ static int FreeNext_offset_in_bytes() { return offset_of(ObjectMonitor, FreeNext); }
+ static int WaitSet_offset_in_bytes() { return offset_of(ObjectMonitor, _WaitSet) ; }
+ static int Responsible_offset_in_bytes() { return offset_of(ObjectMonitor, _Responsible);}
+ static int Spinner_offset_in_bytes() { return offset_of(ObjectMonitor, _Spinner); }
+
+ public:
+ // Eventually we'll make provisions for multiple callbacks, but
+ // for now one will suffice.
+ static int (*SpinCallbackFunction)(intptr_t, int) ;
+ static intptr_t SpinCallbackArgument ;
+
+
+ public:
+ ObjectMonitor();
+ ~ObjectMonitor();
+
+ markOop header() const;
+ void set_header(markOop hdr);
+
+ intptr_t is_busy() const;
+ intptr_t is_entered(Thread* current) const;
+
+ void* owner() const;
+ void set_owner(void* owner);
+
+ intptr_t waiters() const;
+
+ intptr_t count() const;
+ void set_count(intptr_t count);
+ intptr_t contentions() const ;
+
+ // JVM/DI GetMonitorInfo() needs this
+ Thread * thread_of_waiter (ObjectWaiter *) ;
+ ObjectWaiter * first_waiter () ;
+ ObjectWaiter * next_waiter(ObjectWaiter* o);
+
+ intptr_t recursions() const { return _recursions; }
+
+ void* object() const;
+ void* object_addr();
+ void set_object(void* obj);
+
+ bool check(TRAPS); // true if the thread owns the monitor.
+ void check_slow(TRAPS);
+ void clear();
+#ifndef PRODUCT
+ void verify();
+ void print();
+#endif
+
+ bool try_enter (TRAPS) ;
+ void enter(TRAPS);
+ void exit(TRAPS);
+ void wait(jlong millis, bool interruptable, TRAPS);
+ void notify(TRAPS);
+ void notifyAll(TRAPS);
+
+// Use the following at your own risk
+ intptr_t complete_exit(TRAPS);
+ void reenter(intptr_t recursions, TRAPS);
+
+ int raw_enter(TRAPS);
+ int raw_exit(TRAPS);
+ int raw_wait(jlong millis, bool interruptable, TRAPS);
+ int raw_notify(TRAPS);
+ int raw_notifyAll(TRAPS);
+
+ private:
+ // JVMTI support -- remove ASAP
+ int SimpleEnter (Thread * Self) ;
+ int SimpleExit (Thread * Self) ;
+ int SimpleWait (Thread * Self, jlong millis) ;
+ int SimpleNotify (Thread * Self, bool All) ;
+
+ private:
+ void Recycle () ;
+ void AddWaiter (ObjectWaiter * waiter) ;
+
+ ObjectWaiter * DequeueWaiter () ;
+ void DequeueSpecificWaiter (ObjectWaiter * waiter) ;
+ void EnterI (TRAPS) ;
+ void ReenterI (Thread * Self, ObjectWaiter * SelfNode) ;
+ void UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode) ;
+ int TryLock (Thread * Self) ;
+ int NotRunnable (Thread * Self, Thread * Owner) ;
+ int TrySpin_Fixed (Thread * Self) ;
+ int TrySpin_VaryFrequency (Thread * Self) ;
+ int TrySpin_VaryDuration (Thread * Self) ;
+ void ctAsserts () ;
+ void ExitEpilog (Thread * Self, ObjectWaiter * Wakee) ;
+ bool ExitSuspendEquivalent (JavaThread * Self) ;
+
+ private:
+ friend class ObjectSynchronizer;
+ friend class ObjectWaiter;
+ friend class VMStructs;
+
+ // WARNING: this must be the very first word of ObjectMonitor
+ // This means this class can't use any virtual member functions.
+ // TODO-FIXME: assert that offsetof(_header) is 0 or get rid of the
+ // implicit 0 offset in emitted code.
+
+ volatile markOop _header; // displaced object header word - mark
+ void* volatile _object; // backward object pointer - strong root
+
+ double SharingPad [1] ; // temp to reduce false sharing
+
+ // All the following fields must be machine word aligned
+ // The VM assumes write ordering wrt these fields, which can be
+ // read from other threads.
+
+ void * volatile _owner; // pointer to owning thread OR BasicLock
+ volatile intptr_t _recursions; // recursion count, 0 for first entry
+ int OwnerIsThread ; // _owner is (Thread *) vs SP/BasicLock
+ ObjectWaiter * volatile _cxq ; // LL of recently-arrived threads blocked on entry.
+ // The list is actually composed of WaitNodes, acting
+ // as proxies for Threads.
+ ObjectWaiter * volatile _EntryList ; // Threads blocked on entry or reentry.
+ Thread * volatile _succ ; // Heir presumptive thread - used for futile wakeup throttling
+ Thread * volatile _Responsible ;
+ int _PromptDrain ; // rqst to drain cxq into EntryList ASAP
+
+ volatile int _Spinner ; // for exit->spinner handoff optimization
+ volatile int _SpinFreq ; // Spin 1-out-of-N attempts: success rate
+ volatile int _SpinClock ;
+ volatile int _SpinDuration ;
+ volatile intptr_t _SpinState ; // MCS/CLH list of spinners
+
+ // TODO-FIXME: _count, _waiters and _recursions should be of
+ // type int, or int32_t but not intptr_t. There's no reason
+ // to use 64-bit fields for these variables on a 64-bit JVM.
+
+ volatile intptr_t _count; // reference count to prevent reclamation/deflation
+ // at stop-the-world time. See deflate_idle_monitors().
+ // _count is approximately |_WaitSet| + |_EntryList|
+ volatile intptr_t _waiters; // number of waiting threads
+ ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
+ volatile int _WaitSetLock; // protects Wait Queue - simple spinlock
+
+ public:
+ int _QMix ; // Mixed prepend queue discipline
+ ObjectMonitor * FreeNext ; // Free list linkage
+ intptr_t StatA, StatsB ;
+
+};
diff --git a/src/share/vm/runtime/objectMonitor.inline.hpp b/src/share/vm/runtime/objectMonitor.inline.hpp
new file mode 100644
index 000000000..63bb7cb26
--- /dev/null
+++ b/src/share/vm/runtime/objectMonitor.inline.hpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+inline intptr_t ObjectMonitor::is_entered(TRAPS) const {
+ if (THREAD == _owner || THREAD->is_lock_owned((address) _owner)) {
+ return 1;
+ }
+ return 0;
+}
+
+inline markOop ObjectMonitor::header() const {
+ return _header;
+}
+
+inline void ObjectMonitor::set_header(markOop hdr) {
+ _header = hdr;
+}
+
+inline intptr_t ObjectMonitor::count() const {
+ return _count;
+}
+
+inline void ObjectMonitor::set_count(intptr_t count) {
+ _count= count;
+}
+
+inline intptr_t ObjectMonitor::waiters() const {
+ return _waiters;
+}
+
+inline void* ObjectMonitor::owner() const {
+ return _owner;
+}
+
+inline void ObjectMonitor::clear() {
+ assert(_header, "Fatal logic error in ObjectMonitor header!");
+ assert(_count == 0, "Fatal logic error in ObjectMonitor count!");
+ assert(_waiters == 0, "Fatal logic error in ObjectMonitor waiters!");
+ assert(_recursions == 0, "Fatal logic error in ObjectMonitor recursions!");
+ assert(_object, "Fatal logic error in ObjectMonitor object!");
+ assert(_owner == 0, "Fatal logic error in ObjectMonitor owner!");
+
+ _header = NULL;
+ _object = NULL;
+}
+
+
+inline void* ObjectMonitor::object() const {
+ return _object;
+}
+
+inline void* ObjectMonitor::object_addr() {
+ return (void *)(&_object);
+}
+
+inline void ObjectMonitor::set_object(void* obj) {
+ _object = obj;
+}
+
+inline bool ObjectMonitor::check(TRAPS) {
+ if (THREAD != _owner) {
+ if (THREAD->is_lock_owned((address) _owner)) {
+ _owner = THREAD; // regain ownership of inflated monitor
+ OwnerIsThread = 1 ;
+ assert (_recursions == 0, "invariant") ;
+ } else {
+ check_slow(THREAD);
+ return false;
+ }
+ }
+ return true;
+}
+
+
+// return number of threads contending for this monitor
+inline intptr_t ObjectMonitor::contentions() const {
+ return _count;
+}
+
+inline void ObjectMonitor::set_owner(void* owner) {
+ _owner = owner;
+ _recursions = 0;
+ _count = 0;
+}
+
+
+// here are the platform-dependent bodies:
+
+# include "incls/_objectMonitor_pd.inline.hpp.incl"
diff --git a/src/share/vm/runtime/orderAccess.cpp b/src/share/vm/runtime/orderAccess.cpp
new file mode 100644
index 000000000..392b59781
--- /dev/null
+++ b/src/share/vm/runtime/orderAccess.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2003 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_orderAccess.cpp.incl"
+
+volatile intptr_t OrderAccess::dummy = 0;
diff --git a/src/share/vm/runtime/orderAccess.hpp b/src/share/vm/runtime/orderAccess.hpp
new file mode 100644
index 000000000..a2040ed84
--- /dev/null
+++ b/src/share/vm/runtime/orderAccess.hpp
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2003 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Memory Access Ordering Model
+//
+// This interface is based on the JSR-133 Cookbook for Compiler Writers
+// and on the IA64 memory model. It is the dynamic equivalent of the
+// C/C++ volatile specifier. I.e., volatility restricts compile-time
+// memory access reordering in a way similar to what we want to occur
+// at runtime.
+//
+// In the following, the terms 'previous', 'subsequent', 'before',
+// 'after', 'preceding' and 'succeeding' refer to program order. The
+// terms 'down' and 'below' refer to forward load or store motion
+// relative to program order, while 'up' and 'above' refer to backward
+// motion.
+//
+//
+// We define four primitive memory barrier operations.
+//
+// LoadLoad: Load1(s); LoadLoad; Load2
+//
+// Ensures that Load1 completes (obtains the value it loads from memory)
+// before Load2 and any subsequent load operations. Loads before Load1
+// may *not* float below Load2 and any subsequent load operations.
+//
+// StoreStore: Store1(s); StoreStore; Store2
+//
+// Ensures that Store1 completes (the effect on memory of Store1 is made
+// visible to other processors) before Store2 and any subsequent store
+// operations. Stores before Store1 may *not* float below Store2 and any
+// subsequent store operations.
+//
+// LoadStore: Load1(s); LoadStore; Store2
+//
+// Ensures that Load1 completes before Store2 and any subsequent store
+// operations. Loads before Load1 may *not* float below Store2 and any
+// subsequent store operations.
+//
+// StoreLoad: Store1(s); StoreLoad; Load2
+//
+// Ensures that Store1 completes before Load2 and any subsequent load
+// operations. Stores before Store1 may *not* float below Load2 and any
+// subsequent load operations.
+//
+//
+// We define two further operations, 'release' and 'acquire'. They are
+// mirror images of each other.
+//
+// Execution by a processor of release makes the effect of all memory
+// accesses issued by it previous to the release visible to all
+// processors *before* the release completes. The effect of subsequent
+// memory accesses issued by it *may* be made visible *before* the
+// release. I.e., subsequent memory accesses may float above the
+// release, but prior ones may not float below it.
+//
+// Execution by a processor of acquire makes the effect of all memory
+// accesses issued by it subsequent to the acquire visible to all
+// processors *after* the acquire completes. The effect of prior memory
+// accesses issued by it *may* be made visible *after* the acquire.
+// I.e., prior memory accesses may float below the acquire, but
+// subsequent ones may not float above it.
+//
+// Finally, we define a 'fence' operation, which conceptually is a
+// release combined with an acquire. In the real world these operations
+// require one or more machine instructions which can float above and
+// below the release or acquire, so we usually can't just issue the
+// release-acquire back-to-back. All machines we know of implement some
+// sort of memory fence instruction.
+//
+//
+// The standalone implementations of release and acquire need an associated
+// dummy volatile store or load respectively. To avoid redundant operations,
+// we can define the composite operators: 'release_store', 'store_fence' and
+// 'load_acquire'. Here's a summary of the machine instructions corresponding
+// to each operation.
+//
+// sparc RMO ia64 x86
+// ---------------------------------------------------------------------
+// fence membar #LoadStore | mf lock addl 0,(sp)
+// #StoreStore |
+// #LoadLoad |
+// #StoreLoad
+//
+// release membar #LoadStore | st.rel [sp]=r0 movl $0,<dummy>
+// #StoreStore
+// st %g0,[]
+//
+// acquire ld [%sp],%g0 ld.acq <r>=[sp] movl (sp),<r>
+// membar #LoadLoad |
+// #LoadStore
+//
+// release_store membar #LoadStore | st.rel <store>
+// #StoreStore
+// st
+//
+// store_fence st st lock xchg
+// fence mf
+//
+// load_acquire ld ld.acq <load>
+// membar #LoadLoad |
+// #LoadStore
+//
+// Using only release_store and load_acquire, we can implement the
+// following ordered sequences.
+//
+// 1. load, load == load_acquire, load
+// or load_acquire, load_acquire
+// 2. load, store == load, release_store
+// or load_acquire, store
+// or load_acquire, release_store
+// 3. store, store == store, release_store
+// or release_store, release_store
+//
+// These require no membar instructions for sparc-TSO and no extra
+// instructions for ia64.
+//
+// Ordering a load relative to preceding stores requires a store_fence,
+// which implies a membar #StoreLoad between the store and load under
+// sparc-TSO. A fence is required by ia64. On x86, we use locked xchg.
+//
+// 4. store, load == store_fence, load
+//
+// Use store_fence to make sure all stores done in an 'interesting'
+// region are made visible prior to both subsequent loads and stores.
+//
+// Conventional usage is to issue a load_acquire for ordered loads. Use
+// release_store for ordered stores when you care only that prior stores
+// are visible before the release_store, but don't care exactly when the
+// store associated with the release_store becomes visible. Use
+// release_store_fence to update values like the thread state, where we
+// don't want the current thread to continue until all our prior memory
+// accesses (including the new thread state) are visible to other threads.
+//
+//
+// C++ Volatility
+//
+// C++ guarantees ordering at operations termed 'sequence points' (defined
+// to be volatile accesses and calls to library I/O functions). 'Side
+// effects' (defined as volatile accesses, calls to library I/O functions
+// and object modification) previous to a sequence point must be visible
+// at that sequence point. See the C++ standard, section 1.9, titled
+// "Program Execution". This means that all barrier implementations,
+// including standalone loadload, storestore, loadstore, storeload, acquire
+// and release must include a sequence point, usually via a volatile memory
+// access. Other ways to guarantee a sequence point are, e.g., use of
+// indirect calls and linux's __asm__ volatile.
+//
+//
+// os::is_MP Considered Redundant
+//
+// Callers of this interface do not need to test os::is_MP() before
+// issuing an operation. The test is taken care of by the implementation
+// of the interface (depending on the vm version and platform, the test
+// may or may not be actually done by the implementation).
+//
+//
+// A Note on Memory Ordering and Cache Coherency
+//
+// Cache coherency and memory ordering are orthogonal concepts, though they
+// interact. E.g., all existing itanium machines are cache-coherent, but
+// the hardware can freely reorder loads wrt other loads unless it sees a
+// load-acquire instruction. All existing sparc machines are cache-coherent
+// and, unlike itanium, TSO guarantees that the hardware orders loads wrt
+// loads and stores, and stores wrt each other.
+//
+// Consider the implementation of loadload. *If* your platform *isn't*
+// cache-coherent, then loadload must not only prevent hardware load
+// instruction reordering, but it must *also* ensure that subsequent
+// loads from addresses that could be written by other processors (i.e.,
+// that are broadcast by other processors) go all the way to the first
+// level of memory shared by those processors and the one issuing
+// the loadload.
+//
+// So if we have a MP that has, say, a per-processor D$ that doesn't see
+// writes by other processors, and has a shared E$ that does, the loadload
+// barrier would have to make sure that either
+//
+// 1. cache lines in the issuing processor's D$ that contained data from
+// addresses that could be written by other processors are invalidated, so
+// subsequent loads from those addresses go to the E$, (it could do this
+// by tagging such cache lines as 'shared', though how to tell the hardware
+// to do the tagging is an interesting problem), or
+//
+// 2. there never are such cache lines in the issuing processor's D$, which
+// means all references to shared data (however identified: see above)
+// bypass the D$ (i.e., are satisfied from the E$).
+//
+// If your machine doesn't have an E$, substitute 'main memory' for 'E$'.
+//
+// Either of these alternatives is a pain, so no current machine we know of
+// has incoherent caches.
+//
+// If loadload didn't have these properties, the store-release sequence for
+// publishing a shared data structure wouldn't work, because a processor
+// trying to read data newly published by another processor might go to
+// its own incoherent caches to satisfy the read instead of to the newly
+// written shared memory.
+//
+//
+// NOTE WELL!!
+//
+// A Note on MutexLocker and Friends
+//
+// See mutexLocker.hpp. We assume throughout the VM that MutexLocker's
+// and friends' constructors do a fence, a lock and an acquire *in that
+// order*. And that their destructors do a release and unlock, in *that*
+// order. If their implementations change such that these assumptions
+// are violated, a whole lot of code will break.
+
+class OrderAccess : AllStatic {
+ public:
+ static void loadload();
+ static void storestore();
+ static void loadstore();
+ static void storeload();
+
+ static void acquire();
+ static void release();
+ static void fence();
+
+ static jbyte load_acquire(volatile jbyte* p);
+ static jshort load_acquire(volatile jshort* p);
+ static jint load_acquire(volatile jint* p);
+ static jlong load_acquire(volatile jlong* p);
+ static jubyte load_acquire(volatile jubyte* p);
+ static jushort load_acquire(volatile jushort* p);
+ static juint load_acquire(volatile juint* p);
+ static julong load_acquire(volatile julong* p);
+ static jfloat load_acquire(volatile jfloat* p);
+ static jdouble load_acquire(volatile jdouble* p);
+
+ static intptr_t load_ptr_acquire(volatile intptr_t* p);
+ static void* load_ptr_acquire(volatile void* p);
+ static void* load_ptr_acquire(const volatile void* p);
+
+ static void release_store(volatile jbyte* p, jbyte v);
+ static void release_store(volatile jshort* p, jshort v);
+ static void release_store(volatile jint* p, jint v);
+ static void release_store(volatile jlong* p, jlong v);
+ static void release_store(volatile jubyte* p, jubyte v);
+ static void release_store(volatile jushort* p, jushort v);
+ static void release_store(volatile juint* p, juint v);
+ static void release_store(volatile julong* p, julong v);
+ static void release_store(volatile jfloat* p, jfloat v);
+ static void release_store(volatile jdouble* p, jdouble v);
+
+ static void release_store_ptr(volatile intptr_t* p, intptr_t v);
+ static void release_store_ptr(volatile void* p, void* v);
+
+ static void store_fence(jbyte* p, jbyte v);
+ static void store_fence(jshort* p, jshort v);
+ static void store_fence(jint* p, jint v);
+ static void store_fence(jlong* p, jlong v);
+ static void store_fence(jubyte* p, jubyte v);
+ static void store_fence(jushort* p, jushort v);
+ static void store_fence(juint* p, juint v);
+ static void store_fence(julong* p, julong v);
+ static void store_fence(jfloat* p, jfloat v);
+ static void store_fence(jdouble* p, jdouble v);
+
+ static void store_ptr_fence(intptr_t* p, intptr_t v);
+ static void store_ptr_fence(void** p, void* v);
+
+ static void release_store_fence(volatile jbyte* p, jbyte v);
+ static void release_store_fence(volatile jshort* p, jshort v);
+ static void release_store_fence(volatile jint* p, jint v);
+ static void release_store_fence(volatile jlong* p, jlong v);
+ static void release_store_fence(volatile jubyte* p, jubyte v);
+ static void release_store_fence(volatile jushort* p, jushort v);
+ static void release_store_fence(volatile juint* p, juint v);
+ static void release_store_fence(volatile julong* p, julong v);
+ static void release_store_fence(volatile jfloat* p, jfloat v);
+ static void release_store_fence(volatile jdouble* p, jdouble v);
+
+ static void release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
+ static void release_store_ptr_fence(volatile void* p, void* v);
+
+ // In order to force a memory access, implementations may
+ // need a volatile externally visible dummy variable.
+ static volatile intptr_t dummy;
+};
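+
+// Example (sketch): publishing a value with release_store and reading it with
+// load_acquire, per ordered sequences 2 and 3 above. Assumes hypothetical
+// fields: jint _data; volatile jint _ready;
+//
+//   // writer
+//   _data = 42;                              // plain store
+//   OrderAccess::release_store(&_ready, 1);  // _data is visible before _ready
+//
+//   // reader
+//   if (OrderAccess::load_acquire(&_ready)) {
+//     int v = _data;                         // guaranteed to observe 42
+//   }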
diff --git a/src/share/vm/runtime/os.cpp b/src/share/vm/runtime/os.cpp
new file mode 100644
index 000000000..72dfb2265
--- /dev/null
+++ b/src/share/vm/runtime/os.cpp
@@ -0,0 +1,1108 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_os.cpp.incl"
+
+# include <signal.h>
+
+OSThread* os::_starting_thread = NULL;
+address os::_polling_page = NULL;
+volatile int32_t* os::_mem_serialize_page = NULL;
+uintptr_t os::_serialize_page_mask = 0;
+long os::_rand_seed = 1;
+int os::_processor_count = 0;
+volatile jlong os::_global_time = 0;
+volatile int os::_global_time_lock = 0;
+bool os::_use_global_time = false;
+size_t os::_page_sizes[os::page_sizes_max];
+
+#ifndef PRODUCT
+int os::num_mallocs = 0; // # of calls to malloc/realloc
+size_t os::alloc_bytes = 0; // # of bytes allocated
+int os::num_frees = 0; // # of calls to free
+#endif
+
+// Atomic read of a jlong is assured by a seqlock; see update_global_time()
+jlong os::read_global_time() {
+#ifdef _LP64
+ return _global_time;
+#else
+ volatile int lock;
+ volatile jlong current_time;
+ int ctr = 0;
+
+ for (;;) {
+ lock = _global_time_lock;
+
+ // spin while locked
+ while ((lock & 0x1) != 0) {
+ ++ctr;
+ if ((ctr & 0xFFF) == 0) {
+ // Guarantee writer progress. Can't use yield; yield is advisory
+ // and has almost no effect on some platforms. Don't need a state
+ // transition - the park call will return promptly.
+ assert(Thread::current() != NULL, "TLS not initialized");
+ assert(Thread::current()->_ParkEvent != NULL, "sync not initialized");
+ Thread::current()->_ParkEvent->park(1);
+ }
+ lock = _global_time_lock;
+ }
+
+ OrderAccess::loadload();
+ current_time = _global_time;
+ OrderAccess::loadload();
+
+ // ratify seqlock value
+ if (lock == _global_time_lock) {
+ return current_time;
+ }
+ }
+#endif
+}
+
+//
+// NOTE - Assumes only one writer thread!
+//
+// We use a seqlock to guarantee that jlong _global_time is updated
+// atomically on 32-bit platforms. A locked value is indicated by
+// the lock variable LSB == 1. Readers will initially read the lock
+// value, spinning until the LSB == 0. They then speculatively read
+// the global time value, then re-read the lock value to ensure that
+// it hasn't changed. If the lock value has changed, the entire read
+// sequence is retried.
+//
+// Writers simply set the LSB = 1 (i.e. increment the variable),
+// update the global time, then release the lock and bump the version
+// number (i.e. increment the variable again.) In this case we don't
+// even need a CAS since we ensure there's only one writer.
+//
+void os::update_global_time() {
+#ifdef _LP64
+ _global_time = timeofday();
+#else
+ assert((_global_time_lock & 0x1) == 0, "multiple writers?");
+ jlong current_time = timeofday();
+ _global_time_lock++; // lock
+ OrderAccess::storestore();
+ _global_time = current_time;
+ OrderAccess::storestore();
+ _global_time_lock++; // unlock
+#endif
+}
+
+// Fill in buffer with current local time as an ISO-8601 string.
+// E.g., yyyy-mm-ddThh:mm:ss-zzzz.
+// Returns buffer, or NULL if it failed.
+// This would mostly be a call to
+// strftime(...., "%Y-%m-%d" "T" "%H:%M:%S" "%z", ....)
+// except that on Windows the %z behaves badly, so we do it ourselves.
+// Also, people wanted milliseconds on there,
+// and strftime doesn't do milliseconds.
+char* os::iso8601_time(char* buffer, size_t buffer_length) {
+ // Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0"
+ // 1 2
+ // 12345678901234567890123456789
+ static const char* iso8601_format =
+ "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d";
+ static const size_t needed_buffer = 29;
+
+ // Sanity check the arguments
+ if (buffer == NULL) {
+ assert(false, "NULL buffer");
+ return NULL;
+ }
+ if (buffer_length < needed_buffer) {
+ assert(false, "buffer_length too small");
+ return NULL;
+ }
+ // Get the current time
+ jlong milliseconds_since_19700101 = timeofday();
+ const int milliseconds_per_second = 1000;
+ const time_t seconds_since_19700101 =
+ milliseconds_since_19700101 / milliseconds_per_second;
+ const int milliseconds_after_second =
+ milliseconds_since_19700101 % milliseconds_per_second;
+ // Convert the time value to a tm and timezone variable
+ const struct tm *time_struct_temp = localtime(&seconds_since_19700101);
+ if (time_struct_temp == NULL) {
+ assert(false, "Failed localtime");
+ return NULL;
+ }
+ // Save the results of localtime
+ const struct tm time_struct = *time_struct_temp;
+ const time_t zone = timezone;
+
+ // If daylight savings time is in effect,
+ // we are 1 hour East of our time zone
+ const time_t seconds_per_minute = 60;
+ const time_t minutes_per_hour = 60;
+ const time_t seconds_per_hour = seconds_per_minute * minutes_per_hour;
+ time_t UTC_to_local = zone;
+ if (time_struct.tm_isdst > 0) {
+ UTC_to_local = UTC_to_local - seconds_per_hour;
+ }
+ // Compute the time zone offset.
+ // localtime(3C) sets timezone to the difference (in seconds)
+ // between UTC and local time.
+ // ISO 8601 says we need the difference between local time and UTC,
+ // so we change the sign of the localtime(3C) result.
+ const time_t local_to_UTC = -(UTC_to_local);
+ // Then we have to figure out if we are ahead (+) or behind (-) UTC.
+ char sign_local_to_UTC = '+';
+ time_t abs_local_to_UTC = local_to_UTC;
+ if (local_to_UTC < 0) {
+ sign_local_to_UTC = '-';
+ abs_local_to_UTC = -(abs_local_to_UTC);
+ }
+ // Convert time zone offset seconds to hours and minutes.
+ const time_t zone_hours = (abs_local_to_UTC / seconds_per_hour);
+ const time_t zone_min =
+ ((abs_local_to_UTC % seconds_per_hour) / seconds_per_minute);
+
+ // Print an ISO 8601 date and time stamp into the buffer
+ const int year = 1900 + time_struct.tm_year;
+ const int month = 1 + time_struct.tm_mon;
+ const int printed = jio_snprintf(buffer, buffer_length, iso8601_format,
+ year,
+ month,
+ time_struct.tm_mday,
+ time_struct.tm_hour,
+ time_struct.tm_min,
+ time_struct.tm_sec,
+ milliseconds_after_second,
+ sign_local_to_UTC,
+ zone_hours,
+ zone_min);
+ if (printed == 0) {
+ assert(false, "Failed jio_printf");
+ return NULL;
+ }
+ return buffer;
+}
+
+OSReturn os::set_priority(Thread* thread, ThreadPriority p) {
+#ifdef ASSERT
+ if (!(!thread->is_Java_thread() ||
+ Thread::current() == thread ||
+ Threads_lock->owned_by_self()
+ || thread->is_Compiler_thread()
+ )) {
+ assert(false, "possibility of dangling Thread pointer");
+ }
+#endif
+
+ if (p >= MinPriority && p <= MaxPriority) {
+ int priority = java_to_os_priority[p];
+ return set_native_priority(thread, priority);
+ } else {
+ assert(false, "Should not happen");
+ return OS_ERR;
+ }
+}
+
+
+OSReturn os::get_priority(const Thread* const thread, ThreadPriority& priority) {
+ int p;
+ int os_prio;
+ OSReturn ret = get_native_priority(thread, &os_prio);
+ if (ret != OS_OK) return ret;
+
+ for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] > os_prio; p--) ;
+ priority = (ThreadPriority)p;
+ return OS_OK;
+}
+
+
+// --------------------- sun.misc.Signal (optional) ---------------------
+
+
+// SIGBREAK is sent by the keyboard to query the VM state
+#ifndef SIGBREAK
+#define SIGBREAK SIGQUIT
+#endif
+
+// sigexitnum_pd is a platform-specific special signal used for terminating the Signal thread.
+
+
+static void signal_thread_entry(JavaThread* thread, TRAPS) {
+ os::set_priority(thread, NearMaxPriority);
+ while (true) {
+ int sig;
+ {
+ // FIXME: Currently we have not decided what the status should be
+ // for this java thread blocked here. Once we decide about
+ // that, we should fix this.
+ sig = os::signal_wait();
+ }
+ if (sig == os::sigexitnum_pd()) {
+ // Terminate the signal thread
+ return;
+ }
+
+ switch (sig) {
+ case SIGBREAK: {
+ // Check if the signal is a trigger to start the Attach Listener - in that
+ // case don't print stack traces.
+ if (!DisableAttachMechanism && AttachListener::is_init_trigger()) {
+ continue;
+ }
+ // Print stack traces
+ // Any SIGBREAK operations added here should make sure to flush
+ // the output stream (e.g. tty->flush()) after output. See 4803766.
+ // Each module also prints an extra carriage return after its output.
+ VM_PrintThreads op;
+ VMThread::execute(&op);
+ VM_PrintJNI jni_op;
+ VMThread::execute(&jni_op);
+ VM_FindDeadlocks op1(tty);
+ VMThread::execute(&op1);
+ Universe::print_heap_at_SIGBREAK();
+ if (PrintClassHistogram) {
+ VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */);
+ VMThread::execute(&op1);
+ }
+ if (JvmtiExport::should_post_data_dump()) {
+ JvmtiExport::post_data_dump();
+ }
+ break;
+ }
+ default: {
+ // Dispatch the signal to java
+ HandleMark hm(THREAD);
+ klassOop k = SystemDictionary::resolve_or_null(vmSymbolHandles::sun_misc_Signal(), THREAD);
+ KlassHandle klass (THREAD, k);
+ if (klass.not_null()) {
+ JavaValue result(T_VOID);
+ JavaCallArguments args;
+ args.push_int(sig);
+ JavaCalls::call_static(
+ &result,
+ klass,
+ vmSymbolHandles::dispatch_name(),
+ vmSymbolHandles::int_void_signature(),
+ &args,
+ THREAD
+ );
+ }
+ if (HAS_PENDING_EXCEPTION) {
+ // tty is initialized early so we don't expect it to be null, but
+ // if it is we can't risk doing an initialization that might
+ // trigger additional out-of-memory conditions
+ if (tty != NULL) {
+ char klass_name[256];
+ char tmp_sig_name[16];
+ const char* sig_name = "UNKNOWN";
+ instanceKlass::cast(PENDING_EXCEPTION->klass())->
+ name()->as_klass_external_name(klass_name, 256);
+ if (os::exception_name(sig, tmp_sig_name, 16) != NULL)
+ sig_name = tmp_sig_name;
+ warning("Exception %s occurred dispatching signal %s to handler"
+ "- the VM may need to be forcibly terminated",
+ klass_name, sig_name );
+ }
+ CLEAR_PENDING_EXCEPTION;
+ }
+ }
+ }
+ }
+}
+
+
+void os::signal_init() {
+ if (!ReduceSignalUsage) {
+ // Setup JavaThread for processing signals
+ EXCEPTION_MARK;
+ klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_Thread(), true, CHECK);
+ instanceKlassHandle klass (THREAD, k);
+ instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
+
+ const char thread_name[] = "Signal Dispatcher";
+ Handle string = java_lang_String::create_from_str(thread_name, CHECK);
+
+ // Initialize thread_oop to put it into the system threadGroup
+ Handle thread_group (THREAD, Universe::system_thread_group());
+ JavaValue result(T_VOID);
+ JavaCalls::call_special(&result, thread_oop,
+ klass,
+ vmSymbolHandles::object_initializer_name(),
+ vmSymbolHandles::threadgroup_string_void_signature(),
+ thread_group,
+ string,
+ CHECK);
+
+ KlassHandle group(THREAD, SystemDictionary::threadGroup_klass());
+ JavaCalls::call_special(&result,
+ thread_group,
+ group,
+ vmSymbolHandles::add_method_name(),
+ vmSymbolHandles::thread_void_signature(),
+ thread_oop, // ARG 1
+ CHECK);
+
+ os::signal_init_pd();
+
+ { MutexLocker mu(Threads_lock);
+ JavaThread* signal_thread = new JavaThread(&signal_thread_entry);
+
+ // At this point it is possible that no osthread was created for the
+ // JavaThread due to lack of memory. We would normally have to throw an
+ // exception in that case; however, since this must work and we do not
+ // allow exceptions anyway, check for failure and abort if it occurs.
+ if (signal_thread == NULL || signal_thread->osthread() == NULL) {
+ vm_exit_during_initialization("java.lang.OutOfMemoryError",
+ "unable to create new native thread");
+ }
+
+ java_lang_Thread::set_thread(thread_oop(), signal_thread);
+ java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
+ java_lang_Thread::set_daemon(thread_oop());
+
+ signal_thread->set_threadObj(thread_oop());
+ Threads::add(signal_thread);
+ Thread::start(signal_thread);
+ }
+ // Handle ^BREAK
+ os::signal(SIGBREAK, os::user_handler());
+ }
+}
+
+
+void os::terminate_signal_thread() {
+ if (!ReduceSignalUsage)
+ signal_notify(sigexitnum_pd());
+}
+
+
+// --------------------- loading libraries ---------------------
+
+typedef jint (JNICALL *JNI_OnLoad_t)(JavaVM *, void *);
+extern struct JavaVM_ main_vm;
+
+static void* _native_java_library = NULL;
+
+void* os::native_java_library() {
+ if (_native_java_library == NULL) {
+ char buffer[JVM_MAXPATHLEN];
+ char ebuf[1024];
+
+ // Try to load verify dll first. In 1.3 java dll depends on it and is not always
+ // able to find it when the loading executable is outside the JDK.
+ // In order to keep working with 1.2 we ignore any loading errors.
+ hpi::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(), "verify");
+ hpi::dll_load(buffer, ebuf, sizeof(ebuf));
+
+ // Load java dll
+ hpi::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(), "java");
+ _native_java_library = hpi::dll_load(buffer, ebuf, sizeof(ebuf));
+ if (_native_java_library == NULL) {
+ vm_exit_during_initialization("Unable to load native library", ebuf);
+ }
+ // The JNI_OnLoad handling is normally done by method load in java.lang.ClassLoader$NativeLibrary,
+ // but the VM loads the base library explicitly so we have to check for JNI_OnLoad as well
+ const char *onLoadSymbols[] = JNI_ONLOAD_SYMBOLS;
+ JNI_OnLoad_t JNI_OnLoad = CAST_TO_FN_PTR(JNI_OnLoad_t, hpi::dll_lookup(_native_java_library, onLoadSymbols[0]));
+ if (JNI_OnLoad != NULL) {
+ JavaThread* thread = JavaThread::current();
+ ThreadToNativeFromVM ttn(thread);
+ HandleMark hm(thread);
+ jint ver = (*JNI_OnLoad)(&main_vm, NULL);
+ if (!Threads::is_supported_jni_version_including_1_1(ver)) {
+ vm_exit_during_initialization("Unsupported JNI version");
+ }
+ }
+ }
+ return _native_java_library;
+}
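+
+// For reference, JNI_OnLoad is the standard JNI entry point a native library may
+// export; a minimal, purely illustrative implementation (not VM code) looks like:
+//
+//   extern "C" JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void* reserved) {
+//     return JNI_VERSION_1_2;   // report the JNI version the library requires
+//   }
+//
+// The version returned there is what the is_supported_jni_version_including_1_1
+// check above validates.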
+
+// --------------------- heap allocation utilities ---------------------
+
+char *os::strdup(const char *str) {
+ size_t size = strlen(str);
+ char *dup_str = (char *)malloc(size + 1);
+ if (dup_str == NULL) return NULL;
+ strcpy(dup_str, str);
+ return dup_str;
+}
+
+
+
+#ifdef ASSERT
+#define space_before (MallocCushion + sizeof(double))
+#define space_after MallocCushion
+#define size_addr_from_base(p) (size_t*)(p + space_before - sizeof(size_t))
+#define size_addr_from_obj(p) ((size_t*)p - 1)
+// MallocCushion: size of the extra cushion allocated around objects with +UseMallocOnly
+// NB: this cannot be a debug flag, because debug flags are not set from the command
+// line until *after* the first few allocations have already happened
+#define MallocCushion 16
+#else
+#define space_before 0
+#define space_after 0
+#define size_addr_from_base(p) should not use w/o ASSERT
+#define size_addr_from_obj(p) should not use w/o ASSERT
+#define MallocCushion 0
+#endif
+#define paranoid 0 /* only set to 1 if you suspect checking code has bug */
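+
+// Illustrative layout of a debug-build (ASSERT) malloc block, given the macros
+// above (badResourceValue marks the cushions; the pad may be empty on LP64):
+//
+//   base                               memblock (pointer returned to the caller)
+//   |<------- space_before ------->|<------- size ------->|<-- space_after -->|
+//   [ cushion | pad | size_t size  |      user data       |      cushion      ]
+//
+// size_addr_from_base(base) and size_addr_from_obj(memblock) both address the
+// size_t stored immediately before the user data; os::malloc below fills the
+// cushions and verify_block checks them on free/realloc.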
+
+#ifdef ASSERT
+inline size_t get_size(void* obj) {
+ size_t size = *size_addr_from_obj(obj);
+ if ((intptr_t)size < 0)
+ fatal2("free: size field of object #%p was overwritten (%lu)", obj, size);
+ return size;
+}
+
+u_char* find_cushion_backwards(u_char* start) {
+ u_char* p = start;
+ while (p[ 0] != badResourceValue || p[-1] != badResourceValue ||
+ p[-2] != badResourceValue || p[-3] != badResourceValue) p--;
+ // ok, we have four consecutive marker bytes; find start
+ u_char* q = p - 4;
+ while (*q == badResourceValue) q--;
+ return q + 1;
+}
+
+u_char* find_cushion_forwards(u_char* start) {
+ u_char* p = start;
+ while (p[0] != badResourceValue || p[1] != badResourceValue ||
+ p[2] != badResourceValue || p[3] != badResourceValue) p++;
+ // ok, we have four consecutive marker bytes; find end of cushion
+ u_char* q = p + 4;
+ while (*q == badResourceValue) q++;
+ return q - MallocCushion;
+}
+
+void print_neighbor_blocks(void* ptr) {
+ // find block allocated before ptr (not entirely crash-proof)
+ if (MallocCushion < 4) {
+ tty->print_cr("### cannot find previous block (MallocCushion < 4)");
+ return;
+ }
+ u_char* start_of_this_block = (u_char*)ptr - space_before;
+ u_char* end_of_prev_block_data = start_of_this_block - space_after -1;
+ // look for cushion in front of prev. block
+ u_char* start_of_prev_block = find_cushion_backwards(end_of_prev_block_data);
+ ptrdiff_t size = *size_addr_from_base(start_of_prev_block);
+ u_char* obj = start_of_prev_block + space_before;
+ if (size <= 0 ) {
+ // start is bad; we may have been confused by OS data in between objects
+ // search one more block backwards
+ start_of_prev_block = find_cushion_backwards(start_of_prev_block);
+ size = *size_addr_from_base(start_of_prev_block);
+ obj = start_of_prev_block + space_before;
+ }
+
+ if (start_of_prev_block + space_before + size + space_after == start_of_this_block) {
+ tty->print_cr("### previous object: %p (%ld bytes)", obj, size);
+ } else {
+ tty->print_cr("### previous object (not sure if correct): %p (%ld bytes)", obj, size);
+ }
+
+ // now find successor block
+ u_char* start_of_next_block = (u_char*)ptr + *size_addr_from_obj(ptr) + space_after;
+ start_of_next_block = find_cushion_forwards(start_of_next_block);
+ u_char* next_obj = start_of_next_block + space_before;
+ ptrdiff_t next_size = *size_addr_from_base(start_of_next_block);
+ if (start_of_next_block[0] == badResourceValue &&
+ start_of_next_block[1] == badResourceValue &&
+ start_of_next_block[2] == badResourceValue &&
+ start_of_next_block[3] == badResourceValue) {
+ tty->print_cr("### next object: %p (%ld bytes)", next_obj, next_size);
+ } else {
+ tty->print_cr("### next object (not sure if correct): %p (%ld bytes)", next_obj, next_size);
+ }
+}
+
+
+void report_heap_error(void* memblock, void* bad, const char* where) {
+ tty->print_cr("## nof_mallocs = %d, nof_frees = %d", os::num_mallocs, os::num_frees);
+ tty->print_cr("## memory stomp: byte at %p %s object %p", bad, where, memblock);
+ print_neighbor_blocks(memblock);
+ fatal("memory stomping error");
+}
+
+void verify_block(void* memblock) {
+ size_t size = get_size(memblock);
+ if (MallocCushion) {
+ u_char* ptr = (u_char*)memblock - space_before;
+ for (int i = 0; i < MallocCushion; i++) {
+ if (ptr[i] != badResourceValue) {
+ report_heap_error(memblock, ptr+i, "in front of");
+ }
+ }
+ u_char* end = (u_char*)memblock + size + space_after;
+ for (int j = -MallocCushion; j < 0; j++) {
+ if (end[j] != badResourceValue) {
+ report_heap_error(memblock, end+j, "after");
+ }
+ }
+ }
+}
+#endif
+
+void* os::malloc(size_t size) {
+ NOT_PRODUCT(num_mallocs++);
+ NOT_PRODUCT(alloc_bytes += size);
+
+ if (size == 0) {
+ // return a valid pointer if size is zero
+ // if NULL is returned the calling functions assume out of memory.
+ size = 1;
+ }
+
+ NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
+ u_char* ptr = (u_char*)::malloc(size + space_before + space_after);
+#ifdef ASSERT
+ if (ptr == NULL) return NULL;
+ if (MallocCushion) {
+ for (u_char* p = ptr; p < ptr + MallocCushion; p++) *p = (u_char)badResourceValue;
+ u_char* end = ptr + space_before + size;
+ for (u_char* pq = ptr+MallocCushion; pq < end; pq++) *pq = (u_char)uninitBlockPad;
+ for (u_char* q = end; q < end + MallocCushion; q++) *q = (u_char)badResourceValue;
+ }
+ // put size just before data
+ *size_addr_from_base(ptr) = size;
+#endif
+ u_char* memblock = ptr + space_before;
+ if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
+ tty->print_cr("os::malloc caught, %lu bytes --> %p", size, memblock);
+ breakpoint();
+ }
+ debug_only(if (paranoid) verify_block(memblock));
+ if (PrintMalloc && tty != NULL) tty->print_cr("os::malloc %lu bytes --> %p", size, memblock);
+ return memblock;
+}
+
+
+void* os::realloc(void *memblock, size_t size) {
+ NOT_PRODUCT(num_mallocs++);
+ NOT_PRODUCT(alloc_bytes += size);
+#ifndef ASSERT
+ return ::realloc(memblock, size);
+#else
+ if (memblock == NULL) {
+ return os::malloc(size);
+ }
+ if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
+ tty->print_cr("os::realloc caught %p", memblock);
+ breakpoint();
+ }
+ verify_block(memblock);
+ NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
+ if (size == 0) return NULL;
+ // always move the block
+ void* ptr = malloc(size);
+ if (PrintMalloc) tty->print_cr("os::remalloc %lu bytes, %p --> %p", size, memblock, ptr);
+ // Copy to new memory if malloc didn't fail
+ if ( ptr != NULL ) {
+ memcpy(ptr, memblock, MIN2(size, get_size(memblock)));
+ if (paranoid) verify_block(ptr);
+ if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
+ tty->print_cr("os::realloc caught, %lu bytes --> %p", size, ptr);
+ breakpoint();
+ }
+ free(memblock);
+ }
+ return ptr;
+#endif
+}
+
+
+void os::free(void *memblock) {
+ NOT_PRODUCT(num_frees++);
+#ifdef ASSERT
+ if (memblock == NULL) return;
+ if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
+ if (tty != NULL) tty->print_cr("os::free caught %p", memblock);
+ breakpoint();
+ }
+ verify_block(memblock);
+ if (PrintMalloc && tty != NULL)
+ // tty->print_cr("os::free %p", memblock);
+ fprintf(stderr, "os::free %p\n", memblock);
+ NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
+ // Added by detlefs.
+ if (MallocCushion) {
+ u_char* ptr = (u_char*)memblock - space_before;
+ for (u_char* p = ptr; p < ptr + MallocCushion; p++) {
+ guarantee(*p == badResourceValue,
+ "Thing freed should be malloc result.");
+ *p = (u_char)freeBlockPad;
+ }
+ size_t size = get_size(memblock);
+ u_char* end = ptr + space_before + size;
+ for (u_char* q = end; q < end + MallocCushion; q++) {
+ guarantee(*q == badResourceValue,
+ "Thing freed should be malloc result.");
+ *q = (u_char)freeBlockPad;
+ }
+ }
+#endif
+ ::free((char*)memblock - space_before);
+}
+
+void os::init_random(long initval) {
+ _rand_seed = initval;
+}
+
+
+long os::random() {
+ /* standard, well-known linear congruential random generator with
+ * next_rand = (16807*seed) mod (2**31-1)
+ * see
+ * (1) "Random Number Generators: Good Ones Are Hard to Find",
+ * S.K. Park and K.W. Miller, Communications of the ACM 31:10 (Oct 1988),
+ * (2) "Two Fast Implementations of the 'Minimal Standard' Random
+ * Number Generator", David G. Carta, Comm. ACM 33, 1 (Jan 1990), pp. 87-88.
+ */
+ const long a = 16807;
+ const unsigned long m = 2147483647;
+ const long q = m / a; assert(q == 127773, "weird math");
+ const long r = m % a; assert(r == 2836, "weird math");
+
+ // decompose a*seed into 2^31*p + q (p = high-order bits, q = low 31 bits)
+ unsigned long lo = a * (long)(_rand_seed & 0xFFFF);
+ unsigned long hi = a * (long)((unsigned long)_rand_seed >> 16);
+ lo += (hi & 0x7FFF) << 16;
+
+ // if q overflowed, ignore the overflow and increment q
+ if (lo > m) {
+ lo &= m;
+ ++lo;
+ }
+ lo += hi >> 15;
+
+ // if (p+q) overflowed, ignore the overflow and increment (p+q)
+ if (lo > m) {
+ lo &= m;
+ ++lo;
+ }
+ return (_rand_seed = lo);
+}
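+
+// For reference (illustrative only, not used by the VM): the folded hi/lo
+// arithmetic above is Carta's overflow-free formulation of the direct update
+//
+//   long next_rand(long seed) {          // for seeds in [1, 2^31-2]
+//     return (long)((16807LL * seed) % 2147483647LL);  // needs a 64-bit product
+//   }
+//
+// The hi/lo split avoids the 64-bit multiply while producing the same sequence.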
+
+// The INITIALIZED state is distinguished from the SUSPENDED state because the
+// conditions in which a thread is first started are different from those in which
+// a suspension is resumed. These differences make it hard to apply, when starting
+// a thread, the tougher checks that we want to perform when resuming one.
+// However, when start_thread is called as a result of Thread.start, on a Java
+// thread, the operation is synchronized on the Java Thread object. So there
+// cannot be a race to start the thread and hence for the thread to exit while
+// we are working on it. Non-Java threads that start Java threads either have
+// to do so in a context in which races are impossible, or should do appropriate
+// locking.
+
+void os::start_thread(Thread* thread) {
+ // guard suspend/resume
+ MutexLockerEx ml(thread->SR_lock(), Mutex::_no_safepoint_check_flag);
+ OSThread* osthread = thread->osthread();
+ osthread->set_state(RUNNABLE);
+ pd_start_thread(thread);
+}
+
+//---------------------------------------------------------------------------
+// Helper functions for fatal error handler
+
+void os::print_hex_dump(outputStream* st, address start, address end, int unitsize) {
+ assert(unitsize == 1 || unitsize == 2 || unitsize == 4 || unitsize == 8, "just checking");
+
+ int cols = 0;
+ int cols_per_line = 0;
+ switch (unitsize) {
+ case 1: cols_per_line = 16; break;
+ case 2: cols_per_line = 8; break;
+ case 4: cols_per_line = 4; break;
+ case 8: cols_per_line = 2; break;
+ default: return;
+ }
+
+ address p = start;
+ st->print(PTR_FORMAT ": ", start);
+ while (p < end) {
+ switch (unitsize) {
+ case 1: st->print("%02x", *(u1*)p); break;
+ case 2: st->print("%04x", *(u2*)p); break;
+ case 4: st->print("%08x", *(u4*)p); break;
+ case 8: st->print("%016" FORMAT64_MODIFIER "x", *(u8*)p); break;
+ }
+ p += unitsize;
+ cols++;
+ if (cols >= cols_per_line && p < end) {
+ cols = 0;
+ st->cr();
+ st->print(PTR_FORMAT ": ", p);
+ } else {
+ st->print(" ");
+ }
+ }
+ st->cr();
+}
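+
+// Example (illustrative address and values): print_hex_dump(st, p, p + 16, 4)
+// emits cols_per_line units per row, each row prefixed with its start address:
+//
+//   0x00007f3a1c001000: 0000002a 00000000 deadbeef 00000001
+//
+// unitsize selects byte/short/word/quadword formatting of each column.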
+
+void os::print_environment_variables(outputStream* st, const char** env_list,
+ char* buffer, int len) {
+ if (env_list) {
+ st->print_cr("Environment Variables:");
+
+ for (int i = 0; env_list[i] != NULL; i++) {
+ if (getenv(env_list[i], buffer, len)) {
+ st->print(env_list[i]);
+ st->print("=");
+ st->print_cr(buffer);
+ }
+ }
+ }
+}
+
+void os::print_cpu_info(outputStream* st) {
+ // cpu
+ st->print("CPU:");
+ st->print("total %d", os::processor_count());
+ // It's not safe to query the number of active processors after a crash.
+ // st->print("(active %d)", os::active_processor_count());
+ st->print(" %s", VM_Version::cpu_features());
+ st->cr();
+}
+
+void os::print_date_and_time(outputStream *st) {
+ time_t tloc;
+ (void)time(&tloc);
+ st->print("time: %s", ctime(&tloc)); // ctime adds newline.
+
+ double t = os::elapsedTime();
+ // NOTE: printf("%f", ...) tends to crash on Linux when called after a SEGV,
+ // presumably a glibc bug. The workaround is to round "t" to an int before
+ // printing. We lose some precision, but that is acceptable here.
+ st->print_cr("elapsed time: %d seconds", (int)t);
+}
+
+
+// Looks like all platforms except IA64 can use the same function to check if the
+// C stack is walkable beyond the current frame. The check for fp() is not
+// necessary on SPARC, but it's harmless.
+bool os::is_first_C_frame(frame* fr) {
+#ifdef IA64
+ // In order to walk native frames on Itanium, we need to access the unwind
+ // table, which is inside ELF. We don't want to parse ELF after fatal error,
+ // so return true for IA64. If we need to support C stack walking on IA64,
+ // this function needs to be moved to CPU specific files, as fp() on IA64
+ // is register stack, which grows towards higher memory address.
+ return true;
+#endif
+
+ // Load up sp, fp, sender sp and sender fp, check for reasonable values.
+ // Check usp first, because if that's bad the other accessors may fault
+ // on some architectures. Ditto ufp second, etc.
+ uintptr_t fp_align_mask = (uintptr_t)(sizeof(address)-1);
+ // sp on amd64 can be 32-bit aligned.
+ uintptr_t sp_align_mask = (uintptr_t)(sizeof(int)-1);
+
+ uintptr_t usp = (uintptr_t)fr->sp();
+ if ((usp & sp_align_mask) != 0) return true;
+
+ uintptr_t ufp = (uintptr_t)fr->fp();
+ if ((ufp & fp_align_mask) != 0) return true;
+
+ uintptr_t old_sp = (uintptr_t)fr->sender_sp();
+ if ((old_sp & sp_align_mask) != 0) return true;
+ if (old_sp == 0 || old_sp == (uintptr_t)-1) return true;
+
+ uintptr_t old_fp = (uintptr_t)fr->link();
+ if ((old_fp & fp_align_mask) != 0) return true;
+ if (old_fp == 0 || old_fp == (uintptr_t)-1 || old_fp == ufp) return true;
+
+ // stack grows downwards; if old_fp is below current fp or if the stack
+ // frame is too large, either the stack is corrupted or fp is not saved
+ // on stack (i.e. on x86, ebp may be used as general register). The stack
+ // is not walkable beyond current frame.
+ if (old_fp < ufp) return true;
+ if (old_fp - ufp > 64 * K) return true;
+
+ return false;
+}
+
+#ifdef ASSERT
+extern "C" void test_random() {
+ const double m = 2147483647;
+ double mean = 0.0, variance = 0.0, t;
+ long reps = 10000;
+ unsigned long seed = 1;
+
+ tty->print_cr("seed %ld for %ld repeats...", seed, reps);
+ os::init_random(seed);
+ long num;
+ for (int k = 0; k < reps; k++) {
+ num = os::random();
+ double u = (double)num / m;
+ assert(u >= 0.0 && u <= 1.0, "bad random number!");
+
+ // calculate mean and variance of the random sequence
+ mean += u;
+ variance += (u*u);
+ }
+ mean /= reps;
+ variance /= (reps - 1);
+
+ assert(num == 1043618065, "bad seed");
+ tty->print_cr("mean of the 1st 10000 numbers: %f", mean);
+ tty->print_cr("variance of the 1st 10000 numbers: %f", variance);
+ const double eps = 0.0001;
+ t = fabsd(mean - 0.5018);
+ assert(t < eps, "bad mean");
+ t = (variance - 0.3355) < 0.0 ? -(variance - 0.3355) : variance - 0.3355;
+ assert(t < eps, "bad variance");
+}
+#endif
+
+
+// Set up the boot classpath.
+
+char* os::format_boot_path(const char* format_string,
+ const char* home,
+ int home_len,
+ char fileSep,
+ char pathSep) {
+ assert((fileSep == '/' && pathSep == ':') ||
+ (fileSep == '\\' && pathSep == ';'), "unexpected separator chars");
+
+ // Scan the format string to determine the length of the actual
+ // boot classpath, and handle platform dependencies as well.
+ int formatted_path_len = 0;
+ const char* p;
+ for (p = format_string; *p != 0; ++p) {
+ if (*p == '%') formatted_path_len += home_len - 1;
+ ++formatted_path_len;
+ }
+
+ char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1);
+ if (formatted_path == NULL) {
+ return NULL;
+ }
+
+ // Create boot classpath from format, substituting separator chars and
+ // java home directory.
+ char* q = formatted_path;
+ for (p = format_string; *p != 0; ++p) {
+ switch (*p) {
+ case '%':
+ strcpy(q, home);
+ q += home_len;
+ break;
+ case '/':
+ *q++ = fileSep;
+ break;
+ case ':':
+ *q++ = pathSep;
+ break;
+ default:
+ *q++ = *p;
+ }
+ }
+ *q = '\0';
+
+ assert((q - formatted_path) == formatted_path_len, "formatted_path size botched");
+ return formatted_path;
+}
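+
+// Worked example (hypothetical home directory): on a Unix-style platform with
+// fileSep = '/', pathSep = ':' and home = "/usr/jdk" (home_len = 8),
+//
+//   format_boot_path("%/lib/rt.jar:%/classes", "/usr/jdk", 8, '/', ':')
+//
+// returns "/usr/jdk/lib/rt.jar:/usr/jdk/classes"; each '%' expands to the home
+// directory and '/' and ':' are rewritten to the platform separators.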
+
+
+bool os::set_boot_path(char fileSep, char pathSep) {
+
+ const char* home = Arguments::get_java_home();
+ int home_len = (int)strlen(home);
+
+ static const char* meta_index_dir_format = "%/lib/";
+ static const char* meta_index_format = "%/lib/meta-index";
+ char* meta_index = format_boot_path(meta_index_format, home, home_len, fileSep, pathSep);
+ if (meta_index == NULL) return false;
+ char* meta_index_dir = format_boot_path(meta_index_dir_format, home, home_len, fileSep, pathSep);
+ if (meta_index_dir == NULL) return false;
+ Arguments::set_meta_index_path(meta_index, meta_index_dir);
+
+ // Any modification to the JAR-file list for the boot classpath must be
+ // aligned with install/install/make/common/Pack.gmk. Note: boot class
+ // path class JARs are stripped for StackMapTable to reduce download size.
+ static const char classpath_format[] =
+ "%/lib/resources.jar:"
+ "%/lib/rt.jar:"
+ "%/lib/sunrsasign.jar:"
+ "%/lib/jsse.jar:"
+ "%/lib/jce.jar:"
+ "%/lib/charsets.jar:"
+ "%/classes";
+ char* sysclasspath = format_boot_path(classpath_format, home, home_len, fileSep, pathSep);
+ if (sysclasspath == NULL) return false;
+ Arguments::set_sysclasspath(sysclasspath);
+
+ return true;
+}
+
+
+void os::set_memory_serialize_page(address page) {
+ int count = log2_intptr(sizeof(class JavaThread)) - log2_intptr(64);
+ _mem_serialize_page = (volatile int32_t *)page;
+ // We initialize the serialization page shift count here
+ // We assume a cache line size of 64 bytes
+ assert(SerializePageShiftCount == count,
+ "thread size changed, fix SerializePageShiftCount constant");
+ set_serialize_page_mask((uintptr_t)(vm_page_size() - sizeof(int32_t)));
+}
+
+// This method is called from signal handler when SIGSEGV occurs while the current
+// thread tries to store to the "read-only" memory serialize page during state
+// transition.
+void os::block_on_serialize_page_trap() {
+ if (TraceSafepoint) {
+ tty->print_cr("Block until the serialize page permission restored");
+ }
+ // When VMThread is holding the SerializePage_lock during modifying the
+ // access permission of the memory serialize page, the following call
+ // will block until the permission of that page is restored to rw.
+ // Generally, it is unsafe to manipulate locks in signal handlers, but in
+ // this case, it's OK as the signal is synchronous and we know precisely when
+ // it can occur. SerializePage_lock is a transiently-held leaf lock, so
+ // lock_without_safepoint_check should be safe.
+ SerializePage_lock->lock_without_safepoint_check();
+ SerializePage_lock->unlock();
+}
+
+// Serialize all thread state variables
+void os::serialize_thread_states() {
+ // On some platforms such as Solaris & Linux, the time duration of the page
+ // permission restoration is observed to be much longer than expected due to
+ // scheduler starvation problem etc. To avoid the long synchronization
+ // time and expensive page trap spinning, 'SerializePage_lock' is used to block
+ // the mutator thread if such case is encountered. Since this method is always
+ // called by VMThread during safepoint, lock_without_safepoint_check is used
+ // instead. See bug 6546278.
+ SerializePage_lock->lock_without_safepoint_check();
+ os::protect_memory( (char *)os::get_memory_serialize_page(), os::vm_page_size() );
+ os::unguard_memory( (char *)os::get_memory_serialize_page(), os::vm_page_size() );
+ SerializePage_lock->unlock();
+}
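+
+// Conceptual sequence (illustrative, simplified) of the membar-free protocol
+// that this routine implements together with write_memory_serialize_page():
+//
+//   // mutator thread, on a state transition:
+//   thread->set_thread_state(new_state);
+//   os::write_memory_serialize_page(thread);   // plain store to the shared page
+//
+//   // VMThread, when it must observe up-to-date states:
+//   os::serialize_thread_states();             // briefly write-protect the page
+//
+// Any mutator store that races with the permission flip faults and blocks in
+// block_on_serialize_page_trap() until the page is writable again, which
+// serializes the earlier state store without a memory barrier on the fast path.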
+
+// Returns true if the current stack pointer is above the stack shadow
+// pages, false otherwise.
+
+bool os::stack_shadow_pages_available(Thread *thread, methodHandle method) {
+ assert(StackRedPages > 0 && StackYellowPages > 0,"Sanity check");
+ address sp = current_stack_pointer();
+ // Check if we have StackShadowPages above the yellow zone. This parameter
+ // is dependent on the depth of the maximum VM call stack possible from
+ // the handler for stack overflow. 'instanceof' in the stack overflow
+ // handler uses at least 8k of VM stack, and a println uses at least 8k
+ // of native stack.
+ const int framesize_in_bytes =
+ Interpreter::size_top_interpreter_activation(method()) * wordSize;
+ int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages)
+ * vm_page_size()) + framesize_in_bytes;
+ // The very lower end of the stack
+ address stack_limit = thread->stack_base() - thread->stack_size();
+ return (sp > (stack_limit + reserved_area));
+}
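+
+// Illustrative arithmetic (hypothetical parameter values): with a 4K page size,
+// StackShadowPages = 20, StackRedPages = 1, StackYellowPages = 2 and a top
+// activation of 2K, reserved_area = (20 + 1 + 2) * 4K + 2K = 94K, so the method
+// may only be entered when sp is more than 94K above the low end of the stack.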
+
+size_t os::page_size_for_region(size_t region_min_size, size_t region_max_size,
+ uint min_pages)
+{
+ assert(min_pages > 0, "sanity");
+ if (UseLargePages) {
+ const size_t max_page_size = region_max_size / min_pages;
+
+ for (unsigned int i = 0; _page_sizes[i] != 0; ++i) {
+ const size_t sz = _page_sizes[i];
+ const size_t mask = sz - 1;
+ if ((region_min_size & mask) == 0 && (region_max_size & mask) == 0) {
+ // The largest page size with no fragmentation.
+ return sz;
+ }
+
+ if (sz <= max_page_size) {
+ // The largest page size that satisfies the min_pages requirement.
+ return sz;
+ }
+ }
+ }
+
+ return vm_page_size();
+}
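+
+// Illustrative examples, assuming UseLargePages and _page_sizes = { 2M, 4K, 0 }
+// (the array is expected to be sorted from largest to smallest):
+//
+//   page_size_for_region(6*M, 6*M, 1);   // -> 2M: both bounds are 2M-aligned
+//   page_size_for_region(3*M, 5*M, 4);   // -> 4K: 2M would fragment, and 2M > 5M/4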
+
+#ifndef PRODUCT
+void os::trace_page_sizes(const char* str, const size_t region_min_size,
+ const size_t region_max_size, const size_t page_size,
+ const char* base, const size_t size)
+{
+ if (TracePageSizes) {
+ tty->print_cr("%s: min=" SIZE_FORMAT " max=" SIZE_FORMAT
+ " pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT
+ " size=" SIZE_FORMAT,
+ str, region_min_size, region_max_size,
+ page_size, base, size);
+ }
+}
+#endif // #ifndef PRODUCT
+
+// This is the working definition of a server class machine:
+// >= 2 physical CPUs and >= 2GB of memory, with some fuzz
+// because the graphics memory (?) sometimes masks physical memory.
+// If you want to change the definition of a server class machine
+// on some OS or platform, e.g., >= 4GB on Windows platforms,
+// then you'll have to parameterize this method based on that state,
+// as was done for logical processors here, or replicate and
+// specialize this method for each platform. (Or fix os to have
+// some inheritance structure and use subclassing. Sigh.)
+// If you want some platform to always or never behave as a server
+// class machine, change the setting of AlwaysActAsServerClassMachine
+// and NeverActAsServerClassMachine in globals*.hpp.
+bool os::is_server_class_machine() {
+ // First check for the early returns
+ if (NeverActAsServerClassMachine) {
+ return false;
+ }
+ if (AlwaysActAsServerClassMachine) {
+ return true;
+ }
+ // Then actually look at the machine
+ bool result = false;
+ const unsigned int server_processors = 2;
+ const julong server_memory = 2UL * G;
+ // We seem not to get our full complement of memory.
+ // We allow some part (1/8?) of the memory to be "missing",
+ // based on the sizes of DIMMs, and maybe graphics cards.
+ const julong missing_memory = 256UL * M;
+
+ /* Is this a server class machine? */
+ if ((os::active_processor_count() >= (int)server_processors) &&
+ (os::physical_memory() >= (server_memory - missing_memory))) {
+ const unsigned int logical_processors =
+ VM_Version::logical_processors_per_package();
+ if (logical_processors > 1) {
+ const unsigned int physical_packages =
+ os::active_processor_count() / logical_processors;
+ if (physical_packages > server_processors) {
+ result = true;
+ }
+ } else {
+ result = true;
+ }
+ }
+ return result;
+}
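+
+// Worked example (hypothetical machine): 8 active processors with 2 logical
+// processors per package gives 4 physical packages; 4 > server_processors (2),
+// so with at least ~1.75GB of detected physical memory (2GB minus the allowed
+// "missing" 256MB) the machine is classified as server class.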
diff --git a/src/share/vm/runtime/os.hpp b/src/share/vm/runtime/os.hpp
new file mode 100644
index 000000000..4762efc51
--- /dev/null
+++ b/src/share/vm/runtime/os.hpp
@@ -0,0 +1,596 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// os defines the interface to operating system; this includes traditional
+// OS services (time, I/O) as well as other functionality with system-
+// dependent code.
+
+typedef void (*dll_func)(...);
+
+class Thread;
+class JavaThread;
+class Event;
+class DLL;
+class FileHandle;
+
+// %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
+
+// Platform-independent error return values from OS functions
+enum OSReturn {
+ OS_OK = 0, // Operation was successful
+ OS_ERR = -1, // Operation failed
+ OS_INTRPT = -2, // Operation was interrupted
+ OS_TIMEOUT = -3, // Operation timed out
+ OS_NOMEM = -5, // Operation failed for lack of memory
+ OS_NORESOURCE = -6 // Operation failed for lack of nonmemory resource
+};
+
+enum ThreadPriority { // JLS 20.20.1-3
+ NoPriority = -1, // Initial non-priority value
+ MinPriority = 1, // Minimum priority
+ NormPriority = 5, // Normal (non-daemon) priority
+ NearMaxPriority = 9, // High priority, used for VMThread
+ MaxPriority = 10 // Highest priority, used for WatcherThread
+ // ensures that VMThread doesn't starve profiler
+};
+
+// Typedef for structured exception handling support
+typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
+
+class os: AllStatic {
+ private:
+ enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
+
+ static OSThread* _starting_thread;
+ static address _polling_page;
+ static volatile int32_t * _mem_serialize_page;
+ static uintptr_t _serialize_page_mask;
+ static volatile jlong _global_time;
+ static volatile int _global_time_lock;
+ static bool _use_global_time;
+ static size_t _page_sizes[page_sizes_max];
+
+ static void init_page_sizes(size_t default_page_size) {
+ _page_sizes[0] = default_page_size;
+ _page_sizes[1] = 0; // sentinel
+ }
+
+ public:
+
+ static void init(void); // Called before command line parsing
+ static jint init_2(void); // Called after command line parsing
+
+ // File names are case-insensitive on Windows only.
+ // Override me as needed.
+ static int file_name_strcmp(const char* s1, const char* s2);
+
+ static bool getenv(const char* name, char* buffer, int len);
+ static bool have_special_privileges();
+
+ static jlong timeofday();
+ static void enable_global_time() { _use_global_time = true; }
+ static void disable_global_time() { _use_global_time = false; }
+ static jlong read_global_time();
+ static void update_global_time();
+ static jlong javaTimeMillis();
+ static jlong javaTimeNanos();
+ static void javaTimeNanos_info(jvmtiTimerInfo *info_ptr);
+ static void run_periodic_checks();
+
+
+ // Returns the elapsed time in seconds since the vm started.
+ static double elapsedTime();
+
+ // Returns real time in seconds since an arbitrary point
+ // in the past.
+ static bool getTimesSecs(double* process_real_time,
+ double* process_user_time,
+ double* process_system_time);
+
+ // Interface to the performance counter
+ static jlong elapsed_counter();
+ static jlong elapsed_frequency();
+
+ // Return current local time in a string (YYYY-MM-DD HH:MM:SS).
+ // It is MT safe, but not async-safe, as reading time zone
+ // information may require a lock on some platforms.
+ static char* local_time_string(char *buf, size_t buflen);
+ // Fill in buffer with current local time as an ISO-8601 string.
+ // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
+ // Returns buffer, or NULL if it failed.
+ static char* iso8601_time(char* buffer, size_t buffer_length);
+
+ // Interface for detecting multiprocessor system
+ static inline bool is_MP() {
+ assert(_processor_count > 0, "invalid processor count");
+ return _processor_count > 1;
+ }
+ static julong available_memory();
+ static julong physical_memory();
+ static julong allocatable_physical_memory(julong size);
+ static bool is_server_class_machine();
+
+ // number of CPUs
+ static int processor_count() {
+ return _processor_count;
+ }
+
+ // Returns the number of CPUs this process is currently allowed to run on.
+ // Note that on some OSes this can change dynamically.
+ static int active_processor_count();
+
+ // Bind processes to processors.
+ // This is a two step procedure:
+ // first you generate a distribution of processes to processors,
+ // then you bind processes according to that distribution.
+ // Compute a distribution for number of processes to processors.
+ // Stores the processor id's into the distribution array argument.
+ // Returns true if it worked, false if it didn't.
+ static bool distribute_processes(uint length, uint* distribution);
+ // Binds the current process to a processor.
+ // Returns true if it worked, false if it didn't.
+ static bool bind_to_processor(uint processor_id);
+
+ // Interface for stack banging (predetect possible stack overflow for
+ // exception processing) There are guard pages, and above that shadow
+ // pages for stack overflow checking.
+ static bool uses_stack_guard_pages();
+ static bool allocate_stack_guard_pages();
+ static void bang_stack_shadow_pages();
+ static bool stack_shadow_pages_available(Thread *thread, methodHandle method);
+
+ // OS interface to Virtual Memory
+
+ // Return the default page size.
+ static int vm_page_size();
+
+ // Return the page size to use for a region of memory. The min_pages argument
+ // is a hint intended to limit fragmentation; it says the returned page size
+ // should be <= region_max_size / min_pages. Because min_pages is a hint,
+ // this routine may return a size larger than region_max_size / min_pages.
+ //
+ // The current implementation ignores min_pages if a larger page size is an
+ // exact multiple of both region_min_size and region_max_size. This allows
+ // larger pages to be used when doing so would not cause fragmentation; in
+ // particular, a single page can be used when region_min_size ==
+ // region_max_size == a supported page size.
+ static size_t page_size_for_region(size_t region_min_size,
+ size_t region_max_size,
+ uint min_pages);
+
+ // Method for tracing page sizes returned by the above method; enabled by
+ // TracePageSizes. The region_{min,max}_size parameters should be the values
+ // passed to page_size_for_region() and page_size should be the result of that
+ // call. The (optional) base and size parameters should come from the
+ // ReservedSpace base() and size() methods.
+ static void trace_page_sizes(const char* str, const size_t region_min_size,
+ const size_t region_max_size,
+ const size_t page_size,
+ const char* base = NULL,
+ const size_t size = 0) PRODUCT_RETURN;
+
+ static int vm_allocation_granularity();
+ static char* reserve_memory(size_t bytes, char* addr = 0,
+ size_t alignment_hint = 0);
+ static char* attempt_reserve_memory_at(size_t bytes, char* addr);
+ static void split_reserved_memory(char *base, size_t size,
+ size_t split, bool realloc);
+ static bool commit_memory(char* addr, size_t bytes);
+ static bool commit_memory(char* addr, size_t size, size_t alignment_hint);
+ static bool uncommit_memory(char* addr, size_t bytes);
+ static bool release_memory(char* addr, size_t bytes);
+ static bool protect_memory(char* addr, size_t bytes);
+ static bool guard_memory(char* addr, size_t bytes);
+ static bool unguard_memory(char* addr, size_t bytes);
+ static char* map_memory(int fd, const char* file_name, size_t file_offset,
+ char *addr, size_t bytes, bool read_only = false,
+ bool allow_exec = false);
+ static char* remap_memory(int fd, const char* file_name, size_t file_offset,
+ char *addr, size_t bytes, bool read_only,
+ bool allow_exec);
+ static bool unmap_memory(char *addr, size_t bytes);
+ static void free_memory(char *addr, size_t bytes);
+ static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
+
+ // NUMA-specific interface
+ static void numa_make_local(char *addr, size_t bytes);
+ static void numa_make_global(char *addr, size_t bytes);
+ static size_t numa_get_groups_num();
+ static size_t numa_get_leaf_groups(int *ids, size_t size);
+ static bool numa_topology_changed();
+ static int numa_get_group_id();
+
+ // Page manipulation
+ struct page_info {
+ size_t size;
+ int lgrp_id;
+ };
+ static bool get_page_info(char *start, page_info* info);
+ static char* scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found);
+
+ static char* non_memory_address_word();
+ // reserve, commit and pin the entire memory region
+ static char* reserve_memory_special(size_t size);
+ static bool release_memory_special(char* addr, size_t bytes);
+ static bool large_page_init();
+ static size_t large_page_size();
+ static bool can_commit_large_page_memory();
+
+ // OS interface to polling page
+ static address get_polling_page() { return _polling_page; }
+ static void set_polling_page(address page) { _polling_page = page; }
+ static bool is_poll_address(address addr) { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); }
+ static void make_polling_page_unreadable();
+ static void make_polling_page_readable();
+
+ // Routines used to serialize the thread state without using membars
+ static void serialize_thread_states();
+
+ // Since we write to the serialize page from every thread, we
+ // want stores to be on unique cache lines whenever possible
+ // in order to minimize CPU cross talk. We pre-compute the
+ // amount to shift the thread* to make this offset unique to
+ // each thread.
+ static int get_serialize_page_shift_count() {
+ return SerializePageShiftCount;
+ }
+
+ static void set_serialize_page_mask(uintptr_t mask) {
+ _serialize_page_mask = mask;
+ }
+
+ static unsigned int get_serialize_page_mask() {
+ return _serialize_page_mask;
+ }
+
+ static void set_memory_serialize_page(address page);
+
+ static address get_memory_serialize_page() {
+ return (address)_mem_serialize_page;
+ }
+
+ static inline void write_memory_serialize_page(JavaThread *thread) {
+ uintptr_t page_offset = ((uintptr_t)thread >>
+ get_serialize_page_shift_count()) &
+ get_serialize_page_mask();
+ *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1;
+ }
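+
+ // Worked example (hypothetical sizes): if sizeof(JavaThread) rounds up to 2^11
+ // and the cache line size is 64 = 2^6 bytes, the shift count is 11 - 6 = 5, so
+ // two thread objects 2K apart map to page offsets 64 bytes apart, i.e. distinct
+ // cache lines (and distinct 32-bit slots) of the serialize page, modulo the mask.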
+
+ static bool is_memory_serialize_page(JavaThread *thread, address addr) {
+ address thr_addr;
+ if (UseMembar) return false;
+ // Calculate thread specific address
+ if (thread == NULL) return false;
+ // TODO-FIXME: some platforms mask off faulting addresses to the base pagesize.
+ // Instead of using a test for equality we should probably use something
+ // of the form:
+ // return ((_mem_serialize_page ^ addr) & -pagesize) == 0
+ //
+ thr_addr = (address)(((uintptr_t)thread >>
+ get_serialize_page_shift_count()) &
+ get_serialize_page_mask()) + (uintptr_t)_mem_serialize_page;
+ return (thr_addr == addr);
+ }
+
+ static void block_on_serialize_page_trap();
+
+ // threads
+
+ enum ThreadType {
+ vm_thread,
+ cgc_thread, // Concurrent GC thread
+ pgc_thread, // Parallel GC thread
+ java_thread,
+ compiler_thread,
+ watcher_thread
+ };
+
+ static bool create_thread(Thread* thread,
+ ThreadType thr_type,
+ size_t stack_size = 0);
+ static bool create_main_thread(JavaThread* thread);
+ static bool create_attached_thread(JavaThread* thread);
+ static void pd_start_thread(Thread* thread);
+ static void start_thread(Thread* thread);
+
+ static void initialize_thread();
+ static void free_thread(OSThread* osthread);
+
+ // Thread id is 64-bit on 64-bit Linux; it is 32-bit on Windows and Solaris.
+ static intx current_thread_id();
+ static int current_process_id();
+ // hpi::read for calls from non native state
+ // For performance, hpi::read is only callable from _thread_in_native
+ static size_t read(int fd, void *buf, unsigned int nBytes);
+ static int sleep(Thread* thread, jlong ms, bool interruptable);
+ static int naked_sleep();
+ static void infinite_sleep(); // never returns, use with CAUTION
+ static void yield(); // Yields to all threads with same priority
+ enum YieldResult {
+ YIELD_SWITCHED = 1, // caller descheduled, other ready threads exist & ran
+ YIELD_NONEREADY = 0, // No other runnable/ready threads;
+ // the platform-specific yield returned immediately.
+ YIELD_UNKNOWN = -1 // Unknown: platform doesn't support _SWITCHED or _NONEREADY
+ // YIELD_SWITCHED and YIELD_NONEREADY imply the platform supports a "strong"
+ // yield that can be used in lieu of blocking.
+ };
+ static YieldResult NakedYield () ;
+ static void yield_all(int attempts = 0); // Yields to all other threads including lower priority
+ static void loop_breaker(int attempts); // called from within tight loops to possibly influence time-sharing
+ static OSReturn set_priority(Thread* thread, ThreadPriority priority);
+ static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);
+
+ static void interrupt(Thread* thread);
+ static bool is_interrupted(Thread* thread, bool clear_interrupted);
+
+ static int pd_self_suspend_thread(Thread* thread);
+
+ static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp);
+ static frame fetch_frame_from_context(void* ucVoid);
+
+ static ExtendedPC get_thread_pc(Thread *thread);
+ static void breakpoint();
+
+ static address current_stack_pointer();
+ static address current_stack_base();
+ static size_t current_stack_size();
+
+ static int message_box(const char* title, const char* message);
+ static char* do_you_want_to_debug(const char* message);
+
+ // run cmd in a separate process and return its exit code; or -1 on failures
+ static int fork_and_exec(char *cmd);
+
+ // Set file to send error reports.
+ static void set_error_file(const char *logfile);
+
+ // os::exit() is merged with vm_exit()
+ // static void exit(int num);
+
+ // Terminate the VM, but don't exit the process
+ static void shutdown();
+
+ // Terminate with an error. Default is to generate a core file on platforms
+ // that support such things. This calls shutdown() and then aborts.
+ static void abort(bool dump_core = true);
+
+ // Die immediately, no exit hook, no abort hook, no cleanup.
+ static void die();
+
+ // Reading directories.
+ static DIR* opendir(const char* dirname);
+ static int readdir_buf_size(const char *path);
+ static struct dirent* readdir(DIR* dirp, dirent* dbuf);
+ static int closedir(DIR* dirp);
+
+ // Dynamic library extension
+ static const char* dll_file_extension();
+
+ static const char* get_temp_directory();
+ static const char* get_current_directory(char *buf, int buflen);
+
+ // Symbol lookup, find nearest function name; basically it implements
+ // dladdr() for all platforms. Name of the nearest function is copied
+ // to buf. Distance from its base address is returned as offset.
+ // If function name is not found, buf[0] is set to '\0' and offset is
+ // set to -1.
+ static bool dll_address_to_function_name(address addr, char* buf,
+ int buflen, int* offset);
+
+ // Locate DLL/DSO. On success, full path of the library is copied to
+ // buf, and offset is set to be the distance between addr and the
+ // library's base address. On failure, buf[0] is set to '\0' and
+ // offset is set to -1.
+ static bool dll_address_to_library_name(address addr, char* buf,
+ int buflen, int* offset);
+
+ // Find out whether the pc is in the static code for jvm.dll/libjvm.so.
+ static bool address_is_in_vm(address addr);
+
+ // Loads .dll/.so and
+ // in case of error it checks if .dll/.so was built for the
+ // same architecture as Hotspot is running on
+ static void* dll_load(const char *name, char *ebuf, int ebuflen);
+
+ // Print out system information; they are called by fatal error handler.
+ // Output format may be different on different platforms.
+ static void print_os_info(outputStream* st);
+ static void print_cpu_info(outputStream* st);
+ static void print_memory_info(outputStream* st);
+ static void print_dll_info(outputStream* st);
+ static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len);
+ static void print_context(outputStream* st, void* context);
+ static void print_siginfo(outputStream* st, void* siginfo);
+ static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
+ static void print_date_and_time(outputStream* st);
+
+ // The following two functions are used by fatal error handler to trace
+ // native (C) frames. They are not part of frame.hpp/frame.cpp because
+ // frame.hpp/cpp assume thread is JavaThread, and also because different
+ // OS/compiler may have different convention or provide different API to
+ // walk C frames.
+ //
+ // We don't attempt to become a debugger, so we only follow frames if that
+ // does not require a lookup in the unwind table, which is part of the binary
+ // file but may be unsafe to read after a fatal error. So on x86, we can
+ // only walk stack if %ebp is used as frame pointer; on ia64, it's not
+ // possible to walk C stack without having the unwind table.
+ static bool is_first_C_frame(frame *fr);
+ static frame get_sender_for_C_frame(frame *fr);
+
+ // return current frame. pc() and sp() are set to NULL on failure.
+ static frame current_frame();
+
+ static void print_hex_dump(outputStream* st, address start, address end, int unitsize);
+
+ // returns a string to describe the exception/signal;
+ // returns NULL if exception_code is not an OS exception/signal.
+ static const char* exception_name(int exception_code, char* buf, size_t buflen);
+
+ // Returns native Java library, loads if necessary
+ static void* native_java_library();
+
+ // Fills in path to jvm.dll/libjvm.so (this info used to find hpi).
+ static void jvm_path(char *buf, jint buflen);
+
+ // JNI names
+ static void print_jni_name_prefix_on(outputStream* st, int args_size);
+ static void print_jni_name_suffix_on(outputStream* st, int args_size);
+
+ // File conventions
+ static const char* file_separator();
+ static const char* line_separator();
+ static const char* path_separator();
+
+ // Init os specific system properties values
+ static void init_system_properties_values();
+
+ // IO operations, non-JVM_ version.
+ static int stat(const char* path, struct stat* sbuf);
+ static bool dir_is_empty(const char* path);
+
+ // IO operations on binary files
+ static int create_binary_file(const char* path, bool rewrite_existing);
+ static jlong current_file_offset(int fd);
+ static jlong seek_to_file_offset(int fd, jlong offset);
+
+ // Thread Local Storage
+ static int allocate_thread_local_storage();
+ static void thread_local_storage_at_put(int index, void* value);
+ static void* thread_local_storage_at(int index);
+ static void free_thread_local_storage(int index);
+
+ // General allocation (must be MT-safe)
+ static void* malloc (size_t size);
+ static void* realloc (void *memblock, size_t size);
+ static void free (void *memblock);
+ static bool check_heap(bool force = false); // verify C heap integrity
+ static char* strdup(const char *); // Like strdup
+
+#ifndef PRODUCT
+ static int num_mallocs; // # of calls to malloc/realloc
+ static size_t alloc_bytes; // # of bytes allocated
+ static int num_frees; // # of calls to free
+#endif
+
+ // Printing 64 bit integers
+ static const char* jlong_format_specifier();
+ static const char* julong_format_specifier();
+
+ // Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
+ static void signal_init();
+ static void signal_init_pd();
+ static void signal_notify(int signal_number);
+ static void* signal(int signal_number, void* handler);
+ static void signal_raise(int signal_number);
+ static int signal_wait();
+ static int signal_lookup();
+ static void* user_handler();
+ static void terminate_signal_thread();
+ static int sigexitnum_pd();
+
+ // random number generation
+ static long random(); // return 32bit pseudorandom number
+ static void init_random(long initval); // initialize random sequence
+
+ // Structured OS Exception support
+ static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
+
+ // JVMTI & JVM monitoring and management support
+ // The thread_cpu_time() and current_thread_cpu_time() are only
+ // supported if is_thread_cpu_time_supported() returns true.
+ // They are not supported on Solaris T1.
+
+ // Thread CPU Time - return the fast estimate on a platform
+ // On Solaris - call gethrvtime (fast) - user time only
+ // On Linux - fast clock_gettime where available - user+sys
+ // - otherwise: very slow /proc fs - user+sys
+ // On Windows - GetThreadTimes - user+sys
+ static jlong current_thread_cpu_time();
+ static jlong thread_cpu_time(Thread* t);
+
+ // Thread CPU Time with user_sys_cpu_time parameter.
+ //
+ // If user_sys_cpu_time is true, user+sys time is returned.
+ // Otherwise, only user time is returned
+ static jlong current_thread_cpu_time(bool user_sys_cpu_time);
+ static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time);
+
+ // Return a bunch of info about the timers.
+ // Note that the returned info for these two functions may be different
+ // on some platforms
+ static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
+ static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
+
+ static bool is_thread_cpu_time_supported();
+
+ // System loadavg support. Returns -1 if load average cannot be obtained.
+ static int loadavg(double loadavg[], int nelem);
+
+ // Hook for os specific jvm options that we don't want to abort on seeing
+ static bool obsolete_option(const JavaVMOption *option);
+
+ // Platform dependent stuff
+ #include "incls/_os_pd.hpp.incl"
+
+ // debugging support (mostly used by debug.cpp)
+ static bool find(address pc) PRODUCT_RETURN0; // OS specific function to make sense out of an address
+
+ static bool dont_yield(); // when true, JVM_Yield() is nop
+ static void print_statistics();
+
+ // Thread priority helpers (implemented in OS-specific part)
+ static OSReturn set_native_priority(Thread* thread, int native_prio);
+ static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
+ static int java_to_os_priority[MaxPriority + 1];
+ // Hint to the underlying OS that a task switch would not be good.
+ // Void return because it's a hint and can fail.
+ static void hint_no_preempt();
+
+ // Used at creation if requested by the diagnostic flag PauseAtStartup.
+ // Causes the VM to wait until an external stimulus has been applied
+ // (for Unix, that stimulus is a signal, for Windows, an external
+ // ResumeThread call)
+ static void pause();
+
+ protected:
+ static long _rand_seed; // seed for random number generator
+ static int _processor_count; // number of processors
+
+ static char* format_boot_path(const char* format_string,
+ const char* home,
+ int home_len,
+ char fileSep,
+ char pathSep);
+ static bool set_boot_path(char fileSep, char pathSep);
+};
+
+// Note that "PAUSE" is almost always used with synchronization
+// so arguably we should provide Atomic::SpinPause() instead
+// of the global SpinPause() with C linkage.
+// It'd also be eligible for inlining on many platforms.
+
+extern "C" int SpinPause () ;
+extern "C" int SafeFetch32 (int * adr, int errValue) ;
+extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) ;
diff --git a/src/share/vm/runtime/osThread.cpp b/src/share/vm/runtime/osThread.cpp
new file mode 100644
index 000000000..f8c78628b
--- /dev/null
+++ b/src/share/vm/runtime/osThread.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_osThread.cpp.incl"
+
+
+OSThread::OSThread(OSThreadStartFunc start_proc, void* start_parm) {
+ pd_initialize();
+ set_start_proc(start_proc);
+ set_start_parm(start_parm);
+ set_interrupted(false);
+}
+
+OSThread::~OSThread() {
+ pd_destroy();
+}
+
+// Printing
+void OSThread::print_on(outputStream *st) const {
+ st->print("nid=0x%lx ", thread_id());
+ switch (_state) {
+ case ALLOCATED: st->print("allocated "); break;
+ case INITIALIZED: st->print("initialized "); break;
+ case RUNNABLE: st->print("runnable "); break;
+ case MONITOR_WAIT: st->print("waiting for monitor entry "); break;
+ case CONDVAR_WAIT: st->print("waiting on condition "); break;
+ case OBJECT_WAIT: st->print("in Object.wait() "); break;
+ case BREAKPOINTED: st->print("at breakpoint"); break;
+ case SLEEPING: st->print("sleeping"); break;
+ case ZOMBIE: st->print("zombie"); break;
+ default: st->print("unknown state %d", _state); break;
+ }
+}
diff --git a/src/share/vm/runtime/osThread.hpp b/src/share/vm/runtime/osThread.hpp
new file mode 100644
index 000000000..1491217ff
--- /dev/null
+++ b/src/share/vm/runtime/osThread.hpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// The OSThread class holds OS-specific thread information. It is equivalent
+// to the sys_thread_t structure of the classic JVM implementation.
+
+// The thread states represented by the ThreadState values are platform-specific
+// and are likely to be only approximate, because most OSes don't give you access
+// to precise thread state information.
+
+// Note: the ThreadState is legacy code and is not correctly implemented.
+// Uses of ThreadState need to be replaced by the state in the JavaThread.
+
+enum ThreadState {
+ ALLOCATED, // Memory has been allocated but not initialized
+ INITIALIZED, // The thread has been initialized but not yet started
+ RUNNABLE, // Has been started and is runnable, but not necessarily running
+ MONITOR_WAIT, // Waiting on a contended monitor lock
+ CONDVAR_WAIT, // Waiting on a condition variable
+ OBJECT_WAIT, // Waiting on an Object.wait() call
+ BREAKPOINTED, // Suspended at breakpoint
+ SLEEPING, // Thread.sleep()
+ ZOMBIE // All done, but not reclaimed yet
+};
+
+// I'd make OSThread a ValueObj embedded in Thread to avoid an indirection, but
+// the assembler test in java.cpp expects that it can install the OSThread of
+// the main thread into its own Thread at will.
+
+
+class OSThread: public CHeapObj {
+ friend class VMStructs;
+ private:
+ //void* _start_proc; // Thread start routine
+ OSThreadStartFunc _start_proc; // Thread start routine
+ void* _start_parm; // Thread start routine parameter
+ volatile ThreadState _state; // Thread state *hint*
+ jint _interrupted; // Thread.isInterrupted state
+
+ // Note: _interrupted must be jint, so that Java intrinsics can access it.
+ // The value stored there must be either 0 or 1. It must be possible
+ // for Java to emulate Thread.currentThread().isInterrupted() by performing
+ // the double indirection Thread::current()->_osthread->_interrupted.
+
+ // Methods
+ public:
+ void set_state(ThreadState state) { _state = state; }
+ ThreadState get_state() { return _state; }
+
+ // Constructor
+ OSThread(OSThreadStartFunc start_proc, void* start_parm);
+
+ // Destructor
+ ~OSThread();
+
+ // Accessors
+ OSThreadStartFunc start_proc() const { return _start_proc; }
+ void set_start_proc(OSThreadStartFunc start_proc) { _start_proc = start_proc; }
+ void* start_parm() const { return _start_parm; }
+ void set_start_parm(void* start_parm) { _start_parm = start_parm; }
+
+ bool interrupted() const { return _interrupted != 0; }
+ void set_interrupted(bool z) { _interrupted = z ? 1 : 0; }
+
+ // Printing
+ void print_on(outputStream* st) const;
+ void print() const { print_on(tty); }
+
+ // For java intrinsics:
+ static ByteSize interrupted_offset() { return byte_offset_of(OSThread, _interrupted); }
+
+ // Platform dependent stuff
+ #include "incls/_osThread_pd.hpp.incl"
+};
+
+
+// Utility class for use with condition variables:
+class OSThreadWaitState : public StackObj {
+ OSThread* _osthread;
+ ThreadState _old_state;
+ public:
+ OSThreadWaitState(OSThread* osthread, bool is_object_wait) {
+ _osthread = osthread;
+ _old_state = osthread->get_state();
+ if (is_object_wait) {
+ osthread->set_state(OBJECT_WAIT);
+ } else {
+ osthread->set_state(CONDVAR_WAIT);
+ }
+ }
+ ~OSThreadWaitState() {
+ _osthread->set_state(_old_state);
+ }
+};
+
+
+// Utility class for use with contended monitors:
+class OSThreadContendState : public StackObj {
+ OSThread* _osthread;
+ ThreadState _old_state;
+ public:
+ OSThreadContendState(OSThread* osthread) {
+ _osthread = osthread;
+ _old_state = osthread->get_state();
+ osthread->set_state(MONITOR_WAIT);
+ }
+ ~OSThreadContendState() {
+ _osthread->set_state(_old_state);
+ }
+};
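+
+// Illustrative sketch (not part of the original interface): how the RAII
+// wrappers above are expected to bracket a blocking operation. The platform
+// wait primitive is a placeholder; only the ThreadState transitions made by
+// the wrapper are the point of the example.
+//
+//   void block_on_condvar(OSThread* osthread) {
+//     OSThreadWaitState osts(osthread, false /* CONDVAR_WAIT, not OBJECT_WAIT */);
+//     // ... call the platform-specific condition variable wait here ...
+//   } // destructor restores the previous ThreadState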
diff --git a/src/share/vm/runtime/perfData.cpp b/src/share/vm/runtime/perfData.cpp
new file mode 100644
index 000000000..e928e407e
--- /dev/null
+++ b/src/share/vm/runtime/perfData.cpp
@@ -0,0 +1,594 @@
+/*
+ * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_perfData.cpp.incl"
+
+PerfDataList* PerfDataManager::_all = NULL;
+PerfDataList* PerfDataManager::_sampled = NULL;
+PerfDataList* PerfDataManager::_constants = NULL;
+
+/*
+ * The jvmstat global and subsystem counter name spaces. The top
+ * level name spaces imply the interface stability level of the counter,
+ * which generally follows the Java package, class, and property naming
+ * conventions. The CounterNS enumeration values should be used to index
+ * into this array.
+ */
+const char* PerfDataManager::_name_spaces[] = {
+ // top level name spaces
+ "java", // stable and supported name space
+ "com.sun", // unstable but supported name space
+ "sun", // unstable and unsupported name space
+ // subsystem name spaces
+ "java.gc", // Garbage Collection name spaces
+ "com.sun.gc",
+ "sun.gc",
+ "java.ci", // Compiler name spaces
+ "com.sun.ci",
+ "sun.ci",
+ "java.cls", // Class Loader name spaces
+ "com.sun.cls",
+ "sun.cls",
+ "java.rt", // Runtime name spaces
+ "com.sun.rt",
+ "sun.rt",
+ "java.os", // Operating System name spaces
+ "com.sun.os",
+ "sun.os",
+ "java.threads", // Threads System name spaces
+ "com.sun.threads",
+ "sun.threads",
+ "java.property", // Java Property name spaces
+ "com.sun.property",
+ "sun.property",
+ "",
+};
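+
+// Note on layout: the strings above are grouped in threes (java, com.sun,
+// sun) in the same order as the CounterNS enumeration, which is what allows
+// the (ns % 3) stability tests in perfData.hpp to recover the top level
+// name space. For example, SUN_GC indexes "sun.gc" and SUN_GC % 3 == SUN_NS.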
+
+PerfData::PerfData(CounterNS ns, const char* name, Units u, Variability v)
+ : _name(NULL), _u(u), _v(v), _valuep(NULL),
+ _on_c_heap(false) {
+
+ const char* prefix = PerfDataManager::ns_to_string(ns);
+
+ _name = NEW_C_HEAP_ARRAY(char, strlen(name) + strlen(prefix) + 2);
+ assert(_name != NULL && strlen(name) != 0, "invalid name");
+
+ if (ns == NULL_NS) {
+ // No prefix is added to counters with the NULL_NS namespace.
+ strcpy(_name, name);
+ // set the F_Supported flag based on the counter name prefix.
+ if (PerfDataManager::is_stable_supported(_name) ||
+ PerfDataManager::is_unstable_supported(_name)) {
+ _flags = F_Supported;
+ }
+ else {
+ _flags = F_None;
+ }
+ }
+ else {
+ sprintf(_name, "%s.%s", prefix, name);
+ // set the F_Supported flag based on the given namespace.
+ if (PerfDataManager::is_stable_supported(ns) ||
+ PerfDataManager::is_unstable_supported(ns)) {
+ _flags = F_Supported;
+ }
+ else {
+ _flags = F_None;
+ }
+ }
+}
+
+PerfData::~PerfData() {
+ if (_name != NULL) {
+ FREE_C_HEAP_ARRAY(char, _name);
+ }
+ if (is_on_c_heap()) {
+ FREE_C_HEAP_ARRAY(PerfDataEntry, _pdep);
+ }
+}
+
+void PerfData::create_entry(BasicType dtype, size_t dsize, size_t vlen) {
+
+ size_t dlen = vlen==0 ? 1 : vlen;
+
+ size_t namelen = strlen(name()) + 1; // include null terminator
+ size_t size = sizeof(PerfDataEntry) + namelen;
+ size_t pad_length = ((size % dsize) == 0) ? 0 : dsize - (size % dsize);
+ size += pad_length;
+ size_t data_start = size;
+ size += (dsize * dlen);
+
+ // align size to assure allocation in units of 8 bytes
+ int align = sizeof(jlong) - 1;
+ size = ((size + align) & ~align);
+ char* psmp = PerfMemory::alloc(size);
+
+ if (psmp == NULL) {
+ // out of PerfMemory memory resources. allocate on the C heap
+ // to avoid vm termination.
+ psmp = NEW_C_HEAP_ARRAY(char, size);
+ _on_c_heap = true;
+ }
+
+ // compute the addresses for the name and data
+ char* cname = psmp + sizeof(PerfDataEntry);
+
+ // data is in the last dsize*dlen bytes of the entry
+ void* valuep = (void*) (psmp + data_start);
+
+ assert(is_on_c_heap() || PerfMemory::contains(cname), "just checking");
+ assert(is_on_c_heap() || PerfMemory::contains((char*)valuep), "just checking");
+
+ // copy the name, including null terminator, into PerfData memory
+ strcpy(cname, name());
+
+
+ // set the header values in PerfData memory
+ PerfDataEntry* pdep = (PerfDataEntry*)psmp;
+ pdep->entry_length = (jint)size;
+ pdep->name_offset = (jint) ((uintptr_t) cname - (uintptr_t) psmp);
+ pdep->vector_length = (jint)vlen;
+ pdep->data_type = (jbyte) type2char(dtype);
+ pdep->data_units = units();
+ pdep->data_variability = variability();
+ pdep->flags = (jbyte)flags();
+ pdep->data_offset = (jint) data_start;
+
+ if (PerfTraceDataCreation) {
+ tty->print("name = %s, dtype = %d, variability = %d,"
+ " units = %d, dsize = %d, vlen = %d,"
+ " pad_length = %d, size = %d, on_c_heap = %s,"
+ " address = " INTPTR_FORMAT ","
+ " data address = " INTPTR_FORMAT "\n",
+ cname, dtype, variability(),
+ units(), dsize, vlen,
+ pad_length, size, is_on_c_heap() ? "TRUE":"FALSE",
+ psmp, valuep);
+ }
+
+ // record the start of the entry and the location of the data field.
+ _pdep = pdep;
+ _valuep = valuep;
+
+ // mark the PerfData memory region as having been updated.
+ PerfMemory::mark_updated();
+}
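+
+// Worked example of the sizing arithmetic above. The sizeof(PerfDataEntry)
+// value of 20 bytes is assumed here only to make the numbers concrete:
+//
+//   name = "sun.gc.foo"                 -> namelen = 11
+//   dtype = T_LONG, dsize = 8, vlen = 0 -> dlen = 1
+//   size = 20 + 11 = 31; pad_length = 8 - (31 % 8) = 1 -> size = 32
+//   data_start = 32; size += 8 * 1 -> 40; jlong alignment leaves size at 40
+//
+// giving entry_length = 40, name_offset = 20 and data_offset = 32.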
+
+PerfLong::PerfLong(CounterNS ns, const char* namep, Units u, Variability v)
+ : PerfData(ns, namep, u, v) {
+
+ create_entry(T_LONG, sizeof(jlong));
+}
+
+int PerfLong::format(char* buffer, int length) {
+ return jio_snprintf(buffer, length,"%lld", *(jlong*)_valuep);
+}
+
+PerfLongVariant::PerfLongVariant(CounterNS ns, const char* namep, Units u,
+ Variability v, jlong* sampled)
+ : PerfLong(ns, namep, u, v),
+ _sampled(sampled), _sample_helper(NULL) {
+
+ sample();
+}
+
+PerfLongVariant::PerfLongVariant(CounterNS ns, const char* namep, Units u,
+ Variability v, PerfLongSampleHelper* helper)
+ : PerfLong(ns, namep, u, v),
+ _sampled(NULL), _sample_helper(helper) {
+
+ sample();
+}
+
+void PerfLongVariant::sample() {
+
+ assert(_sample_helper != NULL || _sampled != NULL, "unexpected state");
+
+ if (_sample_helper != NULL) {
+ *(jlong*)_valuep = _sample_helper->take_sample();
+ }
+ else if (_sampled != NULL) {
+ *(jlong*)_valuep = *_sampled;
+ }
+}
+
+PerfByteArray::PerfByteArray(CounterNS ns, const char* namep, Units u,
+ Variability v, jint length)
+ : PerfData(ns, namep, u, v), _length(length) {
+
+ create_entry(T_BYTE, sizeof(jbyte), (size_t)_length);
+}
+
+void PerfString::set_string(const char* s2) {
+
+  // copy at most _length bytes of the string, substituting the empty
+  // string when s2 == NULL.
+ strncpy((char *)_valuep, s2 == NULL ? "" : s2, _length);
+
+ // assure the string is null terminated when strlen(s2) >= _length
+ ((char*)_valuep)[_length-1] = '\0';
+}
+
+int PerfString::format(char* buffer, int length) {
+ return jio_snprintf(buffer, length, "%s", (char*)_valuep);
+}
+
+PerfStringConstant::PerfStringConstant(CounterNS ns, const char* namep,
+ const char* initial_value)
+ : PerfString(ns, namep, V_Constant,
+ initial_value == NULL ? 1 :
+ MIN2((jint)(strlen((char*)initial_value)+1),
+ (jint)(PerfMaxStringConstLength+1)),
+ initial_value) {
+
+ if (PrintMiscellaneous && Verbose) {
+ if (is_valid() && initial_value != NULL &&
+ ((jint)strlen(initial_value) > (jint)PerfMaxStringConstLength)) {
+
+ warning("Truncating PerfStringConstant: name = %s,"
+ " length = " INT32_FORMAT ","
+ " PerfMaxStringConstLength = " INT32_FORMAT "\n",
+ namep,
+ (jint)strlen(initial_value),
+ (jint)PerfMaxStringConstLength);
+ }
+ }
+}
+
+
+
+
+
+
+void PerfDataManager::destroy() {
+
+ if (_all == NULL)
+ // destroy already called, or initialization never happened
+ return;
+
+ for (int index = 0; index < _all->length(); index++) {
+ PerfData* p = _all->at(index);
+ delete p;
+ }
+
+ delete(_all);
+ delete(_sampled);
+ delete(_constants);
+
+ _all = NULL;
+ _sampled = NULL;
+ _constants = NULL;
+}
+
+void PerfDataManager::add_item(PerfData* p, bool sampled) {
+
+ MutexLocker ml(PerfDataManager_lock);
+
+ if (_all == NULL) {
+ _all = new PerfDataList(100);
+ }
+
+ assert(!_all->contains(p->name()), "duplicate name added");
+
+ // add to the list of all perf data items
+ _all->append(p);
+
+ if (p->variability() == PerfData::V_Constant) {
+ if (_constants == NULL) {
+ _constants = new PerfDataList(25);
+ }
+ _constants->append(p);
+ return;
+ }
+
+ if (sampled) {
+ if (_sampled == NULL) {
+ _sampled = new PerfDataList(25);
+ }
+ _sampled->append(p);
+ }
+}
+
+PerfDataList* PerfDataManager::all() {
+
+ MutexLocker ml(PerfDataManager_lock);
+
+ if (_all == NULL)
+ return NULL;
+
+ PerfDataList* clone = _all->clone();
+ return clone;
+}
+
+PerfDataList* PerfDataManager::sampled() {
+
+ MutexLocker ml(PerfDataManager_lock);
+
+ if (_sampled == NULL)
+ return NULL;
+
+ PerfDataList* clone = _sampled->clone();
+ return clone;
+}
+
+PerfDataList* PerfDataManager::constants() {
+
+ MutexLocker ml(PerfDataManager_lock);
+
+ if (_constants == NULL)
+ return NULL;
+
+ PerfDataList* clone = _constants->clone();
+ return clone;
+}
+
+char* PerfDataManager::counter_name(const char* ns, const char* name) {
+ assert(ns != NULL, "ns string required");
+ assert(name != NULL, "name string required");
+
+ size_t len = strlen(ns) + strlen(name) + 2;
+ char* result = NEW_RESOURCE_ARRAY(char, len);
+ sprintf(result, "%s.%s", ns, name);
+ return result;
+}
+
+char* PerfDataManager::name_space(const char* ns, const char* sub,
+ int instance) {
+ char intbuf[40];
+ jio_snprintf(intbuf, 40, UINT32_FORMAT, instance);
+ return name_space(ns, name_space(sub, intbuf));
+}
+
+char *PerfDataManager::name_space(const char* ns, int instance) {
+ char intbuf[40];
+ jio_snprintf(intbuf, 40, UINT32_FORMAT, instance);
+ return name_space(ns, intbuf);
+}
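+
+// For example (illustration only, not a counter the VM necessarily creates):
+// name_space("sun.gc.collector", 0) returns "sun.gc.collector.0" and
+// counter_name("sun.gc.collector.0", "invocations") returns
+// "sun.gc.collector.0.invocations". Both strings are resource allocated,
+// so callers must be within a ResourceMark.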
+
+PerfStringConstant* PerfDataManager::create_string_constant(CounterNS ns,
+ const char* name,
+ const char* s,
+ TRAPS) {
+
+ PerfStringConstant* p = new PerfStringConstant(ns, name, s);
+
+ if (!p->is_valid()) {
+ // allocation of native resources failed.
+ delete p;
+ THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+ }
+
+ add_item(p, false);
+
+ return p;
+}
+
+PerfLongConstant* PerfDataManager::create_long_constant(CounterNS ns,
+ const char* name,
+ PerfData::Units u,
+ jlong val, TRAPS) {
+
+ PerfLongConstant* p = new PerfLongConstant(ns, name, u, val);
+
+ if (!p->is_valid()) {
+ // allocation of native resources failed.
+ delete p;
+ THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+ }
+
+ add_item(p, false);
+
+ return p;
+}
+
+PerfStringVariable* PerfDataManager::create_string_variable(CounterNS ns,
+ const char* name,
+ jint max_length,
+ const char* s,
+ TRAPS) {
+
+ if (max_length == 0 && s != NULL) max_length = (jint)strlen(s);
+
+ assert(max_length != 0, "PerfStringVariable with length 0");
+
+ PerfStringVariable* p = new PerfStringVariable(ns, name, max_length, s);
+
+ if (!p->is_valid()) {
+ // allocation of native resources failed.
+ delete p;
+ THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+ }
+
+ add_item(p, false);
+
+ return p;
+}
+
+PerfLongVariable* PerfDataManager::create_long_variable(CounterNS ns,
+ const char* name,
+ PerfData::Units u,
+ jlong ival, TRAPS) {
+
+ PerfLongVariable* p = new PerfLongVariable(ns, name, u, ival);
+
+ if (!p->is_valid()) {
+ // allocation of native resources failed.
+ delete p;
+ THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+ }
+
+ add_item(p, false);
+
+ return p;
+}
+
+PerfLongVariable* PerfDataManager::create_long_variable(CounterNS ns,
+ const char* name,
+ PerfData::Units u,
+ jlong* sp, TRAPS) {
+
+ // Sampled counters not supported if UsePerfData is false
+ if (!UsePerfData) return NULL;
+
+ PerfLongVariable* p = new PerfLongVariable(ns, name, u, sp);
+
+ if (!p->is_valid()) {
+ // allocation of native resources failed.
+ delete p;
+ THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+ }
+
+ add_item(p, true);
+
+ return p;
+}
+
+PerfLongVariable* PerfDataManager::create_long_variable(CounterNS ns,
+ const char* name,
+ PerfData::Units u,
+ PerfSampleHelper* sh,
+ TRAPS) {
+
+ // Sampled counters not supported if UsePerfData is false
+ if (!UsePerfData) return NULL;
+
+ PerfLongVariable* p = new PerfLongVariable(ns, name, u, sh);
+
+ if (!p->is_valid()) {
+ // allocation of native resources failed.
+ delete p;
+ THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+ }
+
+ add_item(p, true);
+
+ return p;
+}
+
+PerfLongCounter* PerfDataManager::create_long_counter(CounterNS ns,
+ const char* name,
+ PerfData::Units u,
+ jlong ival, TRAPS) {
+
+ PerfLongCounter* p = new PerfLongCounter(ns, name, u, ival);
+
+ if (!p->is_valid()) {
+ // allocation of native resources failed.
+ delete p;
+ THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+ }
+
+ add_item(p, false);
+
+ return p;
+}
+
+PerfLongCounter* PerfDataManager::create_long_counter(CounterNS ns,
+ const char* name,
+ PerfData::Units u,
+ jlong* sp, TRAPS) {
+
+ // Sampled counters not supported if UsePerfData is false
+ if (!UsePerfData) return NULL;
+
+ PerfLongCounter* p = new PerfLongCounter(ns, name, u, sp);
+
+ if (!p->is_valid()) {
+ // allocation of native resources failed.
+ delete p;
+ THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+ }
+
+ add_item(p, true);
+
+ return p;
+}
+
+PerfLongCounter* PerfDataManager::create_long_counter(CounterNS ns,
+ const char* name,
+ PerfData::Units u,
+ PerfSampleHelper* sh,
+ TRAPS) {
+
+ // Sampled counters not supported if UsePerfData is false
+ if (!UsePerfData) return NULL;
+
+ PerfLongCounter* p = new PerfLongCounter(ns, name, u, sh);
+
+ if (!p->is_valid()) {
+ // allocation of native resources failed.
+ delete p;
+ THROW_0(vmSymbols::java_lang_OutOfMemoryError());
+ }
+
+ add_item(p, true);
+
+ return p;
+}
+
+PerfDataList::PerfDataList(int length) {
+
+ _set = new(ResourceObj::C_HEAP) PerfDataArray(length, true);
+}
+
+PerfDataList::PerfDataList(PerfDataList* p) {
+
+ _set = new(ResourceObj::C_HEAP) PerfDataArray(p->length(), true);
+
+ _set->appendAll(p->get_impl());
+}
+
+PerfDataList::~PerfDataList() {
+
+ delete _set;
+
+}
+
+bool PerfDataList::by_name(void* name, PerfData* pd) {
+
+ if (pd == NULL)
+ return false;
+
+ return strcmp((const char*)name, pd->name()) == 0;
+}
+
+PerfData* PerfDataList::find_by_name(const char* name) {
+
+ int i = _set->find((void*)name, PerfDataList::by_name);
+
+  if (i >= 0 && i < _set->length())
+ return _set->at(i);
+ else
+ return NULL;
+}
+
+PerfDataList* PerfDataList::clone() {
+
+ PerfDataList* copy = new PerfDataList(this);
+
+ assert(copy != NULL, "just checking");
+
+ return copy;
+}
diff --git a/src/share/vm/runtime/perfData.hpp b/src/share/vm/runtime/perfData.hpp
new file mode 100644
index 000000000..16c0c742b
--- /dev/null
+++ b/src/share/vm/runtime/perfData.hpp
@@ -0,0 +1,955 @@
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/* jvmstat global and subsystem counter name space - the enumeration values
+ * serve as indices into the PerfDataManager::_name_spaces[] array
+ * containing the corresponding name space string. Only the top level
+ * subsystem name spaces are represented here.
+ */
+enum CounterNS {
+ // top level name spaces
+ JAVA_NS,
+ COM_NS,
+ SUN_NS,
+ // subsystem name spaces
+ JAVA_GC, // Garbage Collection name spaces
+ COM_GC,
+ SUN_GC,
+ JAVA_CI, // Compiler name spaces
+ COM_CI,
+ SUN_CI,
+ JAVA_CLS, // Class Loader name spaces
+ COM_CLS,
+ SUN_CLS,
+ JAVA_RT, // Runtime name spaces
+ COM_RT,
+ SUN_RT,
+ JAVA_OS, // Operating System name spaces
+ COM_OS,
+ SUN_OS,
+ JAVA_THREADS, // Threads System name spaces
+ COM_THREADS,
+ SUN_THREADS,
+ JAVA_PROPERTY, // Java Property name spaces
+ COM_PROPERTY,
+ SUN_PROPERTY,
+ NULL_NS,
+ COUNTERNS_LAST = NULL_NS
+};
+
+/*
+ * Classes to support access to production performance data
+ *
+ * The PerfData class structure is provided for creation, access, and update
+ * of performance data (a.k.a. instrumentation) in a specific memory region
+ * which is possibly accessible as shared memory. Although not explicitly
+ * prevented from doing so, developers should not use the values returned
+ * by accessor methods to make algorithmic decisions as they are potentially
+ * extracted from a shared memory region. Although any shared memory region
+ * created has appropriate access restrictions, allowing read-write access
+ * only to the principal that created the JVM, it is believed that the
+ * shared memory region facilitates an easier attack path than attacks
+ * launched through mechanisms such as /proc. For this reason, it is
+ * recommended that data returned by PerfData accessor methods be used
+ * cautiously.
+ *
+ * There are three variability classifications of performance data
+ * Constants - value is written to the PerfData memory once, on creation
+ * Variables - value is modifiable, with no particular restrictions
+ * Counters - value is monotonically changing (increasing or decreasing)
+ *
+ * The performance data items can also have various types. The class
+ * hierarchy and the structure of the memory region are designed to
+ * accommodate new types as they are needed. Types are specified in
+ * terms of Java basic types, which accommodates client applications
+ * written in the Java programming language. The class hierarchy is:
+ *
+ * - PerfData (Abstract)
+ * - PerfLong (Abstract)
+ * - PerfLongConstant (alias: PerfConstant)
+ * - PerfLongVariant (Abstract)
+ * - PerfLongVariable (alias: PerfVariable)
+ * - PerfLongCounter (alias: PerfCounter)
+ *
+ * - PerfByteArray (Abstract)
+ * - PerfString (Abstract)
+ * - PerfStringVariable
+ * - PerfStringConstant
+ *
+ *
+ * As seen in the class hierarchy, the initially supported types are:
+ *
+ * Long - performance data holds a Java long type
+ * ByteArray - performance data holds an array of Java bytes
+ * used for holding C++ char arrays.
+ *
+ * The String type is derived from the ByteArray type.
+ *
+ * A PerfData subtype is not required to provide an implementation for
+ * each variability classification. For example, the String type provides
+ * Variable and Constant variability classifications in the PerfStringVariable
+ * and PerfStringConstant classes, but does not provide a counter type.
+ *
+ * Performance data are also described by a unit of measure. Units allow
+ * client applications to make reasonable decisions on how to treat
+ * performance data generically, preventing the need to hard-code the
+ * specifics of a particular data item in client applications. The current
+ * set of units are:
+ *
+ * None - the data has no units of measure
+ * Bytes - data is measured in bytes
+ * Ticks - data is measured in clock ticks
+ * Events - data is measured in events. For example,
+ * the number of garbage collection events or the
+ * number of methods compiled.
+ * String - data is not numerical. For example,
+ * the java command line options
+ * Hertz - data is a frequency
+ *
+ * The performance counters also provide a support attribute, indicating
+ * the stability of the counter as a programmatic interface. The support
+ * level is also implied by the name space in which the counter is created.
+ * The counter name space support conventions follow the Java package, class,
+ * and property support conventions:
+ *
+ * java.* - stable, supported interface
+ * com.sun.* - unstable, supported interface
+ * sun.* - unstable, unsupported interface
+ *
+ * In the above context, unstable is a measure of the interface support
+ * level, not the implementation stability level.
+ *
+ * Currently, instances of PerfData subtypes are considered to have
+ * a life time equal to that of the VM and are managed by the
+ * PerfDataManager class. All constructors for the PerfData class and
+ * its subtypes have protected constructors. Creation of PerfData
+ * instances is performed by invoking various create methods on the
+ * PerfDataManager class. Users should not attempt to delete these
+ * instances as the PerfDataManager class expects to perform deletion
+ * operations on exit of the VM.
+ *
+ * Examples:
+ *
+ * Creating a performance counter that holds a monotonically increasing
+ * long data value with units specified in U_Bytes in the "java.gc.*"
+ * name space.
+ *
+ * PerfLongCounter* foo_counter;
+ *
+ * foo_counter = PerfDataManager::create_long_counter(JAVA_GC, "foo",
+ * PerfData::U_Bytes,
+ * optionalInitialValue,
+ * CHECK);
+ * foo_counter->inc();
+ *
+ * Creating a performance counter that holds a variable long data value
+ * with units specified in U_Bytes in the "com.sun.ci.*" name space.
+ *
+ * PerfLongVariable* bar_variable;
+ * bar_variable = PerfDataManager::create_long_variable(COM_CI, "bar",
+ *                                       PerfData::U_Bytes,
+ * optionalInitialValue,
+ * CHECK);
+ *
+ * bar_variable->inc();
+ * bar_variable->set_value(0);
+ *
+ * Creating a performance counter that holds a constant string value in
+ * the "sun.cls.*" name space.
+ *
+ * PerfDataManager::create_string_constant(SUN_CLS, "foo", string, CHECK);
+ *
+ * Although the create_string_constant() factory method returns a pointer
+ * to the PerfStringConstant object, it can safely be ignored. Developers
+ * are not encouraged to access the string constant's value via this
+ * pointer at this time due to security concerns.
+ *
+ * Creating a performance counter in an arbitrary name space that holds a
+ * value that is sampled by the StatSampler periodic task.
+ *
+ * PerfDataManager::create_counter("foo.sampled", PerfData::U_Events,
+ * &my_jlong, CHECK);
+ *
+ * In this example, the PerfData pointer can be ignored as the caller
+ * is relying on the StatSampler PeriodicTask to sample the given
+ * address at a regular interval. The interval is defined by the
+ * PerfDataSamplingInterval global variable, and is applied on
+ * a system wide basis, not on a per-counter basis.
+ *
+ * Creating a performance counter in an arbitrary name space that utilizes
+ * a helper object to return a value to the StatSampler via the take_sample()
+ * method.
+ *
+ * class MyTimeSampler : public PerfLongSampleHelper {
+ * public:
+ * jlong take_sample() { return os::elapsed_counter(); }
+ * };
+ *
+ * PerfDataManager::create_counter(SUN_RT, "helped",
+ * PerfData::U_Ticks,
+ * new MyTimeSampler(), CHECK);
+ *
+ * In this example, a subtype of PerfLongSampleHelper is instantiated
+ * and its take_sample() method is overridden to perform whatever
+ * operation is necessary to generate the data sample. This method
+ * will be called by the StatSampler at a regular interval, defined
+ * by the PerfDataSamplingInterval global variable.
+ *
+ * As before, PerfSampleHelper is an alias for PerfLongSampleHelper.
+ *
+ * For additional uses of PerfData subtypes, see the utility classes
+ * PerfTraceTime and PerfTraceTimedEvent below.
+ *
+ * Always-on non-sampled counters can be created independent of
+ * the UsePerfData flag. Counters will be created on the c-heap
+ * if UsePerfData is false.
+ *
+ * Until further notice, all PerfData objects should be created and
+ * manipulated within a guarded block. The guard variable is
+ * UsePerfData, a product flag set to true by default. This flag may
+ * be removed from the product in the future.
+ *
+ */
+class PerfData : public CHeapObj {
+
+ friend class StatSampler; // for access to protected void sample()
+ friend class PerfDataManager; // for access to protected destructor
+
+ public:
+
+  // the Variability enum must be kept in synchronization with
+ // the com.sun.hotspot.perfdata.Variability class
+ enum Variability {
+ V_Constant = 1,
+ V_Monotonic = 2,
+ V_Variable = 3,
+ V_last = V_Variable
+ };
+
+  // the Units enum must be kept in synchronization with
+ // the com.sun.hotspot.perfdata.Units class
+ enum Units {
+ U_None = 1,
+ U_Bytes = 2,
+ U_Ticks = 3,
+ U_Events = 4,
+ U_String = 5,
+ U_Hertz = 6,
+ U_Last = U_Hertz
+ };
+
+ // Miscellaneous flags
+ enum Flags {
+ F_None = 0x0,
+ F_Supported = 0x1 // interface is supported - java.* and com.sun.*
+ };
+
+ private:
+ char* _name;
+ Variability _v;
+ Units _u;
+ bool _on_c_heap;
+ Flags _flags;
+
+ PerfDataEntry* _pdep;
+
+ protected:
+
+ void *_valuep;
+
+ PerfData(CounterNS ns, const char* name, Units u, Variability v);
+ ~PerfData();
+
+ // create the entry for the PerfData item in the PerfData memory region.
+ // this region is maintained separately from the PerfData objects to
+ // facilitate its use by external processes.
+ void create_entry(BasicType dtype, size_t dsize, size_t dlen = 0);
+
+ // sample the data item given at creation time and write its value
+  // into its corresponding PerfMemory location.
+ virtual void sample() = 0;
+
+ public:
+
+ // returns a boolean indicating the validity of this object.
+ // the object is valid if and only if memory in PerfMemory
+ // region was successfully allocated.
+ inline bool is_valid() { return _valuep != NULL; }
+
+ // returns a boolean indicating whether the underlying object
+ // was allocated in the PerfMemory region or on the C heap.
+ inline bool is_on_c_heap() { return _on_c_heap; }
+
+ // returns a pointer to a char* containing the name of the item.
+ // The pointer returned is the pointer to a copy of the name
+ // passed to the constructor, not the pointer to the name in the
+ // PerfData memory region. This redundancy is maintained for
+ // security reasons as the PerfMemory region may be in shared
+ // memory.
+ const char* name() { return _name; }
+
+ // returns the variability classification associated with this item
+ Variability variability() { return _v; }
+
+ // returns the units associated with this item.
+ Units units() { return _u; }
+
+ // returns the flags associated with this item.
+ Flags flags() { return _flags; }
+
+ // returns the address of the data portion of the item in the
+ // PerfData memory region.
+ inline void* get_address() { return _valuep; }
+
+ // returns the value of the data portion of the item in the
+ // PerfData memory region formatted as a string.
+ virtual int format(char* cp, int length) = 0;
+};
+
+/*
+ * PerfLongSampleHelper, and its alias PerfSampleHelper, is a base class
+ * for helper classes that rely upon the StatSampler periodic task to
+ * invoke the take_sample() method and write the value returned to its
+ * appropriate location in the PerfData memory region.
+ */
+class PerfLongSampleHelper : public CHeapObj {
+ public:
+ virtual jlong take_sample() = 0;
+};
+
+typedef PerfLongSampleHelper PerfSampleHelper;
+
+
+/*
+ * PerfLong is the base class for the various Long PerfData subtypes.
+ * It contains implementation details that are common among its derived
+ * types.
+ */
+class PerfLong : public PerfData {
+
+ protected:
+
+ PerfLong(CounterNS ns, const char* namep, Units u, Variability v);
+
+ public:
+ int format(char* buffer, int length);
+
+ // returns the value of the data portion of the item in the
+ // PerfData memory region.
+ inline jlong get_value() { return *(jlong*)_valuep; }
+};
+
+/*
+ * The PerfLongConstant class, and its alias PerfConstant, implement
+ * a PerfData subtype that holds a jlong data value that is set upon
+ * creation of an instance of this class. This class provides no
+ * methods for changing the data value stored in PerfData memory region.
+ */
+class PerfLongConstant : public PerfLong {
+
+ friend class PerfDataManager; // for access to protected constructor
+
+ private:
+ // hide sample() - no need to sample constants
+ void sample() { }
+
+ protected:
+
+ PerfLongConstant(CounterNS ns, const char* namep, Units u,
+ jlong initial_value=0)
+ : PerfLong(ns, namep, u, V_Constant) {
+
+ if (is_valid()) *(jlong*)_valuep = initial_value;
+ }
+};
+
+typedef PerfLongConstant PerfConstant;
+
+/*
+ * The PerfLongVariant class implements a PerfData subtype that holds a
+ * jlong data value that can be modified in an unrestricted manner.
+ * This class provides the implementation details for functionality
+ * common to its derived types, PerfLongCounter and PerfLongVariable.
+ */
+class PerfLongVariant : public PerfLong {
+
+ protected:
+ jlong* _sampled;
+ PerfLongSampleHelper* _sample_helper;
+
+ PerfLongVariant(CounterNS ns, const char* namep, Units u, Variability v,
+ jlong initial_value=0)
+ : PerfLong(ns, namep, u, v) {
+ if (is_valid()) *(jlong*)_valuep = initial_value;
+ }
+
+ PerfLongVariant(CounterNS ns, const char* namep, Units u, Variability v,
+ jlong* sampled);
+
+ PerfLongVariant(CounterNS ns, const char* namep, Units u, Variability v,
+ PerfLongSampleHelper* sample_helper);
+
+ void sample();
+
+ public:
+ inline void inc() { (*(jlong*)_valuep)++; }
+ inline void inc(jlong val) { (*(jlong*)_valuep) += val; }
+ inline void add(jlong val) { (*(jlong*)_valuep) += val; }
+};
+
+/*
+ * The PerfLongCounter class, and its alias PerfCounter, implement
+ * a PerfData subtype that holds a jlong data value that can (should)
+ * be modified in a monotonic manner. The inc(jlong) and add(jlong)
+ * methods can be passed negative values to implement a monotonically
+ * decreasing value. However, we rely upon the programmer to honor
+ * the notion that this counter always moves in the same direction -
+ * either increasing or decreasing.
+ */
+class PerfLongCounter : public PerfLongVariant {
+
+ friend class PerfDataManager; // for access to protected constructor
+
+ protected:
+
+ PerfLongCounter(CounterNS ns, const char* namep, Units u,
+ jlong initial_value=0)
+ : PerfLongVariant(ns, namep, u, V_Monotonic,
+ initial_value) { }
+
+ PerfLongCounter(CounterNS ns, const char* namep, Units u, jlong* sampled)
+ : PerfLongVariant(ns, namep, u, V_Monotonic, sampled) { }
+
+ PerfLongCounter(CounterNS ns, const char* namep, Units u,
+ PerfLongSampleHelper* sample_helper)
+ : PerfLongVariant(ns, namep, u, V_Monotonic,
+ sample_helper) { }
+};
+
+typedef PerfLongCounter PerfCounter;
+
+/*
+ * The PerfLongVariable class, and its alias PerfVariable, implement
+ * a PerfData subtype that holds a jlong data value that can
+ * be modified in an unrestricted manner.
+ */
+class PerfLongVariable : public PerfLongVariant {
+
+ friend class PerfDataManager; // for access to protected constructor
+
+ protected:
+
+ PerfLongVariable(CounterNS ns, const char* namep, Units u,
+ jlong initial_value=0)
+ : PerfLongVariant(ns, namep, u, V_Variable,
+ initial_value) { }
+
+ PerfLongVariable(CounterNS ns, const char* namep, Units u, jlong* sampled)
+ : PerfLongVariant(ns, namep, u, V_Variable, sampled) { }
+
+ PerfLongVariable(CounterNS ns, const char* namep, Units u,
+ PerfLongSampleHelper* sample_helper)
+ : PerfLongVariant(ns, namep, u, V_Variable,
+ sample_helper) { }
+
+ public:
+ inline void set_value(jlong val) { (*(jlong*)_valuep) = val; }
+};
+
+typedef PerfLongVariable PerfVariable;
+
+/*
+ * The PerfByteArray provides a PerfData subtype that allows the creation
+ * of a contiguous region of the PerfData memory region for storing a vector
+ * of bytes. This class is currently intended to be a base class for
+ * the PerfString class, and cannot be instantiated directly.
+ */
+class PerfByteArray : public PerfData {
+
+ protected:
+ jint _length;
+
+ PerfByteArray(CounterNS ns, const char* namep, Units u, Variability v,
+ jint length);
+};
+
+class PerfString : public PerfByteArray {
+
+ protected:
+
+ void set_string(const char* s2);
+
+ PerfString(CounterNS ns, const char* namep, Variability v, jint length,
+ const char* initial_value)
+ : PerfByteArray(ns, namep, U_String, v, length) {
+ if (is_valid()) set_string(initial_value);
+ }
+
+ public:
+
+ int format(char* buffer, int length);
+};
+
+/*
+ * The PerfStringConstant class provides a PerfData sub class that
+ * allows a null terminated string of single byte characters to be
+ * stored in the PerfData memory region.
+ */
+class PerfStringConstant : public PerfString {
+
+ friend class PerfDataManager; // for access to protected constructor
+
+ private:
+
+ // hide sample() - no need to sample constants
+ void sample() { }
+
+ protected:
+
+ // Restrict string constant lengths to be <= PerfMaxStringConstLength.
+ // This prevents long string constants, as can occur with very
+ // long classpaths or java command lines, from consuming too much
+ // PerfData memory.
+ PerfStringConstant(CounterNS ns, const char* namep,
+ const char* initial_value);
+};
+
+/*
+ * The PerfStringVariable class provides a PerfData sub class that
+ * allows a null terminated string of single byte character data
+ * to be stored in PerfData memory region. The string value can be reset
+ * after initialization. If the string value is >= max_length, then
+ * it will be truncated to max_length characters. The copied string
+ * is always null terminated.
+ */
+class PerfStringVariable : public PerfString {
+
+ friend class PerfDataManager; // for access to protected constructor
+
+ protected:
+
+  // sampling of string variables is not yet supported
+ void sample() { }
+
+ PerfStringVariable(CounterNS ns, const char* namep, jint max_length,
+ const char* initial_value)
+ : PerfString(ns, namep, V_Variable, max_length+1,
+ initial_value) { }
+
+ public:
+ inline void set_value(const char* val) { set_string(val); }
+};
+
+
+/*
+ * The PerfDataList class is a container class for managing lists
+ * of PerfData items. The intention of this class is to allow for
+ * alternative implementations for management of lists of PerfData
+ * items without impacting the code that uses the lists.
+ *
+ * The initial implementation is based upon GrowableArray. Searches
+ * on GrowableArray types are linear in nature and this may become
+ * a performance issue for creation of PerfData items, particularly
+ * from Java code where a test for existence is implemented as a
+ * search over all existing PerfData items.
+ *
+ * The abstraction is not complete. A more general container class
+ * would provide an Iterator abstraction that could be used to
+ * traverse the lists. This implementation still relies upon integer
+ * iterators and the at(int index) method. However, the GrowableArray
+ * is not directly visible outside this class and can be replaced by
+ * some other implementation, as long as that implementation provides
+ * a mechanism to iterate over the container by index.
+ */
+class PerfDataList : public CHeapObj {
+
+ private:
+
+ // GrowableArray implementation
+ typedef GrowableArray<PerfData*> PerfDataArray;
+
+ PerfDataArray* _set;
+
+  // method to search for an instrumentation object by name
+ static bool by_name(void* name, PerfData* pd);
+
+ protected:
+ // we expose the implementation here to facilitate the clone
+ // method.
+ PerfDataArray* get_impl() { return _set; }
+
+ public:
+
+ // create a PerfDataList with the given initial length
+ PerfDataList(int length);
+
+ // create a PerfDataList as a shallow copy of the given PerfDataList
+ PerfDataList(PerfDataList* p);
+
+ ~PerfDataList();
+
+ // return the PerfData item indicated by name,
+ // or NULL if it doesn't exist.
+ PerfData* find_by_name(const char* name);
+
+ // return true if a PerfData item with the name specified in the
+ // argument exists, otherwise return false.
+ bool contains(const char* name) { return find_by_name(name) != NULL; }
+
+ // return the number of PerfData items in this list
+ int length() { return _set->length(); }
+
+ // add a PerfData item to this list
+ void append(PerfData *p) { _set->append(p); }
+
+ // remove the given PerfData item from this list. When called
+ // while iterating over the list, this method will result in a
+ // change in the length of the container. The at(int index)
+ // method is also impacted by this method as elements with an
+ // index greater than the index of the element removed by this
+ // method will be shifted down by one.
+ void remove(PerfData *p) { _set->remove(p); }
+
+ // create a new PerfDataList from this list. The new list is
+ // a shallow copy of the original list and care should be taken
+ // with respect to delete operations on the elements of the list
+  // as they are likely in use by another copy of the list.
+ PerfDataList* clone();
+
+ // for backward compatibility with GrowableArray - need to implement
+ // some form of iterator to provide a cleaner abstraction for
+ // iteration over the container.
+ PerfData* at(int index) { return _set->at(index); }
+};
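+
+/*
+ * Illustrative sketch (not part of this interface): index based traversal
+ * of a PerfDataList, in the style the StatSampler is described as using.
+ * The sample_all() helper name is invented for the example.
+ *
+ *   void sample_all(PerfDataList* sampled) {
+ *     for (int i = 0; i < sampled->length(); i++) {
+ *       PerfData* item = sampled->at(i);
+ *       // ... sample or format the item; PerfData::sample() is protected,
+ *       // so real callers are friends such as StatSampler ...
+ *     }
+ *   }
+ */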
+
+
+/*
+ * The PerfDataManager class is responsible for creating PerfData
+ * subtypes via a set of factory methods and for managing lists
+ * of the various PerfData types.
+ */
+class PerfDataManager : AllStatic {
+
+ friend class StatSampler; // for access to protected PerfDataList methods
+
+ private:
+ static PerfDataList* _all;
+ static PerfDataList* _sampled;
+ static PerfDataList* _constants;
+ static const char* _name_spaces[];
+
+  // add a PerfData item to the list(s) of known PerfData objects
+ static void add_item(PerfData* p, bool sampled);
+
+ protected:
+ // return the list of all known PerfData items
+ static PerfDataList* all();
+ static int count() { return _all->length(); }
+
+ // return the list of all known PerfData items that are to be
+ // sampled by the StatSampler.
+ static PerfDataList* sampled();
+ static int sampled_count() { return _sampled->length(); }
+
+ // return the list of all known PerfData items that have a
+ // variability classification of type Constant
+ static PerfDataList* constants();
+ static int constants_count() { return _constants->length(); }
+
+ public:
+
+ // method to check for the existence of a PerfData item with
+ // the given name.
+ static bool exists(const char* name) { return _all->contains(name); }
+
+ // method to map a CounterNS enumeration to a namespace string
+ static const char* ns_to_string(CounterNS ns) {
+ return _name_spaces[ns];
+ }
+
+ // methods to test the interface stability of a given counter namespace
+ //
+ static bool is_stable_supported(CounterNS ns) {
+ return (ns != NULL_NS) && ((ns % 3) == JAVA_NS);
+ }
+ static bool is_unstable_supported(CounterNS ns) {
+ return (ns != NULL_NS) && ((ns % 3) == COM_NS);
+ }
+ static bool is_unstable_unsupported(CounterNS ns) {
+ return (ns == NULL_NS) || ((ns % 3) == SUN_NS);
+ }
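+
+  // For example, SUN_GC has enumeration value 5, so SUN_GC % 3 == 2 == SUN_NS
+  // and is_unstable_unsupported(SUN_GC) is true, while JAVA_GC % 3 == 0 ==
+  // JAVA_NS and is_stable_supported(JAVA_GC) is true.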
+
+ // methods to test the interface stability of a given counter name
+ //
+ static bool is_stable_supported(const char* name) {
+ const char* javadot = "java.";
+ return strncmp(name, javadot, strlen(javadot)) == 0;
+ }
+ static bool is_unstable_supported(const char* name) {
+ const char* comdot = "com.sun.";
+ return strncmp(name, comdot, strlen(comdot)) == 0;
+ }
+ static bool is_unstable_unsupported(const char* name) {
+    return !(is_stable_supported(name) || is_unstable_supported(name));
+ }
+
+ // method to construct counter name strings in a given name space.
+ // The string object is allocated from the Resource Area and calls
+ // to this method must be made within a ResourceMark.
+ //
+ static char* counter_name(const char* name_space, const char* name);
+
+ // method to construct name space strings in a given name space.
+ // The string object is allocated from the Resource Area and calls
+ // to this method must be made within a ResourceMark.
+ //
+ static char* name_space(const char* name_space, const char* sub_space) {
+ return counter_name(name_space, sub_space);
+ }
+
+ // same as above, but appends the instance number to the name space
+ //
+ static char* name_space(const char* name_space, const char* sub_space,
+ int instance);
+ static char* name_space(const char* name_space, int instance);
+
+
+ // these methods provide the general interface for creating
+ // performance data resources. The types of performance data
+ // resources can be extended by adding additional create<type>
+ // methods.
+
+ // Constant Types
+ static PerfStringConstant* create_string_constant(CounterNS ns,
+ const char* name,
+ const char *s, TRAPS);
+
+ static PerfLongConstant* create_long_constant(CounterNS ns,
+ const char* name,
+ PerfData::Units u,
+ jlong val, TRAPS);
+
+
+ // Variable Types
+ static PerfStringVariable* create_string_variable(CounterNS ns,
+ const char* name,
+ int max_length,
+ const char *s, TRAPS);
+
+ static PerfStringVariable* create_string_variable(CounterNS ns,
+ const char* name,
+ const char *s, TRAPS) {
+ return create_string_variable(ns, name, 0, s, CHECK_NULL);
+ };
+
+ static PerfLongVariable* create_long_variable(CounterNS ns,
+ const char* name,
+ PerfData::Units u,
+ jlong ival, TRAPS);
+
+ static PerfLongVariable* create_long_variable(CounterNS ns,
+ const char* name,
+ PerfData::Units u, TRAPS) {
+ return create_long_variable(ns, name, u, (jlong)0, CHECK_NULL);
+ };
+
+ static PerfLongVariable* create_long_variable(CounterNS, const char* name,
+ PerfData::Units u,
+ jlong* sp, TRAPS);
+
+ static PerfLongVariable* create_long_variable(CounterNS ns,
+ const char* name,
+ PerfData::Units u,
+ PerfLongSampleHelper* sh,
+ TRAPS);
+
+
+ // Counter Types
+ static PerfLongCounter* create_long_counter(CounterNS ns, const char* name,
+ PerfData::Units u,
+ jlong ival, TRAPS);
+
+ static PerfLongCounter* create_long_counter(CounterNS ns, const char* name,
+ PerfData::Units u, TRAPS) {
+ return create_long_counter(ns, name, u, (jlong)0, CHECK_NULL);
+ };
+
+ static PerfLongCounter* create_long_counter(CounterNS ns, const char* name,
+ PerfData::Units u, jlong* sp,
+ TRAPS);
+
+ static PerfLongCounter* create_long_counter(CounterNS ns, const char* name,
+ PerfData::Units u,
+ PerfLongSampleHelper* sh,
+ TRAPS);
+
+
+ // these creation methods are provided for ease of use. These allow
+ // Long performance data types to be created with a shorthand syntax.
+
+ static PerfConstant* create_constant(CounterNS ns, const char* name,
+ PerfData::Units u, jlong val, TRAPS) {
+ return create_long_constant(ns, name, u, val, CHECK_NULL);
+ }
+
+ static PerfVariable* create_variable(CounterNS ns, const char* name,
+ PerfData::Units u, jlong ival, TRAPS) {
+ return create_long_variable(ns, name, u, ival, CHECK_NULL);
+ }
+
+ static PerfVariable* create_variable(CounterNS ns, const char* name,
+ PerfData::Units u, TRAPS) {
+ return create_long_variable(ns, name, u, (jlong)0, CHECK_NULL);
+ }
+
+ static PerfVariable* create_variable(CounterNS ns, const char* name,
+ PerfData::Units u, jlong* sp, TRAPS) {
+ return create_long_variable(ns, name, u, sp, CHECK_NULL);
+ }
+
+ static PerfVariable* create_variable(CounterNS ns, const char* name,
+ PerfData::Units u,
+ PerfSampleHelper* sh, TRAPS) {
+ return create_long_variable(ns, name, u, sh, CHECK_NULL);
+ }
+
+ static PerfCounter* create_counter(CounterNS ns, const char* name,
+ PerfData::Units u, jlong ival, TRAPS) {
+ return create_long_counter(ns, name, u, ival, CHECK_NULL);
+ }
+
+ static PerfCounter* create_counter(CounterNS ns, const char* name,
+ PerfData::Units u, TRAPS) {
+ return create_long_counter(ns, name, u, (jlong)0, CHECK_NULL);
+ }
+
+ static PerfCounter* create_counter(CounterNS ns, const char* name,
+ PerfData::Units u, jlong* sp, TRAPS) {
+ return create_long_counter(ns, name, u, sp, CHECK_NULL);
+ }
+
+ static PerfCounter* create_counter(CounterNS ns, const char* name,
+ PerfData::Units u,
+ PerfSampleHelper* sh, TRAPS) {
+ return create_long_counter(ns, name, u, sh, CHECK_NULL);
+ }
+
+ static void destroy();
+};
+
+// Useful macros to create the performance counters
+#define NEWPERFTICKCOUNTER(counter, counter_ns, counter_name) \
+ {counter = PerfDataManager::create_counter(counter_ns, counter_name, \
+ PerfData::U_Ticks,CHECK);}
+
+#define NEWPERFEVENTCOUNTER(counter, counter_ns, counter_name) \
+ {counter = PerfDataManager::create_counter(counter_ns, counter_name, \
+ PerfData::U_Events,CHECK);}
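+
+// Example use of the macros above (a sketch; the counter fields, the SUN_GC
+// name space and the enclosing method are assumptions for illustration).
+// Both macros expand a trailing CHECK, so they must appear in code that
+// follows the TRAPS/CHECK exception convention:
+//
+//   void init_my_counters(TRAPS) {
+//     NEWPERFEVENTCOUNTER(_my_event_counter, SUN_GC, "mysubsystem.events");
+//     NEWPERFTICKCOUNTER(_my_time_counter, SUN_GC, "mysubsystem.time");
+//   }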
+
+// Utility Classes
+
+/*
+ * this class will administer a PerfCounter used as a time accumulator
+ * for a basic block much like the TraceTime class.
+ *
+ * Example:
+ *
+ * static PerfCounter* my_time_counter = PerfDataManager::create_counter("my.time.counter", PerfData::U_Ticks, 0LL, CHECK);
+ *
+ * {
+ * PerfTraceTime ptt(my_time_counter);
+ * // perform the operation you want to measure
+ * }
+ *
+ * Note: use of this class does not need to occur within a guarded
+ * block. The UsePerfData check is performed inside the implementation
+ * of this class.
+ */
+class PerfTraceTime : public StackObj {
+
+ protected:
+ elapsedTimer _t;
+ PerfLongCounter* _timerp;
+ // pointer to thread-local or global recursion counter variable
+ int* _recursion_counter;
+
+ public:
+ inline PerfTraceTime(PerfLongCounter* timerp) : _timerp(timerp), _recursion_counter(NULL) {
+ if (!UsePerfData) return;
+ _t.start();
+ }
+
+ inline PerfTraceTime(PerfLongCounter* timerp, int* recursion_counter) : _timerp(timerp), _recursion_counter(recursion_counter) {
+ if (!UsePerfData || (_recursion_counter != NULL &&
+ (*_recursion_counter)++ > 0)) return;
+ _t.start();
+ }
+
+ inline void suspend() { if (!UsePerfData) return; _t.stop(); }
+ inline void resume() { if (!UsePerfData) return; _t.start(); }
+
+ inline ~PerfTraceTime() {
+ if (!UsePerfData || (_recursion_counter != NULL &&
+ --(*_recursion_counter) > 0)) return;
+ _t.stop();
+ _timerp->inc(_t.ticks());
+ }
+};
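+
+/*
+ * Sketch of the recursion counter variant above (the _perf_timer counter and
+ * the static recursion depth variable are assumptions for illustration).
+ * Only the outermost activation starts and stops the timer, so re-entrant
+ * calls are not double counted:
+ *
+ *   static int _recursion_depth = 0;
+ *
+ *   void possibly_reentrant_operation() {
+ *     PerfTraceTime ptt(_perf_timer, &_recursion_depth);
+ *     // ... work that may call back into possibly_reentrant_operation() ...
+ *   }
+ */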
+
+/* The PerfTraceTimedEvent class is responsible for counting the
+ * occurrence of some event and measuring the elapsed time of
+ * the event in two separate PerfCounter instances.
+ *
+ * Example:
+ *
+ * static PerfCounter* my_time_counter = PerfDataManager::create_counter("my.time.counter", PerfData::U_Ticks, CHECK);
+ * static PerfCounter* my_event_counter = PerfDataManager::create_counter("my.event.counter", PerfData::U_Events, CHECK);
+ *
+ * {
+ * PerfTraceTimedEvent ptte(my_time_counter, my_event_counter);
+ * // perform the operation you want to count and measure
+ * }
+ *
+ * Note: use of this class does not need to occur within a guarded
+ * block. The UsePerfData check is performed inside the implementation
+ * of this class.
+ *
+ */
+class PerfTraceTimedEvent : public PerfTraceTime {
+
+ protected:
+ PerfLongCounter* _eventp;
+
+ public:
+ inline PerfTraceTimedEvent(PerfLongCounter* timerp, PerfLongCounter* eventp): PerfTraceTime(timerp), _eventp(eventp) {
+ if (!UsePerfData) return;
+ _eventp->inc();
+ }
+
+ inline PerfTraceTimedEvent(PerfLongCounter* timerp, PerfLongCounter* eventp, int* recursion_counter): PerfTraceTime(timerp, recursion_counter), _eventp(eventp) {
+ if (!UsePerfData) return;
+ _eventp->inc();
+ }
+};
diff --git a/src/share/vm/runtime/perfMemory.cpp b/src/share/vm/runtime/perfMemory.cpp
new file mode 100644
index 000000000..0ae679bdd
--- /dev/null
+++ b/src/share/vm/runtime/perfMemory.cpp
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_perfMemory.cpp.incl"
+
+char* PerfMemory::_start = NULL;
+char* PerfMemory::_end = NULL;
+char* PerfMemory::_top = NULL;
+size_t PerfMemory::_capacity = 0;
+jint PerfMemory::_initialized = false;
+PerfDataPrologue* PerfMemory::_prologue = NULL;
+
+void perfMemory_init() {
+
+ if (!UsePerfData) return;
+
+ PerfMemory::initialize();
+}
+
+void perfMemory_exit() {
+
+ if (!UsePerfData) return;
+ if (!PerfMemory::is_initialized()) return;
+
+ // if the StatSampler is active, then we don't want to remove
+ // resources it may be dependent on. Typically, the StatSampler
+ // is disengaged from the watcher thread when this method is called,
+ // but it is not disengaged if this method is invoked during a
+ // VM abort.
+ //
+ if (!StatSampler::is_active())
+ PerfDataManager::destroy();
+
+ // remove the persistent external resources, if any. this method
+ // does not unmap or invalidate any virtual memory allocated during
+ // initialization.
+ //
+ PerfMemory::destroy();
+}
+
+void PerfMemory::initialize() {
+
+ if (_prologue != NULL)
+ // initialization already performed
+ return;
+
+ size_t capacity = align_size_up(PerfDataMemorySize,
+ os::vm_allocation_granularity());
+
+ if (PerfTraceMemOps) {
+ tty->print("PerfDataMemorySize = " SIZE_FORMAT ","
+ " os::vm_allocation_granularity = " SIZE_FORMAT ","
+ " adjusted size = " SIZE_FORMAT "\n",
+ PerfDataMemorySize,
+ os::vm_allocation_granularity(),
+ capacity);
+ }
+
+ // allocate PerfData memory region
+ create_memory_region(capacity);
+
+ if (_start == NULL) {
+
+ // the PerfMemory region could not be created as desired. Rather
+ // than terminating the JVM, we revert to creating the instrumentation
+ // on the C heap. When running in this mode, external monitoring
+ // clients cannot attach to and monitor this JVM.
+ //
+ // the warning is issued only in debug mode in order to avoid
+ // additional output to the stdout or stderr output streams.
+ //
+ if (PrintMiscellaneous && Verbose) {
+ warning("Could not create PerfData Memory region, reverting to malloc");
+ }
+
+ _prologue = NEW_C_HEAP_OBJ(PerfDataPrologue);
+ }
+ else {
+
+ // the PerfMemory region was created as expected.
+
+ if (PerfTraceMemOps) {
+ tty->print("PerfMemory created: address = " INTPTR_FORMAT ","
+ " size = " SIZE_FORMAT "\n",
+ (void*)_start,
+ _capacity);
+ }
+
+ _prologue = (PerfDataPrologue *)_start;
+ _end = _start + _capacity;
+ _top = _start + sizeof(PerfDataPrologue);
+ }
+
+ assert(_prologue != NULL, "prologue pointer must be initialized");
+
+#ifdef VM_LITTLE_ENDIAN
+ _prologue->magic = (jint)0xc0c0feca;
+ _prologue->byte_order = PERFDATA_LITTLE_ENDIAN;
+#else
+ _prologue->magic = (jint)0xcafec0c0;
+ _prologue->byte_order = PERFDATA_BIG_ENDIAN;
+#endif
+
+ _prologue->major_version = PERFDATA_MAJOR_VERSION;
+ _prologue->minor_version = PERFDATA_MINOR_VERSION;
+ _prologue->accessible = 0;
+
+ _prologue->entry_offset = sizeof(PerfDataPrologue);
+ _prologue->num_entries = 0;
+ _prologue->used = 0;
+ _prologue->overflow = 0;
+ _prologue->mod_time_stamp = 0;
+
+ OrderAccess::release_store(&_initialized, 1);
+}
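+
+// Note on the magic value: on a little endian platform the jint 0xc0c0feca
+// is laid out in memory as the byte sequence 0xca 0xfe 0xc0 0xc0, so the
+// first four bytes of the region are always ca fe c0 c0 regardless of the
+// platform that produced it, which is what monitoring tools key off of.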
+
+void PerfMemory::destroy() {
+
+ assert(_prologue != NULL, "prologue pointer must be initialized");
+
+ if (_start != NULL && _prologue->overflow != 0) {
+
+ // This state indicates that the contiguous memory region exists and
+ // that it wasn't large enough to hold all the counters. In this case,
+ // we output a warning message to the user on exit if the -XX:+Verbose
+ // flag is set (a debug only flag). External monitoring tools can detect
+ // this condition by monitoring the _prologue->overflow word.
+ //
+ // There are two tunables that can help resolve this issue:
+ // - increase the size of the PerfMemory with -XX:PerfDataMemorySize=<n>
+ // - decrease the maximum string constant length with
+ // -XX:PerfMaxStringConstLength=<n>
+ //
+ if (PrintMiscellaneous && Verbose) {
+ warning("PerfMemory Overflow Occurred.\n"
+ "\tCapacity = " SIZE_FORMAT " bytes"
+ " Used = " SIZE_FORMAT " bytes"
+ " Overflow = " INT32_FORMAT " bytes"
+ "\n\tUse -XX:PerfDataMemorySize=<size> to specify larger size.",
+ PerfMemory::capacity(),
+ PerfMemory::used(),
+ _prologue->overflow);
+ }
+ }
+
+ if (_start != NULL) {
+
+ // this state indicates that the contiguous memory region was successfully
+    // created and that persistent resources may need to be cleaned up. This is
+ // expected to be the typical condition.
+ //
+ delete_memory_region();
+ }
+
+ _start = NULL;
+ _end = NULL;
+ _top = NULL;
+ _prologue = NULL;
+ _capacity = 0;
+}
+
+// allocate an aligned block of memory from the PerfData memory
+// region. This method assumes that the PerfData memory region
+// was aligned on a double word boundary when created.
+//
+char* PerfMemory::alloc(size_t size) {
+
+ if (!UsePerfData) return NULL;
+
+ MutexLocker ml(PerfDataMemAlloc_lock);
+
+ assert(_prologue != NULL, "called before initialization");
+
+ // check that there is enough memory for this request
+ if ((_top + size) >= _end) {
+
+ _prologue->overflow += (jint)size;
+
+ return NULL;
+ }
+
+ char* result = _top;
+
+ _top += size;
+
+ assert(contains(result), "PerfData memory pointer out of range");
+
+ _prologue->used = (jint)used();
+ _prologue->num_entries = _prologue->num_entries + 1;
+
+ return result;
+}
+
+void PerfMemory::mark_updated() {
+ if (!UsePerfData) return;
+
+ _prologue->mod_time_stamp = os::elapsed_counter();
+}
+
+// Returns the complete path including the file name of performance data file.
+// Caller is expected to release the allocated memory.
+char* PerfMemory::get_perfdata_file_path() {
+ char* dest_file = NULL;
+
+ if (PerfDataSaveFile != NULL) {
+    // dest_file stores the validated file name; if PerfDataSaveFile
+    // contains %p, it is replaced by the pid.
+ dest_file = NEW_C_HEAP_ARRAY(char, JVM_MAXPATHLEN);
+ if(!Arguments::copy_expand_pid(PerfDataSaveFile, strlen(PerfDataSaveFile),
+ dest_file, JVM_MAXPATHLEN)) {
+ FREE_C_HEAP_ARRAY(char, dest_file);
+ if (PrintMiscellaneous && Verbose) {
+ warning("Invalid performance data file path name specified, "\
+ "fall back to a default name");
+ }
+ } else {
+ return dest_file;
+ }
+ }
+ // create the name of the file for retaining the instrumentation memory.
+ dest_file = NEW_C_HEAP_ARRAY(char, PERFDATA_FILENAME_LEN);
+ jio_snprintf(dest_file, PERFDATA_FILENAME_LEN,
+ "%s_%d", PERFDATA_NAME, os::current_process_id());
+
+ return dest_file;
+}
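+
+// For example, when PerfDataSaveFile is not set and assuming PERFDATA_NAME
+// is "hsperfdata" (it is defined elsewhere in the perfMemory headers), a VM
+// running as process 4242 would return "hsperfdata_4242".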
diff --git a/src/share/vm/runtime/perfMemory.hpp b/src/share/vm/runtime/perfMemory.hpp
new file mode 100644
index 000000000..812ab3bac
--- /dev/null
+++ b/src/share/vm/runtime/perfMemory.hpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/*
+ * PerfData Version Constants
+ * - Major Version - change whenever the structure of PerfDataEntry changes
+ * - Minor Version - change whenever the data within the PerfDataEntry
+ *                    structure changes. For example, new unit or variability
+ * values are added or new PerfData subtypes are added.
+ */
+#define PERFDATA_MAJOR_VERSION 2
+#define PERFDATA_MINOR_VERSION 0
+
+/* Byte order of the PerfData memory region. The byte order is exposed in
+ * the PerfData memory region as the data in the memory region may have
+ * been generated by a little endian JVM implementation. Tracking the byte
+ * order in the PerfData memory region allows Java applications to adapt
+ * to the native byte order for monitoring purposes. This indicator is
+ * also useful when a snapshot of the PerfData memory region is shipped
+ * to a machine with a native byte order different from that of the
+ * originating machine.
+ */
+#define PERFDATA_BIG_ENDIAN 0
+#define PERFDATA_LITTLE_ENDIAN 1
+
+/*
+ * The PerfDataPrologue structure is known by the PerfDataBuffer Java class
+ * libraries that read the PerfData memory region. The size and the position
+ * of the fields must be changed along with their counterparts in the
+ * PerfDataBuffer Java class. The first four bytes of this structure
+ * should never change, or compatibility problems between the monitoring
+ * applications and Hotspot VMs will result. The reserved fields are
+ * available for future enhancements.
+ */
+typedef struct {
+ jint magic; // magic number - 0xcafec0c0
+ jbyte byte_order; // byte order of the buffer
+ jbyte major_version; // major and minor version numbers
+ jbyte minor_version;
+ jbyte accessible; // ready to access
+ jint used; // number of PerfData memory bytes used
+ jint overflow; // number of bytes of overflow
+ jlong mod_time_stamp; // time stamp of last structural modification
+ jint entry_offset; // offset of the first PerfDataEntry
+ jint num_entries; // number of allocated PerfData entries
+} PerfDataPrologue;
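+
+// Illustrative sketch (hypothetical, for exposition only): a monitoring tool
+// that maps a copy of the PerfData memory region could sanity check the
+// header along these lines; validate_prologue is not a VM or JDK API.
+//
+//   static bool validate_prologue(const PerfDataPrologue* p) {
+//     if (p->magic != (jint)0xcafec0c0) return false;   // wrong magic number
+//     if (p->byte_order != PERFDATA_BIG_ENDIAN &&
+//         p->byte_order != PERFDATA_LITTLE_ENDIAN) return false;
+//     if (p->major_version != PERFDATA_MAJOR_VERSION) return false;
+//     return p->accessible != 0;                        // ready to be read
+//   }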
+
+/* The PerfDataEntry structure defines the fixed portion of an entry
+ * in the PerfData memory region. The PerfDataBuffer Java libraries
+ * are aware of this structure and need to be changed when this
+ * structure changes.
+ */
+typedef struct {
+
+ jint entry_length; // entry length in bytes
+ jint name_offset; // offset of the data item name
+ jint vector_length; // length of the vector. If 0, then scalar
+ jbyte data_type; // type of the data item -
+ // 'B','Z','J','I','S','C','D','F','V','L','['
+ jbyte flags; // flags indicating misc attributes
+ jbyte data_units; // unit of measure for the data type
+ jbyte data_variability; // variability classification of data type
+ jint data_offset; // offset of the data item
+
+/*
+ body of PerfData memory entry is variable length
+
+ jbyte[name_length] data_name; // name of the data item
+ jbyte[pad_length] data_pad; // alignment of data item
+ j<data_type>[data_length] data_item; // array of appropriate types.
+ // data_length is > 1 only when the
+ // data_type is T_ARRAY.
+*/
+} PerfDataEntry;
+
+// Prefix of performance data file.
+static const char PERFDATA_NAME[] = "hsperfdata";
+
+// UINT_CHARS contains the number of characters needed to hold a process id
+// (i.e. pid). pid is defined as an unsigned "int", so the maximum possible pid
+// value would be 2^32 - 1 (4294967295), which can be represented as a
+// 10-character string.
+static const size_t UINT_CHARS = 10;
+
+// Add 1 for the '_' character between PERFDATA_NAME and pid. The '\0' terminating
+// character will be included in the sizeof(PERFDATA_NAME) operation.
+static const size_t PERFDATA_FILENAME_LEN = sizeof(PERFDATA_NAME) +
+ UINT_CHARS + 1;
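+
+// For example: sizeof(PERFDATA_NAME) is 11 ("hsperfdata" plus its terminating
+// '\0'), so PERFDATA_FILENAME_LEN is 11 + 10 + 1 = 22 bytes, exactly enough
+// for a name such as "hsperfdata_4294967295".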
+
+/* the PerfMemory class manages creation, destruction,
+ * and allocation of the PerfData region.
+ */
+class PerfMemory : AllStatic {
+ friend class VMStructs;
+ private:
+ static char* _start;
+ static char* _end;
+ static char* _top;
+ static size_t _capacity;
+ static PerfDataPrologue* _prologue;
+ static jint _initialized;
+
+ static void create_memory_region(size_t sizep);
+ static void delete_memory_region();
+
+ public:
+ enum PerfMemoryMode {
+ PERF_MODE_RO = 0,
+ PERF_MODE_RW = 1
+ };
+
+ static char* alloc(size_t size);
+ static char* start() { return _start; }
+ static char* end() { return _end; }
+ static size_t used() { return (size_t) (_top - _start); }
+ static size_t capacity() { return _capacity; }
+ static bool is_initialized() { return _initialized != 0; }
+ static bool contains(char* addr) {
+ return ((_start != NULL) && (addr >= _start) && (addr < _end));
+ }
+ static void mark_updated();
+
+ // methods for attaching to and detaching from the PerfData
+ // memory segment of another JVM process on the same system.
+ static void attach(const char* user, int vmid, PerfMemoryMode mode,
+ char** addrp, size_t* size, TRAPS);
+ static void detach(char* addr, size_t bytes, TRAPS);
+
+ static void initialize();
+ static void destroy();
+ static void set_accessible(bool value) {
+ if (UsePerfData) {
+ _prologue->accessible = value;
+ }
+ }
+
+ // filename of backing store or NULL if none.
+ static char* backing_store_filename();
+
+ // returns the complete file path of hsperfdata.
+ // the caller is expected to free the allocated memory.
+ static char* get_perfdata_file_path();
+};
+
+void perfMemory_init();
+void perfMemory_exit();
diff --git a/src/share/vm/runtime/prefetch.hpp b/src/share/vm/runtime/prefetch.hpp
new file mode 100644
index 000000000..0c498f839
--- /dev/null
+++ b/src/share/vm/runtime/prefetch.hpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2003 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// If calls to prefetch methods are in a loop, the loop should be cloned
+// such that if Prefetch{Scan,Copy}Interval and/or PrefetchFieldInterval
+// say not to do prefetching, these methods aren't called. At the very
+// least, they take up a memory issue slot. They should be implemented
+// as inline assembly code: doing an actual call isn't worth the cost.
+
+class Prefetch : AllStatic {
+ public:
+ enum style {
+ do_none, // Do no prefetching
+ do_read, // Do read prefetching
+ do_write // Do write prefetching
+ };
+
+ // Prefetch anticipating read; must not fault, semantically a no-op
+ static void read(void* loc, intx interval);
+
+ // Prefetch anticipating write; must not fault, semantically a no-op
+ static void write(void* loc, intx interval);
+};
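+
+// Illustrative sketch (hypothetical, for exposition only): a scan loop cloned
+// as the comment above suggests, so that when the interval says not to
+// prefetch, the prefetch calls are never issued. HeapWord is the usual VM
+// word type; process() and the loop bounds are placeholders.
+//
+//   void scan(HeapWord* start, HeapWord* end, intx interval) {
+//     if (interval > 0) {
+//       for (HeapWord* p = start; p < end; p++) {
+//         Prefetch::read(p, interval);   // issue a read prefetch hint (no-op semantics)
+//         process(p);
+//       }
+//     } else {
+//       for (HeapWord* p = start; p < end; p++) {
+//         process(p);                    // cloned loop: no prefetch overhead
+//       }
+//     }
+//   }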
diff --git a/src/share/vm/runtime/reflection.cpp b/src/share/vm/runtime/reflection.cpp
new file mode 100644
index 000000000..55cf925e6
--- /dev/null
+++ b/src/share/vm/runtime/reflection.cpp
@@ -0,0 +1,1586 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_reflection.cpp.incl"
+
+#define JAVA_1_5_VERSION 49
+
+static void trace_class_resolution(klassOop to_class) {
+ ResourceMark rm;
+ int line_number = -1;
+ const char * source_file = NULL;
+ klassOop caller = NULL;
+ JavaThread* jthread = JavaThread::current();
+ if (jthread->has_last_Java_frame()) {
+ vframeStream vfst(jthread);
+ // skip over any frames belonging to java.lang.Class
+ while (!vfst.at_end() &&
+ instanceKlass::cast(vfst.method()->method_holder())->name() == vmSymbols::java_lang_Class()) {
+ vfst.next();
+ }
+ if (!vfst.at_end()) {
+ // this frame is a likely suspect
+ caller = vfst.method()->method_holder();
+ line_number = vfst.method()->line_number_from_bci(vfst.bci());
+ symbolOop s = instanceKlass::cast(vfst.method()->method_holder())->source_file_name();
+ if (s != NULL) {
+ source_file = s->as_C_string();
+ }
+ }
+ }
+ if (caller != NULL) {
+ const char * from = Klass::cast(caller)->external_name();
+ const char * to = Klass::cast(to_class)->external_name();
+ // print in a single call to reduce interleaving between threads
+ if (source_file != NULL) {
+ tty->print("RESOLVE %s %s %s:%d (reflection)\n", from, to, source_file, line_number);
+ } else {
+ tty->print("RESOLVE %s %s (reflection)\n", from, to);
+ }
+ }
+}
+
+
+oop Reflection::box(jvalue* value, BasicType type, TRAPS) {
+ if (type == T_VOID) {
+ return NULL;
+ }
+ if (type == T_OBJECT || type == T_ARRAY) {
+ // regular objects are not boxed
+ return (oop) value->l;
+ }
+ oop result = java_lang_boxing_object::create(type, value, CHECK_NULL);
+ if (result == NULL) {
+ THROW_(vmSymbols::java_lang_IllegalArgumentException(), result);
+ }
+ return result;
+}
+
+
+BasicType Reflection::unbox_for_primitive(oop box, jvalue* value, TRAPS) {
+ if (box == NULL) {
+ THROW_(vmSymbols::java_lang_IllegalArgumentException(), T_ILLEGAL);
+ }
+ return java_lang_boxing_object::get_value(box, value);
+}
+
+BasicType Reflection::unbox_for_regular_object(oop box, jvalue* value) {
+ // Note: box is really the unboxed oop. It might even be a Short, etc.!
+ value->l = (jobject) box;
+ return T_OBJECT;
+}
+
+
+void Reflection::widen(jvalue* value, BasicType current_type, BasicType wide_type, TRAPS) {
+ assert(wide_type != current_type, "widen should not be called with identical types");
+ switch (wide_type) {
+ case T_BOOLEAN:
+ case T_BYTE:
+ case T_CHAR:
+ break; // fail
+ case T_SHORT:
+ switch (current_type) {
+ case T_BYTE:
+ value->s = (jshort) value->b;
+ return;
+ }
+ break; // fail
+ case T_INT:
+ switch (current_type) {
+ case T_BYTE:
+ value->i = (jint) value->b;
+ return;
+ case T_CHAR:
+ value->i = (jint) value->c;
+ return;
+ case T_SHORT:
+ value->i = (jint) value->s;
+ return;
+ }
+ break; // fail
+ case T_LONG:
+ switch (current_type) {
+ case T_BYTE:
+ value->j = (jlong) value->b;
+ return;
+ case T_CHAR:
+ value->j = (jlong) value->c;
+ return;
+ case T_SHORT:
+ value->j = (jlong) value->s;
+ return;
+ case T_INT:
+ value->j = (jlong) value->i;
+ return;
+ }
+ break; // fail
+ case T_FLOAT:
+ switch (current_type) {
+ case T_BYTE:
+ value->f = (jfloat) value->b;
+ return;
+ case T_CHAR:
+ value->f = (jfloat) value->c;
+ return;
+ case T_SHORT:
+ value->f = (jfloat) value->s;
+ return;
+ case T_INT:
+ value->f = (jfloat) value->i;
+ return;
+ case T_LONG:
+ value->f = (jfloat) value->j;
+ return;
+ }
+ break; // fail
+ case T_DOUBLE:
+ switch (current_type) {
+ case T_BYTE:
+ value->d = (jdouble) value->b;
+ return;
+ case T_CHAR:
+ value->d = (jdouble) value->c;
+ return;
+ case T_SHORT:
+ value->d = (jdouble) value->s;
+ return;
+ case T_INT:
+ value->d = (jdouble) value->i;
+ return;
+ case T_FLOAT:
+ value->d = (jdouble) value->f;
+ return;
+ case T_LONG:
+ value->d = (jdouble) value->j;
+ return;
+ }
+ break; // fail
+ default:
+ break; // fail
+ }
+ THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "argument type mismatch");
+}
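+
+// For example: widening a boxed byte argument for an int parameter fills
+// value->i from value->b and returns, while a narrowing request such as
+// int-to-byte falls through to the IllegalArgumentException at the end of
+// widen() above.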
+
+
+BasicType Reflection::array_get(jvalue* value, arrayOop a, int index, TRAPS) {
+ if (!a->is_within_bounds(index)) {
+ THROW_(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), T_ILLEGAL);
+ }
+ if (a->is_objArray()) {
+ value->l = (jobject) objArrayOop(a)->obj_at(index);
+ return T_OBJECT;
+ } else {
+ assert(a->is_typeArray(), "just checking");
+ BasicType type = typeArrayKlass::cast(a->klass())->element_type();
+ switch (type) {
+ case T_BOOLEAN:
+ value->z = typeArrayOop(a)->bool_at(index);
+ break;
+ case T_CHAR:
+ value->c = typeArrayOop(a)->char_at(index);
+ break;
+ case T_FLOAT:
+ value->f = typeArrayOop(a)->float_at(index);
+ break;
+ case T_DOUBLE:
+ value->d = typeArrayOop(a)->double_at(index);
+ break;
+ case T_BYTE:
+ value->b = typeArrayOop(a)->byte_at(index);
+ break;
+ case T_SHORT:
+ value->s = typeArrayOop(a)->short_at(index);
+ break;
+ case T_INT:
+ value->i = typeArrayOop(a)->int_at(index);
+ break;
+ case T_LONG:
+ value->j = typeArrayOop(a)->long_at(index);
+ break;
+ default:
+ return T_ILLEGAL;
+ }
+ return type;
+ }
+}
+
+
+void Reflection::array_set(jvalue* value, arrayOop a, int index, BasicType value_type, TRAPS) {
+ if (!a->is_within_bounds(index)) {
+ THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
+ }
+ if (a->is_objArray()) {
+ if (value_type == T_OBJECT) {
+ oop obj = (oop) value->l;
+ if (obj != NULL) {
+ klassOop element_klass = objArrayKlass::cast(a->klass())->element_klass();
+ if (!obj->is_a(element_klass)) {
+ THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "array element type mismatch");
+ }
+ }
+ objArrayOop(a)->obj_at_put(index, obj);
+ }
+ } else {
+ assert(a->is_typeArray(), "just checking");
+ BasicType array_type = typeArrayKlass::cast(a->klass())->element_type();
+ if (array_type != value_type) {
+ // The widen operation can potentially throw an exception, but cannot block,
+ // so typeArrayOop a is safe if the call succeeds.
+ widen(value, value_type, array_type, CHECK);
+ }
+ switch (array_type) {
+ case T_BOOLEAN:
+ typeArrayOop(a)->bool_at_put(index, value->z);
+ break;
+ case T_CHAR:
+ typeArrayOop(a)->char_at_put(index, value->c);
+ break;
+ case T_FLOAT:
+ typeArrayOop(a)->float_at_put(index, value->f);
+ break;
+ case T_DOUBLE:
+ typeArrayOop(a)->double_at_put(index, value->d);
+ break;
+ case T_BYTE:
+ typeArrayOop(a)->byte_at_put(index, value->b);
+ break;
+ case T_SHORT:
+ typeArrayOop(a)->short_at_put(index, value->s);
+ break;
+ case T_INT:
+ typeArrayOop(a)->int_at_put(index, value->i);
+ break;
+ case T_LONG:
+ typeArrayOop(a)->long_at_put(index, value->j);
+ break;
+ default:
+ THROW(vmSymbols::java_lang_IllegalArgumentException());
+ }
+ }
+}
+
+
+klassOop Reflection::basic_type_mirror_to_arrayklass(oop basic_type_mirror, TRAPS) {
+ assert(java_lang_Class::is_primitive(basic_type_mirror), "just checking");
+ BasicType type = java_lang_Class::primitive_type(basic_type_mirror);
+ if (type == T_VOID) {
+ THROW_0(vmSymbols::java_lang_IllegalArgumentException());
+ } else {
+ return Universe::typeArrayKlassObj(type);
+ }
+}
+
+
+oop Reflection::basic_type_arrayklass_to_mirror(klassOop basic_type_arrayklass, TRAPS) {
+ BasicType type = typeArrayKlass::cast(basic_type_arrayklass)->element_type();
+ return Universe::java_mirror(type);
+}
+
+
+arrayOop Reflection::reflect_new_array(oop element_mirror, jint length, TRAPS) {
+ if (element_mirror == NULL) {
+ THROW_0(vmSymbols::java_lang_NullPointerException());
+ }
+ if (length < 0) {
+ THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
+ }
+ if (java_lang_Class::is_primitive(element_mirror)) {
+ klassOop tak = basic_type_mirror_to_arrayklass(element_mirror, CHECK_NULL);
+ return typeArrayKlass::cast(tak)->allocate(length, THREAD);
+ } else {
+ klassOop k = java_lang_Class::as_klassOop(element_mirror);
+ if (Klass::cast(k)->oop_is_array() && arrayKlass::cast(k)->dimension() >= MAX_DIM) {
+ THROW_0(vmSymbols::java_lang_IllegalArgumentException());
+ }
+ return oopFactory::new_objArray(k, length, THREAD);
+ }
+}
+
+
+arrayOop Reflection::reflect_new_multi_array(oop element_mirror, typeArrayOop dim_array, TRAPS) {
+ assert(dim_array->is_typeArray(), "just checking");
+ assert(typeArrayKlass::cast(dim_array->klass())->element_type() == T_INT, "just checking");
+
+ if (element_mirror == NULL) {
+ THROW_0(vmSymbols::java_lang_NullPointerException());
+ }
+
+ int len = dim_array->length();
+ if (len <= 0 || len > MAX_DIM) {
+ THROW_0(vmSymbols::java_lang_IllegalArgumentException());
+ }
+
+ jint dimensions[MAX_DIM]; // C array copy of intArrayOop
+ for (int i = 0; i < len; i++) {
+ int d = dim_array->int_at(i);
+ if (d < 0) {
+ THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
+ }
+ dimensions[i] = d;
+ }
+
+ klassOop klass;
+ int dim = len;
+ if (java_lang_Class::is_primitive(element_mirror)) {
+ klass = basic_type_mirror_to_arrayklass(element_mirror, CHECK_NULL);
+ } else {
+ klass = java_lang_Class::as_klassOop(element_mirror);
+ if (Klass::cast(klass)->oop_is_array()) {
+ int k_dim = arrayKlass::cast(klass)->dimension();
+ if (k_dim + len > MAX_DIM) {
+ THROW_0(vmSymbols::java_lang_IllegalArgumentException());
+ }
+ dim += k_dim;
+ }
+ }
+ klass = Klass::cast(klass)->array_klass(dim, CHECK_NULL);
+ oop obj = arrayKlass::cast(klass)->multi_allocate(len, dimensions, THREAD);
+ assert(obj->is_array(), "just checking");
+ return arrayOop(obj);
+}
+
+
+oop Reflection::array_component_type(oop mirror, TRAPS) {
+ if (java_lang_Class::is_primitive(mirror)) {
+ return NULL;
+ }
+
+ klassOop klass = java_lang_Class::as_klassOop(mirror);
+ if (!Klass::cast(klass)->oop_is_array()) {
+ return NULL;
+ }
+
+ oop result = arrayKlass::cast(klass)->component_mirror();
+#ifdef ASSERT
+ oop result2 = NULL;
+ if (arrayKlass::cast(klass)->dimension() == 1) {
+ if (Klass::cast(klass)->oop_is_typeArray()) {
+ result2 = basic_type_arrayklass_to_mirror(klass, CHECK_NULL);
+ } else {
+ result2 = Klass::cast(objArrayKlass::cast(klass)->element_klass())->java_mirror();
+ }
+ } else {
+ klassOop lower_dim = arrayKlass::cast(klass)->lower_dimension();
+ assert(Klass::cast(lower_dim)->oop_is_array(), "just checking");
+ result2 = Klass::cast(lower_dim)->java_mirror();
+ }
+ assert(result == result2, "results must be consistent");
+#endif //ASSERT
+ return result;
+}
+
+
+bool Reflection::reflect_check_access(klassOop field_class, AccessFlags acc, klassOop target_class, bool is_method_invoke, TRAPS) {
+ // field_class : declaring class
+ // acc : declared field access
+ // target_class : for protected
+
+ // Check if field or method is accessible to client. Throw an
+ // IllegalAccessException and return false if not.
+
+ // The "client" is the class associated with the nearest real frame
+ // getCallerClass already skips Method.invoke frames, so pass 0 in
+ // that case (same as classic).
+ ResourceMark rm(THREAD);
+ assert(THREAD->is_Java_thread(), "sanity check");
+ klassOop client_class = ((JavaThread *)THREAD)->security_get_caller_class(is_method_invoke ? 0 : 1);
+
+ if (client_class != field_class) {
+ if (!verify_class_access(client_class, field_class, false)
+ || !verify_field_access(client_class,
+ field_class,
+ field_class,
+ acc,
+ false)) {
+ THROW_(vmSymbols::java_lang_IllegalAccessException(), false);
+ }
+ }
+
+ // Additional test for protected members: JLS 6.6.2
+
+ if (acc.is_protected()) {
+ if (target_class != client_class) {
+ if (!is_same_class_package(client_class, field_class)) {
+ if (!Klass::cast(target_class)->is_subclass_of(client_class)) {
+ THROW_(vmSymbols::java_lang_IllegalAccessException(), false);
+ }
+ }
+ }
+ }
+
+ // Passed all tests
+ return true;
+}
+
+
+bool Reflection::verify_class_access(klassOop current_class, klassOop new_class, bool classloader_only) {
+ // Verify that current_class can access new_class. If the classloader_only
+ // flag is set, we automatically allow any accesses in which current_class
+ // doesn't have a classloader.
+ if ((current_class == NULL) ||
+ (current_class == new_class) ||
+ (instanceKlass::cast(new_class)->is_public()) ||
+ is_same_class_package(current_class, new_class)) {
+ return true;
+ }
+ // New (1.4) reflection implementation. Allow all accesses from
+ // sun/reflect/MagicAccessorImpl subclasses to succeed trivially.
+ if ( JDK_Version::is_gte_jdk14x_version()
+ && UseNewReflection
+ && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_magic_klass())) {
+ return true;
+ }
+
+ return can_relax_access_check_for(current_class, new_class, classloader_only);
+}
+
+bool Reflection::can_relax_access_check_for(
+ klassOop accessor, klassOop accessee, bool classloader_only) {
+ instanceKlass* accessor_ik = instanceKlass::cast(accessor);
+ instanceKlass* accessee_ik = instanceKlass::cast(accessee);
+ if (RelaxAccessControlCheck ||
+ (accessor_ik->major_version() < JAVA_1_5_VERSION &&
+ accessee_ik->major_version() < JAVA_1_5_VERSION)) {
+ return classloader_only &&
+ Verifier::relax_verify_for(accessor_ik->class_loader()) &&
+ accessor_ik->protection_domain() == accessee_ik->protection_domain() &&
+ accessor_ik->class_loader() == accessee_ik->class_loader();
+ } else {
+ return false;
+ }
+}
+
+bool Reflection::verify_field_access(klassOop current_class,
+ klassOop resolved_class,
+ klassOop field_class,
+ AccessFlags access,
+ bool classloader_only,
+ bool protected_restriction) {
+ // Verify that current_class can access a field of field_class, where that
+ // field's access bits are "access". We assume that we've already verified
+ // that current_class can access field_class.
+ //
+ // If the classloader_only flag is set, we automatically allow any accesses
+ // in which current_class doesn't have a classloader.
+ //
+ // "resolved_class" is the runtime type of "field_class". Sometimes we don't
+ // need this distinction (e.g. if all we have is the runtime type, or during
+ // class file parsing when we only care about the static type); in that case
+ // callers should ensure that resolved_class == field_class.
+ //
+ if ((current_class == NULL) ||
+ (current_class == field_class) ||
+ access.is_public()) {
+ return true;
+ }
+
+ if (access.is_protected()) {
+ if (!protected_restriction) {
+ // See if current_class is a subclass of field_class
+ if (Klass::cast(current_class)->is_subclass_of(field_class)) {
+ if (current_class == resolved_class ||
+ field_class == resolved_class ||
+ Klass::cast(current_class)->is_subclass_of(resolved_class) ||
+ Klass::cast(resolved_class)->is_subclass_of(current_class)) {
+ return true;
+ }
+ }
+ }
+ }
+
+ if (!access.is_private() && is_same_class_package(current_class, field_class)) {
+ return true;
+ }
+
+ // New (1.4) reflection implementation. Allow all accesses from
+ // sun/reflect/MagicAccessorImpl subclasses to succeed trivially.
+ if ( JDK_Version::is_gte_jdk14x_version()
+ && UseNewReflection
+ && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_magic_klass())) {
+ return true;
+ }
+
+ return can_relax_access_check_for(
+ current_class, field_class, classloader_only);
+}
+
+
+bool Reflection::is_same_class_package(klassOop class1, klassOop class2) {
+ return instanceKlass::cast(class1)->is_same_class_package(class2);
+}
+
+
+// Checks that the 'outer' klass has declared 'inner' as being an inner klass. If not,
+// throws an IncompatibleClassChangeError.
+void Reflection::check_for_inner_class(instanceKlassHandle outer, instanceKlassHandle inner, TRAPS) {
+ const int inner_class_info_index = 0;
+ const int outer_class_info_index = 1;
+
+ typeArrayHandle icls (THREAD, outer->inner_classes());
+ constantPoolHandle cp (THREAD, outer->constants());
+ for(int i = 0; i < icls->length(); i += 4) {
+ int ioff = icls->ushort_at(i + inner_class_info_index);
+ int ooff = icls->ushort_at(i + outer_class_info_index);
+
+ if (ioff != 0 && ooff != 0) {
+ klassOop o = cp->klass_at(ooff, CHECK);
+ if (o == outer()) {
+ klassOop i = cp->klass_at(ioff, CHECK);
+ if (i == inner()) {
+ return;
+ }
+ }
+ }
+ }
+
+ // 'inner' not declared as an inner klass in outer
+ ResourceMark rm(THREAD);
+ Exceptions::fthrow(
+ THREAD_AND_LOCATION,
+ vmSymbolHandles::java_lang_IncompatibleClassChangeError(),
+ "%s and %s disagree on InnerClasses attribute",
+ outer->external_name(),
+ inner->external_name()
+ );
+}
+
+// Utility method converting a single SignatureStream element into java.lang.Class instance
+
+oop get_mirror_from_signature(methodHandle method, SignatureStream* ss, TRAPS) {
+ switch (ss->type()) {
+ default:
+ assert(ss->type() != T_VOID || ss->at_return_type(), "T_VOID should only appear as return type");
+ return java_lang_Class::primitive_mirror(ss->type());
+ case T_OBJECT:
+ case T_ARRAY:
+ symbolOop name = ss->as_symbol(CHECK_NULL);
+ oop loader = instanceKlass::cast(method->method_holder())->class_loader();
+ oop protection_domain = instanceKlass::cast(method->method_holder())->protection_domain();
+ klassOop k = SystemDictionary::resolve_or_fail(
+ symbolHandle(THREAD, name),
+ Handle(THREAD, loader),
+ Handle(THREAD, protection_domain),
+ true, CHECK_NULL);
+ if (TraceClassResolution) {
+ trace_class_resolution(k);
+ }
+ return k->klass_part()->java_mirror();
+ };
+}
+
+
+objArrayHandle Reflection::get_parameter_types(methodHandle method, int parameter_count, oop* return_type, TRAPS) {
+ // Allocate array holding parameter types (java.lang.Class instances)
+ objArrayOop m = oopFactory::new_objArray(SystemDictionary::class_klass(), parameter_count, CHECK_(objArrayHandle()));
+ objArrayHandle mirrors (THREAD, m);
+ int index = 0;
+ // Collect parameter types
+ symbolHandle signature (THREAD, method->signature());
+ SignatureStream ss(signature);
+ while (!ss.at_return_type()) {
+ oop mirror = get_mirror_from_signature(method, &ss, CHECK_(objArrayHandle()));
+ mirrors->obj_at_put(index++, mirror);
+ ss.next();
+ }
+ assert(index == parameter_count, "invalid parameter count");
+ if (return_type != NULL) {
+ // Collect return type as well
+ assert(ss.at_return_type(), "return type should be present");
+ *return_type = get_mirror_from_signature(method, &ss, CHECK_(objArrayHandle()));
+ }
+ return mirrors;
+}
+
+objArrayHandle Reflection::get_exception_types(methodHandle method, TRAPS) {
+ return method->resolved_checked_exceptions(CHECK_(objArrayHandle()));
+}
+
+
+Handle Reflection::new_type(symbolHandle signature, KlassHandle k, TRAPS) {
+ // Basic types
+ BasicType type = vmSymbols::signature_type(signature());
+ if (type != T_OBJECT) {
+ return Handle(THREAD, Universe::java_mirror(type));
+ }
+
+ oop loader = instanceKlass::cast(k())->class_loader();
+ oop protection_domain = Klass::cast(k())->protection_domain();
+ klassOop result = SystemDictionary::resolve_or_fail(signature,
+ Handle(THREAD, loader),
+ Handle(THREAD, protection_domain),
+ true, CHECK_(Handle()));
+
+ if (TraceClassResolution) {
+ trace_class_resolution(result);
+ }
+
+ oop nt = Klass::cast(result)->java_mirror();
+ return Handle(THREAD, nt);
+}
+
+
+oop Reflection::new_method(methodHandle method, bool intern_name, bool for_constant_pool_access, TRAPS) {
+ // In jdk1.2.x, getMethods on an interface erroneously includes <clinit>, thus the complicated assert.
+ // Also allow sun.reflect.ConstantPool to refer to <clinit> methods as java.lang.reflect.Methods.
+ assert(!method()->is_initializer() ||
+ (for_constant_pool_access && method()->is_static()) ||
+ (method()->name() == vmSymbols::class_initializer_name()
+ && Klass::cast(method()->method_holder())->is_interface() && JDK_Version::is_jdk12x_version()), "should call new_constructor instead");
+ instanceKlassHandle holder (THREAD, method->method_holder());
+ int slot = method->method_idnum();
+
+ symbolHandle signature (THREAD, method->signature());
+ int parameter_count = ArgumentCount(signature).size();
+ oop return_type_oop = NULL;
+ objArrayHandle parameter_types = get_parameter_types(method, parameter_count, &return_type_oop, CHECK_NULL);
+ if (parameter_types.is_null() || return_type_oop == NULL) return NULL;
+
+ Handle return_type(THREAD, return_type_oop);
+
+ objArrayHandle exception_types = get_exception_types(method, CHECK_NULL);
+
+ if (exception_types.is_null()) return NULL;
+
+ symbolHandle method_name(THREAD, method->name());
+ Handle name;
+ if (intern_name) {
+ // intern_name is only true with UseNewReflection
+ oop name_oop = StringTable::intern(method_name(), CHECK_NULL);
+ name = Handle(THREAD, name_oop);
+ } else {
+ name = java_lang_String::create_from_symbol(method_name, CHECK_NULL);
+ }
+ if (name.is_null()) return NULL;
+
+ int modifiers = method->access_flags().as_int() & JVM_RECOGNIZED_METHOD_MODIFIERS;
+
+ Handle mh = java_lang_reflect_Method::create(CHECK_NULL);
+
+ java_lang_reflect_Method::set_clazz(mh(), holder->java_mirror());
+ java_lang_reflect_Method::set_slot(mh(), slot);
+ java_lang_reflect_Method::set_name(mh(), name());
+ java_lang_reflect_Method::set_return_type(mh(), return_type());
+ java_lang_reflect_Method::set_parameter_types(mh(), parameter_types());
+ java_lang_reflect_Method::set_exception_types(mh(), exception_types());
+ java_lang_reflect_Method::set_modifiers(mh(), modifiers);
+ java_lang_reflect_Method::set_override(mh(), false);
+ if (java_lang_reflect_Method::has_signature_field() &&
+ method->generic_signature() != NULL) {
+ symbolHandle gs(THREAD, method->generic_signature());
+ Handle sig = java_lang_String::create_from_symbol(gs, CHECK_NULL);
+ java_lang_reflect_Method::set_signature(mh(), sig());
+ }
+ if (java_lang_reflect_Method::has_annotations_field()) {
+ java_lang_reflect_Method::set_annotations(mh(), method->annotations());
+ }
+ if (java_lang_reflect_Method::has_parameter_annotations_field()) {
+ java_lang_reflect_Method::set_parameter_annotations(mh(), method->parameter_annotations());
+ }
+ if (java_lang_reflect_Method::has_annotation_default_field()) {
+ java_lang_reflect_Method::set_annotation_default(mh(), method->annotation_default());
+ }
+ return mh();
+}
+
+
+oop Reflection::new_constructor(methodHandle method, TRAPS) {
+ assert(method()->is_initializer(), "should call new_method instead");
+
+ instanceKlassHandle holder (THREAD, method->method_holder());
+ int slot = method->method_idnum();
+
+ symbolHandle signature (THREAD, method->signature());
+ int parameter_count = ArgumentCount(signature).size();
+ objArrayHandle parameter_types = get_parameter_types(method, parameter_count, NULL, CHECK_NULL);
+ if (parameter_types.is_null()) return NULL;
+
+ objArrayHandle exception_types = get_exception_types(method, CHECK_NULL);
+ if (exception_types.is_null()) return NULL;
+
+ int modifiers = method->access_flags().as_int() & JVM_RECOGNIZED_METHOD_MODIFIERS;
+
+ Handle ch = java_lang_reflect_Constructor::create(CHECK_NULL);
+
+ java_lang_reflect_Constructor::set_clazz(ch(), holder->java_mirror());
+ java_lang_reflect_Constructor::set_slot(ch(), slot);
+ java_lang_reflect_Constructor::set_parameter_types(ch(), parameter_types());
+ java_lang_reflect_Constructor::set_exception_types(ch(), exception_types());
+ java_lang_reflect_Constructor::set_modifiers(ch(), modifiers);
+ java_lang_reflect_Constructor::set_override(ch(), false);
+ if (java_lang_reflect_Constructor::has_signature_field() &&
+ method->generic_signature() != NULL) {
+ symbolHandle gs(THREAD, method->generic_signature());
+ Handle sig = java_lang_String::create_from_symbol(gs, CHECK_NULL);
+ java_lang_reflect_Constructor::set_signature(ch(), sig());
+ }
+ if (java_lang_reflect_Constructor::has_annotations_field()) {
+ java_lang_reflect_Constructor::set_annotations(ch(), method->annotations());
+ }
+ if (java_lang_reflect_Constructor::has_parameter_annotations_field()) {
+ java_lang_reflect_Constructor::set_parameter_annotations(ch(), method->parameter_annotations());
+ }
+ return ch();
+}
+
+
+oop Reflection::new_field(fieldDescriptor* fd, bool intern_name, TRAPS) {
+ symbolHandle field_name(THREAD, fd->name());
+ Handle name;
+ if (intern_name) {
+ // intern_name is only true with UseNewReflection
+ oop name_oop = StringTable::intern(field_name(), CHECK_NULL);
+ name = Handle(THREAD, name_oop);
+ } else {
+ name = java_lang_String::create_from_symbol(field_name, CHECK_NULL);
+ }
+ symbolHandle signature (THREAD, fd->signature());
+ KlassHandle holder (THREAD, fd->field_holder());
+ Handle type = new_type(signature, holder, CHECK_NULL);
+ Handle rh = java_lang_reflect_Field::create(CHECK_NULL);
+
+ java_lang_reflect_Field::set_clazz(rh(), Klass::cast(fd->field_holder())->java_mirror());
+ java_lang_reflect_Field::set_slot(rh(), fd->index());
+ java_lang_reflect_Field::set_name(rh(), name());
+ java_lang_reflect_Field::set_type(rh(), type());
+ // Note the ACC_ANNOTATION bit, which is a per-class access flag, is never set here.
+ java_lang_reflect_Field::set_modifiers(rh(), fd->access_flags().as_int() & JVM_RECOGNIZED_FIELD_MODIFIERS);
+ java_lang_reflect_Field::set_override(rh(), false);
+ if (java_lang_reflect_Field::has_signature_field() &&
+ fd->generic_signature() != NULL) {
+ symbolHandle gs(THREAD, fd->generic_signature());
+ Handle sig = java_lang_String::create_from_symbol(gs, CHECK_NULL);
+ java_lang_reflect_Field::set_signature(rh(), sig());
+ }
+ if (java_lang_reflect_Field::has_annotations_field()) {
+ java_lang_reflect_Field::set_annotations(rh(), fd->annotations());
+ }
+ return rh();
+}
+
+
+//---------------------------------------------------------------------------
+//
+// Supporting routines for old native code-based reflection (pre-JDK 1.4).
+//
+// See reflection.hpp for details.
+//
+//---------------------------------------------------------------------------
+
+#ifdef SUPPORT_OLD_REFLECTION
+
+methodHandle Reflection::resolve_interface_call(instanceKlassHandle klass, methodHandle method,
+ KlassHandle recv_klass, Handle receiver, TRAPS) {
+ assert(!method.is_null() , "method should not be null");
+
+ CallInfo info;
+ symbolHandle signature (THREAD, method->signature());
+ symbolHandle name (THREAD, method->name());
+ LinkResolver::resolve_interface_call(info, receiver, recv_klass, klass,
+ name, signature,
+ KlassHandle(), false, true,
+ CHECK_(methodHandle()));
+ return info.selected_method();
+}
+
+
+oop Reflection::invoke(instanceKlassHandle klass, methodHandle reflected_method,
+ Handle receiver, bool override, objArrayHandle ptypes,
+ BasicType rtype, objArrayHandle args, bool is_method_invoke, TRAPS) {
+ ResourceMark rm(THREAD);
+
+ methodHandle method; // actual method to invoke
+ KlassHandle target_klass; // target klass, receiver's klass for non-static
+
+ // Ensure klass is initialized
+ klass->initialize(CHECK_NULL);
+
+ bool is_static = reflected_method->is_static();
+ if (is_static) {
+ // ignore receiver argument
+ method = reflected_method;
+ target_klass = klass;
+ } else {
+ // check for null receiver
+ if (receiver.is_null()) {
+ THROW_0(vmSymbols::java_lang_NullPointerException());
+ }
+ // Check class of receiver against class declaring method
+ if (!receiver->is_a(klass())) {
+ THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "object is not an instance of declaring class");
+ }
+ // target klass is receiver's klass
+ target_klass = KlassHandle(THREAD, receiver->klass());
+ // no need to resolve if method is private or <init>
+ if (reflected_method->is_private() || reflected_method->name() == vmSymbols::object_initializer_name()) {
+ method = reflected_method;
+ } else {
+ // resolve based on the receiver
+ if (instanceKlass::cast(reflected_method->method_holder())->is_interface()) {
+ // resolve interface call
+ if (ReflectionWrapResolutionErrors) {
+ // new default: 6531596
+ // Match resolution errors with those thrown due to reflection inlining
+ // Linktime resolution & IllegalAccessCheck already done by Class.getMethod()
+ method = resolve_interface_call(klass, reflected_method, target_klass, receiver, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ // Method resolution threw an exception; wrap it in an InvocationTargetException
+ oop resolution_exception = PENDING_EXCEPTION;
+ CLEAR_PENDING_EXCEPTION;
+ JavaCallArguments args(Handle(THREAD, resolution_exception));
+ THROW_ARG_0(vmSymbolHandles::java_lang_reflect_InvocationTargetException(),
+ vmSymbolHandles::throwable_void_signature(),
+ &args);
+ }
+ } else {
+ method = resolve_interface_call(klass, reflected_method, target_klass, receiver, CHECK_(NULL));
+ }
+ } else {
+ // if the method can be overridden, we resolve using the vtable index.
+ int index = reflected_method->vtable_index();
+ method = reflected_method;
+ if (index != methodOopDesc::nonvirtual_vtable_index) {
+ // target_klass might be an arrayKlassOop but all vtables start at
+ // the same place. The cast is to avoid virtual call and assertion.
+ instanceKlass* inst = (instanceKlass*)target_klass()->klass_part();
+ method = methodHandle(THREAD, inst->method_at_vtable(index));
+ }
+ if (!method.is_null()) {
+ // Check for abstract methods as well
+ if (method->is_abstract()) {
+ // new default: 6531596
+ if (ReflectionWrapResolutionErrors) {
+ ResourceMark rm(THREAD);
+ Handle h_origexception = Exceptions::new_exception(THREAD,
+ vmSymbols::java_lang_AbstractMethodError(),
+ methodOopDesc::name_and_sig_as_C_string(Klass::cast(target_klass()),
+ method->name(),
+ method->signature()));
+ JavaCallArguments args(h_origexception);
+ THROW_ARG_0(vmSymbolHandles::java_lang_reflect_InvocationTargetException(),
+ vmSymbolHandles::throwable_void_signature(),
+ &args);
+ } else {
+ ResourceMark rm(THREAD);
+ THROW_MSG_0(vmSymbols::java_lang_AbstractMethodError(),
+ methodOopDesc::name_and_sig_as_C_string(Klass::cast(target_klass()),
+ method->name(),
+ method->signature()));
+ }
+ }
+ }
+ }
+ }
+ }
+
+  // I believe this is a ShouldNotGetHere case which would require
+  // an internal vtable bug. If you ever get this, please let Karen know.
+ if (method.is_null()) {
+ ResourceMark rm(THREAD);
+ THROW_MSG_0(vmSymbols::java_lang_NoSuchMethodError(),
+ methodOopDesc::name_and_sig_as_C_string(Klass::cast(klass()),
+ reflected_method->name(),
+ reflected_method->signature()));
+ }
+
+ // In the JDK 1.4 reflection implementation, the security check is
+ // done at the Java level
+ if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
+
+ // Access checking (unless overridden by Method)
+ if (!override) {
+ if (!(klass->is_public() && reflected_method->is_public())) {
+ bool access = Reflection::reflect_check_access(klass(), reflected_method->access_flags(), target_klass(), is_method_invoke, CHECK_NULL);
+ if (!access) {
+ return NULL; // exception
+ }
+ }
+ }
+
+  } // !(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)
+
+ assert(ptypes->is_objArray(), "just checking");
+ int args_len = args.is_null() ? 0 : args->length();
+ // Check number of arguments
+ if (ptypes->length() != args_len) {
+ THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "wrong number of arguments");
+ }
+
+ // Create object to contain parameters for the JavaCall
+ JavaCallArguments java_args(method->size_of_parameters());
+
+ if (!is_static) {
+ java_args.push_oop(receiver);
+ }
+
+ for (int i = 0; i < args_len; i++) {
+ oop type_mirror = ptypes->obj_at(i);
+ oop arg = args->obj_at(i);
+ if (java_lang_Class::is_primitive(type_mirror)) {
+ jvalue value;
+ BasicType ptype = basic_type_mirror_to_basic_type(type_mirror, CHECK_NULL);
+ BasicType atype = unbox_for_primitive(arg, &value, CHECK_NULL);
+ if (ptype != atype) {
+ widen(&value, atype, ptype, CHECK_NULL);
+ }
+ switch (ptype) {
+ case T_BOOLEAN: java_args.push_int(value.z); break;
+ case T_CHAR: java_args.push_int(value.c); break;
+ case T_BYTE: java_args.push_int(value.b); break;
+ case T_SHORT: java_args.push_int(value.s); break;
+ case T_INT: java_args.push_int(value.i); break;
+ case T_LONG: java_args.push_long(value.j); break;
+ case T_FLOAT: java_args.push_float(value.f); break;
+ case T_DOUBLE: java_args.push_double(value.d); break;
+ default:
+ THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "argument type mismatch");
+ }
+ } else {
+ if (arg != NULL) {
+ klassOop k = java_lang_Class::as_klassOop(type_mirror);
+ if (!arg->is_a(k)) {
+ THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "argument type mismatch");
+ }
+ }
+ Handle arg_handle(THREAD, arg); // Create handle for argument
+ java_args.push_oop(arg_handle); // Push handle
+ }
+ }
+
+ assert(java_args.size_of_parameters() == method->size_of_parameters(), "just checking");
+
+  // All oops (including the receiver) are passed in as Handles. Any potential oop is returned
+  // as an oop (i.e., NOT as a handle).
+ JavaValue result(rtype);
+ JavaCalls::call(&result, method, &java_args, THREAD);
+
+ if (HAS_PENDING_EXCEPTION) {
+ // Method threw an exception; wrap it in an InvocationTargetException
+ oop target_exception = PENDING_EXCEPTION;
+ CLEAR_PENDING_EXCEPTION;
+ JavaCallArguments args(Handle(THREAD, target_exception));
+ THROW_ARG_0(vmSymbolHandles::java_lang_reflect_InvocationTargetException(),
+ vmSymbolHandles::throwable_void_signature(),
+ &args);
+ } else {
+ if (rtype == T_BOOLEAN || rtype == T_BYTE || rtype == T_CHAR || rtype == T_SHORT)
+ narrow((jvalue*) result.get_value_addr(), rtype, CHECK_NULL);
+ return box((jvalue*) result.get_value_addr(), rtype, CHECK_NULL);
+ }
+}
+
+
+void Reflection::narrow(jvalue* value, BasicType narrow_type, TRAPS) {
+ switch (narrow_type) {
+ case T_BOOLEAN:
+ value->z = (jboolean) value->i;
+ return;
+ case T_BYTE:
+ value->b = (jbyte) value->i;
+ return;
+ case T_CHAR:
+ value->c = (jchar) value->i;
+ return;
+ case T_SHORT:
+ value->s = (jshort) value->i;
+ return;
+ default:
+ break; // fail
+ }
+ THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "argument type mismatch");
+}
+
+
+BasicType Reflection::basic_type_mirror_to_basic_type(oop basic_type_mirror, TRAPS) {
+ assert(java_lang_Class::is_primitive(basic_type_mirror), "just checking");
+ return java_lang_Class::primitive_type(basic_type_mirror);
+}
+
+
+bool Reflection::match_parameter_types(methodHandle method, objArrayHandle types, int parameter_count, TRAPS) {
+ int types_len = types.is_null() ? 0 : types->length();
+ if (types_len != parameter_count) return false;
+ if (parameter_count > 0) {
+ objArrayHandle method_types = get_parameter_types(method, parameter_count, NULL, CHECK_false);
+ for (int index = 0; index < parameter_count; index++) {
+ if (types->obj_at(index) != method_types->obj_at(index)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
+oop Reflection::new_field(FieldStream* st, TRAPS) {
+ symbolHandle field_name(THREAD, st->name());
+ Handle name = java_lang_String::create_from_symbol(field_name, CHECK_NULL);
+ symbolHandle signature(THREAD, st->signature());
+ Handle type = new_type(signature, st->klass(), CHECK_NULL);
+ Handle rh = java_lang_reflect_Field::create(CHECK_NULL);
+ oop result = rh();
+
+ java_lang_reflect_Field::set_clazz(result, st->klass()->java_mirror());
+ java_lang_reflect_Field::set_slot(result, st->index());
+ java_lang_reflect_Field::set_name(result, name());
+ java_lang_reflect_Field::set_type(result, type());
+ // Note the ACC_ANNOTATION bit, which is a per-class access flag, is never set here.
+ java_lang_reflect_Field::set_modifiers(result, st->access_flags().as_int() & JVM_RECOGNIZED_FIELD_MODIFIERS);
+ java_lang_reflect_Field::set_override(result, false);
+ return result;
+}
+
+
+bool Reflection::resolve_field(Handle field_mirror, Handle& receiver, fieldDescriptor* fd, bool check_final, TRAPS) {
+ if (field_mirror.is_null()) {
+ THROW_(vmSymbols::java_lang_NullPointerException(), false);
+ }
+
+ instanceKlassHandle klass (THREAD, java_lang_Class::as_klassOop(java_lang_reflect_Field::clazz(field_mirror())));
+ int slot = java_lang_reflect_Field::slot(field_mirror());
+
+ // Ensure klass is initialized
+ klass->initialize(CHECK_false);
+ fd->initialize(klass(), slot);
+
+ bool is_static = fd->is_static();
+ KlassHandle receiver_klass;
+
+ if (is_static) {
+ receiver = KlassHandle(THREAD, klass());
+ receiver_klass = klass;
+ } else {
+ // Check object is a non-null instance of declaring class
+ if (receiver.is_null()) {
+ THROW_(vmSymbols::java_lang_NullPointerException(), false);
+ }
+ if (!receiver->is_a(klass())) {
+ THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), "object is not an instance of declaring class", false);
+ }
+ receiver_klass = KlassHandle(THREAD, receiver->klass());
+ }
+
+ // Access checking (unless overridden by Field)
+ if (!java_lang_reflect_Field::override(field_mirror())) {
+ if (!(klass->is_public() && fd->is_public())) {
+ bool access_check = reflect_check_access(klass(), fd->access_flags(), receiver_klass(), false, CHECK_false);
+ if (!access_check) {
+ return false; // exception
+ }
+ }
+ }
+
+ if (check_final && fd->is_final()) {
+ // In 1.3 we always throw an error when attempting to set a final field.
+    // In 1.2.x, this was allowed if the override bit was set by calling Field.setAccessible(true).
+ // We currently maintain backwards compatibility. See bug 4250960.
+ bool strict_final_check = !JDK_Version::is_jdk12x_version();
+ if (strict_final_check || !java_lang_reflect_Field::override(field_mirror())) {
+ THROW_MSG_(vmSymbols::java_lang_IllegalAccessException(), "field is final", false);
+ }
+ }
+ return true;
+}
+
+
+BasicType Reflection::field_get(jvalue* value, fieldDescriptor* fd, Handle receiver) {
+ BasicType field_type = fd->field_type();
+ int offset = fd->offset();
+ switch (field_type) {
+ case T_BOOLEAN:
+ value->z = receiver->bool_field(offset);
+ break;
+ case T_CHAR:
+ value->c = receiver->char_field(offset);
+ break;
+ case T_FLOAT:
+ value->f = receiver->float_field(offset);
+ break;
+ case T_DOUBLE:
+ value->d = receiver->double_field(offset);
+ break;
+ case T_BYTE:
+ value->b = receiver->byte_field(offset);
+ break;
+ case T_SHORT:
+ value->s = receiver->short_field(offset);
+ break;
+ case T_INT:
+ value->i = receiver->int_field(offset);
+ break;
+ case T_LONG:
+ value->j = receiver->long_field(offset);
+ break;
+ case T_OBJECT:
+ case T_ARRAY:
+ value->l = (jobject) receiver->obj_field(offset);
+ break;
+ default:
+ return T_ILLEGAL;
+ }
+ return field_type;
+}
+
+
+void Reflection::field_set(jvalue* value, fieldDescriptor* fd, Handle receiver, BasicType value_type, TRAPS) {
+ BasicType field_type = fd->field_type();
+ if (field_type != value_type) {
+ widen(value, value_type, field_type, CHECK);
+ }
+
+ int offset = fd->offset();
+ switch (field_type) {
+ case T_BOOLEAN:
+ receiver->bool_field_put(offset, value->z);
+ break;
+ case T_CHAR:
+ receiver->char_field_put(offset, value->c);
+ break;
+ case T_FLOAT:
+ receiver->float_field_put(offset, value->f);
+ break;
+ case T_DOUBLE:
+ receiver->double_field_put(offset, value->d);
+ break;
+ case T_BYTE:
+ receiver->byte_field_put(offset, value->b);
+ break;
+ case T_SHORT:
+ receiver->short_field_put(offset, value->s);
+ break;
+ case T_INT:
+ receiver->int_field_put(offset, value->i);
+ break;
+ case T_LONG:
+ receiver->long_field_put(offset, value->j);
+ break;
+ case T_OBJECT:
+ case T_ARRAY: {
+ Handle obj(THREAD, (oop) value->l);
+ if (obj.not_null()) {
+ symbolHandle signature(THREAD, fd->signature());
+ Handle loader (THREAD, fd->loader());
+ Handle protect (THREAD, Klass::cast(fd->field_holder())->protection_domain());
+ klassOop k = SystemDictionary::resolve_or_fail(signature, loader, protect, true, CHECK); // may block
+ if (!obj->is_a(k)) {
+ THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "field type mismatch");
+ }
+ }
+ receiver->obj_field_put(offset, obj());
+ break;
+ }
+ default:
+ THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "field type mismatch");
+ }
+}
+
+
+oop Reflection::reflect_field(oop mirror, symbolOop field_name, jint which, TRAPS) {
+ // Exclude primitive types and array types
+ if (java_lang_Class::is_primitive(mirror)) return NULL;
+ if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) return NULL;
+
+ instanceKlassHandle k(THREAD, java_lang_Class::as_klassOop(mirror));
+ bool local_fields_only = (which == DECLARED);
+
+ // Ensure class is linked
+ k->link_class(CHECK_NULL);
+
+ // Search class and interface fields
+ for (FieldStream st(k, local_fields_only, false); !st.eos(); st.next()) {
+ if (st.name() == field_name) {
+ if (local_fields_only || st.access_flags().is_public()) {
+ return new_field(&st, THREAD);
+ }
+ }
+ }
+
+ return NULL;
+}
+
+
+objArrayOop Reflection::reflect_fields(oop mirror, jint which, TRAPS) {
+ // Exclude primitive types and array types
+ if (java_lang_Class::is_primitive(mirror)
+ || Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
+ symbolHandle name = vmSymbolHandles::java_lang_reflect_Field();
+ klassOop klass = SystemDictionary::resolve_or_fail(name, true, CHECK_NULL);
+ return oopFactory::new_objArray(klass, 0, CHECK_NULL); // Return empty array
+ }
+
+ instanceKlassHandle k(THREAD, java_lang_Class::as_klassOop(mirror));
+
+ // Ensure class is linked
+ k->link_class(CHECK_NULL);
+
+ bool local_fields_only = (which == DECLARED);
+ int count = 0;
+ { // Compute fields count for class and interface fields
+ for (FieldStream st(k, local_fields_only, false); !st.eos(); st.next()) {
+ if (local_fields_only || st.access_flags().is_public()) {
+ count++;
+ }
+ }
+ }
+
+ // Allocate result
+ symbolHandle name = vmSymbolHandles::java_lang_reflect_Field();
+ klassOop klass = SystemDictionary::resolve_or_fail(name, true, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
+ objArrayHandle result (THREAD, r);
+
+ // Fill in results backwards
+ {
+ for (FieldStream st(k, local_fields_only, false); !st.eos(); st.next()) {
+ if (local_fields_only || st.access_flags().is_public()) {
+ oop field = new_field(&st, CHECK_NULL);
+ result->obj_at_put(--count, field);
+ }
+ }
+ assert(count == 0, "just checking");
+ }
+ return result();
+}
+
+
+oop Reflection::reflect_method(oop mirror, symbolHandle method_name, objArrayHandle types, jint which, TRAPS) {
+ if (java_lang_Class::is_primitive(mirror)) return NULL;
+ klassOop klass = java_lang_Class::as_klassOop(mirror);
+ if (Klass::cast(klass)->oop_is_array() && which == MEMBER_DECLARED) return NULL;
+
+ if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
+ klass = SystemDictionary::object_klass();
+ }
+ instanceKlassHandle h_k(THREAD, klass);
+
+ // Ensure klass is linked (need not be initialized)
+ h_k->link_class(CHECK_NULL);
+
+ // For interfaces include static initializers under jdk1.2.x (since classic does that)
+ bool include_clinit = JDK_Version::is_jdk12x_version() && h_k->is_interface();
+
+ switch (which) {
+ case MEMBER_PUBLIC:
+ // First the public non-static methods (works if method holder is an interface)
+ // Note that we can ignore checks for overridden methods, since we go up the hierarchy.
+ {
+ for (MethodStream st(h_k, false, false); !st.eos(); st.next()) {
+ methodHandle m(THREAD, st.method());
+ // For interfaces include static initializers since classic does that!
+ if (method_name() == m->name() && (include_clinit || (m->is_public() && !m->is_static() && !m->is_initializer()))) {
+ symbolHandle signature(THREAD, m->signature());
+ bool parameter_match = match_parameter_types(m, types, ArgumentCount(signature).size(), CHECK_NULL);
+ if (parameter_match) {
+ return new_method(m, false, false, THREAD);
+ }
+ }
+ }
+ }
+ // Then the public static methods (works if method holder is an interface)
+ {
+ for (MethodStream st(h_k, false, false); !st.eos(); st.next()) {
+ methodHandle m(THREAD, st.method());
+ if (method_name() == m->name() && m->is_public() && m->is_static() && !m->is_initializer()) {
+ symbolHandle signature(THREAD, m->signature());
+ bool parameter_match = match_parameter_types(m, types, ArgumentCount(signature).size(), CHECK_NULL);
+ if (parameter_match) {
+ return new_method(m, false, false, THREAD);
+ }
+ }
+ }
+ }
+ break;
+ case MEMBER_DECLARED:
+ // All local methods
+ {
+ for (MethodStream st(h_k, true, true); !st.eos(); st.next()) {
+ methodHandle m(THREAD, st.method());
+ if (method_name() == m->name() && !m->is_initializer()) {
+ symbolHandle signature(THREAD, m->signature());
+ bool parameter_match = match_parameter_types(m, types, ArgumentCount(signature).size(), CHECK_NULL);
+ if (parameter_match) {
+ return new_method(m, false, false, THREAD);
+ }
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return NULL;
+}
+
+
+objArrayOop Reflection::reflect_methods(oop mirror, jint which, TRAPS) {
+ // Exclude primitive types
+ if (java_lang_Class::is_primitive(mirror) ||
+ (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array() && (which == MEMBER_DECLARED))) {
+ klassOop klass = SystemDictionary::reflect_method_klass();
+ return oopFactory::new_objArray(klass, 0, CHECK_NULL); // Return empty array
+ }
+
+ klassOop klass = java_lang_Class::as_klassOop(mirror);
+ if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
+ klass = SystemDictionary::object_klass();
+ }
+ instanceKlassHandle h_k(THREAD, klass);
+
+ // Ensure klass is linked (need not be initialized)
+ h_k->link_class(CHECK_NULL);
+
+ // We search the (super)interfaces only if h_k is an interface itself
+ bool is_interface = h_k->is_interface();
+
+ // For interfaces include static initializers under jdk1.2.x (since classic does that)
+ bool include_clinit = JDK_Version::is_jdk12x_version() && is_interface;
+
+ switch (which) {
+ case MEMBER_PUBLIC:
+ {
+
+ // Count public methods (non-static and static)
+ int count = 0;
+ {
+ for (MethodStream st(h_k, false, false); !st.eos(); st.next()) {
+ methodOop m = st.method();
+ // For interfaces include static initializers since classic does that!
+ if (include_clinit || (!m->is_initializer() && m->is_public() && !m->is_overridden_in(h_k()))) {
+ count++;
+ }
+ }
+ }
+
+ // Allocate result
+ klassOop klass = SystemDictionary::reflect_method_klass();
+ objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
+ objArrayHandle h_result (THREAD, r);
+
+ // Fill in results backwards
+ {
+ // First the non-static public methods
+ for (MethodStream st(h_k, false, false); !st.eos(); st.next()) {
+ methodHandle m (THREAD, st.method());
+ if (!m->is_static() && !m->is_initializer() && m->is_public() && !m->is_overridden_in(h_k())) {
+ oop method = new_method(m, false, false, CHECK_NULL);
+ if (method == NULL) {
+ return NULL;
+ } else {
+ h_result->obj_at_put(--count, method);
+ }
+ }
+ }
+ }
+ {
+ // Then the static public methods
+ for (MethodStream st(h_k, false, !is_interface); !st.eos(); st.next()) {
+ methodHandle m (THREAD, st.method());
+ if (m->is_static() && (include_clinit || (!m->is_initializer()) && m->is_public() && !m->is_overridden_in(h_k()))) {
+ oop method = new_method(m, false, false, CHECK_NULL);
+ if (method == NULL) {
+ return NULL;
+ } else {
+ h_result->obj_at_put(--count, method);
+ }
+ }
+ }
+ }
+
+ assert(count == 0, "just checking");
+ return h_result();
+ }
+
+ case MEMBER_DECLARED:
+ {
+ // Count all methods
+ int count = 0;
+ {
+ for (MethodStream st(h_k, true, !is_interface); !st.eos(); st.next()) {
+ methodOop m = st.method();
+ if (!m->is_initializer()) {
+ count++;
+ }
+ }
+ }
+ // Allocate result
+ klassOop klass = SystemDictionary::reflect_method_klass();
+ objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
+ objArrayHandle h_result (THREAD, r);
+
+ // Fill in results backwards
+ {
+ for (MethodStream st(h_k, true, true); !st.eos(); st.next()) {
+ methodHandle m (THREAD, st.method());
+ if (!m->is_initializer()) {
+ oop method = new_method(m, false, false, CHECK_NULL);
+ if (method == NULL) {
+ return NULL;
+ } else {
+ h_result->obj_at_put(--count, method);
+ }
+ }
+ }
+ }
+ assert(count == 0, "just checking");
+ return h_result();
+ }
+ }
+ ShouldNotReachHere();
+ return NULL;
+}
+
+
+oop Reflection::reflect_constructor(oop mirror, objArrayHandle types, jint which, TRAPS) {
+
+ // Exclude primitive, interface and array types
+ bool prim = java_lang_Class::is_primitive(mirror);
+ Klass* klass = prim ? NULL : Klass::cast(java_lang_Class::as_klassOop(mirror));
+ if (prim || klass->is_interface() || klass->oop_is_array()) return NULL;
+
+ // Must be instance klass
+ instanceKlassHandle h_k(THREAD, java_lang_Class::as_klassOop(mirror));
+
+ // Ensure klass is linked (need not be initialized)
+ h_k->link_class(CHECK_NULL);
+
+ bool local_only = (which == MEMBER_DECLARED);
+ for (MethodStream st(h_k, true, true); !st.eos(); st.next()) {
+ methodHandle m(THREAD, st.method());
+ if (m->name() == vmSymbols::object_initializer_name() && (local_only || m->is_public())) {
+ symbolHandle signature(THREAD, m->signature());
+ bool parameter_match = match_parameter_types(m, types, ArgumentCount(signature).size(), CHECK_NULL);
+ if (parameter_match) {
+ return new_constructor(m, THREAD);
+ }
+ }
+ }
+
+ return NULL;
+}
+
+
+objArrayOop Reflection::reflect_constructors(oop mirror, jint which, TRAPS) {
+ // Exclude primitive, interface and array types
+ bool prim = java_lang_Class::is_primitive(mirror);
+ Klass* k = prim ? NULL : Klass::cast(java_lang_Class::as_klassOop(mirror));
+ if (prim || k->is_interface() || k->oop_is_array()) {
+ return oopFactory::new_objArray(SystemDictionary::reflect_constructor_klass(), 0, CHECK_NULL); // Return empty array
+ }
+
+ // Must be instanceKlass at this point
+ instanceKlassHandle h_k(THREAD, java_lang_Class::as_klassOop(mirror));
+
+ // Ensure klass is linked (need not be initialized)
+ h_k->link_class(CHECK_NULL);
+
+ bool local_only = (which == MEMBER_DECLARED);
+ int count = 0;
+ {
+ for (MethodStream st(h_k, true, true); !st.eos(); st.next()) {
+ methodOop m = st.method();
+ if (m->name() == vmSymbols::object_initializer_name() && (local_only || m->is_public())) {
+ count++;
+ }
+ }
+ }
+
+ // Allocate result
+ symbolHandle name = vmSymbolHandles::java_lang_reflect_Constructor();
+ klassOop klass = SystemDictionary::resolve_or_fail(name, true, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
+ objArrayHandle h_result (THREAD, r);
+
+ // Fill in results backwards
+ {
+ for (MethodStream st(h_k, true, true); !st.eos(); st.next()) {
+ methodHandle m (THREAD, st.method());
+ if (m->name() == vmSymbols::object_initializer_name() && (local_only || m->is_public())) {
+ oop constr = new_constructor(m, CHECK_NULL);
+ if (constr == NULL) {
+ return NULL;
+ } else {
+ h_result->obj_at_put(--count, constr);
+ }
+ }
+ }
+ assert(count == 0, "just checking");
+ }
+ return h_result();
+}
+
+
+// This would be nicer if, say, java.lang.reflect.Method were a subclass
+// of java.lang.reflect.Constructor.
+
+oop Reflection::invoke_method(oop method_mirror, Handle receiver, objArrayHandle args, TRAPS) {
+ oop mirror = java_lang_reflect_Method::clazz(method_mirror);
+ int slot = java_lang_reflect_Method::slot(method_mirror);
+ bool override = java_lang_reflect_Method::override(method_mirror) != 0;
+ objArrayHandle ptypes(THREAD, objArrayOop(java_lang_reflect_Method::parameter_types(method_mirror)));
+
+ oop return_type_mirror = java_lang_reflect_Method::return_type(method_mirror);
+ BasicType rtype;
+ if (java_lang_Class::is_primitive(return_type_mirror)) {
+ rtype = basic_type_mirror_to_basic_type(return_type_mirror, CHECK_NULL);
+ } else {
+ rtype = T_OBJECT;
+ }
+
+ instanceKlassHandle klass(THREAD, java_lang_Class::as_klassOop(mirror));
+ if (!klass->methods()->is_within_bounds(slot)) {
+ THROW_MSG_0(vmSymbols::java_lang_InternalError(), "invoke");
+ }
+ methodHandle method(THREAD, methodOop(klass->methods()->obj_at(slot)));
+
+ return invoke(klass, method, receiver, override, ptypes, rtype, args, true, THREAD);
+}
+
+
+oop Reflection::invoke_constructor(oop constructor_mirror, objArrayHandle args, TRAPS) {
+ oop mirror = java_lang_reflect_Constructor::clazz(constructor_mirror);
+ int slot = java_lang_reflect_Constructor::slot(constructor_mirror);
+ bool override = java_lang_reflect_Constructor::override(constructor_mirror) != 0;
+ objArrayHandle ptypes(THREAD, objArrayOop(java_lang_reflect_Constructor::parameter_types(constructor_mirror)));
+
+ instanceKlassHandle klass(THREAD, java_lang_Class::as_klassOop(mirror));
+ if (!klass->methods()->is_within_bounds(slot)) {
+ THROW_MSG_0(vmSymbols::java_lang_InternalError(), "invoke");
+ }
+ methodHandle method(THREAD, methodOop(klass->methods()->obj_at(slot)));
+ assert(method->name() == vmSymbols::object_initializer_name(), "invalid constructor");
+
+  // Make sure klass gets initialized
+ klass->initialize(CHECK_NULL);
+
+ // Create new instance (the receiver)
+ klass->check_valid_for_instantiation(false, CHECK_NULL);
+ Handle receiver = klass->allocate_instance_handle(CHECK_NULL);
+
+ // Ignore result from call and return receiver
+ invoke(klass, method, receiver, override, ptypes, T_VOID, args, false, CHECK_NULL);
+ return receiver();
+}
+
+
+#endif /* SUPPORT_OLD_REFLECTION */
diff --git a/src/share/vm/runtime/reflection.hpp b/src/share/vm/runtime/reflection.hpp
new file mode 100644
index 000000000..4e8054af5
--- /dev/null
+++ b/src/share/vm/runtime/reflection.hpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Class Reflection contains utility methods needed for implementing the
+// reflection api.
+//
+// Used by functions in the JVM interface.
+//
+// NOTE that in JDK 1.4 most of reflection is now implemented in Java
+// using dynamic bytecode generation. The Array class has not yet been
+// rewritten using bytecodes; if it were, most of the rest of this
+// class could go away, as well as a few more entry points in jvm.cpp.
+
+class FieldStream;
+
+class Reflection: public AllStatic {
+ private:
+ // Access checking
+ static bool reflect_check_access(klassOop field_class, AccessFlags acc, klassOop target_class, bool is_method_invoke, TRAPS);
+
+ // Conversion
+ static klassOop basic_type_mirror_to_arrayklass(oop basic_type_mirror, TRAPS);
+ static oop basic_type_arrayklass_to_mirror(klassOop basic_type_arrayklass, TRAPS);
+
+ static objArrayHandle get_parameter_types(methodHandle method, int parameter_count, oop* return_type, TRAPS);
+ static objArrayHandle get_exception_types(methodHandle method, TRAPS);
+ // Creating new java.lang.reflect.xxx wrappers
+ static Handle new_type(symbolHandle signature, KlassHandle k, TRAPS);
+
+ public:
+ // Constants defined by java reflection api classes
+ enum SomeConstants {
+ PUBLIC = 0,
+ DECLARED = 1,
+ MEMBER_PUBLIC = 0,
+ MEMBER_DECLARED = 1,
+ MAX_DIM = 255
+ };
+
+ // Boxing. Returns boxed value of appropriate type. Throws IllegalArgumentException.
+ static oop box(jvalue* v, BasicType type, TRAPS);
+ // Unboxing. Returns type code and sets value.
+ static BasicType unbox_for_primitive(oop boxed_value, jvalue* value, TRAPS);
+ static BasicType unbox_for_regular_object(oop boxed_value, jvalue* value);
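+  //
+  // Illustrative sketch of boxing a primitive and unboxing it again with the
+  // declarations above, assuming the usual HotSpot TRAPS/CHECK_NULL
+  // conventions in the calling code:
+  //
+  //   jvalue v; v.i = 42;
+  //   oop boxed = Reflection::box(&v, T_INT, CHECK_NULL);   // java.lang.Integer
+  //   jvalue out;
+  //   BasicType t = Reflection::unbox_for_primitive(boxed, &out, CHECK_NULL);
+  //   assert(t == T_INT && out.i == 42, "boxing round trip");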
+
+ // Widening of basic types. Throws IllegalArgumentException.
+ static void widen(jvalue* value, BasicType current_type, BasicType wide_type, TRAPS);
+
+ // Reflective array access. Returns type code. Throws ArrayIndexOutOfBoundsException.
+ static BasicType array_get(jvalue* value, arrayOop a, int index, TRAPS);
+ static void array_set(jvalue* value, arrayOop a, int index, BasicType value_type, TRAPS);
+ // Returns mirror on array element type (NULL for basic type arrays and non-arrays).
+ static oop array_component_type(oop mirror, TRAPS);
+
+ // Object creation
+ static arrayOop reflect_new_array(oop element_mirror, jint length, TRAPS);
+ static arrayOop reflect_new_multi_array(oop element_mirror, typeArrayOop dimensions, TRAPS);
+
+ // Verification
+ static bool verify_class_access(klassOop current_class, klassOop new_class, bool classloader_only);
+
+ static bool verify_field_access(klassOop current_class,
+ klassOop resolved_class,
+ klassOop field_class,
+ AccessFlags access,
+ bool classloader_only,
+ bool protected_restriction = false);
+ static bool is_same_class_package(klassOop class1, klassOop class2);
+
+ static bool can_relax_access_check_for(
+ klassOop accessor, klassOop accesee, bool classloader_only);
+
+ // inner class reflection
+ static void check_for_inner_class(instanceKlassHandle outer, instanceKlassHandle inner, TRAPS);
+
+ //
+ // Support for reflection based on dynamic bytecode generation (JDK 1.4)
+ //
+
+ // Create a java.lang.reflect.Method object based on a method
+ static oop new_method(methodHandle method, bool intern_name, bool for_constant_pool_access, TRAPS);
+ // Create a java.lang.reflect.Constructor object based on a method
+ static oop new_constructor(methodHandle method, TRAPS);
+ // Create a java.lang.reflect.Field object based on a field descriptor
+ static oop new_field(fieldDescriptor* fd, bool intern_name, TRAPS);
+
+ //---------------------------------------------------------------------------
+ //
+ // Support for old native code-based reflection (pre-JDK 1.4)
+ //
+ // NOTE: the method and constructor invocation code is still used
+ // for startup time reasons; see reflectionCompat.hpp.
+ //
+ //---------------------------------------------------------------------------
+
+#ifdef SUPPORT_OLD_REFLECTION
+private:
+ // method resolution for invoke
+ static methodHandle resolve_interface_call(instanceKlassHandle klass, methodHandle method, KlassHandle recv_klass, Handle receiver, TRAPS);
+ // Method call (shared by invoke_method and invoke_constructor)
+ static oop invoke(instanceKlassHandle klass, methodHandle method, Handle receiver, bool override, objArrayHandle ptypes, BasicType rtype, objArrayHandle args, bool is_method_invoke, TRAPS);
+
+  // Narrowing of basic types. Used to create correct jvalues for
+  // boolean, byte, char and short return values from the interpreter,
+  // which are returned as ints. Throws IllegalArgumentException.
+ static void narrow(jvalue* value, BasicType narrow_type, TRAPS);
+
+ // Conversion
+ static BasicType basic_type_mirror_to_basic_type(oop basic_type_mirror, TRAPS);
+
+ static bool match_parameter_types(methodHandle method, objArrayHandle types, int parameter_count, TRAPS);
+ // Creating new java.lang.reflect.xxx wrappers
+ static oop new_field(FieldStream* st, TRAPS);
+
+public:
+ // Field lookup and verification.
+ static bool resolve_field(Handle field_mirror, Handle& receiver, fieldDescriptor* fd, bool check_final, TRAPS);
+
+ // Reflective field access. Returns type code. Throws IllegalArgumentException.
+ static BasicType field_get(jvalue* value, fieldDescriptor* fd, Handle receiver);
+ static void field_set(jvalue* value, fieldDescriptor* fd, Handle receiver, BasicType value_type, TRAPS);
+
+ // Reflective lookup of fields. Returns java.lang.reflect.Field instances.
+ static oop reflect_field(oop mirror, symbolOop field_name, jint which, TRAPS);
+ static objArrayOop reflect_fields(oop mirror, jint which, TRAPS);
+
+ // Reflective lookup of methods. Returns java.lang.reflect.Method instances.
+ static oop reflect_method(oop mirror, symbolHandle method_name, objArrayHandle types, jint which, TRAPS);
+ static objArrayOop reflect_methods(oop mirror, jint which, TRAPS);
+
+ // Reflective lookup of constructors. Returns java.lang.reflect.Constructor instances.
+ static oop reflect_constructor(oop mirror, objArrayHandle types, jint which, TRAPS);
+ static objArrayOop reflect_constructors(oop mirror, jint which, TRAPS);
+
+  // Method invocation through java.lang.reflect.Method
+  static oop invoke_method(oop method_mirror, Handle receiver, objArrayHandle args, TRAPS);
+  // Method invocation through java.lang.reflect.Constructor
+ static oop invoke_constructor(oop method_mirror, objArrayHandle args, TRAPS);
+#endif /* SUPPORT_OLD_REFLECTION */
+
+};
diff --git a/src/share/vm/runtime/reflectionCompat.hpp b/src/share/vm/runtime/reflectionCompat.hpp
new file mode 100644
index 000000000..fba01e487
--- /dev/null
+++ b/src/share/vm/runtime/reflectionCompat.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2001 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// During the development of the JDK 1.4 reflection implementation
+// based on dynamic bytecode generation, it was hoped that the bulk of
+// the native code for reflection could be removed. Unfortunately
+// there is currently a significant cost associated with loading the
+// stub classes which impacts startup time. Until this cost can be
+// reduced, the JVM entry points JVM_InvokeMethod and
+// JVM_NewInstanceFromConstructor are still needed; these and their
+// dependents currently constitute the bulk of the native code for
+// reflection. If this cost is reduced in the future, the
+// NativeMethodAccessorImpl and NativeConstructorAccessorImpl classes
+// can be removed from sun.reflect and all of the code guarded by this
+// flag removed from the product build. (Non-product builds,
+// specifically the "optimized" target, would retain the code so they
+// could be dropped into earlier JDKs for comparative benchmarking.)
+
+//#ifndef PRODUCT
+# define SUPPORT_OLD_REFLECTION
+//#endif
diff --git a/src/share/vm/runtime/reflectionUtils.cpp b/src/share/vm/runtime/reflectionUtils.cpp
new file mode 100644
index 000000000..7ea22e112
--- /dev/null
+++ b/src/share/vm/runtime/reflectionUtils.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_reflectionUtils.cpp.incl"
+
+KlassStream::KlassStream(instanceKlassHandle klass, bool local_only, bool classes_only) {
+ _klass = klass;
+ if (classes_only) {
+ _interfaces = Universe::the_empty_system_obj_array();
+ } else {
+ _interfaces = klass->transitive_interfaces();
+ }
+ _interface_index = _interfaces->length();
+ _local_only = local_only;
+ _classes_only = classes_only;
+}
+
+bool KlassStream::eos() {
+ if (index() >= 0) return false;
+ if (_local_only) return true;
+ if (!_klass->is_interface() && _klass->super() != NULL) {
+ // go up superclass chain (not for interfaces)
+ _klass = _klass->super();
+ } else {
+ if (_interface_index > 0) {
+ _klass = klassOop(_interfaces->obj_at(--_interface_index));
+ } else {
+ return true;
+ }
+ }
+ _index = length();
+ next();
+ return eos();
+}
+
+
+GrowableArray<FilteredField*> *FilteredFieldsMap::_filtered_fields =
+ new (ResourceObj::C_HEAP) GrowableArray<FilteredField*>(3,true);
+
+
+void FilteredFieldsMap::initialize() {
+ int offset;
+ offset = java_lang_Throwable::get_backtrace_offset();
+ _filtered_fields->append(new FilteredField(SystemDictionary::throwable_klass(), offset));
+  // The latest version of the VM may be used with an old JDK.
+ if (JDK_Version::is_gte_jdk16x_version()) {
+    // The following class fields do not exist in
+    // previous versions of the JDK.
+ offset = sun_reflect_ConstantPool::cp_oop_offset();
+ _filtered_fields->append(new FilteredField(SystemDictionary::reflect_constant_pool_klass(), offset));
+ offset = sun_reflect_UnsafeStaticFieldAccessorImpl::base_offset();
+ _filtered_fields->append(new FilteredField(SystemDictionary::reflect_unsafe_static_field_accessor_impl_klass(), offset));
+ }
+}
+
+int FilteredFieldStream::field_count() {
+ int numflds = 0;
+ for (;!eos(); next()) {
+ numflds++;
+ }
+ return numflds;
+}
diff --git a/src/share/vm/runtime/reflectionUtils.hpp b/src/share/vm/runtime/reflectionUtils.hpp
new file mode 100644
index 000000000..b65c21522
--- /dev/null
+++ b/src/share/vm/runtime/reflectionUtils.hpp
@@ -0,0 +1,211 @@
+/*
+ * Copyright 1999-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// A KlassStream is an abstract stream for streaming over self, superclasses
+// and (super)interfaces. Streaming is done in reverse order (subclasses first,
+// interfaces last).
+//
+// for (KlassStream st(k, false, false); !st.eos(); st.next()) {
+// klassOop k = st.klass();
+// ...
+// }
+
+class KlassStream VALUE_OBJ_CLASS_SPEC {
+ protected:
+ instanceKlassHandle _klass; // current klass/interface iterated over
+ objArrayHandle _interfaces; // transitive interfaces for initial class
+ int _interface_index; // current interface being processed
+ bool _local_only; // process initial class/interface only
+ bool _classes_only; // process classes only (no interfaces)
+ int _index;
+
+ virtual int length() const = 0;
+
+ public:
+ // constructor
+ KlassStream(instanceKlassHandle klass, bool local_only, bool classes_only);
+
+ // testing
+ bool eos();
+
+ // iterating
+ virtual void next() = 0;
+
+ // accessors
+ instanceKlassHandle klass() const { return _klass; }
+ int index() const { return _index; }
+};
+
+
+// A MethodStream streams over all methods in a class, superclasses and (super)interfaces.
+// Streaming is done in reverse order (subclasses first, methods in reverse order)
+// Usage:
+//
+// for (MethodStream st(k, false, false); !st.eos(); st.next()) {
+// methodOop m = st.method();
+// ...
+// }
+
+class MethodStream : public KlassStream {
+ private:
+ int length() const { return methods()->length(); }
+ objArrayOop methods() const { return _klass->methods(); }
+ public:
+ MethodStream(instanceKlassHandle klass, bool local_only, bool classes_only)
+ : KlassStream(klass, local_only, classes_only) {
+ _index = length();
+ next();
+ }
+
+ void next() { _index--; }
+ methodOop method() const { return methodOop(methods()->obj_at(index())); }
+};
+
+
+// A FieldStream streams over all fields in a class, superclasses and (super)interfaces.
+// Streaming is done in reverse order (subclasses first, fields in reverse order)
+// Usage:
+//
+// for (FieldStream st(k, false, false); !st.eos(); st.next()) {
+// symbolOop field_name = st.name();
+// ...
+// }
+
+
+class FieldStream : public KlassStream {
+ private:
+ int length() const { return fields()->length(); }
+ constantPoolOop constants() const { return _klass->constants(); }
+ protected:
+ typeArrayOop fields() const { return _klass->fields(); }
+ public:
+ FieldStream(instanceKlassHandle klass, bool local_only, bool classes_only)
+ : KlassStream(klass, local_only, classes_only) {
+ _index = length();
+ next();
+ }
+
+ void next() { _index -= instanceKlass::next_offset; }
+
+ // Accessors for current field
+ AccessFlags access_flags() const {
+ AccessFlags flags;
+ flags.set_flags(fields()->ushort_at(index() + instanceKlass::access_flags_offset));
+ return flags;
+ }
+ symbolOop name() const {
+ int name_index = fields()->ushort_at(index() + instanceKlass::name_index_offset);
+ return constants()->symbol_at(name_index);
+ }
+ symbolOop signature() const {
+ int signature_index = fields()->ushort_at(index() +
+ instanceKlass::signature_index_offset);
+ return constants()->symbol_at(signature_index);
+ }
+ // missing: initval()
+ int offset() const {
+ return _klass->offset_from_fields( index() );
+ }
+};
+
+class FilteredField {
+ private:
+ klassOop _klass;
+ int _field_offset;
+
+ public:
+ FilteredField(klassOop klass, int field_offset) {
+ _klass = klass;
+ _field_offset = field_offset;
+ }
+ klassOop klass() { return _klass; }
+ oop* klass_addr() { return (oop*) &_klass; }
+ int field_offset() { return _field_offset; }
+};
+
+class FilteredFieldsMap : AllStatic {
+ private:
+ static GrowableArray<FilteredField *> *_filtered_fields;
+ public:
+ static void initialize();
+ static bool is_filtered_field(klassOop klass, int field_offset) {
+ for (int i=0; i < _filtered_fields->length(); i++) {
+ if (klass == _filtered_fields->at(i)->klass() &&
+ field_offset == _filtered_fields->at(i)->field_offset()) {
+ return true;
+ }
+ }
+ return false;
+ }
+ static int filtered_fields_count(klassOop klass, bool local_only) {
+ int nflds = 0;
+ for (int i=0; i < _filtered_fields->length(); i++) {
+ if (local_only && klass == _filtered_fields->at(i)->klass()) {
+ nflds++;
+ } else if (klass->klass_part()->is_subtype_of(_filtered_fields->at(i)->klass())) {
+ nflds++;
+ }
+ }
+ return nflds;
+ }
+ // GC support.
+ static void klasses_oops_do(OopClosure* f) {
+ for (int i = 0; i < _filtered_fields->length(); i++) {
+ f->do_oop((oop*)_filtered_fields->at(i)->klass_addr());
+ }
+ }
+};
+
+
+// A FilteredFieldStream streams over all fields in a class, superclasses and
+// (super)interfaces. Streaming is done in reverse order (subclasses first,
+// fields in reverse order)
+//
+// Usage:
+//
+// for (FilteredFieldStream st(k, false, false); !st.eos(); st.next()) {
+// symbolOop field_name = st.name();
+// ...
+// }
+
+class FilteredFieldStream : public FieldStream {
+ private:
+ int _filtered_fields_count;
+ bool has_filtered_field() { return (_filtered_fields_count > 0); }
+
+ public:
+ FilteredFieldStream(instanceKlassHandle klass, bool local_only, bool classes_only)
+ : FieldStream(klass, local_only, classes_only) {
+ _filtered_fields_count = FilteredFieldsMap::filtered_fields_count((klassOop)klass(), local_only);
+ }
+ int field_count();
+ void next() {
+ _index -= instanceKlass::next_offset;
+ if (has_filtered_field()) {
+ while (_index >=0 && FilteredFieldsMap::is_filtered_field((klassOop)_klass(), offset())) {
+ _index -= instanceKlass::next_offset;
+ }
+ }
+ }
+};
diff --git a/src/share/vm/runtime/registerMap.hpp b/src/share/vm/runtime/registerMap.hpp
new file mode 100644
index 000000000..84d1c42a8
--- /dev/null
+++ b/src/share/vm/runtime/registerMap.hpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class JavaThread;
+
+//
+// RegisterMap
+//
+// A companion structure used for stack traversal. The RegisterMap contains
+// misc. information needed in order to do correct stack traversal of stack
+// frames. Hence, it must always be passed in as an argument to
+// frame::sender(RegisterMap*).
+//
+// In particular,
+// 1) It provides access to the thread to which the stack belongs. The
+//    thread object is needed in order to get the sender of a deoptimized frame.
+//
+// 2) It is used to pass information from a callee frame to its caller
+// frame about how the frame should be traversed. This is used to let
+// the caller frame take care of calling oops-do of out-going
+// arguments, when the callee frame is not instantiated yet. This
+// happens, e.g., when a compiled frame calls into
+// resolve_virtual_call. (Hence, it is critical that the same
+// RegisterMap object is used for the entire stack walk. Normally,
+// this is hidden by using the StackFrameStream.) This is used when
+// doing follow_oops and oops_do.
+//
+// 3) The RegisterMap keeps track of the values of callee-saved registers
+//    from frame to frame (hence, the name). For some stack traversals the
+//    values of the callee-saved registers do not matter, e.g., if you only
+//    need static properties such as the frame type and pc. Updating of the
+//    RegisterMap can be turned off by instantiating the register map as:
+//    RegisterMap map(thread, false);
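+//
+// Illustrative sketch of a typical stack walk that reuses a single
+// RegisterMap for the whole traversal, as required above (assumes the usual
+// JavaThread::last_frame and frame::sender signatures):
+//
+//   RegisterMap map(thread, true);   // keep callee-saved registers up to date
+//   for (frame fr = thread->last_frame();
+//        !fr.is_first_frame();
+//        fr = fr.sender(&map)) {
+//     // inspect fr; the map carries register state from callee to caller
+//   }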
+
+class RegisterMap : public StackObj {
+ public:
+ typedef julong LocationValidType;
+ enum {
+ reg_count = ConcreteRegisterImpl::number_of_registers,
+ location_valid_type_size = sizeof(LocationValidType)*8,
+ location_valid_size = (reg_count+location_valid_type_size-1)/location_valid_type_size
+ };
+ private:
+ intptr_t* _location[reg_count]; // Location of registers (intptr_t* looks better than address in the debugger)
+ LocationValidType _location_valid[location_valid_size];
+ bool _include_argument_oops; // Should include argument_oop marked locations for compiler
+ JavaThread* _thread; // Reference to current thread
+  bool          _update_map;            // Tells if the register map needs to be
+ // updated when traversing the stack
+
+#ifdef ASSERT
+ void check_location_valid();
+#else
+ void check_location_valid() {}
+#endif
+
+ public:
+ debug_only(intptr_t* _update_for_id;) // Assert that RegisterMap is not updated twice for same frame
+ RegisterMap(JavaThread *thread, bool update_map = true);
+ RegisterMap(const RegisterMap* map);
+
+ address location(VMReg reg) const {
+ int index = reg->value() / location_valid_type_size;
+ assert(0 <= reg->value() && reg->value() < reg_count, "range check");
+ assert(0 <= index && index < location_valid_size, "range check");
+ if (_location_valid[index] & ((LocationValidType)1 << (reg->value() % location_valid_type_size))) {
+ return (address) _location[reg->value()];
+ } else {
+ return pd_location(reg);
+ }
+ }
+
+ void set_location(VMReg reg, address loc) {
+ int index = reg->value() / location_valid_type_size;
+ assert(0 <= reg->value() && reg->value() < reg_count, "range check");
+ assert(0 <= index && index < location_valid_size, "range check");
+ assert(_update_map, "updating map that does not need updating");
+ _location[reg->value()] = (intptr_t*) loc;
+ _location_valid[index] |= ((LocationValidType)1 << (reg->value() % location_valid_type_size));
+ check_location_valid();
+ }
+
+ // Called by an entry frame.
+ void clear();
+
+ bool include_argument_oops() const { return _include_argument_oops; }
+ void set_include_argument_oops(bool f) { _include_argument_oops = f; }
+
+ JavaThread *thread() const { return _thread; }
+ bool update_map() const { return _update_map; }
+
+ void print_on(outputStream* st) const;
+ void print() const;
+
+ // the following contains the definition of pd_xxx methods
+# include "incls/_registerMap_pd.hpp.incl"
+};
diff --git a/src/share/vm/runtime/relocator.cpp b/src/share/vm/runtime/relocator.cpp
new file mode 100644
index 000000000..4696ac6af
--- /dev/null
+++ b/src/share/vm/runtime/relocator.cpp
@@ -0,0 +1,647 @@
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_relocator.cpp.incl"
+
+#define MAX_METHOD_LENGTH 65535
+
+#define MAX_SHORT ((1 << 15) - 1)
+#define MIN_SHORT (- (1 << 15))
+
+// Encapsulates a code change request. There are 3 types:
+// general instruction widening, jump widening, and table/lookup switch padding.
+//
+class ChangeItem : public ResourceObj {
+ int _bci;
+ public:
+ ChangeItem(int bci) { _bci = bci; }
+ virtual bool handle_code_change(Relocator *r) = 0;
+
+ // type info
+ virtual bool is_widen() { return false; }
+ virtual bool is_jump_widen() { return false; }
+ virtual bool is_switch_pad() { return false; }
+
+ // accessors
+ int bci() { return _bci; }
+ void relocate(int break_bci, int delta) { if (_bci > break_bci) { _bci += delta; } }
+
+ virtual bool adjust(int bci, int delta) { return false; }
+
+ // debug
+ virtual void print() = 0;
+};
+
+class ChangeWiden : public ChangeItem {
+ int _new_ilen; // New length of instruction at bci
+ u_char* _inst_buffer; // New bytecodes
+ public:
+ ChangeWiden(int bci, int new_ilen, u_char* inst_buffer) : ChangeItem(bci) {
+ _new_ilen = new_ilen;
+ _inst_buffer = inst_buffer;
+ }
+
+ // Callback to do instruction
+ bool handle_code_change(Relocator *r) { return r->handle_widen(bci(), _new_ilen, _inst_buffer); };
+
+ bool is_widen() { return true; }
+
+ void print() { tty->print_cr("ChangeWiden. bci: %d New_ilen: %d", bci(), _new_ilen); }
+};
+
+class ChangeJumpWiden : public ChangeItem {
+  int  _delta;  // New, widened offset of the jump at bci
+ public:
+ ChangeJumpWiden(int bci, int delta) : ChangeItem(bci) { _delta = delta; }
+
+ // Callback to do instruction
+ bool handle_code_change(Relocator *r) { return r->handle_jump_widen(bci(), _delta); };
+
+ bool is_jump_widen() { return true; }
+
+ // If the bci matches, adjust the delta in the change jump request.
+ bool adjust(int jump_bci, int delta) {
+ if (bci() == jump_bci) {
+ if (_delta > 0)
+ _delta += delta;
+ else
+ _delta -= delta;
+ return true;
+ }
+ return false;
+ }
+
+ void print() { tty->print_cr("ChangeJumpWiden. bci: %d Delta: %d", bci(), _delta); }
+};
+
+class ChangeSwitchPad : public ChangeItem {
+ int _padding;
+ bool _is_lookup_switch;
+ public:
+ ChangeSwitchPad(int bci, int padding, bool is_lookup_switch) : ChangeItem(bci) {
+ _padding = padding;
+ _is_lookup_switch = is_lookup_switch;
+ }
+
+ // Callback to do instruction
+ bool handle_code_change(Relocator *r) { return r->handle_switch_pad(bci(), _padding, _is_lookup_switch); };
+
+ bool is_switch_pad() { return true; }
+ int padding() { return _padding; }
+ bool is_lookup_switch() { return _is_lookup_switch; }
+
+ void print() { tty->print_cr("ChangeSwitchPad. bci: %d Padding: %d IsLookupSwitch: %d", bci(), _padding, _is_lookup_switch); }
+};
+
+//-----------------------------------------------------------------------------------------------------------
+// Relocator code
+
+Relocator::Relocator(methodHandle m, RelocatorListener* listener) {
+ set_method(m);
+ set_code_length(method()->code_size());
+ set_code_array(NULL);
+ // Allocate code array and copy bytecodes
+ if (!expand_code_array(0)) {
+ // Should have at least MAX_METHOD_LENGTH available or the verifier
+ // would have failed.
+ ShouldNotReachHere();
+ }
+ set_compressed_line_number_table(NULL);
+ set_compressed_line_number_table_size(0);
+ _listener = listener;
+}
+
+// size is the new size of the instruction at bci. Hence, if size is less than the current
+// instruction size, we will shrink the code.
+methodHandle Relocator::insert_space_at(int bci, int size, u_char inst_buffer[], TRAPS) {
+ _changes = new GrowableArray<ChangeItem*> (10);
+ _changes->push(new ChangeWiden(bci, size, inst_buffer));
+
+ if (TraceRelocator) {
+ tty->print_cr("Space at: %d Size: %d", bci, size);
+ _method->print();
+ _method->print_codes();
+ tty->print_cr("-------------------------------------------------");
+ }
+
+ if (!handle_code_changes()) return methodHandle();
+
+ // Construct the new method
+ methodHandle new_method = methodOopDesc::clone_with_new_data(method(),
+ code_array(), code_length(),
+ compressed_line_number_table(),
+ compressed_line_number_table_size(),
+ CHECK_(methodHandle()));
+ set_method(new_method);
+
+ if (TraceRelocator) {
+ tty->print_cr("-------------------------------------------------");
+ tty->print_cr("new method");
+ _method->print_codes();
+ }
+
+ return new_method;
+}
+
+
+bool Relocator::handle_code_changes() {
+ assert(_changes != NULL, "changes vector must be initialized");
+
+ while (!_changes->is_empty()) {
+ // Inv: everything is aligned.
+ ChangeItem* ci = _changes->first();
+
+ if (TraceRelocator) {
+ ci->print();
+ }
+
+ // Execute operation
+ if (!ci->handle_code_change(this)) return false;
+
+    // Shuffle items up
+ for (int index = 1; index < _changes->length(); index++) {
+ _changes->at_put(index-1, _changes->at(index));
+ }
+ _changes->pop();
+ }
+ return true;
+}
+
+
+bool Relocator::is_opcode_lookupswitch(Bytecodes::Code bc) {
+ switch (bc) {
+ case Bytecodes::_tableswitch: return false;
+ case Bytecodes::_lookupswitch: // not rewritten on ia64
+ case Bytecodes::_fast_linearswitch: // rewritten _lookupswitch
+ case Bytecodes::_fast_binaryswitch: return true; // rewritten _lookupswitch
+ default: ShouldNotReachHere();
+ }
+ return true; // dummy
+}
+
+// We need a special instruction size method, since lookupswitches and tableswitches might not be
+// properly aligned during relocation.
+int Relocator::rc_instr_len(int bci) {
+ Bytecodes::Code bc= code_at(bci);
+ switch (bc) {
+ // In the case of switch instructions, see if we have the original
+ // padding recorded.
+ case Bytecodes::_tableswitch:
+ case Bytecodes::_lookupswitch:
+ case Bytecodes::_fast_linearswitch:
+ case Bytecodes::_fast_binaryswitch:
+ {
+ int pad = get_orig_switch_pad(bci, is_opcode_lookupswitch(bc));
+ if (pad == -1) {
+ return instruction_length_at(bci);
+ }
+ // Otherwise, depends on the switch type.
+ switch (bc) {
+ case Bytecodes::_tableswitch: {
+ int lo = int_at(bci + 1 + pad + 4 * 1);
+ int hi = int_at(bci + 1 + pad + 4 * 2);
+ int n = hi - lo + 1;
+ return 1 + pad + 4*(3 + n);
+ }
+ case Bytecodes::_lookupswitch:
+ case Bytecodes::_fast_linearswitch:
+ case Bytecodes::_fast_binaryswitch: {
+ int npairs = int_at(bci + 1 + pad + 4 * 1);
+ return 1 + pad + 4*(2 + 2*npairs);
+ }
+ default:
+ ShouldNotReachHere();
+ }
+ }
+ }
+ return instruction_length_at(bci);
+}
+
+// If a switch-pad change item is recorded for "bci" with a matching
+// "is_lookup_switch" flag, returns the associated padding, else -1.
+int Relocator::get_orig_switch_pad(int bci, bool is_lookup_switch) {
+ for (int k = 0; k < _changes->length(); k++) {
+ ChangeItem* ci = _changes->at(k);
+ if (ci->is_switch_pad()) {
+ ChangeSwitchPad* csp = (ChangeSwitchPad*)ci;
+ if (csp->is_lookup_switch() == is_lookup_switch && csp->bci() == bci) {
+ return csp->padding();
+ }
+ }
+ }
+ return -1;
+}
+
+
+// Push a ChangeJumpWiden if it doesn't already exist on the work queue,
+// otherwise adjust the item already there by delta. In that case new_delta
+// cannot be used, because it was computed from the offset stored in the code
+// stream itself, which had not yet been fixed up when the item was pushed on
+// the work queue.
+void Relocator::push_jump_widen(int bci, int delta, int new_delta) {
+ for (int j = 0; j < _changes->length(); j++) {
+ ChangeItem* ci = _changes->at(j);
+ if (ci->adjust(bci, delta)) return;
+ }
+ _changes->push(new ChangeJumpWiden(bci, new_delta));
+}
+
+
+// The instruction at "bci" is a jump; one of its offsets starts
+// at "offset" and is a short if "is_short" is true,
+// and an integer otherwise. If the jump crosses "break_bci", change
+// the span of the jump by "delta".
+void Relocator::change_jump(int bci, int offset, bool is_short, int break_bci, int delta) {
+ int bci_delta = (is_short) ? short_at(offset) : int_at(offset);
+ int targ = bci + bci_delta;
+
+ if ((bci <= break_bci && targ > break_bci) ||
+ (bci > break_bci && targ <= break_bci)) {
+ int new_delta;
+ if (bci_delta > 0)
+ new_delta = bci_delta + delta;
+ else
+ new_delta = bci_delta - delta;
+
+ if (is_short && ((new_delta > MAX_SHORT) || new_delta < MIN_SHORT)) {
+ push_jump_widen(bci, delta, new_delta);
+ } else if (is_short) {
+ short_at_put(offset, new_delta);
+ } else {
+ int_at_put(offset, new_delta);
+ }
+ }
+}
+
+
+// Changes all jumps crossing "break_bci" by "delta". May enqueue change
+// items on "_changes".
+void Relocator::change_jumps(int break_bci, int delta) {
+ int bci = 0;
+ Bytecodes::Code bc;
+ // Now, adjust any affected instructions.
+ while (bci < code_length()) {
+ switch (bc= code_at(bci)) {
+ case Bytecodes::_ifeq:
+ case Bytecodes::_ifne:
+ case Bytecodes::_iflt:
+ case Bytecodes::_ifge:
+ case Bytecodes::_ifgt:
+ case Bytecodes::_ifle:
+ case Bytecodes::_if_icmpeq:
+ case Bytecodes::_if_icmpne:
+ case Bytecodes::_if_icmplt:
+ case Bytecodes::_if_icmpge:
+ case Bytecodes::_if_icmpgt:
+ case Bytecodes::_if_icmple:
+ case Bytecodes::_if_acmpeq:
+ case Bytecodes::_if_acmpne:
+ case Bytecodes::_ifnull:
+ case Bytecodes::_ifnonnull:
+ case Bytecodes::_goto:
+ case Bytecodes::_jsr:
+ change_jump(bci, bci+1, true, break_bci, delta);
+ break;
+ case Bytecodes::_goto_w:
+ case Bytecodes::_jsr_w:
+ change_jump(bci, bci+1, false, break_bci, delta);
+ break;
+ case Bytecodes::_tableswitch:
+ case Bytecodes::_lookupswitch:
+ case Bytecodes::_fast_linearswitch:
+ case Bytecodes::_fast_binaryswitch: {
+ int recPad = get_orig_switch_pad(bci, (bc != Bytecodes::_tableswitch));
+ int oldPad = (recPad != -1) ? recPad : align(bci+1) - (bci+1);
+ if (bci > break_bci) {
+ int new_bci = bci + delta;
+ int newPad = align(new_bci+1) - (new_bci+1);
+ // Do we need to check the padding?
+ if (newPad != oldPad) {
+ if (recPad == -1) {
+ _changes->push(new ChangeSwitchPad(bci, oldPad, (bc != Bytecodes::_tableswitch)));
+ }
+ }
+ }
+
+ // Then the rest, which depend on the kind of switch.
+ switch (bc) {
+ case Bytecodes::_tableswitch: {
+ change_jump(bci, bci +1 + oldPad, false, break_bci, delta);
+ // We cannot use the Bytecode_tableswitch abstraction, since the padding might not be correct.
+ int lo = int_at(bci + 1 + oldPad + 4 * 1);
+ int hi = int_at(bci + 1 + oldPad + 4 * 2);
+ int n = hi - lo + 1;
+ for (int k = 0; k < n; k++) {
+ change_jump(bci, bci +1 + oldPad + 4*(k+3), false, break_bci, delta);
+ }
+ // Special next-bci calculation here...
+ bci += 1 + oldPad + (n+3)*4;
+ continue;
+ }
+ case Bytecodes::_lookupswitch:
+ case Bytecodes::_fast_linearswitch:
+ case Bytecodes::_fast_binaryswitch: {
+ change_jump(bci, bci +1 + oldPad, false, break_bci, delta);
+ // We cannot use the Bytecode_lookupswitch abstraction, since the padding might not be correct.
+ int npairs = int_at(bci + 1 + oldPad + 4 * 1);
+ for (int k = 0; k < npairs; k++) {
+ change_jump(bci, bci + 1 + oldPad + 4*(2 + 2*k + 1), false, break_bci, delta);
+ }
+ /* Special next-bci calculation here... */
+ bci += 1 + oldPad + (2 + (npairs*2))*4;
+ continue;
+ }
+ default:
+ ShouldNotReachHere();
+ }
+ }
+ default:
+ break;
+ }
+ bci += rc_instr_len(bci);
+ }
+}
+
+// The width of the instruction at "bci" is changing by "delta". Adjust the
+// exception table of the current method, if any.
+void Relocator::adjust_exception_table(int bci, int delta) {
+ typeArrayOop table = method()->exception_table();
+ for (int index = 0; index < table->length(); index +=4) {
+ if (table->int_at(index) > bci) {
+ table->int_at_put(index+0, table->int_at(index+0) + delta);
+ table->int_at_put(index+1, table->int_at(index+1) + delta);
+ } else if (bci < table->int_at(index+1)) {
+ table->int_at_put(index+1, table->int_at(index+1) + delta);
+ }
+ if (table->int_at(index+2) > bci)
+ table->int_at_put(index+2, table->int_at(index+2) + delta);
+ }
+}
+
+
+// The width of instruction at "bci" is changing by "delta". Adjust the line number table.
+void Relocator::adjust_line_no_table(int bci, int delta) {
+ if (method()->has_linenumber_table()) {
+ CompressedLineNumberReadStream reader(method()->compressed_linenumber_table());
+ CompressedLineNumberWriteStream writer(64); // plenty big for most line number tables
+ while (reader.read_pair()) {
+ int adjustment = (reader.bci() > bci) ? delta : 0;
+ writer.write_pair(reader.bci() + adjustment, reader.line());
+ }
+ writer.write_terminator();
+ set_compressed_line_number_table(writer.buffer());
+ set_compressed_line_number_table_size(writer.position());
+ }
+}
+
+
+// The width of instruction at "bci" is changing by "delta". Adjust the local variable table.
+void Relocator::adjust_local_var_table(int bci, int delta) {
+ int localvariable_table_length = method()->localvariable_table_length();
+ if (localvariable_table_length > 0) {
+ LocalVariableTableElement* table = method()->localvariable_table_start();
+ for (int i = 0; i < localvariable_table_length; i++) {
+ u2 current_bci = table[i].start_bci;
+ if (current_bci > bci) {
+ table[i].start_bci = current_bci + delta;
+ } else {
+ u2 current_length = table[i].length;
+ if (current_bci + current_length > bci) {
+ table[i].length = current_length + delta;
+ }
+ }
+ }
+ }
+}
+
+
+bool Relocator::expand_code_array(int delta) {
+ int length = MAX2(code_length() + delta, code_length() * (100+code_slop_pct()) / 100);
+
+ if (length > MAX_METHOD_LENGTH) {
+ if (delta == 0 && code_length() <= MAX_METHOD_LENGTH) {
+ length = MAX_METHOD_LENGTH;
+ } else {
+ return false;
+ }
+ }
+
+ unsigned char* new_code_array = NEW_RESOURCE_ARRAY(unsigned char, length);
+ if (!new_code_array) return false;
+
+ // Expanding current array
+ if (code_array() != NULL) {
+ memcpy(new_code_array, code_array(), code_length());
+ } else {
+ // Initial copy. Copy directly from methodOop
+ memcpy(new_code_array, method()->code_base(), code_length());
+ }
+
+ set_code_array(new_code_array);
+ set_code_array_length(length);
+
+ return true;
+}
+
+
+// The instruction at "bci", whose size is "ilen", is changing size by
+// "delta". Reallocate, move code, recalculate jumps, and enqueue
+// change items as necessary.
+bool Relocator::relocate_code(int bci, int ilen, int delta) {
+ int next_bci = bci + ilen;
+ if (delta > 0 && code_length() + delta > code_array_length()) {
+ // Expand allocated code space, if necessary.
+ if (!expand_code_array(delta)) {
+ return false;
+ }
+ }
+
+ // We require 4-byte alignment of code arrays.
+ assert(((intptr_t)code_array() & 3) == 0, "check code alignment");
+ // Change jumps before doing the copying; this routine requires aligned switches.
+ change_jumps(bci, delta);
+
+ // In case we have shrunken a tableswitch/lookupswitch statement, we store the last
+ // bytes that get overwritten. We have to copy the bytes after the change_jumps method
+  // has been called, since it is likely to update the last offset in a tableswitch/lookupswitch.
+ if (delta < 0) {
+ assert(delta>=-3, "we cannot overwrite more than 3 bytes");
+ memcpy(_overwrite, addr_at(bci + ilen + delta), -delta);
+ }
+
+ memmove(addr_at(next_bci + delta), addr_at(next_bci), code_length() - next_bci);
+ set_code_length(code_length() + delta);
+ // Also adjust exception tables...
+ adjust_exception_table(bci, delta);
+ // Line number tables...
+ adjust_line_no_table(bci, delta);
+ // And local variable table...
+ adjust_local_var_table(bci, delta);
+
+ // Relocate the pending change stack...
+ for (int j = 0; j < _changes->length(); j++) {
+ ChangeItem* ci = _changes->at(j);
+ ci->relocate(bci, delta);
+ }
+
+ // Notify any listeners about code relocation
+ notify(bci, delta, code_length());
+
+ return true;
+}
+
+// Relocate a general instruction. Called by the ChangeWiden class.
+bool Relocator::handle_widen(int bci, int new_ilen, u_char inst_buffer[]) {
+ int ilen = rc_instr_len(bci);
+ if (!relocate_code(bci, ilen, new_ilen - ilen))
+ return false;
+
+ // Insert new bytecode(s)
+ for(int k = 0; k < new_ilen; k++) {
+ code_at_put(bci + k, (Bytecodes::Code)inst_buffer[k]);
+ }
+
+ return true;
+}
+
+// Handle a jump_widen instruction. Called by the ChangeJumpWiden class.
+bool Relocator::handle_jump_widen(int bci, int delta) {
+ int ilen = rc_instr_len(bci);
+
+ Bytecodes::Code bc = code_at(bci);
+ switch (bc) {
+ case Bytecodes::_ifeq:
+ case Bytecodes::_ifne:
+ case Bytecodes::_iflt:
+ case Bytecodes::_ifge:
+ case Bytecodes::_ifgt:
+ case Bytecodes::_ifle:
+ case Bytecodes::_if_icmpeq:
+ case Bytecodes::_if_icmpne:
+ case Bytecodes::_if_icmplt:
+ case Bytecodes::_if_icmpge:
+ case Bytecodes::_if_icmpgt:
+ case Bytecodes::_if_icmple:
+ case Bytecodes::_if_acmpeq:
+ case Bytecodes::_if_acmpne:
+ case Bytecodes::_ifnull:
+ case Bytecodes::_ifnonnull: {
+ const int goto_length = Bytecodes::length_for(Bytecodes::_goto);
+
+      // If the 'if' already points to the bytecode right after the goto, this
+      // relocation has already been handled; it shouldn't be applied twice.
+      assert(short_at(bci+1) != ilen+goto_length, "if relocation already handled");
+ assert(ilen == 3, "check length");
+
+ // Convert to 0 if <cond> goto 6
+ // 3 _goto 11
+ // 6 _goto_w <wide delta offset>
+ // 11 <else code>
+ const int goto_w_length = Bytecodes::length_for(Bytecodes::_goto_w);
+ const int add_bci = goto_length + goto_w_length;
+
+ if (!relocate_code(bci, 3, /*delta*/add_bci)) return false;
+
+ // if bytecode points to goto_w instruction
+ short_at_put(bci + 1, ilen + goto_length);
+
+ int cbci = bci + ilen;
+ // goto around
+ code_at_put(cbci, Bytecodes::_goto);
+ short_at_put(cbci + 1, add_bci);
+ // goto_w <wide delta>
+ cbci = cbci + goto_length;
+ code_at_put(cbci, Bytecodes::_goto_w);
+ if (delta > 0) {
+ delta += 2; // goto_w is 2 bytes more than "if" code
+ } else {
+ delta -= ilen+goto_length; // branch starts at goto_w offset
+ }
+ int_at_put(cbci + 1, delta);
+ break;
+
+ }
+ case Bytecodes::_goto:
+ case Bytecodes::_jsr:
+ assert(ilen == 3, "check length");
+
+ if (!relocate_code(bci, 3, 2)) return false;
+ if (bc == Bytecodes::_goto)
+ code_at_put(bci, Bytecodes::_goto_w);
+ else
+ code_at_put(bci, Bytecodes::_jsr_w);
+
+ // If it's a forward jump, add 2 for the widening.
+ if (delta > 0) delta += 2;
+ int_at_put(bci + 1, delta);
+ break;
+
+ default: ShouldNotReachHere();
+ }
+
+ return true;
+}
+
+// Handle lookup/table switch instructions. Called by the ChangeSwitchPad class.
+bool Relocator::handle_switch_pad(int bci, int old_pad, bool is_lookup_switch) {
+ int ilen = rc_instr_len(bci);
+ int new_pad = align(bci+1) - (bci+1);
+ int pad_delta = new_pad - old_pad;
+ if (pad_delta != 0) {
+ int len;
+ if (!is_lookup_switch) {
+ int low = int_at(bci+1+old_pad+4);
+ int high = int_at(bci+1+old_pad+8);
+ len = high-low+1 + 3; // 3 for default, hi, lo.
+ } else {
+ int npairs = int_at(bci+1+old_pad+4);
+ len = npairs*2 + 2; // 2 for default, npairs.
+ }
+    // Because "relocate_code" does a "change_jumps" loop,
+ // which parses instructions to determine their length,
+ // we need to call that before messing with the current
+ // instruction. Since it may also overwrite the current
+ // instruction when moving down, remember the possibly
+ // overwritten part.
+
+ // Move the code following the instruction...
+ if (!relocate_code(bci, ilen, pad_delta)) return false;
+
+ if (pad_delta < 0) {
+ // Move the shrunken instruction down.
+ memmove(addr_at(bci + 1 + new_pad),
+ addr_at(bci + 1 + old_pad),
+ len * 4 + pad_delta);
+ memmove(addr_at(bci + 1 + new_pad + len*4 + pad_delta),
+ _overwrite, -pad_delta);
+ } else {
+ assert(pad_delta > 0, "check");
+ // Move the expanded instruction up.
+ memmove(addr_at(bci +1 + new_pad),
+ addr_at(bci +1 + old_pad),
+ len * 4);
+ }
+ }
+ return true;
+}
diff --git a/src/share/vm/runtime/relocator.hpp b/src/share/vm/runtime/relocator.hpp
new file mode 100644
index 000000000..598f9ec14
--- /dev/null
+++ b/src/share/vm/runtime/relocator.hpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 1997-2004 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// This code has been converted from the 1.1E java virtual machine
+// Thanks to the JavaTopics group for using the code
+
+class ChangeItem;
+
+// Callback object for code relocations
+class RelocatorListener : public StackObj {
+ public:
+ RelocatorListener() {};
+ virtual void relocated(int bci, int delta, int new_method_size) = 0;
+};
+
+
+class Relocator : public ResourceObj {
+ public:
+ Relocator(methodHandle method, RelocatorListener* listener);
+ methodHandle insert_space_at(int bci, int space, u_char inst_buffer[], TRAPS);
+
+ // Callbacks from ChangeItem's
+ bool handle_code_changes();
+ bool handle_widen (int bci, int new_ilen, u_char inst_buffer[]); // handles general instructions
+ void push_jump_widen (int bci, int delta, int new_delta); // pushes jumps
+ bool handle_jump_widen (int bci, int delta); // handles jumps
+ bool handle_switch_pad (int bci, int old_pad, bool is_lookup_switch); // handles table and lookup switches
+
+ private:
+ unsigned char* _code_array;
+ int _code_array_length;
+ int _code_length;
+ unsigned char* _compressed_line_number_table;
+ int _compressed_line_number_table_size;
+ methodHandle _method;
+ u_char _overwrite[3]; // stores overwritten bytes for shrunken instructions
+
+ GrowableArray<ChangeItem*>* _changes;
+
+ unsigned char* code_array() const { return _code_array; }
+ void set_code_array(unsigned char* array) { _code_array = array; }
+
+ int code_length() const { return _code_length; }
+ void set_code_length(int length) { _code_length = length; }
+
+ int code_array_length() const { return _code_array_length; }
+ void set_code_array_length(int length) { _code_array_length = length; }
+
+ unsigned char* compressed_line_number_table() const { return _compressed_line_number_table; }
+ void set_compressed_line_number_table(unsigned char* table) { _compressed_line_number_table = table; }
+
+ int compressed_line_number_table_size() const { return _compressed_line_number_table_size; }
+ void set_compressed_line_number_table_size(int size) { _compressed_line_number_table_size = size; }
+
+ methodHandle method() const { return _method; }
+ void set_method(methodHandle method) { _method = method; }
+
+ // This will return a raw bytecode, which is possibly rewritten.
+ Bytecodes::Code code_at(int bci) const { return (Bytecodes::Code) code_array()[bci]; }
+ void code_at_put(int bci, Bytecodes::Code code) { code_array()[bci] = (char) code; }
+
+ // get and set signed integers in the code_array
+ inline int int_at(int bci) const { return Bytes::get_Java_u4(&code_array()[bci]); }
+ inline void int_at_put(int bci, int value) { Bytes::put_Java_u4(&code_array()[bci], value); }
+
+ // get and set signed shorts in the code_array
+ inline short short_at(int bci) const { return (short)Bytes::get_Java_u2(&code_array()[bci]); }
+ inline void short_at_put(int bci, short value) { Bytes::put_Java_u2((address) &code_array()[bci], value); }
+
+  // get the address of the bytecode at bci in the code_array
+ inline char* addr_at(int bci) const { return (char*) &code_array()[bci]; }
+
+ int instruction_length_at(int bci) { return Bytecodes::length_at(code_array() + bci); }
+
+ // Helper methods
+ int align(int n) const { return (n+3) & ~3; }
+ int code_slop_pct() const { return 25; }
+ bool is_opcode_lookupswitch(Bytecodes::Code bc);
+
+ // basic relocation methods
+ bool relocate_code (int bci, int ilen, int delta);
+ void change_jumps (int break_bci, int delta);
+ void change_jump (int bci, int offset, bool is_short, int break_bci, int delta);
+ void adjust_exception_table(int bci, int delta);
+ void adjust_line_no_table (int bci, int delta);
+ void adjust_local_var_table(int bci, int delta);
+ int get_orig_switch_pad (int bci, bool is_lookup_switch);
+ int rc_instr_len (int bci);
+ bool expand_code_array (int delta);
+
+ // Callback support
+ RelocatorListener *_listener;
+ void notify(int bci, int delta, int new_code_length) {
+ if (_listener != NULL)
+ _listener->relocated(bci, delta, new_code_length);
+ }
+};
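+
+// Illustrative usage sketch (hypothetical client code, not part of this
+// interface): a caller supplies a RelocatorListener to be notified of each
+// relocation, then asks the Relocator to open up space at a bci.
+//
+//   class MyListener : public RelocatorListener {
+//    public:
+//     void relocated(int bci, int delta, int new_method_size) {
+//       // e.g. shift any cached bci-based data by delta
+//     }
+//   };
+//
+//   MyListener listener;
+//   Relocator rc(method, &listener);
+//   methodHandle new_m = rc.insert_space_at(bci, space, inst_buffer, THREAD);  // TRAPS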
diff --git a/src/share/vm/runtime/rframe.cpp b/src/share/vm/runtime/rframe.cpp
new file mode 100644
index 000000000..348ef4757
--- /dev/null
+++ b/src/share/vm/runtime/rframe.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+
+#include "incls/_rframe.cpp.incl"
+
+static RFrame*const noCaller = (RFrame*) 0x1; // no caller (i.e., initial frame)
+static RFrame*const noCallerYet = (RFrame*) 0x0; // caller not yet computed
+
+RFrame::RFrame(frame fr, JavaThread* thread, RFrame*const callee) :
+ _fr(fr), _thread(thread), _callee(callee), _num(callee ? callee->num() + 1 : 0) {
+ _caller = (RFrame*)noCallerYet;
+ _invocations = 0;
+ _distance = 0;
+}
+
+void RFrame::set_distance(int d) {
+ assert(is_compiled() || d >= 0, "should be positive");
+ _distance = d;
+}
+
+InterpretedRFrame::InterpretedRFrame(frame fr, JavaThread* thread, RFrame*const callee)
+: RFrame(fr, thread, callee) {
+ RegisterMap map(thread, false);
+ _vf = javaVFrame::cast(vframe::new_vframe(&_fr, &map, thread));
+ _method = methodHandle(thread, _vf->method());
+ assert( _vf->is_interpreted_frame(), "must be interpreted");
+ init();
+}
+
+InterpretedRFrame::InterpretedRFrame(frame fr, JavaThread* thread, methodHandle m)
+: RFrame(fr, thread, NULL) {
+ RegisterMap map(thread, false);
+ _vf = javaVFrame::cast(vframe::new_vframe(&_fr, &map, thread));
+ _method = m;
+
+ assert( _vf->is_interpreted_frame(), "must be interpreted");
+ init();
+}
+
+CompiledRFrame::CompiledRFrame(frame fr, JavaThread* thread, RFrame*const callee)
+: RFrame(fr, thread, callee) {
+ init();
+}
+
+CompiledRFrame::CompiledRFrame(frame fr, JavaThread* thread)
+: RFrame(fr, thread, NULL) {
+ init();
+}
+
+DeoptimizedRFrame::DeoptimizedRFrame(frame fr, JavaThread* thread, RFrame*const callee)
+: InterpretedRFrame(fr, thread, callee) {}
+
+RFrame* RFrame::new_RFrame(frame fr, JavaThread* thread, RFrame*const callee) {
+ RFrame* rf;
+ int dist = callee ? callee->distance() : -1;
+ if (fr.is_interpreted_frame()) {
+ rf = new InterpretedRFrame(fr, thread, callee);
+ dist++;
+ } else if (fr.is_compiled_frame()) {
+ // Even deopted frames look compiled because the deopt
+ // is invisible until it happens.
+ rf = new CompiledRFrame(fr, thread, callee);
+ } else {
+ assert(false, "Unhandled frame type");
+ }
+ rf->set_distance(dist);
+ rf->init();
+ return rf;
+}
+
+RFrame* RFrame::caller() {
+ if (_caller != noCallerYet) return (_caller == noCaller) ? NULL : _caller; // already computed caller
+
+ // caller not yet computed; do it now
+ if (_fr.is_first_java_frame()) {
+ _caller = (RFrame*)noCaller;
+ return NULL;
+ }
+
+ RegisterMap map(_thread, false);
+ frame sender = _fr.real_sender(&map);
+ if (sender.is_java_frame()) {
+ _caller = new_RFrame(sender, thread(), this);
+ return _caller;
+ }
+
+ // Real caller is not java related
+ _caller = (RFrame*)noCaller;
+ return NULL;
+}
+
+int InterpretedRFrame::cost() const {
+ return _method->code_size(); // fix this
+ //return _method->estimated_inline_cost(_receiverKlass);
+}
+
+int CompiledRFrame::cost() const {
+ nmethod* nm = top_method()->code();
+ if (nm != NULL) {
+ return nm->code_size();
+ } else {
+ return top_method()->code_size();
+ }
+}
+
+void CompiledRFrame::init() {
+ RegisterMap map(thread(), false);
+ vframe* vf = vframe::new_vframe(&_fr, &map, thread());
+ assert(vf->is_compiled_frame(), "must be compiled");
+ _nm = compiledVFrame::cast(vf)->code();
+ vf = vf->top();
+ _vf = javaVFrame::cast(vf);
+ _method = methodHandle(thread(), CodeCache::find_nmethod(_fr.pc())->method());
+ assert(_method(), "should have found a method");
+#ifndef PRODUCT
+ _invocations = _method->compiled_invocation_count();
+#endif
+}
+
+void InterpretedRFrame::init() {
+ _invocations = _method->invocation_count() + _method->backedge_count();
+}
+
+void RFrame::print(const char* kind) {
+#ifndef PRODUCT
+#ifdef COMPILER2
+ int cnt = top_method()->interpreter_invocation_count();
+#else
+ int cnt = top_method()->invocation_count();
+#endif
+ tty->print("%3d %s ", _num, is_interpreted() ? "I" : "C");
+ top_method()->print_short_name(tty);
+ tty->print_cr(": inv=%5d(%d) cst=%4d", _invocations, cnt, cost());
+#endif
+}
+
+void CompiledRFrame::print() {
+ RFrame::print("comp");
+}
+
+void InterpretedRFrame::print() {
+ RFrame::print("int.");
+}
+
+void DeoptimizedRFrame::print() {
+ RFrame::print("deopt.");
+}
diff --git a/src/share/vm/runtime/rframe.hpp b/src/share/vm/runtime/rframe.hpp
new file mode 100644
index 000000000..a20cc419c
--- /dev/null
+++ b/src/share/vm/runtime/rframe.hpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 1997-2000 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// rframes ("recompiler frames") decorate stack frames with some extra information
+// needed by the recompiler. The recompiler views the stack (at the time of recompilation)
+// as a list of rframes.
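+//
+// Illustrative sketch (hypothetical code, assuming a JavaThread* thread whose
+// most recent Java frame fr triggered recompilation): build the most recent
+// rframe and walk towards the root via caller().
+//
+//   RFrame* rf = RFrame::new_RFrame(fr, thread, NULL);  // callee == NULL: most recent frame
+//   for (RFrame* r = rf; r != NULL; r = r->caller()) {
+//     r->print();  // or inspect r->cost(), r->invocations(), ...
+//   }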
+
+class RFrame : public ResourceObj {
+ protected:
+ const frame _fr; // my frame
+ JavaThread* const _thread; // thread where frame resides.
+ RFrame* _caller; // caller / callee rframes (or NULL)
+ RFrame*const _callee;
+ const int _num; // stack frame number (0 = most recent)
+ int _invocations; // current invocation estimate (for this frame)
+ // (i.e., how often was this frame called)
+ int _distance; // recompilation search "distance" (measured in # of interpreted frames)
+
+ RFrame(frame fr, JavaThread* thread, RFrame*const callee);
+ virtual void init() = 0; // compute invocations, loopDepth, etc.
+ void print(const char* name);
+
+ public:
+
+ static RFrame* new_RFrame(frame fr, JavaThread* thread, RFrame*const callee);
+
+ virtual bool is_interpreted() const { return false; }
+ virtual bool is_compiled() const { return false; }
+ int distance() const { return _distance; }
+ void set_distance(int d);
+ int invocations() const { return _invocations; }
+ int num() const { return _num; }
+ frame fr() const { return _fr; }
+ JavaThread* thread() const { return _thread; }
+ virtual int cost() const = 0; // estimated inlining cost (size)
+ virtual methodHandle top_method() const = 0;
+ virtual javaVFrame* top_vframe() const = 0;
+ virtual nmethod* nm() const { ShouldNotCallThis(); return NULL; }
+
+ RFrame* caller();
+ RFrame* callee() const { return _callee; }
+ RFrame* parent() const; // rframe containing lexical scope (if any)
+ virtual void print() = 0;
+
+ static int computeSends(methodOop m);
+ static int computeSends(nmethod* nm);
+ static int computeCumulSends(methodOop m);
+ static int computeCumulSends(nmethod* nm);
+};
+
+class CompiledRFrame : public RFrame { // frame containing a compiled method
+ protected:
+ nmethod* _nm;
+ javaVFrame* _vf; // top vframe; may be NULL (for most recent frame)
+ methodHandle _method; // top method
+
+ CompiledRFrame(frame fr, JavaThread* thread, RFrame*const callee);
+ void init();
+ friend class RFrame;
+
+ public:
+ CompiledRFrame(frame fr, JavaThread* thread); // for nmethod triggering its counter (callee == NULL)
+ bool is_compiled() const { return true; }
+ methodHandle top_method() const { return _method; }
+ javaVFrame* top_vframe() const { return _vf; }
+ nmethod* nm() const { return _nm; }
+ int cost() const;
+ void print();
+};
+
+class InterpretedRFrame : public RFrame { // interpreter frame
+ protected:
+ javaVFrame* _vf; // may be NULL (for most recent frame)
+ methodHandle _method;
+
+ InterpretedRFrame(frame fr, JavaThread* thread, RFrame*const callee);
+ void init();
+ friend class RFrame;
+
+ public:
+ InterpretedRFrame(frame fr, JavaThread* thread, methodHandle m); // constructor for method triggering its invocation counter
+ bool is_interpreted() const { return true; }
+ methodHandle top_method() const { return _method; }
+ javaVFrame* top_vframe() const { return _vf; }
+ int cost() const;
+ void print();
+};
+
+// treat deoptimized frames as interpreted
+class DeoptimizedRFrame : public InterpretedRFrame {
+ protected:
+ DeoptimizedRFrame(frame fr, JavaThread* thread, RFrame*const callee);
+ friend class RFrame;
+ public:
+ void print();
+};
diff --git a/src/share/vm/runtime/safepoint.cpp b/src/share/vm/runtime/safepoint.cpp
new file mode 100644
index 000000000..2a3b838f6
--- /dev/null
+++ b/src/share/vm/runtime/safepoint.cpp
@@ -0,0 +1,1215 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_safepoint.cpp.incl"
+
+// --------------------------------------------------------------------------------------------------
+// Implementation of Safepoint begin/end
+
+SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
+volatile int SafepointSynchronize::_waiting_to_block = 0;
+jlong SafepointSynchronize::_last_safepoint = 0;
+volatile int SafepointSynchronize::_safepoint_counter = 0;
+static volatile int PageArmed = 0 ; // safepoint polling page is RO|RW vs PROT_NONE
+static volatile int TryingToBlock = 0 ; // proximate value -- for advisory use only
+static bool timeout_error_printed = false;
+
+// Roll all threads forward to a safepoint and suspend them all
+void SafepointSynchronize::begin() {
+
+ Thread* myThread = Thread::current();
+ assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");
+
+ _last_safepoint = os::javaTimeNanos();
+
+#ifndef SERIALGC
+ if (UseConcMarkSweepGC) {
+ // In the future we should investigate whether CMS can use the
+ // more-general mechanism below. DLD (01/05).
+ ConcurrentMarkSweepThread::synchronize(false);
+ } else {
+ ConcurrentGCThread::safepoint_synchronize();
+ }
+#endif // SERIALGC
+
+ // By getting the Threads_lock, we assure that no threads are about to start or
+ // exit. It is released again in SafepointSynchronize::end().
+ Threads_lock->lock();
+
+ assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state");
+
+ int nof_threads = Threads::number_of_threads();
+
+ if (TraceSafepoint) {
+ tty->print_cr("Safepoint synchronization initiated. (%d)", nof_threads);
+ }
+
+ RuntimeService::record_safepoint_begin();
+
+ {
+ MutexLocker mu(Safepoint_lock);
+
+ // Set number of threads to wait for, before we initiate the callbacks
+ _waiting_to_block = nof_threads;
+ TryingToBlock = 0 ;
+ int still_running = nof_threads;
+
+ // Save the starting time, so that it can be compared to see if this has taken
+ // too long to complete.
+ jlong safepoint_limit_time;
+ timeout_error_printed = false;
+
+ // Begin the process of bringing the system to a safepoint.
+ // Java threads can be in several different states and are
+ // stopped by different mechanisms:
+ //
+ // 1. Running interpreted
+    //      The interpreter dispatch table is changed to force it to
+ // check for a safepoint condition between bytecodes.
+ // 2. Running in native code
+ // When returning from the native code, a Java thread must check
+ // the safepoint _state to see if we must block. If the
+ // VM thread sees a Java thread in native, it does
+ // not wait for this thread to block. The order of the memory
+ // writes and reads of both the safepoint state and the Java
+ // threads state is critical. In order to guarantee that the
+ // memory writes are serialized with respect to each other,
+ // the VM thread issues a memory barrier instruction
+ // (on MP systems). In order to avoid the overhead of issuing
+ // a memory barrier for each Java thread making native calls, each Java
+ // thread performs a write to a single memory page after changing
+ // the thread state. The VM thread performs a sequence of
+ // mprotect OS calls which forces all previous writes from all
+ // Java threads to be serialized. This is done in the
+ // os::serialize_thread_states() call. This has proven to be
+ // much more efficient than executing a membar instruction
+ // on every call to native code.
+ // 3. Running compiled Code
+ // Compiled code reads a global (Safepoint Polling) page that
+ // is set to fault if we are trying to get to a safepoint.
+ // 4. Blocked
+ // A thread which is blocked will not be allowed to return from the
+ // block condition until the safepoint operation is complete.
+ // 5. In VM or Transitioning between states
+ // If a Java thread is currently running in the VM or transitioning
+ // between states, the safepointing code will wait for the thread to
+ // block itself when it attempts transitions to a new state.
+ //
+ _state = _synchronizing;
+ OrderAccess::fence();
+
+ // Flush all thread states to memory
+ if (!UseMembar) {
+ os::serialize_thread_states();
+ }
+
+ // Make interpreter safepoint aware
+ Interpreter::notice_safepoints();
+
+ if (UseCompilerSafepoints && DeferPollingPageLoopCount < 0) {
+ // Make polling safepoint aware
+ guarantee (PageArmed == 0, "invariant") ;
+ PageArmed = 1 ;
+ os::make_polling_page_unreadable();
+ }
+
+ // Consider using active_processor_count() ... but that call is expensive.
+ int ncpus = os::processor_count() ;
+
+#ifdef ASSERT
+ for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
+ assert(cur->safepoint_state()->is_running(), "Illegal initial state");
+ }
+#endif // ASSERT
+
+ if (SafepointTimeout)
+ safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS;
+
+    // Iterate through all threads until it has been determined how to stop them all at a safepoint
+ unsigned int iterations = 0;
+ int steps = 0 ;
+ while(still_running > 0) {
+ for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
+        assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectedly being suspended");
+ ThreadSafepointState *cur_state = cur->safepoint_state();
+ if (cur_state->is_running()) {
+ cur_state->examine_state_of_thread();
+ if (!cur_state->is_running()) {
+ still_running--;
+ // consider adjusting steps downward:
+ // steps = 0
+ // steps -= NNN
+ // steps >>= 1
+ // steps = MIN(steps, 2000-100)
+ // if (iterations != 0) steps -= NNN
+ }
+ if (TraceSafepoint && Verbose) cur_state->print();
+ }
+ }
+
+ if ( (PrintSafepointStatistics || (PrintSafepointStatisticsTimeout > 0))
+ && iterations == 0) {
+ begin_statistics(nof_threads, still_running);
+ }
+
+ if (still_running > 0) {
+        // Check if it is taking too long
+ if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
+ print_safepoint_timeout(_spinning_timeout);
+ }
+
+ // Spin to avoid context switching.
+ // There's a tension between allowing the mutators to run (and rendezvous)
+ // vs spinning. As the VM thread spins, wasting cycles, it consumes CPU that
+ // a mutator might otherwise use profitably to reach a safepoint. Excessive
+ // spinning by the VM thread on a saturated system can increase rendezvous latency.
+ // Blocking or yielding incur their own penalties in the form of context switching
+ // and the resultant loss of $ residency.
+ //
+ // Further complicating matters is that yield() does not work as naively expected
+ // on many platforms -- yield() does not guarantee that any other ready threads
+        // will run.  As such we revert to yield_all() after some number of iterations.
+ // Yield_all() is implemented as a short unconditional sleep on some platforms.
+ // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
+ // can actually increase the time it takes the VM thread to detect that a system-wide
+ // stop-the-world safepoint has been reached. In a pathological scenario such as that
+ // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
+        // In that case the mutators will be stalled waiting for the safepoint to complete and
+        // the VMthread will be sleeping, waiting for the mutators to rendezvous. The VMthread
+ // will eventually wake up and detect that all mutators are safe, at which point
+ // we'll again make progress.
+ //
+        // Beware too that the VMThread typically runs at elevated priority.
+ // Its default priority is higher than the default mutator priority.
+ // Obviously, this complicates spinning.
+ //
+ // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0).
+        // Sleep(0) will _not_ yield to lower priority threads, while SwitchThreadTo() will.
+ //
+ // See the comments in synchronizer.cpp for additional remarks on spinning.
+ //
+ // In the future we might:
+        //  1. Modify the safepoint scheme to avoid potentially unbounded spinning.
+        //     This is tricky as the path used by a thread exiting the JVM (say on
+        //     a JNI call-out) simply stores into its state field.  The burden
+ // is placed on the VM thread, which must poll (spin).
+ // 2. Find something useful to do while spinning. If the safepoint is GC-related
+ // we might aggressively scan the stacks of threads that are already safe.
+ // 3. Use Solaris schedctl to examine the state of the still-running mutators.
+ // If all the mutators are ONPROC there's no reason to sleep or yield.
+ // 4. YieldTo() any still-running mutators that are ready but OFFPROC.
+ // 5. Check system saturation. If the system is not fully saturated then
+ // simply spin and avoid sleep/yield.
+ // 6. As still-running mutators rendezvous they could unpark the sleeping
+ // VMthread. This works well for still-running mutators that become
+ // safe. The VMthread must still poll for mutators that call-out.
+ // 7. Drive the policy on time-since-begin instead of iterations.
+ // 8. Consider making the spin duration a function of the # of CPUs:
+ // Spin = (((ncpus-1) * M) + K) + F(still_running)
+ // Alternately, instead of counting iterations of the outer loop
+ // we could count the # of threads visited in the inner loop, above.
+ // 9. On windows consider using the return value from SwitchThreadTo()
+ // to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
+
+ if (UseCompilerSafepoints && int(iterations) == DeferPollingPageLoopCount) {
+ guarantee (PageArmed == 0, "invariant") ;
+ PageArmed = 1 ;
+ os::make_polling_page_unreadable();
+ }
+
+ // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
+        // ((still_running + _waiting_to_block - TryingToBlock) < ncpus)
+ ++steps ;
+ if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
+ SpinPause() ; // MP-Polite spin
+ } else
+ if (steps < DeferThrSuspendLoopCount) {
+ os::NakedYield() ;
+ } else {
+ os::yield_all(steps) ;
+ // Alternately, the VM thread could transiently depress its scheduling priority or
+ // transiently increase the priority of the tardy mutator(s).
+ }
+
+ iterations ++ ;
+ }
+ assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
+ }
+ assert(still_running == 0, "sanity check");
+
+ if (PrintSafepointStatistics) {
+ update_statistics_on_spin_end();
+ }
+
+ // wait until all threads are stopped
+ while (_waiting_to_block > 0) {
+ if (TraceSafepoint) tty->print_cr("Waiting for %d thread(s) to block", _waiting_to_block);
+ if (!SafepointTimeout || timeout_error_printed) {
+ Safepoint_lock->wait(true); // true, means with no safepoint checks
+ } else {
+ // Compute remaining time
+ jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();
+
+ // If there is no remaining time, then there is an error
+ if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
+ print_safepoint_timeout(_blocking_timeout);
+ }
+ }
+ }
+ assert(_waiting_to_block == 0, "sanity check");
+
+#ifndef PRODUCT
+ if (SafepointTimeout) {
+ jlong current_time = os::javaTimeNanos();
+ if (safepoint_limit_time < current_time) {
+ tty->print_cr("# SafepointSynchronize: Finished after "
+ INT64_FORMAT_W(6) " ms",
+ ((current_time - safepoint_limit_time) / MICROUNITS +
+ SafepointTimeoutDelay));
+ }
+ }
+#endif
+
+ assert((_safepoint_counter & 0x1) == 0, "must be even");
+ assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
+ _safepoint_counter ++;
+
+ // Record state
+ _state = _synchronized;
+
+ OrderAccess::fence();
+
+ if (TraceSafepoint) {
+ VM_Operation *op = VMThread::vm_operation();
+ tty->print_cr("Entering safepoint region: %s", (op != NULL) ? op->name() : "no vm operation");
+ }
+
+ RuntimeService::record_safepoint_synchronized();
+ if (PrintSafepointStatistics) {
+ update_statistics_on_sync_end(os::javaTimeNanos());
+ }
+
+ // Call stuff that needs to be run when a safepoint is just about to be completed
+ do_cleanup_tasks();
+ }
+}
+
+// Wake up all threads, so they are ready to resume execution after the safepoint
+// operation has been carried out
+void SafepointSynchronize::end() {
+
+ assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
+ assert((_safepoint_counter & 0x1) == 1, "must be odd");
+ _safepoint_counter ++;
+ // memory fence isn't required here since an odd _safepoint_counter
+ // value can do no harm and a fence is issued below anyway.
+
+ DEBUG_ONLY(Thread* myThread = Thread::current();)
+ assert(myThread->is_VM_thread(), "Only VM thread can execute a safepoint");
+
+ if (PrintSafepointStatistics) {
+ end_statistics(os::javaTimeNanos());
+ }
+
+#ifdef ASSERT
+  // A pending_exception cannot be installed during a safepoint.  The threads
+  // may install an async exception into pending_exception after they come back
+  // from a safepoint and unblock, but that should happen later.
+ for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
+ assert (!(cur->has_pending_exception() &&
+ cur->safepoint_state()->is_at_poll_safepoint()),
+ "safepoint installed a pending exception");
+ }
+#endif // ASSERT
+
+ if (PageArmed) {
+ // Make polling safepoint aware
+ os::make_polling_page_readable();
+ PageArmed = 0 ;
+ }
+
+ // Remove safepoint check from interpreter
+ Interpreter::ignore_safepoints();
+
+ {
+ MutexLocker mu(Safepoint_lock);
+
+ assert(_state == _synchronized, "must be synchronized before ending safepoint synchronization");
+
+ // Set to not synchronized, so the threads will not go into the signal_thread_blocked method
+ // when they get restarted.
+ _state = _not_synchronized;
+ OrderAccess::fence();
+
+ if (TraceSafepoint) {
+ tty->print_cr("Leaving safepoint region");
+ }
+
+ // Start suspended threads
+ for(JavaThread *current = Threads::first(); current; current = current->next()) {
+      // A problem occurring on Solaris is that when attempting to restart threads,
+ // the first #cpus - 1 go well, but then the VMThread is preempted when we get
+ // to the next one (since it has been running the longest). We then have
+ // to wait for a cpu to become available before we can continue restarting
+ // threads.
+ // FIXME: This causes the performance of the VM to degrade when active and with
+ // large numbers of threads. Apparently this is due to the synchronous nature
+ // of suspending threads.
+ //
+ // TODO-FIXME: the comments above are vestigial and no longer apply.
+ // Furthermore, using solaris' schedctl in this particular context confers no benefit
+ if (VMThreadHintNoPreempt) {
+ os::hint_no_preempt();
+ }
+ ThreadSafepointState* cur_state = current->safepoint_state();
+ assert(cur_state->type() != ThreadSafepointState::_running, "Thread not suspended at safepoint");
+ cur_state->restart();
+ assert(cur_state->is_running(), "safepoint state has not been reset");
+ }
+
+ RuntimeService::record_safepoint_end();
+
+    // Release the Threads_lock, so threads can be created/destroyed again. It will also start all threads
+ // blocked in signal_thread_blocked
+ Threads_lock->unlock();
+
+ }
+#ifndef SERIALGC
+ // If there are any concurrent GC threads resume them.
+ if (UseConcMarkSweepGC) {
+ ConcurrentMarkSweepThread::desynchronize(false);
+ } else {
+ ConcurrentGCThread::safepoint_desynchronize();
+ }
+#endif // SERIALGC
+}
+
+bool SafepointSynchronize::is_cleanup_needed() {
+  // Need a safepoint if the inline cache buffer is non-empty
+ if (!InlineCacheBuffer::is_empty()) return true;
+ return false;
+}
+
+jlong CounterDecay::_last_timestamp = 0;
+
+static void do_method(methodOop m) {
+ m->invocation_counter()->decay();
+}
+
+void CounterDecay::decay() {
+ _last_timestamp = os::javaTimeMillis();
+
+  // This operation is performed only at the end of a safepoint, and hence
+  // no GCs will be going on; all Java mutators are suspended at this point,
+  // so the SystemDictionary_lock is not needed either.
+ assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
+ int nclasses = SystemDictionary::number_of_classes();
+ double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
+ CounterHalfLifeTime);
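+  // For example (illustrative numbers only): with 10000 loaded classes,
+  // CounterDecayMinIntervalLength = 500 (ms) and CounterHalfLifeTime = 30 (s),
+  // classes_per_tick = 10000 * (0.5 / 30) ~= 167 classes visited per decay tick.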
+ for (int i = 0; i < classes_per_tick; i++) {
+ klassOop k = SystemDictionary::try_get_next_class();
+ if (k != NULL && k->klass_part()->oop_is_instance()) {
+ instanceKlass::cast(k)->methods_do(do_method);
+ }
+ }
+}
+
+// Various cleaning tasks that should be done periodically at safepoints
+void SafepointSynchronize::do_cleanup_tasks() {
+ jlong cleanup_time;
+
+ // Update fat-monitor pool, since this is a safepoint.
+ if (TraceSafepoint) {
+ cleanup_time = os::javaTimeNanos();
+ }
+
+ ObjectSynchronizer::deflate_idle_monitors();
+ InlineCacheBuffer::update_inline_caches();
+ if(UseCounterDecay && CounterDecay::is_decay_needed()) {
+ CounterDecay::decay();
+ }
+ NMethodSweeper::sweep();
+
+ if (TraceSafepoint) {
+ tty->print_cr("do_cleanup_tasks takes "INT64_FORMAT_W(6) "ms",
+ (os::javaTimeNanos() - cleanup_time) / MICROUNITS);
+ }
+}
+
+
+bool SafepointSynchronize::safepoint_safe(JavaThread *thread, JavaThreadState state) {
+ switch(state) {
+ case _thread_in_native:
+     // native threads are safe if they have no java stack or have a walkable stack
+ return !thread->has_last_Java_frame() || thread->frame_anchor()->walkable();
+
+   // blocked threads should already have a walkable stack
+ case _thread_blocked:
+ assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "blocked and not walkable");
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+
+// -------------------------------------------------------------------------------------------------------
+// Implementation of Safepoint callback point
+
+void SafepointSynchronize::block(JavaThread *thread) {
+ assert(thread != NULL, "thread must be set");
+ assert(thread->is_Java_thread(), "not a Java thread");
+
+ // Threads shouldn't block if they are in the middle of printing, but...
+ ttyLocker::break_tty_lock_for_safepoint(os::current_thread_id());
+
+ // Only bail from the block() call if the thread is gone from the
+ // thread list; starting to exit should still block.
+ if (thread->is_terminated()) {
+ // block current thread if we come here from native code when VM is gone
+ thread->block_if_vm_exited();
+
+ // otherwise do nothing
+ return;
+ }
+
+ JavaThreadState state = thread->thread_state();
+ thread->frame_anchor()->make_walkable(thread);
+
+ // Check that we have a valid thread_state at this point
+ switch(state) {
+ case _thread_in_vm_trans:
+ case _thread_in_Java: // From compiled code
+
+ // We are highly likely to block on the Safepoint_lock. In order to avoid blocking in this case,
+ // we pretend we are still in the VM.
+ thread->set_thread_state(_thread_in_vm);
+
+ if (is_synchronizing()) {
+ Atomic::inc (&TryingToBlock) ;
+ }
+
+      // We will always be holding the Safepoint_lock when we examine the state
+      // of a thread. Hence, the instructions between Safepoint_lock->lock() and
+      // Safepoint_lock->unlock() happen atomically with respect to the safepoint code
+ Safepoint_lock->lock_without_safepoint_check();
+ if (is_synchronizing()) {
+ // Decrement the number of threads to wait for and signal vm thread
+ assert(_waiting_to_block > 0, "sanity check");
+ _waiting_to_block--;
+ thread->safepoint_state()->set_has_called_back(true);
+
+ // Consider (_waiting_to_block < 2) to pipeline the wakeup of the VM thread
+ if (_waiting_to_block == 0) {
+ Safepoint_lock->notify_all();
+ }
+ }
+
+ // We transition the thread to state _thread_blocked here, but
+ // we can't do our usual check for external suspension and then
+ // self-suspend after the lock_without_safepoint_check() call
+ // below because we are often called during transitions while
+ // we hold different locks. That would leave us suspended while
+ // holding a resource which results in deadlocks.
+ thread->set_thread_state(_thread_blocked);
+ Safepoint_lock->unlock();
+
+      // We now try to acquire the threads lock. Since this lock is held by the VM thread during
+ // the entire safepoint, the threads will all line up here during the safepoint.
+ Threads_lock->lock_without_safepoint_check();
+ // restore original state. This is important if the thread comes from compiled code, so it
+ // will continue to execute with the _thread_in_Java state.
+ thread->set_thread_state(state);
+ Threads_lock->unlock();
+ break;
+
+ case _thread_in_native_trans:
+ case _thread_blocked_trans:
+ case _thread_new_trans:
+ if (thread->safepoint_state()->type() == ThreadSafepointState::_call_back) {
+ thread->print_thread_state();
+ fatal("Deadlock in safepoint code. "
+ "Should have called back to the VM before blocking.");
+ }
+
+ // We transition the thread to state _thread_blocked here, but
+ // we can't do our usual check for external suspension and then
+ // self-suspend after the lock_without_safepoint_check() call
+ // below because we are often called during transitions while
+ // we hold different locks. That would leave us suspended while
+ // holding a resource which results in deadlocks.
+ thread->set_thread_state(_thread_blocked);
+
+ // It is not safe to suspend a thread if we discover it is in _thread_in_native_trans. Hence,
+ // the safepoint code might still be waiting for it to block. We need to change the state here,
+ // so it can see that it is at a safepoint.
+
+ // Block until the safepoint operation is completed.
+ Threads_lock->lock_without_safepoint_check();
+
+ // Restore state
+ thread->set_thread_state(state);
+
+ Threads_lock->unlock();
+ break;
+
+ default:
+ fatal1("Illegal threadstate encountered: %d", state);
+ }
+
+  // Check for pending async exceptions or suspends - except if the
+ // thread was blocked inside the VM. has_special_runtime_exit_condition()
+ // is called last since it grabs a lock and we only want to do that when
+ // we must.
+ //
+ // Note: we never deliver an async exception at a polling point as the
+ // compiler may not have an exception handler for it. The polling
+ // code will notice the async and deoptimize and the exception will
+ // be delivered. (Polling at a return point is ok though). Sure is
+ // a lot of bother for a deprecated feature...
+ //
+ // We don't deliver an async exception if the thread state is
+ // _thread_in_native_trans so JNI functions won't be called with
+ // a surprising pending exception. If the thread state is going back to java,
+ // async exception is checked in check_special_condition_for_native_trans().
+
+ if (state != _thread_blocked_trans &&
+ state != _thread_in_vm_trans &&
+ thread->has_special_runtime_exit_condition()) {
+ thread->handle_special_runtime_exit_condition(
+ !thread->is_at_poll_safepoint() && (state != _thread_in_native_trans));
+ }
+}
+
+// ------------------------------------------------------------------------------------------------------
+// Exception handlers
+
+#ifndef PRODUCT
+#ifdef _LP64
+#define PTR_PAD ""
+#else
+#define PTR_PAD " "
+#endif
+
+static void print_ptrs(intptr_t oldptr, intptr_t newptr, bool wasoop) {
+ bool is_oop = newptr ? ((oop)newptr)->is_oop() : false;
+ tty->print_cr(PTR_FORMAT PTR_PAD " %s %c " PTR_FORMAT PTR_PAD " %s %s",
+ oldptr, wasoop?"oop":" ", oldptr == newptr ? ' ' : '!',
+ newptr, is_oop?"oop":" ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":" "));
+}
+
+static void print_longs(jlong oldptr, jlong newptr, bool wasoop) {
+ bool is_oop = newptr ? ((oop)(intptr_t)newptr)->is_oop() : false;
+ tty->print_cr(PTR64_FORMAT " %s %c " PTR64_FORMAT " %s %s",
+ oldptr, wasoop?"oop":" ", oldptr == newptr ? ' ' : '!',
+ newptr, is_oop?"oop":" ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":" "));
+}
+
+#ifdef SPARC
+static void print_me(intptr_t *new_sp, intptr_t *old_sp, bool *was_oops) {
+#ifdef _LP64
+ tty->print_cr("--------+------address-----+------before-----------+-------after----------+");
+ const int incr = 1; // Increment to skip a long, in units of intptr_t
+#else
+ tty->print_cr("--------+--address-+------before-----------+-------after----------+");
+ const int incr = 2; // Increment to skip a long, in units of intptr_t
+#endif
+ tty->print_cr("---SP---|");
+ for( int i=0; i<16; i++ ) {
+ tty->print("blob %c%d |"PTR_FORMAT" ","LO"[i>>3],i&7,new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); }
+ tty->print_cr("--------|");
+ for( int i1=0; i1<frame::memory_parameter_word_sp_offset-16; i1++ ) {
+ tty->print("argv pad|"PTR_FORMAT" ",new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); }
+ tty->print(" pad|"PTR_FORMAT" ",new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++);
+ tty->print_cr("--------|");
+ tty->print(" G1 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr;
+ tty->print(" G3 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr;
+ tty->print(" G4 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr;
+ tty->print(" G5 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr;
+ tty->print_cr(" FSR |"PTR_FORMAT" "PTR64_FORMAT" "PTR64_FORMAT,new_sp,*(jlong*)old_sp,*(jlong*)new_sp);
+ old_sp += incr; new_sp += incr; was_oops += incr;
+ // Skip the floats
+ tty->print_cr("--Float-|"PTR_FORMAT,new_sp);
+ tty->print_cr("---FP---|");
+ old_sp += incr*32; new_sp += incr*32; was_oops += incr*32;
+ for( int i2=0; i2<16; i2++ ) {
+ tty->print("call %c%d |"PTR_FORMAT" ","LI"[i2>>3],i2&7,new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); }
+ tty->print_cr("");
+}
+#endif // SPARC
+#endif // PRODUCT
+
+
+void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) {
+ assert(thread->is_Java_thread(), "polling reference encountered by VM thread");
+ assert(thread->thread_state() == _thread_in_Java, "should come from Java code");
+ assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization");
+
+ // Uncomment this to get some serious before/after printing of the
+ // Sparc safepoint-blob frame structure.
+ /*
+ intptr_t* sp = thread->last_Java_sp();
+ intptr_t stack_copy[150];
+ for( int i=0; i<150; i++ ) stack_copy[i] = sp[i];
+ bool was_oops[150];
+ for( int i=0; i<150; i++ )
+ was_oops[i] = stack_copy[i] ? ((oop)stack_copy[i])->is_oop() : false;
+ */
+
+ if (ShowSafepointMsgs) {
+ tty->print("handle_polling_page_exception: ");
+ }
+
+ if (PrintSafepointStatistics) {
+ inc_page_trap_count();
+ }
+
+ ThreadSafepointState* state = thread->safepoint_state();
+
+ state->handle_polling_page_exception();
+ // print_me(sp,stack_copy,was_oops);
+}
+
+
+void SafepointSynchronize::print_safepoint_timeout(SafepointTimeoutReason reason) {
+ if (!timeout_error_printed) {
+ timeout_error_printed = true;
+    // Print out info on the threads which didn't reach the safepoint, for debugging
+ // purposes (useful when there are lots of threads in the debugger).
+ tty->print_cr("");
+ tty->print_cr("# SafepointSynchronize::begin: Timeout detected:");
+ if (reason == _spinning_timeout) {
+ tty->print_cr("# SafepointSynchronize::begin: Timed out while spinning to reach a safepoint.");
+ } else if (reason == _blocking_timeout) {
+ tty->print_cr("# SafepointSynchronize::begin: Timed out while waiting for threads to stop.");
+ }
+
+ tty->print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:");
+ ThreadSafepointState *cur_state;
+ ResourceMark rm;
+ for(JavaThread *cur_thread = Threads::first(); cur_thread;
+ cur_thread = cur_thread->next()) {
+ cur_state = cur_thread->safepoint_state();
+
+ if (cur_thread->thread_state() != _thread_blocked &&
+ ((reason == _spinning_timeout && cur_state->is_running()) ||
+ (reason == _blocking_timeout && !cur_state->has_called_back()))) {
+ tty->print("# ");
+ cur_thread->print();
+ tty->print_cr("");
+ }
+ }
+ tty->print_cr("# SafepointSynchronize::begin: (End of list)");
+ }
+
+ // To debug the long safepoint, specify both DieOnSafepointTimeout &
+ // ShowMessageBoxOnError.
+ if (DieOnSafepointTimeout) {
+ char msg[1024];
+ VM_Operation *op = VMThread::vm_operation();
+ sprintf(msg, "Safepoint sync time longer than %d ms detected when executing %s.",
+ SafepointTimeoutDelay,
+ op != NULL ? op->name() : "no vm operation");
+ fatal(msg);
+ }
+}
+
+
+// -------------------------------------------------------------------------------------------------------
+// Implementation of ThreadSafepointState
+
+ThreadSafepointState::ThreadSafepointState(JavaThread *thread) {
+ _thread = thread;
+ _type = _running;
+ _has_called_back = false;
+ _at_poll_safepoint = false;
+}
+
+void ThreadSafepointState::create(JavaThread *thread) {
+ ThreadSafepointState *state = new ThreadSafepointState(thread);
+ thread->set_safepoint_state(state);
+}
+
+void ThreadSafepointState::destroy(JavaThread *thread) {
+ if (thread->safepoint_state()) {
+ delete(thread->safepoint_state());
+ thread->set_safepoint_state(NULL);
+ }
+}
+
+void ThreadSafepointState::examine_state_of_thread() {
+ assert(is_running(), "better be running or just have hit safepoint poll");
+
+ JavaThreadState state = _thread->thread_state();
+
+ // Check for a thread that is suspended. Note that thread resume tries
+ // to grab the Threads_lock which we own here, so a thread cannot be
+ // resumed during safepoint synchronization.
+
+ // We check with locking because another thread that has not yet
+ // synchronized may be trying to suspend this one.
+ bool is_suspended = _thread->is_any_suspended_with_lock();
+ if (is_suspended) {
+ roll_forward(_at_safepoint);
+ return;
+ }
+
+ // Some JavaThread states have an initial safepoint state of
+ // running, but are actually at a safepoint. We will happily
+ // agree and update the safepoint state here.
+ if (SafepointSynchronize::safepoint_safe(_thread, state)) {
+ roll_forward(_at_safepoint);
+ return;
+ }
+
+ if (state == _thread_in_vm) {
+ roll_forward(_call_back);
+ return;
+ }
+
+ // All other thread states will continue to run until they
+ // transition and self-block in state _blocked
+ // Safepoint polling in compiled code causes the Java threads to do the same.
+ // Note: new threads may require a malloc so they must be allowed to finish
+
+ assert(is_running(), "examine_state_of_thread on non-running thread");
+ return;
+}
+
+// Roll the thread forward into the given safepoint state (_at_safepoint or _call_back).
+void ThreadSafepointState::roll_forward(suspend_type type) {
+ _type = type;
+
+ switch(_type) {
+ case _at_safepoint:
+ SafepointSynchronize::signal_thread_at_safepoint();
+ break;
+
+ case _call_back:
+ set_has_called_back(false);
+ break;
+
+ case _running:
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+void ThreadSafepointState::restart() {
+ switch(type()) {
+ case _at_safepoint:
+ case _call_back:
+ break;
+
+ case _running:
+ default:
+ tty->print_cr("restart thread "INTPTR_FORMAT" with state %d",
+ _thread, _type);
+ _thread->print();
+ ShouldNotReachHere();
+ }
+ _type = _running;
+ set_has_called_back(false);
+}
+
+
+void ThreadSafepointState::print_on(outputStream *st) const {
+ const char *s;
+
+ switch(_type) {
+ case _running : s = "_running"; break;
+ case _at_safepoint : s = "_at_safepoint"; break;
+ case _call_back : s = "_call_back"; break;
+ default:
+ ShouldNotReachHere();
+ }
+
+ st->print_cr("Thread: " INTPTR_FORMAT
+ " [0x%2x] State: %s _has_called_back %d _at_poll_safepoint %d",
+ _thread, _thread->osthread()->thread_id(), s, _has_called_back,
+ _at_poll_safepoint);
+
+ _thread->print_thread_state_on(st);
+}
+
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// Block the thread at the safepoint poll or poll return.
+void ThreadSafepointState::handle_polling_page_exception() {
+
+ // Check state. block() will set thread state to thread_in_vm which will
+ // cause the safepoint state _type to become _call_back.
+ assert(type() == ThreadSafepointState::_running,
+ "polling page exception on thread not running state");
+
+ // Step 1: Find the nmethod from the return address
+ if (ShowSafepointMsgs && Verbose) {
+ tty->print_cr("Polling page exception at " INTPTR_FORMAT, thread()->saved_exception_pc());
+ }
+ address real_return_addr = thread()->saved_exception_pc();
+
+ CodeBlob *cb = CodeCache::find_blob(real_return_addr);
+ assert(cb != NULL && cb->is_nmethod(), "return address should be in nmethod");
+ nmethod* nm = (nmethod*)cb;
+
+ // Find frame of caller
+ frame stub_fr = thread()->last_frame();
+ CodeBlob* stub_cb = stub_fr.cb();
+ assert(stub_cb->is_safepoint_stub(), "must be a safepoint stub");
+ RegisterMap map(thread(), true);
+ frame caller_fr = stub_fr.sender(&map);
+
+ // Should only be poll_return or poll
+ assert( nm->is_at_poll_or_poll_return(real_return_addr), "should not be at call" );
+
+ // This is a poll immediately before a return. The exception handling code
+ // has already had the effect of causing the return to occur, so the execution
+ // will continue immediately after the call. In addition, the oopmap at the
+ // return point does not mark the return value as an oop (if it is), so
+ // it needs a handle here to be updated.
+ if( nm->is_at_poll_return(real_return_addr) ) {
+ // See if return type is an oop.
+ bool return_oop = nm->method()->is_returning_oop();
+ Handle return_value;
+ if (return_oop) {
+ // The oop result has been saved on the stack together with all
+ // the other registers. In order to preserve it over GCs we need
+ // to keep it in a handle.
+ oop result = caller_fr.saved_oop_result(&map);
+ assert(result == NULL || result->is_oop(), "must be oop");
+ return_value = Handle(thread(), result);
+ assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
+ }
+
+ // Block the thread
+ SafepointSynchronize::block(thread());
+
+ // restore oop result, if any
+ if (return_oop) {
+ caller_fr.set_saved_oop_result(&map, return_value());
+ }
+ }
+
+ // This is a safepoint poll. Verify the return address and block.
+ else {
+ set_at_poll_safepoint(true);
+
+ // verify the blob built the "return address" correctly
+ assert(real_return_addr == caller_fr.pc(), "must match");
+
+ // Block the thread
+ SafepointSynchronize::block(thread());
+ set_at_poll_safepoint(false);
+
+ // If we have a pending async exception deoptimize the frame
+ // as otherwise we may never deliver it.
+ if (thread()->has_async_condition()) {
+ ThreadInVMfromJavaNoAsyncException __tiv(thread());
+ VM_DeoptimizeFrame deopt(thread(), caller_fr.id());
+ VMThread::execute(&deopt);
+ }
+
+ // If an exception has been installed we must check for a pending deoptimization
+ // Deoptimize frame if exception has been thrown.
+
+ if (thread()->has_pending_exception() ) {
+ RegisterMap map(thread(), true);
+ frame caller_fr = stub_fr.sender(&map);
+ if (caller_fr.is_deoptimized_frame()) {
+        // The exception patch will destroy registers that are still
+        // live and will be needed during deoptimization.  The async
+        // exception machinery should have deferred the exception until the
+        // next safepoint, which will be detected when we get into
+        // the interpreter; so if we have an exception now, things
+        // are messed up.
+
+ fatal("Exception installed and deoptimization is pending");
+ }
+ }
+ }
+}
+
+
+//
+// Statistics & Instrumentations
+//
+SafepointSynchronize::SafepointStats* SafepointSynchronize::_safepoint_stats = NULL;
+int SafepointSynchronize::_cur_stat_index = 0;
+julong SafepointSynchronize::_safepoint_reasons[VM_Operation::VMOp_Terminating];
+julong SafepointSynchronize::_coalesced_vmop_count = 0;
+jlong SafepointSynchronize::_max_sync_time = 0;
+
+// last_safepoint_start_time records the start time of last safepoint.
+static jlong last_safepoint_start_time = 0;
+static jlong sync_end_time = 0;
+static bool need_to_track_page_armed_status = false;
+static bool init_done = false;
+
+void SafepointSynchronize::deferred_initialize_stat() {
+ if (init_done) return;
+
+ if (PrintSafepointStatisticsCount <= 0) {
+ fatal("Wrong PrintSafepointStatisticsCount");
+ }
+
+ // If PrintSafepointStatisticsTimeout is specified, the statistics data will
+ // be printed right away, in which case, _safepoint_stats will regress to
+ // a single element array. Otherwise, it is a circular ring buffer with default
+ // size of PrintSafepointStatisticsCount.
+ int stats_array_size;
+ if (PrintSafepointStatisticsTimeout > 0) {
+ stats_array_size = 1;
+ PrintSafepointStatistics = true;
+ } else {
+ stats_array_size = PrintSafepointStatisticsCount;
+ }
+ _safepoint_stats = (SafepointStats*)os::malloc(stats_array_size
+ * sizeof(SafepointStats));
+ guarantee(_safepoint_stats != NULL,
+ "not enough memory for safepoint instrumentation data");
+
+ if (UseCompilerSafepoints && DeferPollingPageLoopCount >= 0) {
+ need_to_track_page_armed_status = true;
+ }
+
+ tty->print(" vmop_name "
+ "[threads: total initially_running wait_to_block] ");
+ tty->print("[time: spin block sync] "
+ "[vmop_time time_elapsed] ");
+
+ // no page armed status printed out if it is always armed.
+ if (need_to_track_page_armed_status) {
+ tty->print("page_armed ");
+ }
+
+ tty->print_cr("page_trap_count");
+
+ init_done = true;
+}
+
+void SafepointSynchronize::begin_statistics(int nof_threads, int nof_running) {
+ deferred_initialize_stat();
+
+ SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
+
+ VM_Operation *op = VMThread::vm_operation();
+ spstat->_vmop_type = (op != NULL ? op->type() : -1);
+ if (op != NULL) {
+ _safepoint_reasons[spstat->_vmop_type]++;
+ }
+
+ spstat->_nof_total_threads = nof_threads;
+ spstat->_nof_initial_running_threads = nof_running;
+ spstat->_nof_threads_hit_page_trap = 0;
+
+ // Records the start time of spinning. The real time spent on spinning
+ // will be adjusted when spin is done. Same trick is applied for time
+ // spent on waiting for threads to block.
+ if (nof_running != 0) {
+ spstat->_time_to_spin = os::javaTimeNanos();
+ } else {
+ spstat->_time_to_spin = 0;
+ }
+
+ if (last_safepoint_start_time == 0) {
+ spstat->_time_elapsed_since_last_safepoint = 0;
+ } else {
+ spstat->_time_elapsed_since_last_safepoint = _last_safepoint -
+ last_safepoint_start_time;
+ }
+ last_safepoint_start_time = _last_safepoint;
+}
+
+void SafepointSynchronize::update_statistics_on_spin_end() {
+ SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
+
+ jlong cur_time = os::javaTimeNanos();
+
+ spstat->_nof_threads_wait_to_block = _waiting_to_block;
+ if (spstat->_nof_initial_running_threads != 0) {
+ spstat->_time_to_spin = cur_time - spstat->_time_to_spin;
+ }
+
+ if (need_to_track_page_armed_status) {
+ spstat->_page_armed = (PageArmed == 1);
+ }
+
+  // Records the start time of waiting for threads to block. Updated when blocking is done.
+ if (_waiting_to_block != 0) {
+ spstat->_time_to_wait_to_block = cur_time;
+ } else {
+ spstat->_time_to_wait_to_block = 0;
+ }
+}
+
+void SafepointSynchronize::update_statistics_on_sync_end(jlong end_time) {
+ SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
+
+ if (spstat->_nof_threads_wait_to_block != 0) {
+ spstat->_time_to_wait_to_block = end_time -
+ spstat->_time_to_wait_to_block;
+ }
+
+ // Records the end time of sync which will be used to calculate the total
+  // vm operation time. Again, the real time spent in syncing will be deducted
+ // from the start of the sync time later when end_statistics is called.
+ spstat->_time_to_sync = end_time - _last_safepoint;
+ if (spstat->_time_to_sync > _max_sync_time) {
+ _max_sync_time = spstat->_time_to_sync;
+ }
+ sync_end_time = end_time;
+}
+
+void SafepointSynchronize::end_statistics(jlong vmop_end_time) {
+ SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
+
+ // Update the vm operation time.
+ spstat->_time_to_exec_vmop = vmop_end_time - sync_end_time;
+ // Only the sync time longer than the specified
+ // PrintSafepointStatisticsTimeout will be printed out right away.
+ // By default, it is -1 meaning all samples will be put into the list.
+ if ( PrintSafepointStatisticsTimeout > 0) {
+ if (spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
+ print_statistics();
+ }
+ } else {
+    // The safepoint statistics will be printed out when the _safepoint_stats
+ // array fills up.
+ if (_cur_stat_index != PrintSafepointStatisticsCount - 1) {
+ _cur_stat_index ++;
+ } else {
+ print_statistics();
+ _cur_stat_index = 0;
+ tty->print_cr("");
+ }
+ }
+}
+
+void SafepointSynchronize::print_statistics() {
+ int index;
+ SafepointStats* sstats = _safepoint_stats;
+
+ for (index = 0; index <= _cur_stat_index; index++) {
+ sstats = &_safepoint_stats[index];
+ tty->print("%-28s ["
+ INT32_FORMAT_W(8)INT32_FORMAT_W(11)INT32_FORMAT_W(15)
+ "] ",
+ sstats->_vmop_type == -1 ? "no vm operation" :
+ VM_Operation::name(sstats->_vmop_type),
+ sstats->_nof_total_threads,
+ sstats->_nof_initial_running_threads,
+ sstats->_nof_threads_wait_to_block);
+ // "/ MICROUNITS " is to convert the unit from nanos to millis.
+ tty->print(" ["
+ INT64_FORMAT_W(6)INT64_FORMAT_W(6)INT64_FORMAT_W(6)
+ "] "
+ "["INT64_FORMAT_W(6)INT64_FORMAT_W(9) "] ",
+ sstats->_time_to_spin / MICROUNITS,
+ sstats->_time_to_wait_to_block / MICROUNITS,
+ sstats->_time_to_sync / MICROUNITS,
+ sstats->_time_to_exec_vmop / MICROUNITS,
+ sstats->_time_elapsed_since_last_safepoint / MICROUNITS);
+
+ if (need_to_track_page_armed_status) {
+ tty->print(INT32_FORMAT" ", sstats->_page_armed);
+ }
+ tty->print_cr(INT32_FORMAT" ", sstats->_nof_threads_hit_page_trap);
+ }
+}
+
+// This method will be called when VM exits. It will first call
+// print_statistics to print out the rest of the sampling. Then
+// it tries to summarize the sampling.
+void SafepointSynchronize::print_stat_on_exit() {
+ if (_safepoint_stats == NULL) return;
+
+ SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
+
+ // During VM exit, end_statistics may not get called and in that
+ // case, if the sync time is less than PrintSafepointStatisticsTimeout,
+ // don't print it out.
+ // Approximate the vm op time.
+ _safepoint_stats[_cur_stat_index]._time_to_exec_vmop =
+ os::javaTimeNanos() - sync_end_time;
+
+ if ( PrintSafepointStatisticsTimeout < 0 ||
+ spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
+ print_statistics();
+ }
+ tty->print_cr("");
+
+ // Print out polling page sampling status.
+ if (!need_to_track_page_armed_status) {
+ if (UseCompilerSafepoints) {
+ tty->print_cr("Polling page always armed");
+ }
+ } else {
+ tty->print_cr("Defer polling page loop count = %d\n",
+ DeferPollingPageLoopCount);
+ }
+
+ for (int index = 0; index < VM_Operation::VMOp_Terminating; index++) {
+ if (_safepoint_reasons[index] != 0) {
+ tty->print_cr("%-26s"UINT64_FORMAT_W(10), VM_Operation::name(index),
+ _safepoint_reasons[index]);
+ }
+ }
+
+ tty->print_cr(UINT64_FORMAT_W(5)" VM operations coalesced during safepoint",
+ _coalesced_vmop_count);
+ tty->print_cr("Maximum sync time "INT64_FORMAT_W(5)" ms",
+ _max_sync_time / MICROUNITS);
+}
+
+// ------------------------------------------------------------------------------------------------
+// Non-product code
+
+#ifndef PRODUCT
+
+void SafepointSynchronize::print_state() {
+ if (_state == _not_synchronized) {
+ tty->print_cr("not synchronized");
+ } else if (_state == _synchronizing || _state == _synchronized) {
+ tty->print_cr("State: %s", (_state == _synchronizing) ? "synchronizing" :
+ "synchronized");
+
+ for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
+ cur->safepoint_state()->print();
+ }
+ }
+}
+
+void SafepointSynchronize::safepoint_msg(const char* format, ...) {
+ if (ShowSafepointMsgs) {
+ va_list ap;
+ va_start(ap, format);
+ tty->vprint_cr(format, ap);
+ va_end(ap);
+ }
+}
+
+#endif // !PRODUCT
diff --git a/src/share/vm/runtime/safepoint.hpp b/src/share/vm/runtime/safepoint.hpp
new file mode 100644
index 000000000..86b16e7e0
--- /dev/null
+++ b/src/share/vm/runtime/safepoint.hpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+//
+// Safepoint synchronization
+//
+// The VMThread or CMS_thread uses the SafepointSynchronize::begin/end
+// methods to enter/exit a safepoint region. The begin method will roll
+// all JavaThreads forward to a safepoint.
+//
+// JavaThreads must use the ThreadSafepointState abstraction (defined in
+// thread.hpp) to indicate that they are at a safepoint.
+//
+// The Mutex/Condition variable and ObjectLocker classes call the enter/
+// exit safepoint methods when a thread is blocked/restarted. Hence, all mutex
+// enter/exit points *must* be at a safepoint.
+
+
+class ThreadSafepointState;
+class SnippetCache;
+class nmethod;
+
+//
+// Implements roll-forward to safepoint (safepoint synchronization)
+//
+class SafepointSynchronize : AllStatic {
+ public:
+ enum SynchronizeState {
+ _not_synchronized = 0, // Threads not synchronized at a safepoint
+ // Keep this value 0. See the comment in do_call_back().
+ _synchronizing = 1, // Synchronizing in progress
+ _synchronized = 2 // All Java threads are stopped at a safepoint. Only VM thread is running
+ };
+
+ enum SafepointingThread {
+ _null_thread = 0,
+ _vm_thread = 1,
+ _other_thread = 2
+ };
+
+ enum SafepointTimeoutReason {
+ _spinning_timeout = 0,
+ _blocking_timeout = 1
+ };
+
+ typedef struct {
+ int _vmop_type; // type of VM operation that triggered the safepoint
+ int _nof_total_threads; // total number of Java threads
+ int _nof_initial_running_threads; // total number of initially seen running threads
+ int _nof_threads_wait_to_block; // total number of threads waiting to block
+ bool _page_armed; // true if polling page is armed, false otherwise
+ int _nof_threads_hit_page_trap; // total number of threads hitting the page trap
+ jlong _time_to_spin; // total time in nanos spent spinning
+ jlong _time_to_wait_to_block; // total time in nanos spent waiting to block
+ jlong _time_to_sync; // total time in nanos spent getting to _synchronized
+ jlong _time_to_exec_vmop; // total time in nanos spent in the vm operation itself
+ jlong _time_elapsed_since_last_safepoint; // time elapsed since last safepoint
+ } SafepointStats;
+
+ private:
+ static volatile SynchronizeState _state; // Threads might read this flag directly, without acquiring the Threads_lock
+ static volatile int _waiting_to_block; // Number of threads we are waiting on to block.
+
+ // This counter is used for fast versions of jni_Get<Primitive>Field.
+ // An even value means there are no ongoing safepoint operations.
+ // The counter is incremented ONLY at the beginning and end of each
+ // safepoint. The fact that Threads_lock is held throughout each pair of
+ // increments (at the beginning and end of each safepoint) guarantees
+ // race freedom.
+public:
+ static volatile int _safepoint_counter;
+private:
+
+ static jlong _last_safepoint; // Time of last safepoint
+
+ // statistics
+ static SafepointStats* _safepoint_stats; // array of SafepointStats struct
+ static int _cur_stat_index; // current index to the above array
+ static julong _safepoint_reasons[]; // safepoint count for each VM op
+ static julong _coalesced_vmop_count;// coalesced vmop count
+ static jlong _max_sync_time; // maximum sync time in nanos
+
+ static void begin_statistics(int nof_threads, int nof_running);
+ static void update_statistics_on_spin_end();
+ static void update_statistics_on_sync_end(jlong end_time);
+ static void end_statistics(jlong end_time);
+ static void print_statistics();
+ inline static void inc_page_trap_count() {
+ Atomic::inc(&_safepoint_stats[_cur_stat_index]._nof_threads_hit_page_trap);
+ }
+
+ // For debugging long safepoints
+ static void print_safepoint_timeout(SafepointTimeoutReason timeout_reason);
+
+public:
+
+ // Main entry points
+
+ // Roll all threads forward to safepoint. Must be called by the
+ // VMThread or CMS_thread.
+ static void begin();
+ static void end(); // Start all suspended threads again...
+
+ static bool safepoint_safe(JavaThread *thread, JavaThreadState state);
+
+ // Query
+ inline static bool is_at_safepoint() { return _state == _synchronized; }
+ inline static bool is_synchronizing() { return _state == _synchronizing; }
+
+ inline static bool do_call_back() {
+ return (_state != _not_synchronized);
+ }
+
+ // Called when a thread voluntarily blocks
+ static void block(JavaThread *thread);
+ static void signal_thread_at_safepoint() { _waiting_to_block--; }
+
+ // Exception handling for page polling
+ static void handle_polling_page_exception(JavaThread *thread);
+
+ // VM Thread interface for determining safepoint rate
+ static long last_non_safepoint_interval() { return os::javaTimeMillis() - _last_safepoint; }
+ static bool is_cleanup_needed();
+ static void do_cleanup_tasks();
+
+ // debugging
+ static void print_state() PRODUCT_RETURN;
+ static void safepoint_msg(const char* format, ...) PRODUCT_RETURN;
+
+ static void deferred_initialize_stat();
+ static void print_stat_on_exit();
+ inline static void inc_vmop_coalesced_count() { _coalesced_vmop_count++; }
+
+ static void set_is_at_safepoint() { _state = _synchronized; }
+ static void set_is_not_at_safepoint() { _state = _not_synchronized; }
+
+ // assembly support
+ static address address_of_state() { return (address)&_state; }
+
+ static address safepoint_counter_addr() { return (address)&_safepoint_counter; }
+};
+
+// State class for a thread suspended at a safepoint
+class ThreadSafepointState: public CHeapObj {
+ public:
+ // These states are maintained by the VM thread while threads are being brought
+ // to a safepoint. After SafepointSynchronize::end(), they are reset to
+ // _running.
+ enum suspend_type {
+ _running = 0, // Thread state not yet determined (i.e., not at a safepoint yet)
+ _at_safepoint = 1, // Thread at a safepoint (e.g., when blocked on a lock)
+ _call_back = 2 // Keep executing and wait for callback (if the thread is in interpreted or VM code)
+ };
+ private:
+ volatile bool _at_poll_safepoint; // At polling page safepoint (NOT a poll return safepoint)
+ // Thread has called back the safepoint code (for debugging)
+ bool _has_called_back;
+
+ JavaThread * _thread;
+ volatile suspend_type _type;
+
+
+ public:
+ ThreadSafepointState(JavaThread *thread);
+
+ // examine/roll-forward/restart
+ void examine_state_of_thread();
+ void roll_forward(suspend_type type);
+ void restart();
+
+ // Query
+ JavaThread* thread() const { return _thread; }
+ suspend_type type() const { return _type; }
+ bool is_running() const { return (_type==_running); }
+
+ // Support for safepoint timeout (debugging)
+ bool has_called_back() const { return _has_called_back; }
+ void set_has_called_back(bool val) { _has_called_back = val; }
+ bool is_at_poll_safepoint() { return _at_poll_safepoint; }
+ void set_at_poll_safepoint(bool val) { _at_poll_safepoint = val; }
+
+ void handle_polling_page_exception();
+
+ // debugging
+ void print_on(outputStream* st) const;
+ void print() const { print_on(tty); }
+
+ // Initialize
+ static void create(JavaThread *thread);
+ static void destroy(JavaThread *thread);
+
+ void safepoint_msg(const char* format, ...) {
+ if (ShowSafepointMsgs) {
+ va_list ap;
+ va_start(ap, format);
+ tty->vprint_cr(format, ap);
+ va_end(ap);
+ }
+ }
+};
+
+//
+// CounterDecay
+//
+// Iterates through invocation counters and decrements them. This
+// is done at each safepoint.
+//
+class CounterDecay : public AllStatic {
+ static jlong _last_timestamp;
+ public:
+ static void decay();
+ static bool is_decay_needed() { return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength; }
+};
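The even/odd protocol on _safepoint_counter described in the class comment is what the fast jni_Get<Primitive>Field accessors rely on. Those accessors are generated stubs, but the retry logic can be sketched in plain C++ roughly as follows (read_int_field_fast and its arguments are illustrative only, and a real implementation also needs load ordering between the two counter reads):

// Sketch: succeed only if the counter is even (no safepoint in progress)
// and unchanged across the speculative read of the field.
static bool read_int_field_fast(volatile int* safepoint_counter,
                                const jint* field_addr, jint* result) {
  int before = *safepoint_counter;
  if ((before & 1) != 0) return false;   // odd: safepoint in progress, use slow path
  jint value = *field_addr;              // speculative read of the Java field
  if (*safepoint_counter != before) {
    return false;                        // a safepoint began or ended meanwhile
  }
  *result = value;                       // no safepoint ran, so the read is valid
  return true;
}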
diff --git a/src/share/vm/runtime/sharedRuntime.cpp b/src/share/vm/runtime/sharedRuntime.cpp
new file mode 100644
index 000000000..e9a568588
--- /dev/null
+++ b/src/share/vm/runtime/sharedRuntime.cpp
@@ -0,0 +1,2181 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharedRuntime.cpp.incl"
+#include <math.h>
+
+HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
+HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
+ char*, int, char*, int, char*, int);
+HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
+ char*, int, char*, int, char*, int);
+
+// Implementation of SharedRuntime
+
+#ifndef PRODUCT
+// For statistics
+int SharedRuntime::_ic_miss_ctr = 0;
+int SharedRuntime::_wrong_method_ctr = 0;
+int SharedRuntime::_resolve_static_ctr = 0;
+int SharedRuntime::_resolve_virtual_ctr = 0;
+int SharedRuntime::_resolve_opt_virtual_ctr = 0;
+int SharedRuntime::_implicit_null_throws = 0;
+int SharedRuntime::_implicit_div0_throws = 0;
+int SharedRuntime::_throw_null_ctr = 0;
+
+int SharedRuntime::_nof_normal_calls = 0;
+int SharedRuntime::_nof_optimized_calls = 0;
+int SharedRuntime::_nof_inlined_calls = 0;
+int SharedRuntime::_nof_megamorphic_calls = 0;
+int SharedRuntime::_nof_static_calls = 0;
+int SharedRuntime::_nof_inlined_static_calls = 0;
+int SharedRuntime::_nof_interface_calls = 0;
+int SharedRuntime::_nof_optimized_interface_calls = 0;
+int SharedRuntime::_nof_inlined_interface_calls = 0;
+int SharedRuntime::_nof_megamorphic_interface_calls = 0;
+int SharedRuntime::_nof_removable_exceptions = 0;
+
+int SharedRuntime::_new_instance_ctr=0;
+int SharedRuntime::_new_array_ctr=0;
+int SharedRuntime::_multi1_ctr=0;
+int SharedRuntime::_multi2_ctr=0;
+int SharedRuntime::_multi3_ctr=0;
+int SharedRuntime::_multi4_ctr=0;
+int SharedRuntime::_multi5_ctr=0;
+int SharedRuntime::_mon_enter_stub_ctr=0;
+int SharedRuntime::_mon_exit_stub_ctr=0;
+int SharedRuntime::_mon_enter_ctr=0;
+int SharedRuntime::_mon_exit_ctr=0;
+int SharedRuntime::_partial_subtype_ctr=0;
+int SharedRuntime::_jbyte_array_copy_ctr=0;
+int SharedRuntime::_jshort_array_copy_ctr=0;
+int SharedRuntime::_jint_array_copy_ctr=0;
+int SharedRuntime::_jlong_array_copy_ctr=0;
+int SharedRuntime::_oop_array_copy_ctr=0;
+int SharedRuntime::_checkcast_array_copy_ctr=0;
+int SharedRuntime::_unsafe_array_copy_ctr=0;
+int SharedRuntime::_generic_array_copy_ctr=0;
+int SharedRuntime::_slow_array_copy_ctr=0;
+int SharedRuntime::_find_handler_ctr=0;
+int SharedRuntime::_rethrow_ctr=0;
+
+int SharedRuntime::_ICmiss_index = 0;
+int SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
+address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
+
+void SharedRuntime::trace_ic_miss(address at) {
+ for (int i = 0; i < _ICmiss_index; i++) {
+ if (_ICmiss_at[i] == at) {
+ _ICmiss_count[i]++;
+ return;
+ }
+ }
+ int index = _ICmiss_index++;
+ if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
+ _ICmiss_at[index] = at;
+ _ICmiss_count[index] = 1;
+}
+
+void SharedRuntime::print_ic_miss_histogram() {
+ if (ICMissHistogram) {
+ tty->print_cr ("IC Miss Histogram:");
+ int tot_misses = 0;
+ for (int i = 0; i < _ICmiss_index; i++) {
+ tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", _ICmiss_at[i], _ICmiss_count[i]);
+ tot_misses += _ICmiss_count[i];
+ }
+ tty->print_cr ("Total IC misses: %7d", tot_misses);
+ }
+}
+#endif // PRODUCT
+
+
+JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
+ return x * y;
+JRT_END
+
+
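+// The division and remainder helpers below special-case min_jlong / -1,
+// for which Java defines the quotient as min_jlong and the remainder as 0;
+// evaluating it with a native divide could otherwise overflow or trap.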
+JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
+ if (x == min_jlong && y == CONST64(-1)) {
+ return x;
+ } else {
+ return x / y;
+ }
+JRT_END
+
+
+JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
+ if (x == min_jlong && y == CONST64(-1)) {
+ return 0;
+ } else {
+ return x % y;
+ }
+JRT_END
+
+
+const juint float_sign_mask = 0x7FFFFFFF;
+const juint float_infinity = 0x7F800000;
+const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
+const julong double_infinity = CONST64(0x7FF0000000000000);
+
+JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
+#ifdef _WIN64
+ // 64-bit Windows on amd64 returns the wrong values for
+ // infinity operands.
+ union { jfloat f; juint i; } xbits, ybits;
+ xbits.f = x;
+ ybits.f = y;
+ // x Mod Infinity == x unless x is infinity
+ if ( ((xbits.i & float_sign_mask) != float_infinity) &&
+ ((ybits.i & float_sign_mask) == float_infinity) ) {
+ return x;
+ }
+#endif
+ return ((jfloat)fmod((double)x,(double)y));
+JRT_END
+
+
+JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
+#ifdef _WIN64
+ union { jdouble d; julong l; } xbits, ybits;
+ xbits.d = x;
+ ybits.d = y;
+ // x Mod Infinity == x unless x is infinity
+ if ( ((xbits.l & double_sign_mask) != double_infinity) &&
+ ((ybits.l & double_sign_mask) == double_infinity) ) {
+ return x;
+ }
+#endif
+ return ((jdouble)fmod((double)x,(double)y));
+JRT_END
+
+
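+// The conversion helpers below implement the Java semantics for converting
+// floating-point values to integral types: NaN converts to 0, and values
+// outside the target range saturate to the type's minimum or maximum.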
+JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
+ if (g_isnan(x)) {return 0;}
+ jlong lltmp = (jlong)x;
+ jint ltmp = (jint)lltmp;
+ if (ltmp == lltmp) {
+ return ltmp;
+ } else {
+ if (x < 0) {
+ return min_jint;
+ } else {
+ return max_jint;
+ }
+ }
+JRT_END
+
+
+JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
+ if (g_isnan(x)) {return 0;}
+ jlong lltmp = (jlong)x;
+ if (lltmp != min_jlong) {
+ return lltmp;
+ } else {
+ if (x < 0) {
+ return min_jlong;
+ } else {
+ return max_jlong;
+ }
+ }
+JRT_END
+
+
+JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
+ if (g_isnan(x)) {return 0;}
+ jlong lltmp = (jlong)x;
+ jint ltmp = (jint)lltmp;
+ if (ltmp == lltmp) {
+ return ltmp;
+ } else {
+ if (x < 0) {
+ return min_jint;
+ } else {
+ return max_jint;
+ }
+ }
+JRT_END
+
+
+JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
+ if (g_isnan(x)) {return 0;}
+ jlong lltmp = (jlong)x;
+ if (lltmp != min_jlong) {
+ return lltmp;
+ } else {
+ if (x < 0) {
+ return min_jlong;
+ } else {
+ return max_jlong;
+ }
+ }
+JRT_END
+
+
+JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
+ return (jfloat)x;
+JRT_END
+
+
+JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
+ return (jfloat)x;
+JRT_END
+
+
+JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
+ return (jdouble)x;
+JRT_END
+
+// Exception handling across interpreter/compiler boundaries
+//
+// exception_handler_for_return_address(...) returns the continuation address.
+// The continuation address is the entry point of the exception handler of the
+// previous frame depending on the return address.
+
+address SharedRuntime::raw_exception_handler_for_return_address(address return_address) {
+ assert(frame::verify_return_pc(return_address), "must be a return pc");
+
+ // the fastest case first
+ CodeBlob* blob = CodeCache::find_blob(return_address);
+ if (blob != NULL && blob->is_nmethod()) {
+ nmethod* code = (nmethod*)blob;
+ assert(code != NULL, "nmethod must be present");
+ // native nmethods don't have exception handlers
+ assert(!code->is_native_method(), "no exception handler");
+ assert(code->header_begin() != code->exception_begin(), "no exception handler");
+ if (code->is_deopt_pc(return_address)) {
+ return SharedRuntime::deopt_blob()->unpack_with_exception();
+ } else {
+ return code->exception_begin();
+ }
+ }
+
+ // Entry code
+ if (StubRoutines::returns_to_call_stub(return_address)) {
+ return StubRoutines::catch_exception_entry();
+ }
+ // Interpreted code
+ if (Interpreter::contains(return_address)) {
+ return Interpreter::rethrow_exception_entry();
+ }
+
+ // Compiled code
+ if (CodeCache::contains(return_address)) {
+ CodeBlob* blob = CodeCache::find_blob(return_address);
+ if (blob->is_nmethod()) {
+ nmethod* code = (nmethod*)blob;
+ assert(code != NULL, "nmethod must be present");
+ assert(code->header_begin() != code->exception_begin(), "no exception handler");
+ return code->exception_begin();
+ }
+ if (blob->is_runtime_stub()) {
+ ShouldNotReachHere(); // callers are responsible for skipping runtime stub frames
+ }
+ }
+ guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
+#ifndef PRODUCT
+ { ResourceMark rm;
+ tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
+ tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");
+ tty->print_cr("b) other problem");
+ }
+#endif // PRODUCT
+ ShouldNotReachHere();
+ return NULL;
+}
+
+
+JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(address return_address))
+ return raw_exception_handler_for_return_address(return_address);
+JRT_END
+
+address SharedRuntime::get_poll_stub(address pc) {
+ address stub;
+ // Look up the code blob
+ CodeBlob *cb = CodeCache::find_blob(pc);
+
+ // Should be an nmethod
+ assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" );
+
+ // Look up the relocation information
+ assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc),
+ "safepoint polling: type must be poll" );
+
+ assert( ((NativeInstruction*)pc)->is_safepoint_poll(),
+ "Only polling locations are used for safepoint");
+
+ bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
+ if (at_poll_return) {
+ assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
+ "polling page return stub not created yet");
+ stub = SharedRuntime::polling_page_return_handler_blob()->instructions_begin();
+ } else {
+ assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
+ "polling page safepoint stub not created yet");
+ stub = SharedRuntime::polling_page_safepoint_handler_blob()->instructions_begin();
+ }
+#ifndef PRODUCT
+ if( TraceSafepoint ) {
+ char buf[256];
+ jio_snprintf(buf, sizeof(buf),
+ "... found polling page %s exception at pc = "
+ INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
+ at_poll_return ? "return" : "loop",
+ (intptr_t)pc, (intptr_t)stub);
+ tty->print_raw_cr(buf);
+ }
+#endif // PRODUCT
+ return stub;
+}
+
+
+oop SharedRuntime::retrieve_receiver( symbolHandle sig, frame caller ) {
+ assert(caller.is_interpreted_frame(), "");
+ int args_size = ArgumentSizeComputer(sig).size() + 1;
+ assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
+ oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
+ assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
+ return result;
+}
+
+
+void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) {
+ if (JvmtiExport::can_post_exceptions()) {
+ vframeStream vfst(thread, true);
+ methodHandle method = methodHandle(thread, vfst.method());
+ address bcp = method()->bcp_from(vfst.bci());
+ JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception());
+ }
+ Exceptions::_throw(thread, __FILE__, __LINE__, h_exception);
+}
+
+void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message) {
+ Handle h_exception = Exceptions::new_exception(thread, name, message);
+ throw_and_post_jvmti_exception(thread, h_exception);
+}
+
+// ret_pc points into caller; we are returning caller's exception handler
+// for given exception
+address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
+ bool force_unwind, bool top_frame_only) {
+ assert(nm != NULL, "must exist");
+ ResourceMark rm;
+
+ ScopeDesc* sd = nm->scope_desc_at(ret_pc);
+ // determine handler bci, if any
+ EXCEPTION_MARK;
+
+ int handler_bci = -1;
+ int scope_depth = 0;
+ if (!force_unwind) {
+ int bci = sd->bci();
+ do {
+ bool skip_scope_increment = false;
+ // exception handler lookup
+ KlassHandle ek (THREAD, exception->klass());
+ handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ // We threw an exception while trying to find the exception handler.
+ // Transfer the new exception to the exception handle which will
+ // be set into thread local storage, and do another lookup for an
+ // exception handler for this exception, this time starting at the
+ // BCI of the exception handler which caused the exception to be
+ // thrown (bugs 4307310 and 4546590). Set "exception" reference
+ // argument to ensure that the correct exception is thrown (4870175).
+ exception = Handle(THREAD, PENDING_EXCEPTION);
+ CLEAR_PENDING_EXCEPTION;
+ if (handler_bci >= 0) {
+ bci = handler_bci;
+ handler_bci = -1;
+ skip_scope_increment = true;
+ }
+ }
+ if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
+ sd = sd->sender();
+ if (sd != NULL) {
+ bci = sd->bci();
+ }
+ ++scope_depth;
+ }
+ } while (!top_frame_only && handler_bci < 0 && sd != NULL);
+ }
+
+ // found handling method => lookup exception handler
+ int catch_pco = ret_pc - nm->instructions_begin();
+
+ ExceptionHandlerTable table(nm);
+ HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
+ if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
+ // Allow abbreviated catch tables. The idea is to allow a method
+ // to materialize its exceptions without committing to the exact
+ // routing of exceptions. In particular this is needed for adding
+ // a synthetic handler to unlock monitors when inlining
+ // synchronized methods since the unlock path isn't represented in
+ // the bytecodes.
+ t = table.entry_for(catch_pco, -1, 0);
+ }
+
+#ifdef COMPILER1
+ if (nm->is_compiled_by_c1() && t == NULL && handler_bci == -1) {
+ // Exception is not handled by this frame so unwind. Note that
+ // this is not the same as how C2 does this. C2 emits a table
+ // entry that dispatches to the unwind code in the nmethod.
+ return NULL;
+ }
+#endif /* COMPILER1 */
+
+
+ if (t == NULL) {
+ tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
+ tty->print_cr(" Exception:");
+ exception->print();
+ tty->cr();
+ tty->print_cr(" Compiled exception table :");
+ table.print();
+ nm->print_code();
+ guarantee(false, "missing exception handler");
+ return NULL;
+ }
+
+ return nm->instructions_begin() + t->pco();
+}
+
+JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
+ // These errors occur only at call sites
+ throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
+JRT_END
+
+JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
+ throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
+JRT_END
+
+JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread))
+ throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
+JRT_END
+
+JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread))
+ // This entry point is effectively only used for NullPointerExceptions which occur at inline
+ // cache sites (when the callee activation is not yet set up) so we are at a call site
+ throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
+JRT_END
+
+JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
+ // We avoid using the normal exception construction in this case because
+ // it performs an upcall to Java, and we're already out of stack space.
+ klassOop k = SystemDictionary::StackOverflowError_klass();
+ oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK);
+ Handle exception (thread, exception_oop);
+ if (StackTraceInThrowable) {
+ java_lang_Throwable::fill_in_stack_trace(exception);
+ }
+ throw_and_post_jvmti_exception(thread, exception);
+JRT_END
+
+address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
+ address pc,
+ SharedRuntime::ImplicitExceptionKind exception_kind)
+{
+ address target_pc = NULL;
+
+ if (Interpreter::contains(pc)) {
+#ifdef CC_INTERP
+ // C++ interpreter doesn't throw implicit exceptions
+ ShouldNotReachHere();
+#else
+ switch (exception_kind) {
+ case IMPLICIT_NULL: return Interpreter::throw_NullPointerException_entry();
+ case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
+ case STACK_OVERFLOW: return Interpreter::throw_StackOverflowError_entry();
+ default: ShouldNotReachHere();
+ }
+#endif // !CC_INTERP
+ } else {
+ switch (exception_kind) {
+ case STACK_OVERFLOW: {
+ // Stack overflow only occurs upon frame setup; the callee is
+ // going to be unwound. Dispatch to a shared runtime stub
+ // which will cause the StackOverflowError to be fabricated
+ // and processed.
+ // For stack overflow in deoptimization blob, cleanup thread.
+ if (thread->deopt_mark() != NULL) {
+ Deoptimization::cleanup_deopt_info(thread, NULL);
+ }
+ return StubRoutines::throw_StackOverflowError_entry();
+ }
+
+ case IMPLICIT_NULL: {
+ if (VtableStubs::contains(pc)) {
+ // We haven't yet entered the callee frame. Fabricate an
+ // exception and begin dispatching it in the caller. Since
+ // the caller was at a call site, it's safe to destroy all
+ // caller-saved registers, as these entry points do.
+ VtableStub* vt_stub = VtableStubs::stub_containing(pc);
+ guarantee(vt_stub != NULL, "unable to find SEGVing vtable stub");
+ if (vt_stub->is_abstract_method_error(pc)) {
+ assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
+ return StubRoutines::throw_AbstractMethodError_entry();
+ } else {
+ return StubRoutines::throw_NullPointerException_at_call_entry();
+ }
+ } else {
+ CodeBlob* cb = CodeCache::find_blob(pc);
+ guarantee(cb != NULL, "exception happened outside interpreter, nmethods and vtable stubs (1)");
+
+ // Exception happened in CodeCache. Must be either:
+ // 1. Inline-cache check in C2I handler blob,
+ // 2. Inline-cache check in nmethod, or
+ // 3. Implicit null exception in nmethod
+
+ if (!cb->is_nmethod()) {
+ guarantee(cb->is_adapter_blob(),
+ "exception happened outside interpreter, nmethods and vtable stubs (2)");
+ // There is no handler here, so we will simply unwind.
+ return StubRoutines::throw_NullPointerException_at_call_entry();
+ }
+
+ // Otherwise, it's an nmethod. Consult its exception handlers.
+ nmethod* nm = (nmethod*)cb;
+ if (nm->inlinecache_check_contains(pc)) {
+ // exception happened inside inline-cache check code
+ // => the nmethod is not yet active (i.e., the frame
+ // is not set up yet) => use return address pushed by
+ // caller => don't push another return address
+ return StubRoutines::throw_NullPointerException_at_call_entry();
+ }
+
+#ifndef PRODUCT
+ _implicit_null_throws++;
+#endif
+ target_pc = nm->continuation_for_implicit_exception(pc);
+ guarantee(target_pc != 0, "must have a continuation point");
+ }
+
+ break; // fall through
+ }
+
+
+ case IMPLICIT_DIVIDE_BY_ZERO: {
+ nmethod* nm = CodeCache::find_nmethod(pc);
+ guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions");
+#ifndef PRODUCT
+ _implicit_div0_throws++;
+#endif
+ target_pc = nm->continuation_for_implicit_exception(pc);
+ guarantee(target_pc != 0, "must have a continuation point");
+ break; // fall through
+ }
+
+ default: ShouldNotReachHere();
+ }
+
+ guarantee(target_pc != NULL, "must have computed destination PC for implicit exception");
+ assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
+
+ // for AbortVMOnException flag
+ NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
+ if (exception_kind == IMPLICIT_NULL) {
+ Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
+ } else {
+ Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
+ }
+ return target_pc;
+ }
+
+ ShouldNotReachHere();
+ return NULL;
+}
+
+
+JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...))
+{
+ THROW(vmSymbols::java_lang_UnsatisfiedLinkError());
+}
+JNI_END
+
+
+address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
+ return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
+}
+
+
+#ifndef PRODUCT
+JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
+ const frame f = thread->last_frame();
+ assert(f.is_interpreted_frame(), "must be an interpreted frame");
+#ifndef PRODUCT
+ methodHandle mh(THREAD, f.interpreter_frame_method());
+ BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
+#endif // !PRODUCT
+ return preserve_this_value;
+JRT_END
+#endif // !PRODUCT
+
+
+JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
+ os::yield_all(attempts);
+JRT_END
+
+
+// ---------------------------------------------------------------------------------------------------------
+// Non-product code
+#ifndef PRODUCT
+
+void SharedRuntime::verify_caller_frame(frame caller_frame, methodHandle callee_method) {
+ ResourceMark rm;
+ assert (caller_frame.is_interpreted_frame(), "sanity check");
+ assert (callee_method->has_compiled_code(), "callee must be compiled");
+ methodHandle caller_method (Thread::current(), caller_frame.interpreter_frame_method());
+ jint bci = caller_frame.interpreter_frame_bci();
+ methodHandle method = find_callee_method_inside_interpreter(caller_frame, caller_method, bci);
+ assert (callee_method == method, "incorrect method");
+}
+
+methodHandle SharedRuntime::find_callee_method_inside_interpreter(frame caller_frame, methodHandle caller_method, int bci) {
+ EXCEPTION_MARK;
+ Bytecode_invoke* bytecode = Bytecode_invoke_at(caller_method, bci);
+ methodHandle staticCallee = bytecode->static_target(CATCH); // Non-product code
+
+ bytecode = Bytecode_invoke_at(caller_method, bci);
+ int bytecode_index = bytecode->index();
+ Bytecodes::Code bc = bytecode->adjusted_invoke_code();
+
+ Handle receiver;
+ if (bc == Bytecodes::_invokeinterface ||
+ bc == Bytecodes::_invokevirtual ||
+ bc == Bytecodes::_invokespecial) {
+ symbolHandle signature (THREAD, staticCallee->signature());
+ receiver = Handle(THREAD, retrieve_receiver(signature, caller_frame));
+ } else {
+ receiver = Handle();
+ }
+ CallInfo result;
+ constantPoolHandle constants (THREAD, caller_method->constants());
+ LinkResolver::resolve_invoke(result, receiver, constants, bytecode_index, bc, CATCH); // Non-product code
+ methodHandle calleeMethod = result.selected_method();
+ return calleeMethod;
+}
+
+#endif // PRODUCT
+
+
+JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
+ assert(obj->is_oop(), "must be a valid oop");
+ assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
+ instanceKlass::register_finalizer(instanceOop(obj), CHECK);
+JRT_END
+
+
+jlong SharedRuntime::get_java_tid(Thread* thread) {
+ if (thread != NULL) {
+ if (thread->is_Java_thread()) {
+ oop obj = ((JavaThread*)thread)->threadObj();
+ return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
+ }
+ }
+ return 0;
+}
+
+/**
+ * This function ought to be a void function, but cannot be because
+ * it gets turned into a tail-call on sparc, which runs into dtrace bug
+ * 6254741. Once that is fixed we can remove the dummy return value.
+ */
+int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
+ return dtrace_object_alloc_base(Thread::current(), o);
+}
+
+int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
+ assert(DTraceAllocProbes, "wrong call");
+ Klass* klass = o->blueprint();
+ int size = o->size();
+ symbolOop name = klass->name();
+ HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
+ name->bytes(), name->utf8_length(), size * HeapWordSize);
+ return 0;
+}
+
+JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
+ JavaThread* thread, methodOopDesc* method))
+ assert(DTraceMethodProbes, "wrong call");
+ symbolOop kname = method->klass_name();
+ symbolOop name = method->name();
+ symbolOop sig = method->signature();
+ HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread),
+ kname->bytes(), kname->utf8_length(),
+ name->bytes(), name->utf8_length(),
+ sig->bytes(), sig->utf8_length());
+ return 0;
+JRT_END
+
+JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
+ JavaThread* thread, methodOopDesc* method))
+ assert(DTraceMethodProbes, "wrong call");
+ symbolOop kname = method->klass_name();
+ symbolOop name = method->name();
+ symbolOop sig = method->signature();
+ HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread),
+ kname->bytes(), kname->utf8_length(),
+ name->bytes(), name->utf8_length(),
+ sig->bytes(), sig->utf8_length());
+ return 0;
+JRT_END
+
+
+// Finds receiver, CallInfo (i.e., receiver method), and calling bytecode
+// for a call currently in progress, i.e., arguments have been pushed on the stack
+// but the callee has not been invoked yet. Used by: resolve virtual/static,
+// vtable updates, etc. Caller frame must be compiled.
+Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
+ ResourceMark rm(THREAD);
+
+ // last java frame on stack (which includes native call frames)
+ vframeStream vfst(thread, true); // Do not skip any javaCalls
+
+ return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle()));
+}
+
+
+// Finds receiver, CallInfo (i.e., receiver method), and calling bytecode
+// for a call currently in progress, i.e., arguments have been pushed on the stack
+// but the callee has not been invoked yet. Caller frame must be compiled.
+Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
+ vframeStream& vfst,
+ Bytecodes::Code& bc,
+ CallInfo& callinfo, TRAPS) {
+ Handle receiver;
+ Handle nullHandle; //create a handy null handle for exception returns
+
+ assert(!vfst.at_end(), "Java frame must exist");
+
+ // Find caller and bci from vframe
+ methodHandle caller (THREAD, vfst.method());
+ int bci = vfst.bci();
+
+ // Find bytecode
+ Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
+ bc = bytecode->adjusted_invoke_code();
+ int bytecode_index = bytecode->index();
+
+ // Find receiver for non-static call
+ if (bc != Bytecodes::_invokestatic) {
+ // This register map must be an updating map since we need to find the receiver
+ // for compiled frames; the receiver might be in a register.
+ RegisterMap reg_map2(thread);
+ frame stubFrame = thread->last_frame();
+ // Caller-frame is a compiled frame
+ frame callerFrame = stubFrame.sender(&reg_map2);
+
+ methodHandle callee = bytecode->static_target(CHECK_(nullHandle));
+ if (callee.is_null()) {
+ THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
+ }
+ // Retrieve from a compiled argument list
+ receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));
+
+ if (receiver.is_null()) {
+ THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
+ }
+ }
+
+ // Resolve method. This is parameterized by bytecode.
+ constantPoolHandle constants (THREAD, caller->constants());
+ assert (receiver.is_null() || receiver->is_oop(), "wrong receiver");
+ LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle));
+
+#ifdef ASSERT
+ // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
+ if (bc != Bytecodes::_invokestatic) {
+ assert(receiver.not_null(), "should have thrown exception");
+ KlassHandle receiver_klass (THREAD, receiver->klass());
+ klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
+ // klass is already loaded
+ KlassHandle static_receiver_klass (THREAD, rk);
+ assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass");
+ if (receiver_klass->oop_is_instance()) {
+ if (instanceKlass::cast(receiver_klass())->is_not_initialized()) {
+ tty->print_cr("ERROR: Klass not yet initialized!!");
+ receiver_klass.print();
+ }
+ assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
+ }
+ }
+#endif
+
+ return receiver;
+}
+
+methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
+ ResourceMark rm(THREAD);
+ // We first need to check if any Java activations (compiled, interpreted)
+ // exist on the stack since last JavaCall. If not, we need
+ // to get the target method from the JavaCall wrapper.
+ vframeStream vfst(thread, true); // Do not skip any javaCalls
+ methodHandle callee_method;
+ if (vfst.at_end()) {
+ // No Java frames were found on stack since we did the JavaCall.
+ // Hence the stack can only contain an entry_frame. We need to
+ // find the target method from the stub frame.
+ RegisterMap reg_map(thread, false);
+ frame fr = thread->last_frame();
+ assert(fr.is_runtime_frame(), "must be a runtimeStub");
+ fr = fr.sender(&reg_map);
+ assert(fr.is_entry_frame(), "must be");
+ // fr is now pointing to the entry frame.
+ callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
+ assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
+ } else {
+ Bytecodes::Code bc;
+ CallInfo callinfo;
+ find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
+ callee_method = callinfo.selected_method();
+ }
+ assert(callee_method()->is_method(), "must be");
+ return callee_method;
+}
+
+// Resolves a call.
+methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
+ bool is_virtual,
+ bool is_optimized, TRAPS) {
+ methodHandle callee_method;
+ callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
+ if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
+ int retry_count = 0;
+ while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
+ callee_method->method_holder() != SystemDictionary::object_klass()) {
+ // If there is a pending exception then there is no need to retry
+ // resolving this method.
+ // If the method has been redefined, we need to try again.
+ // Hack: we have no way to update the vtables of arrays, so don't
+ // require that java.lang.Object has been updated.
+
+ // It is very unlikely that a method is redefined more than 100 times
+ // in the middle of resolution. If we loop here more than 100 times,
+ // there could be a bug.
+ guarantee((retry_count++ < 100),
+ "Could not resolve to latest version of redefined method");
+ // The method was redefined in the middle of resolution, so retry.
+ callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
+ }
+ }
+ return callee_method;
+}
+
+// Resolves a call. The compilers generate code for calls that go here
+// and are patched with the real destination of the call.
+methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
+ bool is_virtual,
+ bool is_optimized, TRAPS) {
+
+ ResourceMark rm(thread);
+ RegisterMap cbl_map(thread, false);
+ frame caller_frame = thread->last_frame().sender(&cbl_map);
+
+ CodeBlob* cb = caller_frame.cb();
+ guarantee(cb != NULL && cb->is_nmethod(), "must be called from nmethod");
+ // make sure caller is not getting deoptimized
+ // and removed before we are done with it.
+ // CLEANUP - with lazy deopt shouldn't need this lock
+ nmethodLocker caller_lock((nmethod*)cb);
+
+
+ // determine call info & receiver
+ // note: a) receiver is NULL for static calls
+ // b) an exception is thrown if receiver is NULL for non-static calls
+ CallInfo call_info;
+ Bytecodes::Code invoke_code = Bytecodes::_illegal;
+ Handle receiver = find_callee_info(thread, invoke_code,
+ call_info, CHECK_(methodHandle()));
+ methodHandle callee_method = call_info.selected_method();
+
+ assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
+ ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");
+
+#ifndef PRODUCT
+ // tracing/debugging/statistics
+ int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
+ (is_virtual) ? (&_resolve_virtual_ctr) :
+ (&_resolve_static_ctr);
+ Atomic::inc(addr);
+
+ if (TraceCallFixup) {
+ ResourceMark rm(thread);
+ tty->print("resolving %s%s (%s) call to",
+ (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
+ Bytecodes::name(invoke_code));
+ callee_method->print_short_name(tty);
+ tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
+ }
+#endif
+
+ // Compute entry points. This might require generation of C2I converter
+ // frames, so we cannot be holding any locks here. Furthermore, the
+ // computation of the entry points is independent of patching the call. We
+ // always return the entry-point, but we only patch the stub if the call has
+ // not been deoptimized. Return values: For a virtual call this is a
+ // (cached_oop, destination address) pair. For a static call/optimized
+ // virtual this is just a destination address.
+
+ StaticCallInfo static_call_info;
+ CompiledICInfo virtual_call_info;
+
+
+ // Make sure the callee nmethod does not get deoptimized and removed before
+ // we are done patching the code.
+ nmethod* nm = callee_method->code();
+ nmethodLocker nl_callee(nm);
+#ifdef ASSERT
+ address dest_entry_point = nm == NULL ? 0 : nm->entry_point(); // used below
+#endif
+
+ if (is_virtual) {
+ assert(receiver.not_null(), "sanity check");
+ bool static_bound = call_info.resolved_method()->can_be_statically_bound();
+ KlassHandle h_klass(THREAD, receiver->klass());
+ CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
+ is_optimized, static_bound, virtual_call_info,
+ CHECK_(methodHandle()));
+ } else {
+ // static call
+ CompiledStaticCall::compute_entry(callee_method, static_call_info);
+ }
+
+ // grab lock, check for deoptimization and potentially patch caller
+ {
+ MutexLocker ml_patch(CompiledIC_lock);
+
+ // Now that we are ready to patch, if the methodOop was redefined then
+ // don't update the call site and let the caller retry.
+
+ if (!callee_method->is_old()) {
+#ifdef ASSERT
+ // We must not try to patch to jump to an already unloaded method.
+ if (dest_entry_point != 0) {
+ assert(CodeCache::find_blob(dest_entry_point) != NULL,
+ "should not unload nmethod while locked");
+ }
+#endif
+ if (is_virtual) {
+ CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
+ if (inline_cache->is_clean()) {
+ inline_cache->set_to_monomorphic(virtual_call_info);
+ }
+ } else {
+ CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
+ if (ssc->is_clean()) ssc->set(static_call_info);
+ }
+ }
+
+ } // unlock CompiledIC_lock
+
+ return callee_method;
+}
+
+
+// Inline caches exist only in compiled code
+JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
+#ifdef ASSERT
+ RegisterMap reg_map(thread, false);
+ frame stub_frame = thread->last_frame();
+ assert(stub_frame.is_runtime_frame(), "sanity check");
+ frame caller_frame = stub_frame.sender(&reg_map);
+ assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
+#endif /* ASSERT */
+
+ methodHandle callee_method;
+ JRT_BLOCK
+ callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
+ // Return methodOop through TLS
+ thread->set_vm_result(callee_method());
+ JRT_BLOCK_END
+ // return compiled code entry point after potential safepoints
+ assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
+ return callee_method->verified_code_entry();
+JRT_END
+
+
+// Handle call site that has been made non-entrant
+JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
+ // 6243940 We might end up in here if the callee is deoptimized
+ // as we race to call it. We don't want to take a safepoint if
+ // the caller was interpreted because the caller frame will look
+ // interpreted to the stack walkers and arguments are now
+ // "compiled" so it is much better to make this transition
+ // invisible to the stack walking code. The i2c path will
+ // place the callee method in the callee_target. It is stashed
+ // there because if we tried to find the callee by normal means a
+ // safepoint would be possible and we would have trouble gc'ing the
+ // compiled args.
+ RegisterMap reg_map(thread, false);
+ frame stub_frame = thread->last_frame();
+ assert(stub_frame.is_runtime_frame(), "sanity check");
+ frame caller_frame = stub_frame.sender(&reg_map);
+ if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame() ) {
+ methodOop callee = thread->callee_target();
+ guarantee(callee != NULL && callee->is_method(), "bad handshake");
+ thread->set_vm_result(callee);
+ thread->set_callee_target(NULL);
+ return callee->get_c2i_entry();
+ }
+
+ // Must be compiled to compiled path which is safe to stackwalk
+ methodHandle callee_method;
+ JRT_BLOCK
+ // Force resolving of caller (if we called from compiled frame)
+ callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
+ thread->set_vm_result(callee_method());
+ JRT_BLOCK_END
+ // return compiled code entry point after potential safepoints
+ assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
+ return callee_method->verified_code_entry();
+JRT_END
+
+
+// resolve a static call and patch code
+JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
+ methodHandle callee_method;
+ JRT_BLOCK
+ callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
+ thread->set_vm_result(callee_method());
+ JRT_BLOCK_END
+ // return compiled code entry point after potential safepoints
+ assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
+ return callee_method->verified_code_entry();
+JRT_END
+
+
+// resolve virtual call and update inline cache to monomorphic
+JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
+ methodHandle callee_method;
+ JRT_BLOCK
+ callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
+ thread->set_vm_result(callee_method());
+ JRT_BLOCK_END
+ // return compiled code entry point after potential safepoints
+ assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
+ return callee_method->verified_code_entry();
+JRT_END
+
+
+// Resolve a virtual call that can be statically bound (e.g., always
+// monomorphic, so it has no inline cache). Patch code to resolved target.
+JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
+ methodHandle callee_method;
+ JRT_BLOCK
+ callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
+ thread->set_vm_result(callee_method());
+ JRT_BLOCK_END
+ // return compiled code entry point after potential safepoints
+ assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
+ return callee_method->verified_code_entry();
+JRT_END
+
+
+
+
+
+methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
+ ResourceMark rm(thread);
+ CallInfo call_info;
+ Bytecodes::Code bc;
+
+ // receiver is NULL for static calls. An exception is thrown for NULL
+ // receivers for non-static calls
+ Handle receiver = find_callee_info(thread, bc, call_info,
+ CHECK_(methodHandle()));
+ // Compiler1 can produce virtual call sites that can actually be statically bound.
+ // If we fell through to below, we would think that the site was going megamorphic
+ // when in fact the site can never miss. Worse, because we'd think it was megamorphic
+ // we'd try to do a vtable dispatch; however, methods that can be statically bound
+ // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
+ // reresolution of the call site (as if we did a handle_wrong_method and not a
+ // plain ic_miss) and the site will be converted to an optimized virtual call site,
+ // never to miss again. I don't believe C2 will produce code like this, but if it
+ // did this would still be the correct thing to do for it too, hence no ifdef.
+ //
+ if (call_info.resolved_method()->can_be_statically_bound()) {
+ methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
+ if (TraceCallFixup) {
+ RegisterMap reg_map(thread, false);
+ frame caller_frame = thread->last_frame().sender(&reg_map);
+ ResourceMark rm(thread);
+ tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
+ callee_method->print_short_name(tty);
+ tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc());
+ tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
+ }
+ return callee_method;
+ }
+
+ methodHandle callee_method = call_info.selected_method();
+
+ bool should_be_mono = false;
+
+#ifndef PRODUCT
+ Atomic::inc(&_ic_miss_ctr);
+
+ // Statistics & Tracing
+ if (TraceCallFixup) {
+ ResourceMark rm(thread);
+ tty->print("IC miss (%s) call to", Bytecodes::name(bc));
+ callee_method->print_short_name(tty);
+ tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
+ }
+
+ if (ICMissHistogram) {
+ MutexLocker m(VMStatistic_lock);
+ RegisterMap reg_map(thread, false);
+ frame f = thread->last_frame().real_sender(&reg_map);// skip runtime stub
+ // produce statistics under the lock
+ trace_ic_miss(f.pc());
+ }
+#endif
+
+ // install an event collector so that when a vtable stub is created the
+ // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
+ // event can't be posted when the stub is created as locks are held
+ // - instead the event will be deferred until the event collector goes
+ // out of scope.
+ JvmtiDynamicCodeEventCollector event_collector;
+
+ // Update inline cache to megamorphic. Skip update if caller has been
+ // made non-entrant or we are called from interpreted.
+ { MutexLocker ml_patch (CompiledIC_lock);
+ RegisterMap reg_map(thread, false);
+ frame caller_frame = thread->last_frame().sender(&reg_map);
+ CodeBlob* cb = caller_frame.cb();
+ if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
+ // Not a non-entrant nmethod, so find inline_cache
+ CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
+ bool should_be_mono = false;
+ if (inline_cache->is_optimized()) {
+ if (TraceCallFixup) {
+ ResourceMark rm(thread);
+ tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
+ callee_method->print_short_name(tty);
+ tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
+ }
+ should_be_mono = true;
+ } else {
+ compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop();
+ if ( ic_oop != NULL && ic_oop->is_compiledICHolder()) {
+
+ if (receiver()->klass() == ic_oop->holder_klass()) {
+ // This isn't a real miss. We must have seen that compiled code
+ // is now available and we want the call site converted to a
+ // monomorphic compiled call site.
+ // We can't assert for callee_method->code() != NULL because it
+ // could have been deoptimized in the meantime
+ if (TraceCallFixup) {
+ ResourceMark rm(thread);
+ tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
+ callee_method->print_short_name(tty);
+ tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
+ }
+ should_be_mono = true;
+ }
+ }
+ }
+
+ if (should_be_mono) {
+
+ // We have a path that was monomorphic but was going interpreted
+ // and now we have (or had) a compiled entry. We correct the IC
+ // by using a new icBuffer.
+ CompiledICInfo info;
+ KlassHandle receiver_klass(THREAD, receiver()->klass());
+ inline_cache->compute_monomorphic_entry(callee_method,
+ receiver_klass,
+ inline_cache->is_optimized(),
+ false,
+ info, CHECK_(methodHandle()));
+ inline_cache->set_to_monomorphic(info);
+ } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
+ // Change to megamorphic
+ inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
+ } else {
+ // Either clean or megamorphic
+ }
+ }
+ } // Release CompiledIC_lock
+
+ return callee_method;
+}
+
+//
+// Resets a call-site in compiled code so it will get resolved again.
+// This routine handles virtual call sites, optimized virtual call
+// sites, and static call sites. Typically used to change a call site's
+// destination from compiled to interpreted.
+//
+methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
+ ResourceMark rm(thread);
+ RegisterMap reg_map(thread, false);
+ frame stub_frame = thread->last_frame();
+ assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
+ frame caller = stub_frame.sender(&reg_map);
+
+ // Do nothing if the frame isn't a live compiled frame.
+ // nmethod could be deoptimized by the time we get here
+ // so no update to the caller is needed.
+
+ if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
+
+ address pc = caller.pc();
+ Events::log("update call-site at pc " INTPTR_FORMAT, pc);
+
+ // Default call_addr is the location of the "basic" call.
+ // Determine the address of the call we are reresolving. With
+ // Inline Caches we will always find a recognizable call.
+ // With Inline Caches disabled we may or may not find a
+ // recognizable call. We will always find a call for static
+ // calls and for optimized virtual calls. For vanilla virtual
+ // calls it depends on the state of the UseInlineCaches switch.
+ //
+ // With Inline Caches disabled we can get here for a virtual call
+ // for two reasons:
+ // 1 - calling an abstract method. The vtable for abstract methods
+ // will run us through handle_wrong_method and we will eventually
+ // end up in the interpreter to throw the AbstractMethodError.
+ // 2 - a racing deoptimization. We could be doing a vanilla vtable
+ // call and between the time we fetch the entry address and
+ // the time we jump to it the target gets deoptimized. Similar to 1,
+ // we will wind up in the interpreter (through a c2i with c2).
+ //
+ address call_addr = NULL;
+ {
+ // Get call instruction under lock because another thread may be
+ // busy patching it.
+ MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
+ // Location of call instruction
+ if (NativeCall::is_call_before(pc)) {
+ NativeCall *ncall = nativeCall_before(pc);
+ call_addr = ncall->instruction_address();
+ }
+ }
+
+ // Check for static or virtual call
+ bool is_static_call = false;
+ nmethod* caller_nm = CodeCache::find_nmethod(pc);
+ // Make sure nmethod doesn't get deoptimized and removed until
+ // this is done with it.
+ // CLEANUP - with lazy deopt shouldn't need this lock
+ nmethodLocker nmlock(caller_nm);
+
+ if (call_addr != NULL) {
+ RelocIterator iter(caller_nm, call_addr, call_addr+1);
+ int ret = iter.next(); // Get item
+ if (ret) {
+ assert(iter.addr() == call_addr, "must find call");
+ if (iter.type() == relocInfo::static_call_type) {
+ is_static_call = true;
+ } else {
+ assert(iter.type() == relocInfo::virtual_call_type ||
+ iter.type() == relocInfo::opt_virtual_call_type
+ , "unexpected relocInfo. type");
+ }
+ } else {
+ assert(!UseInlineCaches, "relocation info. must exist for this address");
+ }
+
+ // Cleaning the inline cache will force a new resolve. This is more robust
+ // than directly setting it to the new destination, since resolving of calls
+ // is always done through the same code path. (Experience shows that updating
+ // an inline cache to a wrong method leads to bugs that are very hard to
+ // track down.) It should not be performance critical, since the resolve
+ // is only done once.
+
+ MutexLocker ml(CompiledIC_lock);
+ //
+ // We do not patch the call site if the nmethod has been made non-entrant
+ // as it is a waste of time
+ //
+ if (caller_nm->is_in_use()) {
+ if (is_static_call) {
+ CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
+ ssc->set_to_clean();
+ } else {
+ // compiled, dispatched call (which used to call an interpreted method)
+ CompiledIC* inline_cache = CompiledIC_at(call_addr);
+ inline_cache->set_to_clean();
+ }
+ }
+ }
+
+ }
+
+ methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
+
+
+#ifndef PRODUCT
+ Atomic::inc(&_wrong_method_ctr);
+
+ if (TraceCallFixup) {
+ ResourceMark rm(thread);
+ tty->print("handle_wrong_method reresolving call to");
+ callee_method->print_short_name(tty);
+ tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
+ }
+#endif
+
+ return callee_method;
+}
+
+// ---------------------------------------------------------------------------
+// We are calling the interpreter via a c2i. Normally this would mean that
+// we were called by a compiled method. However we could have lost a race
+// where we went int -> i2c -> c2i and so the caller could in fact be
+// interpreted. If the caller is compiled we attempt to patch the caller
+// so it no longer calls into the interpreter.
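+// Illustrative sketch of the lost-race path mentioned above:
+//   interpreted caller -> i2c adapter -> (callee's compiled code goes away)
+//                      -> c2i adapter -> interpreter
+// in which case the caller is not an nmethod and there is nothing to patch.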
+IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
+ methodOop moop(method);
+
+ address entry_point = moop->from_compiled_entry();
+
+ // It's possible that deoptimization can occur at a call site which hasn't
+ // been resolved yet, in which case this function will be called from
+ // an nmethod that has been patched for deopt and we can ignore the
+ // request for a fixup.
+ // Also it is possible that we lost a race in that from_compiled_entry
+ // is now back to the i2c; in that case we don't need to patch, and if
+ // we did we'd leap into space because the callsite needs to use the
+ // "to interpreter" stub in order to load up the methodOop. Don't
+ // ask me how I know this...
+ //
+
+ CodeBlob* cb = CodeCache::find_blob(caller_pc);
+ if ( !cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
+ return;
+ }
+
+ // There is a benign race here. We could be attempting to patch to a compiled
+ // entry point at the same time the callee is being deoptimized. If that is
+ // the case then entry_point may in fact point to a c2i and we'd patch the
+ // call site with the same old data. clear_code will set code() to NULL
+ // at the end of it. If we happen to see that NULL then we can skip trying
+ // to patch. If we hit the window where the callee has a c2i in the
+ // from_compiled_entry and the NULL isn't present yet then we lose the race
+ // and patch the code with the same old data. Such is life.
+
+ if (moop->code() == NULL) return;
+
+ if (((nmethod*)cb)->is_in_use()) {
+
+ // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch)
+ MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
+ if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) {
+ NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset);
+ //
+ // bug 6281185. We might get here after resolving a call site to a vanilla
+ // virtual call. Because the resolvee uses the verified entry it may then
+ // see compiled code and attempt to patch the site by calling us. This would
+ // then incorrectly convert the call site to optimized, and it's downhill from
+ // there. If you're lucky you'll get the assert in the bugid, if not you've
+ // just made a call site that could be megamorphic into a monomorphic site
+ // for the rest of its life! Just another racing bug in the life of
+ // fixup_callers_callsite ...
+ //
+ RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address());
+ iter.next();
+ assert(iter.has_current(), "must have a reloc at java call site");
+ relocInfo::relocType typ = iter.reloc()->type();
+ if ( typ != relocInfo::static_call_type &&
+ typ != relocInfo::opt_virtual_call_type &&
+ typ != relocInfo::static_stub_type) {
+ return;
+ }
+ address destination = call->destination();
+ if (destination != entry_point) {
+ CodeBlob* callee = CodeCache::find_blob(destination);
+ // callee == cb seems weird. It means calling interpreter thru stub.
+ if (callee == cb || callee->is_adapter_blob()) {
+ // static call or optimized virtual
+ if (TraceCallFixup) {
+ tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
+ moop->print_short_name(tty);
+ tty->print_cr(" to " INTPTR_FORMAT, entry_point);
+ }
+ call->set_destination_mt_safe(entry_point);
+ } else {
+ if (TraceCallFixup) {
+ tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
+ moop->print_short_name(tty);
+ tty->print_cr(" to " INTPTR_FORMAT, entry_point);
+ }
+ // assert is too strong; could also be resolve destinations.
+ // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
+ }
+ } else {
+ if (TraceCallFixup) {
+ tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
+ moop->print_short_name(tty);
+ tty->print_cr(" to " INTPTR_FORMAT, entry_point);
+ }
+ }
+ }
+ }
+
+IRT_END
+
+
+// same as JVM_Arraycopy, but called directly from compiled code
+JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
+ oopDesc* dest, jint dest_pos,
+ jint length,
+ JavaThread* thread)) {
+#ifndef PRODUCT
+ _slow_array_copy_ctr++;
+#endif
+ // Check if we have null pointers
+ if (src == NULL || dest == NULL) {
+ THROW(vmSymbols::java_lang_NullPointerException());
+ }
+ // Do the copy. The casts to arrayOop are necessary to the copy_array API,
+ // even though the copy_array API also performs dynamic checks to ensure
+ // that src and dest are truly arrays (and are conformable).
+ // The copy_array mechanism is awkward and could be removed, but
+ // the compilers don't call this function except as a last resort,
+ // so it probably doesn't matter.
+ Klass::cast(src->klass())->copy_array((arrayOopDesc*)src, src_pos,
+ (arrayOopDesc*)dest, dest_pos,
+ length, thread);
+}
+JRT_END
+
+char* SharedRuntime::generate_class_cast_message(
+ JavaThread* thread, const char* objName) {
+
+ // Get target class name from the checkcast instruction
+ vframeStream vfst(thread, true);
+ assert(!vfst.at_end(), "Java frame must exist");
+ Bytecode_checkcast* cc = Bytecode_checkcast_at(
+ vfst.method()->bcp_from(vfst.bci()));
+ Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at(
+ cc->index(), thread));
+ return generate_class_cast_message(objName, targetKlass->external_name());
+}
+
+char* SharedRuntime::generate_class_cast_message(
+ const char* objName, const char* targetKlassName) {
+ const char* desc = " cannot be cast to ";
+ size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1;
+
+ char* message = NEW_C_HEAP_ARRAY(char, msglen);
+ if (NULL == message) {
+ // out of memory - can't use a detailed message. Since caller is
+ // using a resource mark to free memory, returning this should be
+ // safe (caller won't explicitly delete it).
+ message = const_cast<char*>(objName);
+ } else {
+ jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName);
+ }
+ return message;
+}
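+// Illustrative example of the message built above: with objName "Foo" and
+// targetKlassName "Bar" the result is "Foo cannot be cast to Bar".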
+
+JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
+ (void) JavaThread::current()->reguard_stack();
+JRT_END
+
+
+// Handles the uncommon case in locking, i.e., contention or an inflated lock.
+#ifndef PRODUCT
+int SharedRuntime::_monitor_enter_ctr=0;
+#endif
+JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
+ oop obj(_obj);
+#ifndef PRODUCT
+ _monitor_enter_ctr++; // monitor enter slow
+#endif
+ if (PrintBiasedLockingStatistics) {
+ Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
+ }
+ Handle h_obj(THREAD, obj);
+ if (UseBiasedLocking) {
+ // Retry fast entry if bias is revoked to avoid unnecessary inflation
+ ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
+ } else {
+ ObjectSynchronizer::slow_enter(h_obj, lock, CHECK);
+ }
+ assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
+JRT_END
+
+#ifndef PRODUCT
+int SharedRuntime::_monitor_exit_ctr=0;
+#endif
+// Handles the uncommon cases of monitor unlocking in compiled code
+JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
+ oop obj(_obj);
+#ifndef PRODUCT
+ _monitor_exit_ctr++; // monitor exit slow
+#endif
+ Thread* THREAD = JavaThread::current();
+ // I'm not convinced we need the code contained by MIGHT_HAVE_PENDING anymore
+ // testing was unable to ever fire the assert that guarded it so I have removed it.
+ assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?");
+#undef MIGHT_HAVE_PENDING
+#ifdef MIGHT_HAVE_PENDING
+ // Save and restore any pending_exception around the exception mark.
+ // While the slow_exit must not throw an exception, we could come into
+ // this routine with one set.
+ oop pending_excep = NULL;
+ const char* pending_file;
+ int pending_line;
+ if (HAS_PENDING_EXCEPTION) {
+ pending_excep = PENDING_EXCEPTION;
+ pending_file = THREAD->exception_file();
+ pending_line = THREAD->exception_line();
+ CLEAR_PENDING_EXCEPTION;
+ }
+#endif /* MIGHT_HAVE_PENDING */
+
+ {
+ // Exit must be non-blocking, and therefore no exceptions can be thrown.
+ EXCEPTION_MARK;
+ ObjectSynchronizer::slow_exit(obj, lock, THREAD);
+ }
+
+#ifdef MIGHT_HAVE_PENDING
+ if (pending_excep != NULL) {
+ THREAD->set_pending_exception(pending_excep, pending_file, pending_line);
+ }
+#endif /* MIGHT_HAVE_PENDING */
+JRT_END
+
+#ifndef PRODUCT
+
+void SharedRuntime::print_statistics() {
+ ttyLocker ttyl;
+ if (xtty != NULL) xtty->head("statistics type='SharedRuntime'");
+
+ if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow", _monitor_enter_ctr);
+ if (_monitor_exit_ctr ) tty->print_cr("%5d monitor exit slow", _monitor_exit_ctr);
+ if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);
+
+ SharedRuntime::print_ic_miss_histogram();
+
+ if (CountRemovableExceptions) {
+ if (_nof_removable_exceptions > 0) {
+ Unimplemented(); // this counter is not yet incremented
+ tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions);
+ }
+ }
+
+ // Dump the JRT_ENTRY counters
+ if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
+ if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr);
+ if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
+ if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
+ if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
+ if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
+ if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);
+
+ tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr );
+ tty->print_cr("%5d wrong method", _wrong_method_ctr );
+ tty->print_cr("%5d unresolved static call site", _resolve_static_ctr );
+ tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr );
+ tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr );
+
+ if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr );
+ if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr );
+ if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr );
+ if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr );
+ if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr );
+ if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr );
+ if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr );
+ if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr );
+ if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr );
+ if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr );
+ if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr );
+ if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr );
+ if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr );
+ if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr );
+ if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr );
+ if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr );
+
+ if (xtty != NULL) xtty->tail("statistics");
+}
+
+inline double percent(int x, int y) {
+ return 100.0 * x / MAX2(y, 1);
+}
+
+class MethodArityHistogram {
+ public:
+ enum { MAX_ARITY = 256 };
+ private:
+ static int _arity_histogram[MAX_ARITY]; // histogram of #args
+ static int _size_histogram[MAX_ARITY]; // histogram of arg size in words
+ static int _max_arity; // max. arity seen
+ static int _max_size; // max. arg size seen
+
+ static void add_method_to_histogram(nmethod* nm) {
+ methodOop m = nm->method();
+ ArgumentCount args(m->signature());
+ int arity = args.size() + (m->is_static() ? 0 : 1);
+ int argsize = m->size_of_parameters();
+ arity = MIN2(arity, MAX_ARITY-1);
+ argsize = MIN2(argsize, MAX_ARITY-1);
+ int count = nm->method()->compiled_invocation_count();
+ _arity_histogram[arity] += count;
+ _size_histogram[argsize] += count;
+ _max_arity = MAX2(_max_arity, arity);
+ _max_size = MAX2(_max_size, argsize);
+ }
+
+ void print_histogram_helper(int n, int* histo, const char* name) {
+ const int N = MIN2(5, n);
+ tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
+ double sum = 0;
+ double weighted_sum = 0;
+ int i;
+ for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
+ double rest = sum;
+ double percent = sum / 100;
+ for (i = 0; i <= N; i++) {
+ rest -= histo[i];
+ tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
+ }
+ tty->print_cr("rest: %7d (%5.1f%%))", (int)rest, rest / percent);
+ tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
+ }
+
+ void print_histogram() {
+ tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
+ print_histogram_helper(_max_arity, _arity_histogram, "arity");
+ tty->print_cr("\nSame for parameter size (in words):");
+ print_histogram_helper(_max_size, _size_histogram, "size");
+ tty->cr();
+ }
+
+ public:
+ MethodArityHistogram() {
+ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ _max_arity = _max_size = 0;
+ for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram [i] = 0;
+ CodeCache::nmethods_do(add_method_to_histogram);
+ print_histogram();
+ }
+};
+
+int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
+int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
+int MethodArityHistogram::_max_arity;
+int MethodArityHistogram::_max_size;
+
+void SharedRuntime::print_call_statistics(int comp_total) {
+ tty->print_cr("Calls from compiled code:");
+ int total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
+ int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
+ int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
+ tty->print_cr("\t%9d (%4.1f%%) total non-inlined ", total, percent(total, total));
+ tty->print_cr("\t%9d (%4.1f%%) virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total));
+ tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
+ tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls));
+ tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_c, percent(mono_c, _nof_normal_calls));
+ tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
+ tty->print_cr("\t%9d (%4.1f%%) interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total));
+ tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
+ tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls));
+ tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_i, percent(mono_i, _nof_interface_calls));
+ tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls));
+ tty->print_cr("\t%9d (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
+ tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
+ tty->cr();
+ tty->print_cr("Note 1: counter updates are not MT-safe.");
+ tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
+ tty->print_cr(" %% in nested categories are relative to their category");
+ tty->print_cr(" (and thus add up to more than 100%% with inlining)");
+ tty->cr();
+
+ MethodArityHistogram h;
+}
+#endif
+
+
+// ---------------------------------------------------------------------------
+// Implementation of AdapterHandlerLibrary
+const char* AdapterHandlerEntry::name = "I2C/C2I adapters";
+GrowableArray<uint64_t>* AdapterHandlerLibrary::_fingerprints = NULL;
+GrowableArray<AdapterHandlerEntry* >* AdapterHandlerLibrary::_handlers = NULL;
+const int AdapterHandlerLibrary_size = 16*K;
+u_char AdapterHandlerLibrary::_buffer[AdapterHandlerLibrary_size + 32];
+
+void AdapterHandlerLibrary::initialize() {
+ if (_fingerprints != NULL) return;
+ _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true);
+ _handlers = new(ResourceObj::C_HEAP)GrowableArray<AdapterHandlerEntry*>(32, true);
+ // Index 0 reserved for the slow path handler
+ _fingerprints->append(0/*the never-allowed 0 fingerprint*/);
+ _handlers->append(NULL);
+
+ // Create a special handler for abstract methods. Abstract methods
+ // are never compiled so an i2c entry is somewhat meaningless, but
+  // fill it in with something appropriate just in case. Pass the handle-
+  // wrong-method stub for the c2i transitions.
+ address wrong_method = SharedRuntime::get_handle_wrong_method_stub();
+ _fingerprints->append(0/*the never-allowed 0 fingerprint*/);
+ assert(_handlers->length() == AbstractMethodHandler, "in wrong slot");
+ _handlers->append(new AdapterHandlerEntry(StubRoutines::throw_AbstractMethodError_entry(),
+ wrong_method, wrong_method));
+}
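+// After initialize() the tables look like this (illustrative):
+//   index 0 -> fingerprint 0, handler NULL               (reserved slow-path slot)
+//   index 1 -> fingerprint 0, abstract-method handler    (AbstractMethodHandler)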
+
+int AdapterHandlerLibrary::get_create_adapter_index(methodHandle method) {
+ // Use customized signature handler. Need to lock around updates to the
+ // _fingerprints array (it is not safe for concurrent readers and a single
+ // writer: this can be fixed if it becomes a problem).
+
+ // Shouldn't be here if running -Xint
+ if (Arguments::mode() == Arguments::_int) {
+ ShouldNotReachHere();
+ }
+
+ // Get the address of the ic_miss handlers before we grab the
+ // AdapterHandlerLibrary_lock. This fixes bug 6236259 which
+ // was caused by the initialization of the stubs happening
+ // while we held the lock and then notifying jvmti while
+ // holding it. This just forces the initialization to be a little
+ // earlier.
+ address ic_miss = SharedRuntime::get_ic_miss_stub();
+ assert(ic_miss != NULL, "must have handler");
+
+ int result;
+ BufferBlob *B = NULL;
+ uint64_t fingerprint;
+ {
+ MutexLocker mu(AdapterHandlerLibrary_lock);
+ // make sure data structure is initialized
+ initialize();
+
+ if (method->is_abstract()) {
+ return AbstractMethodHandler;
+ }
+
+ // Lookup method signature's fingerprint
+ fingerprint = Fingerprinter(method).fingerprint();
+ assert( fingerprint != CONST64( 0), "no zero fingerprints allowed" );
+ // Fingerprints are small fixed-size condensed representations of
+ // signatures. If the signature is too large, it won't fit in a
+ // fingerprint. Signatures which cannot support a fingerprint get a new i2c
+ // adapter gen'd each time, instead of searching the cache for one. This -1
+ // game can be avoided if I compared signatures instead of using
+ // fingerprints. However, -1 fingerprints are very rare.
+ if( fingerprint != UCONST64(-1) ) { // If this is a cache-able fingerprint
+ // Turns out i2c adapters do not care what the return value is. Mask it
+ // out so signatures that only differ in return type will share the same
+ // adapter.
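+      // For example (illustrative): methods with signatures (I)V and (I)I
+      // produce the same masked fingerprint and therefore share one adapter.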
+ fingerprint &= ~(SignatureIterator::result_feature_mask << SignatureIterator::static_feature_size);
+ // Search for a prior existing i2c/c2i adapter
+ int index = _fingerprints->find(fingerprint);
+ if( index >= 0 ) return index; // Found existing handlers?
+ } else {
+ // Annoyingly, I end up adding -1 fingerprints to the array of handlers,
+ // because I need a unique handler index. It cannot be scanned for
+ // because all -1's look alike. Instead, the matching index is passed out
+ // and immediately used to collect the 2 return values (the c2i and i2c
+ // adapters).
+ }
+
+ // Create I2C & C2I handlers
+ ResourceMark rm;
+ // Improve alignment slightly
+ u_char *buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
+ CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
+ short buffer_locs[20];
+ buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
+ sizeof(buffer_locs)/sizeof(relocInfo));
+ MacroAssembler _masm(&buffer);
+
+ // Fill in the signature array, for the calling-convention call.
+ int total_args_passed = method->size_of_parameters(); // All args on stack
+
+ BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
+ VMRegPair * regs = NEW_RESOURCE_ARRAY(VMRegPair ,total_args_passed);
+ int i=0;
+ if( !method->is_static() ) // Pass in receiver first
+ sig_bt[i++] = T_OBJECT;
+ for( SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
+ sig_bt[i++] = ss.type(); // Collect remaining bits of signature
+ if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
+ sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
+ }
+ assert( i==total_args_passed, "" );
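+    // For example (illustrative): a non-static method with signature (IJ)V
+    // gives total_args_passed == 4 and
+    //   sig_bt = { T_OBJECT /*receiver*/, T_INT, T_LONG, T_VOID /*long's 2nd slot*/ }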
+
+ // Now get the re-packed compiled-Java layout.
+ int comp_args_on_stack;
+
+ // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
+ comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
+
+ AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
+ total_args_passed,
+ comp_args_on_stack,
+ sig_bt,
+ regs);
+
+ B = BufferBlob::create(AdapterHandlerEntry::name, &buffer);
+ if (B == NULL) return -2; // Out of CodeCache space
+ entry->relocate(B->instructions_begin());
+#ifndef PRODUCT
+    // debugging support
+ if (PrintAdapterHandlers) {
+ tty->cr();
+ tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = 0x%llx, %d bytes generated)",
+ _handlers->length(), (method->is_static() ? "static" : "receiver"),
+ method->signature()->as_C_string(), fingerprint, buffer.code_size() );
+ tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry());
+ Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + buffer.code_size());
+ }
+#endif
+
+ // add handlers to library
+ _fingerprints->append(fingerprint);
+ _handlers->append(entry);
+ // set handler index
+ assert(_fingerprints->length() == _handlers->length(), "sanity check");
+ result = _fingerprints->length() - 1;
+ }
+ // Outside of the lock
+ if (B != NULL) {
+ char blob_id[256];
+ jio_snprintf(blob_id,
+ sizeof(blob_id),
+ "%s(" PTR64_FORMAT ")@" PTR_FORMAT,
+ AdapterHandlerEntry::name,
+ fingerprint,
+ B->instructions_begin());
+ VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
+ Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
+
+ if (JvmtiExport::should_post_dynamic_code_generated()) {
+ JvmtiExport::post_dynamic_code_generated(blob_id,
+ B->instructions_begin(),
+ B->instructions_end());
+ }
+ }
+ return result;
+}
+
+void AdapterHandlerEntry::relocate(address new_base) {
+ ptrdiff_t delta = new_base - _i2c_entry;
+ _i2c_entry += delta;
+ _c2i_entry += delta;
+ _c2i_unverified_entry += delta;
+}
+
+// Create a native wrapper for this native method. The wrapper converts the
+// java compiled calling convention to the native convention, handlizes
+// arguments, and transitions to native. On return from the native we transition
+// back to java blocking if a safepoint is in progress.
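+// Sketch of the wrapper's phases (illustrative summary of the comments above
+// and of generate_native_wrapper in sharedRuntime.hpp): marshal the
+// compiled-Java arguments into the native convention, handlize any oops,
+// transition to native, call the native function, transition back to Java
+// (possibly blocking at a safepoint), unhandlize the result and return.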
+nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
+ ResourceMark rm;
+ nmethod* nm = NULL;
+
+ if (PrintCompilation) {
+ ttyLocker ttyl;
+ tty->print("--- n%s ", (method->is_synchronized() ? "s" : " "));
+ method->print_short_name(tty);
+ if (method->is_static()) {
+ tty->print(" (static)");
+ }
+ tty->cr();
+ }
+
+ assert(method->has_native_function(), "must have something valid to call!");
+
+ {
+ // perform the work while holding the lock, but perform any printing outside the lock
+ MutexLocker mu(AdapterHandlerLibrary_lock);
+ // See if somebody beat us to it
+ nm = method->code();
+ if (nm) {
+ return nm;
+ }
+
+ // Improve alignment slightly
+ u_char* buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
+ CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
+ // Need a few relocation entries
+ double locs_buf[20];
+ buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
+ MacroAssembler _masm(&buffer);
+
+ // Fill in the signature array, for the calling-convention call.
+ int total_args_passed = method->size_of_parameters();
+
+ BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
+ VMRegPair * regs = NEW_RESOURCE_ARRAY(VMRegPair ,total_args_passed);
+ int i=0;
+ if( !method->is_static() ) // Pass in receiver first
+ sig_bt[i++] = T_OBJECT;
+ SignatureStream ss(method->signature());
+ for( ; !ss.at_return_type(); ss.next()) {
+ sig_bt[i++] = ss.type(); // Collect remaining bits of signature
+ if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
+ sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
+ }
+ assert( i==total_args_passed, "" );
+ BasicType ret_type = ss.type();
+
+ // Now get the compiled-Java layout as input arguments
+ int comp_args_on_stack;
+ comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
+
+ // Generate the compiled-to-native wrapper code
+ nm = SharedRuntime::generate_native_wrapper(&_masm,
+ method,
+ total_args_passed,
+ comp_args_on_stack,
+ sig_bt,regs,
+ ret_type);
+ }
+
+ // Must unlock before calling set_code
+ // Install the generated code.
+ if (nm != NULL) {
+ method->set_code(method, nm);
+ nm->post_compiled_method_load_event();
+ } else {
+ // CodeCache is full, disable compilation
+ // Ought to log this but compile log is only per compile thread
+    // and we're some nondescript Java thread.
+ UseInterpreter = true;
+ if (UseCompiler || AlwaysCompileLoopMethods ) {
+#ifndef PRODUCT
+ warning("CodeCache is full. Compiler has been disabled");
+ if (CompileTheWorld || ExitOnFullCodeCache) {
+ before_exit(JavaThread::current());
+ exit_globals(); // will delete tty
+ vm_direct_exit(CompileTheWorld ? 0 : 1);
+ }
+#endif
+ UseCompiler = false;
+ AlwaysCompileLoopMethods = false;
+ }
+ }
+ return nm;
+}
+
+// -------------------------------------------------------------------------
+// Java-Java calling convention
+// (what you use when Java calls Java)
+
+//------------------------------name_for_receiver----------------------------------
+// For a given signature, return the VMReg for parameter 0.
+VMReg SharedRuntime::name_for_receiver() {
+ VMRegPair regs;
+ BasicType sig_bt = T_OBJECT;
+ (void) java_calling_convention(&sig_bt, &regs, 1, true);
+ // Return argument 0 register. In the LP64 build pointers
+ // take 2 registers, but the VM wants only the 'main' name.
+ return regs.first();
+}
+
+VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool is_static, int* arg_size) {
+  // This method is returning a data structure allocated as a
+ // ResourceObject, so do not put any ResourceMarks in here.
+ char *s = sig->as_C_string();
+ int len = (int)strlen(s);
+  s++; len--;                   // Skip opening paren
+ char *t = s+len;
+ while( *(--t) != ')' ) ; // Find close paren
+
+ BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
+ VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
+ int cnt = 0;
+ if (!is_static) {
+ sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
+ }
+
+ while( s < t ) {
+ switch( *s++ ) { // Switch on signature character
+ case 'B': sig_bt[cnt++] = T_BYTE; break;
+ case 'C': sig_bt[cnt++] = T_CHAR; break;
+ case 'D': sig_bt[cnt++] = T_DOUBLE; sig_bt[cnt++] = T_VOID; break;
+ case 'F': sig_bt[cnt++] = T_FLOAT; break;
+ case 'I': sig_bt[cnt++] = T_INT; break;
+ case 'J': sig_bt[cnt++] = T_LONG; sig_bt[cnt++] = T_VOID; break;
+ case 'S': sig_bt[cnt++] = T_SHORT; break;
+ case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
+ case 'V': sig_bt[cnt++] = T_VOID; break;
+ case 'L': // Oop
+ while( *s++ != ';' ) ; // Skip signature
+ sig_bt[cnt++] = T_OBJECT;
+ break;
+ case '[': { // Array
+ do { // Skip optional size
+ while( *s >= '0' && *s <= '9' ) s++;
+ } while( *s++ == '[' ); // Nested arrays?
+ // Skip element type
+ if( s[-1] == 'L' )
+ while( *s++ != ';' ) ; // Skip signature
+ sig_bt[cnt++] = T_ARRAY;
+ break;
+ }
+ default : ShouldNotReachHere();
+ }
+ }
+ assert( cnt < 256, "grow table size" );
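+  // Illustrative example: sig "(ILjava/lang/String;[J)V" with is_static == false
+  // yields cnt == 4 and sig_bt = { T_OBJECT /*receiver*/, T_INT, T_OBJECT, T_ARRAY }
+  // (the return type after ')' is never reached since the scan stops at the closing paren).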
+
+ int comp_args_on_stack;
+ comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
+
+ // the calling convention doesn't count out_preserve_stack_slots so
+ // we must add that in to get "true" stack offsets.
+
+ if (comp_args_on_stack) {
+ for (int i = 0; i < cnt; i++) {
+ VMReg reg1 = regs[i].first();
+ if( reg1->is_stack()) {
+ // Yuck
+ reg1 = reg1->bias(out_preserve_stack_slots());
+ }
+ VMReg reg2 = regs[i].second();
+ if( reg2->is_stack()) {
+ // Yuck
+ reg2 = reg2->bias(out_preserve_stack_slots());
+ }
+ regs[i].set_pair(reg2, reg1);
+ }
+ }
+
+ // results
+ *arg_size = cnt;
+ return regs;
+}
+
+// OSR Migration Code
+//
+// This code is used to convert interpreter frames into compiled frames. It is
+// called from the very start of a compiled OSR nmethod. A temp array is
+// allocated to hold the interesting bits of the interpreter frame. All
+// active locks are inflated to allow them to move. The displaced headers and
+// active interpreter locals are copied into the temp buffer. Then we return
+// back to the compiled code. The compiled code then pops the current
+// interpreter frame off the stack and pushes a new compiled frame. Then it
+// copies the interpreter locals and displaced headers where it wants.
+// Finally it calls back to free the temp buffer.
+//
+// All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
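+// Buffer layout produced below (illustrative):
+//   buf[0 .. max_locals-1]   the interpreter locals
+//   buf[max_locals .. ]      (displaced header, object) pairs, two words per
+//                            active monitor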
+
+JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
+
+#ifdef IA64
+ ShouldNotReachHere(); // NYI
+#endif /* IA64 */
+
+ //
+ // This code is dependent on the memory layout of the interpreter local
+ // array and the monitors. On all of our platforms the layout is identical
+  // so this code is shared. If some platform lays their arrays out
+ // differently then this code could move to platform specific code or
+ // the code here could be modified to copy items one at a time using
+ // frame accessor methods and be platform independent.
+
+ frame fr = thread->last_frame();
+ assert( fr.is_interpreted_frame(), "" );
+ assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" );
+
+ // Figure out how many monitors are active.
+ int active_monitor_count = 0;
+ for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
+ kptr < fr.interpreter_frame_monitor_begin();
+ kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
+ if( kptr->obj() != NULL ) active_monitor_count++;
+ }
+
+ // QQQ we could place number of active monitors in the array so that compiled code
+ // could double check it.
+
+ methodOop moop = fr.interpreter_frame_method();
+ int max_locals = moop->max_locals();
+ // Allocate temp buffer, 1 word per local & 2 per active monitor
+ int buf_size_words = max_locals + active_monitor_count*2;
+ intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words);
+
+ // Copy the locals. Order is preserved so that loading of longs works.
+ // Since there's no GC I can copy the oops blindly.
+ assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
+ if (TaggedStackInterpreter) {
+ for (int i = 0; i < max_locals; i++) {
+ // copy only each local separately to the buffer avoiding the tag
+ buf[i] = *fr.interpreter_frame_local_at(max_locals-i-1);
+ }
+ } else {
+ Copy::disjoint_words(
+ (HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
+ (HeapWord*)&buf[0],
+ max_locals);
+ }
+
+ // Inflate locks. Copy the displaced headers. Be careful, there can be holes.
+ int i = max_locals;
+ for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
+ kptr2 < fr.interpreter_frame_monitor_begin();
+ kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
+ if( kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array
+ BasicLock *lock = kptr2->lock();
+ // Inflate so the displaced header becomes position-independent
+ if (lock->displaced_header()->is_unlocked())
+ ObjectSynchronizer::inflate_helper(kptr2->obj());
+ // Now the displaced header is free to move
+ buf[i++] = (intptr_t)lock->displaced_header();
+ buf[i++] = (intptr_t)kptr2->obj();
+ }
+ }
+ assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" );
+
+ return buf;
+JRT_END
+
+JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
+ FREE_C_HEAP_ARRAY(intptr_t,buf);
+JRT_END
+
+#ifndef PRODUCT
+bool AdapterHandlerLibrary::contains(CodeBlob* b) {
+
+ for (int i = 0 ; i < _handlers->length() ; i++) {
+ AdapterHandlerEntry* a = get_entry(i);
+ if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) return true;
+ }
+ return false;
+}
+
+void AdapterHandlerLibrary::print_handler(CodeBlob* b) {
+
+ for (int i = 0 ; i < _handlers->length() ; i++) {
+ AdapterHandlerEntry* a = get_entry(i);
+ if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) {
+ tty->print("Adapter for signature: ");
+ // Fingerprinter::print(_fingerprints->at(i));
+ tty->print("0x%" FORMAT64_MODIFIER "x", _fingerprints->at(i));
+ tty->print_cr(" i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
+ a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());
+
+ return;
+ }
+ }
+ assert(false, "Should have found handler");
+}
+#endif /* PRODUCT */
diff --git a/src/share/vm/runtime/sharedRuntime.hpp b/src/share/vm/runtime/sharedRuntime.hpp
new file mode 100644
index 000000000..b91837f36
--- /dev/null
+++ b/src/share/vm/runtime/sharedRuntime.hpp
@@ -0,0 +1,536 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class AdapterHandlerEntry;
+class vframeStream;
+
+// Runtime is the base class for various runtime interfaces
+// (InterpreterRuntime, CompilerRuntime, etc.). It provides
+// shared functionality such as exception forwarding (C++ to
+// Java exceptions), locking/unlocking mechanisms, statistical
+// information, etc.
+
+class SharedRuntime: AllStatic {
+ private:
+ static methodHandle resolve_sub_helper(JavaThread *thread,
+ bool is_virtual,
+ bool is_optimized, TRAPS);
+
+ // Shared stub locations
+
+ static RuntimeStub* _wrong_method_blob;
+ static RuntimeStub* _ic_miss_blob;
+ static RuntimeStub* _resolve_opt_virtual_call_blob;
+ static RuntimeStub* _resolve_virtual_call_blob;
+ static RuntimeStub* _resolve_static_call_blob;
+
+ static SafepointBlob* _polling_page_safepoint_handler_blob;
+ static SafepointBlob* _polling_page_return_handler_blob;
+#ifdef COMPILER2
+ static ExceptionBlob* _exception_blob;
+ static UncommonTrapBlob* _uncommon_trap_blob;
+#endif // COMPILER2
+
+#ifndef PRODUCT
+
+ // Counters
+ static int _nof_megamorphic_calls; // total # of megamorphic calls (through vtable)
+
+#endif // !PRODUCT
+ public:
+ // The following arithmetic routines are used on platforms that do
+ // not have machine instructions to implement their functionality.
+ // Do not remove these.
+
+ // long arithmetics
+ static jlong lmul(jlong y, jlong x);
+ static jlong ldiv(jlong y, jlong x);
+ static jlong lrem(jlong y, jlong x);
+
+ // float and double remainder
+ static jfloat frem(jfloat x, jfloat y);
+ static jdouble drem(jdouble x, jdouble y);
+
+ // float conversion (needs to set appropriate rounding mode)
+ static jint f2i (jfloat x);
+ static jlong f2l (jfloat x);
+ static jint d2i (jdouble x);
+ static jlong d2l (jdouble x);
+ static jfloat d2f (jdouble x);
+ static jfloat l2f (jlong x);
+ static jdouble l2d (jlong x);
+
+ // double trigonometrics and transcendentals
+ static jdouble dsin(jdouble x);
+ static jdouble dcos(jdouble x);
+ static jdouble dtan(jdouble x);
+ static jdouble dlog(jdouble x);
+ static jdouble dlog10(jdouble x);
+ static jdouble dexp(jdouble x);
+ static jdouble dpow(jdouble x, jdouble y);
+
+
+ // exception handling across interpreter/compiler boundaries
+ static address raw_exception_handler_for_return_address(address return_address);
+ static address exception_handler_for_return_address(address return_address);
+
+ // exception handling and implicit exceptions
+ static address compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
+ bool force_unwind, bool top_frame_only);
+ enum ImplicitExceptionKind {
+ IMPLICIT_NULL,
+ IMPLICIT_DIVIDE_BY_ZERO,
+ STACK_OVERFLOW
+ };
+ static void throw_AbstractMethodError(JavaThread* thread);
+ static void throw_ArithmeticException(JavaThread* thread);
+ static void throw_NullPointerException(JavaThread* thread);
+ static void throw_NullPointerException_at_call(JavaThread* thread);
+ static void throw_StackOverflowError(JavaThread* thread);
+ static address continuation_for_implicit_exception(JavaThread* thread,
+ address faulting_pc,
+ ImplicitExceptionKind exception_kind);
+
+ // Shared stub locations
+ static address get_poll_stub(address pc);
+
+ static address get_ic_miss_stub() {
+ assert(_ic_miss_blob!= NULL, "oops");
+ return _ic_miss_blob->instructions_begin();
+ }
+
+ static address get_handle_wrong_method_stub() {
+ assert(_wrong_method_blob!= NULL, "oops");
+ return _wrong_method_blob->instructions_begin();
+ }
+
+#ifdef COMPILER2
+ static void generate_uncommon_trap_blob(void);
+ static UncommonTrapBlob* uncommon_trap_blob() { return _uncommon_trap_blob; }
+#endif // COMPILER2
+
+ static address get_resolve_opt_virtual_call_stub(){
+ assert(_resolve_opt_virtual_call_blob != NULL, "oops");
+ return _resolve_opt_virtual_call_blob->instructions_begin();
+ }
+ static address get_resolve_virtual_call_stub() {
+ assert(_resolve_virtual_call_blob != NULL, "oops");
+ return _resolve_virtual_call_blob->instructions_begin();
+ }
+ static address get_resolve_static_call_stub() {
+ assert(_resolve_static_call_blob != NULL, "oops");
+ return _resolve_static_call_blob->instructions_begin();
+ }
+
+ static SafepointBlob* polling_page_return_handler_blob() { return _polling_page_return_handler_blob; }
+ static SafepointBlob* polling_page_safepoint_handler_blob() { return _polling_page_safepoint_handler_blob; }
+
+ // Counters
+#ifndef PRODUCT
+ static address nof_megamorphic_calls_addr() { return (address)&_nof_megamorphic_calls; }
+#endif // PRODUCT
+
+ // Helper routine for full-speed JVMTI exception throwing support
+ static void throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception);
+ static void throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message = NULL);
+
+ // To be used as the entry point for unresolved native methods.
+ static address native_method_throw_unsatisfied_link_error_entry();
+
+  // bytecode tracing is only used by the TraceBytecodes option
+ static intptr_t trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2) PRODUCT_RETURN0;
+
+ // Used to back off a spin lock that is under heavy contention
+ static void yield_all(JavaThread* thread, int attempts = 0);
+
+ static oop retrieve_receiver( symbolHandle sig, frame caller );
+
+ static void verify_caller_frame(frame caller_frame, methodHandle callee_method) PRODUCT_RETURN;
+ static methodHandle find_callee_method_inside_interpreter(frame caller_frame, methodHandle caller_method, int bci) PRODUCT_RETURN_(return methodHandle(););
+
+ static void register_finalizer(JavaThread* thread, oopDesc* obj);
+
+ // dtrace notifications
+ static int dtrace_object_alloc(oopDesc* o);
+ static int dtrace_object_alloc_base(Thread* thread, oopDesc* o);
+ static int dtrace_method_entry(JavaThread* thread, methodOopDesc* m);
+ static int dtrace_method_exit(JavaThread* thread, methodOopDesc* m);
+
+ // Utility method for retrieving the Java thread id, returns 0 if the
+ // thread is not a well formed Java thread.
+ static jlong get_java_tid(Thread* thread);
+
+
+  // used by native wrappers to re-enable the yellow pages if an overflow happened in native code
+ static void reguard_yellow_pages();
+
+ /**
+ * Fill in the "X cannot be cast to a Y" message for ClassCastException
+ *
+ * @param thr the current thread
+ * @param name the name of the class of the object attempted to be cast
+ * @return the dynamically allocated exception message (must be freed
+ * by the caller using a resource mark)
+ *
+ * BCP must refer to the current 'checkcast' opcode for the frame
+ * on top of the stack.
+   * The caller (or one of its callers) must use a ResourceMark
+ * in order to correctly free the result.
+ */
+ static char* generate_class_cast_message(JavaThread* thr, const char* name);
+
+ /**
+ * Fill in the "X cannot be cast to a Y" message for ClassCastException
+ *
+ * @param name the name of the class of the object attempted to be cast
+   * @param klass the name of the target klass of the attempted cast
+ * @return the dynamically allocated exception message (must be freed
+ * by the caller using a resource mark)
+ *
+   * This version does not require access to the frame, so it can be called
+ * from interpreted code
+   * The caller (or one of its callers) must use a ResourceMark
+ * in order to correctly free the result.
+ */
+ static char* generate_class_cast_message(const char* name, const char* klass);
+
+  // Resolves a call site - may patch in the destination of the call into the
+ // compiled code.
+ static methodHandle resolve_helper(JavaThread *thread,
+ bool is_virtual,
+ bool is_optimized, TRAPS);
+
+ static void generate_stubs(void);
+
+ private:
+ // deopt blob
+ static void generate_deopt_blob(void);
+ static DeoptimizationBlob* _deopt_blob;
+
+ public:
+ static DeoptimizationBlob* deopt_blob(void) { return _deopt_blob; }
+
+ // Resets a call-site in compiled code so it will get resolved again.
+ static methodHandle reresolve_call_site(JavaThread *thread, TRAPS);
+
+ // In the code prolog, if the klass comparison fails, the inline cache
+ // misses and the call site is patched to megamorphic
+ static methodHandle handle_ic_miss_helper(JavaThread* thread, TRAPS);
+
+ // Find the method that called us.
+ static methodHandle find_callee_method(JavaThread* thread, TRAPS);
+
+
+ private:
+ static Handle find_callee_info(JavaThread* thread,
+ Bytecodes::Code& bc,
+ CallInfo& callinfo, TRAPS);
+ static Handle find_callee_info_helper(JavaThread* thread,
+ vframeStream& vfst,
+ Bytecodes::Code& bc,
+ CallInfo& callinfo, TRAPS);
+
+ static address clean_virtual_call_entry();
+ static address clean_opt_virtual_call_entry();
+ static address clean_static_call_entry();
+
+ public:
+
+
+ static void create_native_wrapper (JavaThread* thread, methodOop method);
+
+ // Read the array of BasicTypes from a Java signature, and compute where
+ // compiled Java code would like to put the results. Values in reg_lo and
+ // reg_hi refer to 4-byte quantities. Values less than SharedInfo::stack0 are
+ // registers, those above refer to 4-byte stack slots. All stack slots are
+ // based off of the window top. SharedInfo::stack0 refers to the first usable
+ // slot in the bottom of the frame. SharedInfo::stack0+1 refers to the memory word
+ // 4-bytes higher. So for sparc because the register window save area is at
+ // the bottom of the frame the first 16 words will be skipped and SharedInfo::stack0
+  // will be just above it.
+ // return value is the maximum number of VMReg stack slots the convention will use.
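+  // Illustrative use (the actual register/slot assignment is platform-specific):
+  //   BasicType sig_bt[2] = { T_OBJECT, T_INT };
+  //   VMRegPair regs[2];
+  //   int slots = java_calling_convention(sig_bt, regs, 2, false);
+  // afterwards regs[i] names the register or stack slot for argument i and
+  // 'slots' is the maximum number of VMReg stack slots used.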
+ static int java_calling_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed, int is_outgoing);
+
+ // Ditto except for calling C
+ static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed);
+
+ // Generate I2C and C2I adapters. These adapters are simple argument marshalling
+ // blobs. Unlike adapters in the tiger and earlier releases the code in these
+  // blobs does not create a new frame and is therefore virtually invisible
+  // to the stack walking code. In general these blobs extend the caller's stack
+ // as needed for the conversion of argument locations.
+
+ // When calling a c2i blob the code will always call the interpreter even if
+ // by the time we reach the blob there is compiled code available. This allows
+ // the blob to pass the incoming stack pointer (the sender sp) in a known
+ // location for the interpreter to record. This is used by the frame code
+ // to correct the sender code to match up with the stack pointer when the
+ // thread left the compiled code. In addition it allows the interpreter
+  // to remove the space the c2i adapter allocated to do its argument conversion.
+
+  // Although a c2i blob will always run interpreted, if we see that compiled
+  // code is present the compiled call site will be patched/re-resolved so
+  // that later calls will run compiled.
+
+  // Additionally a c2i blob needs to have an unverified entry because it can be reached
+  // in situations where the call site is an inline cache site and may go megamorphic.
+
+  // An i2c adapter is simpler than the c2i adapter. This is because it is assumed
+ // that the interpreter before it does any call dispatch will record the current
+ // stack pointer in the interpreter frame. On return it will restore the stack
+ // pointer as needed. This means the i2c adapter code doesn't need any special
+ // handshaking path with compiled code to keep the stack walking correct.
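+  // Roughly (illustrative):
+  //   interpreted caller --i2c adapter--> compiled nmethod entry
+  //   compiled caller    --c2i adapter--> interpreter (Rmethod->_i2i_entry)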
+
+ static AdapterHandlerEntry* generate_i2c2i_adapters(MacroAssembler *_masm,
+ int total_args_passed,
+ int max_arg,
+ const BasicType *sig_bt,
+ const VMRegPair *regs);
+
+ // OSR support
+
+ // OSR_migration_begin will extract the jvm state from an interpreter
+ // frame (locals, monitors) and store the data in a piece of C heap
+ // storage. This then allows the interpreter frame to be removed from the
+ // stack and the OSR nmethod to be called. That method is called with a
+ // pointer to the C heap storage. This pointer is the return value from
+ // OSR_migration_begin.
+
+ static intptr_t* OSR_migration_begin( JavaThread *thread);
+
+ // OSR_migration_end is a trivial routine. It is called after the compiled
+ // method has extracted the jvm state from the C heap that OSR_migration_begin
+  // created. Its entire job is to simply free this storage.
+ static void OSR_migration_end ( intptr_t* buf);
+
+ // Convert a sig into a calling convention register layout
+ // and find interesting things about it.
+ static VMRegPair* find_callee_arguments(symbolOop sig, bool is_static, int *arg_size);
+ static VMReg name_for_receiver();
+
+ // "Top of Stack" slots that may be unused by the calling convention but must
+ // otherwise be preserved.
+ // On Intel these are not necessary and the value can be zero.
+ // On Sparc this describes the words reserved for storing a register window
+ // when an interrupt occurs.
+ static uint out_preserve_stack_slots();
+
+ // Save and restore a native result
+ static void save_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots );
+ static void restore_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots );
+
+ // Generate a native wrapper for a given method. The method takes arguments
+ // in the Java compiled code convention, marshals them to the native
+ // convention (handlizes oops, etc), transitions to native, makes the call,
+ // returns to java state (possibly blocking), unhandlizes any result and
+ // returns.
+ static nmethod *generate_native_wrapper(MacroAssembler* masm,
+ methodHandle method,
+ int total_args_passed,
+ int max_arg,
+ BasicType *sig_bt,
+ VMRegPair *regs,
+ BasicType ret_type );
+
+ // A compiled caller has just called the interpreter, but compiled code
+  // exists. Patch the caller so it no longer calls into the interpreter.
+ static void fixup_callers_callsite(methodOopDesc* moop, address ret_pc);
+
+ // Slow-path Locking and Unlocking
+ static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* thread);
+ static void complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock);
+
+ // Resolving of calls
+ static address resolve_static_call_C (JavaThread *thread);
+ static address resolve_virtual_call_C (JavaThread *thread);
+ static address resolve_opt_virtual_call_C(JavaThread *thread);
+
+ // arraycopy, the non-leaf version. (See StubRoutines for all the leaf calls.)
+ static void slow_arraycopy_C(oopDesc* src, jint src_pos,
+ oopDesc* dest, jint dest_pos,
+ jint length, JavaThread* thread);
+
+ // handle ic miss with caller being compiled code
+ // wrong method handling (inline cache misses, zombie methods)
+ static address handle_wrong_method(JavaThread* thread);
+ static address handle_wrong_method_ic_miss(JavaThread* thread);
+
+#ifndef PRODUCT
+
+ // Collect and print inline cache miss statistics
+ private:
+ enum { maxICmiss_count = 100 };
+ static int _ICmiss_index; // length of IC miss histogram
+ static int _ICmiss_count[maxICmiss_count]; // miss counts
+ static address _ICmiss_at[maxICmiss_count]; // miss addresses
+ static void trace_ic_miss(address at);
+
+ public:
+ static int _monitor_enter_ctr; // monitor enter slow
+ static int _monitor_exit_ctr; // monitor exit slow
+ static int _throw_null_ctr; // throwing a null-pointer exception
+ static int _ic_miss_ctr; // total # of IC misses
+ static int _wrong_method_ctr;
+ static int _resolve_static_ctr;
+ static int _resolve_virtual_ctr;
+ static int _resolve_opt_virtual_ctr;
+ static int _implicit_null_throws;
+ static int _implicit_div0_throws;
+
+ static int _jbyte_array_copy_ctr; // Slow-path byte array copy
+ static int _jshort_array_copy_ctr; // Slow-path short array copy
+ static int _jint_array_copy_ctr; // Slow-path int array copy
+ static int _jlong_array_copy_ctr; // Slow-path long array copy
+ static int _oop_array_copy_ctr; // Slow-path oop array copy
+ static int _checkcast_array_copy_ctr; // Slow-path oop array copy, with cast
+ static int _unsafe_array_copy_ctr; // Slow-path includes alignment checks
+ static int _generic_array_copy_ctr; // Slow-path includes type decoding
+ static int _slow_array_copy_ctr; // Slow-path failed out to a method call
+
+ static int _new_instance_ctr; // 'new' object requires GC
+ static int _new_array_ctr; // 'new' array requires GC
+ static int _multi1_ctr, _multi2_ctr, _multi3_ctr, _multi4_ctr, _multi5_ctr;
+ static int _find_handler_ctr; // find exception handler
+ static int _rethrow_ctr; // rethrow exception
+ static int _mon_enter_stub_ctr; // monitor enter stub
+ static int _mon_exit_stub_ctr; // monitor exit stub
+ static int _mon_enter_ctr; // monitor enter slow
+ static int _mon_exit_ctr; // monitor exit slow
+  static int _partial_subtype_ctr;        // StubRoutines::partial_subtype_check
+
+ // Statistics code
+ // stats for "normal" compiled calls (non-interface)
+ static int _nof_normal_calls; // total # of calls
+ static int _nof_optimized_calls; // total # of statically-bound calls
+ static int _nof_inlined_calls; // total # of inlined normal calls
+ static int _nof_static_calls; // total # of calls to static methods or super methods (invokespecial)
+ static int _nof_inlined_static_calls; // total # of inlined static calls
+ // stats for compiled interface calls
+ static int _nof_interface_calls; // total # of compiled calls
+ static int _nof_optimized_interface_calls; // total # of statically-bound interface calls
+ static int _nof_inlined_interface_calls; // total # of inlined interface calls
+ static int _nof_megamorphic_interface_calls;// total # of megamorphic interface calls
+ // stats for runtime exceptions
+ static int _nof_removable_exceptions; // total # of exceptions that could be replaced by branches due to inlining
+
+ public: // for compiler
+ static address nof_normal_calls_addr() { return (address)&_nof_normal_calls; }
+ static address nof_optimized_calls_addr() { return (address)&_nof_optimized_calls; }
+ static address nof_inlined_calls_addr() { return (address)&_nof_inlined_calls; }
+ static address nof_static_calls_addr() { return (address)&_nof_static_calls; }
+ static address nof_inlined_static_calls_addr() { return (address)&_nof_inlined_static_calls; }
+ static address nof_interface_calls_addr() { return (address)&_nof_interface_calls; }
+ static address nof_optimized_interface_calls_addr() { return (address)&_nof_optimized_interface_calls; }
+ static address nof_inlined_interface_calls_addr() { return (address)&_nof_inlined_interface_calls; }
+ static address nof_megamorphic_interface_calls_addr() { return (address)&_nof_megamorphic_interface_calls; }
+ static void print_call_statistics(int comp_total);
+ static void print_statistics();
+ static void print_ic_miss_histogram();
+
+#endif // PRODUCT
+};
+
+
+// ---------------------------------------------------------------------------
+// Implementation of AdapterHandlerLibrary
+//
+// This library manages argument marshaling adapters and native wrappers.
+// There are 2 flavors of adapters: I2C and C2I.
+//
+// The I2C flavor takes a stock interpreted call setup, marshals the arguments
+// for a Java-compiled call, and jumps to Rmethod->code()->instructions_begin().
+// It is broken to call it without an nmethod assigned.
+// The usual behavior is to lift any register arguments up out of the stack
+// and possibly re-pack the extra arguments to be contiguous. I2C adapters
+// will save what the interpreter's stack pointer will be after arguments are
+// popped, then adjust the interpreter's frame size to force alignment and
+// possibly to repack the arguments. After re-packing, it jumps to the
+// compiled code start. There are no safepoints in this adapter code and a GC
+// cannot happen while marshaling is in progress.
+//
+// The C2I flavor takes a stock compiled call setup plus the target method in
+// Rmethod, marshals the arguments for an interpreted call and jumps to
+// Rmethod->_i2i_entry. On entry, the interpreted frame has not yet been
+// set up. Compiled frames are fixed-size and the args are likely not in the
+// right place. Hence all the args will likely be copied into the
+// interpreter's frame, forcing that frame to grow. The compiled frame's
+// outgoing stack args will be dead after the copy.
+//
+// Native wrappers, like adapters, marshal arguments. Unlike adapters they
+// also perform an official frame push & pop. They have a call to the native
+// routine in their middles and end in a return (instead of ending in a jump).
+// The native wrappers are stored in real nmethods instead of the BufferBlobs
+// used by the adapters. The code generation happens here because it's very
+// similar to what the adapters have to do.
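+//
+// Each AdapterHandlerEntry below simply records the three generated entry
+// points; illustrative use, for some methodHandle mh:
+//   AdapterHandlerEntry* e = AdapterHandlerLibrary::get_adapter(mh);
+//   e->get_i2c_entry();            // interpreted caller -> compiled callee
+//   e->get_c2i_entry();            // compiled caller -> interpreter
+//   e->get_c2i_unverified_entry(); // c2i entry reachable from inline cache call sites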
+
+class AdapterHandlerEntry : public CHeapObj {
+ private:
+ address _i2c_entry;
+ address _c2i_entry;
+ address _c2i_unverified_entry;
+
+ public:
+ AdapterHandlerEntry(address i2c_entry, address c2i_entry, address c2i_unverified_entry):
+ _i2c_entry(i2c_entry),
+ _c2i_entry(c2i_entry),
+ _c2i_unverified_entry(c2i_unverified_entry) {
+ }
+ // The name we give all buffer blobs
+ static const char* name;
+
+ address get_i2c_entry() { return _i2c_entry; }
+ address get_c2i_entry() { return _c2i_entry; }
+ address get_c2i_unverified_entry() { return _c2i_unverified_entry; }
+ void relocate(address new_base);
+#ifndef PRODUCT
+ void print();
+#endif /* PRODUCT */
+};
+
+
+class AdapterHandlerLibrary: public AllStatic {
+ private:
+ enum {
+ AbstractMethodHandler = 1 // special handler for abstract methods
+ };
+ static GrowableArray<uint64_t>* _fingerprints; // the fingerprint collection
+ static GrowableArray<AdapterHandlerEntry*> * _handlers; // the corresponding handlers
+ static u_char _buffer[]; // the temporary code buffer
+ static void initialize();
+ static AdapterHandlerEntry* get_entry( int index ) { return _handlers->at(index); }
+ static int get_create_adapter_index(methodHandle method);
+ static address get_i2c_entry( int index ) { return get_entry(index)->get_i2c_entry(); }
+ static address get_c2i_entry( int index ) { return get_entry(index)->get_c2i_entry(); }
+ static address get_c2i_unverified_entry( int index ) { return get_entry(index)->get_c2i_unverified_entry(); }
+
+ public:
+ static nmethod* create_native_wrapper(methodHandle method);
+ static AdapterHandlerEntry* get_adapter(methodHandle method) { return get_entry(get_create_adapter_index(method)); }
+
+#ifndef PRODUCT
+ static void print_handler(CodeBlob* b);
+ static bool contains(CodeBlob* b);
+#endif /* PRODUCT */
+
+};
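+
+// --------------------------------------------------------------------------
+// Editor's note: illustrative sketch only, not part of the HotSpot sources.
+// It restates the caching idea behind AdapterHandlerLibrary -- adapters are
+// keyed by a signature fingerprint, so every method with the same argument
+// layout shares one I2C/C2I pair -- using standard containers.  The
+// ToyAdapterEntry/ToyAdapterCache names are invented for the sketch.
+
+#include <cstdint>
+#include <unordered_map>
+
+struct ToyAdapterEntry {                    // stand-in for AdapterHandlerEntry
+  const void* i2c_entry;                    // interpreted -> compiled stub
+  const void* c2i_entry;                    // compiled -> interpreted stub
+};
+
+class ToyAdapterCache {
+ public:
+  // Look up (or lazily create) the entry for a fingerprint; operator[]
+  // value-initializes a new entry on first use, mirroring how the library
+  // generates an adapter only the first time a fingerprint is seen.
+  ToyAdapterEntry& get_or_create(uint64_t fingerprint) {
+    return _table[fingerprint];
+  }
+ private:
+  std::unordered_map<uint64_t, ToyAdapterEntry> _table;
+};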
diff --git a/src/share/vm/runtime/sharedRuntimeTrans.cpp b/src/share/vm/runtime/sharedRuntimeTrans.cpp
new file mode 100644
index 000000000..96db2c203
--- /dev/null
+++ b/src/share/vm/runtime/sharedRuntimeTrans.cpp
@@ -0,0 +1,719 @@
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharedRuntimeTrans.cpp.incl"
+
+// This file contains copies of the fdlibm routines used by
+// StrictMath. It turns out that it is almost always required to use
+// these runtime routines; the Intel CPU doesn't meet the Java
+// specification for sin/cos outside a certain limited argument range,
+// and the SPARC CPU doesn't appear to have sin/cos instructions. It
+// also turns out that avoiding the indirect call through function
+// pointer out to libjava.so in SharedRuntime speeds these routines up
+// by roughly 15% on both Win32/x86 and Solaris/SPARC.
+
+// Enabling optimizations in this file causes incorrect code to be
+// generated; we cannot figure out how to turn down optimization for one
+// file in the IDE on Windows
+#ifdef WIN32
+# pragma optimize ( "", off )
+#endif
+
+#include <math.h>
+
+// VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles
+// [jk] this is not 100% correct because the float word order may differ
+// from the byte order (e.g. on ARM)
+#ifdef VM_LITTLE_ENDIAN
+# define __HI(x) *(1+(int*)&x)
+# define __LO(x) *(int*)&x
+#else
+# define __HI(x) *(int*)&x
+# define __LO(x) *(1+(int*)&x)
+#endif
+
+double copysign(double x, double y) {
+ __HI(x) = (__HI(x)&0x7fffffff)|(__HI(y)&0x80000000);
+ return x;
+}
+
+/*
+ * ====================================================
+ * Copyright (C) 1998 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * scalbn (double x, int n)
+ * scalbn(x,n) returns x* 2**n computed by exponent
+ * manipulation rather than by actually performing an
+ * exponentiation or a multiplication.
+ */
+
+static const double
+two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
+ twom54 = 5.55111512312578270212e-17, /* 0x3C900000, 0x00000000 */
+ hugeX = 1.0e+300,
+ tiny = 1.0e-300;
+
+double scalbn (double x, int n) {
+ int k,hx,lx;
+ hx = __HI(x);
+ lx = __LO(x);
+ k = (hx&0x7ff00000)>>20; /* extract exponent */
+ if (k==0) { /* 0 or subnormal x */
+ if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */
+ x *= two54;
+ hx = __HI(x);
+ k = ((hx&0x7ff00000)>>20) - 54;
+ if (n< -50000) return tiny*x; /*underflow*/
+ }
+ if (k==0x7ff) return x+x; /* NaN or Inf */
+ k = k+n;
+ if (k > 0x7fe) return hugeX*copysign(hugeX,x); /* overflow */
+ if (k > 0) /* normal result */
+ {__HI(x) = (hx&0x800fffff)|(k<<20); return x;}
+ if (k <= -54) {
+ if (n > 50000) /* in case integer overflow in n+k */
+ return hugeX*copysign(hugeX,x); /*overflow*/
+ else return tiny*copysign(tiny,x); /*underflow*/
+ }
+ k += 54; /* subnormal result */
+ __HI(x) = (hx&0x800fffff)|(k<<20);
+ return x*twom54;
+}
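+
+// Editor's note: illustrative sketch only, not part of the fdlibm sources.
+// The "exponent manipulation" described above can be seen directly on the
+// IEEE-754 bit pattern: for a normal double, adding n to the 11-bit biased
+// exponent field multiplies the value by 2**n.  The helper below shows only
+// that happy path and leaves out the zero/subnormal/NaN/Inf and overflow
+// handling that the real scalbn() provides; the name is invented.
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+
+static double sketch_scalbn_normal(double x, int n) {
+  uint64_t bits;
+  std::memcpy(&bits, &x, sizeof bits);              // portable view of the bits
+  int64_t exp = (int64_t)((bits >> 52) & 0x7ff);    // biased exponent field
+  // Valid only while the new exponent stays in the normal range 1..2046.
+  bits = (bits & ~(UINT64_C(0x7ff) << 52)) | ((uint64_t)(exp + n) << 52);
+  std::memcpy(&x, &bits, sizeof x);
+  return x;                                         // equals ldexp(x, n) here
+}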
+
+/* __ieee754_log(x)
+ * Return the logrithm of x
+ *
+ * Method :
+ * 1. Argument Reduction: find k and f such that
+ * x = 2^k * (1+f),
+ * where sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ * 2. Approximation of log(1+f).
+ * Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+ * = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+ * = 2s + s*R
+ * We use a special Remez algorithm on [0,0.1716] to generate
+ * a polynomial of degree 14 to approximate R The maximum error
+ * of this polynomial approximation is bounded by 2**-58.45. In
+ * other words,
+ * 2 4 6 8 10 12 14
+ * R(z) ~ Lg1*s +Lg2*s +Lg3*s +Lg4*s +Lg5*s +Lg6*s +Lg7*s
+ * (the values of Lg1 to Lg7 are listed in the program)
+ * and
+ * | 2 14 | -58.45
+ * | Lg1*s +...+Lg7*s - R(z) | <= 2
+ * | |
+ * Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+ * In order to guarantee error in log below 1ulp, we compute log
+ * by
+ * log(1+f) = f - s*(f - R) (if f is not too large)
+ * log(1+f) = f - (hfsq - s*(hfsq+R)). (better accuracy)
+ *
+ * 3. Finally, log(x) = k*ln2 + log(1+f).
+ * = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+ * Here ln2 is split into two floating point number:
+ * ln2_hi + ln2_lo,
+ * where n*ln2_hi is always exact for |n| < 2000.
+ *
+ * Special cases:
+ * log(x) is NaN with signal if x < 0 (including -INF) ;
+ * log(+INF) is +INF; log(0) is -INF with signal;
+ * log(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+static const double
+ln2_hi = 6.93147180369123816490e-01, /* 3fe62e42 fee00000 */
+ ln2_lo = 1.90821492927058770002e-10, /* 3dea39ef 35793c76 */
+ Lg1 = 6.666666666666735130e-01, /* 3FE55555 55555593 */
+ Lg2 = 3.999999999940941908e-01, /* 3FD99999 9997FA04 */
+ Lg3 = 2.857142874366239149e-01, /* 3FD24924 94229359 */
+ Lg4 = 2.222219843214978396e-01, /* 3FCC71C5 1D8E78AF */
+ Lg5 = 1.818357216161805012e-01, /* 3FC74664 96CB03DE */
+ Lg6 = 1.531383769920937332e-01, /* 3FC39A09 D078C69F */
+ Lg7 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
+
+static double zero = 0.0;
+
+static double __ieee754_log(double x) {
+ double hfsq,f,s,z,R,w,t1,t2,dk;
+ int k,hx,i,j;
+ unsigned lx;
+
+ hx = __HI(x); /* high word of x */
+ lx = __LO(x); /* low word of x */
+
+ k=0;
+ if (hx < 0x00100000) { /* x < 2**-1022 */
+ if (((hx&0x7fffffff)|lx)==0)
+ return -two54/zero; /* log(+-0)=-inf */
+ if (hx<0) return (x-x)/zero; /* log(-#) = NaN */
+ k -= 54; x *= two54; /* subnormal number, scale up x */
+ hx = __HI(x); /* high word of x */
+ }
+ if (hx >= 0x7ff00000) return x+x;
+ k += (hx>>20)-1023;
+ hx &= 0x000fffff;
+ i = (hx+0x95f64)&0x100000;
+ __HI(x) = hx|(i^0x3ff00000); /* normalize x or x/2 */
+ k += (i>>20);
+ f = x-1.0;
+ if((0x000fffff&(2+hx))<3) { /* |f| < 2**-20 */
+ if(f==zero) {
+ if (k==0) return zero;
+ else {dk=(double)k; return dk*ln2_hi+dk*ln2_lo;}
+ }
+ R = f*f*(0.5-0.33333333333333333*f);
+ if(k==0) return f-R; else {dk=(double)k;
+ return dk*ln2_hi-((R-dk*ln2_lo)-f);}
+ }
+ s = f/(2.0+f);
+ dk = (double)k;
+ z = s*s;
+ i = hx-0x6147a;
+ w = z*z;
+ j = 0x6b851-hx;
+ t1= w*(Lg2+w*(Lg4+w*Lg6));
+ t2= z*(Lg1+w*(Lg3+w*(Lg5+w*Lg7)));
+ i |= j;
+ R = t2+t1;
+ if(i>0) {
+ hfsq=0.5*f*f;
+ if(k==0) return f-(hfsq-s*(hfsq+R)); else
+ return dk*ln2_hi-((hfsq-(s*(hfsq+R)+dk*ln2_lo))-f);
+ } else {
+ if(k==0) return f-s*(f-R); else
+ return dk*ln2_hi-((s*(f-R)-dk*ln2_lo)-f);
+ }
+}
+
+JRT_LEAF(jdouble, SharedRuntime::dlog(jdouble x))
+ return __ieee754_log(x);
+JRT_END
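+
+// Editor's note: illustrative sketch only, not part of the fdlibm sources.
+// Steps 1 and 3 of the method above can be reproduced with portable library
+// calls: frexp() gives x = m * 2**e with m in [0.5,1); shifting m into
+// [sqrt(2)/2, sqrt(2)) yields the k and f of the comment, and then
+// log(x) = k*ln2 + log1p(f) up to rounding.  The degree-14 polynomial above
+// is what replaces the log1p() call in the real routine.
+#include <cmath>
+
+static double sketch_log(double x) {                // assumes finite x > 0
+  int e;
+  double m = std::frexp(x, &e);                     // x = m * 2**e, 0.5 <= m < 1
+  if (m < 0.70710678118654752440) {                 // keep 1+f in [sqrt(2)/2, sqrt(2))
+    m *= 2.0;
+    e -= 1;
+  }
+  const double ln2 = 0.69314718055994530942;
+  return e * ln2 + std::log1p(m - 1.0);             // k*ln2 + log(1+f)
+}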
+
+/* __ieee754_log10(x)
+ * Return the base 10 logarithm of x
+ *
+ * Method :
+ * Let log10_2hi = leading 40 bits of log10(2) and
+ * log10_2lo = log10(2) - log10_2hi,
+ * ivln10 = 1/log(10) rounded.
+ * Then
+ * n = ilogb(x),
+ * if(n<0) n = n+1;
+ * x = scalbn(x,-n);
+ * log10(x) := n*log10_2hi + (n*log10_2lo + ivln10*log(x))
+ *
+ * Note 1:
+ * To guarantee log10(10**n)=n, where 10**n is normal, the rounding
+ * mode must be set to Round-to-Nearest.
+ * Note 2:
+ * [1/log(10)] rounded to 53 bits has error .198 ulps;
+ * log10 is monotonic at all binary break points.
+ *
+ * Special cases:
+ * log10(x) is NaN with signal if x < 0;
+ * log10(+INF) is +INF with no signal; log10(0) is -INF with signal;
+ * log10(NaN) is that NaN with no signal;
+ * log10(10**N) = N for N=0,1,...,22.
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following constants.
+ * The decimal values may be used, provided that the compiler will convert
+ * from decimal to binary accurately enough to produce the hexadecimal values
+ * shown.
+ */
+
+static const double
+ivln10 = 4.34294481903251816668e-01, /* 0x3FDBCB7B, 0x1526E50E */
+ log10_2hi = 3.01029995663611771306e-01, /* 0x3FD34413, 0x509F6000 */
+ log10_2lo = 3.69423907715893078616e-13; /* 0x3D59FEF3, 0x11F12B36 */
+
+static double __ieee754_log10(double x) {
+ double y,z;
+ int i,k,hx;
+ unsigned lx;
+
+ hx = __HI(x); /* high word of x */
+ lx = __LO(x); /* low word of x */
+
+ k=0;
+ if (hx < 0x00100000) { /* x < 2**-1022 */
+ if (((hx&0x7fffffff)|lx)==0)
+ return -two54/zero; /* log(+-0)=-inf */
+ if (hx<0) return (x-x)/zero; /* log(-#) = NaN */
+ k -= 54; x *= two54; /* subnormal number, scale up x */
+ hx = __HI(x); /* high word of x */
+ }
+ if (hx >= 0x7ff00000) return x+x;
+ k += (hx>>20)-1023;
+ i = ((unsigned)k&0x80000000)>>31;
+ hx = (hx&0x000fffff)|((0x3ff-i)<<20);
+ y = (double)(k+i);
+ __HI(x) = hx;
+ z = y*log10_2lo + ivln10*__ieee754_log(x);
+ return z+y*log10_2hi;
+}
+
+JRT_LEAF(jdouble, SharedRuntime::dlog10(jdouble x))
+ return __ieee754_log10(x);
+JRT_END
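+
+// Editor's note: illustrative sketch only, not part of the fdlibm sources.
+// It follows the method comment above literally -- n = ilogb(x), scale x by
+// 2**-n, then combine n*log10(2) with (1/ln10)*log(x) -- but collapses the
+// hi/lo split constants into single doubles, so it is less accurate than the
+// real routine.  The helper name is invented.
+#include <cmath>
+
+static double sketch_log10(double x) {              // assumes finite x > 0
+  int n = std::ilogb(x);
+  if (n < 0) n = n + 1;                             // as in the method above
+  double y = std::scalbn(x, -n);                    // x scaled near 1
+  const double log10_2 = 0.30102999566398119521;    // log10(2)
+  const double ivln10  = 0.43429448190325182765;    // 1/ln(10)
+  return n * log10_2 + ivln10 * std::log(y);
+}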
+
+
+/* __ieee754_exp(x)
+ * Returns the exponential of x.
+ *
+ * Method
+ * 1. Argument reduction:
+ * Reduce x to an r so that |r| <= 0.5*ln2 ~ 0.34658.
+ * Given x, find r and integer k such that
+ *
+ * x = k*ln2 + r, |r| <= 0.5*ln2.
+ *
+ * Here r will be represented as r = hi-lo for better
+ * accuracy.
+ *
+ * 2. Approximation of exp(r) by a special rational function on
+ * the interval [0,0.34658]:
+ * Write
+ * R(r**2) = r*(exp(r)+1)/(exp(r)-1) = 2 + r*r/6 - r**4/360 + ...
+ * We use a special Remez algorithm on [0,0.34658] to generate
+ * a polynomial of degree 5 to approximate R. The maximum error
+ * of this polynomial approximation is bounded by 2**-59. In
+ * other words,
+ * R(z) ~ 2.0 + P1*z + P2*z**2 + P3*z**3 + P4*z**4 + P5*z**5
+ * (where z=r*r, and the values of P1 to P5 are listed below)
+ * and
+ * | 5 | -59
+ * | 2.0+P1*z+...+P5*z - R(z) | <= 2
+ * | |
+ * The computation of exp(r) thus becomes
+ * 2*r
+ * exp(r) = 1 + -------
+ * R - r
+ * r*R1(r)
+ * = 1 + r + ----------- (for better accuracy)
+ * 2 - R1(r)
+ * where
+ * 2 4 10
+ * R1(r) = r - (P1*r + P2*r + ... + P5*r ).
+ *
+ * 3. Scale back to obtain exp(x):
+ * From step 1, we have
+ * exp(x) = 2^k * exp(r)
+ *
+ * Special cases:
+ * exp(INF) is INF, exp(NaN) is NaN;
+ * exp(-INF) is 0, and
+ * for finite argument, only exp(0)=1 is exact.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Misc. info.
+ * For IEEE double
+ * if x > 7.09782712893383973096e+02 then exp(x) overflow
+ * if x < -7.45133219101941108420e+02 then exp(x) underflow
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+static const double
+one = 1.0,
+ halF[2] = {0.5,-0.5,},
+ twom1000= 9.33263618503218878990e-302, /* 2**-1000=0x01700000,0*/
+ o_threshold= 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
+ u_threshold= -7.45133219101941108420e+02, /* 0xc0874910, 0xD52D3051 */
+ ln2HI[2] ={ 6.93147180369123816490e-01, /* 0x3fe62e42, 0xfee00000 */
+ -6.93147180369123816490e-01,},/* 0xbfe62e42, 0xfee00000 */
+ ln2LO[2] ={ 1.90821492927058770002e-10, /* 0x3dea39ef, 0x35793c76 */
+ -1.90821492927058770002e-10,},/* 0xbdea39ef, 0x35793c76 */
+ invln2 = 1.44269504088896338700e+00, /* 0x3ff71547, 0x652b82fe */
+ P1 = 1.66666666666666019037e-01, /* 0x3FC55555, 0x5555553E */
+ P2 = -2.77777777770155933842e-03, /* 0xBF66C16C, 0x16BEBD93 */
+ P3 = 6.61375632143793436117e-05, /* 0x3F11566A, 0xAF25DE2C */
+ P4 = -1.65339022054652515390e-06, /* 0xBEBBBD41, 0xC5D26BF1 */
+ P5 = 4.13813679705723846039e-08; /* 0x3E663769, 0x72BEA4D0 */
+
+static double __ieee754_exp(double x) {
+ double y,hi=0,lo=0,c,t;
+ int k=0,xsb;
+ unsigned hx;
+
+ hx = __HI(x); /* high word of x */
+ xsb = (hx>>31)&1; /* sign bit of x */
+ hx &= 0x7fffffff; /* high word of |x| */
+
+ /* filter out non-finite argument */
+ if(hx >= 0x40862E42) { /* if |x|>=709.78... */
+ if(hx>=0x7ff00000) {
+ if(((hx&0xfffff)|__LO(x))!=0)
+ return x+x; /* NaN */
+ else return (xsb==0)? x:0.0; /* exp(+-inf)={inf,0} */
+ }
+ if(x > o_threshold) return hugeX*hugeX; /* overflow */
+ if(x < u_threshold) return twom1000*twom1000; /* underflow */
+ }
+
+ /* argument reduction */
+ if(hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
+ if(hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
+ hi = x-ln2HI[xsb]; lo=ln2LO[xsb]; k = 1-xsb-xsb;
+ } else {
+ k = (int)(invln2*x+halF[xsb]);
+ t = k;
+ hi = x - t*ln2HI[0]; /* t*ln2HI is exact here */
+ lo = t*ln2LO[0];
+ }
+ x = hi - lo;
+ }
+ else if(hx < 0x3e300000) { /* when |x|<2**-28 */
+ if(hugeX+x>one) return one+x;/* trigger inexact */
+ }
+ else k = 0;
+
+ /* x is now in primary range */
+ t = x*x;
+ c = x - t*(P1+t*(P2+t*(P3+t*(P4+t*P5))));
+ if(k==0) return one-((x*c)/(c-2.0)-x);
+ else y = one-((lo-(x*c)/(2.0-c))-hi);
+ if(k >= -1021) {
+ __HI(y) += (k<<20); /* add k to y's exponent */
+ return y;
+ } else {
+ __HI(y) += ((k+1000)<<20);/* add k to y's exponent */
+ return y*twom1000;
+ }
+}
+
+JRT_LEAF(jdouble, SharedRuntime::dexp(jdouble x))
+ return __ieee754_exp(x);
+JRT_END
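+
+// Editor's note: illustrative sketch only, not part of the fdlibm sources.
+// Steps 1 and 3 of the method above in portable form: pick the integer k
+// nearest x/ln2, reduce to r = x - k*ln2 with |r| <= 0.5*ln2, and scale the
+// result by 2**k.  std::exp(r) stands in for the rational approximation of
+// step 2, and the hi/lo splitting of r is omitted, so this illustrates the
+// reduction only, not fdlibm's accuracy.
+#include <cmath>
+
+static double sketch_exp(double x) {                // assumes finite x in range
+  const double ln2 = 0.69314718055994530942;
+  int    k = (int)std::floor(x / ln2 + 0.5);        // nearest integer, x = k*ln2 + r
+  double r = x - k * ln2;                           // |r| <= 0.5*ln2
+  return std::ldexp(std::exp(r), k);                // exp(x) = 2**k * exp(r)
+}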
+
+/* __ieee754_pow(x,y) return x**y
+ *
+ * n
+ * Method: Let x = 2 * (1+f)
+ * 1. Compute and return log2(x) in two pieces:
+ * log2(x) = w1 + w2,
+ * where w1 has 53-24 = 29 bit trailing zeros.
+ * 2. Perform y*log2(x) = n+y' by simulating multi-precision
+ * arithmetic, where |y'|<=0.5.
+ * 3. Return x**y = 2**n*exp(y'*log2)
+ *
+ * Special cases:
+ * 1. (anything) ** 0 is 1
+ * 2. (anything) ** 1 is itself
+ * 3. (anything) ** NAN is NAN
+ * 4. NAN ** (anything except 0) is NAN
+ * 5. +-(|x| > 1) ** +INF is +INF
+ * 6. +-(|x| > 1) ** -INF is +0
+ * 7. +-(|x| < 1) ** +INF is +0
+ * 8. +-(|x| < 1) ** -INF is +INF
+ * 9. +-1 ** +-INF is NAN
+ * 10. +0 ** (+anything except 0, NAN) is +0
+ * 11. -0 ** (+anything except 0, NAN, odd integer) is +0
+ * 12. +0 ** (-anything except 0, NAN) is +INF
+ * 13. -0 ** (-anything except 0, NAN, odd integer) is +INF
+ * 14. -0 ** (odd integer) = -( +0 ** (odd integer) )
+ * 15. +INF ** (+anything except 0,NAN) is +INF
+ * 16. +INF ** (-anything except 0,NAN) is +0
+ * 17. -INF ** (anything) = -0 ** (-anything)
+ * 18. (-anything) ** (integer) is (-1)**(integer)*(+anything**integer)
+ * 19. (-anything except 0 and inf) ** (non-integer) is NAN
+ *
+ * Accuracy:
+ * pow(x,y) returns x**y nearly rounded. In particular
+ * pow(integer,integer)
+ * always returns the correct integer provided it is
+ * representable.
+ *
+ * Constants :
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+static const double
+bp[] = {1.0, 1.5,},
+ dp_h[] = { 0.0, 5.84962487220764160156e-01,}, /* 0x3FE2B803, 0x40000000 */
+ dp_l[] = { 0.0, 1.35003920212974897128e-08,}, /* 0x3E4CFDEB, 0x43CFD006 */
+ zeroX = 0.0,
+ two = 2.0,
+ two53 = 9007199254740992.0, /* 0x43400000, 0x00000000 */
+ /* poly coefs for (3/2)*(log(x)-2s-2/3*s**3) */
+ L1X = 5.99999999999994648725e-01, /* 0x3FE33333, 0x33333303 */
+ L2X = 4.28571428578550184252e-01, /* 0x3FDB6DB6, 0xDB6FABFF */
+ L3X = 3.33333329818377432918e-01, /* 0x3FD55555, 0x518F264D */
+ L4X = 2.72728123808534006489e-01, /* 0x3FD17460, 0xA91D4101 */
+ L5X = 2.30660745775561754067e-01, /* 0x3FCD864A, 0x93C9DB65 */
+ L6X = 2.06975017800338417784e-01, /* 0x3FCA7E28, 0x4A454EEF */
+ lg2 = 6.93147180559945286227e-01, /* 0x3FE62E42, 0xFEFA39EF */
+ lg2_h = 6.93147182464599609375e-01, /* 0x3FE62E43, 0x00000000 */
+ lg2_l = -1.90465429995776804525e-09, /* 0xBE205C61, 0x0CA86C39 */
+ ovt = 8.0085662595372944372e-0017, /* -(1024-log2(ovfl+.5ulp)) */
+ cp = 9.61796693925975554329e-01, /* 0x3FEEC709, 0xDC3A03FD =2/(3ln2) */
+ cp_h = 9.61796700954437255859e-01, /* 0x3FEEC709, 0xE0000000 =(float)cp */
+ cp_l = -7.02846165095275826516e-09, /* 0xBE3E2FE0, 0x145B01F5 =tail of cp_h*/
+ ivln2 = 1.44269504088896338700e+00, /* 0x3FF71547, 0x652B82FE =1/ln2 */
+ ivln2_h = 1.44269502162933349609e+00, /* 0x3FF71547, 0x60000000 =24b 1/ln2*/
+ ivln2_l = 1.92596299112661746887e-08; /* 0x3E54AE0B, 0xF85DDF44 =1/ln2 tail*/
+
+double __ieee754_pow(double x, double y) {
+ double z,ax,z_h,z_l,p_h,p_l;
+ double y1,t1,t2,r,s,t,u,v,w;
+ int i0,i1,i,j,k,yisint,n;
+ int hx,hy,ix,iy;
+ unsigned lx,ly;
+
+ i0 = ((*(int*)&one)>>29)^1; i1=1-i0;
+ hx = __HI(x); lx = __LO(x);
+ hy = __HI(y); ly = __LO(y);
+ ix = hx&0x7fffffff; iy = hy&0x7fffffff;
+
+ /* y==zero: x**0 = 1 */
+ if((iy|ly)==0) return one;
+
+ /* +-NaN return x+y */
+ if(ix > 0x7ff00000 || ((ix==0x7ff00000)&&(lx!=0)) ||
+ iy > 0x7ff00000 || ((iy==0x7ff00000)&&(ly!=0)))
+ return x+y;
+
+ /* determine if y is an odd int when x < 0
+ * yisint = 0 ... y is not an integer
+ * yisint = 1 ... y is an odd int
+ * yisint = 2 ... y is an even int
+ */
+ yisint = 0;
+ if(hx<0) {
+ if(iy>=0x43400000) yisint = 2; /* even integer y */
+ else if(iy>=0x3ff00000) {
+ k = (iy>>20)-0x3ff; /* exponent */
+ if(k>20) {
+ j = ly>>(52-k);
+ if((unsigned)(j<<(52-k))==ly) yisint = 2-(j&1);
+ } else if(ly==0) {
+ j = iy>>(20-k);
+ if((j<<(20-k))==iy) yisint = 2-(j&1);
+ }
+ }
+ }
+
+ /* special value of y */
+ if(ly==0) {
+ if (iy==0x7ff00000) { /* y is +-inf */
+ if(((ix-0x3ff00000)|lx)==0)
+ return y - y; /* inf**+-1 is NaN */
+ else if (ix >= 0x3ff00000)/* (|x|>1)**+-inf = inf,0 */
+ return (hy>=0)? y: zeroX;
+ else /* (|x|<1)**-,+inf = inf,0 */
+ return (hy<0)?-y: zeroX;
+ }
+ if(iy==0x3ff00000) { /* y is +-1 */
+ if(hy<0) return one/x; else return x;
+ }
+ if(hy==0x40000000) return x*x; /* y is 2 */
+ if(hy==0x3fe00000) { /* y is 0.5 */
+ if(hx>=0) /* x >= +0 */
+ return sqrt(x);
+ }
+ }
+
+ ax = fabsd(x);
+ /* special value of x */
+ if(lx==0) {
+ if(ix==0x7ff00000||ix==0||ix==0x3ff00000){
+ z = ax; /*x is +-0,+-inf,+-1*/
+ if(hy<0) z = one/z; /* z = (1/|x|) */
+ if(hx<0) {
+ if(((ix-0x3ff00000)|yisint)==0) {
+ z = (z-z)/(z-z); /* (-1)**non-int is NaN */
+ } else if(yisint==1)
+ z = -1.0*z; /* (x<0)**odd = -(|x|**odd) */
+ }
+ return z;
+ }
+ }
+
+ n = (hx>>31)+1;
+
+ /* (x<0)**(non-int) is NaN */
+ if((n|yisint)==0) return (x-x)/(x-x);
+
+ s = one; /* s (sign of result -ve**odd) = -1 else = 1 */
+ if((n|(yisint-1))==0) s = -one;/* (-ve)**(odd int) */
+
+ /* |y| is huge */
+ if(iy>0x41e00000) { /* if |y| > 2**31 */
+ if(iy>0x43f00000){ /* if |y| > 2**64, must o/uflow */
+ if(ix<=0x3fefffff) return (hy<0)? hugeX*hugeX:tiny*tiny;
+ if(ix>=0x3ff00000) return (hy>0)? hugeX*hugeX:tiny*tiny;
+ }
+ /* over/underflow if x is not close to one */
+ if(ix<0x3fefffff) return (hy<0)? s*hugeX*hugeX:s*tiny*tiny;
+ if(ix>0x3ff00000) return (hy>0)? s*hugeX*hugeX:s*tiny*tiny;
+ /* now |1-x| is tiny <= 2**-20, suffice to compute
+ log(x) by x-x^2/2+x^3/3-x^4/4 */
+ t = ax-one; /* t has 20 trailing zeros */
+ w = (t*t)*(0.5-t*(0.3333333333333333333333-t*0.25));
+ u = ivln2_h*t; /* ivln2_h has 21 sig. bits */
+ v = t*ivln2_l-w*ivln2;
+ t1 = u+v;
+ __LO(t1) = 0;
+ t2 = v-(t1-u);
+ } else {
+ double ss,s2,s_h,s_l,t_h,t_l;
+ n = 0;
+ /* take care subnormal number */
+ if(ix<0x00100000)
+ {ax *= two53; n -= 53; ix = __HI(ax); }
+ n += ((ix)>>20)-0x3ff;
+ j = ix&0x000fffff;
+ /* determine interval */
+ ix = j|0x3ff00000; /* normalize ix */
+ if(j<=0x3988E) k=0; /* |x|<sqrt(3/2) */
+ else if(j<0xBB67A) k=1; /* |x|<sqrt(3) */
+ else {k=0;n+=1;ix -= 0x00100000;}
+ __HI(ax) = ix;
+
+ /* compute ss = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
+ u = ax-bp[k]; /* bp[0]=1.0, bp[1]=1.5 */
+ v = one/(ax+bp[k]);
+ ss = u*v;
+ s_h = ss;
+ __LO(s_h) = 0;
+ /* t_h=ax+bp[k] High */
+ t_h = zeroX;
+ __HI(t_h)=((ix>>1)|0x20000000)+0x00080000+(k<<18);
+ t_l = ax - (t_h-bp[k]);
+ s_l = v*((u-s_h*t_h)-s_h*t_l);
+ /* compute log(ax) */
+ s2 = ss*ss;
+ r = s2*s2*(L1X+s2*(L2X+s2*(L3X+s2*(L4X+s2*(L5X+s2*L6X)))));
+ r += s_l*(s_h+ss);
+ s2 = s_h*s_h;
+ t_h = 3.0+s2+r;
+ __LO(t_h) = 0;
+ t_l = r-((t_h-3.0)-s2);
+ /* u+v = ss*(1+...) */
+ u = s_h*t_h;
+ v = s_l*t_h+t_l*ss;
+ /* 2/(3log2)*(ss+...) */
+ p_h = u+v;
+ __LO(p_h) = 0;
+ p_l = v-(p_h-u);
+ z_h = cp_h*p_h; /* cp_h+cp_l = 2/(3*log2) */
+ z_l = cp_l*p_h+p_l*cp+dp_l[k];
+ /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */
+ t = (double)n;
+ t1 = (((z_h+z_l)+dp_h[k])+t);
+ __LO(t1) = 0;
+ t2 = z_l-(((t1-t)-dp_h[k])-z_h);
+ }
+
+ /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
+ y1 = y;
+ __LO(y1) = 0;
+ p_l = (y-y1)*t1+y*t2;
+ p_h = y1*t1;
+ z = p_l+p_h;
+ j = __HI(z);
+ i = __LO(z);
+ if (j>=0x40900000) { /* z >= 1024 */
+ if(((j-0x40900000)|i)!=0) /* if z > 1024 */
+ return s*hugeX*hugeX; /* overflow */
+ else {
+ if(p_l+ovt>z-p_h) return s*hugeX*hugeX; /* overflow */
+ }
+ } else if((j&0x7fffffff)>=0x4090cc00 ) { /* z <= -1075 */
+ if(((j-0xc090cc00)|i)!=0) /* z < -1075 */
+ return s*tiny*tiny; /* underflow */
+ else {
+ if(p_l<=z-p_h) return s*tiny*tiny; /* underflow */
+ }
+ }
+ /*
+ * compute 2**(p_h+p_l)
+ */
+ i = j&0x7fffffff;
+ k = (i>>20)-0x3ff;
+ n = 0;
+ if(i>0x3fe00000) { /* if |z| > 0.5, set n = [z+0.5] */
+ n = j+(0x00100000>>(k+1));
+ k = ((n&0x7fffffff)>>20)-0x3ff; /* new k for n */
+ t = zeroX;
+ __HI(t) = (n&~(0x000fffff>>k));
+ n = ((n&0x000fffff)|0x00100000)>>(20-k);
+ if(j<0) n = -n;
+ p_h -= t;
+ }
+ t = p_l+p_h;
+ __LO(t) = 0;
+ u = t*lg2_h;
+ v = (p_l-(t-p_h))*lg2+t*lg2_l;
+ z = u+v;
+ w = v-(z-u);
+ t = z*z;
+ t1 = z - t*(P1+t*(P2+t*(P3+t*(P4+t*P5))));
+ r = (z*t1)/(t1-two)-(w+z*w);
+ z = one-(r-z);
+ j = __HI(z);
+ j += (n<<20);
+ if((j>>20)<=0) z = scalbn(z,n); /* subnormal output */
+ else __HI(z) += (n<<20);
+ return s*z;
+}
+
+
+JRT_LEAF(jdouble, SharedRuntime::dpow(jdouble x, jdouble y))
+ return __ieee754_pow(x, y);
+JRT_END
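+
+// Editor's note: illustrative sketch only, not part of the fdlibm sources.
+// The method above computes log2(x) and y*log2(x) in two pieces (w1+w2 and
+// n+y') to keep 2**n * 2**y' accurate; the sketch below performs the same
+// split in ordinary double precision, so it handles none of the special
+// cases 1-19 and loses accuracy, but it shows the shape of the computation.
+#include <cmath>
+
+static double sketch_pow(double x, double y) {      // assumes finite x > 0 and a
+  double w  = y * std::log2(x);                     // result in double range
+  double n  = std::floor(w + 0.5);                  // integer part of y*log2(x)
+  double yp = w - n;                                // |y'| <= 0.5
+  return std::ldexp(std::exp2(yp), (int)n);         // x**y = 2**n * 2**y'
+}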
+
+#ifdef WIN32
+# pragma optimize ( "", on )
+#endif
diff --git a/src/share/vm/runtime/sharedRuntimeTrig.cpp b/src/share/vm/runtime/sharedRuntimeTrig.cpp
new file mode 100644
index 000000000..b6fe6613d
--- /dev/null
+++ b/src/share/vm/runtime/sharedRuntimeTrig.cpp
@@ -0,0 +1,957 @@
+/*
+ * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharedRuntimeTrig.cpp.incl"
+
+// This file contains copies of the fdlibm routines used by
+// StrictMath. It turns out that it is almost always required to use
+// these runtime routines; the Intel CPU doesn't meet the Java
+// specification for sin/cos outside a certain limited argument range,
+// and the SPARC CPU doesn't appear to have sin/cos instructions. It
+// also turns out that avoiding the indirect call through function
+// pointer out to libjava.so in SharedRuntime speeds these routines up
+// by roughly 15% on both Win32/x86 and Solaris/SPARC.
+
+// Enabling optimizations in this file causes incorrect code to be
+// generated; we cannot figure out how to turn down optimization for one
+// file in the IDE on Windows
+#ifdef WIN32
+# pragma optimize ( "", off )
+#endif
+
+#include <math.h>
+
+// VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles
+// [jk] this is not 100% correct because the float word order may differ
+// from the byte order (e.g. on ARM)
+#ifdef VM_LITTLE_ENDIAN
+# define __HI(x) *(1+(int*)&x)
+# define __LO(x) *(int*)&x
+#else
+# define __HI(x) *(int*)&x
+# define __LO(x) *(1+(int*)&x)
+#endif
+
+static double copysignA(double x, double y) {
+ __HI(x) = (__HI(x)&0x7fffffff)|(__HI(y)&0x80000000);
+ return x;
+}
+
+/*
+ * scalbn (double x, int n)
+ * scalbn(x,n) returns x* 2**n computed by exponent
+ * manipulation rather than by actually performing an
+ * exponentiation or a multiplication.
+ */
+
+static const double
+two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
+twom54 = 5.55111512312578270212e-17, /* 0x3C900000, 0x00000000 */
+hugeX = 1.0e+300,
+tiny = 1.0e-300;
+
+static double scalbnA (double x, int n) {
+ int k,hx,lx;
+ hx = __HI(x);
+ lx = __LO(x);
+ k = (hx&0x7ff00000)>>20; /* extract exponent */
+ if (k==0) { /* 0 or subnormal x */
+ if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */
+ x *= two54;
+ hx = __HI(x);
+ k = ((hx&0x7ff00000)>>20) - 54;
+ if (n< -50000) return tiny*x; /*underflow*/
+ }
+ if (k==0x7ff) return x+x; /* NaN or Inf */
+ k = k+n;
+ if (k > 0x7fe) return hugeX*copysignA(hugeX,x); /* overflow */
+ if (k > 0) /* normal result */
+ {__HI(x) = (hx&0x800fffff)|(k<<20); return x;}
+ if (k <= -54) {
+ if (n > 50000) /* in case integer overflow in n+k */
+ return hugeX*copysignA(hugeX,x); /*overflow*/
+ else return tiny*copysignA(tiny,x); /*underflow*/
+ }
+ k += 54; /* subnormal result */
+ __HI(x) = (hx&0x800fffff)|(k<<20);
+ return x*twom54;
+}
+
+/*
+ * __kernel_rem_pio2(x,y,e0,nx,prec,ipio2)
+ * double x[],y[]; int e0,nx,prec; int ipio2[];
+ *
+ * __kernel_rem_pio2 returns the last three digits of N with
+ * y = x - N*pi/2
+ * so that |y| < pi/2.
+ *
+ * The method is to compute the integer (mod 8) and fraction parts of
+ * (2/pi)*x without doing the full multiplication. In general we
+ * skip the part of the product that is known to be a huge integer (
+ * more accurately, = 0 mod 8 ). Thus the number of operations is
+ * independent of the exponent of the input.
+ *
+ * (2/pi) is represented by an array of 24-bit integers in ipio2[].
+ *
+ * Input parameters:
+ * x[] The input value (must be positive) is broken into nx
+ * pieces of 24-bit integers in double precision format.
+ * x[i] will be the i-th 24 bit of x. The scaled exponent
+ * of x[0] is given in input parameter e0 (i.e., x[0]*2^e0
+ * matches x up to 24 bits).
+ *
+ * Example of breaking a double positive z into x[0]+x[1]+x[2]:
+ * e0 = ilogb(z)-23
+ * z = scalbn(z,-e0)
+ * for i = 0,1,2
+ * x[i] = floor(z)
+ * z = (z-x[i])*2**24
+ *
+ *
+ * y[] output result in an array of double precision numbers.
+ * The dimension of y[] is:
+ * 24-bit precision 1
+ * 53-bit precision 2
+ * 64-bit precision 2
+ * 113-bit precision 3
+ * The actual value is the sum of them. Thus for 113-bit
+ * precision, one may have to do something like:
+ *
+ * long double t,w,r_head, r_tail;
+ * t = (long double)y[2] + (long double)y[1];
+ * w = (long double)y[0];
+ * r_head = t+w;
+ * r_tail = w - (r_head - t);
+ *
+ * e0 The exponent of x[0]
+ *
+ * nx dimension of x[]
+ *
+ * prec an integer indicating the precision:
+ * 0 24 bits (single)
+ * 1 53 bits (double)
+ * 2 64 bits (extended)
+ * 3 113 bits (quad)
+ *
+ * ipio2[]
+ * integer array, contains the (24*i)-th to (24*i+23)-th
+ * bit of 2/pi after binary point. The corresponding
+ * floating value is
+ *
+ * ipio2[i] * 2^(-24(i+1)).
+ *
+ * External function:
+ * double scalbn(), floor();
+ *
+ *
+ * Here is the description of some local variables:
+ *
+ * jk jk+1 is the initial number of terms of ipio2[] needed
+ * in the computation. The recommended value is 2,3,4,
+ * 6 for single, double, extended, and quad.
+ *
+ * jz local integer variable indicating the number of
+ * terms of ipio2[] used.
+ *
+ * jx nx - 1
+ *
+ * jv index for pointing to the suitable ipio2[] for the
+ * computation. In general, we want
+ * ( 2^e0*x[0] * ipio2[jv-1]*2^(-24jv) )/8
+ * is an integer. Thus
+ * e0-3-24*jv >= 0 or (e0-3)/24 >= jv
+ * Hence jv = max(0,(e0-3)/24).
+ *
+ * jp jp+1 is the number of terms in PIo2[] needed, jp = jk.
+ *
+ * q[] double array with integral value, representing the
+ * 24-bits chunk of the product of x and 2/pi.
+ *
+ * q0 the corresponding exponent of q[0]. Note that the
+ * exponent for q[i] would be q0-24*i.
+ *
+ * PIo2[] double precision array, obtained by cutting pi/2
+ * into 24 bits chunks.
+ *
+ * f[] ipio2[] in floating point
+ *
+ * iq[] integer array by breaking up q[] in 24-bits chunk.
+ *
+ * fq[] final product of x*(2/pi) in fq[0],..,fq[jk]
+ *
+ * ih integer. If >0 it indicates q[] is >= 0.5, hence
+ * it also indicates the *sign* of the result.
+ *
+ */
+
+
+/*
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+
+static const int init_jk[] = {2,3,4,6}; /* initial value for jk */
+
+static const double PIo2[] = {
+ 1.57079625129699707031e+00, /* 0x3FF921FB, 0x40000000 */
+ 7.54978941586159635335e-08, /* 0x3E74442D, 0x00000000 */
+ 5.39030252995776476554e-15, /* 0x3CF84698, 0x80000000 */
+ 3.28200341580791294123e-22, /* 0x3B78CC51, 0x60000000 */
+ 1.27065575308067607349e-29, /* 0x39F01B83, 0x80000000 */
+ 1.22933308981111328932e-36, /* 0x387A2520, 0x40000000 */
+ 2.73370053816464559624e-44, /* 0x36E38222, 0x80000000 */
+ 2.16741683877804819444e-51, /* 0x3569F31D, 0x00000000 */
+};
+
+static const double
+zeroB = 0.0,
+one = 1.0,
+two24B = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
+twon24 = 5.96046447753906250000e-08; /* 0x3E700000, 0x00000000 */
+
+static int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec, const int *ipio2) {
+ int jz,jx,jv,jp,jk,carry,n,iq[20],i,j,k,m,q0,ih;
+ double z,fw,f[20],fq[20],q[20];
+
+ /* initialize jk*/
+ jk = init_jk[prec];
+ jp = jk;
+
+ /* determine jx,jv,q0, note that 3>q0 */
+ jx = nx-1;
+ jv = (e0-3)/24; if(jv<0) jv=0;
+ q0 = e0-24*(jv+1);
+
+ /* set up f[0] to f[jx+jk] where f[jx+jk] = ipio2[jv+jk] */
+ j = jv-jx; m = jx+jk;
+ for(i=0;i<=m;i++,j++) f[i] = (j<0)? zeroB : (double) ipio2[j];
+
+ /* compute q[0],q[1],...q[jk] */
+ for (i=0;i<=jk;i++) {
+ for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j]; q[i] = fw;
+ }
+
+ jz = jk;
+recompute:
+ /* distill q[] into iq[] in reverse order */
+ for(i=0,j=jz,z=q[jz];j>0;i++,j--) {
+ fw = (double)((int)(twon24* z));
+ iq[i] = (int)(z-two24B*fw);
+ z = q[j-1]+fw;
+ }
+
+ /* compute n */
+ z = scalbnA(z,q0); /* actual value of z */
+ z -= 8.0*floor(z*0.125); /* trim off integer >= 8 */
+ n = (int) z;
+ z -= (double)n;
+ ih = 0;
+ if(q0>0) { /* need iq[jz-1] to determine n */
+ i = (iq[jz-1]>>(24-q0)); n += i;
+ iq[jz-1] -= i<<(24-q0);
+ ih = iq[jz-1]>>(23-q0);
+ }
+ else if(q0==0) ih = iq[jz-1]>>23;
+ else if(z>=0.5) ih=2;
+
+ if(ih>0) { /* q > 0.5 */
+ n += 1; carry = 0;
+ for(i=0;i<jz ;i++) { /* compute 1-q */
+ j = iq[i];
+ if(carry==0) {
+ if(j!=0) {
+ carry = 1; iq[i] = 0x1000000- j;
+ }
+ } else iq[i] = 0xffffff - j;
+ }
+ if(q0>0) { /* rare case: chance is 1 in 12 */
+ switch(q0) {
+ case 1:
+ iq[jz-1] &= 0x7fffff; break;
+ case 2:
+ iq[jz-1] &= 0x3fffff; break;
+ }
+ }
+ if(ih==2) {
+ z = one - z;
+ if(carry!=0) z -= scalbnA(one,q0);
+ }
+ }
+
+ /* check if recomputation is needed */
+ if(z==zeroB) {
+ j = 0;
+ for (i=jz-1;i>=jk;i--) j |= iq[i];
+ if(j==0) { /* need recomputation */
+ for(k=1;iq[jk-k]==0;k++); /* k = no. of terms needed */
+
+ for(i=jz+1;i<=jz+k;i++) { /* add q[jz+1] to q[jz+k] */
+ f[jx+i] = (double) ipio2[jv+i];
+ for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j];
+ q[i] = fw;
+ }
+ jz += k;
+ goto recompute;
+ }
+ }
+
+ /* chop off zero terms */
+ if(z==0.0) {
+ jz -= 1; q0 -= 24;
+ while(iq[jz]==0) { jz--; q0-=24;}
+ } else { /* break z into 24-bit if necessary */
+ z = scalbnA(z,-q0);
+ if(z>=two24B) {
+ fw = (double)((int)(twon24*z));
+ iq[jz] = (int)(z-two24B*fw);
+ jz += 1; q0 += 24;
+ iq[jz] = (int) fw;
+ } else iq[jz] = (int) z ;
+ }
+
+ /* convert integer "bit" chunk to floating-point value */
+ fw = scalbnA(one,q0);
+ for(i=jz;i>=0;i--) {
+ q[i] = fw*(double)iq[i]; fw*=twon24;
+ }
+
+ /* compute PIo2[0,...,jp]*q[jz,...,0] */
+ for(i=jz;i>=0;i--) {
+ for(fw=0.0,k=0;k<=jp&&k<=jz-i;k++) fw += PIo2[k]*q[i+k];
+ fq[jz-i] = fw;
+ }
+
+ /* compress fq[] into y[] */
+ switch(prec) {
+ case 0:
+ fw = 0.0;
+ for (i=jz;i>=0;i--) fw += fq[i];
+ y[0] = (ih==0)? fw: -fw;
+ break;
+ case 1:
+ case 2:
+ fw = 0.0;
+ for (i=jz;i>=0;i--) fw += fq[i];
+ y[0] = (ih==0)? fw: -fw;
+ fw = fq[0]-fw;
+ for (i=1;i<=jz;i++) fw += fq[i];
+ y[1] = (ih==0)? fw: -fw;
+ break;
+ case 3: /* painful */
+ for (i=jz;i>0;i--) {
+ fw = fq[i-1]+fq[i];
+ fq[i] += fq[i-1]-fw;
+ fq[i-1] = fw;
+ }
+ for (i=jz;i>1;i--) {
+ fw = fq[i-1]+fq[i];
+ fq[i] += fq[i-1]-fw;
+ fq[i-1] = fw;
+ }
+ for (fw=0.0,i=jz;i>=2;i--) fw += fq[i];
+ if(ih==0) {
+ y[0] = fq[0]; y[1] = fq[1]; y[2] = fw;
+ } else {
+ y[0] = -fq[0]; y[1] = -fq[1]; y[2] = -fw;
+ }
+ }
+ return n&7;
+}
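+
+// Editor's note: illustrative sketch only, not part of the fdlibm sources.
+// The "example of breaking a double positive z into x[0]+x[1]+x[2]" given in
+// the comment block above, written out as a runnable helper: it produces the
+// scaled exponent e0 and the 24-bit pieces that __kernel_rem_pio2() expects
+// as input.  The helper name is invented.
+#include <cmath>
+
+static void sketch_split_into_24bit_pieces(double z, double x[3], int* e0) {
+  *e0 = std::ilogb(z) - 23;                 // assumes finite z > 0
+  z   = std::scalbn(z, -*e0);               // now 2**23 <= z < 2**24
+  for (int i = 0; i < 3; i++) {
+    x[i] = std::floor(z);                   // next 24-bit "digit" of the input
+    z    = (z - x[i]) * 16777216.0;         // shift the remainder up by 2**24
+  }
+}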
+
+
+/*
+ * ====================================================
+ * Copyright 13 Dec 1993 Sun Microsystems, Inc. All Rights Reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_rem_pio2(x,y)
+ *
+ * return the remainder of x rem pi/2 in y[0]+y[1]
+ * use __kernel_rem_pio2()
+ */
+
+/*
+ * Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
+ */
+static const int two_over_pi[] = {
+ 0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62,
+ 0x95993C, 0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A,
+ 0x424DD2, 0xE00649, 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129,
+ 0xA73EE8, 0x8235F5, 0x2EBB44, 0x84E99C, 0x7026B4, 0x5F7E41,
+ 0x3991D6, 0x398353, 0x39F49C, 0x845F8B, 0xBDF928, 0x3B1FF8,
+ 0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D, 0x367ECF,
+ 0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5,
+ 0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08,
+ 0x560330, 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3,
+ 0x91615E, 0xE61B08, 0x659985, 0x5F14A0, 0x68408D, 0xFFD880,
+ 0x4D7327, 0x310606, 0x1556CA, 0x73A8C9, 0x60E27B, 0xC08C6B,
+};
+
+static const int npio2_hw[] = {
+ 0x3FF921FB, 0x400921FB, 0x4012D97C, 0x401921FB, 0x401F6A7A, 0x4022D97C,
+ 0x4025FDBB, 0x402921FB, 0x402C463A, 0x402F6A7A, 0x4031475C, 0x4032D97C,
+ 0x40346B9C, 0x4035FDBB, 0x40378FDB, 0x403921FB, 0x403AB41B, 0x403C463A,
+ 0x403DD85A, 0x403F6A7A, 0x40407E4C, 0x4041475C, 0x4042106C, 0x4042D97C,
+ 0x4043A28C, 0x40446B9C, 0x404534AC, 0x4045FDBB, 0x4046C6CB, 0x40478FDB,
+ 0x404858EB, 0x404921FB,
+};
+
+/*
+ * invpio2: 53 bits of 2/pi
+ * pio2_1: first 33 bit of pi/2
+ * pio2_1t: pi/2 - pio2_1
+ * pio2_2: second 33 bit of pi/2
+ * pio2_2t: pi/2 - (pio2_1+pio2_2)
+ * pio2_3: third 33 bit of pi/2
+ * pio2_3t: pi/2 - (pio2_1+pio2_2+pio2_3)
+ */
+
+static const double
+zeroA = 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+half = 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+two24A = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
+invpio2 = 6.36619772367581382433e-01, /* 0x3FE45F30, 0x6DC9C883 */
+pio2_1 = 1.57079632673412561417e+00, /* 0x3FF921FB, 0x54400000 */
+pio2_1t = 6.07710050650619224932e-11, /* 0x3DD0B461, 0x1A626331 */
+pio2_2 = 6.07710050630396597660e-11, /* 0x3DD0B461, 0x1A600000 */
+pio2_2t = 2.02226624879595063154e-21, /* 0x3BA3198A, 0x2E037073 */
+pio2_3 = 2.02226624871116645580e-21, /* 0x3BA3198A, 0x2E000000 */
+pio2_3t = 8.47842766036889956997e-32; /* 0x397B839A, 0x252049C1 */
+
+static int __ieee754_rem_pio2(double x, double *y) {
+ double z,w,t,r,fn;
+ double tx[3];
+ int e0,i,j,nx,n,ix,hx,i0;
+
+ i0 = ((*(int*)&two24A)>>30)^1; /* high word index */
+ hx = *(i0+(int*)&x); /* high word of x */
+ ix = hx&0x7fffffff;
+ if(ix<=0x3fe921fb) /* |x| ~<= pi/4 , no need for reduction */
+ {y[0] = x; y[1] = 0; return 0;}
+ if(ix<0x4002d97c) { /* |x| < 3pi/4, special case with n=+-1 */
+ if(hx>0) {
+ z = x - pio2_1;
+ if(ix!=0x3ff921fb) { /* 33+53 bit pi is good enough */
+ y[0] = z - pio2_1t;
+ y[1] = (z-y[0])-pio2_1t;
+ } else { /* near pi/2, use 33+33+53 bit pi */
+ z -= pio2_2;
+ y[0] = z - pio2_2t;
+ y[1] = (z-y[0])-pio2_2t;
+ }
+ return 1;
+ } else { /* negative x */
+ z = x + pio2_1;
+ if(ix!=0x3ff921fb) { /* 33+53 bit pi is good enough */
+ y[0] = z + pio2_1t;
+ y[1] = (z-y[0])+pio2_1t;
+ } else { /* near pi/2, use 33+33+53 bit pi */
+ z += pio2_2;
+ y[0] = z + pio2_2t;
+ y[1] = (z-y[0])+pio2_2t;
+ }
+ return -1;
+ }
+ }
+ if(ix<=0x413921fb) { /* |x| ~<= 2^19*(pi/2), medium size */
+ t = fabsd(x);
+ n = (int) (t*invpio2+half);
+ fn = (double)n;
+ r = t-fn*pio2_1;
+ w = fn*pio2_1t; /* 1st round good to 85 bit */
+ if(n<32&&ix!=npio2_hw[n-1]) {
+ y[0] = r-w; /* quick check no cancellation */
+ } else {
+ j = ix>>20;
+ y[0] = r-w;
+ i = j-(((*(i0+(int*)&y[0]))>>20)&0x7ff);
+ if(i>16) { /* 2nd iteration needed, good to 118 */
+ t = r;
+ w = fn*pio2_2;
+ r = t-w;
+ w = fn*pio2_2t-((t-r)-w);
+ y[0] = r-w;
+ i = j-(((*(i0+(int*)&y[0]))>>20)&0x7ff);
+ if(i>49) { /* 3rd iteration needed, 151 bits acc */
+ t = r; /* will cover all possible cases */
+ w = fn*pio2_3;
+ r = t-w;
+ w = fn*pio2_3t-((t-r)-w);
+ y[0] = r-w;
+ }
+ }
+ }
+ y[1] = (r-y[0])-w;
+ if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
+ else return n;
+ }
+ /*
+ * all other (large) arguments
+ */
+ if(ix>=0x7ff00000) { /* x is inf or NaN */
+ y[0]=y[1]=x-x; return 0;
+ }
+ /* set z = scalbn(|x|,ilogb(x)-23) */
+ *(1-i0+(int*)&z) = *(1-i0+(int*)&x);
+ e0 = (ix>>20)-1046; /* e0 = ilogb(z)-23; */
+ *(i0+(int*)&z) = ix - (e0<<20);
+ for(i=0;i<2;i++) {
+ tx[i] = (double)((int)(z));
+ z = (z-tx[i])*two24A;
+ }
+ tx[2] = z;
+ nx = 3;
+ while(tx[nx-1]==zeroA) nx--; /* skip zero term */
+ n = __kernel_rem_pio2(tx,y,e0,nx,2,two_over_pi);
+ if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
+ return n;
+}
+
+
+/* __kernel_sin( x, y, iy)
+ * kernel sin function on [-pi/4, pi/4], pi/4 ~ 0.7854
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ * Input iy indicates whether y is 0. (if iy=0, y is assumed to be 0).
+ *
+ * Algorithm
+ * 1. Since sin(-x) = -sin(x), we need only to consider positive x.
+ * 2. if x < 2^-27 (hx<0x3e400000 0), return x with inexact if x!=0.
+ * 3. sin(x) is approximated by a polynomial of degree 13 on
+ * [0,pi/4]
+ * 3 13
+ * sin(x) ~ x + S1*x + ... + S6*x
+ * where
+ *
+ * |sin(x) 2 4 6 8 10 12 | -58
+ * |----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x +S6*x )| <= 2
+ * | x |
+ *
+ * 4. sin(x+y) = sin(x) + sin'(x')*y
+ * ~ sin(x) + (1-x*x/2)*y
+ * For better accuracy, let
+ * 3 2 2 2 2
+ * r = x *(S2+x *(S3+x *(S4+x *(S5+x *S6))))
+ * then 3 2
+ * sin(x) = x + (S1*x + (x *(r-y/2)+y))
+ */
+
+static const double
+S1 = -1.66666666666666324348e-01, /* 0xBFC55555, 0x55555549 */
+S2 = 8.33333333332248946124e-03, /* 0x3F811111, 0x1110F8A6 */
+S3 = -1.98412698298579493134e-04, /* 0xBF2A01A0, 0x19C161D5 */
+S4 = 2.75573137070700676789e-06, /* 0x3EC71DE3, 0x57B1FE7D */
+S5 = -2.50507602534068634195e-08, /* 0xBE5AE5E6, 0x8A2B9CEB */
+S6 = 1.58969099521155010221e-10; /* 0x3DE5D93A, 0x5ACFD57C */
+
+static double __kernel_sin(double x, double y, int iy)
+{
+ double z,r,v;
+ int ix;
+ ix = __HI(x)&0x7fffffff; /* high word of x */
+ if(ix<0x3e400000) /* |x| < 2**-27 */
+ {if((int)x==0) return x;} /* generate inexact */
+ z = x*x;
+ v = z*x;
+ r = S2+z*(S3+z*(S4+z*(S5+z*S6)));
+ if(iy==0) return x+v*(S1+z*r);
+ else return x-((z*(half*y-v*r)-y)-v*S1);
+}
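+
+// Editor's note: illustrative sketch only, not part of the fdlibm sources.
+// The iy==0 branch above, written out on its own: evaluate the degree-13 odd
+// polynomial with the S1..S6 coefficients by Horner's rule.  For |x| <= ~pi/4
+// (and a zero tail y) it returns the same value as __kernel_sin(x, 0.0, 0).
+static double sketch_sin_poly(double x) {
+  double z = x * x;
+  double r = S2 + z * (S3 + z * (S4 + z * (S5 + z * S6)));
+  return x + (z * x) * (S1 + z * r);        // x + S1*x^3 + ... + S6*x^13
+}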
+
+/*
+ * __kernel_cos( x, y )
+ * kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ *
+ * Algorithm
+ * 1. Since cos(-x) = cos(x), we need only to consider positive x.
+ * 2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0.
+ * 3. cos(x) is approximated by a polynomial of degree 14 on
+ * [0,pi/4]
+ * 4 14
+ * cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x
+ * where the remez error is
+ *
+ * | 2 4 6 8 10 12 14 | -58
+ * |cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x +C6*x )| <= 2
+ * | |
+ *
+ * 4 6 8 10 12 14
+ * 4. let r = C1*x +C2*x +C3*x +C4*x +C5*x +C6*x , then
+ * cos(x) = 1 - x*x/2 + r
+ * since cos(x+y) ~ cos(x) - sin(x)*y
+ * ~ cos(x) - x*y,
+ * a correction term is necessary in cos(x) and hence
+ * cos(x+y) = 1 - (x*x/2 - (r - x*y))
+ * For better accuracy when x > 0.3, let qx = |x|/4 with
+ * the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125.
+ * Then
+ * cos(x+y) = (1-qx) - ((x*x/2-qx) - (r-x*y)).
+ * Note that 1-qx and (x*x/2-qx) is EXACT here, and the
+ * magnitude of the latter is at least a quarter of x*x/2,
+ * thus, reducing the rounding error in the subtraction.
+ */
+
+static const double
+C1 = 4.16666666666666019037e-02, /* 0x3FA55555, 0x5555554C */
+C2 = -1.38888888888741095749e-03, /* 0xBF56C16C, 0x16C15177 */
+C3 = 2.48015872894767294178e-05, /* 0x3EFA01A0, 0x19CB1590 */
+C4 = -2.75573143513906633035e-07, /* 0xBE927E4F, 0x809C52AD */
+C5 = 2.08757232129817482790e-09, /* 0x3E21EE9E, 0xBDB4B1C4 */
+C6 = -1.13596475577881948265e-11; /* 0xBDA8FAE9, 0xBE8838D4 */
+
+static double __kernel_cos(double x, double y)
+{
+ double a,hz,z,r,qx;
+ int ix;
+ ix = __HI(x)&0x7fffffff; /* ix = |x|'s high word*/
+ if(ix<0x3e400000) { /* if |x| < 2**-27 */
+ if(((int)x)==0) return one; /* generate inexact */
+ }
+ z = x*x;
+ r = z*(C1+z*(C2+z*(C3+z*(C4+z*(C5+z*C6)))));
+ if(ix < 0x3FD33333) /* if |x| < 0.3 */
+ return one - (0.5*z - (z*r - x*y));
+ else {
+ if(ix > 0x3fe90000) { /* x > 0.78125 */
+ qx = 0.28125;
+ } else {
+ __HI(qx) = ix-0x00200000; /* x/4 */
+ __LO(qx) = 0;
+ }
+ hz = 0.5*z-qx;
+ a = one-qx;
+ return a - (hz - (z*r-x*y));
+ }
+}
+
+/* __kernel_tan( x, y, k )
+ * kernel tan function on [-pi/4, pi/4], pi/4 ~ 0.7854
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ * Input k indicates whether tan (if k=1) or
+ * -1/tan (if k= -1) is returned.
+ *
+ * Algorithm
+ * 1. Since tan(-x) = -tan(x), we need only to consider positive x.
+ * 2. if x < 2^-28 (hx<0x3e300000 0), return x with inexact if x!=0.
+ * 3. tan(x) is approximated by an odd polynomial of degree 27 on
+ * [0,0.67434]
+ * 3 27
+ * tan(x) ~ x + T1*x + ... + T13*x
+ * where
+ *
+ * |tan(x) 2 4 26 | -59.2
+ * |----- - (1+T1*x +T2*x +.... +T13*x )| <= 2
+ * | x |
+ *
+ * Note: tan(x+y) = tan(x) + tan'(x)*y
+ * ~ tan(x) + (1+x*x)*y
+ * Therefore, for better accuracy in computing tan(x+y), let
+ * 3 2 2 2 2
+ * r = x *(T2+x *(T3+x *(...+x *(T12+x *T13))))
+ * then
+ * 3 2
+ * tan(x+y) = x + (T1*x + (x *(r+y)+y))
+ *
+ * 4. For x in [0.67434,pi/4], let y = pi/4 - x, then
+ * tan(x) = tan(pi/4-y) = (1-tan(y))/(1+tan(y))
+ * = 1 - 2*(tan(y) - (tan(y)^2)/(1+tan(y)))
+ */
+
+static const double
+pio4 = 7.85398163397448278999e-01, /* 0x3FE921FB, 0x54442D18 */
+pio4lo= 3.06161699786838301793e-17, /* 0x3C81A626, 0x33145C07 */
+T[] = {
+ 3.33333333333334091986e-01, /* 0x3FD55555, 0x55555563 */
+ 1.33333333333201242699e-01, /* 0x3FC11111, 0x1110FE7A */
+ 5.39682539762260521377e-02, /* 0x3FABA1BA, 0x1BB341FE */
+ 2.18694882948595424599e-02, /* 0x3F9664F4, 0x8406D637 */
+ 8.86323982359930005737e-03, /* 0x3F8226E3, 0xE96E8493 */
+ 3.59207910759131235356e-03, /* 0x3F6D6D22, 0xC9560328 */
+ 1.45620945432529025516e-03, /* 0x3F57DBC8, 0xFEE08315 */
+ 5.88041240820264096874e-04, /* 0x3F4344D8, 0xF2F26501 */
+ 2.46463134818469906812e-04, /* 0x3F3026F7, 0x1A8D1068 */
+ 7.81794442939557092300e-05, /* 0x3F147E88, 0xA03792A6 */
+ 7.14072491382608190305e-05, /* 0x3F12B80F, 0x32F0A7E9 */
+ -1.85586374855275456654e-05, /* 0xBEF375CB, 0xDB605373 */
+ 2.59073051863633712884e-05, /* 0x3EFB2A70, 0x74BF7AD4 */
+};
+
+static double __kernel_tan(double x, double y, int iy)
+{
+ double z,r,v,w,s;
+ int ix,hx;
+ hx = __HI(x); /* high word of x */
+ ix = hx&0x7fffffff; /* high word of |x| */
+ if(ix<0x3e300000) { /* x < 2**-28 */
+ if((int)x==0) { /* generate inexact */
+ if (((ix | __LO(x)) | (iy + 1)) == 0)
+ return one / fabsd(x);
+ else {
+ if (iy == 1)
+ return x;
+ else { /* compute -1 / (x+y) carefully */
+ double a, t;
+
+ z = w = x + y;
+ __LO(z) = 0;
+ v = y - (z - x);
+ t = a = -one / w;
+ __LO(t) = 0;
+ s = one + t * z;
+ return t + a * (s + t * v);
+ }
+ }
+ }
+ }
+ if(ix>=0x3FE59428) { /* |x|>=0.6744 */
+ if(hx<0) {x = -x; y = -y;}
+ z = pio4-x;
+ w = pio4lo-y;
+ x = z+w; y = 0.0;
+ }
+ z = x*x;
+ w = z*z;
+ /* Break x^5*(T[1]+x^2*T[2]+...) into
+ * x^5(T[1]+x^4*T[3]+...+x^20*T[11]) +
+ * x^5(x^2*(T[2]+x^4*T[4]+...+x^22*T[12]))
+ */
+ r = T[1]+w*(T[3]+w*(T[5]+w*(T[7]+w*(T[9]+w*T[11]))));
+ v = z*(T[2]+w*(T[4]+w*(T[6]+w*(T[8]+w*(T[10]+w*T[12])))));
+ s = z*x;
+ r = y + z*(s*(r+v)+y);
+ r += T[0]*s;
+ w = x+r;
+ if(ix>=0x3FE59428) {
+ v = (double)iy;
+ return (double)(1-((hx>>30)&2))*(v-2.0*(x-(w*w/(w+v)-r)));
+ }
+ if(iy==1) return w;
+ else { /* if we allow error up to 2 ulp,
+ simply return -1.0/(x+r) here */
+ /* compute -1.0/(x+r) accurately */
+ double a,t;
+ z = w;
+ __LO(z) = 0;
+ v = r-(z - x); /* z+v = r+x */
+ t = a = -1.0/w; /* a = -1.0/w */
+ __LO(t) = 0;
+ s = 1.0+t*z;
+ return t+a*(s+t*v);
+ }
+}
+
+
+//----------------------------------------------------------------------
+//
+// Routines for new sin/cos implementation
+//
+//----------------------------------------------------------------------
+
+/* sin(x)
+ * Return sine function of x.
+ *
+ * kernel function:
+ * __kernel_sin ... sine function on [-pi/4,pi/4]
+ * __kernel_cos ... cosine function on [-pi/4,pi/4]
+ * __ieee754_rem_pio2 ... argument reduction routine
+ *
+ * Method.
+ * Let S,C and T denote the sin, cos and tan respectively on
+ * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ * in [-pi/4 , +pi/4], and let n = k mod 4.
+ * We have
+ *
+ * n sin(x) cos(x) tan(x)
+ * ----------------------------------------------------------
+ * 0 S C T
+ * 1 C -S -1/T
+ * 2 -S -C T
+ * 3 -C S -1/T
+ * ----------------------------------------------------------
+ *
+ * Special cases:
+ * Let trig be any of sin, cos, or tan.
+ * trig(+-INF) is NaN, with signals;
+ * trig(NaN) is that NaN;
+ *
+ * Accuracy:
+ * TRIG(x) returns trig(x) nearly rounded
+ */
+
+JRT_LEAF(jdouble, SharedRuntime::dsin(jdouble x))
+ double y[2],z=0.0;
+ int n, ix;
+
+ /* High word of x. */
+ ix = __HI(x);
+
+ /* |x| ~< pi/4 */
+ ix &= 0x7fffffff;
+ if(ix <= 0x3fe921fb) return __kernel_sin(x,z,0);
+
+ /* sin(Inf or NaN) is NaN */
+ else if (ix>=0x7ff00000) return x-x;
+
+ /* argument reduction needed */
+ else {
+ n = __ieee754_rem_pio2(x,y);
+ switch(n&3) {
+ case 0: return __kernel_sin(y[0],y[1],1);
+ case 1: return __kernel_cos(y[0],y[1]);
+ case 2: return -__kernel_sin(y[0],y[1],1);
+ default:
+ return -__kernel_cos(y[0],y[1]);
+ }
+ }
+JRT_END
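+
+// Editor's note: illustrative sketch only, not part of the fdlibm sources.
+// The quadrant table in the comment above, demonstrated with library calls:
+// reduce x to y = x - k*pi/2 and dispatch on n = k mod 4.  A single-double
+// reduction like this loses accuracy for large |x|, which is exactly why the
+// real code carries the reduced argument as the pair y[0]+y[1] produced by
+// __ieee754_rem_pio2().  The helper name is invented.
+#include <cmath>
+
+static double sketch_sin_by_quadrant(double x) {    // assumes finite x
+  const double pio2 = 1.57079632679489661923;
+  double k = std::floor(x / pio2 + 0.5);            // nearest multiple of pi/2
+  double y = x - k * pio2;                          // |y| <= ~pi/4
+  int    n = (int)std::fmod(std::fmod(k, 4.0) + 4.0, 4.0);   // k mod 4, in 0..3
+  switch (n) {
+    case 0:  return  std::sin(y);                   //  S
+    case 1:  return  std::cos(y);                   //  C
+    case 2:  return -std::sin(y);                   // -S
+    default: return -std::cos(y);                   // -C
+  }
+}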
+
+/* cos(x)
+ * Return cosine function of x.
+ *
+ * kernel function:
+ * __kernel_sin ... sine function on [-pi/4,pi/4]
+ * __kernel_cos ... cosine function on [-pi/4,pi/4]
+ * __ieee754_rem_pio2 ... argument reduction routine
+ *
+ * Method.
+ * Let S,C and T denote the sin, cos and tan respectively on
+ * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ * in [-pi/4 , +pi/4], and let n = k mod 4.
+ * We have
+ *
+ * n sin(x) cos(x) tan(x)
+ * ----------------------------------------------------------
+ * 0 S C T
+ * 1 C -S -1/T
+ * 2 -S -C T
+ * 3 -C S -1/T
+ * ----------------------------------------------------------
+ *
+ * Special cases:
+ * Let trig be any of sin, cos, or tan.
+ * trig(+-INF) is NaN, with signals;
+ * trig(NaN) is that NaN;
+ *
+ * Accuracy:
+ * TRIG(x) returns trig(x) nearly rounded
+ */
+
+JRT_LEAF(jdouble, SharedRuntime::dcos(jdouble x))
+ double y[2],z=0.0;
+ int n, ix;
+
+ /* High word of x. */
+ ix = __HI(x);
+
+ /* |x| ~< pi/4 */
+ ix &= 0x7fffffff;
+ if(ix <= 0x3fe921fb) return __kernel_cos(x,z);
+
+ /* cos(Inf or NaN) is NaN */
+ else if (ix>=0x7ff00000) return x-x;
+
+ /* argument reduction needed */
+ else {
+ n = __ieee754_rem_pio2(x,y);
+ switch(n&3) {
+ case 0: return __kernel_cos(y[0],y[1]);
+ case 1: return -__kernel_sin(y[0],y[1],1);
+ case 2: return -__kernel_cos(y[0],y[1]);
+ default:
+ return __kernel_sin(y[0],y[1],1);
+ }
+ }
+JRT_END
+
+/* tan(x)
+ * Return tangent function of x.
+ *
+ * kernel function:
+ * __kernel_tan ... tangent function on [-pi/4,pi/4]
+ * __ieee754_rem_pio2 ... argument reduction routine
+ *
+ * Method.
+ * Let S,C and T denote the sin, cos and tan respectively on
+ * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ * in [-pi/4 , +pi/4], and let n = k mod 4.
+ * We have
+ *
+ * n sin(x) cos(x) tan(x)
+ * ----------------------------------------------------------
+ * 0 S C T
+ * 1 C -S -1/T
+ * 2 -S -C T
+ * 3 -C S -1/T
+ * ----------------------------------------------------------
+ *
+ * Special cases:
+ * Let trig be any of sin, cos, or tan.
+ * trig(+-INF) is NaN, with signals;
+ * trig(NaN) is that NaN;
+ *
+ * Accuracy:
+ * TRIG(x) returns trig(x) nearly rounded
+ */
+
+JRT_LEAF(jdouble, SharedRuntime::dtan(jdouble x))
+ double y[2],z=0.0;
+ int n, ix;
+
+ /* High word of x. */
+ ix = __HI(x);
+
+ /* |x| ~< pi/4 */
+ ix &= 0x7fffffff;
+ if(ix <= 0x3fe921fb) return __kernel_tan(x,z,1);
+
+ /* tan(Inf or NaN) is NaN */
+ else if (ix>=0x7ff00000) return x-x; /* NaN */
+
+ /* argument reduction needed */
+ else {
+ n = __ieee754_rem_pio2(x,y);
+ return __kernel_tan(y[0],y[1],1-((n&1)<<1)); /* 1 -- n even
+ -1 -- n odd */
+ }
+JRT_END
+
+
+#ifdef WIN32
+# pragma optimize ( "", on )
+#endif
diff --git a/src/share/vm/runtime/signature.cpp b/src/share/vm/runtime/signature.cpp
new file mode 100644
index 000000000..c9c3859a6
--- /dev/null
+++ b/src/share/vm/runtime/signature.cpp
@@ -0,0 +1,432 @@
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_signature.cpp.incl"
+
+
+// Implementation of SignatureIterator
+
+// Signature syntax:
+//
+// Signature = "(" {Parameter} ")" ReturnType.
+// Parameter = FieldType.
+// ReturnType = FieldType | "V".
+// FieldType = "B" | "C" | "D" | "F" | "I" | "J" | "S" | "Z" | "L" ClassName ";" | "[" FieldType.
+// ClassName = string.
+
+
+SignatureIterator::SignatureIterator(symbolHandle signature) {
+ assert(signature->is_symbol(), "not a symbol");
+ _signature = signature;
+ _parameter_index = 0;
+}
+
+// Overloaded version called without handle
+SignatureIterator::SignatureIterator(symbolOop signature) {
+ symbolHandle sh(Thread::current(), signature);
+ _signature = sh;
+ _parameter_index = 0;
+}
+
+SignatureIterator::SignatureIterator(Thread *thread, symbolOop signature) {
+ symbolHandle sh(thread, signature);
+ _signature = sh;
+ _parameter_index = 0;
+}
+
+void SignatureIterator::expect(char c) {
+ if (_signature->byte_at(_index) != c) fatal1("expecting %c", c);
+ _index++;
+}
+
+
+void SignatureIterator::skip_optional_size() {
+ symbolOop sig = _signature();
+ char c = sig->byte_at(_index);
+ while ('0' <= c && c <= '9') c = sig->byte_at(++_index);
+}
+
+
+int SignatureIterator::parse_type() {
+ // Note: This function could be simplified by using "return T_XXX_size;"
+ // instead of the assignment and the break statements. However, it
+ // seems that the product build for win32_i486 with MS VC++ 6.0 doesn't
+ // work (stack underflow for some tests) - this seems to be a VC++ 6.0
+ // compiler bug (was problem - gri 4/27/2000).
+ int size = -1;
+ switch(_signature->byte_at(_index)) {
+ case 'B': do_byte (); if (_parameter_index < 0 ) _return_type = T_BYTE;
+ _index++; size = T_BYTE_size ; break;
+ case 'C': do_char (); if (_parameter_index < 0 ) _return_type = T_CHAR;
+ _index++; size = T_CHAR_size ; break;
+ case 'D': do_double(); if (_parameter_index < 0 ) _return_type = T_DOUBLE;
+ _index++; size = T_DOUBLE_size ; break;
+ case 'F': do_float (); if (_parameter_index < 0 ) _return_type = T_FLOAT;
+ _index++; size = T_FLOAT_size ; break;
+ case 'I': do_int (); if (_parameter_index < 0 ) _return_type = T_INT;
+ _index++; size = T_INT_size ; break;
+ case 'J': do_long (); if (_parameter_index < 0 ) _return_type = T_LONG;
+ _index++; size = T_LONG_size ; break;
+ case 'S': do_short (); if (_parameter_index < 0 ) _return_type = T_SHORT;
+ _index++; size = T_SHORT_size ; break;
+ case 'Z': do_bool (); if (_parameter_index < 0 ) _return_type = T_BOOLEAN;
+ _index++; size = T_BOOLEAN_size; break;
+ case 'V': do_void (); if (_parameter_index < 0 ) _return_type = T_VOID;
+ _index++; size = T_VOID_size ; break;
+ case 'L':
+ { int begin = ++_index;
+ symbolOop sig = _signature();
+ while (sig->byte_at(_index++) != ';') ;
+ do_object(begin, _index);
+ }
+ if (_parameter_index < 0 ) _return_type = T_OBJECT;
+ size = T_OBJECT_size;
+ break;
+ case '[':
+ { int begin = ++_index;
+ skip_optional_size();
+ symbolOop sig = _signature();
+ while (sig->byte_at(_index) == '[') {
+ _index++;
+ skip_optional_size();
+ }
+ if (sig->byte_at(_index) == 'L') {
+ while (sig->byte_at(_index++) != ';') ;
+ } else {
+ _index++;
+ }
+ do_array(begin, _index);
+ if (_parameter_index < 0 ) _return_type = T_ARRAY;
+ }
+ size = T_ARRAY_size;
+ break;
+ default:
+ ShouldNotReachHere();
+ break;
+ }
+ assert(size >= 0, "size must be set");
+ return size;
+}
+
+
+void SignatureIterator::check_signature_end() {
+ if (_index < _signature->utf8_length()) {
+ tty->print_cr("too many chars in signature");
+ _signature->print_value_on(tty);
+ tty->print_cr(" @ %d", _index);
+ }
+}
+
+
+void SignatureIterator::dispatch_field() {
+ // no '(', just one (field) type
+ _index = 0;
+ _parameter_index = 0;
+ parse_type();
+ check_signature_end();
+}
+
+
+void SignatureIterator::iterate_parameters() {
+ // Parse parameters
+ _index = 0;
+ _parameter_index = 0;
+ expect('(');
+ while (_signature->byte_at(_index) != ')') _parameter_index += parse_type();
+ expect(')');
+ _parameter_index = 0;
+}
+
+// Optimized version of iterate_parameters when fingerprint is known
+void SignatureIterator::iterate_parameters( uint64_t fingerprint ) {
+ uint64_t saved_fingerprint = fingerprint;
+
+ // Check for too many arguments
+ if ( fingerprint == UCONST64(-1) ) {
+ SignatureIterator::iterate_parameters();
+ return;
+ }
+
+ assert(fingerprint, "Fingerprint should not be 0");
+
+ _parameter_index = 0;
+ fingerprint = fingerprint >> (static_feature_size + result_feature_size);
+ while ( 1 ) {
+ switch ( fingerprint & parameter_feature_mask ) {
+ case bool_parm:
+ do_bool();
+ _parameter_index += T_BOOLEAN_size;
+ break;
+ case byte_parm:
+ do_byte();
+ _parameter_index += T_BYTE_size;
+ break;
+ case char_parm:
+ do_char();
+ _parameter_index += T_CHAR_size;
+ break;
+ case short_parm:
+ do_short();
+ _parameter_index += T_SHORT_size;
+ break;
+ case int_parm:
+ do_int();
+ _parameter_index += T_INT_size;
+ break;
+ case obj_parm:
+ do_object(0, 0);
+ _parameter_index += T_OBJECT_size;
+ break;
+ case long_parm:
+ do_long();
+ _parameter_index += T_LONG_size;
+ break;
+ case float_parm:
+ do_float();
+ _parameter_index += T_FLOAT_size;
+ break;
+ case double_parm:
+ do_double();
+ _parameter_index += T_DOUBLE_size;
+ break;
+ case done_parm:
+ return;
+ break;
+ default:
+ tty->print_cr("*** parameter is %d", fingerprint & parameter_feature_mask);
+ tty->print_cr("*** fingerprint is " PTR64_FORMAT, saved_fingerprint);
+ ShouldNotReachHere();
+ break;
+ }
+ fingerprint >>= parameter_feature_size;
+ }
+ _parameter_index = 0;
+}
+
+
+void SignatureIterator::iterate_returntype() {
+ // Ignore parameters
+ _index = 0;
+ expect('(');
+ symbolOop sig = _signature();
+ while (sig->byte_at(_index) != ')') _index++;
+ expect(')');
+ // Parse return type
+ _parameter_index = -1;
+ parse_type();
+ check_signature_end();
+ _parameter_index = 0;
+}
+
+
+void SignatureIterator::iterate() {
+ // Parse parameters
+ _parameter_index = 0;
+ _index = 0;
+ expect('(');
+ while (_signature->byte_at(_index) != ')') _parameter_index += parse_type();
+ expect(')');
+ // Parse return type
+ _parameter_index = -1;
+ parse_type();
+ check_signature_end();
+ _parameter_index = 0;
+}
+
+
+// Implementation of SignatureStream
+
+bool SignatureStream::is_done() const {
+ return _end > _signature()->utf8_length();
+}
+
+
+void SignatureStream::next_non_primitive(int t) {
+ switch (t) {
+ case 'L': {
+ _type = T_OBJECT;
+ symbolOop sig = _signature();
+ while (sig->byte_at(_end++) != ';');
+ break;
+ }
+ case '[': {
+ _type = T_ARRAY;
+ symbolOop sig = _signature();
+ char c = sig->byte_at(_end);
+ while ('0' <= c && c <= '9') c = sig->byte_at(_end++);
+ while (sig->byte_at(_end) == '[') {
+ _end++;
+ c = sig->byte_at(_end);
+ while ('0' <= c && c <= '9') c = sig->byte_at(_end++);
+ }
+ switch(sig->byte_at(_end)) {
+ case 'B':
+ case 'C':
+ case 'D':
+ case 'F':
+ case 'I':
+ case 'J':
+ case 'S':
+ case 'Z':_end++; break;
+ default: {
+ while (sig->byte_at(_end++) != ';');
+ break;
+ }
+ }
+ break;
+ }
+ case ')': _end++; next(); _at_return_type = true; break;
+ default : ShouldNotReachHere();
+ }
+}
+
+
+bool SignatureStream::is_object() const {
+ return _type == T_OBJECT
+ || _type == T_ARRAY;
+}
+
+bool SignatureStream::is_array() const {
+ return _type == T_ARRAY;
+}
+
+symbolOop SignatureStream::as_symbol(TRAPS) {
+  // Create a symbol for the substring [_begin, _end)
+ int begin = _begin;
+ int end = _end;
+
+ if ( _signature()->byte_at(_begin) == 'L'
+ && _signature()->byte_at(_end-1) == ';') {
+ begin++;
+ end--;
+ }
+
+ symbolOop result = oopFactory::new_symbol(_signature, begin, end, CHECK_NULL);
+ return result;
+}
+
+
+symbolOop SignatureStream::as_symbol_or_null() {
+  // Create a symbol for the substring [_begin, _end)
+ ResourceMark rm;
+
+ int begin = _begin;
+ int end = _end;
+
+ if ( _signature()->byte_at(_begin) == 'L'
+ && _signature()->byte_at(_end-1) == ';') {
+ begin++;
+ end--;
+ }
+
+ char* buffer = NEW_RESOURCE_ARRAY(char, end - begin);
+ for (int index = begin; index < end; index++) {
+ buffer[index - begin] = _signature()->byte_at(index);
+ }
+ symbolOop result = SymbolTable::probe(buffer, end - begin);
+ return result;
+}
+
+bool SignatureVerifier::is_valid_signature(symbolHandle sig) {
+ const char* signature = (const char*)sig->bytes();
+ ssize_t len = sig->utf8_length();
+ if (signature == NULL || signature[0] == '\0' || len < 1) {
+ return false;
+ } else if (signature[0] == '(') {
+ return is_valid_method_signature(sig);
+ } else {
+ return is_valid_type_signature(sig);
+ }
+}
+
+bool SignatureVerifier::is_valid_method_signature(symbolHandle sig) {
+ const char* method_sig = (const char*)sig->bytes();
+ ssize_t len = sig->utf8_length();
+ ssize_t index = 0;
+ if (method_sig != NULL && len > 1 && method_sig[index] == '(') {
+ ++index;
+ while (index < len && method_sig[index] != ')') {
+ ssize_t res = is_valid_type(&method_sig[index], len - index);
+ if (res == -1) {
+ return false;
+ } else {
+ index += res;
+ }
+ }
+ if (index < len && method_sig[index] == ')') {
+ // check the return type
+ ++index;
+ return (is_valid_type(&method_sig[index], len - index) == (len - index));
+ }
+ }
+ return false;
+}
+
+bool SignatureVerifier::is_valid_type_signature(symbolHandle sig) {
+ const char* type_sig = (const char*)sig->bytes();
+ ssize_t len = sig->utf8_length();
+ return (type_sig != NULL && len >= 1 &&
+ (is_valid_type(type_sig, len) == len));
+}
+
+// Checks to see if the type (not to go beyond 'limit') refers to a valid type.
+// Returns -1 if it is not, or the index of the next character that is not part
+// of the type. The type encoding may end before 'limit' and that's ok.
+ssize_t SignatureVerifier::is_valid_type(const char* type, ssize_t limit) {
+ ssize_t index = 0;
+
+ // Iterate over any number of array dimensions
+ while (index < limit && type[index] == '[') ++index;
+ if (index >= limit) {
+ return -1;
+ }
+ switch (type[index]) {
+ case 'B': case 'C': case 'D': case 'F': case 'I':
+ case 'J': case 'S': case 'Z': case 'V':
+ return index + 1;
+ case 'L':
+ for (index = index + 1; index < limit; ++index) {
+ char c = type[index];
+ if (c == ';') {
+ return index + 1;
+ }
+ if (invalid_name_char(c)) {
+ return -1;
+ }
+ }
+ // fall through
+ default: ; // fall through
+ }
+ return -1;
+}
+
+bool SignatureVerifier::invalid_name_char(char c) {
+ switch (c) {
+ case '\0': case '.': case ';': case '[':
+ return true;
+ default:
+ return false;
+ }
+}
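The grammar comment at the top of signature.cpp and parse_type() can be exercised outside the VM. Below is a minimal standalone sketch (not HotSpot code; a plain std::string stands in for symbolOop) that walks a method descriptor and counts argument words the same way parse_type() sizes them: J and D take two words, everything else, including array and object references, takes one.

// Standalone sketch (not part of HotSpot): counts the argument words of a
// JVM method descriptor, mirroring SignatureIterator::parse_type() sizing.
#include <cstddef>
#include <cstdio>
#include <string>

// Returns the number of characters occupied by one FieldType starting at
// index 'i', or -1 on malformed input.
static int field_type_length(const std::string& sig, size_t i) {
  size_t j = i;
  while (j < sig.size() && sig[j] == '[') ++j;            // array dimensions
  if (j >= sig.size()) return -1;
  if (sig[j] == 'L') {                                     // class name
    while (j < sig.size() && sig[j] != ';') ++j;
    if (j >= sig.size()) return -1;
    return (int)(j - i + 1);
  }
  switch (sig[j]) {                                        // primitive type
    case 'B': case 'C': case 'D': case 'F':
    case 'I': case 'J': case 'S': case 'Z':
      return (int)(j - i + 1);
    default:
      return -1;
  }
}

// Counts argument words: long/double take 2, every other parameter takes 1.
// Array parameters are references, so they count as 1 word.
static int argument_words(const std::string& sig) {
  if (sig.empty() || sig[0] != '(') return -1;
  size_t i = 1;
  int words = 0;
  while (i < sig.size() && sig[i] != ')') {
    int len = field_type_length(sig, i);
    if (len < 0) return -1;
    words += (sig[i] == 'J' || sig[i] == 'D') ? 2 : 1;
    i += len;
  }
  return (i < sig.size()) ? words : -1;
}

int main() {
  // "([Lfoo;D)I" is the example used in signature.hpp: one array reference
  // (1 word) plus one double (2 words) = 3 argument words.
  std::printf("%d\n", argument_words("([Lfoo;D)I"));              // prints 3
  std::printf("%d\n", argument_words("(IJLjava/lang/String;)V")); // 1+2+1 = 4
  return 0;
}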
diff --git a/src/share/vm/runtime/signature.hpp b/src/share/vm/runtime/signature.hpp
new file mode 100644
index 000000000..51b45a0a0
--- /dev/null
+++ b/src/share/vm/runtime/signature.hpp
@@ -0,0 +1,416 @@
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// SignatureIterators iterate over a Java signature (or parts of it).
+// (Syntax according to: "The Java Virtual Machine Specification" by
+// Tim Lindholm & Frank Yellin; section 4.3 Descriptors; p. 89ff.)
+//
+// Example: Iterating over ([Lfoo;D)I using
+// 0123456789
+//
+// iterate_parameters() calls: do_array(2, 7); do_double();
+// iterate_returntype() calls: do_int();
+// iterate() calls: do_array(2, 7); do_double(); do_int();
+//
+// is_return_type() is: false ; false ; true
+//
+// NOTE: The new optimizer has an alternate, for-loop based signature
+// iterator implemented in opto/type.cpp, TypeTuple::make().
+
+class SignatureIterator: public ResourceObj {
+ protected:
+ symbolHandle _signature; // the signature to iterate over
+ int _index; // the current character index (only valid during iteration)
+ int _parameter_index; // the current parameter index (0 outside iteration phase)
+ BasicType _return_type;
+
+ void expect(char c);
+ void skip_optional_size();
+ int parse_type(); // returns the parameter size in words (0 for void)
+ void check_signature_end();
+
+ public:
+ // Definitions used in generating and iterating the
+ // bit field form of the signature generated by the
+ // Fingerprinter.
+ enum {
+ static_feature_size = 1,
+ result_feature_size = 4,
+ result_feature_mask = 0xF,
+ parameter_feature_size = 4,
+ parameter_feature_mask = 0xF,
+
+ bool_parm = 1,
+ byte_parm = 2,
+ char_parm = 3,
+ short_parm = 4,
+ int_parm = 5,
+ long_parm = 6,
+ float_parm = 7,
+ double_parm = 8,
+ obj_parm = 9,
+ done_parm = 10, // marker for end of parameters
+
+    // max parameters is the word size minus the sign bit, the termination
+    // field, and the result and static bit fields, all divided by the
+    // parameter feature size
+ max_size_of_parameters = (BitsPerLong-1 -
+ result_feature_size - parameter_feature_size -
+ static_feature_size) / parameter_feature_size
+ };
+
+ // Constructors
+ SignatureIterator(symbolOop signature);
+ SignatureIterator(Thread *thread, symbolOop signature);
+ SignatureIterator(symbolHandle signature);
+
+ // Iteration
+ void dispatch_field(); // dispatches once for field signatures
+ void iterate_parameters(); // iterates over parameters only
+ void iterate_parameters( uint64_t fingerprint );
+ void iterate_returntype(); // iterates over returntype only
+ void iterate(); // iterates over whole signature
+ // Returns the word index of the current parameter;
+ int parameter_index() const { return _parameter_index; }
+ bool is_return_type() const { return parameter_index() < 0; }
+ BasicType get_ret_type() const { return _return_type; }
+
+ // Basic types
+ virtual void do_bool () = 0;
+ virtual void do_char () = 0;
+ virtual void do_float () = 0;
+ virtual void do_double() = 0;
+ virtual void do_byte () = 0;
+ virtual void do_short () = 0;
+ virtual void do_int () = 0;
+ virtual void do_long () = 0;
+ virtual void do_void () = 0;
+
+ // Object types (begin indexes the first character of the entry, end indexes the first character after the entry)
+ virtual void do_object(int begin, int end) = 0;
+ virtual void do_array (int begin, int end) = 0;
+};
+
+
+// Specialized SignatureIterators: Used to compute signature specific values.
+
+class SignatureTypeNames : public SignatureIterator {
+ protected:
+ virtual void type_name(const char* name) = 0;
+
+ void do_bool() { type_name("jboolean"); }
+ void do_char() { type_name("jchar" ); }
+ void do_float() { type_name("jfloat" ); }
+ void do_double() { type_name("jdouble" ); }
+ void do_byte() { type_name("jbyte" ); }
+ void do_short() { type_name("jshort" ); }
+ void do_int() { type_name("jint" ); }
+ void do_long() { type_name("jlong" ); }
+ void do_void() { type_name("void" ); }
+ void do_object(int begin, int end) { type_name("jobject" ); }
+ void do_array (int begin, int end) { type_name("jobject" ); }
+
+ public:
+ SignatureTypeNames(symbolHandle signature) : SignatureIterator(signature) {}
+};
+
+
+class SignatureInfo: public SignatureIterator {
+ protected:
+ bool _has_iterated; // need this because iterate cannot be called in constructor (set is virtual!)
+ bool _has_iterated_return;
+ int _size;
+
+ void lazy_iterate_parameters() { if (!_has_iterated) { iterate_parameters(); _has_iterated = true; } }
+ void lazy_iterate_return() { if (!_has_iterated_return) { iterate_returntype(); _has_iterated_return = true; } }
+
+ virtual void set(int size, BasicType type) = 0;
+
+ void do_bool () { set(T_BOOLEAN_size, T_BOOLEAN); }
+ void do_char () { set(T_CHAR_size , T_CHAR ); }
+ void do_float () { set(T_FLOAT_size , T_FLOAT ); }
+ void do_double() { set(T_DOUBLE_size , T_DOUBLE ); }
+ void do_byte () { set(T_BYTE_size , T_BYTE ); }
+ void do_short () { set(T_SHORT_size , T_SHORT ); }
+ void do_int () { set(T_INT_size , T_INT ); }
+ void do_long () { set(T_LONG_size , T_LONG ); }
+ void do_void () { set(T_VOID_size , T_VOID ); }
+ void do_object(int begin, int end) { set(T_OBJECT_size , T_OBJECT ); }
+ void do_array (int begin, int end) { set(T_ARRAY_size , T_ARRAY ); }
+
+ public:
+ SignatureInfo(symbolHandle signature) : SignatureIterator(signature) {
+ _has_iterated = _has_iterated_return = false;
+ _size = 0;
+ _return_type = T_ILLEGAL;
+ }
+
+};
+
+
+// Specialized SignatureIterator: Used to compute the argument size.
+
+class ArgumentSizeComputer: public SignatureInfo {
+ private:
+ void set(int size, BasicType type) { _size += size; }
+ public:
+ ArgumentSizeComputer(symbolHandle signature) : SignatureInfo(signature) {}
+
+ int size() { lazy_iterate_parameters(); return _size; }
+};
+
+
+class ArgumentCount: public SignatureInfo {
+ private:
+ void set(int size, BasicType type) { _size ++; }
+ public:
+ ArgumentCount(symbolHandle signature) : SignatureInfo(signature) {}
+
+ int size() { lazy_iterate_parameters(); return _size; }
+};
+
+
+// Specialized SignatureIterator: Used to compute the result type.
+
+class ResultTypeFinder: public SignatureInfo {
+ private:
+ void set(int size, BasicType type) { _return_type = type; }
+ public:
+ BasicType type() { lazy_iterate_return(); return _return_type; }
+
+ ResultTypeFinder(symbolHandle signature) : SignatureInfo(signature) {}
+};
+
+
+// Fingerprinter computes a unique ID for a given method. The ID
+// is a bitvector characterizing the methods signature (incl. the receiver).
+class Fingerprinter: public SignatureIterator {
+ private:
+ uint64_t _fingerprint;
+ int _shift_count;
+ methodHandle mh;
+
+ public:
+
+ void do_bool() { _fingerprint |= (((uint64_t)bool_parm) << _shift_count); _shift_count += parameter_feature_size; }
+ void do_char() { _fingerprint |= (((uint64_t)char_parm) << _shift_count); _shift_count += parameter_feature_size; }
+ void do_byte() { _fingerprint |= (((uint64_t)byte_parm) << _shift_count); _shift_count += parameter_feature_size; }
+ void do_short() { _fingerprint |= (((uint64_t)short_parm) << _shift_count); _shift_count += parameter_feature_size; }
+ void do_int() { _fingerprint |= (((uint64_t)int_parm) << _shift_count); _shift_count += parameter_feature_size; }
+ void do_long() { _fingerprint |= (((uint64_t)long_parm) << _shift_count); _shift_count += parameter_feature_size; }
+ void do_float() { _fingerprint |= (((uint64_t)float_parm) << _shift_count); _shift_count += parameter_feature_size; }
+ void do_double() { _fingerprint |= (((uint64_t)double_parm) << _shift_count); _shift_count += parameter_feature_size; }
+
+ void do_object(int begin, int end) { _fingerprint |= (((uint64_t)obj_parm) << _shift_count); _shift_count += parameter_feature_size; }
+ void do_array (int begin, int end) { _fingerprint |= (((uint64_t)obj_parm) << _shift_count); _shift_count += parameter_feature_size; }
+
+ void do_void() { ShouldNotReachHere(); }
+
+ Fingerprinter(methodHandle method) : SignatureIterator(method->signature()) {
+ mh = method;
+ _fingerprint = 0;
+ }
+
+ Fingerprinter(Thread *thread, methodHandle method) : SignatureIterator(thread, method->signature()) {
+ mh = method;
+ _fingerprint = 0;
+ }
+
+ uint64_t fingerprint() {
+ // See if we fingerprinted this method already
+ if (mh->constMethod()->fingerprint() != CONST64(0)) {
+ return mh->constMethod()->fingerprint();
+ }
+
+ if (mh->size_of_parameters() > max_size_of_parameters ) {
+ _fingerprint = UCONST64(-1);
+ mh->constMethod()->set_fingerprint(_fingerprint);
+ return _fingerprint;
+ }
+
+ assert( (int)mh->result_type() <= (int)result_feature_mask, "bad result type");
+ _fingerprint = mh->result_type();
+ _fingerprint <<= static_feature_size;
+ if (mh->is_static()) _fingerprint |= 1;
+ _shift_count = result_feature_size + static_feature_size;
+ iterate_parameters();
+ _fingerprint |= ((uint64_t)done_parm) << _shift_count;// mark end of sig
+ mh->constMethod()->set_fingerprint(_fingerprint);
+ return _fingerprint;
+ }
+};
+
+
+// Specialized SignatureIterator: Used for native call purposes
+
+class NativeSignatureIterator: public SignatureIterator {
+ private:
+ methodHandle _method;
+// We need separate JNI and Java offset values because in 64 bit mode,
+// the argument offsets are not in sync with the Java stack.
+// For example a long takes up 1 "C" stack entry but 2 Java stack entries.
+ int _offset; // The java stack offset
+ int _prepended; // number of prepended JNI parameters (1 JNIEnv, plus 1 mirror if static)
+ int _jni_offset; // the current parameter offset, starting with 0
+
+ void do_bool () { pass_int(); _jni_offset++; _offset++; }
+ void do_char () { pass_int(); _jni_offset++; _offset++; }
+#ifdef _LP64
+ void do_float () { pass_float(); _jni_offset++; _offset++; }
+ void do_double() { pass_double(); _jni_offset++; _offset += 2; }
+#else
+ void do_float () { pass_int(); _jni_offset++; _offset++; }
+ void do_double() { pass_double(); _jni_offset += 2; _offset += 2; }
+#endif
+ void do_byte () { pass_int(); _jni_offset++; _offset++; }
+ void do_short () { pass_int(); _jni_offset++; _offset++; }
+ void do_int () { pass_int(); _jni_offset++; _offset++; }
+#ifdef _LP64
+ void do_long () { pass_long(); _jni_offset++; _offset += 2; }
+#else
+ void do_long () { pass_long(); _jni_offset += 2; _offset += 2; }
+#endif
+ void do_void () { ShouldNotReachHere(); }
+ void do_object(int begin, int end) { pass_object(); _jni_offset++; _offset++; }
+ void do_array (int begin, int end) { pass_object(); _jni_offset++; _offset++; }
+
+ public:
+ methodHandle method() const { return _method; }
+ int offset() const { return _offset; }
+ int jni_offset() const { return _jni_offset + _prepended; }
+// int java_offset() const { return method()->size_of_parameters() - _offset - 1; }
+ bool is_static() const { return method()->is_static(); }
+ virtual void pass_int() = 0;
+ virtual void pass_long() = 0;
+ virtual void pass_object() = 0;
+#ifdef _LP64
+ virtual void pass_float() = 0;
+ virtual void pass_double() = 0;
+#else
+ virtual void pass_double() { pass_long(); } // may be same as long
+#endif
+
+ NativeSignatureIterator(methodHandle method) : SignatureIterator(method->signature()) {
+ _method = method;
+ _offset = 0;
+ _jni_offset = 0;
+
+ const int JNIEnv_words = 1;
+ const int mirror_words = 1;
+ _prepended = !is_static() ? JNIEnv_words : JNIEnv_words + mirror_words;
+ }
+
+  // iterate() calls the pass_* virtual methods according to the following invocation syntax:
+ //
+ // {pass_int | pass_long | pass_object}
+ //
+ // Arguments are handled from left to right (receiver first, if any).
+ // The offset() values refer to the Java stack offsets but are 0 based and increasing.
+ // The java_offset() values count down to 0, and refer to the Java TOS.
+ // The jni_offset() values increase from 1 or 2, and refer to C arguments.
+
+  void iterate() { iterate(Fingerprinter(method()).fingerprint()); }
+
+
+ // Optimized path if we have the bitvector form of signature
+ void iterate( uint64_t fingerprint ) {
+
+ if (!is_static()) {
+ // handle receiver (not handled by iterate because not in signature)
+ pass_object(); _jni_offset++; _offset++;
+ }
+
+ SignatureIterator::iterate_parameters( fingerprint );
+ }
+};
+
+
+// Handy stream for iterating over signature
+
+class SignatureStream : public StackObj {
+ private:
+ symbolHandle _signature;
+ int _begin;
+ int _end;
+ BasicType _type;
+ bool _at_return_type;
+
+ public:
+ bool at_return_type() const { return _at_return_type; }
+ bool is_done() const;
+ void next_non_primitive(int t);
+ void next() {
+ symbolOop sig = _signature();
+ int len = sig->utf8_length();
+ if (_end >= len) {
+ _end = len + 1;
+ return;
+ }
+
+ _begin = _end;
+ int t = sig->byte_at(_begin);
+ switch (t) {
+ case 'B': _type = T_BYTE; break;
+ case 'C': _type = T_CHAR; break;
+ case 'D': _type = T_DOUBLE; break;
+ case 'F': _type = T_FLOAT; break;
+ case 'I': _type = T_INT; break;
+ case 'J': _type = T_LONG; break;
+ case 'S': _type = T_SHORT; break;
+ case 'Z': _type = T_BOOLEAN; break;
+ case 'V': _type = T_VOID; break;
+ default : next_non_primitive(t);
+ return;
+ }
+ _end++;
+ }
+
+ SignatureStream(symbolHandle signature,
+ bool is_method = true) :
+ _signature(signature), _at_return_type(false) {
+ _begin = _end = (is_method ? 1 : 0); // skip first '(' in method signatures
+ next();
+ }
+
+ bool is_object() const; // True if this argument is an object
+ bool is_array() const; // True if this argument is an array
+ BasicType type() const { return _type; }
+ symbolOop as_symbol(TRAPS);
+
+ // return same as_symbol except allocation of new symbols is avoided.
+ symbolOop as_symbol_or_null();
+};
+
+class SignatureVerifier : public StackObj {
+ public:
+ // Returns true if the symbol is valid method or type signature
+ static bool is_valid_signature(symbolHandle sig);
+
+ static bool is_valid_method_signature(symbolHandle sig);
+ static bool is_valid_type_signature(symbolHandle sig);
+ private:
+
+ static ssize_t is_valid_type(const char*, ssize_t);
+ static bool invalid_name_char(char);
+};
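The Fingerprinter and iterate_parameters(uint64_t) share a bit layout: the static bit in bit 0, the result type in the next result_feature_size bits, then one parameter_feature_size code per argument, closed off by done_parm. The standalone sketch below (constants copied from the enum above; the result code 5 is an arbitrary stand-in for the method's BasicType) packs a fingerprint and then unpacks it the same way the optimized iterator consumes it.

// Standalone sketch of the fingerprint bit layout used by Fingerprinter and
// SignatureIterator::iterate_parameters(uint64_t).
#include <cstdint>
#include <cstdio>
#include <vector>

enum {
  static_feature_size    = 1,
  result_feature_size    = 4,
  parameter_feature_size = 4,
  parameter_feature_mask = 0xF,

  bool_parm = 1, byte_parm = 2, char_parm = 3, short_parm = 4, int_parm = 5,
  long_parm = 6, float_parm = 7, double_parm = 8, obj_parm = 9,
  done_parm = 10
};

// Packs: result type, then the static bit, then one code per parameter,
// then done_parm to mark the end of the signature.
static uint64_t pack(unsigned result_code, bool is_static,
                     const std::vector<unsigned>& parms) {
  uint64_t fp = result_code;
  fp <<= static_feature_size;
  if (is_static) fp |= 1;
  int shift = result_feature_size + static_feature_size;
  for (unsigned p : parms) {
    fp |= (uint64_t)p << shift;
    shift += parameter_feature_size;
  }
  fp |= (uint64_t)done_parm << shift;
  return fp;
}

int main() {
  // Parameters of a descriptor like (IJLjava/lang/String;)...: int, long, object.
  uint64_t fp = pack(/*result_code=*/5, /*is_static=*/true,
                     {int_parm, long_parm, obj_parm});

  // Unpack the way iterate_parameters(fingerprint) does: skip the static and
  // result fields, then read 4-bit codes until done_parm.
  uint64_t rest = fp >> (static_feature_size + result_feature_size);
  while ((rest & parameter_feature_mask) != done_parm) {
    std::printf("parm code %d\n", (int)(rest & parameter_feature_mask));
    rest >>= parameter_feature_size;
  }
  return 0;
}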
diff --git a/src/share/vm/runtime/stackValue.cpp b/src/share/vm/runtime/stackValue.cpp
new file mode 100644
index 000000000..65387af23
--- /dev/null
+++ b/src/share/vm/runtime/stackValue.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_stackValue.cpp.incl"
+
+StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv) {
+ if (sv->is_location()) {
+ // Stack or register value
+ Location loc = ((LocationValue *)sv)->location();
+
+#ifdef SPARC
+ // %%%%% Callee-save floats will NOT be working on a Sparc until we
+ // handle the case of a 2 floats in a single double register.
+ assert( !(loc.is_register() && loc.type() == Location::float_in_dbl), "Sparc does not handle callee-save floats yet" );
+#endif // SPARC
+
+ // First find address of value
+
+ address value_addr = loc.is_register()
+ // Value was in a callee-save register
+ ? reg_map->location(VMRegImpl::as_VMReg(loc.register_number()))
+ // Else value was directly saved on the stack. The frame's original stack pointer,
+ // before any extension by its callee (due to Compiler1 linkage on SPARC), must be used.
+ : ((address)fr->unextended_sp()) + loc.stack_offset();
+
+ // Then package it right depending on type
+ // Note: the transfer of the data is thru a union that contains
+ // an intptr_t. This is because an interpreter stack slot is
+ // really an intptr_t. The use of a union containing an intptr_t
+ // ensures that on a 64 bit platform we have proper alignment
+ // and that we store the value where the interpreter will expect
+ // to find it (i.e. proper endian). Similarly on a 32bit platform
+ // using the intptr_t ensures that when a value is larger than
+ // a stack slot (jlong/jdouble) that we capture the proper part
+ // of the value for the stack slot in question.
+ //
+ switch( loc.type() ) {
+ case Location::float_in_dbl: { // Holds a float in a double register?
+ // The callee has no clue whether the register holds a float,
+ // double or is unused. He always saves a double. Here we know
+ // a double was saved, but we only want a float back. Narrow the
+ // saved double to the float that the JVM wants.
+ assert( loc.is_register(), "floats always saved to stack in 1 word" );
+ union { intptr_t p; jfloat jf; } value;
+ value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
+ value.jf = (jfloat) *(jdouble*) value_addr;
+ return new StackValue(value.p); // 64-bit high half is stack junk
+ }
+ case Location::int_in_long: { // Holds an int in a long register?
+ // The callee has no clue whether the register holds an int,
+ // long or is unused. He always saves a long. Here we know
+ // a long was saved, but we only want an int back. Narrow the
+ // saved long to the int that the JVM wants.
+ assert( loc.is_register(), "ints always saved to stack in 1 word" );
+ union { intptr_t p; jint ji;} value;
+ value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
+ value.ji = (jint) *(jlong*) value_addr;
+ return new StackValue(value.p); // 64-bit high half is stack junk
+ }
+#ifdef _LP64
+ case Location::dbl:
+ // Double value in an aligned adjacent pair
+ return new StackValue(*(intptr_t*)value_addr);
+ case Location::lng:
+ // Long value in an aligned adjacent pair
+ return new StackValue(*(intptr_t*)value_addr);
+#endif
+ case Location::oop: {
+ Handle h(*(oop *)value_addr); // Wrap a handle around the oop
+ return new StackValue(h);
+ }
+ case Location::addr: {
+ ShouldNotReachHere(); // both C1 and C2 now inline jsrs
+ }
+ case Location::normal: {
+ // Just copy all other bits straight through
+ union { intptr_t p; jint ji;} value;
+ value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
+ value.ji = *(jint*)value_addr;
+ return new StackValue(value.p);
+ }
+ case Location::invalid:
+ return new StackValue();
+ default:
+ ShouldNotReachHere();
+ }
+
+ } else if (sv->is_constant_int()) {
+ // Constant int: treat same as register int.
+ union { intptr_t p; jint ji;} value;
+ value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
+ value.ji = (jint)((ConstantIntValue*)sv)->value();
+ return new StackValue(value.p);
+ } else if (sv->is_constant_oop()) {
+ // constant oop
+ return new StackValue(((ConstantOopReadValue *)sv)->value());
+#ifdef _LP64
+ } else if (sv->is_constant_double()) {
+ // Constant double in a single stack slot
+ union { intptr_t p; double d; } value;
+ value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
+ value.d = ((ConstantDoubleValue *)sv)->value();
+ return new StackValue(value.p);
+ } else if (sv->is_constant_long()) {
+ // Constant long in a single stack slot
+ union { intptr_t p; jlong jl; } value;
+ value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
+ value.jl = ((ConstantLongValue *)sv)->value();
+ return new StackValue(value.p);
+#endif
+ } else if (sv->is_object()) {
+ return new StackValue(((ObjectValue *)sv)->value());
+ }
+
+ // Unknown ScopeValue type
+ ShouldNotReachHere();
+ return new StackValue((intptr_t) 0); // dummy
+}
+
+
+BasicLock* StackValue::resolve_monitor_lock(const frame* fr, Location location) {
+ assert(location.is_stack(), "for now we only look at the stack");
+ int word_offset = location.stack_offset() / wordSize;
+ // (stack picture)
+ // high: [ ] word_offset + 1
+ // low [ ] word_offset
+ //
+ // sp-> [ ] 0
+ // the word_offset is the distance from the stack pointer to the lowest address
+ // The frame's original stack pointer, before any extension by its callee
+ // (due to Compiler1 linkage on SPARC), must be used.
+ return (BasicLock*) (fr->unextended_sp() + word_offset);
+}
+
+
+#ifndef PRODUCT
+
+void StackValue::print_on(outputStream* st) const {
+ switch(_type) {
+ case T_INT:
+ st->print("%d (int) %f (float) %x (hex)", *(int *)&_i, *(float *)&_i, *(int *)&_i);
+ break;
+
+ case T_OBJECT:
+ _o()->print_value_on(st);
+ st->print(" <" INTPTR_FORMAT ">", (address)_o());
+ break;
+
+ case T_CONFLICT:
+ st->print("conflict");
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+#endif
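The block comment inside create_stack_value() explains why values are moved through a union containing an intptr_t: an interpreter slot is a full intptr_t, so a narrow value has to land in the part of the slot the interpreter will read, with the rest left as junk. A standalone sketch of that pattern (not HotSpot code), narrowing a callee-saved double down to the float the JVM actually wants:

// Standalone sketch of the "transfer through a union with an intptr_t"
// pattern used in StackValue::create_stack_value(). A callee-saved register
// always holds a full double; only the float part is wanted back, stored so
// the interpreter finds it in an intptr_t-sized slot.
#include <cstdint>
#include <cstdio>

int main() {
  double saved_register = 3.5;               // what the callee actually saved
  const double* value_addr = &saved_register;

  union { intptr_t p; float jf; } slot;
  slot.p = (intptr_t)0xDEADDEAFDEADDEAFULL;  // poison the whole slot first
  slot.jf = (float)*value_addr;              // overwrite only the float part

  // slot.p is what would be pushed as the interpreter stack slot; reading it
  // back as a float recovers the narrowed value, junk half and all.
  union { intptr_t p; float jf; } check;
  check.p = slot.p;
  std::printf("%f\n", check.jf);             // prints 3.500000
  return 0;
}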
diff --git a/src/share/vm/runtime/stackValue.hpp b/src/share/vm/runtime/stackValue.hpp
new file mode 100644
index 000000000..6296b8073
--- /dev/null
+++ b/src/share/vm/runtime/stackValue.hpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class StackValue : public ResourceObj {
+ private:
+ BasicType _type;
+ intptr_t _i; // Blank java stack slot value
+ Handle _o; // Java stack slot value interpreted as a Handle
+ public:
+
+ StackValue(intptr_t value) {
+ _type = T_INT;
+ _i = value;
+ }
+
+ StackValue(Handle value) {
+ _type = T_OBJECT;
+ _o = value;
+ }
+
+ StackValue() {
+ _type = T_CONFLICT;
+ _i = 0;
+ }
+
+ // Only used during deopt- preserve object type.
+ StackValue(intptr_t o, BasicType t) {
+ assert(t == T_OBJECT, "should not be used");
+ _type = t;
+ _i = o;
+ }
+
+ Handle get_obj() const {
+ assert(type() == T_OBJECT, "type check");
+ return _o;
+ }
+
+ void set_obj(Handle value) {
+ assert(type() == T_OBJECT, "type check");
+ _o = value;
+ }
+
+ intptr_t get_int() const {
+ assert(type() == T_INT, "type check");
+ return _i;
+ }
+
+ // For special case in deopt.
+ intptr_t get_int(BasicType t) const {
+ assert(t == T_OBJECT && type() == T_OBJECT, "type check");
+ return _i;
+ }
+
+ void set_int(intptr_t value) {
+ assert(type() == T_INT, "type check");
+ _i = value;
+ }
+
+ BasicType type() const { return _type; }
+
+ bool equal(StackValue *value) {
+ if (_type != value->_type) return false;
+ if (_type == T_OBJECT)
+ return (_o == value->_o);
+ else {
+ assert(_type == T_INT, "sanity check");
+ // [phh] compare only low addressed portions of intptr_t slots
+ return (*(int *)&_i == *(int *)&value->_i);
+ }
+ }
+
+ static StackValue* create_stack_value(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv);
+ static BasicLock* resolve_monitor_lock(const frame* fr, Location location);
+
+#ifndef PRODUCT
+ public:
+ // Printing
+ void print_on(outputStream* st) const;
+#endif
+};
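The "[phh] compare only low addressed portions" note in equal() exists because, on 64-bit builds, a 32-bit value is written into its slot through a union and the other half of the intptr_t keeps whatever junk (the 0xDEADDEAF... poison in stackValue.cpp) was there. A standalone illustration, assuming a 64-bit build, of why a full-slot compare would give false negatives:

// Standalone illustration (not HotSpot code): two 64-bit slots hold the same
// jint but different junk in the unused half, so comparing the whole intptr_t
// differs while comparing only the low-addressed 32 bits matches.
#include <cstdint>
#include <cstdio>

static intptr_t make_slot(int32_t value, uint64_t junk) {
  union { intptr_t p; int32_t ji; } slot;
  slot.p = (intptr_t)junk;   // whatever happened to be in the slot before
  slot.ji = value;           // the part the interpreter actually wrote
  return slot.p;
}

int main() {
  intptr_t a = make_slot(42, 0xDEADDEAFDEADDEAFULL);
  intptr_t b = make_slot(42, 0x1122334455667788ULL);

  // Same cast the VM uses in StackValue::equal(); memcpy would be the
  // strictly conforming way to read the low-addressed half.
  std::printf("full compare:        %s\n", (a == b) ? "equal" : "different");
  std::printf("low-address compare: %s\n",
              (*(int32_t*)&a == *(int32_t*)&b) ? "equal" : "different");
  return 0;
}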
diff --git a/src/share/vm/runtime/stackValueCollection.cpp b/src/share/vm/runtime/stackValueCollection.cpp
new file mode 100644
index 000000000..09f27732d
--- /dev/null
+++ b/src/share/vm/runtime/stackValueCollection.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_stackValueCollection.cpp.incl"
+
+jint StackValueCollection::int_at(int slot) const {
+ intptr_t val = at(slot)->get_int();
+ jint ival = *((jint*) (&val));
+ return ival;
+}
+
+jlong StackValueCollection::long_at(int slot) const {
+#ifdef _LP64
+ return at(slot+1)->get_int();
+#else
+ union {
+ jlong jl;
+ jint array[2];
+ } value;
+ // Interpreter stack is reversed in memory:
+ // low memory location is in higher java local slot.
+ value.array[0] = at(slot+1)->get_int();
+ value.array[1] = at(slot )->get_int();
+ return value.jl;
+#endif
+}
+
+Handle StackValueCollection::obj_at(int slot) const {
+ return at(slot)->get_obj();
+}
+
+jfloat StackValueCollection::float_at(int slot) const {
+ intptr_t res = at(slot)->get_int();
+ return *((jfloat*) (&res));
+}
+
+jdouble StackValueCollection::double_at(int slot) const {
+#ifdef _LP64
+ intptr_t res = at(slot+1)->get_int();
+ return *((jdouble*) (&res));
+#else
+ union {
+ jdouble jd;
+ jint array[2];
+ } value;
+ // Interpreter stack is reversed in memory:
+ // low memory location is in higher java local slot.
+ value.array[0] = at(slot+1)->get_int();
+ value.array[1] = at(slot )->get_int();
+ return value.jd;
+#endif
+}
+
+void StackValueCollection::set_int_at(int slot, jint value) {
+ intptr_t val;
+ *((jint*) (&val)) = value;
+ at(slot)->set_int(val);
+}
+
+void StackValueCollection::set_long_at(int slot, jlong value) {
+#ifdef _LP64
+ at(slot+1)->set_int(value);
+#else
+ union {
+ jlong jl;
+ jint array[2];
+ } x;
+ // Interpreter stack is reversed in memory:
+ // low memory location is in higher java local slot.
+ x.jl = value;
+ at(slot+1)->set_int(x.array[0]);
+ at(slot+0)->set_int(x.array[1]);
+#endif
+}
+
+void StackValueCollection::set_obj_at(int slot, Handle value) {
+ at(slot)->set_obj(value);
+}
+
+void StackValueCollection::set_float_at(int slot, jfloat value) {
+#ifdef _LP64
+ union {
+ intptr_t jd;
+ jint array[2];
+ } val;
+ // Interpreter stores 32 bit floats in first half of 64 bit word.
+ val.array[0] = *(jint*)(&value);
+ val.array[1] = 0;
+ at(slot)->set_int(val.jd);
+#else
+ at(slot)->set_int(*(jint*)(&value));
+#endif
+}
+
+void StackValueCollection::set_double_at(int slot, jdouble value) {
+#ifdef _LP64
+ at(slot+1)->set_int(*(intptr_t*)(&value));
+#else
+ union {
+ jdouble jd;
+ jint array[2];
+ } x;
+ // Interpreter stack is reversed in memory:
+ // low memory location is in higher java local slot.
+ x.jd = value;
+ at(slot+1)->set_int(x.array[0]);
+ at(slot+0)->set_int(x.array[1]);
+#endif
+}
+
+#ifndef PRODUCT
+void StackValueCollection::print() {
+ for(int index = 0; index < size(); index++) {
+ tty->print("\t %2d ", index);
+ at(index)->print_on(tty);
+ if( at(index )->type() == T_INT &&
+ index+1 < size() &&
+ at(index+1)->type() == T_INT ) {
+ tty->print(" " INT64_FORMAT " (long)", long_at(index));
+ tty->cr();
+ tty->print("\t %.15e (double)", double_at(index));
+ tty->print(" " PTR64_FORMAT " (longhex)", long_at(index));
+ }
+ tty->cr();
+ }
+}
+#endif
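On 32-bit builds, set_long_at()/long_at() (and the double variants) split a 64-bit value across two adjacent slots with the halves swapped, because the interpreter stack is reversed in memory: the low-memory word sits in the higher-numbered Java local. A standalone round trip of that union trick, mirroring the #ifndef _LP64 paths above:

// Standalone sketch of the 32-bit split used by StackValueCollection: a jlong
// is stored as two 32-bit halves swapped across slot and slot+1, and
// reassembled in the same reversed order.
#include <cstdint>
#include <cstdio>

int main() {
  int32_t slots[2];                 // slots[0] = "slot", slots[1] = "slot+1"
  int64_t value = 0x1122334455667788LL;

  union { int64_t jl; int32_t array[2]; } x;

  // set_long_at(slot, value): the low-memory half of the jlong goes to the
  // higher slot index.
  x.jl = value;
  slots[1] = x.array[0];
  slots[0] = x.array[1];

  // long_at(slot): reassemble with the same reversed convention.
  union { int64_t jl; int32_t array[2]; } y;
  y.array[0] = slots[1];
  y.array[1] = slots[0];

  std::printf("%s\n", (y.jl == value) ? "round-trip ok" : "mismatch");
  return 0;
}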
diff --git a/src/share/vm/runtime/stackValueCollection.hpp b/src/share/vm/runtime/stackValueCollection.hpp
new file mode 100644
index 000000000..14de40517
--- /dev/null
+++ b/src/share/vm/runtime/stackValueCollection.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2001-2002 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class StackValueCollection : public ResourceObj {
+ private:
+ GrowableArray<StackValue*>* _values;
+
+ public:
+ StackValueCollection() { _values = new GrowableArray<StackValue*>(); }
+ StackValueCollection(int length) { _values = new GrowableArray<StackValue*>(length); }
+
+ void add(StackValue *val) const { _values->push(val); }
+ int size() const { return _values->length(); }
+ bool is_empty() const { return (size() == 0); }
+ StackValue* at(int i) const { return _values->at(i); }
+
+ // Get typed locals/expressions
+ jint int_at(int slot) const;
+ jlong long_at(int slot) const;
+ Handle obj_at(int slot) const;
+ jfloat float_at(int slot) const;
+ jdouble double_at(int slot) const;
+
+ // Set typed locals/expressions
+ void set_int_at(int slot, jint value);
+ void set_long_at(int slot, jlong value);
+ void set_obj_at(int slot, Handle value);
+ void set_float_at(int slot, jfloat value);
+ void set_double_at(int slot, jdouble value);
+
+ void print();
+};
diff --git a/src/share/vm/runtime/statSampler.cpp b/src/share/vm/runtime/statSampler.cpp
new file mode 100644
index 000000000..f7205b9e1
--- /dev/null
+++ b/src/share/vm/runtime/statSampler.cpp
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_statSampler.cpp.incl"
+
+// --------------------------------------------------------
+// StatSamplerTask
+
+class StatSamplerTask : public PeriodicTask {
+ public:
+ StatSamplerTask(int interval_time) : PeriodicTask(interval_time) {}
+ void task() { StatSampler::collect_sample(); }
+};
+
+
+//----------------------------------------------------------
+// Implementation of StatSampler
+
+StatSamplerTask* StatSampler::_task = NULL;
+PerfDataList* StatSampler::_sampled = NULL;
+
+/*
+ * the initialize method is called from the engage() method
+ * and is responsible for initializing various global variables.
+ */
+void StatSampler::initialize() {
+
+ if (!UsePerfData) return;
+
+ // create performance data that could not be created prior
+ // to vm_init_globals() or otherwise have no logical home.
+
+ create_misc_perfdata();
+
+ // get copy of the sampled list
+ _sampled = PerfDataManager::sampled();
+
+}
+
+/*
+ * The engage() method is called at initialization time via
+ * Thread::create_vm() to initialize the StatSampler and
+ * register it with the WatcherThread as a periodic task.
+ */
+void StatSampler::engage() {
+
+ if (!UsePerfData) return;
+
+ if (!is_active()) {
+
+ initialize();
+
+ // start up the periodic task
+ _task = new StatSamplerTask(PerfDataSamplingInterval);
+ _task->enroll();
+ }
+}
+
+
+/*
+ * the disengage() method is responsible for deactivating the periodic
+ * task and, if logging was enabled, for logging the final sample. This
+ * method is called from before_exit() in java.cpp and is only called
+ * after the WatcherThread has been stopped.
+ */
+void StatSampler::disengage() {
+
+ if (!UsePerfData) return;
+
+ if (!is_active())
+ return;
+
+ // remove StatSamplerTask
+ _task->disenroll();
+ delete _task;
+ _task = NULL;
+
+ // force a final sample
+ sample_data(_sampled);
+}
+
+/*
+ * the destroy method is responsible for releasing any resources used by
+ * the StatSampler prior to shutdown of the VM. This method is called from
+ * before_exit() in java.cpp and is only called after the WatcherThread
+ * has stopped.
+ */
+void StatSampler::destroy() {
+
+ if (!UsePerfData) return;
+
+ if (_sampled != NULL) {
+ delete(_sampled);
+ _sampled = NULL;
+ }
+}
+
+/*
+ * The sample_data() method is responsible for sampling the data
+ * value for each PerfData instance in the given list.
+ */
+void StatSampler::sample_data(PerfDataList* list) {
+
+ assert(list != NULL, "null list unexpected");
+
+ for (int index = 0; index < list->length(); index++) {
+ PerfData* item = list->at(index);
+ item->sample();
+ }
+}
+
+/*
+ * the collect_sample() method is the method invoked by the
+ * WatcherThread via the PeriodicTask::task() method. This method
+ * is responsible for collecting data samples from sampled
+ * PerfData instances every PerfDataSamplingInterval milliseconds.
+ * It is also responsible for logging the requested set of
+ * PerfData instances every _sample_count milliseconds. While
+ * logging data, it will output a column header after every _print_header
+ * rows of data have been logged.
+ */
+void StatSampler::collect_sample() {
+
+ // future - check for new PerfData objects. PerfData objects might
+ // get added to the PerfDataManager lists after we have already
+ // built our local copies.
+ //
+ // if (PerfDataManager::count() > previous) {
+ // // get a new copy of the sampled list
+ // if (_sampled != NULL) {
+ // delete(_sampled);
+ // _sampled = NULL;
+ // }
+ // _sampled = PerfDataManager::sampled();
+ // }
+
+ assert(_sampled != NULL, "list not initialized");
+
+ sample_data(_sampled);
+}
+
+/*
+ * method to upcall into Java to return the value of the specified
+ * property as a utf8 string, or NULL if it does not exist. The caller
+ * is responsible for setting a ResourceMark for proper cleanup of
+ * the utf8 strings.
+ */
+const char* StatSampler::get_system_property(const char* name, TRAPS) {
+
+ // setup the arguments to getProperty
+ Handle key_str = java_lang_String::create_from_str(name, CHECK_NULL);
+
+ // return value
+ JavaValue result(T_OBJECT);
+
+ // public static String getProperty(String key, String def);
+ JavaCalls::call_static(&result,
+ KlassHandle(THREAD, SystemDictionary::system_klass()),
+ vmSymbolHandles::getProperty_name(),
+ vmSymbolHandles::string_string_signature(),
+ key_str,
+ CHECK_NULL);
+
+ oop value_oop = (oop)result.get_jobject();
+ if (value_oop == NULL) {
+ return NULL;
+ }
+
+ // convert Java String to utf8 string
+ char* value = java_lang_String::as_utf8_string(value_oop);
+
+ return value;
+}
+
+/*
+ * The list of System Properties that have corresponding PerfData
+ * string instrumentation created by retrieving the named property's
+ * value from System.getProperty() and unconditionally creating a
+ * PerfStringConstant object initialized to the retrieved value. This
+ * is not an exhaustive list of Java properties with corresponding string
+ * instrumentation as the create_system_property_instrumentation() method
+ * creates other property based instrumentation conditionally.
+ */
+
+// stable interface, supported counters
+static const char* property_counters_ss[] = {
+ "java.vm.specification.version",
+ "java.vm.specification.name",
+ "java.vm.specification.vendor",
+ "java.vm.version",
+ "java.vm.name",
+ "java.vm.vendor",
+ "java.vm.info",
+ "java.library.path",
+ "java.class.path",
+ "java.endorsed.dirs",
+ "java.ext.dirs",
+ "java.home",
+ NULL
+};
+
+// unstable interface, supported counters
+static const char* property_counters_us[] = {
+ NULL
+};
+
+// unstable interface, unsupported counters
+static const char* property_counters_uu[] = {
+ "sun.boot.class.path",
+ "sun.boot.library.path",
+ NULL
+};
+
+typedef struct {
+ const char** property_list;
+ CounterNS name_space;
+} PropertyCounters;
+
+static PropertyCounters property_counters[] = {
+ { property_counters_ss, JAVA_PROPERTY },
+ { property_counters_us, COM_PROPERTY },
+ { property_counters_uu, SUN_PROPERTY },
+ { NULL, SUN_PROPERTY }
+};
+
+
+/*
+ * Method to create PerfData string instruments that contain the values
+ * of various system properties. String instruments are created for each
+ * property specified in the property lists provided in property_counters[].
+ * Property counters have a counter name space prefix prepended to the
+ * property name as indicated in property_counters[].
+ */
+void StatSampler::create_system_property_instrumentation(TRAPS) {
+
+ ResourceMark rm;
+
+ for (int i = 0; property_counters[i].property_list != NULL; i++) {
+
+ for (int j = 0; property_counters[i].property_list[j] != NULL; j++) {
+
+ const char* property_name = property_counters[i].property_list[j];
+ assert(property_name != NULL, "property name should not be NULL");
+
+ const char* value = get_system_property(property_name, CHECK);
+
+ // the property must exist
+ assert(value != NULL, "property name should be valid");
+
+ if (value != NULL) {
+ // create the property counter
+ PerfDataManager::create_string_constant(property_counters[i].name_space,
+ property_name, value, CHECK);
+ }
+ }
+ }
+}
+
+/*
+ * The create_misc_perfdata() method provides a place to create
+ * PerfData instances that would otherwise have no better place
+ * to exist.
+ */
+void StatSampler::create_misc_perfdata() {
+
+ ResourceMark rm;
+ EXCEPTION_MARK;
+
+ // numeric constants
+
+ // frequency of the native high resolution timer
+ PerfDataManager::create_constant(SUN_OS, "hrt.frequency",
+ PerfData::U_Hertz, os::elapsed_frequency(),
+ CHECK);
+
+ // string constants
+
+ // create string instrumentation for various Java properties.
+ create_system_property_instrumentation(CHECK);
+
+ // hotspot flags (from .hotspotrc) and args (from command line)
+ //
+ PerfDataManager::create_string_constant(JAVA_RT, "vmFlags",
+ Arguments::jvm_flags(), CHECK);
+ PerfDataManager::create_string_constant(JAVA_RT, "vmArgs",
+ Arguments::jvm_args(), CHECK);
+
+ // java class name/jar file and arguments to main class
+  // note: name is coordinated with launcher and Arguments.cpp
+ PerfDataManager::create_string_constant(SUN_RT, "javaCommand",
+ Arguments::java_command(), CHECK);
+
+ // the Java VM Internal version string
+ PerfDataManager::create_string_constant(SUN_RT, "internalVersion",
+ VM_Version::internal_vm_info_string(),
+ CHECK);
+
+ // create sampled instrumentation objects
+ create_sampled_perfdata();
+}
+
+/*
+ * helper class to provide for sampling of the elapsed_counter value
+ * maintained in the OS class.
+ */
+class HighResTimeSampler : public PerfSampleHelper {
+ public:
+ jlong take_sample() { return os::elapsed_counter(); }
+};
+
+/*
+ * the create_sampled_perfdata() method provides a place to instantiate
+ * sampled PerfData instances that would otherwise have no better place
+ * to exist.
+ */
+void StatSampler::create_sampled_perfdata() {
+
+ EXCEPTION_MARK;
+
+  // setup sampling of the elapsed time counter maintained in
+  // the os class. This counter can be used as either a time stamp
+ // for each logged entry or as a liveness indicator for the VM.
+ PerfSampleHelper* psh = new HighResTimeSampler();
+ PerfDataManager::create_counter(SUN_OS, "hrt.ticks",
+ PerfData::U_Ticks, psh, CHECK);
+}
+
+/*
+ * the statSampler_exit() function is called from os_init.cpp on
+ * exit of the vm.
+ */
+void statSampler_exit() {
+
+ if (!UsePerfData) return;
+
+ StatSampler::destroy();
+}
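collect_sample() is just a walk over the sampled PerfData list, driven periodically by a WatcherThread-enrolled task, and each sampled item pulls its value from a PerfSampleHelper such as HighResTimeSampler. The standalone sketch below models that shape with stand-ins: std::thread for the WatcherThread, a std::function for the sample helper, and a plain struct for the sampled PerfData.

// Standalone sketch of the sampling shape in statSampler.cpp: a watcher
// thread periodically walks a list of sampled items, and each item pulls its
// current value from a helper callback (the PerfSampleHelper role).
#include <atomic>
#include <chrono>
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

struct SampledItem {
  const char* name;
  std::function<long long()> take_sample;   // stand-in for PerfSampleHelper
  std::atomic<long long> value;
  SampledItem(const char* n, std::function<long long()> f)
      : name(n), take_sample(std::move(f)), value(0) {}
  void sample() { value.store(take_sample()); }   // what PerfData::sample() does
};

static void sample_data(const std::vector<SampledItem*>& list) {
  for (SampledItem* item : list) item->sample();  // StatSampler::sample_data()
}

int main() {
  // Roughly the "hrt.ticks" counter: sample a monotonic tick count.
  SampledItem ticks("hrt.ticks", [] {
    return (long long)std::chrono::steady_clock::now().time_since_epoch().count();
  });
  std::vector<SampledItem*> sampled{ &ticks };

  std::atomic<bool> stop(false);
  std::thread watcher([&] {                       // WatcherThread stand-in
    while (!stop) {
      sample_data(sampled);                       // collect_sample()
      std::this_thread::sleep_for(std::chrono::milliseconds(50));
    }
  });

  std::this_thread::sleep_for(std::chrono::milliseconds(200));
  stop = true;
  watcher.join();
  sample_data(sampled);                           // one final sample, as disengage() does
  std::printf("%s = %lld\n", ticks.name, (long long)ticks.value.load());
  return 0;
}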
diff --git a/src/share/vm/runtime/statSampler.hpp b/src/share/vm/runtime/statSampler.hpp
new file mode 100644
index 000000000..173825cde
--- /dev/null
+++ b/src/share/vm/runtime/statSampler.hpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2001-2002 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class StatSamplerTask;
+
+/*
+ * The StatSampler class is responsible for periodically updating
+ * sampled PerfData instances and writing the sampled values to the
+ * PerfData memory region.
+ *
+ * In addition it is also responsible for providing a home for
+ * PerfData instances that otherwise have no better home.
+ */
+class StatSampler : AllStatic {
+
+ friend class StatSamplerTask;
+
+ private:
+
+ static StatSamplerTask* _task;
+ static PerfDataList* _sampled;
+
+ static void collect_sample();
+ static void create_misc_perfdata();
+ static void create_sampled_perfdata();
+ static void sample_data(PerfDataList* list);
+ static const char* get_system_property(const char* name, TRAPS);
+ static void create_system_property_instrumentation(TRAPS);
+
+ public:
+ // Start/stop the sampler
+ static void engage();
+ static void disengage();
+
+ static bool is_active() { return _task != NULL; }
+
+ static void initialize();
+ static void destroy();
+};
+
+void statSampler_exit();
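The engage()/disengage()/is_active() trio above keys everything off a single static task pointer. A standalone model of that lifecycle (PeriodicTask here is a stand-in whose enroll()/disenroll() merely print; in the VM they register the task with the WatcherThread):

// Standalone model (not HotSpot code) of the StatSampler lifecycle: a static
// task pointer is the only state that decides whether the sampler is active.
#include <cstdio>

struct PeriodicTask {
  int interval_ms;
  explicit PeriodicTask(int ms) : interval_ms(ms) {}
  void enroll()    { std::printf("enrolled, every %d ms\n", interval_ms); }
  void disenroll() { std::printf("disenrolled\n"); }
};

struct Sampler {                       // AllStatic-style: only static members
  static PeriodicTask* _task;
  static bool is_active() { return _task != nullptr; }

  static void engage(int interval_ms) {
    if (is_active()) return;           // idempotent, like StatSampler::engage()
    _task = new PeriodicTask(interval_ms);
    _task->enroll();
  }

  static void disengage() {
    if (!is_active()) return;
    _task->disenroll();
    delete _task;
    _task = nullptr;
    // the real sampler also forces one final sample here
  }
};

PeriodicTask* Sampler::_task = nullptr;

int main() {
  Sampler::engage(50);
  std::printf("active: %d\n", Sampler::is_active());   // 1
  Sampler::disengage();
  std::printf("active: %d\n", Sampler::is_active());   // 0
  return 0;
}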
diff --git a/src/share/vm/runtime/stubCodeGenerator.cpp b/src/share/vm/runtime/stubCodeGenerator.cpp
new file mode 100644
index 000000000..bbdd6898b
--- /dev/null
+++ b/src/share/vm/runtime/stubCodeGenerator.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright 1997-2004 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_stubCodeGenerator.cpp.incl"
+
+
+// Implementation of StubCodeDesc
+
+StubCodeDesc* StubCodeDesc::_list = NULL;
+int StubCodeDesc::_count = 0;
+
+
+StubCodeDesc* StubCodeDesc::desc_for(address pc) {
+ StubCodeDesc* p = _list;
+ while (p != NULL && !p->contains(pc)) p = p->_next;
+ // p == NULL || p->contains(pc)
+ return p;
+}
+
+
+StubCodeDesc* StubCodeDesc::desc_for_index(int index) {
+ StubCodeDesc* p = _list;
+ while (p != NULL && p->index() != index) p = p->_next;
+ return p;
+}
+
+
+const char* StubCodeDesc::name_for(address pc) {
+ StubCodeDesc* p = desc_for(pc);
+ return p == NULL ? NULL : p->name();
+}
+
+
+void StubCodeDesc::print() {
+ tty->print(group());
+ tty->print("::");
+ tty->print(name());
+ tty->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT "[ (%d bytes)", begin(), end(), size_in_bytes());
+}
+
+
+
+// Implementation of StubCodeGenerator
+
+StubCodeGenerator::StubCodeGenerator(CodeBuffer* code) {
+ _masm = new MacroAssembler(code);
+ _first_stub = _last_stub = NULL;
+}
+
+#ifndef PRODUCT
+extern "C" {
+ static int compare_cdesc(const void* void_a, const void* void_b) {
+ int ai = (*((StubCodeDesc**) void_a))->index();
+ int bi = (*((StubCodeDesc**) void_b))->index();
+ return ai - bi;
+ }
+}
+#endif
+
+StubCodeGenerator::~StubCodeGenerator() {
+#ifndef PRODUCT
+ if (PrintStubCode) {
+ CodeBuffer* cbuf = _masm->code();
+ CodeBlob* blob = CodeCache::find_blob_unsafe(cbuf->insts()->start());
+ if (blob != NULL) {
+ blob->set_comments(cbuf->comments());
+ }
+ bool saw_first = false;
+ StubCodeDesc* toprint[1000];
+ int toprint_len = 0;
+ for (StubCodeDesc* cdesc = _last_stub; cdesc != NULL; cdesc = cdesc->_next) {
+ toprint[toprint_len++] = cdesc;
+ if (cdesc == _first_stub) { saw_first = true; break; }
+ }
+ assert(saw_first, "must get both first & last");
+ // Print in reverse order:
+ qsort(toprint, toprint_len, sizeof(toprint[0]), compare_cdesc);
+ for (int i = 0; i < toprint_len; i++) {
+ StubCodeDesc* cdesc = toprint[i];
+ cdesc->print();
+ tty->cr();
+ Disassembler::decode(cdesc->begin(), cdesc->end());
+ tty->cr();
+ }
+ }
+#endif //PRODUCT
+}
+
+
+void StubCodeGenerator::stub_prolog(StubCodeDesc* cdesc) {
+ // default implementation - do nothing
+}
+
+
+void StubCodeGenerator::stub_epilog(StubCodeDesc* cdesc) {
+ // default implementation - record the cdesc
+ if (_first_stub == NULL) _first_stub = cdesc;
+ _last_stub = cdesc;
+}
+
+
+// Implementation of CodeMark
+
+StubCodeMark::StubCodeMark(StubCodeGenerator* cgen, const char* group, const char* name) {
+ _cgen = cgen;
+ _cdesc = new StubCodeDesc(group, name, _cgen->assembler()->pc());
+ _cgen->stub_prolog(_cdesc);
+ // define the stub's beginning (= entry point) to be after the prolog:
+ _cdesc->set_begin(_cgen->assembler()->pc());
+}
+
+StubCodeMark::~StubCodeMark() {
+ _cgen->assembler()->flush();
+ _cdesc->set_end(_cgen->assembler()->pc());
+ assert(StubCodeDesc::_list == _cdesc, "expected order on list");
+ _cgen->stub_epilog(_cdesc);
+ VTune::register_stub(_cdesc->name(), _cdesc->begin(), _cdesc->end());
+ Forte::register_stub(_cdesc->name(), _cdesc->begin(), _cdesc->end());
+
+ if (JvmtiExport::should_post_dynamic_code_generated()) {
+ JvmtiExport::post_dynamic_code_generated(_cdesc->name(), _cdesc->begin(), _cdesc->end());
+ }
+}
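StubCodeMark is an RAII bracket: the constructor records the stub's entry point and creates a descriptor, and the destructor records the end address, after which StubCodeDesc::desc_for(pc)/name_for(pc) can map an address back to a stub. A standalone model of that pattern (integer offsets into a byte vector stand in for real code addresses; the VTune/Forte/JVMTI notifications are omitted):

// Standalone model of the StubCodeDesc / StubCodeMark pattern: an RAII mark
// brackets the code emitted for one stub and records it in a linked list so
// that an address can later be mapped back to a stub name.
#include <cstdio>
#include <vector>

struct StubDesc {
  static StubDesc* _list;          // head of the linked list of descriptors
  StubDesc*   _next;
  const char* _name;
  int         _begin, _end;        // [begin, end) offsets into the "code buffer"
  StubDesc(const char* name, int begin)
      : _next(_list), _name(name), _begin(begin), _end(begin) { _list = this; }
  bool contains(int pc) const { return _begin <= pc && pc < _end; }
  static const char* name_for(int pc) {
    for (StubDesc* p = _list; p != nullptr; p = p->_next)
      if (p->contains(pc)) return p->_name;
    return nullptr;
  }
};
StubDesc* StubDesc::_list = nullptr;

struct Generator {                 // stand-in for StubCodeGenerator/MacroAssembler
  std::vector<unsigned char> code;
  int pc() const { return (int)code.size(); }
  void emit(unsigned char b) { code.push_back(b); }
};

struct StubMark {                  // stand-in for StubCodeMark
  Generator* _gen;
  StubDesc*  _desc;
  StubMark(Generator* gen, const char* name)
      : _gen(gen), _desc(new StubDesc(name, gen->pc())) {}
  ~StubMark() { _desc->_end = _gen->pc(); }   // close the range on scope exit
};

int main() {
  Generator gen;
  {
    StubMark mark(&gen, "call_stub");
    gen.emit(0x90); gen.emit(0x90); gen.emit(0xC3);   // pretend-generated code
  }
  {
    StubMark mark(&gen, "forward_exception");
    gen.emit(0xCC);
  }
  std::printf("%s\n", StubDesc::name_for(1));   // call_stub
  std::printf("%s\n", StubDesc::name_for(3));   // forward_exception
  return 0;
}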
diff --git a/src/share/vm/runtime/stubCodeGenerator.hpp b/src/share/vm/runtime/stubCodeGenerator.hpp
new file mode 100644
index 000000000..530d954cd
--- /dev/null
+++ b/src/share/vm/runtime/stubCodeGenerator.hpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright 1997-2000 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// All the basic framework for stubcode generation/debugging/printing.
+
+
+// A StubCodeDesc describes a piece of generated code (usually stubs).
+// This information is mainly useful for debugging and printing.
+// Currently, code descriptors are simply chained in a linked list;
+// this may have to change if searching becomes too slow.
+
+class StubCodeDesc: public CHeapObj {
+ protected:
+ static StubCodeDesc* _list; // the list of all descriptors
+ static int _count; // length of list
+
+ StubCodeDesc* _next; // the next element in the linked list
+ const char* _group; // the group to which the stub code belongs
+ const char* _name; // the name assigned to the stub code
+ int _index; // serial number assigned to the stub
+ address _begin; // points to the first byte of the stub code (included)
+ address _end; // points to the first byte after the stub code (excluded)
+
+ void set_end(address end) {
+ assert(_begin <= end, "begin & end not properly ordered");
+ _end = end;
+ }
+
+ void set_begin(address begin) {
+ assert(begin >= _begin, "begin may not decrease");
+ assert(_end == NULL || begin <= _end, "begin & end not properly ordered");
+ _begin = begin;
+ }
+
+ friend class StubCodeMark;
+ friend class StubCodeGenerator;
+
+ public:
+ static StubCodeDesc* desc_for(address pc); // returns the code descriptor for the code containing pc or NULL
+ static StubCodeDesc* desc_for_index(int); // returns the code descriptor for the index or NULL
+ static const char* name_for(address pc); // returns the name of the code containing pc or NULL
+
+ StubCodeDesc(const char* group, const char* name, address begin) {
+ assert(name != NULL, "no name specified");
+ _next = _list;
+ _group = group;
+ _name = name;
+ _index = ++_count; // (never zero)
+ _begin = begin;
+ _end = NULL;
+ _list = this;
+ };
+
+ const char* group() const { return _group; }
+ const char* name() const { return _name; }
+ int index() const { return _index; }
+ address begin() const { return _begin; }
+ address end() const { return _end; }
+ int size_in_bytes() const { return _end - _begin; }
+ bool contains(address pc) const { return _begin <= pc && pc < _end; }
+ void print();
+};
+
+// The base class for all stub-generating code generators.
+// Provides utility functions.
+
+class StubCodeGenerator: public StackObj {
+ protected:
+ MacroAssembler* _masm;
+
+ StubCodeDesc* _first_stub;
+ StubCodeDesc* _last_stub;
+
+ public:
+ StubCodeGenerator(CodeBuffer* code);
+ ~StubCodeGenerator();
+
+ MacroAssembler* assembler() const { return _masm; }
+
+ virtual void stub_prolog(StubCodeDesc* cdesc); // called by StubCodeMark constructor
+ virtual void stub_epilog(StubCodeDesc* cdesc); // called by StubCodeMark destructor
+};
+
+
+// Stack-allocated helper class used to associate a stub code with a name.
+// All stub code generating functions that use a StubCodeMark will be registered
+// in the global StubCodeDesc list and the generated stub code can be identified
+// later via an address pointing into it.
+
+class StubCodeMark: public StackObj {
+ protected:
+ StubCodeGenerator* _cgen;
+ StubCodeDesc* _cdesc;
+
+ public:
+ StubCodeMark(StubCodeGenerator* cgen, const char* group, const char* name);
+ ~StubCodeMark();
+
+};
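+
+// A minimal usage sketch (hypothetical; the real generator functions live in
+// the platform-specific stubGenerator_<arch>.cpp files):
+//
+//   address generate_foo(StubCodeGenerator* cgen) {
+//     StubCodeMark mark(cgen, "StubRoutines", "foo");  // registers a StubCodeDesc
+//     address entry = cgen->assembler()->pc();         // entry point after the prolog
+//     // ... emit the stub code through cgen->assembler() ...
+//     return entry;
+//   }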
diff --git a/src/share/vm/runtime/stubRoutines.cpp b/src/share/vm/runtime/stubRoutines.cpp
new file mode 100644
index 000000000..63b30d86d
--- /dev/null
+++ b/src/share/vm/runtime/stubRoutines.cpp
@@ -0,0 +1,277 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_stubRoutines.cpp.incl"
+
+
+// Implementation of StubRoutines - for a description
+// of how to extend it, see the header file.
+
+// Class Variables
+
+BufferBlob* StubRoutines::_code1 = NULL;
+BufferBlob* StubRoutines::_code2 = NULL;
+
+address StubRoutines::_call_stub_return_address = NULL;
+address StubRoutines::_call_stub_entry = NULL;
+
+address StubRoutines::_catch_exception_entry = NULL;
+address StubRoutines::_forward_exception_entry = NULL;
+address StubRoutines::_throw_AbstractMethodError_entry = NULL;
+address StubRoutines::_throw_ArithmeticException_entry = NULL;
+address StubRoutines::_throw_NullPointerException_entry = NULL;
+address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
+address StubRoutines::_throw_StackOverflowError_entry = NULL;
+address StubRoutines::_handler_for_unsafe_access_entry = NULL;
+jint StubRoutines::_verify_oop_count = 0;
+address StubRoutines::_verify_oop_subroutine_entry = NULL;
+address StubRoutines::_atomic_xchg_entry = NULL;
+address StubRoutines::_atomic_xchg_ptr_entry = NULL;
+address StubRoutines::_atomic_store_entry = NULL;
+address StubRoutines::_atomic_store_ptr_entry = NULL;
+address StubRoutines::_atomic_cmpxchg_entry = NULL;
+address StubRoutines::_atomic_cmpxchg_ptr_entry = NULL;
+address StubRoutines::_atomic_cmpxchg_long_entry = NULL;
+address StubRoutines::_atomic_add_entry = NULL;
+address StubRoutines::_atomic_add_ptr_entry = NULL;
+address StubRoutines::_fence_entry = NULL;
+address StubRoutines::_d2i_wrapper = NULL;
+address StubRoutines::_d2l_wrapper = NULL;
+
+jint StubRoutines::_fpu_cntrl_wrd_std = 0;
+jint StubRoutines::_fpu_cntrl_wrd_24 = 0;
+jint StubRoutines::_fpu_cntrl_wrd_64 = 0;
+jint StubRoutines::_fpu_cntrl_wrd_trunc = 0;
+jint StubRoutines::_mxcsr_std = 0;
+jint StubRoutines::_fpu_subnormal_bias1[3] = { 0, 0, 0 };
+jint StubRoutines::_fpu_subnormal_bias2[3] = { 0, 0, 0 };
+
+// Compiled code entry points default values
+// The default functions don't have separate disjoint versions.
+address StubRoutines::_jbyte_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jbyte_copy);
+address StubRoutines::_jshort_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jshort_copy);
+address StubRoutines::_jint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jint_copy);
+address StubRoutines::_jlong_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jlong_copy);
+address StubRoutines::_oop_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy);
+address StubRoutines::_jbyte_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jbyte_copy);
+address StubRoutines::_jshort_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jshort_copy);
+address StubRoutines::_jint_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jint_copy);
+address StubRoutines::_jlong_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jlong_copy);
+address StubRoutines::_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy);
+
+address StubRoutines::_arrayof_jbyte_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jbyte_copy);
+address StubRoutines::_arrayof_jshort_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jshort_copy);
+address StubRoutines::_arrayof_jint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jint_copy);
+address StubRoutines::_arrayof_jlong_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jlong_copy);
+address StubRoutines::_arrayof_oop_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy);
+address StubRoutines::_arrayof_jbyte_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jbyte_copy);
+address StubRoutines::_arrayof_jshort_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jshort_copy);
+address StubRoutines::_arrayof_jint_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jint_copy);
+address StubRoutines::_arrayof_jlong_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jlong_copy);
+address StubRoutines::_arrayof_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy);
+
+address StubRoutines::_checkcast_arraycopy = NULL;
+address StubRoutines::_unsafe_arraycopy = NULL;
+address StubRoutines::_generic_arraycopy = NULL;
+
+// Initialization
+//
+// Note: to break cycle with universe initialization, stubs are generated in two phases.
+// The first one generates stubs needed during universe init (e.g., _handle_must_compile_first_entry).
+// The second phase includes all other stubs (which may depend on universe being initialized.)
+
+extern void StubGenerator_generate(CodeBuffer* code, bool all); // only interface to generators
+
+void StubRoutines::initialize1() {
+ if (_code1 == NULL) {
+ ResourceMark rm;
+ TraceTime timer("StubRoutines generation 1", TraceStartupTime);
+ _code1 = BufferBlob::create("StubRoutines (1)", code_size1);
+ if( _code1 == NULL) vm_exit_out_of_memory1(code_size1, "CodeCache: no room for %s", "StubRoutines (1)");
+ CodeBuffer buffer(_code1->instructions_begin(), _code1->instructions_size());
+ StubGenerator_generate(&buffer, false);
+ }
+}
+
+
+#ifdef ASSERT
+typedef void (*arraycopy_fn)(address src, address dst, int count);
+
+// simple tests of generated arraycopy functions
+static void test_arraycopy_func(address func, int alignment) {
+ int v = 0xcc;
+ int v2 = 0x11;
+ jlong lbuffer[2];
+ jlong lbuffer2[2];
+ address buffer = (address) lbuffer;
+ address buffer2 = (address) lbuffer2;
+ unsigned int i;
+ for (i = 0; i < sizeof(lbuffer); i++) {
+ buffer[i] = v; buffer2[i] = v2;
+ }
+ // do an aligned copy
+ ((arraycopy_fn)func)(buffer, buffer2, 0);
+ for (i = 0; i < sizeof(lbuffer); i++) {
+ assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything");
+ }
+ // adjust destination alignment
+ ((arraycopy_fn)func)(buffer, buffer2 + alignment, 0);
+ for (i = 0; i < sizeof(lbuffer); i++) {
+ assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything");
+ }
+ // adjust source alignment
+ ((arraycopy_fn)func)(buffer + alignment, buffer2, 0);
+ for (i = 0; i < sizeof(lbuffer); i++) {
+ assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything");
+ }
+}
+#endif
+
+
+void StubRoutines::initialize2() {
+ if (_code2 == NULL) {
+ ResourceMark rm;
+ TraceTime timer("StubRoutines generation 2", TraceStartupTime);
+ _code2 = BufferBlob::create("StubRoutines (2)", code_size2);
+ if( _code2 == NULL) vm_exit_out_of_memory1(code_size2, "CodeCache: no room for %s", "StubRoutines (2)");
+ CodeBuffer buffer(_code2->instructions_begin(), _code2->instructions_size());
+ StubGenerator_generate(&buffer, true);
+ }
+
+#ifdef ASSERT
+
+#define TEST_ARRAYCOPY(type) \
+ test_arraycopy_func( type##_arraycopy(), sizeof(type)); \
+ test_arraycopy_func( type##_disjoint_arraycopy(), sizeof(type)); \
+ test_arraycopy_func(arrayof_##type##_arraycopy(), sizeof(HeapWord)); \
+ test_arraycopy_func(arrayof_##type##_disjoint_arraycopy(), sizeof(HeapWord))
+
+ // Make sure all the arraycopy stubs properly handle zeros
+ TEST_ARRAYCOPY(jbyte);
+ TEST_ARRAYCOPY(jshort);
+ TEST_ARRAYCOPY(jint);
+ TEST_ARRAYCOPY(jlong);
+
+#undef TEST_ARRAYCOPY
+
+#endif
+}
+
+
+void stubRoutines_init1() { StubRoutines::initialize1(); }
+void stubRoutines_init2() { StubRoutines::initialize2(); }
+
+//
+// Default versions of arraycopy functions
+//
+
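+// Tell the heap's barrier set that the oop range [dest, dest + count) was
+// written, so the GC sees the updated references (e.g., by dirtying cards).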
+static void gen_arraycopy_barrier(oop* dest, size_t count) {
+ assert(count != 0, "count should be non-zero");
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+ bs->write_ref_array(MemRegion((HeapWord*)dest, (HeapWord*)(dest + count)));
+}
+
+JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
+#ifndef PRODUCT
+ SharedRuntime::_jbyte_array_copy_ctr++; // Slow-path byte array copy
+#endif // !PRODUCT
+ assert(count != 0, "count should be non-zero");
+ Copy::conjoint_bytes_atomic(src, dest, count);
+JRT_END
+
+JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
+#ifndef PRODUCT
+ SharedRuntime::_jshort_array_copy_ctr++; // Slow-path short/char array copy
+#endif // !PRODUCT
+ assert(count != 0, "count should be non-zero");
+ Copy::conjoint_jshorts_atomic(src, dest, count);
+JRT_END
+
+JRT_LEAF(void, StubRoutines::jint_copy(jint* src, jint* dest, size_t count))
+#ifndef PRODUCT
+ SharedRuntime::_jint_array_copy_ctr++; // Slow-path int/float array copy
+#endif // !PRODUCT
+ assert(count != 0, "count should be non-zero");
+ Copy::conjoint_jints_atomic(src, dest, count);
+JRT_END
+
+JRT_LEAF(void, StubRoutines::jlong_copy(jlong* src, jlong* dest, size_t count))
+#ifndef PRODUCT
+ SharedRuntime::_jlong_array_copy_ctr++; // Slow-path long/double array copy
+#endif // !PRODUCT
+ assert(count != 0, "count should be non-zero");
+ Copy::conjoint_jlongs_atomic(src, dest, count);
+JRT_END
+
+JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
+#ifndef PRODUCT
+ SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy
+#endif // !PRODUCT
+ assert(count != 0, "count should be non-zero");
+ Copy::conjoint_oops_atomic(src, dest, count);
+ gen_arraycopy_barrier(dest, count);
+JRT_END
+
+JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
+#ifndef PRODUCT
+ SharedRuntime::_jbyte_array_copy_ctr++; // Slow-path byte array copy
+#endif // !PRODUCT
+ assert(count != 0, "count should be non-zero");
+ Copy::arrayof_conjoint_bytes(src, dest, count);
+JRT_END
+
+JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
+#ifndef PRODUCT
+ SharedRuntime::_jshort_array_copy_ctr++; // Slow-path short/char array copy
+#endif // !PRODUCT
+ assert(count != 0, "count should be non-zero");
+ Copy::arrayof_conjoint_jshorts(src, dest, count);
+JRT_END
+
+JRT_LEAF(void, StubRoutines::arrayof_jint_copy(HeapWord* src, HeapWord* dest, size_t count))
+#ifndef PRODUCT
+ SharedRuntime::_jint_array_copy_ctr++; // Slow-path int/float array copy
+#endif // !PRODUCT
+ assert(count != 0, "count should be non-zero");
+ Copy::arrayof_conjoint_jints(src, dest, count);
+JRT_END
+
+JRT_LEAF(void, StubRoutines::arrayof_jlong_copy(HeapWord* src, HeapWord* dest, size_t count))
+#ifndef PRODUCT
+  SharedRuntime::_jlong_array_copy_ctr++;        // Slow-path long/double array copy
+#endif // !PRODUCT
+ assert(count != 0, "count should be non-zero");
+ Copy::arrayof_conjoint_jlongs(src, dest, count);
+JRT_END
+
+JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, size_t count))
+#ifndef PRODUCT
+ SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy
+#endif // !PRODUCT
+ assert(count != 0, "count should be non-zero");
+ Copy::arrayof_conjoint_oops(src, dest, count);
+ gen_arraycopy_barrier((oop *) dest, count);
+JRT_END
diff --git a/src/share/vm/runtime/stubRoutines.hpp b/src/share/vm/runtime/stubRoutines.hpp
new file mode 100644
index 000000000..17246a8bc
--- /dev/null
+++ b/src/share/vm/runtime/stubRoutines.hpp
@@ -0,0 +1,261 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// StubRoutines provides entry points to assembly routines used by
+// compiled code and the run-time system. Platform-specific entry
+// points are defined in the platform-specific inner class.
+//
+// Class scheme:
+//
+// platform-independent platform-dependent
+//
+// stubRoutines.hpp <-- included -- stubRoutines_<arch>.hpp
+// ^ ^
+// | |
+// implements implements
+// | |
+// | |
+// stubRoutines.cpp stubRoutines_<arch>.cpp
+// stubRoutines_<os_family>.cpp stubGenerator_<arch>.cpp
+// stubRoutines_<os_arch>.cpp
+//
+// Note 1: The important thing is a clean decoupling between stub
+// entry points (interfacing to the whole vm; i.e., 1-to-n
+// relationship) and stub generators (interfacing only to
+// the entry points implementation; i.e., 1-to-1 relationship).
+// This significantly simplifies changes in the generator
+// structure since the rest of the vm is not affected.
+//
+// Note 2: stubGenerator_<arch>.cpp contains a minimal portion of
+// machine-independent code; namely the generator calls of
+// the generator functions that are used platform-independently.
+// However, it comes with the advantage of having a 1-file
+// implementation of the generator. It should be fairly easy
+// to change, should it become a problem later.
+//
+// Scheme for adding a new entry point (a minimal sketch follows the list):
+//
+// 1. determine if it's a platform-dependent or independent entry point
+// a) if platform independent: make subsequent changes in the independent files
+// b) if platform dependent: make subsequent changes in the dependent files
+// 2. add a private instance variable holding the entry point address
+// 3. add a public accessor function to the instance variable
+// 4. implement the corresponding generator function in the platform-dependent
+// stubGenerator_<arch>.cpp file and call the function in generate_all() of that file
+
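+// A minimal sketch of steps 2 and 3 for a hypothetical platform-independent
+// entry point "foo" (illustrative only; the real entry points and accessors
+// are declared in the class below):
+//
+//   static address _foo_entry;                               // step 2
+//   static address foo_entry()      { return _foo_entry; }   // step 3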
+
+class StubRoutines: AllStatic {
+
+ public:
+ enum platform_independent_constants {
+ max_size_of_parameters = 256 // max. parameter size supported by megamorphic lookups
+ };
+
+ // Dependencies
+ friend class StubGenerator;
+ #include "incls/_stubRoutines_pd.hpp.incl" // machine-specific parts
+
+ static jint _verify_oop_count;
+ static address _verify_oop_subroutine_entry;
+
+ static address _call_stub_return_address; // the return PC, when returning to a call stub
+ static address _call_stub_entry;
+ static address _forward_exception_entry;
+ static address _catch_exception_entry;
+ static address _throw_AbstractMethodError_entry;
+ static address _throw_ArithmeticException_entry;
+ static address _throw_NullPointerException_entry;
+ static address _throw_NullPointerException_at_call_entry;
+ static address _throw_StackOverflowError_entry;
+ static address _handler_for_unsafe_access_entry;
+
+ static address _atomic_xchg_entry;
+ static address _atomic_xchg_ptr_entry;
+ static address _atomic_store_entry;
+ static address _atomic_store_ptr_entry;
+ static address _atomic_cmpxchg_entry;
+ static address _atomic_cmpxchg_ptr_entry;
+ static address _atomic_cmpxchg_long_entry;
+ static address _atomic_add_entry;
+ static address _atomic_add_ptr_entry;
+ static address _fence_entry;
+ static address _d2i_wrapper;
+ static address _d2l_wrapper;
+
+ static jint _fpu_cntrl_wrd_std;
+ static jint _fpu_cntrl_wrd_24;
+ static jint _fpu_cntrl_wrd_64;
+ static jint _fpu_cntrl_wrd_trunc;
+ static jint _mxcsr_std;
+ static jint _fpu_subnormal_bias1[3];
+ static jint _fpu_subnormal_bias2[3];
+
+ static BufferBlob* _code1; // code buffer for initial routines
+ static BufferBlob* _code2; // code buffer for all other routines
+
+ // Leaf routines which implement arraycopy and their addresses
+ // arraycopy operands aligned on element type boundary
+ static address _jbyte_arraycopy;
+ static address _jshort_arraycopy;
+ static address _jint_arraycopy;
+ static address _jlong_arraycopy;
+ static address _oop_arraycopy;
+ static address _jbyte_disjoint_arraycopy;
+ static address _jshort_disjoint_arraycopy;
+ static address _jint_disjoint_arraycopy;
+ static address _jlong_disjoint_arraycopy;
+ static address _oop_disjoint_arraycopy;
+
+ // arraycopy operands aligned on zero'th element boundary
+  // These are identical to the ones aligned on an
+ // element type boundary, except that they assume that both
+ // source and destination are HeapWord aligned.
+ static address _arrayof_jbyte_arraycopy;
+ static address _arrayof_jshort_arraycopy;
+ static address _arrayof_jint_arraycopy;
+ static address _arrayof_jlong_arraycopy;
+ static address _arrayof_oop_arraycopy;
+ static address _arrayof_jbyte_disjoint_arraycopy;
+ static address _arrayof_jshort_disjoint_arraycopy;
+ static address _arrayof_jint_disjoint_arraycopy;
+ static address _arrayof_jlong_disjoint_arraycopy;
+ static address _arrayof_oop_disjoint_arraycopy;
+
+ // these are recommended but optional:
+ static address _checkcast_arraycopy;
+ static address _unsafe_arraycopy;
+ static address _generic_arraycopy;
+
+ public:
+ // Initialization/Testing
+ static void initialize1(); // must happen before universe::genesis
+ static void initialize2(); // must happen after universe::genesis
+
+ static bool contains(address addr) {
+ return
+ (_code1 != NULL && _code1->blob_contains(addr)) ||
+ (_code2 != NULL && _code2->blob_contains(addr)) ;
+ }
+
+ // Debugging
+ static jint verify_oop_count() { return _verify_oop_count; }
+ static jint* verify_oop_count_addr() { return &_verify_oop_count; }
+ // a subroutine for debugging the GC
+ static address verify_oop_subroutine_entry_address() { return (address)&_verify_oop_subroutine_entry; }
+
+ static address catch_exception_entry() { return _catch_exception_entry; }
+
+ // Calls to Java
+ typedef void (*CallStub)(
+ address link,
+ intptr_t* result,
+ BasicType result_type,
+ methodOopDesc* method,
+ address entry_point,
+ intptr_t* parameters,
+ int size_of_parameters,
+ TRAPS
+ );
+
+ static CallStub call_stub() { return CAST_TO_FN_PTR(CallStub, _call_stub_entry); }
+
+ // Exceptions
+ static address forward_exception_entry() { return _forward_exception_entry; }
+ // Implicit exceptions
+ static address throw_AbstractMethodError_entry() { return _throw_AbstractMethodError_entry; }
+ static address throw_ArithmeticException_entry() { return _throw_ArithmeticException_entry; }
+ static address throw_NullPointerException_entry() { return _throw_NullPointerException_entry; }
+ static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; }
+ static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; }
+
+ // Exceptions during unsafe access - should throw Java exception rather
+ // than crash.
+ static address handler_for_unsafe_access() { return _handler_for_unsafe_access_entry; }
+
+ static address atomic_xchg_entry() { return _atomic_xchg_entry; }
+ static address atomic_xchg_ptr_entry() { return _atomic_xchg_ptr_entry; }
+ static address atomic_store_entry() { return _atomic_store_entry; }
+ static address atomic_store_ptr_entry() { return _atomic_store_ptr_entry; }
+ static address atomic_cmpxchg_entry() { return _atomic_cmpxchg_entry; }
+ static address atomic_cmpxchg_ptr_entry() { return _atomic_cmpxchg_ptr_entry; }
+ static address atomic_cmpxchg_long_entry() { return _atomic_cmpxchg_long_entry; }
+ static address atomic_add_entry() { return _atomic_add_entry; }
+ static address atomic_add_ptr_entry() { return _atomic_add_ptr_entry; }
+ static address fence_entry() { return _fence_entry; }
+
+ static address d2i_wrapper() { return _d2i_wrapper; }
+ static address d2l_wrapper() { return _d2l_wrapper; }
+ static jint fpu_cntrl_wrd_std() { return _fpu_cntrl_wrd_std; }
+ static address addr_fpu_cntrl_wrd_std() { return (address)&_fpu_cntrl_wrd_std; }
+ static address addr_fpu_cntrl_wrd_24() { return (address)&_fpu_cntrl_wrd_24; }
+ static address addr_fpu_cntrl_wrd_64() { return (address)&_fpu_cntrl_wrd_64; }
+ static address addr_fpu_cntrl_wrd_trunc() { return (address)&_fpu_cntrl_wrd_trunc; }
+ static address addr_mxcsr_std() { return (address)&_mxcsr_std; }
+ static address addr_fpu_subnormal_bias1() { return (address)&_fpu_subnormal_bias1; }
+ static address addr_fpu_subnormal_bias2() { return (address)&_fpu_subnormal_bias2; }
+
+
+ static address jbyte_arraycopy() { return _jbyte_arraycopy; }
+ static address jshort_arraycopy() { return _jshort_arraycopy; }
+ static address jint_arraycopy() { return _jint_arraycopy; }
+ static address jlong_arraycopy() { return _jlong_arraycopy; }
+ static address oop_arraycopy() { return _oop_arraycopy; }
+ static address jbyte_disjoint_arraycopy() { return _jbyte_disjoint_arraycopy; }
+ static address jshort_disjoint_arraycopy() { return _jshort_disjoint_arraycopy; }
+ static address jint_disjoint_arraycopy() { return _jint_disjoint_arraycopy; }
+ static address jlong_disjoint_arraycopy() { return _jlong_disjoint_arraycopy; }
+ static address oop_disjoint_arraycopy() { return _oop_disjoint_arraycopy; }
+
+ static address arrayof_jbyte_arraycopy() { return _arrayof_jbyte_arraycopy; }
+ static address arrayof_jshort_arraycopy() { return _arrayof_jshort_arraycopy; }
+ static address arrayof_jint_arraycopy() { return _arrayof_jint_arraycopy; }
+ static address arrayof_jlong_arraycopy() { return _arrayof_jlong_arraycopy; }
+ static address arrayof_oop_arraycopy() { return _arrayof_oop_arraycopy; }
+
+ static address arrayof_jbyte_disjoint_arraycopy() { return _arrayof_jbyte_disjoint_arraycopy; }
+ static address arrayof_jshort_disjoint_arraycopy() { return _arrayof_jshort_disjoint_arraycopy; }
+ static address arrayof_jint_disjoint_arraycopy() { return _arrayof_jint_disjoint_arraycopy; }
+ static address arrayof_jlong_disjoint_arraycopy() { return _arrayof_jlong_disjoint_arraycopy; }
+ static address arrayof_oop_disjoint_arraycopy() { return _arrayof_oop_disjoint_arraycopy; }
+
+ static address checkcast_arraycopy() { return _checkcast_arraycopy; }
+ static address unsafe_arraycopy() { return _unsafe_arraycopy; }
+ static address generic_arraycopy() { return _generic_arraycopy; }
+
+ //
+ // Default versions of the above arraycopy functions for platforms which do
+ // not have specialized versions
+ //
+ static void jbyte_copy (jbyte* src, jbyte* dest, size_t count);
+ static void jshort_copy(jshort* src, jshort* dest, size_t count);
+ static void jint_copy (jint* src, jint* dest, size_t count);
+ static void jlong_copy (jlong* src, jlong* dest, size_t count);
+ static void oop_copy (oop* src, oop* dest, size_t count);
+
+ static void arrayof_jbyte_copy (HeapWord* src, HeapWord* dest, size_t count);
+ static void arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count);
+ static void arrayof_jint_copy (HeapWord* src, HeapWord* dest, size_t count);
+ static void arrayof_jlong_copy (HeapWord* src, HeapWord* dest, size_t count);
+ static void arrayof_oop_copy (HeapWord* src, HeapWord* dest, size_t count);
+};
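+
+// Usage sketch (illustrative; the actual call site is in the runtime's
+// Java-call machinery): the generated call stub is always invoked through
+// the typed accessor above, e.g.
+//
+//   StubRoutines::call_stub()(link, result, result_type, method, entry_point,
+//                             parameters, size_of_parameters, CHECK);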
diff --git a/src/share/vm/runtime/sweeper.cpp b/src/share/vm/runtime/sweeper.cpp
new file mode 100644
index 000000000..bfa4761d4
--- /dev/null
+++ b/src/share/vm/runtime/sweeper.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_sweeper.cpp.incl"
+
+long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
+CodeBlob* NMethodSweeper::_current = NULL; // Current nmethod
+int NMethodSweeper::_seen = 0 ; // No. of blobs we have currently processed in current pass of CodeCache
+int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass
+
+jint NMethodSweeper::_locked_seen = 0;
+jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
+bool NMethodSweeper::_rescan = false;
+
+void NMethodSweeper::sweep() {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
+ if (!MethodFlushing) return;
+
+ // No need to synchronize access, since this is always executed at a
+ // safepoint. If we aren't in the middle of scan and a rescan
+ // hasn't been requested then just return.
+ if (_current == NULL && !_rescan) return;
+
+  // Make sure CompiledIC_lock is unlocked, since we might update some
+  // inline caches. If it is locked, we just bail out and try again later.
+ if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
+
+ // Check for restart
+ assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
+ if (_current == NULL) {
+ _seen = 0;
+ _invocations = NmethodSweepFraction;
+ _current = CodeCache::first();
+ _traversals += 1;
+ if (PrintMethodFlushing) {
+ tty->print_cr("### Sweep: stack traversal %d", _traversals);
+ }
+ Threads::nmethods_do();
+
+ // reset the flags since we started a scan from the beginning.
+ _rescan = false;
+ _locked_seen = 0;
+ _not_entrant_seen_on_stack = 0;
+ }
+
+ if (PrintMethodFlushing && Verbose) {
+ tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
+ }
+
+ // We want to visit all nmethods after NmethodSweepFraction invocations.
+ // If invocation is 1 we do the rest
+ int todo = CodeCache::nof_blobs();
+ if (_invocations != 1) {
+ todo = (CodeCache::nof_blobs() - _seen) / _invocations;
+ _invocations--;
+ }
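+  // Worked example, assuming the blob count stays at 1000 and
+  // NmethodSweepFraction == 4: pass 1 visits (1000-0)/4 = 250 blobs,
+  // pass 2 visits (1000-250)/3 = 250, pass 3 visits (1000-500)/2 = 250,
+  // and pass 4 (_invocations == 1) sweeps whatever remains.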
+
+ for(int i = 0; i < todo && _current != NULL; i++) {
+ CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
+ if (_current->is_nmethod()) {
+ process_nmethod((nmethod *)_current);
+ }
+ _seen++;
+ _current = next;
+ }
+ // Because we could stop on a codeBlob other than an nmethod we skip forward
+ // to the next nmethod (if any). codeBlobs other than nmethods can be freed
+ // async to us and make _current invalid while we sleep.
+ while (_current != NULL && !_current->is_nmethod()) {
+ _current = CodeCache::next(_current);
+ }
+
+ if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+ // we've completed a scan without making progress but there were
+ // nmethods we were unable to process either because they were
+    // locked or were still on stack. We don't have to aggressively
+ // clean them up so just stop scanning. We could scan once more
+ // but that complicates the control logic and it's unlikely to
+ // matter much.
+ if (PrintMethodFlushing) {
+ tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
+ }
+ }
+}
+
+
+void NMethodSweeper::process_nmethod(nmethod *nm) {
+ // Skip methods that are currently referenced by the VM
+ if (nm->is_locked_by_vm()) {
+    // But still remember to clean up inline caches for alive nmethods
+    if (nm->is_alive()) {
+      // Clean up all inline caches that point to zombie/not-entrant methods
+ nm->cleanup_inline_caches();
+ } else {
+ _locked_seen++;
+ }
+ return;
+ }
+
+ if (nm->is_zombie()) {
+    // The first time we see a zombie nmethod we mark it; the second time we
+    // reclaim it. Once we have seen a zombie method twice, we know that no
+    // inline caches refer to it any longer.
+ if (nm->is_marked_for_reclamation()) {
+ assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
+ nm->flush();
+ } else {
+ nm->mark_for_reclamation();
+ _rescan = true;
+ }
+ } else if (nm->is_not_entrant()) {
+    // If there are no current activations of this method on the
+ // stack we can safely convert it to a zombie method
+ if (nm->can_not_entrant_be_converted()) {
+ nm->make_zombie();
+ _rescan = true;
+ } else {
+ // Still alive, clean up its inline caches
+ nm->cleanup_inline_caches();
+      // we couldn't transition this nmethod so don't immediately
+ // request a rescan. If this method stays on the stack for a
+ // long time we don't want to keep rescanning at every safepoint.
+ _not_entrant_seen_on_stack++;
+ }
+ } else if (nm->is_unloaded()) {
+ // Unloaded code, just make it a zombie
+ if (nm->is_osr_only_method()) {
+ // No inline caches will ever point to osr methods, so we can just remove it
+ nm->flush();
+ } else {
+ nm->make_zombie();
+ _rescan = true;
+ }
+ } else {
+ assert(nm->is_alive(), "should be alive");
+    // Clean up all inline caches that point to zombie/not-entrant methods
+ nm->cleanup_inline_caches();
+ }
+}
diff --git a/src/share/vm/runtime/sweeper.hpp b/src/share/vm/runtime/sweeper.hpp
new file mode 100644
index 000000000..1f7260cac
--- /dev/null
+++ b/src/share/vm/runtime/sweeper.hpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// The NMethodSweeper is an incremental cleaner that:
+//    - cleans up inline caches
+//    - reclaims unreferenced zombie nmethods
+//
+
+class NMethodSweeper : public AllStatic {
+ static long _traversals; // Stack traversal count
+ static CodeBlob* _current; // Current nmethod
+  static int       _seen;        // No. of nmethods processed so far in the current pass of the CodeCache
+ static int _invocations; // No. of invocations left until we are completed with this pass
+
+ static bool _rescan; // Indicates that we should do a full rescan of the
+                                     // code cache looking for work to do.
+ static int _locked_seen; // Number of locked nmethods encountered during the scan
+  static int       _not_entrant_seen_on_stack; // Number of not-entrant nmethods that are still on the stack
+
+
+ static void process_nmethod(nmethod *nm);
+ public:
+ static long traversal_count() { return _traversals; }
+
+ static void sweep(); // Invoked at the end of each safepoint
+
+ static void notify(nmethod* nm) {
+ // Perform a full scan of the code cache from the beginning. No
+ // need to synchronize the setting of this flag since it only
+ // changes to false at safepoint so we can never overwrite it with false.
+ _rescan = true;
+ }
+};
diff --git a/src/share/vm/runtime/synchronizer.cpp b/src/share/vm/runtime/synchronizer.cpp
new file mode 100644
index 000000000..eb4e2cac0
--- /dev/null
+++ b/src/share/vm/runtime/synchronizer.cpp
@@ -0,0 +1,4716 @@
+/*
+ * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_synchronizer.cpp.incl"
+
+#if defined(__GNUC__) && !defined(IA64)
+ // Need to inhibit inlining for older versions of GCC to avoid build-time failures
+ #define ATTR __attribute__((noinline))
+#else
+ #define ATTR
+#endif
+
+// Native markword accessors for synchronization and hashCode().
+//
+// The "core" versions of monitor enter and exit reside in this file.
+// The interpreter and compilers contain specialized transliterated
+// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
+// for instance. If you make changes here, make sure to modify the
+// interpreter, and both C1 and C2 fast-path inline locking code emission.
+//
+// TODO: merge the objectMonitor and synchronizer classes.
+//
+// -----------------------------------------------------------------------------
+
+#ifdef DTRACE_ENABLED
+
+// Only bother with this argument setup if dtrace is available
+// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
+
+HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
+ jlong, uintptr_t, char*, int, long);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
+ jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
+ jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
+ jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
+ jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
+ jlong, uintptr_t, char*, int);
+HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
+ jlong, uintptr_t, char*, int);
+
+#define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread) \
+ char* bytes = NULL; \
+ int len = 0; \
+ jlong jtid = SharedRuntime::get_java_tid(thread); \
+ symbolOop klassname = ((oop)(klassOop))->klass()->klass_part()->name(); \
+ if (klassname != NULL) { \
+ bytes = (char*)klassname->bytes(); \
+ len = klassname->utf8_length(); \
+ }
+
+#define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis) \
+ { \
+ if (DTraceMonitorProbes) { \
+ DTRACE_MONITOR_PROBE_COMMON(klassOop, thread); \
+ HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid, \
+ (monitor), bytes, len, (millis)); \
+ } \
+ }
+
+#define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread) \
+ { \
+ if (DTraceMonitorProbes) { \
+ DTRACE_MONITOR_PROBE_COMMON(klassOop, thread); \
+ HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid, \
+ (uintptr_t)(monitor), bytes, len); \
+ } \
+ }
+
+#else // ndef DTRACE_ENABLED
+
+#define DTRACE_MONITOR_WAIT_PROBE(klassOop, thread, millis, mon) {;}
+#define DTRACE_MONITOR_PROBE(probe, klassOop, thread, mon) {;}
+
+#endif // ndef DTRACE_ENABLED
+
+// ObjectWaiter serves as a "proxy" or surrogate thread.
+// TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
+// ParkEvent instead. Beware, however, that the JVMTI code
+// knows about ObjectWaiters, so we'll have to reconcile that code.
+// See next_waiter(), first_waiter(), etc.
+
+class ObjectWaiter : public StackObj {
+ public:
+ enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
+ enum Sorted { PREPEND, APPEND, SORTED } ;
+ ObjectWaiter * volatile _next;
+ ObjectWaiter * volatile _prev;
+ Thread* _thread;
+ ParkEvent * _event;
+ volatile int _notified ;
+ volatile TStates TState ;
+ Sorted _Sorted ; // List placement disposition
+ bool _active ; // Contention monitoring is enabled
+ public:
+ ObjectWaiter(Thread* thread) {
+ _next = NULL;
+ _prev = NULL;
+ _notified = 0;
+ TState = TS_RUN ;
+ _thread = thread;
+ _event = thread->_ParkEvent ;
+ _active = false;
+ assert (_event != NULL, "invariant") ;
+ }
+
+ void wait_reenter_begin(ObjectMonitor *mon) {
+ JavaThread *jt = (JavaThread *)this->_thread;
+ _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
+ }
+
+ void wait_reenter_end(ObjectMonitor *mon) {
+ JavaThread *jt = (JavaThread *)this->_thread;
+ JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
+ }
+};
+
+enum ManifestConstants {
+ ClearResponsibleAtSTW = 0,
+ MaximumRecheckInterval = 1000
+} ;
+
+
+#undef TEVENT
+#define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
+
+#define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
+
+#undef TEVENT
+#define TEVENT(nom) {;}
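+// Note: the second definition above compiles TEVENT away; the SyncVerbose/FEVENT
+// form is kept so it can easily be re-enabled for diagnostics.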
+
+// Performance concern:
+// OrderAccess::storestore() calls release() which STs 0 into the global volatile
+// OrderAccess::Dummy variable. This store is unnecessary for correctness.
+// Many threads STing into a common location causes considerable cache migration
+// or "sloshing" on large SMP systems.  As such, I avoid using OrderAccess::storestore()
+// until it's repaired. In some cases OrderAccess::fence() -- which incurs local
+// latency on the executing processor -- is a better choice as it scales on SMP
+// systems. See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
+// discussion of coherency costs. Note that all our current reference platforms
+// provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
+//
+// As a general policy we use "volatile" to control compiler-based reordering
+// and explicit fences (barriers) to control for architectural reordering performed
+// by the CPU(s) or platform.
+
+static int MBFence (int x) { OrderAccess::fence(); return x; }
+
+struct SharedGlobals {
+ // These are highly shared mostly-read variables.
+  // To avoid false-sharing they need to be the sole occupants of a cache line.
+ double padPrefix [8];
+ volatile int stwRandom ;
+ volatile int stwCycle ;
+
+ // Hot RW variables -- Sequester to avoid false-sharing
+ double padSuffix [16];
+ volatile int hcSequence ;
+ double padFinal [8] ;
+} ;
+
+static SharedGlobals GVars ;
+
+
+// Tunables ...
+// The knob* variables are effectively final. Once set they should
+// never be modified thereafter.  Consider using __read_mostly with GCC.
+
+static int Knob_LogSpins = 0 ; // enable jvmstat tally for spins
+static int Knob_HandOff = 0 ;
+static int Knob_Verbose = 0 ;
+static int Knob_ReportSettings = 0 ;
+
+static int Knob_SpinLimit = 5000 ; // derived by an external tool -
+static int Knob_SpinBase = 0 ; // Floor AKA SpinMin
+static int Knob_SpinBackOff = 0 ; // spin-loop backoff
+static int Knob_CASPenalty = -1 ; // Penalty for failed CAS
+static int Knob_OXPenalty = -1 ; // Penalty for observed _owner change
+static int Knob_SpinSetSucc = 1 ; // spinners set the _succ field
+static int Knob_SpinEarly = 1 ;
+static int Knob_SuccEnabled = 1 ; // futile wake throttling
+static int Knob_SuccRestrict = 0 ; // Limit successors + spinners to at-most-one
+static int Knob_MaxSpinners = -1 ; // Should be a function of # CPUs
+static int Knob_Bonus = 100 ; // spin success bonus
+static int Knob_BonusB = 100 ; // spin success bonus
+static int Knob_Penalty = 200 ; // spin failure penalty
+static int Knob_Poverty = 1000 ;
+static int Knob_SpinAfterFutile = 1 ; // Spin after returning from park()
+static int Knob_FixedSpin = 0 ;
+static int Knob_OState = 3 ; // Spinner checks thread state of _owner
+static int Knob_UsePause = 1 ;
+static int Knob_ExitPolicy = 0 ;
+static int Knob_PreSpin = 10 ; // 20-100 likely better
+static int Knob_ResetEvent = 0 ;
+static int BackOffMask = 0 ;
+
+static int Knob_FastHSSEC = 0 ;
+static int Knob_MoveNotifyee = 2 ; // notify() - disposition of notifyee
+static int Knob_QMode = 0 ; // EntryList-cxq policy - queue discipline
+static volatile int InitDone = 0 ;
+
+
+// hashCode() generation :
+//
+// Possibilities:
+// * MD5Digest of {obj,stwRandom}
+// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
+// * A DES- or AES-style SBox[] mechanism
+// * One of the Phi-based schemes, such as:
+// 2654435761 = 2^32 * Phi (golden ratio)
+// HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
+// * A variation of Marsaglia's shift-xor RNG scheme.
+// * (obj ^ stwRandom) is appealing, but can result
+// in undesirable regularity in the hashCode values of adjacent objects
+// (objects allocated back-to-back, in particular). This could potentially
+// result in hashtable collisions and reduced hashtable efficiency.
+// There are simple ways to "diffuse" the middle address bits over the
+// generated hashCode values:
+//
+
+static inline intptr_t get_next_hash(Thread * Self, oop obj) {
+ intptr_t value = 0 ;
+ if (hashCode == 0) {
+ // This form uses an unguarded global Park-Miller RNG,
+     // so it's possible for two threads to race and generate the same RNG value.
+     // On an MP system we'll have lots of RW access to a global, so the
+ // mechanism induces lots of coherency traffic.
+ value = os::random() ;
+ } else
+ if (hashCode == 1) {
+ // This variation has the property of being stable (idempotent)
+ // between STW operations. This can be useful in some of the 1-0
+ // synchronization schemes.
+ intptr_t addrBits = intptr_t(obj) >> 3 ;
+ value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
+ } else
+ if (hashCode == 2) {
+ value = 1 ; // for sensitivity testing
+ } else
+ if (hashCode == 3) {
+ value = ++GVars.hcSequence ;
+ } else
+ if (hashCode == 4) {
+ value = intptr_t(obj) ;
+ } else {
+ // Marsaglia's xor-shift scheme with thread-specific state
+ // This is probably the best overall implementation -- we'll
+ // likely make this the default in future releases.
+ unsigned t = Self->_hashStateX ;
+ t ^= (t << 11) ;
+ Self->_hashStateX = Self->_hashStateY ;
+ Self->_hashStateY = Self->_hashStateZ ;
+ Self->_hashStateZ = Self->_hashStateW ;
+ unsigned v = Self->_hashStateW ;
+ v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
+ Self->_hashStateW = v ;
+ value = v ;
+ }
+
+ value &= markOopDesc::hash_mask;
+ if (value == 0) value = 0xBAD ;
+ assert (value != markOopDesc::no_hash, "invariant") ;
+ TEVENT (hashCode: GENERATE) ;
+ return value;
+}
+
+void BasicLock::print_on(outputStream* st) const {
+ st->print("monitor");
+}
+
+void BasicLock::move_to(oop obj, BasicLock* dest) {
+ // Check to see if we need to inflate the lock. This is only needed
+ // if an object is locked using "this" lightweight monitor. In that
+ // case, the displaced_header() is unlocked, because the
+ // displaced_header() contains the header for the originally unlocked
+ // object. However the object could have already been inflated. But it
+  // does not matter; the inflation will just be a no-op. For other cases,
+ // the displaced header will be either 0x0 or 0x3, which are location
+ // independent, therefore the BasicLock is free to move.
+ //
+ // During OSR we may need to relocate a BasicLock (which contains a
+ // displaced word) from a location in an interpreter frame to a
+ // new location in a compiled frame. "this" refers to the source
+ // basiclock in the interpreter frame. "dest" refers to the destination
+ // basiclock in the new compiled frame. We *always* inflate in move_to().
+ // The always-Inflate policy works properly, but in 1.5.0 it can sometimes
+ // cause performance problems in code that makes heavy use of a small # of
+ // uncontended locks. (We'd inflate during OSR, and then sync performance
+ // would subsequently plummet because the thread would be forced thru the slow-path).
+ // This problem has been made largely moot on IA32 by inlining the inflated fast-path
+ // operations in Fast_Lock and Fast_Unlock in i486.ad.
+ //
+ // Note that there is a way to safely swing the object's markword from
+ // one stack location to another. This avoids inflation. Obviously,
+ // we need to ensure that both locations refer to the current thread's stack.
+ // There are some subtle concurrency issues, however, and since the benefit is
+  // small (given the support for inflated fast-path locking in the fast_lock, etc)
+ // we'll leave that optimization for another time.
+
+ if (displaced_header()->is_neutral()) {
+ ObjectSynchronizer::inflate_helper(obj);
+    // WARNING: We cannot put a check here, because the inflation
+ // will not update the displaced header. Once BasicLock is inflated,
+ // no one should ever look at its content.
+ } else {
+ // Typically the displaced header will be 0 (recursive stack lock) or
+ // unused_mark. Naively we'd like to assert that the displaced mark
+ // value is either 0, neutral, or 3. But with the advent of the
+ // store-before-CAS avoidance in fast_lock/compiler_lock_object
+ // we can find any flavor mark in the displaced mark.
+ }
+// [RGV] The next line appears to do nothing!
+ intptr_t dh = (intptr_t) displaced_header();
+ dest->set_displaced_header(displaced_header());
+}
+
+// -----------------------------------------------------------------------------
+
+// standard constructor, allows locking failures
+ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
+ _dolock = doLock;
+ _thread = thread;
+ debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
+ _obj = obj;
+
+ if (_dolock) {
+ TEVENT (ObjectLocker) ;
+
+ ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
+ }
+}
+
+ObjectLocker::~ObjectLocker() {
+ if (_dolock) {
+ ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
+ }
+}
+
+// -----------------------------------------------------------------------------
+
+
+PerfCounter * ObjectSynchronizer::_sync_Inflations = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_Deflations = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_ContendedLockAttempts = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_FutileWakeups = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_Parks = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_EmptyNotifications = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_Notifications = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_PrivateA = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_PrivateB = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_SlowExit = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_SlowEnter = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_SlowNotify = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_FailedSpins = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_SuccessfulSpins = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_MonInCirculation = NULL ;
+PerfCounter * ObjectSynchronizer::_sync_MonScavenged = NULL ;
+PerfLongVariable * ObjectSynchronizer::_sync_MonExtant = NULL ;
+
+// One-shot global initialization for the sync subsystem.
+// We could also defer initialization and initialize on-demand
+// the first time we call inflate(). Initialization would
+// be protected - like so many things - by the MonitorCache_lock.
+
+void ObjectSynchronizer::Initialize () {
+ static int InitializationCompleted = 0 ;
+ assert (InitializationCompleted == 0, "invariant") ;
+ InitializationCompleted = 1 ;
+ if (UsePerfData) {
+ EXCEPTION_MARK ;
+ #define NEWPERFCOUNTER(n) {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
+ #define NEWPERFVARIABLE(n) {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
+ NEWPERFCOUNTER(_sync_Inflations) ;
+ NEWPERFCOUNTER(_sync_Deflations) ;
+ NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
+ NEWPERFCOUNTER(_sync_FutileWakeups) ;
+ NEWPERFCOUNTER(_sync_Parks) ;
+ NEWPERFCOUNTER(_sync_EmptyNotifications) ;
+ NEWPERFCOUNTER(_sync_Notifications) ;
+ NEWPERFCOUNTER(_sync_SlowEnter) ;
+ NEWPERFCOUNTER(_sync_SlowExit) ;
+ NEWPERFCOUNTER(_sync_SlowNotify) ;
+ NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
+ NEWPERFCOUNTER(_sync_FailedSpins) ;
+ NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
+ NEWPERFCOUNTER(_sync_PrivateA) ;
+ NEWPERFCOUNTER(_sync_PrivateB) ;
+ NEWPERFCOUNTER(_sync_MonInCirculation) ;
+ NEWPERFCOUNTER(_sync_MonScavenged) ;
+ NEWPERFVARIABLE(_sync_MonExtant) ;
+ #undef NEWPERFCOUNTER
+ }
+}
+
+// Compile-time asserts
+// When possible, it's better to catch errors deterministically at
+// compile-time than at runtime. The down-side to using compile-time
+// asserts is that the error message -- often something about negative array
+// indices -- is opaque.
+
+#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @%X\n", tag); }
+
+void ObjectMonitor::ctAsserts() {
+ CTASSERT(offset_of (ObjectMonitor, _header) == 0);
+}
+
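+// Atomically add dx to *adr with a CAS retry loop; returns the previous value.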
+static int Adjust (volatile int * adr, int dx) {
+ int v ;
+ for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
+ return v ;
+}
+
+// Ad-hoc mutual exclusion primitives: SpinLock and Mux
+//
+// We employ SpinLocks _only for low-contention, fixed-length
+// short-duration critical sections where we're concerned
+// about native mutex_t or HotSpot Mutex:: latency.
+// The mux construct provides a spin-then-block mutual exclusion
+// mechanism.
+//
+// Testing has shown that contention on the ListLock guarding gFreeList
+// is common. If we implement ListLock as a simple SpinLock it's common
+// for the JVM to devolve to yielding with little progress. This is true
+// despite the fact that the critical sections protected by ListLock are
+// extremely short.
+//
+// TODO-FIXME: ListLock should be of type SpinLock.
+// We should make this a 1st-class type, integrated into the lock
+// hierarchy as leaf-locks. Critically, the SpinLock structure
+// should have sufficient padding to avoid false-sharing and excessive
+// cache-coherency traffic.
+
+
+typedef volatile int SpinLockT ;
+
+void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
+ if (Atomic::cmpxchg (1, adr, 0) == 0) {
+ return ; // normal fast-path return
+ }
+
+ // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
+ TEVENT (SpinAcquire - ctx) ;
+ int ctr = 0 ;
+ int Yields = 0 ;
+ for (;;) {
+ while (*adr != 0) {
+ ++ctr ;
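+         // Back off every 4096 spins, or immediately on a uniprocessor.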
+ if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
+ if (Yields > 5) {
+ // Consider using a simple NakedSleep() instead.
+ // Then SpinAcquire could be called by non-JVM threads
+ Thread::current()->_ParkEvent->park(1) ;
+ } else {
+ os::NakedYield() ;
+ ++Yields ;
+ }
+ } else {
+ SpinPause() ;
+ }
+ }
+ if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
+ }
+}
+
+void Thread::SpinRelease (volatile int * adr) {
+ assert (*adr != 0, "invariant") ;
+ OrderAccess::fence() ; // guarantee at least release consistency.
+ // Roach-motel semantics.
+ // It's safe if subsequent LDs and STs float "up" into the critical section,
+ // but prior LDs and STs within the critical section can't be allowed
+ // to reorder or float past the ST that releases the lock.
+ *adr = 0 ;
+}
+
+// muxAcquire and muxRelease:
+//
+// * muxAcquire and muxRelease support a single-word lock-word construct.
+// The LSB of the word is set IFF the lock is held.
+// The remainder of the word points to the head of a singly-linked list
+// of threads blocked on the lock.
+//
+// * The current implementation of muxAcquire-muxRelease uses its own
+// dedicated Thread._MuxEvent instance. If we're interested in
+// minimizing the peak number of extant ParkEvent instances then
+// we could eliminate _MuxEvent and "borrow" _ParkEvent as long
+// as certain invariants were satisfied. Specifically, care would need
+// to be taken with regards to consuming unpark() "permits".
+// A safe rule of thumb is that a thread would never call muxAcquire()
+// if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
+// park(). Otherwise the _ParkEvent park() operation in muxAcquire() could
+// consume an unpark() permit intended for monitorenter, for instance.
+// One way around this would be to widen the restricted-range semaphore
+// implemented in park(). Another alternative would be to provide
+// multiple instances of the PlatformEvent() for each thread. One
+// instance would be dedicated to muxAcquire-muxRelease, for instance.
+//
+// * Usage:
+// -- Only as leaf locks
+// -- for short-term locking only as muxAcquire does not perform
+// thread state transitions.
+//
+// Alternatives:
+// * We could implement muxAcquire and muxRelease with MCS or CLH locks
+// but with parking or spin-then-park instead of pure spinning.
+// * Use Taura-Oyama-Yonezawa locks.
+// * It's possible to construct a 1-0 lock if we encode the lockword as
+// (List,LockByte). Acquire will CAS the full lockword while Release
+// will STB 0 into the LockByte. The 1-0 scheme admits stranding, so
+// acquiring threads use timers (ParkTimed) to detect and recover from
+// the stranding window. Thread/Node structures must be aligned on 256-byte
+// boundaries by using placement-new.
+// * Augment MCS with advisory back-link fields maintained with CAS().
+// Pictorially: LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
+// The validity of the backlinks must be ratified before we trust the value.
+// If the backlinks are invalid the exiting thread must back-track through the
+// forward links, which are always trustworthy.
+// * Add a successor indication. The LockWord is currently encoded as
+// (List, LOCKBIT:1). We could also add a SUCCBIT or an explicit _succ variable
+// to provide the usual futile-wakeup optimization.
+// See RTStt for details.
+// * Consider schedctl.sc_nopreempt to cover the critical section.
+//
+
+
+typedef volatile intptr_t MutexT ; // Mux Lock-word
+enum MuxBits { LOCKBIT = 1 } ;
+
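+// Illustrative sketch only (not compiled): helpers that decode the
+// single-word mux lock-word described above -- the LSB is the lock bit and
+// the remaining bits hold the head of the singly-linked list of blocked
+// ParkEvents. The helper names are assumptions, not part of the VM.
+#if 0
+static inline bool MuxIsLocked (intptr_t w)       { return (w & LOCKBIT) != 0 ; }
+static inline ParkEvent * MuxWaiters (intptr_t w) { return (ParkEvent *) (w & ~LOCKBIT) ; }
+#endif
+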
+void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
+ intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
+ if (w == 0) return ;
+ if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+ return ;
+ }
+
+ TEVENT (muxAcquire - Contention) ;
+ ParkEvent * const Self = Thread::current()->_MuxEvent ;
+ assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
+ for (;;) {
+ int its = (os::is_MP() ? 100 : 0) + 1 ;
+
+ // Optional spin phase: spin-then-park strategy
+ while (--its >= 0) {
+ w = *Lock ;
+ if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+ return ;
+ }
+ }
+
+ Self->reset() ;
+ Self->OnList = intptr_t(Lock) ;
+ // The following fence() isn't _strictly necessary as the subsequent
+ // CAS() both serializes execution and ratifies the fetched *Lock value.
+ OrderAccess::fence();
+ for (;;) {
+ w = *Lock ;
+ if ((w & LOCKBIT) == 0) {
+ if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+ Self->OnList = 0 ; // hygiene - allows stronger asserts
+ return ;
+ }
+ continue ; // Interference -- *Lock changed -- Just retry
+ }
+ assert (w & LOCKBIT, "invariant") ;
+ Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
+ if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
+ }
+
+ while (Self->OnList != 0) {
+ Self->park() ;
+ }
+ }
+}
+
+void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
+ intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
+ if (w == 0) return ;
+ if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+ return ;
+ }
+
+ TEVENT (muxAcquire - Contention) ;
+ ParkEvent * ReleaseAfter = NULL ;
+ if (ev == NULL) {
+ ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
+ }
+ assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
+ for (;;) {
+ guarantee (ev->OnList == 0, "invariant") ;
+ int its = (os::is_MP() ? 100 : 0) + 1 ;
+
+ // Optional spin phase: spin-then-park strategy
+ while (--its >= 0) {
+ w = *Lock ;
+ if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+ if (ReleaseAfter != NULL) {
+ ParkEvent::Release (ReleaseAfter) ;
+ }
+ return ;
+ }
+ }
+
+ ev->reset() ;
+ ev->OnList = intptr_t(Lock) ;
+ // The following fence() isn't _strictly necessary as the subsequent
+ // CAS() both serializes execution and ratifies the fetched *Lock value.
+ OrderAccess::fence();
+ for (;;) {
+ w = *Lock ;
+ if ((w & LOCKBIT) == 0) {
+ if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
+ ev->OnList = 0 ;
+ // We call ::Release while holding the outer lock, thus
+ // artificially lengthening the critical section.
+ // Consider deferring the ::Release() until the subsequent unlock(),
+ // after we've dropped the outer lock.
+ if (ReleaseAfter != NULL) {
+ ParkEvent::Release (ReleaseAfter) ;
+ }
+ return ;
+ }
+ continue ; // Interference -- *Lock changed -- Just retry
+ }
+ assert (w & LOCKBIT, "invariant") ;
+ ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
+ if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
+ }
+
+ while (ev->OnList != 0) {
+ ev->park() ;
+ }
+ }
+}
+
+// Release() must extract a successor from the list and then wake that thread.
+// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
+// similar to that used by ParkEvent::Allocate() and ::Release(). DMR-based
+// Release() would :
+// (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
+// (B) Extract a successor from the private list "in-hand"
+// (C) attempt to CAS() the residual back into *Lock over null.
+// If there were any newly arrived threads the CAS() would fail.
+// In that case Release() would detach the RATs, re-merge the list in-hand
+// with the RATs and repeat as needed. Alternately, Release() might
+// detach and extract a successor, but then pass the residual list to the wakee.
+// The wakee would be responsible for reattaching and remerging before it
+// competed for the lock.
+//
+// Both "pop" and DMR are immune from ABA corruption -- there can be
+// multiple concurrent pushers, but only one popper or detacher.
+// This implementation pops from the head of the list. This is unfair,
+// but tends to provide excellent throughput as hot threads remain hot.
+// (We wake recently run threads first).
+
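+// Illustrative sketch only (not compiled): a DMR-based Release() along the
+// lines described above. The RAT re-merge in step (C) is deliberately
+// elided; the VM uses the simpler head-pop form in muxRelease() below.
+// The function name is an assumption.
+#if 0
+static void muxReleaseDMR (volatile intptr_t * Lock) {
+  // (A) Detach: swing *Lock from (List|LOCKBIT) to 0. This releases the
+  //     lock and takes the entire wait list private in one step.
+  intptr_t w ;
+  for (;;) {
+    w = *Lock ;
+    assert (w & LOCKBIT, "invariant") ;
+    if (Atomic::cmpxchg_ptr (0, Lock, w) == w) break ;
+  }
+  ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
+  if (List == NULL) return ;                 // no waiters -- nothing to wake
+  // (B) Extract a successor from the private list "in-hand".
+  ParkEvent * Wakee    = List ;
+  ParkEvent * Residual = List->ListNext ;
+  // (C) Try to CAS the residual back into *Lock over NULL. If newly
+  //     arrived threads pushed themselves onto *Lock in the interim the
+  //     CAS fails; a complete implementation would detach the RATs and
+  //     re-merge them with the residual list (elided here).
+  if (Residual != NULL) {
+    Atomic::cmpxchg_ptr (intptr_t(Residual), Lock, 0) ;
+  }
+  Wakee->OnList = 0 ;
+  OrderAccess::fence() ;
+  Wakee->unpark() ;
+}
+#endif
+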
+void Thread::muxRelease (volatile intptr_t * Lock) {
+ for (;;) {
+ const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
+ assert (w & LOCKBIT, "invariant") ;
+ if (w == LOCKBIT) return ;
+ ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
+ assert (List != NULL, "invariant") ;
+ assert (List->OnList == intptr_t(Lock), "invariant") ;
+ ParkEvent * nxt = List->ListNext ;
+
+ // The following CAS() releases the lock and pops the head element.
+ if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
+ continue ;
+ }
+ List->OnList = 0 ;
+ OrderAccess::fence() ;
+ List->unpark () ;
+ return ;
+ }
+}
+
+// ObjectMonitor Lifecycle
+// -----------------------
+// Inflation unlinks monitors from the global gFreeList and
+// associates them with objects. Deflation -- which occurs at
+// STW-time -- disassociates idle monitors from objects. Such
+// scavenged monitors are returned to the gFreeList.
+//
+// The global list is protected by ListLock. All the critical sections
+// are short and operate in constant-time.
+//
+// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
+//
+// Lifecycle:
+// -- unassigned and on the global free list
+// -- unassigned and on a thread's private omFreeList
+// -- assigned to an object. The object is inflated and the mark refers
+// to the objectmonitor.
+//
+// TODO-FIXME:
+//
+// * We currently protect the gFreeList with a simple lock.
+// An alternate lock-free scheme would be to pop elements from the gFreeList
+// with CAS. This would be safe from ABA corruption as long as we only
+// recycled previously appearing elements onto the list in deflate_idle_monitors()
+// at STW-time. Completely new elements could always be pushed onto the gFreeList
+// with CAS. Elements that appeared previously on the list could only
+// be installed at STW-time.
+//
+// * For efficiency and to help reduce the store-before-CAS penalty
+// the objectmonitors on gFreeList or local free lists should be ready to install
+// with the exception of _header and _object. _object can be set after inflation.
+// In particular, keep all objectMonitors on a thread's private list in ready-to-install
+// state with m.Owner set properly.
+//
+// * We could also diffuse contention by using multiple global (FreeList, Lock)
+// pairs -- threads could use trylock() and a cyclic-scan strategy to search for
+// an unlocked free list.
+//
+// * Add lifecycle tags and assert()s.
+//
+// * Be more consistent about when we clear an objectmonitor's fields:
+// A. After extracting the objectmonitor from a free list.
+// B. After adding an objectmonitor to a free list.
+//
+
+ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
+ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ;
+static volatile intptr_t ListLock = 0 ; // protects global monitor free-list cache
+#define CHAINMARKER ((oop)-1)
+
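+// Illustrative sketch only (not compiled): the lock-free gFreeList "pop"
+// mentioned in the TODO-FIXME above. It is ABA-safe only under the stated
+// discipline -- previously circulated monitors are re-pushed exclusively at
+// STW-time by deflate_idle_monitors(). The helper name is an assumption.
+#if 0
+static ObjectMonitor * PopFromGlobalFreeList () {
+  for (;;) {
+    ObjectMonitor * head = ObjectSynchronizer::gFreeList ;
+    if (head == NULL) return NULL ;
+    ObjectMonitor * next = head->FreeNext ;
+    if (Atomic::cmpxchg_ptr (next, &ObjectSynchronizer::gFreeList, head) == head) {
+      return head ;
+    }
+  }
+}
+#endif
+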
+ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
+ // A large MAXPRIVATE value reduces both list lock contention
+ // and list coherency traffic, but also tends to increase the
+ // number of objectMonitors in circulation as well as the STW
+ // scavenge costs. As usual, we lean toward time in space-time
+ // tradeoffs.
+ const int MAXPRIVATE = 1024 ;
+ for (;;) {
+ ObjectMonitor * m ;
+
+ // 1: try to allocate from the thread's local omFreeList.
+ // Threads will attempt to allocate first from their local list, then
+ // from the global list, and only after those attempts fail will the thread
+ // attempt to instantiate new monitors. Thread-local free lists take
+ // heat off the ListLock and improve allocation latency, as well as reducing
+ // coherency traffic on the shared global list.
+ m = Self->omFreeList ;
+ if (m != NULL) {
+ Self->omFreeList = m->FreeNext ;
+ Self->omFreeCount -- ;
+ // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
+ guarantee (m->object() == NULL, "invariant") ;
+ return m ;
+ }
+
+ // 2: try to allocate from the global gFreeList
+ // CONSIDER: use muxTry() instead of muxAcquire().
+ // If the muxTry() fails then drop immediately into case 3.
+ // If we're using thread-local free lists then try
+ // to reprovision the caller's free list.
+ if (gFreeList != NULL) {
+ // Reprovision the thread's omFreeList.
+ // Use bulk transfers to reduce the allocation rate and heat
+ // on various locks.
+ Thread::muxAcquire (&ListLock, "omAlloc") ;
+ for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
+ ObjectMonitor * take = gFreeList ;
+ gFreeList = take->FreeNext ;
+ guarantee (take->object() == NULL, "invariant") ;
+ guarantee (!take->is_busy(), "invariant") ;
+ take->Recycle() ;
+ omRelease (Self, take) ;
+ }
+ Thread::muxRelease (&ListLock) ;
+ Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
+ if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
+ TEVENT (omFirst - reprovision) ;
+ continue ;
+ }
+
+ // 3: allocate a block of new ObjectMonitors
+ // Both the local and global free lists are empty -- resort to malloc().
+ // In the current implementation objectMonitors are TSM - immortal.
+ assert (_BLOCKSIZE > 1, "invariant") ;
+ ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
+
+ // NOTE: (almost) no way to recover if allocation failed.
+ // We might be able to induce a STW safepoint and scavenge enough
+ // objectMonitors to permit progress.
+ if (temp == NULL) {
+ vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ;
+ }
+
+ // Format the block.
+ // Initialize the linked list: each monitor points to its successor,
+ // forming the singly-linked free list. The very first monitor will
+ // point to the next block, which forms the block list.
+ // The trick of using the 1st element in the block as gBlockList
+ // linkage should be reconsidered. A better implementation would
+ // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
+
+ for (int i = 1; i < _BLOCKSIZE ; i++) {
+ temp[i].FreeNext = &temp[i+1];
+ }
+
+ // Terminate the last monitor as the end of the list.
+ temp[_BLOCKSIZE - 1].FreeNext = NULL ;
+
+ // Element [0] is reserved for global list linkage
+ temp[0].set_object(CHAINMARKER);
+
+ // Consider carving out this thread's current request from the
+ // block in hand. This avoids some lock traffic and redundant
+ // list activity.
+
+ // Acquire the ListLock to manipulate BlockList and FreeList.
+ // An Oyama-Taura-Yonezawa scheme might be more efficient.
+ Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
+
+ // Add the new block to the list of extant blocks (gBlockList).
+ // The very first objectMonitor in a block is reserved and dedicated.
+ // It serves as blocklist "next" linkage.
+ temp[0].FreeNext = gBlockList;
+ gBlockList = temp;
+
+ // Add the new string of objectMonitors to the global free list
+ temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
+ gFreeList = temp + 1;
+ Thread::muxRelease (&ListLock) ;
+ TEVENT (Allocate block of monitors) ;
+ }
+}
+
+// Place "m" on the caller's private per-thread omFreeList.
+// In practice there's no need to clamp or limit the number of
+// monitors on a thread's omFreeList as the only time we'll call
+// omRelease is to return a monitor to the free list after a CAS
+// attempt failed, so unbounded numbers of monitors cannot accumulate
+// on a thread's free list.
+//
+// In the future the usage of omRelease() might change and monitors
+// could migrate between free lists. In that case to avoid excessive
+// accumulation we could limit omCount to (omProvision*2), otherwise return
+// the objectMonitor to the global list. We should drain (return) in reasonable chunks.
+// That is, *not* one-at-a-time.
+
+
+void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) {
+ guarantee (m->object() == NULL, "invariant") ;
+ m->FreeNext = Self->omFreeList ;
+ Self->omFreeList = m ;
+ Self->omFreeCount ++ ;
+}
+
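+// Illustrative sketch only (not compiled): the clamped variant discussed
+// above -- keep at most (omFreeProvision * 2) monitors on the private list
+// and, past that, drain the whole private list back to gFreeList in one
+// bulk splice rather than one-at-a-time. The threshold and the function
+// name are assumptions.
+#if 0
+static void omReleaseClamped (Thread * Self, ObjectMonitor * m) {
+  guarantee (m->object() == NULL, "invariant") ;
+  m->FreeNext = Self->omFreeList ;
+  Self->omFreeList = m ;
+  Self->omFreeCount ++ ;
+  if (Self->omFreeCount <= 2 * Self->omFreeProvision) return ;
+  // Over the cap -- return the entire private list in a single splice.
+  ObjectMonitor * List = Self->omFreeList ;
+  ObjectMonitor * Tail = List ;
+  while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
+  Self->omFreeList  = NULL ;
+  Self->omFreeCount = 0 ;
+  Thread::muxAcquire (&ListLock, "omReleaseClamped") ;
+  Tail->FreeNext = ObjectSynchronizer::gFreeList ;
+  ObjectSynchronizer::gFreeList = List ;
+  Thread::muxRelease (&ListLock) ;
+}
+#endif
+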
+// Return the monitors of a moribund thread's local free list to
+// the global free list. Typically a thread calls omFlush() when
+// it's dying. We could also consider having the VM thread steal
+// monitors from threads that have not run java code over a few
+// consecutive STW safepoints. Relatedly, we might decay
+// omFreeProvision at STW safepoints.
+//
+// We currently call omFlush() from the Thread:: dtor _after the thread
+// has been excised from the thread list and is no longer a mutator.
+// That means that omFlush() can run concurrently with a safepoint and
+// the scavenge operator. Calling omFlush() from JavaThread::exit() might
+// be a better choice as we could safely reason that the JVM is
+// not at a safepoint at the time of the call, and thus there could
+// be no inopportune interleavings between omFlush() and the scavenge
+// operator.
+
+void ObjectSynchronizer::omFlush (Thread * Self) {
+ ObjectMonitor * List = Self->omFreeList ; // Null-terminated SLL
+ Self->omFreeList = NULL ;
+ if (List == NULL) return ;
+ ObjectMonitor * Tail = NULL ;
+ ObjectMonitor * s ;
+ for (s = List ; s != NULL ; s = s->FreeNext) {
+ Tail = s ;
+ guarantee (s->object() == NULL, "invariant") ;
+ guarantee (!s->is_busy(), "invariant") ;
+ s->set_owner (NULL) ; // redundant but good hygiene
+ TEVENT (omFlush - Move one) ;
+ }
+
+ guarantee (Tail != NULL && List != NULL, "invariant") ;
+ Thread::muxAcquire (&ListLock, "omFlush") ;
+ Tail->FreeNext = gFreeList ;
+ gFreeList = List ;
+ Thread::muxRelease (&ListLock) ;
+ TEVENT (omFlush) ;
+}
+
+
+// Get the next block in the block list.
+static inline ObjectMonitor* next(ObjectMonitor* block) {
+ assert(block->object() == CHAINMARKER, "must be a block header");
+ block = block->FreeNext ;
+ assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
+ return block;
+}
+
+// Fast path code shared by multiple functions
+ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
+ markOop mark = obj->mark();
+ if (mark->has_monitor()) {
+ assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
+ assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
+ return mark->monitor();
+ }
+ return ObjectSynchronizer::inflate(Thread::current(), obj);
+}
+
+// Note that we could encounter some performance loss through false-sharing as
+// multiple locks occupy the same cache line. Padding might be appropriate.
+
+#define NINFLATIONLOCKS 256
+static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
+
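+// ReadStableMark: return obj's mark word once it is no longer in the
+// transient INFLATING (0) state, spinning, yielding or parking as needed
+// while some other thread completes inflation.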
+static markOop ReadStableMark (oop obj) {
+ markOop mark = obj->mark() ;
+ if (!mark->is_being_inflated()) {
+ return mark ; // normal fast-path return
+ }
+
+ int its = 0 ;
+ for (;;) {
+ markOop mark = obj->mark() ;
+ if (!mark->is_being_inflated()) {
+ return mark ; // normal fast-path return
+ }
+
+ // The object is being inflated by some other thread.
+ // The caller of ReadStableMark() must wait for inflation to complete.
+ // Avoid live-lock
+ // TODO: consider calling SafepointSynchronize::do_call_back() while
+ // spinning to see if there's a safepoint pending. If so, immediately
+ // yielding or blocking would be appropriate. Avoid spinning while
+ // there is a safepoint pending.
+ // TODO: add inflation contention performance counters.
+ // TODO: restrict the aggregate number of spinners.
+
+ ++its ;
+ if (its > 10000 || !os::is_MP()) {
+ if (its & 1) {
+ os::NakedYield() ;
+ TEVENT (Inflate: INFLATING - yield) ;
+ } else {
+ // Note that the following code attenuates the livelock problem but is not
+ // a complete remedy. A more complete solution would require that the inflating
+ // thread hold the associated inflation lock. The following code simply restricts
+ // the number of spinners to at most one. We'll have N-2 threads blocked
+ // on the inflationlock, 1 thread holding the inflation lock and using
+ // a yield/park strategy, and 1 thread in the midst of inflation.
+ // A more refined approach would be to change the encoding of INFLATING
+ // to allow encapsulation of a native thread pointer. Threads waiting for
+ // inflation to complete would use CAS to push themselves onto a singly linked
+ // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
+ // and calling park(). When inflation was complete the thread that accomplished inflation
+ // would detach the list and set the markword to inflated with a single CAS and
+ // then for each thread on the list, set the flag and unpark() the thread.
+ // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
+ // wakes at most one thread whereas we need to wake the entire list.
+ int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
+ int YieldThenBlock = 0 ;
+ assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
+ assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
+ Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
+ while (obj->mark() == markOopDesc::INFLATING()) {
+ // Beware: NakedYield() is advisory and has almost no effect on some platforms
+ // so we periodically call Self->_ParkEvent->park(1).
+ // We use a mixed spin/yield/block mechanism.
+ if ((YieldThenBlock++) >= 16) {
+ Thread::current()->_ParkEvent->park(1) ;
+ } else {
+ os::NakedYield() ;
+ }
+ }
+ Thread::muxRelease (InflationLocks + ix ) ;
+ TEVENT (Inflate: INFLATING - yield/park) ;
+ }
+ } else {
+ SpinPause() ; // SMP-polite spinning
+ }
+ }
+}
+
+ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
+ // Inflate mutates the heap ...
+ // Relaxing assertion for bug 6320749.
+ assert (Universe::verify_in_progress() ||
+ !SafepointSynchronize::is_at_safepoint(), "invariant") ;
+
+ for (;;) {
+ const markOop mark = object->mark() ;
+ assert (!mark->has_bias_pattern(), "invariant") ;
+
+ // The mark can be in one of the following states:
+ // * Inflated - just return
+ // * Stack-locked - coerce it to inflated
+ // * INFLATING - busy wait for conversion to complete
+ // * Neutral - aggressively inflate the object.
+ // * BIASED - Illegal. We should never see this
+
+ // CASE: inflated
+ if (mark->has_monitor()) {
+ ObjectMonitor * inf = mark->monitor() ;
+ assert (inf->header()->is_neutral(), "invariant");
+ assert (inf->object() == object, "invariant") ;
+ assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
+ return inf ;
+ }
+
+ // CASE: inflation in progress - inflating over a stack-lock.
+ // Some other thread is converting from stack-locked to inflated.
+ // Only that thread can complete inflation -- other threads must wait.
+ // The INFLATING value is transient.
+ // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
+ // We could always eliminate polling by parking the thread on some auxiliary list.
+ if (mark == markOopDesc::INFLATING()) {
+ TEVENT (Inflate: spin while INFLATING) ;
+ ReadStableMark(object) ;
+ continue ;
+ }
+
+ // CASE: stack-locked
+ // Could be stack-locked either by this thread or by some other thread.
+ //
+ // Note that we allocate the objectmonitor speculatively, _before_ attempting
+ // to install INFLATING into the mark word. We originally installed INFLATING,
+ // allocated the objectmonitor, and then finally STed the address of the
+ // objectmonitor into the mark. This was correct, but artificially lengthened
+ // the interval in which INFLATED appeared in the mark, thus increasing
+ // the odds of inflation contention.
+ //
+ // We now use per-thread private objectmonitor free lists.
+ // These list are reprovisioned from the global free list outside the
+ // critical INFLATING...ST interval. A thread can transfer
+ // multiple objectmonitors en-mass from the global free list to its local free list.
+ // This reduces coherency traffic and lock contention on the global free list.
+ // Using such local free lists, it doesn't matter if the omAlloc() call appears
+ // before or after the CAS(INFLATING) operation.
+ // See the comments in omAlloc().
+
+ if (mark->has_locker()) {
+ ObjectMonitor * m = omAlloc (Self) ;
+ // Optimistically prepare the objectmonitor - anticipate successful CAS
+ // We do this before the CAS in order to minimize the length of time
+ // in which INFLATING appears in the mark.
+ m->Recycle();
+ m->FreeNext = NULL ;
+ m->_Responsible = NULL ;
+ m->OwnerIsThread = 0 ;
+ m->_recursions = 0 ;
+ m->_SpinDuration = Knob_SpinLimit ; // Consider: maintain by type/class
+
+ markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
+ if (cmp != mark) {
+ omRelease (Self, m) ;
+ continue ; // Interference -- just retry
+ }
+
+ // We've successfully installed INFLATING (0) into the mark-word.
+ // This is the only case where 0 will appear in a mark-word.
+ // Only the singular thread that successfully swings the mark-word
+ // to 0 can perform (or more precisely, complete) inflation.
+ //
+ // Why do we CAS a 0 into the mark-word instead of just CASing the
+ // mark-word from the stack-locked value directly to the new inflated state?
+ // Consider what happens when a thread unlocks a stack-locked object.
+ // It attempts to use CAS to swing the displaced header value from the
+ // on-stack basiclock back into the object header. Recall also that the
+ // header value (hashcode, etc) can reside in (a) the object header, or
+ // (b) a displaced header associated with the stack-lock, or (c) a displaced
+ // header in an objectMonitor. The inflate() routine must copy the header
+ // value from the basiclock on the owner's stack to the objectMonitor, all
+ // the while preserving the hashCode stability invariants. If the owner
+ // decides to release the lock while the value is 0, the unlock will fail
+ // and control will eventually pass from slow_exit() to inflate. The owner
+ // will then spin, waiting for the 0 value to disappear. Put another way,
+ // the 0 causes the owner to stall if the owner happens to try to
+ // drop the lock (restoring the header from the basiclock to the object)
+ // while inflation is in-progress. This protocol avoids races that
+ // would otherwise permit hashCode values to change or "flicker" for an object.
+ // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
+ // 0 serves as a "BUSY" inflate-in-progress indicator.
+
+
+ // fetch the displaced mark from the owner's stack.
+ // The owner can't die or unwind past the lock while our INFLATING
+ // object is in the mark. Furthermore the owner can't complete
+ // an unlock on the object, either.
+ markOop dmw = mark->displaced_mark_helper() ;
+ assert (dmw->is_neutral(), "invariant") ;
+
+ // Setup monitor fields to proper values -- prepare the monitor
+ m->set_header(dmw) ;
+
+ // Optimization: if the mark->locker stack address is associated
+ // with this thread we could simply set m->_owner = Self and
+ // m->OwnerIsThread = 1. Note that a thread can inflate an object
+ // that it has stack-locked -- as might happen in wait() -- directly
+ // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
+ m->set_owner (mark->locker());
+ m->set_object(object);
+ // TODO-FIXME: assert BasicLock->dhw != 0.
+
+ // Must preserve store ordering. The monitor state must
+ // be stable at the time of publishing the monitor address.
+ guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
+ object->release_set_mark(markOopDesc::encode(m));
+
+ // Hopefully the performance counters are allocated on distinct cache lines
+ // to avoid false sharing on MP systems ...
+ if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
+ TEVENT(Inflate: overwrite stacklock) ;
+ if (TraceMonitorInflation) {
+ if (object->is_instance()) {
+ ResourceMark rm;
+ tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+ (intptr_t) object, (intptr_t) object->mark(),
+ Klass::cast(object->klass())->external_name());
+ }
+ }
+ return m ;
+ }
+
+ // CASE: neutral
+ // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
+ // If we know we're inflating for entry it's better to inflate by swinging a
+ // pre-locked objectMonitor pointer into the object header. A successful
+ // CAS inflates the object *and* confers ownership to the inflating thread.
+ // In the current implementation we use a 2-step mechanism where we CAS()
+ // to inflate and then CAS() again to try to swing _owner from NULL to Self.
+ // An inflateTry() method that we could call from fast_enter() and slow_enter()
+ // would be useful.
+
+ assert (mark->is_neutral(), "invariant");
+ ObjectMonitor * m = omAlloc (Self) ;
+ // prepare m for installation - set monitor to initial state
+ m->Recycle();
+ m->set_header(mark);
+ m->set_owner(NULL);
+ m->set_object(object);
+ m->OwnerIsThread = 1 ;
+ m->_recursions = 0 ;
+ m->FreeNext = NULL ;
+ m->_Responsible = NULL ;
+ m->_SpinDuration = Knob_SpinLimit ; // consider: keep metastats by type/class
+
+ if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
+ m->set_object (NULL) ;
+ m->set_owner (NULL) ;
+ m->OwnerIsThread = 0 ;
+ m->Recycle() ;
+ omRelease (Self, m) ;
+ m = NULL ;
+ continue ;
+ // interference - the markword changed - just retry.
+ // The state-transitions are one-way, so there's no chance of
+ // live-lock -- "Inflated" is an absorbing state.
+ }
+
+ // Hopefully the performance counters are allocated on distinct
+ // cache lines to avoid false sharing on MP systems ...
+ if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
+ TEVENT(Inflate: overwrite neutral) ;
+ if (TraceMonitorInflation) {
+ if (object->is_instance()) {
+ ResourceMark rm;
+ tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+ (intptr_t) object, (intptr_t) object->mark(),
+ Klass::cast(object->klass())->external_name());
+ }
+ }
+ return m ;
+ }
+}
+
+
+// This is the fast-path monitor enter. The interpreter and compiler use
+// assembly copies of this code. Make sure to update that code
+// if the following function is changed. The implementation is
+// extremely sensitive to race conditions. Be careful.
+
+void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
+ if (UseBiasedLocking) {
+ if (!SafepointSynchronize::is_at_safepoint()) {
+ BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
+ if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
+ return;
+ }
+ } else {
+ assert(!attempt_rebias, "can not rebias toward VM thread");
+ BiasedLocking::revoke_at_safepoint(obj);
+ }
+ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+
+ THREAD->update_highest_lock((address)lock);
+ slow_enter (obj, lock, THREAD) ;
+}
+
+void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
+ assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
+ // If the displaced header is NULL, the previous enter was recursive, so this exit is a no-op.
+ markOop dhw = lock->displaced_header();
+ markOop mark ;
+ if (dhw == NULL) {
+ // Recursive stack-lock.
+ // Diagnostics -- Could be: stack-locked, inflating, inflated.
+ mark = object->mark() ;
+ assert (!mark->is_neutral(), "invariant") ;
+ if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
+ assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
+ }
+ if (mark->has_monitor()) {
+ ObjectMonitor * m = mark->monitor() ;
+ assert(((oop)(m->object()))->mark() == mark, "invariant") ;
+ assert(m->is_entered(THREAD), "invariant") ;
+ }
+ return ;
+ }
+
+ mark = object->mark() ;
+
+ // If the object is stack-locked by the current thread, try to
+ // swing the displaced header from the box back to the mark.
+ if (mark == (markOop) lock) {
+ assert (dhw->is_neutral(), "invariant") ;
+ if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
+ TEVENT (fast_exit: release stacklock) ;
+ return;
+ }
+ }
+
+ ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
+}
+
+// This routine is used to handle the interpreter/compiler slow case.
+// We don't need to use the fast path here, because it must have
+// already failed in the interpreter/compiler code.
+void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
+ markOop mark = obj->mark();
+ assert(!mark->has_bias_pattern(), "should not see bias pattern here");
+
+ if (mark->is_neutral()) {
+ // Anticipate successful CAS -- the ST of the displaced mark must
+ // be visible <= the ST performed by the CAS.
+ lock->set_displaced_header(mark);
+ if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
+ TEVENT (slow_enter: release stacklock) ;
+ return ;
+ }
+ // Fall through to inflate() ...
+ } else
+ if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+ assert(lock != mark->locker(), "must not re-lock the same lock");
+ assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
+ lock->set_displaced_header(NULL);
+ return;
+ }
+
+#if 0
+ // The following optimization isn't particularly useful.
+ if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
+ lock->set_displaced_header (NULL) ;
+ return ;
+ }
+#endif
+
+ // The object header will never be displaced to this lock,
+ // so it does not matter what the value is, except that it
+ // must be non-zero to avoid looking like a re-entrant lock,
+ // and must not look locked either.
+ lock->set_displaced_header(markOopDesc::unused_mark());
+ ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
+}
+
+// This routine is used to handle the interpreter/compiler slow case.
+// We don't need to use the fast path here, because it must have
+// failed in the interpreter/compiler code. Simply using the heavy
+// weight monitor should be OK, unless someone finds otherwise.
+void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
+ fast_exit (object, lock, THREAD) ;
+}
+
+// NOTE: must use heavy weight monitor to handle jni monitor enter
+void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
+ // the current locking is from JNI instead of Java code
+ TEVENT (jni_enter) ;
+ if (UseBiasedLocking) {
+ BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+ THREAD->set_current_pending_monitor_is_from_java(false);
+ ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
+ THREAD->set_current_pending_monitor_is_from_java(true);
+}
+
+// NOTE: must use heavy weight monitor to handle jni monitor enter
+bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
+ if (UseBiasedLocking) {
+ BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+
+ ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
+ return monitor->try_enter(THREAD);
+}
+
+
+// NOTE: must use heavy weight monitor to handle jni monitor exit
+void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
+ TEVENT (jni_exit) ;
+ if (UseBiasedLocking) {
+ BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+ }
+ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+
+ ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
+ // If this thread has locked the object, exit the monitor. Note: can't use
+ // monitor->check(CHECK); must exit even if an exception is pending.
+ if (monitor->check(THREAD)) {
+ monitor->exit(THREAD);
+ }
+}
+
+// complete_exit()/reenter() are used to wait on a nested lock
+// i.e. to give up an outer lock completely and then re-enter
+// Used when holding nested locks - lock acquisition order: lock1 then lock2
+// 1) complete_exit lock1 - saving recursion count
+// 2) wait on lock2
+// 3) when notified on lock2, unlock lock2
+// 4) reenter lock1 with original recursion count
+// 5) lock lock2
+// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
+intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
+ TEVENT (complete_exit) ;
+ if (UseBiasedLocking) {
+ BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+
+ ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
+
+ return monitor->complete_exit(THREAD);
+}
+
+// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
+void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
+ TEVENT (reenter) ;
+ if (UseBiasedLocking) {
+ BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+
+ ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
+
+ monitor->reenter(recursion, THREAD);
+}
+
+// This exists only as a workaround for dtrace bug 6254741
+int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
+ DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
+ return 0;
+}
+
+// NOTE: must use heavy weight monitor to handle wait()
+void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
+ if (UseBiasedLocking) {
+ BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+ if (millis < 0) {
+ TEVENT (wait - throw IAX) ;
+ THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
+ }
+ ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
+ DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
+ monitor->wait(millis, true, THREAD);
+
+ /* This dummy call is in place to get around dtrace bug 6254741. Once
+ that's fixed we can uncomment the following line and remove the call */
+ // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
+ dtrace_waited_probe(monitor, obj, THREAD);
+}
+
+void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
+ if (UseBiasedLocking) {
+ BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+ if (millis < 0) {
+ TEVENT (wait - throw IAX) ;
+ THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
+ }
+ ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
+}
+
+void ObjectSynchronizer::notify(Handle obj, TRAPS) {
+ if (UseBiasedLocking) {
+ BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+
+ markOop mark = obj->mark();
+ if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+ return;
+ }
+ ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
+}
+
+// NOTE: see comment of notify()
+void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
+ if (UseBiasedLocking) {
+ BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+
+ markOop mark = obj->mark();
+ if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+ return;
+ }
+ ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
+}
+
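+// FastHashCode: return the identity hash code for obj, generating one and
+// installing it -- either directly in the header or in the inflated
+// monitor's cached header -- if none is present. May revoke a bias and
+// may inflate the object.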
+intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
+ if (UseBiasedLocking) {
+ // NOTE: many places throughout the JVM do not expect a safepoint
+ // to be taken here, in particular most operations on perm gen
+ // objects. However, we only ever bias Java instances and all of
+ // the call sites of identity_hash that might revoke biases have
+ // been checked to make sure they can handle a safepoint. The
+ // added check of the bias pattern is to avoid useless calls to
+ // thread-local storage.
+ if (obj->mark()->has_bias_pattern()) {
+ // Box and unbox the raw reference just in case we cause a STW safepoint.
+ Handle hobj (Self, obj) ;
+ // Relaxing assertion for bug 6320749.
+ assert (Universe::verify_in_progress() ||
+ !SafepointSynchronize::is_at_safepoint(),
+ "biases should not be seen by VM thread here");
+ BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
+ obj = hobj() ;
+ assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+ }
+
+ // hashCode() is a heap mutator ...
+ // Relaxing assertion for bug 6320749.
+ assert (Universe::verify_in_progress() ||
+ !SafepointSynchronize::is_at_safepoint(), "invariant") ;
+ assert (Universe::verify_in_progress() ||
+ Self->is_Java_thread() , "invariant") ;
+ assert (Universe::verify_in_progress() ||
+ ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
+
+ ObjectMonitor* monitor = NULL;
+ markOop temp, test;
+ intptr_t hash;
+ markOop mark = ReadStableMark (obj);
+
+ // object should remain ineligible for biased locking
+ assert (!mark->has_bias_pattern(), "invariant") ;
+
+ if (mark->is_neutral()) {
+ hash = mark->hash(); // this is a normal header
+ if (hash) { // if it has hash, just return it
+ return hash;
+ }
+ hash = get_next_hash(Self, obj); // allocate a new hash code
+ temp = mark->copy_set_hash(hash); // merge the hash code into header
+ // use (machine word version) atomic operation to install the hash
+ test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
+ if (test == mark) {
+ return hash;
+ }
+ // If atomic operation failed, we must inflate the header
+ // into a heavyweight monitor. We could add more code here
+ // for the fast path, but it is not worth the complexity.
+ } else if (mark->has_monitor()) {
+ monitor = mark->monitor();
+ temp = monitor->header();
+ assert (temp->is_neutral(), "invariant") ;
+ hash = temp->hash();
+ if (hash) {
+ return hash;
+ }
+ // Skip to the following code to reduce code size
+ } else if (Self->is_lock_owned((address)mark->locker())) {
+ temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
+ assert (temp->is_neutral(), "invariant") ;
+ hash = temp->hash(); // by current thread, check if the displaced
+ if (hash) { // header contains hash code
+ return hash;
+ }
+ // WARNING:
+ // The displaced header is strictly immutable.
+ // It can NOT be changed in ANY case. So we have
+ // to inflate the header into a heavyweight monitor
+ // even if the current thread owns the lock. The reason
+ // is that the BasicLock (stack slot) will be asynchronously
+ // read by other threads during the inflate() function.
+ // Any change to the stack may not propagate to other
+ // threads correctly.
+ }
+
+ // Inflate the monitor to set hash code
+ monitor = ObjectSynchronizer::inflate(Self, obj);
+ // Load displaced header and check it has hash code
+ mark = monitor->header();
+ assert (mark->is_neutral(), "invariant") ;
+ hash = mark->hash();
+ if (hash == 0) {
+ hash = get_next_hash(Self, obj);
+ temp = mark->copy_set_hash(hash); // merge hash code into header
+ assert (temp->is_neutral(), "invariant") ;
+ test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
+ if (test != mark) {
+ // The only update to the header in the monitor (outside GC)
+ // is to install the hash code. If someone adds a new usage of
+ // the displaced header, please update this code.
+ hash = test->hash();
+ assert (test->is_neutral(), "invariant") ;
+ assert (hash != 0, "Trivial unexpected object/monitor header usage.");
+ }
+ }
+ // We finally get the hash
+ return hash;
+}
+
+// Deprecated -- use FastHashCode() instead.
+
+intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
+ return FastHashCode (Thread::current(), obj()) ;
+}
+
+bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
+ Handle h_obj) {
+ if (UseBiasedLocking) {
+ BiasedLocking::revoke_and_rebias(h_obj, false, thread);
+ assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+
+ assert(thread == JavaThread::current(), "Can only be called on current thread");
+ oop obj = h_obj();
+
+ markOop mark = ReadStableMark (obj) ;
+
+ // Uncontended case, header points to stack
+ if (mark->has_locker()) {
+ return thread->is_lock_owned((address)mark->locker());
+ }
+ // Contended case, header points to ObjectMonitor (tagged pointer)
+ if (mark->has_monitor()) {
+ ObjectMonitor* monitor = mark->monitor();
+ return monitor->is_entered(thread) != 0 ;
+ }
+ // Unlocked case, header in place
+ assert(mark->is_neutral(), "sanity check");
+ return false;
+}
+
+// Be aware that this method could revoke the bias of the lock object.
+// This method queries the ownership of the lock handle specified by 'h_obj'.
+// If the current thread owns the lock, it returns owner_self. If no
+// thread owns the lock, it returns owner_none. Otherwise, it returns
+// owner_other.
+ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
+(JavaThread *self, Handle h_obj) {
+ // The caller must beware this method can revoke bias, and
+ // revocation can result in a safepoint.
+ assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
+ assert (self->thread_state() != _thread_blocked , "invariant") ;
+
+ // Possible mark states: neutral, biased, stack-locked, inflated
+
+ if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
+ // CASE: biased
+ BiasedLocking::revoke_and_rebias(h_obj, false, self);
+ assert(!h_obj->mark()->has_bias_pattern(),
+ "biases should be revoked by now");
+ }
+
+ assert(self == JavaThread::current(), "Can only be called on current thread");
+ oop obj = h_obj();
+ markOop mark = ReadStableMark (obj) ;
+
+ // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
+ if (mark->has_locker()) {
+ return self->is_lock_owned((address)mark->locker()) ?
+ owner_self : owner_other;
+ }
+
+ // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
+ // The Object:ObjectMonitor relationship is stable as long as we're
+ // not at a safepoint.
+ if (mark->has_monitor()) {
+ void * owner = mark->monitor()->_owner ;
+ if (owner == NULL) return owner_none ;
+ return (owner == self ||
+ self->is_lock_owned((address)owner)) ? owner_self : owner_other;
+ }
+
+ // CASE: neutral
+ assert(mark->is_neutral(), "sanity check");
+ return owner_none ; // it's unlocked
+}
+
+// FIXME: jvmti should call this
+JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
+ if (UseBiasedLocking) {
+ if (SafepointSynchronize::is_at_safepoint()) {
+ BiasedLocking::revoke_at_safepoint(h_obj);
+ } else {
+ BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
+ }
+ assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+ }
+
+ oop obj = h_obj();
+ address owner = NULL;
+
+ markOop mark = ReadStableMark (obj) ;
+
+ // Uncontended case, header points to stack
+ if (mark->has_locker()) {
+ owner = (address) mark->locker();
+ }
+
+ // Contended case, header points to ObjectMonitor (tagged pointer)
+ if (mark->has_monitor()) {
+ ObjectMonitor* monitor = mark->monitor();
+ assert(monitor != NULL, "monitor should be non-null");
+ owner = (address) monitor->owner();
+ }
+
+ if (owner != NULL) {
+ return Threads::owning_thread_from_monitor_owner(owner, doLock);
+ }
+
+ // Unlocked case, header in place
+ // We cannot assert here, since this object may have been
+ // locked by another thread by the time we reach this point.
+ // assert(mark->is_neutral(), "sanity check");
+
+ return NULL;
+}
+
+// Iterate through monitor cache and attempt to release thread's monitors
+// Gives up on a particular monitor if an exception occurs, but continues
+// the overall iteration, swallowing the exception.
+class ReleaseJavaMonitorsClosure: public MonitorClosure {
+private:
+ TRAPS;
+
+public:
+ ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
+ void do_monitor(ObjectMonitor* mid) {
+ if (mid->owner() == THREAD) {
+ (void)mid->complete_exit(CHECK);
+ }
+ }
+};
+
+// Release all inflated monitors owned by THREAD. Lightweight monitors are
+// ignored. This is meant to be called during JNI thread detach which assumes
+// all remaining monitors are heavyweight. All exceptions are swallowed.
+// Scanning the extant monitor list can be time consuming.
+// A simple optimization is to add a per-thread flag that indicates a thread
+// called jni_monitorenter() during its lifetime.
+//
+// Instead of No_Safepoint_Verifier it might be cheaper to
+// use an idiom of the form:
+// auto int tmp = SafepointSynchronize::_safepoint_counter ;
+// <code that must not run at safepoint>
+// guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
+// Since the tests are extremely cheap we could leave them enabled
+// for normal product builds.
+
+void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
+ assert(THREAD == JavaThread::current(), "must be current Java thread");
+ No_Safepoint_Verifier nsv ;
+ ReleaseJavaMonitorsClosure rjmc(THREAD);
+ Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
+ ObjectSynchronizer::monitors_iterate(&rjmc);
+ Thread::muxRelease(&ListLock);
+ THREAD->clear_pending_exception();
+}
+
+// Visitors ...
+
+void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
+ ObjectMonitor* block = gBlockList;
+ ObjectMonitor* mid;
+ while (block) {
+ assert(block->object() == CHAINMARKER, "must be a block header");
+ for (int i = _BLOCKSIZE - 1; i > 0; i--) {
+ mid = block + i;
+ oop object = (oop) mid->object();
+ if (object != NULL) {
+ closure->do_monitor(mid);
+ }
+ }
+ block = (ObjectMonitor*) block->FreeNext;
+ }
+}
+
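+// oops_do: at a safepoint, visit the object field of every in-use
+// objectMonitor so the GC closure can adjust the reference.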
+void ObjectSynchronizer::oops_do(OopClosure* f) {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+ for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
+ assert(block->object() == CHAINMARKER, "must be a block header");
+ for (int i = 1; i < _BLOCKSIZE; i++) {
+ ObjectMonitor* mid = &block[i];
+ if (mid->object() != NULL) {
+ f->do_oop((oop*)mid->object_addr());
+ }
+ }
+ }
+}
+
+// Deflate_idle_monitors() is called at all safepoints, immediately
+// after all mutators are stopped, but before any objects have moved.
+// It traverses the list of known monitors, deflating where possible.
+// The scavenged monitors are returned to the monitor free list.
+//
+// Beware that we scavenge at *every* stop-the-world point.
+// Having a large number of monitors in-circulation negatively
+// impacts the performance of some applications (e.g., PointBase).
+// Broadly, we want to minimize the # of monitors in circulation.
+// Alternately, we could partition the active monitors into sub-lists
+// of those that need scanning and those that do not.
+// Specifically, we would add a new sub-list of objectmonitors
+// that are in-circulation and potentially active. deflate_idle_monitors()
+// would scan only that list. Other monitors could reside on a quiescent
+// list. Such sequestered monitors wouldn't need to be scanned by
+// deflate_idle_monitors(). omAlloc() would first check the global free list,
+// then the quiescent list, and, failing those, would allocate a new block.
+// Deflate_idle_monitors() would scavenge and move monitors to the
+// quiescent list.
+//
+// Perversely, the heap size -- and thus the STW safepoint rate --
+// typically drives the scavenge rate. Large heaps can mean infrequent GC,
+// which in turn can mean large(r) numbers of objectmonitors in circulation.
+// This is an unfortunate aspect of this design.
+//
+// Another refinement would be to refrain from calling deflate_idle_monitors()
+// except at stop-the-world points associated with garbage collections.
+//
+// An even better solution would be to deflate on-the-fly, aggressively,
+// at monitorexit-time as is done in EVM's metalock or Relaxed Locks.
+
+void ObjectSynchronizer::deflate_idle_monitors() {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+ int nInuse = 0 ; // currently associated with objects
+ int nInCirculation = 0 ; // extant
+ int nScavenged = 0 ; // reclaimed
+
+ ObjectMonitor * FreeHead = NULL ; // Local SLL of scavenged monitors
+ ObjectMonitor * FreeTail = NULL ;
+
+ // Iterate over all extant monitors - Scavenge all idle monitors.
+ TEVENT (deflate_idle_monitors) ;
+ for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
+ assert(block->object() == CHAINMARKER, "must be a block header");
+ nInCirculation += _BLOCKSIZE ;
+ for (int i = 1 ; i < _BLOCKSIZE; i++) {
+ ObjectMonitor* mid = &block[i];
+ oop obj = (oop) mid->object();
+
+ if (obj == NULL) {
+ // The monitor is not associated with an object.
+ // The monitor should either be a thread-specific private
+ // free list or the global free list.
+ // obj == NULL IMPLIES mid->is_busy() == 0
+ guarantee (!mid->is_busy(), "invariant") ;
+ continue ;
+ }
+
+ // Normal case ... The monitor is associated with obj.
+ guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
+ guarantee (mid == obj->mark()->monitor(), "invariant");
+ guarantee (mid->header()->is_neutral(), "invariant");
+
+ if (mid->is_busy()) {
+ if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
+ nInuse ++ ;
+ } else {
+ // Deflate the monitor if it is no longer being used
+ // It's idle - scavenge and return to the global free list
+ // plain old deflation ...
+ TEVENT (deflate_idle_monitors - scavenge1) ;
+ if (TraceMonitorInflation) {
+ if (obj->is_instance()) {
+ ResourceMark rm;
+ tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
+ (intptr_t) obj, (intptr_t) obj->mark(), Klass::cast(obj->klass())->external_name());
+ }
+ }
+
+ // Restore the header back to obj
+ obj->release_set_mark(mid->header());
+ mid->clear();
+
+ assert (mid->object() == NULL, "invariant") ;
+
+ // Move the object to the working free list defined by FreeHead,FreeTail.
+ mid->FreeNext = NULL ;
+ if (FreeHead == NULL) FreeHead = mid ;
+ if (FreeTail != NULL) FreeTail->FreeNext = mid ;
+ FreeTail = mid ;
+ nScavenged ++ ;
+ }
+ }
+ }
+
+ // Move the scavenged monitors back to the global free list.
+ // In theory we don't need the freelist lock as we're at a STW safepoint.
+ // omAlloc() and omFree() can only be called while a thread is _not_ in a safepoint state.
+ // But it's remotely possible that omFlush() or release_monitors_owned_by_thread()
+ // might be called while not at a global STW safepoint. In the interest of
+ // safety we protect the following access with ListLock.
+ // An even more conservative and prudent approach would be to guard
+ // the main loop in deflate_idle_monitors() with ListLock.
+ if (FreeHead != NULL) {
+ guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
+ assert (FreeTail->FreeNext == NULL, "invariant") ;
+ // constant-time list splice - prepend scavenged segment to gFreeList
+ Thread::muxAcquire (&ListLock, "scavenge - return") ;
+ FreeTail->FreeNext = gFreeList ;
+ gFreeList = FreeHead ;
+ Thread::muxRelease (&ListLock) ;
+ }
+
+ if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ;
+ if (_sync_MonExtant != NULL) _sync_MonExtant ->set_value(nInCirculation);
+
+ // TODO: Add objectMonitor leak detection.
+ // Audit/inventory the objectMonitors -- make sure they're all accounted for.
+ GVars.stwRandom = os::random() ;
+ GVars.stwCycle ++ ;
+}
+
+// A macro is used below because there may already be a pending
+// exception which should not abort the execution of the routines
+// which use this (which is why we don't put this into check_slow and
+// call it with a CHECK argument).
+
+#define CHECK_OWNER() \
+ do { \
+ if (THREAD != _owner) { \
+ if (THREAD->is_lock_owned((address) _owner)) { \
+ _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ \
+ _recursions = 0; \
+ OwnerIsThread = 1 ; \
+ } else { \
+ TEVENT (Throw IMSX) ; \
+ THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \
+ } \
+ } \
+ } while (false)
+
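+// Illustrative sketch only (not compiled): typical use of CHECK_OWNER() at
+// the top of an ObjectMonitor member that requires ownership. It either
+// normalizes _owner from a BasicLock address to THREAD or throws
+// IllegalMonitorStateException and returns. The member shown is a
+// hypothetical stand-in, not a real ObjectMonitor method.
+#if 0
+void ObjectMonitor::ExampleOwnerOnlyOperation (TRAPS) {
+  CHECK_OWNER() ;   // throws IMSX and returns if THREAD does not own the monitor
+  // ... the remainder may assume _owner == THREAD and _recursions is sane ...
+}
+#endif
+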
+// TODO-FIXME: eliminate ObjectWaiters. Replace this visitor/enumerator
+// interface with a simple FirstWaitingThread(), NextWaitingThread() interface.
+
+ObjectWaiter* ObjectMonitor::first_waiter() {
+ return _WaitSet;
+}
+
+ObjectWaiter* ObjectMonitor::next_waiter(ObjectWaiter* o) {
+ return o->_next;
+}
+
+Thread* ObjectMonitor::thread_of_waiter(ObjectWaiter* o) {
+ return o->_thread;
+}
+
+// Initialize the monitor. Except for the semaphore, all other fields
+// are simple integers or pointers.
+ObjectMonitor::ObjectMonitor() {
+ _header = NULL;
+ _count = 0;
+ _waiters = 0;
+ _recursions = 0;
+ _object = NULL;
+ _owner = NULL;
+ _WaitSet = NULL;
+ _WaitSetLock = 0 ;
+ _Responsible = NULL ;
+ _succ = NULL ;
+ _cxq = NULL ;
+ FreeNext = NULL ;
+ _EntryList = NULL ;
+ _SpinFreq = 0 ;
+ _SpinClock = 0 ;
+ OwnerIsThread = 0 ;
+}
+
+ObjectMonitor::~ObjectMonitor() {
+ // TODO: Add asserts ...
+ // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
+ // _count == 0 _EntryList == NULL etc
+}
+
+intptr_t ObjectMonitor::is_busy() const {
+ // TODO-FIXME: merge _count and _waiters.
+ // TODO-FIXME: assert _owner == null implies _recursions = 0
+ // TODO-FIXME: assert _WaitSet != null implies _count > 0
+ return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
+}
+
+void ObjectMonitor::Recycle () {
+ // TODO: add stronger asserts ...
+ // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
+ // _count == 0 EntryList == NULL
+ // _recursions == 0 _WaitSet == NULL
+ // TODO: assert (is_busy()|_recursions) == 0
+ _succ = NULL ;
+ _EntryList = NULL ;
+ _cxq = NULL ;
+ _WaitSet = NULL ;
+ _recursions = 0 ;
+ _SpinFreq = 0 ;
+ _SpinClock = 0 ;
+ OwnerIsThread = 0 ;
+}
+
+// WaitSet management ...
+
+inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
+ assert(node != NULL, "should not dequeue NULL node");
+ assert(node->_prev == NULL, "node already in list");
+ assert(node->_next == NULL, "node already in list");
+ // put node at end of queue (circular doubly linked list)
+ if (_WaitSet == NULL) {
+ _WaitSet = node;
+ node->_prev = node;
+ node->_next = node;
+ } else {
+ ObjectWaiter* head = _WaitSet ;
+ ObjectWaiter* tail = head->_prev;
+ assert(tail->_next == head, "invariant check");
+ tail->_next = node;
+ head->_prev = node;
+ node->_next = head;
+ node->_prev = tail;
+ }
+}
+
+inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
+ // dequeue the very first waiter
+ ObjectWaiter* waiter = _WaitSet;
+ if (waiter) {
+ DequeueSpecificWaiter(waiter);
+ }
+ return waiter;
+}
+
+inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
+ assert(node != NULL, "should not dequeue NULL node");
+ assert(node->_prev != NULL, "node already removed from list");
+ assert(node->_next != NULL, "node already removed from list");
+ // when the waiter has woken up because of interrupt,
+ // timeout or other spurious wake-up, dequeue the
+ // waiter from waiting list
+ ObjectWaiter* next = node->_next;
+ if (next == node) {
+ assert(node->_prev == node, "invariant check");
+ _WaitSet = NULL;
+ } else {
+ ObjectWaiter* prev = node->_prev;
+ assert(prev->_next == node, "invariant check");
+ assert(next->_prev == node, "invariant check");
+ next->_prev = prev;
+ prev->_next = next;
+ if (_WaitSet == node) {
+ _WaitSet = next;
+ }
+ }
+ node->_next = NULL;
+ node->_prev = NULL;
+}
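+
+// Worked example (illustrative only): starting from an empty WaitSet,
+// AddWaiter(A) ; AddWaiter(B) ; AddWaiter(C) leaves
+//
+//    _WaitSet -> A <-> B <-> C    (circular: C->_next == A, A->_prev == C)
+//
+// DequeueWaiter() then detaches and returns A, leaving _WaitSet -> B <-> C.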
+
+static char * kvGet (char * kvList, const char * Key) {
+ if (kvList == NULL) return NULL ;
+ size_t n = strlen (Key) ;
+ char * Search ;
+ for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
+ if (strncmp (Search, Key, n) == 0) {
+ if (Search[n] == '=') return Search + n + 1 ;
+ if (Search[n] == 0) return (char *) "1" ;
+ }
+ }
+ return NULL ;
+}
+
+static int kvGetInt (char * kvList, const char * Key, int Default) {
+ char * v = kvGet (kvList, Key) ;
+ int rslt = v ? ::strtol (v, NULL, 0) : Default ;
+ if (Knob_ReportSettings && v != NULL) {
+ ::printf (" SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
+ ::fflush (stdout) ;
+ }
+ return rslt ;
+}
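+
+// Example (illustrative only): given a flattened knob list such as
+//    "SpinLimit=5000\0Verbose\0\0"
+// kvGet(list, "SpinLimit") returns "5000", kvGet(list, "Verbose") returns "1"
+// (a bare key with no '=' is treated as 1), and kvGetInt(list, "PreSpin", 10)
+// returns the default, 10, because the key is absent.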
+
+// By convention we unlink a contending thread from EntryList|cxq immediately
+// after the thread acquires the lock in ::enter(). Equally, we could defer
+// unlinking the thread until ::exit()-time.
+
+void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
+{
+ assert (_owner == Self, "invariant") ;
+ assert (SelfNode->_thread == Self, "invariant") ;
+
+ if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
+ // Normal case: remove Self from the DLL EntryList .
+ // This is a constant-time operation.
+ ObjectWaiter * nxt = SelfNode->_next ;
+ ObjectWaiter * prv = SelfNode->_prev ;
+ if (nxt != NULL) nxt->_prev = prv ;
+ if (prv != NULL) prv->_next = nxt ;
+ if (SelfNode == _EntryList ) _EntryList = nxt ;
+ assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+ assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+ TEVENT (Unlink from EntryList) ;
+ } else {
+ guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
+ // Inopportune interleaving -- Self is still on the cxq.
+ // This usually means the enqueue of self raced an exiting thread.
+ // Normally we'll find Self near the front of the cxq, so
+ // dequeueing is typically fast. If need be we can accelerate
+ // this with some MCS/CLH-like bidirectional list hints and advisory
+ // back-links so dequeueing from the interior will normally operate
+ // in constant-time.
+ // Dequeue Self from either the head (with CAS) or from the interior
+ // with a linear-time scan and normal non-atomic memory operations.
+ // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
+ // and then unlink Self from EntryList. We have to drain eventually,
+ // so it might as well be now.
+
+ ObjectWaiter * v = _cxq ;
+ assert (v != NULL, "invariant") ;
+ if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
+ // The CAS above can fail from interference IFF a "RAT" arrived.
+ // In that case Self must be in the interior and can no longer be
+ // at the head of cxq.
+ if (v == SelfNode) {
+ assert (_cxq != v, "invariant") ;
+ v = _cxq ; // CAS above failed - start scan at head of list
+ }
+ ObjectWaiter * p ;
+ ObjectWaiter * q = NULL ;
+ for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
+ q = p ;
+ assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
+ }
+ assert (v != SelfNode, "invariant") ;
+ assert (p == SelfNode, "Node not found on cxq") ;
+ assert (p != _cxq, "invariant") ;
+ assert (q != NULL, "invariant") ;
+ assert (q->_next == p, "invariant") ;
+ q->_next = p->_next ;
+ }
+ TEVENT (Unlink from cxq) ;
+ }
+
+ // Diagnostic hygiene ...
+ SelfNode->_prev = (ObjectWaiter *) 0xBAD ;
+ SelfNode->_next = (ObjectWaiter *) 0xBAD ;
+ SelfNode->TState = ObjectWaiter::TS_RUN ;
+}
+
+// Caveat: TryLock() is not necessarily serializing if it returns failure.
+// Callers must compensate as needed.
+
+int ObjectMonitor::TryLock (Thread * Self) {
+ for (;;) {
+ void * own = _owner ;
+ if (own != NULL) return 0 ;
+ if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+ // Either guarantee _recursions == 0 or set _recursions = 0.
+ assert (_recursions == 0, "invariant") ;
+ assert (_owner == Self, "invariant") ;
+ // CONSIDER: set or assert that OwnerIsThread == 1
+ return 1 ;
+ }
+ // The lock had been free momentarily, but we lost the race to the lock.
+ // Interference -- the CAS failed.
+ // We can either return -1 or retry.
+ // Retry doesn't make as much sense because the lock was just acquired.
+ if (true) return -1 ;
+ }
+}
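+
+// TryLock() return convention, as relied on by the callers in this file:
+//    > 0 : the lock was acquired by Self
+//      0 : the lock is currently held by some other thread
+//    < 0 : the lock was momentarily free but the CAS failed (interference)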
+
+// NotRunnable() -- informed spinning
+//
+// Don't bother spinning if the owner is not eligible to drop the lock.
+// Peek at the owner's schedctl.sc_state and Thread._thread_state and
+// spin only if the owner thread is _thread_in_Java or _thread_in_vm.
+// The thread must be runnable in order to drop the lock in timely fashion.
+// If the _owner is not runnable then spinning will not likely be
+// successful (profitable).
+//
+// Beware -- the thread referenced by _owner could have died
+// so a simple fetch from _owner->_thread_state might trap.
+// Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
+// Because of the lifecycle issues the schedctl and _thread_state values
+// observed by NotRunnable() might be garbage. NotRunnable must
+// tolerate this and consider the observed _thread_state value
+// as advisory.
+//
+// Beware too, that _owner is sometimes a BasicLock address and sometimes
+// a thread pointer. We differentiate the two cases with OwnerIsThread.
+// Alternately, we might tag the type (thread pointer vs basiclock pointer)
+// with the LSB of _owner. Another option would be to probabilistically probe
+// the putative _owner->TypeTag value.
+//
+// Checking _thread_state isn't perfect. Even if the thread is
+// in_java it might be blocked on a page-fault or have been preempted
+// and sitting on a ready/dispatch queue. _thread_state in conjunction
+// with schedctl.sc_state gives us a good picture of what the
+// thread is doing, however.
+//
+// TODO: check schedctl.sc_state.
+// We'll need to use SafeFetch32() to read from the schedctl block.
+// See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
+//
+// The return value from NotRunnable() is *advisory* -- the
+// result is based on sampling and is not necessarily coherent.
+// The caller must tolerate false-negative and false-positive errors.
+// Spinning, in general, is probabilistic anyway.
+
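+// For reference, the SafeFetch32()/SafeFetchN() idiom used below -- a sketch
+// of the intended semantics; the actual routines are provided elsewhere in
+// the VM:
+//
+//    int v = SafeFetch32 (adr, errValue) ;   // yields *adr, or errValue if the
+//                                            // load would fault
+//
+// This is what lets NotRunnable() probe the state of a possibly-defunct _owner.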
+
+int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
+ // Check either OwnerIsThread or ox->TypeTag == 2BAD.
+ if (!OwnerIsThread) return 0 ;
+
+ if (ox == NULL) return 0 ;
+
+ // Avoid transitive spinning ...
+ // Say T1 spins or blocks trying to acquire L. T1._Stalled is set to L.
+ // Immediately after T1 acquires L it's possible that T2, also
+ // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
+ // This occurs transiently after T1 acquired L but before
+ // T1 managed to clear T1.Stalled. T2 does not need to abort
+ // its spin in this circumstance.
+ intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
+
+ if (BlockedOn == 1) return 1 ;
+ if (BlockedOn != 0) {
+ return BlockedOn != intptr_t(this) && _owner == ox ;
+ }
+
+ assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
+ int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
+ // consider also: jst != _thread_in_Java -- but that's overspecific.
+ return jst == _thread_blocked || jst == _thread_in_native ;
+}
+
+
+// Adaptive spin-then-block - rational spinning
+//
+// Note that we spin "globally" on _owner with a classic SMP-polite TATAS
+// algorithm. On high order SMP systems it would be better to start with
+// a brief global spin and then revert to spinning locally. In the spirit of MCS/CLH,
+// a contending thread could enqueue itself on the cxq and then spin locally
+// on a thread-specific variable such as its ParkEvent._Event flag.
+// That's left as an exercise for the reader. Note that global spinning is
+// not problematic on Niagara, as the L2$ serves the interconnect and has both
+// low latency and massive bandwidth.
+//
+// Broadly, we can fix the spin frequency -- that is, the % of contended lock
+// acquisition attempts where we opt to spin -- at 100% and vary the spin count
+// (duration) or we can fix the count at approximately the duration of
+// a context switch and vary the frequency. Of course we could also
+// vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
+// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
+//
+// This implementation varies the duration "D", where D varies with
+// the success rate of recent spin attempts. (D is capped at approximately
+// the length of a round-trip context switch). The success rate for recent
+// spin attempts is a good predictor of the success rate of future spin
+// attempts. The mechanism adapts automatically to varying critical
+// section length (lock modality), system load and degree of parallelism.
+// D is maintained per-monitor in _SpinDuration and is initialized
+// optimistically. Spin frequency is fixed at 100%.
+//
+// Note that _SpinDuration is volatile, but we update it without locks
+// or atomics. The code is designed so that _SpinDuration stays within
+// a reasonable range even in the presence of races. The arithmetic
+// operations on _SpinDuration are closed over the domain of legal values,
+// so at worst a race will install an older but still legal value.
+// At the very worst this introduces some apparent non-determinism.
+// We might spin when we shouldn't or vice-versa, but since the spin
+// counts are relatively short, even in the worst case, the effect is harmless.
+//
+// Care must be taken that a low "D" value does not become an
+// absorbing state. Transient spinning failures -- when spinning
+// is overall profitable -- should not cause the system to converge
+// on low "D" values. We want spinning to be stable and predictable
+// and fairly responsive to change and at the same time we don't want
+// it to oscillate, become metastable, be "too" non-deterministic,
+// or converge on or enter undesirable stable absorbing states.
+//
+// We implement a feedback-based control system -- using past behavior
+// to predict future behavior. We face two issues: (a) if the
+// input signal is random then the spin predictor won't provide optimal
+// results, and (b) if the signal frequency is too high then the control
+// system, which has some natural response lag, will "chase" the signal.
+// (b) can arise from multimodal lock hold times. Transient preemption
+// can also result in apparent bimodal lock hold times.
+// Although sub-optimal, neither condition is particularly harmful, as
+// in the worst-case we'll spin when we shouldn't or vice-versa.
+// The maximum spin duration is rather short so the failure modes aren't bad.
+// To be conservative, I've tuned the gain in the system to bias toward
+// _not spinning. Relatedly, the system can sometimes enter a mode where it
+// "rings" or oscillates between spinning and not spinning. This happens
+// when spinning is just on the cusp of profitability, however, so the
+// situation is not dire. The state is benign -- there's no need to add
+// hysteresis control to damp the transition rate between spinning and
+// not spinning.
+//
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+//
+// Spin-then-block strategies ...
+//
+// Thoughts on ways to improve spinning :
+//
+// * Periodically call {psr_}getloadavg() while spinning, and
+// permit unbounded spinning if the load average is <
+// the number of processors. Beware, however, that getloadavg()
+// is exceptionally fast on solaris (about 1/10 the cost of a full
+// spin cycle), but quite expensive on linux. Beware also that
+// multiple JVMs could "ring" or oscillate in a feedback loop.
+// Sufficient damping would solve that problem.
+//
+// * We currently use spin loops with iteration counters to approximate
+// spinning for some interval. Given the availability of high-precision
+// time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should
+// someday reimplement the spin loops to be duration-based instead of iteration-based.
+//
+// * Don't spin if there are more than N = (CPUs/2) threads
+// currently spinning on the monitor (or globally).
+// That is, limit the number of concurrent spinners.
+// We might also limit the # of spinners in the JVM, globally.
+//
+// * If a spinning thread observes _owner change hands it should
+// abort the spin (and park immediately) or at least debit
+// the spin counter by a large "penalty".
+//
+// * Classically, the spin count is either K*(CPUs-1) or is a
+// simple constant that approximates the length of a context switch.
+// We currently use a value -- computed by a special utility -- that
+// approximates round-trip context switch times.
+//
+// * Normally schedctl_start()/_stop() is used to advise the kernel
+// to avoid preempting threads that are running in short, bounded
+// critical sections. We could use the schedctl hooks in an inverted
+// sense -- spinners would set the nopreempt flag, but poll the preempt
+// pending flag. If a spinner observed a pending preemption it'd immediately
+// abort the spin and park. As such, the schedctl service acts as
+// a preemption warning mechanism.
+//
+// * In lieu of spinning, if the system is running below saturation
+// (that is, loadavg() << #cpus), we can instead suppress futile
+// wakeup throttling, or even wake more than one successor at exit-time.
+// The net effect is largely equivalent to spinning. In both cases,
+// contending threads go ONPROC and opportunistically attempt to acquire
+// the lock, decreasing lock handover latency at the expense of wasted
+// cycles and context switching.
+//
+// * We might want to spin less after we've parked as the thread will
+// have less $ and TLB affinity with the processor.
+// Likewise, we might spin less if we come ONPROC on a different
+// processor or after a long period (>> rechose_interval).
+//
+// * A table-driven state machine similar to Solaris' dispadmin scheduling
+// tables might be a better design. Instead of encoding information in
+// _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit,
+// discrete states. Success or failure during a spin would drive
+// state transitions, and each state node would contain a spin count.
+//
+// * If the processor is operating in a mode intended to conserve power
+// (such as Intel's SpeedStep) or to reduce thermal output (thermal
+// step-down mode) then the Java synchronization subsystem should
+// forgo spinning.
+//
+// * The minimum spin duration should be approximately the worst-case
+// store propagation latency on the platform. That is, the time
+// it takes a store on CPU A to become visible on CPU B, where A and
+// B are "distant".
+//
+// * We might want to factor a thread's priority in the spin policy.
+// Threads with a higher priority might spin for slightly longer.
+// Similarly, if we use back-off in the TATAS loop, lower priority
+// threads might back-off longer. We don't currently use a
+// thread's priority when placing it on the entry queue. We may
+// want to consider doing so in future releases.
+//
+// * We might transiently drop a thread's scheduling priority while it spins.
+// SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris
+// would suffice. We could even consider letting the thread spin indefinitely at
+// a depressed or "idle" priority. This brings up fairness issues, however --
+// in a saturated system a thread with a reduced priority could languish
+// for extended periods on the ready queue.
+//
+// * While spinning try to use the otherwise wasted time to help the VM make
+// progress:
+//
+// -- YieldTo() the owner, if the owner is OFFPROC but ready
+// Donate our remaining quantum directly to the ready thread.
+// This helps "push" the lock owner through the critical section.
+// It also tends to improve affinity/locality as the lock
+// "migrates" less frequently between CPUs.
+// -- Walk our own stack in anticipation of blocking. Memoize the roots.
+// -- Perform strand checking for other threads. Unpark potential strandees.
+// -- Help GC: trace or mark -- this would need to be a bounded unit of work.
+// Unfortunately this will pollute our $ and TLBs. Recall that we
+// spin to avoid context switching -- context switching has an
+// immediate cost in latency, a disruptive cost to other strands on a CMT
+// processor, and an amortized cost because of the D$ and TLB cache
+// reload transient when the thread comes back ONPROC and repopulates
+// $s and TLBs.
+// -- call getloadavg() to see if the system is saturated. It'd probably
+// make sense to call getloadavg() half way through the spin.
+// If the system isn't at full capacity then we'd simply reset
+// the spin counter and extend the spin attempt.
+// -- Doug points out that we should use the same "helping" policy
+// in thread.yield().
+//
+// * Try MONITOR-MWAIT on systems that support those instructions.
+//
+// * The spin statistics that drive spin decisions & frequency are
+// maintained in the objectmonitor structure so if we deflate and reinflate
+// we lose spin state. In practice this is not usually a concern
+// as the default spin state after inflation is aggressive (optimistic)
+// and tends toward spinning. So in the worst case for a lock where
+// spinning is not profitable we may spin unnecessarily for a brief
+// period. But then again, if a lock is contended it'll tend not to deflate
+// in the first place.
+
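+// Worked example of the duration adjustment (illustrative; the knob values
+// below are examples, not necessarily the build defaults): with
+// Knob_Poverty=1000, Knob_Bonus=100, Knob_Penalty=200 and Knob_SpinLimit=5000,
+// a successful spin raises _SpinDuration from 900 to max(900,1000)+100 = 1100,
+// while a failed spin lowers 1100 to 1100-200 = 900.  Once _SpinDuration
+// reaches Knob_SpinLimit, further successes no longer increase it.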
+
+intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
+int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
+
+// Spinning: Fixed frequency (100%), vary duration
+
+int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
+
+ // Dumb, brutal spin. Good for comparative measurements against adaptive spinning.
+ int ctr = Knob_FixedSpin ;
+ if (ctr != 0) {
+ while (--ctr >= 0) {
+ if (TryLock (Self) > 0) return 1 ;
+ SpinPause () ;
+ }
+ return 0 ;
+ }
+
+ for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
+ if (TryLock(Self) > 0) {
+ // Increase _SpinDuration ...
+ // Note that we don't clamp SpinDuration precisely at SpinLimit.
+ // Raising _SpinDuration to the poverty line is key.
+ int x = _SpinDuration ;
+ if (x < Knob_SpinLimit) {
+ if (x < Knob_Poverty) x = Knob_Poverty ;
+ _SpinDuration = x + Knob_BonusB ;
+ }
+ return 1 ;
+ }
+ SpinPause () ;
+ }
+
+ // Admission control - verify preconditions for spinning
+ //
+ // We always spin a little bit, just to prevent _SpinDuration == 0 from
+ // becoming an absorbing state. Put another way, we spin briefly to
+ // sample, just in case the system load, parallelism, contention, or lock
+ // modality changed.
+ //
+ // Consider the following alternative:
+ // Periodically set _SpinDuration = _SpinLimit and try a long/full
+ // spin attempt. "Periodically" might mean after a tally of
+ // the # of failed spin attempts (or iterations) reaches some threshold.
+ // This takes us into the realm of 1-out-of-N spinning, where we
+ // hold the duration constant but vary the frequency.
+
+ ctr = _SpinDuration ;
+ if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
+ if (ctr <= 0) return 0 ;
+
+ if (Knob_SuccRestrict && _succ != NULL) return 0 ;
+ if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
+ TEVENT (Spin abort - notrunnable [TOP]);
+ return 0 ;
+ }
+
+ int MaxSpin = Knob_MaxSpinners ;
+ if (MaxSpin >= 0) {
+ if (_Spinner > MaxSpin) {
+ TEVENT (Spin abort -- too many spinners) ;
+ return 0 ;
+ }
+ // Slightly racy, but benign ...
+ Adjust (&_Spinner, 1) ;
+ }
+
+ // We're good to spin ... spin ingress.
+ // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
+ // when preparing to LD...CAS _owner, etc and the CAS is likely
+ // to succeed.
+ int hits = 0 ;
+ int msk = 0 ;
+ int caspty = Knob_CASPenalty ;
+ int oxpty = Knob_OXPenalty ;
+ int sss = Knob_SpinSetSucc ;
+ if (sss && _succ == NULL ) _succ = Self ;
+ Thread * prv = NULL ;
+
+ // There are three ways to exit the following loop:
+ // 1. A successful spin where this thread has acquired the lock.
+ // 2. Spin failure with prejudice
+ // 3. Spin failure without prejudice
+
+ while (--ctr >= 0) {
+
+ // Periodic polling -- Check for pending GC
+ // Threads may spin while they're unsafe.
+ // We don't want spinning threads to delay the JVM from reaching
+ // a stop-the-world safepoint or to steal cycles from GC.
+ // If we detect a pending safepoint we abort in order that
+ // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
+ // this thread, if safe, doesn't steal cycles from GC.
+ // This is in keeping with the "no loitering in runtime" rule.
+ // We periodically check to see if there's a safepoint pending.
+ if ((ctr & 0xFF) == 0) {
+ if (SafepointSynchronize::do_call_back()) {
+ TEVENT (Spin: safepoint) ;
+ goto Abort ; // abrupt spin egress
+ }
+ if (Knob_UsePause & 1) SpinPause () ;
+
+ int (*scb)(intptr_t,int) = SpinCallbackFunction ;
+ if (hits > 50 && scb != NULL) {
+ int abend = (*scb)(SpinCallbackArgument, 0) ;
+ }
+ }
+
+ if (Knob_UsePause & 2) SpinPause() ;
+
+ // Exponential back-off ... Stay off the bus to reduce coherency traffic.
+ // This is useful on classic SMP systems, but is of less utility on
+ // N1-style CMT platforms.
+ //
+ // Trade-off: lock acquisition latency vs coherency bandwidth.
+ // Lock hold times are typically short. A histogram
+ // of successful spin attempts shows that we usually acquire
+ // the lock early in the spin. That suggests we want to
+ // sample _owner frequently in the early phase of the spin,
+ // but then back-off and sample less frequently as the spin
+ // progresses. The back-off makes us a good citizen on big
+ // SMP systems. Oversampling _owner can consume excessive
+ // coherency bandwidth. Relatedly, if we _oversample _owner we
+ // can inadvertently interfere with the ST m->owner=null
+ // executed by the lock owner.
+ if (ctr & msk) continue ;
+ ++hits ;
+ if ((hits & 0xF) == 0) {
+ // The 0xF, above, corresponds to the exponent.
+ // Consider: (msk+1)|msk
+ msk = ((msk << 2)|3) & BackOffMask ;
+ }
+
+ // Probe _owner with TATAS
+ // If this thread observes the monitor transition or flicker
+ // from locked to unlocked to locked, then the odds that this
+ // thread will acquire the lock in this spin attempt go down
+ // considerably. The same argument applies if the CAS fails
+ // or if we observe _owner change from one non-null value to
+ // another non-null value. In such cases we might abort
+ // the spin without prejudice or apply a "penalty" to the
+ // spin count-down variable "ctr", reducing it by 100, say.
+
+ Thread * ox = (Thread *) _owner ;
+ if (ox == NULL) {
+ ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
+ if (ox == NULL) {
+ // The CAS succeeded -- this thread acquired ownership
+ // Take care of some bookkeeping to exit spin state.
+ if (sss && _succ == Self) {
+ _succ = NULL ;
+ }
+ if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
+
+ // Increase _SpinDuration :
+ // The spin was successful (profitable) so we tend toward
+ // longer spin attempts in the future.
+ // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
+ // If we acquired the lock early in the spin cycle it
+ // makes sense to increase _SpinDuration proportionally.
+ // Note that we don't clamp SpinDuration precisely at SpinLimit.
+ int x = _SpinDuration ;
+ if (x < Knob_SpinLimit) {
+ if (x < Knob_Poverty) x = Knob_Poverty ;
+ _SpinDuration = x + Knob_Bonus ;
+ }
+ return 1 ;
+ }
+
+ // The CAS failed ... we can take any of the following actions:
+ // * penalize: ctr -= Knob_CASPenalty
+ // * exit spin with prejudice -- goto Abort;
+ // * exit spin without prejudice.
+ // * Since CAS is high-latency, retry again immediately.
+ prv = ox ;
+ TEVENT (Spin: cas failed) ;
+ if (caspty == -2) break ;
+ if (caspty == -1) goto Abort ;
+ ctr -= caspty ;
+ continue ;
+ }
+
+ // Did lock ownership change hands ?
+ if (ox != prv && prv != NULL ) {
+ TEVENT (spin: Owner changed)
+ if (oxpty == -2) break ;
+ if (oxpty == -1) goto Abort ;
+ ctr -= oxpty ;
+ }
+ prv = ox ;
+
+ // Abort the spin if the owner is not executing.
+ // The owner must be executing in order to drop the lock.
+ // Spinning while the owner is OFFPROC is idiocy.
+ // Consider: ctr -= RunnablePenalty ;
+ if (Knob_OState && NotRunnable (Self, ox)) {
+ TEVENT (Spin abort - notrunnable);
+ goto Abort ;
+ }
+ if (sss && _succ == NULL ) _succ = Self ;
+ }
+
+ // Spin failed with prejudice -- reduce _SpinDuration.
+ // TODO: Use an AIMD-like policy to adjust _SpinDuration.
+ // AIMD is globally stable.
+ TEVENT (Spin failure) ;
+ {
+ int x = _SpinDuration ;
+ if (x > 0) {
+ // Consider an AIMD scheme like: x -= (x >> 3) + 100
+ // This is globally stable and tends to damp the response.
+ x -= Knob_Penalty ;
+ if (x < 0) x = 0 ;
+ _SpinDuration = x ;
+ }
+ }
+
+ Abort:
+ if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
+ if (sss && _succ == Self) {
+ _succ = NULL ;
+ // Invariant: after setting succ=null a contending thread
+ // must recheck-retry _owner before parking. This usually happens
+ // in the normal usage of TrySpin(), but it's safest
+ // to make TrySpin() as foolproof as possible.
+ OrderAccess::fence() ;
+ if (TryLock(Self) > 0) return 1 ;
+ }
+ return 0 ;
+}
+
+#define TrySpin TrySpin_VaryDuration
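+
+// For contrast, a sketch (illustrative only, not compiled here) of the
+// 1-out-of-N alternative mentioned in the admission control comments above:
+// hold the spin duration constant and adapt the admission frequency instead.
+// Knob_FixedSpinDuration and the +3/-1 gains are hypothetical.
+//
+//    int ObjectMonitor::TrySpin_VaryFrequency (Thread * Self) {
+//       if (int(os::random() % 100) >= _SpinFreq) return 0 ;   // declined admission
+//       for (int ctr = Knob_FixedSpinDuration ; --ctr >= 0 ; ) {
+//          if (TryLock (Self) > 0) {
+//             if (_SpinFreq < 100) _SpinFreq += 3 ;             // reward success
+//             return 1 ;
+//          }
+//          SpinPause () ;
+//       }
+//       if (_SpinFreq > 0) _SpinFreq -= 1 ;                     // penalize failure
+//       return 0 ;
+//    }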
+
+static void DeferredInitialize () {
+ if (InitDone > 0) return ;
+ if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
+ while (InitDone != 1) ;
+ return ;
+ }
+
+ // One-shot global initialization ...
+ // The initialization is idempotent, so we don't need locks.
+ // In the future consider doing this via os::init_2().
+ // SyncKnobs consist of <Key>=<Value> pairs in the style
+ // of environment variables. Start by converting ':' to NUL.
+
+ if (SyncKnobs == NULL) SyncKnobs = "" ;
+
+ size_t sz = strlen (SyncKnobs) ;
+ char * knobs = (char *) malloc (sz + 2) ;
+ if (knobs == NULL) {
+ vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
+ guarantee (0, "invariant") ;
+ }
+ strcpy (knobs, SyncKnobs) ;
+ knobs[sz+1] = 0 ;
+ for (char * p = knobs ; *p ; p++) {
+ if (*p == ':') *p = 0 ;
+ }
+
+ #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
+ SETKNOB(ReportSettings) ;
+ SETKNOB(Verbose) ;
+ SETKNOB(FixedSpin) ;
+ SETKNOB(SpinLimit) ;
+ SETKNOB(SpinBase) ;
+ SETKNOB(SpinBackOff);
+ SETKNOB(CASPenalty) ;
+ SETKNOB(OXPenalty) ;
+ SETKNOB(LogSpins) ;
+ SETKNOB(SpinSetSucc) ;
+ SETKNOB(SuccEnabled) ;
+ SETKNOB(SuccRestrict) ;
+ SETKNOB(Penalty) ;
+ SETKNOB(Bonus) ;
+ SETKNOB(BonusB) ;
+ SETKNOB(Poverty) ;
+ SETKNOB(SpinAfterFutile) ;
+ SETKNOB(UsePause) ;
+ SETKNOB(SpinEarly) ;
+ SETKNOB(OState) ;
+ SETKNOB(MaxSpinners) ;
+ SETKNOB(PreSpin) ;
+ SETKNOB(ExitPolicy) ;
+ SETKNOB(QMode);
+ SETKNOB(ResetEvent) ;
+ SETKNOB(MoveNotifyee) ;
+ SETKNOB(FastHSSEC) ;
+ #undef SETKNOB
+
+ if (os::is_MP()) {
+ BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
+ if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
+ // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
+ } else {
+ Knob_SpinLimit = 0 ;
+ Knob_SpinBase = 0 ;
+ Knob_PreSpin = 0 ;
+ Knob_FixedSpin = -1 ;
+ }
+
+ if (Knob_LogSpins == 0) {
+ ObjectSynchronizer::_sync_FailedSpins = NULL ;
+ }
+
+ free (knobs) ;
+ OrderAccess::fence() ;
+ InitDone = 1 ;
+}
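+
+// Example (illustrative; assumes SyncKnobs arrives as a ':'-separated list of
+// <Key>=<Value> pairs): a setting such as
+//    "SpinLimit=4096:MaxSpinners=4:ReportSettings=1"
+// is flattened by the loop above into
+//    "SpinLimit=4096\0MaxSpinners=4\0ReportSettings=1\0\0"
+// and then consumed by kvGetInt() via the SETKNOB() expansions.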
+
+// Theory of operations -- Monitors lists, thread residency, etc:
+//
+// * A thread acquires ownership of a monitor by successfully
+// CAS()ing the _owner field from null to non-null.
+//
+// * Invariant: A thread appears on at most one monitor list --
+// cxq, EntryList or WaitSet -- at any one time.
+//
+// * Contending threads "push" themselves onto the cxq with CAS
+// and then spin/park.
+//
+// * After a contending thread eventually acquires the lock it must
+// dequeue itself from either the EntryList or the cxq.
+//
+// * The exiting thread identifies and unparks an "heir presumptive"
+// tentative successor thread on the EntryList. Critically, the
+// exiting thread doesn't unlink the successor thread from the EntryList.
+// After having been unparked, the wakee will recontend for ownership of
+// the monitor. The successor (wakee) will either acquire the lock or
+// re-park itself.
+//
+// Succession is provided for by a policy of competitive handoff.
+// The exiting thread does _not_ grant or pass ownership to the
+// successor thread. (This is also referred to as "handoff succession").
+// Instead the exiting thread releases ownership and possibly wakes
+// a successor, so the successor can (re)compete for ownership of the lock.
+// If the EntryList is empty but the cxq is populated the exiting
+// thread will drain the cxq into the EntryList. It does so by
+// detaching the cxq (installing null with CAS) and folding
+// the threads from the cxq into the EntryList. The EntryList is
+// doubly linked, while the cxq is singly linked because of the
+// CAS-based "push" used to enqueue recently arrived threads (RATs).
+//
+// * Concurrency invariants:
+//
+// -- only the monitor owner may access or mutate the EntryList.
+// The mutex property of the monitor itself protects the EntryList
+// from concurrent interference.
+// -- Only the monitor owner may detach the cxq.
+//
+// * The monitor entry list operations avoid locks, but strictly speaking
+// they're not lock-free. Enter is lock-free, exit is not.
+// See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
+//
+// * The cxq can have multiple concurrent "pushers" but only one concurrent
+// detaching thread. This mechanism is immune from the ABA corruption.
+// More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
+//
+// * Taken together, the cxq and the EntryList constitute or form a
+// single logical queue of threads stalled trying to acquire the lock.
+// We use two distinct lists to improve the odds of a constant-time
+// dequeue operation after acquisition (in the ::enter() epilog) and
+// to reduce heat on the list ends. (c.f. Michael Scott's "2Q" algorithm).
+// A key desideratum is to minimize queue & monitor metadata manipulation
+// that occurs while holding the monitor lock -- that is, we want to
+// minimize monitor lock holds times. Note that even a small amount of
+// fixed spinning will greatly reduce the # of enqueue-dequeue operations
+// on EntryList|cxq. That is, spinning relieves contention on the "inner"
+// locks and monitor metadata.
+//
+// Cxq points to the set of Recently Arrived Threads attempting entry.
+// Because we push threads onto _cxq with CAS, the RATs must take the form of
+// a singly-linked LIFO. We drain _cxq into EntryList at unlock-time when
+// the unlocking thread notices that EntryList is null but _cxq is != null.
+//
+// The EntryList is ordered by the prevailing queue discipline and
+// can be organized in any convenient fashion, such as a doubly-linked list or
+// a circular doubly-linked list. Critically, we want insert and delete operations
+// to operate in constant-time. If we need a priority queue then something akin
+// to Solaris' sleepq would work nicely. Viz.,
+// http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
+// Queue discipline is enforced at ::exit() time, when the unlocking thread
+// drains the cxq into the EntryList, and orders or reorders the threads on the
+// EntryList accordingly.
+//
+// Barring "lock barging", this mechanism provides fair cyclic ordering,
+// somewhat similar to an elevator-scan.
+//
+// * The monitor synchronization subsystem avoids the use of native
+// synchronization primitives except for the narrow platform-specific
+// park-unpark abstraction. See the comments in os_solaris.cpp regarding
+// the semantics of park-unpark. Put another way, this monitor implementation
+// depends only on atomic operations and park-unpark. The monitor subsystem
+// manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
+// underlying OS manages the READY<->RUN transitions.
+//
+// * Waiting threads reside on the WaitSet list -- wait() puts
+// the caller onto the WaitSet.
+//
+// * notify() or notifyAll() simply transfers threads from the WaitSet to
+// either the EntryList or cxq. Subsequent exit() operations will
+// unpark the notifyee. Unparking a notifyee in notify() is inefficient -
+// it's likely the notifyee would simply impale itself on the lock held
+// by the notifier.
+//
+// * An interesting alternative is to encode cxq as (List,LockByte) where
+// the LockByte is 0 iff the monitor is owned. _owner is simply an auxiliary
+// variable, like _recursions, in the scheme. The threads or Events that form
+// the list would have to be aligned on 256-byte boundaries. A thread would
+// try to acquire the lock or enqueue itself with CAS, but exiting threads
+// could use a 1-0 protocol and simply STB to set the LockByte to 0.
+// Note that this is *not* word-tearing, but it does presume that full-word
+// CAS operations are coherent when intermixed with STB operations. That's true
+// on most common processors.
+//
+// * See also http://blogs.sun.com/dave
+
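+// Pictorially (illustrative only), with threads T1..T5 contending:
+//
+//             push w/CAS                       drain at exit()
+//   enter() ------------> _cxq: T5 -> T4 -> T3 --------------> _EntryList: T1 <-> T2
+//                         (singly-linked LIFO                  (doubly-linked, ordered by
+//                          of recently arrived threads)         the prevailing queue discipline)
+//
+// The exiting thread unparks an "heir presumptive" from the EntryList, which
+// then recontends for the lock under the competitive succession policy
+// described above.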
+
+void ATTR ObjectMonitor::EnterI (TRAPS) {
+ Thread * Self = THREAD ;
+ assert (Self->is_Java_thread(), "invariant") ;
+ assert (((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant") ;
+
+ // Try the lock - TATAS
+ if (TryLock (Self) > 0) {
+ assert (_succ != Self , "invariant") ;
+ assert (_owner == Self , "invariant") ;
+ assert (_Responsible != Self , "invariant") ;
+ return ;
+ }
+
+ DeferredInitialize () ;
+
+ // We try one round of spinning *before* enqueueing Self.
+ //
+ // If the _owner is ready but OFFPROC we could use a YieldTo()
+ // operation to donate the remainder of this thread's quantum
+ // to the owner. This has subtle but beneficial affinity
+ // effects.
+
+ if (TrySpin (Self) > 0) {
+ assert (_owner == Self , "invariant") ;
+ assert (_succ != Self , "invariant") ;
+ assert (_Responsible != Self , "invariant") ;
+ return ;
+ }
+
+ // The Spin failed -- Enqueue and park the thread ...
+ assert (_succ != Self , "invariant") ;
+ assert (_owner != Self , "invariant") ;
+ assert (_Responsible != Self , "invariant") ;
+
+ // Enqueue "Self" on ObjectMonitor's _cxq.
+ //
+ // Node acts as a proxy for Self.
+ // As an aside, if we were ever to rewrite the synchronization code mostly
+ // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
+ // Java objects. This would avoid awkward lifecycle and liveness issues,
+ // as well as eliminate a subset of ABA issues.
+ // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
+ //
+
+ ObjectWaiter node(Self) ;
+ Self->_ParkEvent->reset() ;
+ node._prev = (ObjectWaiter *) 0xBAD ;
+ node.TState = ObjectWaiter::TS_CXQ ;
+
+ // Push "Self" onto the front of the _cxq.
+ // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
+ // Note that spinning tends to reduce the rate at which threads
+ // enqueue and dequeue on EntryList|cxq.
+ ObjectWaiter * nxt ;
+ for (;;) {
+ node._next = nxt = _cxq ;
+ if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
+
+ // Interference - the CAS failed because _cxq changed. Just retry.
+ // As an optional optimization we retry the lock.
+ if (TryLock (Self) > 0) {
+ assert (_succ != Self , "invariant") ;
+ assert (_owner == Self , "invariant") ;
+ assert (_Responsible != Self , "invariant") ;
+ return ;
+ }
+ }
+
+ // Check for cxq|EntryList edge transition to non-null. This indicates
+ // the onset of contention. While contention persists exiting threads
+ // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit
+ // operations revert to the faster 1-0 mode. This enter operation may interleave
+ // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
+ // arrange for one of the contending threads to use a timed park() operation
+ // to detect and recover from the race. (Stranding is a form of progress failure
+ // where the monitor is unlocked but all the contending threads remain parked).
+ // That is, at least one of the contended threads will periodically poll _owner.
+ // One of the contending threads will become the designated "Responsible" thread.
+ // The Responsible thread uses a timed park instead of a normal indefinite park
+ // operation -- it periodically wakes and checks for and recovers from potential
+ // strandings admitted by 1-0 exit operations. We need at most one Responsible
+ // thread per-monitor at any given moment. Only threads on cxq|EntryList may
+ // be responsible for a monitor.
+ //
+ // Currently, one of the contended threads takes on the added role of "Responsible".
+ // A viable alternative would be to use a dedicated "stranding checker" thread
+ // that periodically iterated over all the threads (or active monitors) and unparked
+ // successors where there was risk of stranding. This would help eliminate the
+ // timer scalability issues we see on some platforms as we'd only have one thread
+ // -- the checker -- parked on a timer.
+
+ if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
+ // Try to assume the role of responsible thread for the monitor.
+ // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
+ Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
+ }
+
+ // The lock might have been released while this thread was occupied queueing
+ // itself onto _cxq. To close the race and avoid "stranding" and
+ // progress-liveness failure we must resample-retry _owner before parking.
+ // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
+ // In this case the ST-MEMBAR is accomplished with CAS().
+ //
+ // TODO: Defer all thread state transitions until park-time.
+ // Since state transitions are heavy and inefficient we'd like
+ // to defer the state transitions until absolutely necessary,
+ // and in doing so avoid some transitions ...
+
+ TEVENT (Inflated enter - Contention) ;
+ int nWakeups = 0 ;
+ int RecheckInterval = 1 ;
+
+ for (;;) {
+
+ if (TryLock (Self) > 0) break ;
+ assert (_owner != Self, "invariant") ;
+
+ if ((SyncFlags & 2) && _Responsible == NULL) {
+ Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
+ }
+
+ // park self
+ if (_Responsible == Self || (SyncFlags & 1)) {
+ TEVENT (Inflated enter - park TIMED) ;
+ Self->_ParkEvent->park ((jlong) RecheckInterval) ;
+ // Increase the RecheckInterval, but clamp the value.
+ RecheckInterval *= 8 ;
+ if (RecheckInterval > 1000) RecheckInterval = 1000 ;
+ } else {
+ TEVENT (Inflated enter - park UNTIMED) ;
+ Self->_ParkEvent->park() ;
+ }
+
+ if (TryLock(Self) > 0) break ;
+
+ // The lock is still contested.
+ // Keep a tally of the # of futile wakeups.
+ // Note that the counter is not protected by a lock or updated by atomics.
+ // That is by design - we trade "lossy" counters which are exposed to
+ // races during updates for a lower probe effect.
+ TEVENT (Inflated enter - Futile wakeup) ;
+ if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
+ ObjectSynchronizer::_sync_FutileWakeups->inc() ;
+ }
+ ++ nWakeups ;
+
+ // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
+ // We can defer clearing _succ until after the spin completes
+ // TrySpin() must tolerate being called with _succ == Self.
+ // Try yet another round of adaptive spinning.
+ if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
+
+ // We can find that we were unpark()ed and redesignated _succ while
+ // we were spinning. That's harmless. If we iterate and call park(),
+ // park() will consume the event and return immediately and we'll
+ // just spin again. This pattern can repeat, leaving _succ to simply
+ // spin on a CPU. Enable Knob_ResetEvent to clear pending unparks().
+ // Alternately, we can sample fired() here, and if set, forgo spinning
+ // in the next iteration.
+
+ if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
+ Self->_ParkEvent->reset() ;
+ OrderAccess::fence() ;
+ }
+ if (_succ == Self) _succ = NULL ;
+
+ // Invariant: after clearing _succ a thread *must* retry _owner before parking.
+ OrderAccess::fence() ;
+ }
+
+ // Egress :
+ // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
+ // Normally we'll find Self on the EntryList .
+ // From the perspective of the lock owner (this thread), the
+ // EntryList is stable and cxq is prepend-only.
+ // The head of cxq is volatile but the interior is stable.
+ // In addition, Self.TState is stable.
+
+ assert (_owner == Self , "invariant") ;
+ assert (object() != NULL , "invariant") ;
+ // I'd like to write:
+ // guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+ // but as we're at a safepoint that's not safe.
+
+ UnlinkAfterAcquire (Self, &node) ;
+ if (_succ == Self) _succ = NULL ;
+
+ assert (_succ != Self, "invariant") ;
+ if (_Responsible == Self) {
+ _Responsible = NULL ;
+ // Dekker pivot-point.
+ // Consider OrderAccess::storeload() here
+
+ // We may leave threads on cxq|EntryList without a designated
+ // "Responsible" thread. This is benign. When this thread subsequently
+ // exits the monitor it can "see" such preexisting "old" threads --
+ // threads that arrived on the cxq|EntryList before the fence, above --
+ // by LDing cxq|EntryList. Newly arrived threads -- that is, threads
+ // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
+ // non-null and elect a new "Responsible" timer thread.
+ //
+ // This thread executes:
+ // ST Responsible=null; MEMBAR (in enter epilog - here)
+ // LD cxq|EntryList (in subsequent exit)
+ //
+ // Entering threads in the slow/contended path execute:
+ // ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
+ // The (ST cxq; MEMBAR) is accomplished with CAS().
+ //
+ // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
+ // exit operation from floating above the ST Responsible=null.
+ //
+ // In *practice* however, EnterI() is always followed by some atomic
+ // operation such as the decrement of _count in ::enter(). Those atomics
+ // obviate the need for the explicit MEMBAR, above.
+ }
+
+ // We've acquired ownership with CAS().
+ // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
+ // But since the CAS() this thread may have also stored into _succ,
+ // EntryList, cxq or Responsible. These meta-data updates must be
+ // visible _before_ this thread subsequently drops the lock.
+ // Consider what could occur if we didn't enforce this constraint --
+ // STs to monitor meta-data and user-data could reorder with (become
+ // visible after) the ST in exit that drops ownership of the lock.
+ // Some other thread could then acquire the lock, but observe inconsistent
+ // or old monitor meta-data and heap data. That violates the JMM.
+ // To that end, the 1-0 exit() operation must have at least STST|LDST
+ // "release" barrier semantics. Specifically, there must be at least a
+ // STST|LDST barrier in exit() before the ST of null into _owner that drops
+ // the lock. The barrier ensures that changes to monitor meta-data and data
+ // protected by the lock will be visible before we release the lock, and
+ // therefore before some other thread (CPU) has a chance to acquire the lock.
+ // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
+ //
+ // Critically, any prior STs to _succ or EntryList must be visible before
+ // the ST of null into _owner in the *subsequent* (following) corresponding
+ // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
+ // execute a serializing instruction.
+
+ if (SyncFlags & 8) {
+ OrderAccess::fence() ;
+ }
+ return ;
+}
+
+// ExitSuspendEquivalent:
+// A faster alternative to handle_special_suspend_equivalent_condition()
+//
+// handle_special_suspend_equivalent_condition() unconditionally
+// acquires the SR_lock. On some platforms uncontended MutexLocker()
+// operations have high latency. Note that in ::enter() we call HSSEC
+// while holding the monitor, so we effectively lengthen the critical sections.
+//
+// There are a number of possible solutions:
+//
+// A. To ameliorate the problem we might also defer state transitions
+// to as late as possible -- just prior to parking.
+// Given that, we'd call HSSEC after having returned from park(),
+// but before attempting to acquire the monitor. This is only a
+// partial solution. It avoids calling HSSEC while holding the
+// monitor (good), but it still increases successor reacquisition latency --
+// the interval between unparking a successor and the time the successor
+// resumes and retries the lock. See ReenterI(), which defers state transitions.
+// If we use this technique we can also avoid EnterI()-exit() loop
+// in ::enter() where we iteratively drop the lock and then attempt
+// to reacquire it after suspending.
+//
+// B. In the future we might fold all the suspend bits into a
+// composite per-thread suspend flag and then update it with CAS().
+// Alternately, a Dekker-like mechanism with multiple variables
+// would suffice:
+// ST Self->_suspend_equivalent = false
+// MEMBAR
+// LD Self->_suspend_flags
+//
+
+
+bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
+ int Mode = Knob_FastHSSEC ;
+ if (Mode && !jSelf->is_external_suspend()) {
+ assert (jSelf->is_suspend_equivalent(), "invariant") ;
+ jSelf->clear_suspend_equivalent() ;
+ if (2 == Mode) OrderAccess::storeload() ;
+ if (!jSelf->is_external_suspend()) return false ;
+ // We raced a suspension -- fall thru into the slow path
+ TEVENT (ExitSuspendEquivalent - raced) ;
+ jSelf->set_suspend_equivalent() ;
+ }
+ return jSelf->handle_special_suspend_equivalent_condition() ;
+}
+
+
+// ReenterI() is a specialized inline form of the latter half of the
+// contended slow-path from EnterI(). We use ReenterI() only for
+// monitor reentry in wait().
+//
+// In the future we should reconcile EnterI() and ReenterI(), adding
+// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
+// loop accordingly.
+
+void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
+ assert (Self != NULL , "invariant") ;
+ assert (SelfNode != NULL , "invariant") ;
+ assert (SelfNode->_thread == Self , "invariant") ;
+ assert (_waiters > 0 , "invariant") ;
+ assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
+ assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
+ JavaThread * jt = (JavaThread *) Self ;
+
+ int nWakeups = 0 ;
+ for (;;) {
+ ObjectWaiter::TStates v = SelfNode->TState ;
+ guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
+ assert (_owner != Self, "invariant") ;
+
+ if (TryLock (Self) > 0) break ;
+ if (TrySpin (Self) > 0) break ;
+
+ TEVENT (Wait Reentry - parking) ;
+
+ // State transition wrappers around park() ...
+ // ReenterI() wisely defers state transitions until
+ // it's clear we must park the thread.
+ {
+ OSThreadContendState osts(Self->osthread());
+ ThreadBlockInVM tbivm(jt);
+
+ // cleared by handle_special_suspend_equivalent_condition()
+ // or java_suspend_self()
+ jt->set_suspend_equivalent();
+ if (SyncFlags & 1) {
+ Self->_ParkEvent->park ((jlong)1000) ;
+ } else {
+ Self->_ParkEvent->park () ;
+ }
+
+ // were we externally suspended while we were waiting?
+ for (;;) {
+ if (!ExitSuspendEquivalent (jt)) break ;
+ if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
+ jt->java_suspend_self();
+ jt->set_suspend_equivalent();
+ }
+ }
+
+ // Try again, but just so we distinguish between futile wakeups and
+ // successful wakeups. The following test isn't algorithmically
+ // necessary, but it helps us maintain sensible statistics.
+ if (TryLock(Self) > 0) break ;
+
+ // The lock is still contested.
+ // Keep a tally of the # of futile wakeups.
+ // Note that the counter is not protected by a lock or updated by atomics.
+ // That is by design - we trade "lossy" counters which are exposed to
+ // races during updates for a lower probe effect.
+ TEVENT (Wait Reentry - futile wakeup) ;
+ ++ nWakeups ;
+
+ // Assuming this is not a spurious wakeup we'll normally
+ // find that _succ == Self.
+ if (_succ == Self) _succ = NULL ;
+
+ // Invariant: after clearing _succ a contending thread
+ // *must* retry _owner before parking.
+ OrderAccess::fence() ;
+
+ if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
+ ObjectSynchronizer::_sync_FutileWakeups->inc() ;
+ }
+ }
+
+ // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
+ // Normally we'll find Self on the EntryList.
+ // Unlinking from the EntryList is constant-time and atomic-free.
+ // From the perspective of the lock owner (this thread), the
+ // EntryList is stable and cxq is prepend-only.
+ // The head of cxq is volatile but the interior is stable.
+ // In addition, Self.TState is stable.
+
+ assert (_owner == Self, "invariant") ;
+ assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+ UnlinkAfterAcquire (Self, SelfNode) ;
+ if (_succ == Self) _succ = NULL ;
+ assert (_succ != Self, "invariant") ;
+ SelfNode->TState = ObjectWaiter::TS_RUN ;
+ OrderAccess::fence() ; // see comments at the end of EnterI()
+}
+
+bool ObjectMonitor::try_enter(Thread* THREAD) {
+ if (THREAD != _owner) {
+ if (THREAD->is_lock_owned ((address)_owner)) {
+ assert(_recursions == 0, "internal state error");
+ _owner = THREAD ;
+ _recursions = 1 ;
+ OwnerIsThread = 1 ;
+ return true;
+ }
+ if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+ return false;
+ }
+ return true;
+ } else {
+ _recursions++;
+ return true;
+ }
+}
+
+void ATTR ObjectMonitor::enter(TRAPS) {
+ // The following code is ordered to check the most common cases first
+ // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
+ Thread * const Self = THREAD ;
+ void * cur ;
+
+ cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
+ if (cur == NULL) {
+ // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
+ assert (_recursions == 0 , "invariant") ;
+ assert (_owner == Self, "invariant") ;
+ // CONSIDER: set or assert OwnerIsThread == 1
+ return ;
+ }
+
+ if (cur == Self) {
+ // TODO-FIXME: check for integer overflow! BUGID 6557169.
+ _recursions ++ ;
+ return ;
+ }
+
+ if (Self->is_lock_owned ((address)cur)) {
+ assert (_recursions == 0, "internal state error");
+ _recursions = 1 ;
+ // Commute owner from a thread-specific on-stack BasicLock address to
+ // a full-fledged "Thread *".
+ _owner = Self ;
+ OwnerIsThread = 1 ;
+ return ;
+ }
+
+ // We've encountered genuine contention.
+ assert (Self->_Stalled == 0, "invariant") ;
+ Self->_Stalled = intptr_t(this) ;
+
+ // Try one round of spinning *before* enqueueing Self
+ // and before going through the awkward and expensive state
+ // transitions. The following spin is strictly optional ...
+ // Note that if we acquire the monitor from an initial spin
+ // we forgo posting JVMTI events and firing DTRACE probes.
+ if (Knob_SpinEarly && TrySpin (Self) > 0) {
+ assert (_owner == Self , "invariant") ;
+ assert (_recursions == 0 , "invariant") ;
+ assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+ Self->_Stalled = 0 ;
+ return ;
+ }
+
+ assert (_owner != Self , "invariant") ;
+ assert (_succ != Self , "invariant") ;
+ assert (Self->is_Java_thread() , "invariant") ;
+ JavaThread * jt = (JavaThread *) Self ;
+ assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
+ assert (jt->thread_state() != _thread_blocked , "invariant") ;
+ assert (this->object() != NULL , "invariant") ;
+ assert (_count >= 0, "invariant") ;
+
+ // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
+ // Ensure the object-monitor relationship remains stable while there's contention.
+ Atomic::inc_ptr(&_count);
+
+ { // Change java thread status to indicate blocked on monitor enter.
+ JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
+
+ DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
+ if (JvmtiExport::should_post_monitor_contended_enter()) {
+ JvmtiExport::post_monitor_contended_enter(jt, this);
+ }
+
+ OSThreadContendState osts(Self->osthread());
+ ThreadBlockInVM tbivm(jt);
+
+ Self->set_current_pending_monitor(this);
+
+ // TODO-FIXME: change the following for(;;) loop to straight-line code.
+ for (;;) {
+ jt->set_suspend_equivalent();
+ // cleared by handle_special_suspend_equivalent_condition()
+ // or java_suspend_self()
+
+ EnterI (THREAD) ;
+
+ if (!ExitSuspendEquivalent(jt)) break ;
+
+ //
+ // We have acquired the contended monitor, but while we were
+ // waiting another thread suspended us. We don't want to enter
+ // the monitor while suspended because that would surprise the
+ // thread that suspended us.
+ //
+ _recursions = 0 ;
+ _succ = NULL ;
+ exit (Self) ;
+
+ jt->java_suspend_self();
+ }
+ Self->set_current_pending_monitor(NULL);
+ }
+
+ Atomic::dec_ptr(&_count);
+ assert (_count >= 0, "invariant") ;
+ Self->_Stalled = 0 ;
+
+ // Must either set _recursions = 0 or ASSERT _recursions == 0.
+ assert (_recursions == 0 , "invariant") ;
+ assert (_owner == Self , "invariant") ;
+ assert (_succ != Self , "invariant") ;
+ assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+
+ // The thread -- now the owner -- is back in vm mode.
+ // Report the glorious news via TI,DTrace and jvmstat.
+ // The probe effect is non-trivial. All the reportage occurs
+ // while we hold the monitor, increasing the length of the critical
+ // section. Amdahl's parallel speedup law comes vividly into play.
+ //
+ // Another option might be to aggregate the events (thread local or
+ // per-monitor aggregation) and defer reporting until a more opportune
+ // time -- such as next time some thread encounters contention but has
+ // yet to acquire the lock. While that thread was spinning we
+ // could increment JVMStat counters, etc.
+
+ DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
+ if (JvmtiExport::should_post_monitor_contended_entered()) {
+ JvmtiExport::post_monitor_contended_entered(jt, this);
+ }
+ if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) {
+ ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ;
+ }
+}
+
+void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
+ assert (_owner == Self, "invariant") ;
+
+ // Exit protocol:
+ // 1. ST _succ = wakee
+ // 2. membar #loadstore|#storestore;
+ // 3. ST _owner = NULL
+ // 4. unpark(wakee)
+
+ _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
+ ParkEvent * Trigger = Wakee->_event ;
+
+ // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
+ // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
+ // out-of-scope (non-extant).
+ Wakee = NULL ;
+
+ // Drop the lock
+ OrderAccess::release_store_ptr (&_owner, NULL) ;
+ OrderAccess::fence() ; // ST _owner vs LD in unpark()
+
+ // TODO-FIXME:
+ // If there's a safepoint pending the best policy would be to
+ // get _this thread to a safepoint and only wake the successor
+ // after the safepoint completed. monitorexit uses a "leaf"
+ // state transition, however, so this thread can't become
+ // safe at this point in time. (Its stack isn't walkable).
+ // The next best thing is to defer waking the successor by
+ // adding it to a list of threads to be unparked at the
+ // end of the forthcoming STW.
+ if (SafepointSynchronize::do_call_back()) {
+ TEVENT (unpark before SAFEPOINT) ;
+ }
+
+ // Possible optimizations ...
+ //
+ // * Consider: set Wakee->UnparkTime = timeNow()
+ // When the thread wakes up it'll compute (timeNow() - Self->UnparkTime()).
+ // By measuring recent ONPROC latency we can approximate the
+ // system load. In turn, we can feed that information back
+ // into the spinning & succession policies.
+ // (ONPROC latency correlates strongly with load).
+ //
+ // * Pull affinity:
+ // If the wakee is cold then transiently setting its affinity
+ // to the current CPU is a good idea.
+ // See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt
+ Trigger->unpark() ;
+
+ // Maintain stats and report events to JVMTI
+ if (ObjectSynchronizer::_sync_Parks != NULL) {
+ ObjectSynchronizer::_sync_Parks->inc() ;
+ }
+ DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
+}
+
+
+// exit()
+// ~~~~~~
+// Note that the collector can't reclaim the objectMonitor or deflate
+// the object out from underneath the thread calling ::exit() as the
+// thread calling ::exit() never transitions to a stable state.
+// This inhibits GC, which in turn inhibits asynchronous (and
+// inopportune) reclamation of "this".
+//
+// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
+// There's one exception to the claim above, however. EnterI() can call
+// exit() to drop a lock if the acquirer has been externally suspended.
+// In that case exit() is called with _thread_state as _thread_blocked,
+// but the monitor's _count field is > 0, which inhibits reclamation.
+//
+// 1-0 exit
+// ~~~~~~~~
+// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
+// the fast-path operators have been optimized so the common ::exit()
+// operation is 1-0. See i486.ad fast_unlock(), for instance.
+// The code emitted by fast_unlock() elides the usual MEMBAR. This
+// greatly improves latency -- MEMBAR and CAS having considerable local
+// latency on modern processors -- but at the cost of "stranding". Absent the
+// MEMBAR, a thread in fast_unlock() can race a thread in the slow
+ // ::enter() path, resulting in the entering thread being stranded
+// and a progress-liveness failure. Stranding is extremely rare.
+// We use timers (timed park operations) & periodic polling to detect
+// and recover from stranding. Potentially stranded threads periodically
+// wake up and poll the lock. See the usage of the _Responsible variable.
+//
+// The CAS() in enter provides for safety and exclusion, while the CAS or
+// MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
+ // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
+// We detect and recover from stranding with timers.
+//
+// If a thread transiently strands it'll park until (a) another
+// thread acquires the lock and then drops the lock, at which time the
+// exiting thread will notice and unpark the stranded thread, or, (b)
+// the timer expires. If the lock is high traffic then the stranding latency
+// will be low due to (a). If the lock is low traffic then the odds of
+// stranding are lower, although the worst-case stranding latency
+// is longer. Critically, we don't want to put excessive load in the
+// platform's timer subsystem. We want to minimize both the timer injection
+// rate (timers created/sec) as well as the number of timers active at
+// any one time. (more precisely, we want to minimize timer-seconds, which is
+// the integral of the # of active timers at any instant over time).
+// Both impinge on OS scalability. Given that, at most one thread parked on
+// a monitor will use a timer.
+
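+// A hedged sketch of the stranding-recovery loop described above.  It is
+// illustrative only: the real recovery logic lives in EnterI()/ReenterI()
+// (not shown here), and the 1000ms recheck interval below is an assumed
+// placeholder, not the actual tuning.
+//
+//   for (;;) {
+//     if (TryLock (Self) > 0) break ;            // acquired the lock
+//     if (_Responsible == Self) {
+//       Self->_ParkEvent->park ((jlong) 1000) ;  // timed park: periodically repoll the lock
+//     } else {
+//       Self->_ParkEvent->park () ;              // untimed park: rely on exit() to unpark us
+//     }
+//   }
+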
+void ATTR ObjectMonitor::exit(TRAPS) {
+ Thread * Self = THREAD ;
+ if (THREAD != _owner) {
+ if (THREAD->is_lock_owned((address) _owner)) {
+ // Transmute _owner from a BasicLock pointer to a Thread address.
+ // We don't need to hold _mutex for this transition.
+ // Non-null to Non-null is safe as long as all readers can
+ // tolerate either flavor.
+ assert (_recursions == 0, "invariant") ;
+ _owner = THREAD ;
+ _recursions = 0 ;
+ OwnerIsThread = 1 ;
+ } else {
+ // NOTE: we need to handle unbalanced monitor enter/exit
+ // in native code by throwing an exception.
+ // TODO: Throw an IllegalMonitorStateException ?
+ TEVENT (Exit - Throw IMSX) ;
+ assert(false, "Non-balanced monitor enter/exit!");
+ if (false) {
+ THROW(vmSymbols::java_lang_IllegalMonitorStateException());
+ }
+ return;
+ }
+ }
+
+ if (_recursions != 0) {
+ _recursions--; // this is simple recursive enter
+ TEVENT (Inflated exit - recursive) ;
+ return ;
+ }
+
+ // Invariant: after setting Responsible=null a thread must execute
+ // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
+ if ((SyncFlags & 4) == 0) {
+ _Responsible = NULL ;
+ }
+
+ for (;;) {
+ assert (THREAD == _owner, "invariant") ;
+
+ // Fast-path monitor exit:
+ //
+ // Observe the Dekker/Lamport duality:
+ // A thread in ::exit() executes:
+ // ST Owner=null; MEMBAR; LD EntryList|cxq.
+ // A thread in the contended ::enter() path executes the complementary:
+ // ST EntryList|cxq = nonnull; MEMBAR; LD Owner.
+ //
+ // Note that there's a benign race in the exit path. We can drop the
+ // lock, another thread can reacquire the lock immediately, and we can
+ // then wake a thread unnecessarily (yet another flavor of futile wakeup).
+ // This is benign, and we've structured the code so the windows are short
+ // and the frequency of such futile wakeups is low.
+ //
+ // We could eliminate the race by encoding both the "LOCKED" state and
+ // the queue head in a single word.  Exit would then use CAS to
+ // clear the LOCKED bit/byte. This precludes the desirable 1-0 optimization,
+ // however.
+ //
+ // Possible fast-path ::exit() optimization:
+ // The current fast-path exit implementation fetches both cxq and EntryList.
+ // See also i486.ad fast_unlock(). Testing has shown that two LDs
+ // isn't measurably slower than a single LD on any platform.
+ // Still, we could reduce the 2 LDs to one or zero by one of the following:
+ //
+ // - Use _count instead of cxq|EntryList
+ // We intend to eliminate _count, however, when we switch
+ // to on-the-fly deflation in ::exit() as is used in
+ // Metalocks and RelaxedLocks.
+ //
+ // - Establish the invariant that cxq == null implies EntryList == null.
+ // set cxq == EMPTY (1) to encode the state where cxq is empty
+ // but EntryList != null.  EMPTY is a distinguished value.
+ // The fast-path exit() would fetch cxq but not EntryList.
+ //
+ // - Encode succ as follows:
+ // succ = t : Thread t is the successor -- t is ready or is spinning.
+ // Exiting thread does not need to wake a successor.
+ // succ = 0 : No successor required -> (EntryList|cxq) == null
+ // Exiting thread does not need to wake a successor
+ // succ = 1 : Successor required -> (EntryList|cxq) != null and
+ // logically succ == null.
+ // Exiting thread must wake a successor.
+ //
+ // The 1-1 fast-exit path would appear as :
+ // _owner = null ; membar ;
+ // if (_succ == 1 && CAS (&_owner, null, Self) == null) goto SlowPath
+ // goto FastPathDone ;
+ //
+ // and the 1-0 fast-exit path would appear as:
+ // if (_succ == 1) goto SlowPath
+ // Owner = null ;
+ // goto FastPathDone
+ //
+ // - Encode the LSB of _owner as 1 to indicate that exit()
+ // must use the slow-path and make a successor ready.
+ // (_owner & 1) == 0 IFF succ != null || (EntryList|cxq) == null
+ // (_owner & 1) == 1 IFF succ == null && (EntryList|cxq) != null (obviously)
+ // The 1-0 fast exit path would read:
+ // if (_owner != Self) goto SlowPath
+ // _owner = null
+ // goto FastPathDone
+
+ if (Knob_ExitPolicy == 0) {
+ // release semantics: prior loads and stores from within the critical section
+ // must not float (reorder) past the following store that drops the lock.
+ // On SPARC that requires MEMBAR #loadstore|#storestore.
+ // But of course in TSO #loadstore|#storestore is not required.
+ // I'd like to write one of the following:
+ // A. OrderAccess::release() ; _owner = NULL
+ // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
+ // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
+ // store into a _dummy variable. That store is not needed, but can result
+ // in massive wasteful coherency traffic on classic SMP systems.
+ // Instead, I use release_store(), which is implemented as just a simple
+ // ST on x64, x86 and SPARC.
+ OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
+ OrderAccess::storeload() ; // See if we need to wake a successor
+ if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+ TEVENT (Inflated exit - simple egress) ;
+ return ;
+ }
+ TEVENT (Inflated exit - complex egress) ;
+
+ // Normally the exiting thread is responsible for ensuring succession,
+ // but if other successors are ready or other entering threads are spinning
+ // then this thread can simply store NULL into _owner and exit without
+ // waking a successor. The existence of spinners or ready successors
+ // guarantees proper succession (liveness). Responsibility passes to the
+ // ready or running successors. The exiting thread delegates the duty.
+ // More precisely, if a successor already exists this thread is absolved
+ // of the responsibility of waking (unparking) one.
+ //
+ // The _succ variable is critical to reducing futile wakeup frequency.
+ // _succ identifies the "heir presumptive" thread that has been made
+ // ready (unparked) but that has not yet run. We need only one such
+ // successor thread to guarantee progress.
+ // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
+ // section 3.3 "Futile Wakeup Throttling" for details.
+ //
+ // Note that spinners in Enter() also set _succ non-null.
+ // In the current implementation spinners opportunistically set
+ // _succ so that exiting threads might avoid waking a successor.
+ // Another less appealing alternative would be for the exiting thread
+ // to drop the lock and then spin briefly to see if a spinner managed
+ // to acquire the lock. If so, the exiting thread could exit
+ // immediately without waking a successor, otherwise the exiting
+ // thread would need to dequeue and wake a successor.
+ // (Note that we'd need to make the post-drop spin short, but no
+ // shorter than the worst-case round-trip cache-line migration time.
+ // The dropped lock needs to become visible to the spinner, and then
+ // the acquisition of the lock by the spinner must become visible to
+ // the exiting thread).
+ //
+
+ // It appears that an heir-presumptive (successor) must be made ready.
+ // Only the current lock owner can manipulate the EntryList or
+ // drain _cxq, so we need to reacquire the lock. If we fail
+ // to reacquire the lock the responsibility for ensuring succession
+ // falls to the new owner.
+ //
+ if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+ return ;
+ }
+ TEVENT (Exit - Reacquired) ;
+ } else {
+ if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
+ OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
+ OrderAccess::storeload() ;
+ // Ratify the previously observed values.
+ if (_cxq == NULL || _succ != NULL) {
+ TEVENT (Inflated exit - simple egress) ;
+ return ;
+ }
+
+ // inopportune interleaving -- the exiting thread (this thread)
+ // in the fast-exit path raced an entering thread in the slow-enter
+ // path.
+ // We have two choices:
+ // A. Try to reacquire the lock.
+ // If the CAS() fails return immediately, otherwise
+ // we either restart/rerun the exit operation, or simply
+ // fall-through into the code below which wakes a successor.
+ // B. If the elements forming the EntryList|cxq are TSM
+ // we could simply unpark() the lead thread and return
+ // without having set _succ.
+ if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
+ TEVENT (Inflated exit - reacquired succeeded) ;
+ return ;
+ }
+ TEVENT (Inflated exit - reacquired failed) ;
+ } else {
+ TEVENT (Inflated exit - complex egress) ;
+ }
+ }
+
+ guarantee (_owner == THREAD, "invariant") ;
+
+ // Select an appropriate successor ("heir presumptive") from the EntryList
+ // and make it ready. Generally we just wake the head of EntryList .
+ // There's no algorithmic constraint that we use the head - it's just
+ // a policy decision. Note that the thread at head of the EntryList
+ // remains at the head until it acquires the lock. This means we'll
+ // repeatedly wake the same thread until it manages to grab the lock.
+ // This is generally a good policy - if we're seeing lots of futile wakeups
+ // at least we're waking/rewaking a thread that's likely to be hot or warm
+ // (have residual D$ and TLB affinity).
+ //
+ // "Wakeup locality" optimization:
+ // http://j2se.east/~dice/PERSIST/040825-WakeLocality.txt
+ // In the future we'll try to bias the selection mechanism
+ // to preferentially pick a thread that recently ran on
+ // a processor element that shares cache with the CPU on which
+ // the exiting thread is running. We need access to Solaris'
+ // schedctl.sc_cpu to make that work.
+ //
+ ObjectWaiter * w = NULL ;
+ int QMode = Knob_QMode ;
+
+ if (QMode == 2 && _cxq != NULL) {
+ // QMode == 2 : cxq has precedence over EntryList.
+ // Try to directly wake a successor from the cxq.
+ // If successful, the successor will need to unlink itself from cxq.
+ w = _cxq ;
+ assert (w != NULL, "invariant") ;
+ assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
+ ExitEpilog (Self, w) ;
+ return ;
+ }
+
+ if (QMode == 3 && _cxq != NULL) {
+ // Aggressively drain cxq into EntryList at the first opportunity.
+ // This policy ensures that recently-run threads live at the head of EntryList.
+ // Drain _cxq into EntryList - bulk transfer.
+ // First, detach _cxq.
+ // The following loop is tantamount to: w = swap (&cxq, NULL)
+ w = _cxq ;
+ for (;;) {
+ assert (w != NULL, "Invariant") ;
+ ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
+ if (u == w) break ;
+ w = u ;
+ }
+ assert (w != NULL , "invariant") ;
+
+ ObjectWaiter * q = NULL ;
+ ObjectWaiter * p ;
+ for (p = w ; p != NULL ; p = p->_next) {
+ guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
+ p->TState = ObjectWaiter::TS_ENTER ;
+ p->_prev = q ;
+ q = p ;
+ }
+
+ // Append the RATs to the EntryList
+ // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
+ ObjectWaiter * Tail ;
+ for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
+ if (Tail == NULL) {
+ _EntryList = w ;
+ } else {
+ Tail->_next = w ;
+ w->_prev = Tail ;
+ }
+
+ // Fall thru into code that tries to wake a successor from EntryList
+ }
+
+ if (QMode == 4 && _cxq != NULL) {
+ // Aggressively drain cxq into EntryList at the first opportunity.
+ // This policy ensures that recently-run threads live at the head of EntryList.
+
+ // Drain _cxq into EntryList - bulk transfer.
+ // First, detach _cxq.
+ // The following loop is tantamount to: w = swap (&cxq, NULL)
+ w = _cxq ;
+ for (;;) {
+ assert (w != NULL, "Invariant") ;
+ ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
+ if (u == w) break ;
+ w = u ;
+ }
+ assert (w != NULL , "invariant") ;
+
+ ObjectWaiter * q = NULL ;
+ ObjectWaiter * p ;
+ for (p = w ; p != NULL ; p = p->_next) {
+ guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
+ p->TState = ObjectWaiter::TS_ENTER ;
+ p->_prev = q ;
+ q = p ;
+ }
+
+ // Prepend the RATs to the EntryList
+ if (_EntryList != NULL) {
+ q->_next = _EntryList ;
+ _EntryList->_prev = q ;
+ }
+ _EntryList = w ;
+
+ // Fall thru into code that tries to wake a successor from EntryList
+ }
+
+ w = _EntryList ;
+ if (w != NULL) {
+ // I'd like to write: guarantee (w->_thread != Self).
+ // But in practice an exiting thread may find itself on the EntryList.
+ // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
+ // then calls exit().  Exit releases the lock by setting O._owner to NULL.
+ // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
+ // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
+ // releases the lock "O".  T2 resumes immediately after the ST of null into
+ // _owner, above. T2 notices that the EntryList is populated, so it
+ // reacquires the lock and then finds itself on the EntryList.
+ // Given all that, we have to tolerate the circumstance where "w" is
+ // associated with Self.
+ assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+ ExitEpilog (Self, w) ;
+ return ;
+ }
+
+ // If we find that both _cxq and EntryList are null then just
+ // re-run the exit protocol from the top.
+ w = _cxq ;
+ if (w == NULL) continue ;
+
+ // Drain _cxq into EntryList - bulk transfer.
+ // First, detach _cxq.
+ // The following loop is tantamount to: w = swap (&cxq, NULL)
+ for (;;) {
+ assert (w != NULL, "Invariant") ;
+ ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
+ if (u == w) break ;
+ w = u ;
+ }
+ TEVENT (Inflated exit - drain cxq into EntryList) ;
+
+ assert (w != NULL , "invariant") ;
+ assert (_EntryList == NULL , "invariant") ;
+
+ // Convert the LIFO SLL anchored by _cxq into a DLL.
+ // The list reorganization step operates in O(LENGTH(w)) time.
+ // It's critical that this step operate quickly as
+ // "Self" still holds the outer-lock, restricting parallelism
+ // and effectively lengthening the critical section.
+ // Invariant: s chases t chases u.
+ // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
+ // we have faster access to the tail.
+
+ if (QMode == 1) {
+ // QMode == 1 : drain cxq into EntryList, reversing the order of the
+ // list as we transfer it.
+ ObjectWaiter * s = NULL ;
+ ObjectWaiter * t = w ;
+ ObjectWaiter * u = NULL ;
+ while (t != NULL) {
+ guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
+ t->TState = ObjectWaiter::TS_ENTER ;
+ u = t->_next ;
+ t->_prev = u ;
+ t->_next = s ;
+ s = t;
+ t = u ;
+ }
+ _EntryList = s ;
+ assert (s != NULL, "invariant") ;
+ } else {
+ // QMode == 0 or QMode == 2
+ _EntryList = w ;
+ ObjectWaiter * q = NULL ;
+ ObjectWaiter * p ;
+ for (p = w ; p != NULL ; p = p->_next) {
+ guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
+ p->TState = ObjectWaiter::TS_ENTER ;
+ p->_prev = q ;
+ q = p ;
+ }
+ }
+
+ // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
+ // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
+
+ // See if we can abdicate to a spinner instead of waking a thread.
+ // A primary goal of the implementation is to reduce the
+ // context-switch rate.
+ if (_succ != NULL) continue;
+
+ w = _EntryList ;
+ if (w != NULL) {
+ guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+ ExitEpilog (Self, w) ;
+ return ;
+ }
+ }
+}
+// complete_exit exits a lock returning recursion count
+// complete_exit/reenter operate as a wait without waiting
+// complete_exit requires an inflated monitor
+// The _owner field is not always the Thread addr even with an
+// inflated monitor, e.g. the monitor can be inflated by a non-owning
+// thread due to contention.
+intptr_t ObjectMonitor::complete_exit(TRAPS) {
+ Thread * const Self = THREAD;
+ assert(Self->is_Java_thread(), "Must be Java thread!");
+ JavaThread *jt = (JavaThread *)THREAD;
+
+ DeferredInitialize();
+
+ if (THREAD != _owner) {
+ if (THREAD->is_lock_owned ((address)_owner)) {
+ assert(_recursions == 0, "internal state error");
+ _owner = THREAD ; /* Convert from basiclock addr to Thread addr */
+ _recursions = 0 ;
+ OwnerIsThread = 1 ;
+ }
+ }
+
+ guarantee(Self == _owner, "complete_exit not owner");
+ intptr_t save = _recursions; // record the old recursion count
+ _recursions = 0; // set the recursion level to be 0
+ exit (Self) ; // exit the monitor
+ guarantee (_owner != Self, "invariant");
+ return save;
+}
+
+// reenter() enters a lock and sets recursion count
+// complete_exit/reenter operate as a wait without waiting
+void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
+ Thread * const Self = THREAD;
+ assert(Self->is_Java_thread(), "Must be Java thread!");
+ JavaThread *jt = (JavaThread *)THREAD;
+
+ guarantee(_owner != Self, "reenter already owner");
+ enter (THREAD); // enter the monitor
+ guarantee (_recursions == 0, "reenter recursion");
+ _recursions = recursions;
+ return;
+}
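+
+// Hedged usage sketch of the complete_exit()/reenter() pairing.  Illustrative
+// only: the real caller is the class loading code, reached via
+// ObjectSynchronizer::complete_exit()/reenter(), and 'monitor' below is an
+// assumed ObjectMonitor* already owned by the current thread.
+//
+//   intptr_t rec = monitor->complete_exit(THREAD);  // fully release; remember the recursion count
+//   ...                                             // block on some other lock
+//   monitor->reenter(rec, THREAD);                  // reacquire with the original recursion count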
+
+// Note: a subset of changes to ObjectMonitor::wait()
+// will need to be replicated in complete_exit above
+void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
+ Thread * const Self = THREAD ;
+ assert(Self->is_Java_thread(), "Must be Java thread!");
+ JavaThread *jt = (JavaThread *)THREAD;
+
+ DeferredInitialize () ;
+
+ // Throw IMSX or IEX.
+ CHECK_OWNER();
+
+ // check for a pending interrupt
+ if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+ // post monitor waited event. Note that this is past-tense, we are done waiting.
+ if (JvmtiExport::should_post_monitor_waited()) {
+ // Note: 'false' is passed here because the wait did not
+ // time out; it ended due to a thread interrupt.
+ JvmtiExport::post_monitor_waited(jt, this, false);
+ }
+ TEVENT (Wait - Throw IEX) ;
+ THROW(vmSymbols::java_lang_InterruptedException());
+ return ;
+ }
+ TEVENT (Wait) ;
+
+ assert (Self->_Stalled == 0, "invariant") ;
+ Self->_Stalled = intptr_t(this) ;
+ jt->set_current_waiting_monitor(this);
+
+ // create a node to be put into the queue
+ // Critically, after we reset() the event but prior to park(), we must check
+ // for a pending interrupt.
+ ObjectWaiter node(Self);
+ node.TState = ObjectWaiter::TS_WAIT ;
+ Self->_ParkEvent->reset() ;
+ OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag
+
+ // Enter the waiting queue, which is a circular doubly linked list in this case
+ // but it could be a priority queue or any data structure.
+ // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
+ // by the owner of the monitor *except* in the case where park()
+ // returns because of a timeout or interrupt.  Contention is exceptionally rare
+ // so we use a simple spin-lock instead of a heavier-weight blocking lock.
+
+ Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
+ AddWaiter (&node) ;
+ Thread::SpinRelease (&_WaitSetLock) ;
+
+ if ((SyncFlags & 4) == 0) {
+ _Responsible = NULL ;
+ }
+ intptr_t save = _recursions; // record the old recursion count
+ _waiters++; // increment the number of waiters
+ _recursions = 0; // set the recursion level to be 0
+ exit (Self) ; // exit the monitor
+ guarantee (_owner != Self, "invariant") ;
+
+ // As soon as the ObjectMonitor's ownership is dropped in the exit()
+ // call above, another thread can enter() the ObjectMonitor, do the
+ // notify(), and exit() the ObjectMonitor. If the other thread's
+ // exit() call chooses this thread as the successor and the unpark()
+ // call happens to occur while this thread is posting a
+ // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
+ // handler using RawMonitors and consuming the unpark().
+ //
+ // To avoid the problem, we re-post the event. This does no harm
+ // even if the original unpark() was not consumed because we are the
+ // chosen successor for this monitor.
+ if (node._notified != 0 && _succ == Self) {
+ node._event->unpark();
+ }
+
+ // The thread is on the WaitSet list - now park() it.
+ // On MP systems it's conceivable that a brief spin before we park
+ // could be profitable.
+ //
+ // TODO-FIXME: change the following logic to a loop of the form
+ // while (!timeout && !interrupted && _notified == 0) park()
+
+ int ret = OS_OK ;
+ int WasNotified = 0 ;
+ { // State transition wrappers
+ OSThread* osthread = Self->osthread();
+ OSThreadWaitState osts(osthread, true);
+ {
+ ThreadBlockInVM tbivm(jt);
+ // Thread is in thread_blocked state and oop access is unsafe.
+ jt->set_suspend_equivalent();
+
+ if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
+ // Intentionally empty
+ } else
+ if (node._notified == 0) {
+ if (millis <= 0) {
+ Self->_ParkEvent->park () ;
+ } else {
+ ret = Self->_ParkEvent->park (millis) ;
+ }
+ }
+
+ // were we externally suspended while we were waiting?
+ if (ExitSuspendEquivalent (jt)) {
+ // TODO-FIXME: add -- if succ == Self then succ = null.
+ jt->java_suspend_self();
+ }
+
+ } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
+
+
+ // Node may be on the WaitSet, the EntryList (or cxq), or in transition
+ // from the WaitSet to the EntryList.
+ // See if we need to remove Node from the WaitSet.
+ // We use double-checked locking to avoid grabbing _WaitSetLock
+ // if the thread is not on the wait queue.
+ //
+ // Note that we don't need a fence before the fetch of TState.
+ // In the worst case we'll fetch an old, stale value of TS_WAIT previously
+ // written by this thread.  (perhaps the fetch might even be satisfied
+ // by a look-aside into the processor's own store buffer, although given
+ // the length of the code path between the prior ST and this load that's
+ // highly unlikely). If the following LD fetches a stale TS_WAIT value
+ // then we'll acquire the lock and then re-fetch a fresh TState value.
+ // That is, we fail toward safety.
+
+ if (node.TState == ObjectWaiter::TS_WAIT) {
+ Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
+ if (node.TState == ObjectWaiter::TS_WAIT) {
+ DequeueSpecificWaiter (&node) ; // unlink from WaitSet
+ assert(node._notified == 0, "invariant");
+ node.TState = ObjectWaiter::TS_RUN ;
+ }
+ Thread::SpinRelease (&_WaitSetLock) ;
+ }
+
+ // The thread is now either off-list (TS_RUN),
+ // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
+ // The Node's TState variable is stable from the perspective of this thread.
+ // No other threads will asynchronously modify TState.
+ guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
+ OrderAccess::loadload() ;
+ if (_succ == Self) _succ = NULL ;
+ WasNotified = node._notified ;
+
+ // Reentry phase -- reacquire the monitor.
+ // re-enter contended monitor after object.wait().
+ // retain OBJECT_WAIT state until re-enter successfully completes
+ // Thread state is thread_in_vm and oop access is again safe,
+ // although the raw address of the object may have changed.
+ // (Don't cache naked oops over safepoints, of course).
+
+ // post monitor waited event. Note that this is past-tense, we are done waiting.
+ if (JvmtiExport::should_post_monitor_waited()) {
+ JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
+ }
+ OrderAccess::fence() ;
+
+ assert (Self->_Stalled != 0, "invariant") ;
+ Self->_Stalled = 0 ;
+
+ assert (_owner != Self, "invariant") ;
+ ObjectWaiter::TStates v = node.TState ;
+ if (v == ObjectWaiter::TS_RUN) {
+ enter (Self) ;
+ } else {
+ guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
+ ReenterI (Self, &node) ;
+ node.wait_reenter_end(this);
+ }
+
+ // Self has reacquired the lock.
+ // Lifecycle - the node representing Self must not appear on any queues.
+ // Node is about to go out-of-scope, but even if it were immortal we wouldn't
+ // want residual elements associated with this thread left on any lists.
+ guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
+ assert (_owner == Self, "invariant") ;
+ assert (_succ != Self , "invariant") ;
+ } // OSThreadWaitState()
+
+ jt->set_current_waiting_monitor(NULL);
+
+ guarantee (_recursions == 0, "invariant") ;
+ _recursions = save; // restore the old recursion count
+ _waiters--; // decrement the number of waiters
+
+ // Verify a few postconditions
+ assert (_owner == Self , "invariant") ;
+ assert (_succ != Self , "invariant") ;
+ assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+
+ if (SyncFlags & 32) {
+ OrderAccess::fence() ;
+ }
+
+ // check if the notification happened
+ if (!WasNotified) {
+ // no, it could be timeout or Thread.interrupt() or both
+ // check for interrupt event, otherwise it is timeout
+ if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
+ TEVENT (Wait - throw IEX from epilog) ;
+ THROW(vmSymbols::java_lang_InterruptedException());
+ }
+ }
+
+ // NOTE: A spurious wakeup will be treated as a timeout.
+ // Monitor notify has precedence over thread interrupt.
+}
+
+
+// Consider:
+// If the lock is cool (cxq == null && succ == null) and we're on an MP system
+// then instead of transferring a thread from the WaitSet to the EntryList
+// we might just dequeue a thread from the WaitSet and directly unpark() it.
+
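+// Summary of the Knob_MoveNotifyee dispositions implemented below in notify()
+// and notifyAll() (derived from the code that follows):
+//   0     : prepend the notified thread to the EntryList
+//   1     : append the notified thread to the EntryList
+//   2     : push the notified thread onto the front of the cxq
+//   3     : append the notified thread to the cxq
+//   other : mark it TS_RUN and unpark() it directly, bypassing both queues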
+void ObjectMonitor::notify(TRAPS) {
+ CHECK_OWNER();
+ if (_WaitSet == NULL) {
+ TEVENT (Empty-Notify) ;
+ return ;
+ }
+ DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
+
+ int Policy = Knob_MoveNotifyee ;
+
+ Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
+ ObjectWaiter * iterator = DequeueWaiter() ;
+ if (iterator != NULL) {
+ TEVENT (Notify1 - Transfer) ;
+ guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
+ guarantee (iterator->_notified == 0, "invariant") ;
+ // Disposition - what might we do with iterator ?
+ // a. add it directly to the EntryList - either tail or head.
+ // b. push it onto the front of the _cxq.
+ // For now we use (a).
+ if (Policy != 4) {
+ iterator->TState = ObjectWaiter::TS_ENTER ;
+ }
+ iterator->_notified = 1 ;
+
+ ObjectWaiter * List = _EntryList ;
+ if (List != NULL) {
+ assert (List->_prev == NULL, "invariant") ;
+ assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+ assert (List != iterator, "invariant") ;
+ }
+
+ if (Policy == 0) { // prepend to EntryList
+ if (List == NULL) {
+ iterator->_next = iterator->_prev = NULL ;
+ _EntryList = iterator ;
+ } else {
+ List->_prev = iterator ;
+ iterator->_next = List ;
+ iterator->_prev = NULL ;
+ _EntryList = iterator ;
+ }
+ } else
+ if (Policy == 1) { // append to EntryList
+ if (List == NULL) {
+ iterator->_next = iterator->_prev = NULL ;
+ _EntryList = iterator ;
+ } else {
+ // CONSIDER: finding the tail currently requires a linear-time walk of
+ // the EntryList. We can make tail access constant-time by converting to
+ // a CDLL instead of using our current DLL.
+ ObjectWaiter * Tail ;
+ for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
+ assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
+ Tail->_next = iterator ;
+ iterator->_prev = Tail ;
+ iterator->_next = NULL ;
+ }
+ } else
+ if (Policy == 2) { // prepend to cxq
+ if (List == NULL) {
+ iterator->_next = iterator->_prev = NULL ;
+ _EntryList = iterator ;
+ } else {
+ iterator->TState = ObjectWaiter::TS_CXQ ;
+ for (;;) {
+ ObjectWaiter * Front = _cxq ;
+ iterator->_next = Front ;
+ if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
+ break ;
+ }
+ }
+ }
+ } else
+ if (Policy == 3) { // append to cxq
+ iterator->TState = ObjectWaiter::TS_CXQ ;
+ for (;;) {
+ ObjectWaiter * Tail ;
+ Tail = _cxq ;
+ if (Tail == NULL) {
+ iterator->_next = NULL ;
+ if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
+ break ;
+ }
+ } else {
+ while (Tail->_next != NULL) Tail = Tail->_next ;
+ Tail->_next = iterator ;
+ iterator->_prev = Tail ;
+ iterator->_next = NULL ;
+ break ;
+ }
+ }
+ } else {
+ ParkEvent * ev = iterator->_event ;
+ iterator->TState = ObjectWaiter::TS_RUN ;
+ OrderAccess::fence() ;
+ ev->unpark() ;
+ }
+
+ if (Policy < 4) {
+ iterator->wait_reenter_begin(this);
+ }
+
+ // _WaitSetLock protects the wait queue, not the EntryList. We could
+ // move the add-to-EntryList operation, above, outside the critical section
+ // protected by _WaitSetLock. In practice that's not useful. With the
+ // exception of wait() timeouts and interrupts the monitor owner
+ // is the only thread that grabs _WaitSetLock. There's almost no contention
+ // on _WaitSetLock so it's not profitable to reduce the length of the
+ // critical section.
+ }
+
+ Thread::SpinRelease (&_WaitSetLock) ;
+
+ if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) {
+ ObjectSynchronizer::_sync_Notifications->inc() ;
+ }
+}
+
+
+void ObjectMonitor::notifyAll(TRAPS) {
+ CHECK_OWNER();
+ ObjectWaiter* iterator;
+ if (_WaitSet == NULL) {
+ TEVENT (Empty-NotifyAll) ;
+ return ;
+ }
+ DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
+
+ int Policy = Knob_MoveNotifyee ;
+ int Tally = 0 ;
+ Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
+
+ for (;;) {
+ iterator = DequeueWaiter () ;
+ if (iterator == NULL) break ;
+ TEVENT (NotifyAll - Transfer1) ;
+ ++Tally ;
+
+ // Disposition - what might we do with iterator ?
+ // a. add it directly to the EntryList - either tail or head.
+ // b. push it onto the front of the _cxq.
+ // For now we use (a).
+ //
+ // TODO-FIXME: currently notifyAll() transfers the waiters one-at-a-time from the waitset
+ // to the EntryList. This could be done more efficiently with a single bulk transfer,
+ // but in practice it's not time-critical. Beware too, that in prepend-mode we invert the
+ // order of the waiters.  Let's say that the waitset is "ABCD" and the EntryList is "XYZ".
+ // After a notifyAll() in prepend mode the waitset will be empty and the EntryList will
+ // be "DCBAXYZ".
+
+ guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
+ guarantee (iterator->_notified == 0, "invariant") ;
+ iterator->_notified = 1 ;
+ if (Policy != 4) {
+ iterator->TState = ObjectWaiter::TS_ENTER ;
+ }
+
+ ObjectWaiter * List = _EntryList ;
+ if (List != NULL) {
+ assert (List->_prev == NULL, "invariant") ;
+ assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+ assert (List != iterator, "invariant") ;
+ }
+
+ if (Policy == 0) { // prepend to EntryList
+ if (List == NULL) {
+ iterator->_next = iterator->_prev = NULL ;
+ _EntryList = iterator ;
+ } else {
+ List->_prev = iterator ;
+ iterator->_next = List ;
+ iterator->_prev = NULL ;
+ _EntryList = iterator ;
+ }
+ } else
+ if (Policy == 1) { // append to EntryList
+ if (List == NULL) {
+ iterator->_next = iterator->_prev = NULL ;
+ _EntryList = iterator ;
+ } else {
+ // CONSIDER: finding the tail currently requires a linear-time walk of
+ // the EntryList. We can make tail access constant-time by converting to
+ // a CDLL instead of using our current DLL.
+ ObjectWaiter * Tail ;
+ for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
+ assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
+ Tail->_next = iterator ;
+ iterator->_prev = Tail ;
+ iterator->_next = NULL ;
+ }
+ } else
+ if (Policy == 2) { // prepend to cxq
+ iterator->TState = ObjectWaiter::TS_CXQ ;
+ for (;;) {
+ ObjectWaiter * Front = _cxq ;
+ iterator->_next = Front ;
+ if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
+ break ;
+ }
+ }
+ } else
+ if (Policy == 3) { // append to cxq
+ iterator->TState = ObjectWaiter::TS_CXQ ;
+ for (;;) {
+ ObjectWaiter * Tail ;
+ Tail = _cxq ;
+ if (Tail == NULL) {
+ iterator->_next = NULL ;
+ if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
+ break ;
+ }
+ } else {
+ while (Tail->_next != NULL) Tail = Tail->_next ;
+ Tail->_next = iterator ;
+ iterator->_prev = Tail ;
+ iterator->_next = NULL ;
+ break ;
+ }
+ }
+ } else {
+ ParkEvent * ev = iterator->_event ;
+ iterator->TState = ObjectWaiter::TS_RUN ;
+ OrderAccess::fence() ;
+ ev->unpark() ;
+ }
+
+ if (Policy < 4) {
+ iterator->wait_reenter_begin(this);
+ }
+
+ // _WaitSetLock protects the wait queue, not the EntryList. We could
+ // move the add-to-EntryList operation, above, outside the critical section
+ // protected by _WaitSetLock. In practice that's not useful. With the
+ // exception of wait() timeouts and interrupts the monitor owner
+ // is the only thread that grabs _WaitSetLock. There's almost no contention
+ // on _WaitSetLock so it's not profitable to reduce the length of the
+ // critical section.
+ }
+
+ Thread::SpinRelease (&_WaitSetLock) ;
+
+ if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) {
+ ObjectSynchronizer::_sync_Notifications->inc(Tally) ;
+ }
+}
+
+// check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
+// TODO-FIXME: remove check_slow() -- it's likely dead.
+
+void ObjectMonitor::check_slow(TRAPS) {
+ TEVENT (check_slow - throw IMSX) ;
+ assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
+ THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
+}
+
+
+// -------------------------------------------------------------------------
+// The raw monitor subsystem is entirely distinct from normal
+// java-synchronization or jni-synchronization. raw monitors are not
+// associated with objects. They can be implemented in any manner
+// that makes sense. The original implementors decided to piggy-back
+// the raw-monitor implementation on the existing Java objectMonitor mechanism.
+// This flaw needs to be fixed.  We should reimplement raw monitors as sui generis.
+// Specifically, we should not implement raw monitors via java monitors.
+// Time permitting, we should disentangle and deconvolve the two implementations
+// and move the resulting raw monitor implementation over to the JVMTI directories.
+// Ideally, the raw monitor implementation would be built on top of
+// park-unpark and nothing else.
+//
+// raw monitors are used mainly by JVMTI
+// The raw monitor implementation borrows the ObjectMonitor structure,
+// but the operators are degenerate and extremely simple.
+//
+// Mixed use of a single objectMonitor instance -- as both a raw monitor
+// and a normal java monitor -- is not permissible.
+//
+// Note that we use the single RawMonitor_lock to protect queue operations for
+// _all_ raw monitors. This is a scalability impediment, but since raw monitor usage
+// is deprecated and rare, this is not of concern.  The RawMonitor_lock cannot
+// be held indefinitely. The critical sections must be short and bounded.
+//
+// -------------------------------------------------------------------------
+
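+// Hedged sketch of how the JVMTI RawMonitor entry points are expected to map
+// onto the operators below.  The JVMTI glue itself lives outside this file, so
+// the mapping is an assumption about intended usage, not a statement about that code.
+//
+//   RawMonitorEnter     -> raw_enter()      // blocks via SimpleEnter()
+//   RawMonitorExit      -> raw_exit()       // SimpleExit(), waking at most one queued thread
+//   RawMonitorWait      -> raw_wait()       // SimpleWait(): exit, park, then re-enter
+//   RawMonitorNotify    -> raw_notify()     // SimpleNotify(Self, false)
+//   RawMonitorNotifyAll -> raw_notifyAll()  // SimpleNotify(Self, true)
+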
+int ObjectMonitor::SimpleEnter (Thread * Self) {
+ for (;;) {
+ if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+ return OS_OK ;
+ }
+
+ ObjectWaiter Node (Self) ;
+ Self->_ParkEvent->reset() ; // strictly optional
+ Node.TState = ObjectWaiter::TS_ENTER ;
+
+ RawMonitor_lock->lock_without_safepoint_check() ;
+ Node._next = _EntryList ;
+ _EntryList = &Node ;
+ OrderAccess::fence() ;
+ if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+ _EntryList = Node._next ;
+ RawMonitor_lock->unlock() ;
+ return OS_OK ;
+ }
+ RawMonitor_lock->unlock() ;
+ while (Node.TState == ObjectWaiter::TS_ENTER) {
+ Self->_ParkEvent->park() ;
+ }
+ }
+}
+
+int ObjectMonitor::SimpleExit (Thread * Self) {
+ guarantee (_owner == Self, "invariant") ;
+ OrderAccess::release_store_ptr (&_owner, NULL) ;
+ OrderAccess::fence() ;
+ if (_EntryList == NULL) return OS_OK ;
+ ObjectWaiter * w ;
+
+ RawMonitor_lock->lock_without_safepoint_check() ;
+ w = _EntryList ;
+ if (w != NULL) {
+ _EntryList = w->_next ;
+ }
+ RawMonitor_lock->unlock() ;
+ if (w != NULL) {
+ guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
+ ParkEvent * ev = w->_event ;
+ w->TState = ObjectWaiter::TS_RUN ;
+ OrderAccess::fence() ;
+ ev->unpark() ;
+ }
+ return OS_OK ;
+}
+
+int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) {
+ guarantee (_owner == Self , "invariant") ;
+ guarantee (_recursions == 0, "invariant") ;
+
+ ObjectWaiter Node (Self) ;
+ Node._notified = 0 ;
+ Node.TState = ObjectWaiter::TS_WAIT ;
+
+ RawMonitor_lock->lock_without_safepoint_check() ;
+ Node._next = _WaitSet ;
+ _WaitSet = &Node ;
+ RawMonitor_lock->unlock() ;
+
+ SimpleExit (Self) ;
+ guarantee (_owner != Self, "invariant") ;
+
+ int ret = OS_OK ;
+ if (millis <= 0) {
+ Self->_ParkEvent->park();
+ } else {
+ ret = Self->_ParkEvent->park(millis);
+ }
+
+ // If thread still resides on the waitset then unlink it.
+ // Double-checked locking -- the usage is safe in this context
+ // as TState is volatile and the lock-unlock operators are
+ // serializing (barrier-equivalent).
+
+ if (Node.TState == ObjectWaiter::TS_WAIT) {
+ RawMonitor_lock->lock_without_safepoint_check() ;
+ if (Node.TState == ObjectWaiter::TS_WAIT) {
+ // Simple O(n) unlink, but performance isn't critical here.
+ ObjectWaiter * p ;
+ ObjectWaiter * q = NULL ;
+ for (p = _WaitSet ; p != &Node; p = p->_next) {
+ q = p ;
+ }
+ guarantee (p == &Node, "invariant") ;
+ if (q == NULL) {
+ guarantee (p == _WaitSet, "invariant") ;
+ _WaitSet = p->_next ;
+ } else {
+ guarantee (p == q->_next, "invariant") ;
+ q->_next = p->_next ;
+ }
+ Node.TState = ObjectWaiter::TS_RUN ;
+ }
+ RawMonitor_lock->unlock() ;
+ }
+
+ guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
+ SimpleEnter (Self) ;
+
+ guarantee (_owner == Self, "invariant") ;
+ guarantee (_recursions == 0, "invariant") ;
+ return ret ;
+}
+
+int ObjectMonitor::SimpleNotify (Thread * Self, bool All) {
+ guarantee (_owner == Self, "invariant") ;
+ if (_WaitSet == NULL) return OS_OK ;
+
+ // We have two options:
+ // A. Transfer the threads from the WaitSet to the EntryList
+ // B. Remove the thread from the WaitSet and unpark() it.
+ //
+ // We use (B), which is crude and results in lots of futile
+ // context switching. In particular (B) induces lots of contention.
+
+ ParkEvent * ev = NULL ; // consider using a small auto array ...
+ RawMonitor_lock->lock_without_safepoint_check() ;
+ for (;;) {
+ ObjectWaiter * w = _WaitSet ;
+ if (w == NULL) break ;
+ _WaitSet = w->_next ;
+ if (ev != NULL) { ev->unpark(); ev = NULL; }
+ ev = w->_event ;
+ OrderAccess::loadstore() ;
+ w->TState = ObjectWaiter::TS_RUN ;
+ OrderAccess::storeload();
+ if (!All) break ;
+ }
+ RawMonitor_lock->unlock() ;
+ if (ev != NULL) ev->unpark();
+ return OS_OK ;
+}
+
+// Any JavaThread will enter here with state _thread_blocked
+int ObjectMonitor::raw_enter(TRAPS) {
+ TEVENT (raw_enter) ;
+ void * Contended ;
+
+ // Don't enter the raw monitor if the thread is being externally suspended; it would
+ // surprise the suspender if a "suspended" thread could still enter a monitor.
+ JavaThread * jt = (JavaThread *)THREAD;
+ if (THREAD->is_Java_thread()) {
+ jt->SR_lock()->lock_without_safepoint_check();
+ while (jt->is_external_suspend()) {
+ jt->SR_lock()->unlock();
+ jt->java_suspend_self();
+ jt->SR_lock()->lock_without_safepoint_check();
+ }
+ // guarded by SR_lock to avoid racing with new external suspend requests.
+ Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
+ jt->SR_lock()->unlock();
+ } else {
+ Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
+ }
+
+ if (Contended == THREAD) {
+ _recursions ++ ;
+ return OM_OK ;
+ }
+
+ if (Contended == NULL) {
+ guarantee (_owner == THREAD, "invariant") ;
+ guarantee (_recursions == 0, "invariant") ;
+ return OM_OK ;
+ }
+
+ THREAD->set_current_pending_monitor(this);
+
+ if (!THREAD->is_Java_thread()) {
+ // No non-Java thread other than the VM thread would acquire
+ // a raw monitor.
+ assert(THREAD->is_VM_thread(), "must be VM thread");
+ SimpleEnter (THREAD) ;
+ } else {
+ guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
+ for (;;) {
+ jt->set_suspend_equivalent();
+ // cleared by handle_special_suspend_equivalent_condition() or
+ // java_suspend_self()
+ SimpleEnter (THREAD) ;
+
+ // were we externally suspended while we were waiting?
+ if (!jt->handle_special_suspend_equivalent_condition()) break ;
+
+ // This thread was externally suspended
+ //
+ // This logic isn't needed for JVMTI raw monitors,
+ // but doesn't hurt just in case the suspend rules change. This
+ // logic is needed for the ObjectMonitor.wait() reentry phase.
+ // We have reentered the contended monitor, but while we were
+ // waiting another thread suspended us. We don't want to reenter
+ // the monitor while suspended because that would surprise the
+ // thread that suspended us.
+ //
+ // Drop the lock -
+ SimpleExit (THREAD) ;
+
+ jt->java_suspend_self();
+ }
+
+ assert(_owner == THREAD, "Fatal error with monitor owner!");
+ assert(_recursions == 0, "Fatal error with monitor recursions!");
+ }
+
+ THREAD->set_current_pending_monitor(NULL);
+ guarantee (_recursions == 0, "invariant") ;
+ return OM_OK;
+}
+
+// Used mainly for JVMTI raw monitor implementation
+// Also used for ObjectMonitor::wait().
+int ObjectMonitor::raw_exit(TRAPS) {
+ TEVENT (raw_exit) ;
+ if (THREAD != _owner) {
+ return OM_ILLEGAL_MONITOR_STATE;
+ }
+ if (_recursions > 0) {
+ --_recursions ;
+ return OM_OK ;
+ }
+
+ void * List = _EntryList ;
+ SimpleExit (THREAD) ;
+
+ return OM_OK;
+}
+
+// Used for JVMTI raw monitor implementation.
+// All JavaThreads will enter here with state _thread_blocked
+
+int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
+ TEVENT (raw_wait) ;
+ if (THREAD != _owner) {
+ return OM_ILLEGAL_MONITOR_STATE;
+ }
+
+ // To avoid spurious wakeups we reset the parkevent -- This is strictly optional.
+ // The caller must be able to tolerate spurious returns from raw_wait().
+ THREAD->_ParkEvent->reset() ;
+ OrderAccess::fence() ;
+
+ // check interrupt event
+ if (interruptible && Thread::is_interrupted(THREAD, true)) {
+ return OM_INTERRUPTED;
+ }
+
+ intptr_t save = _recursions ;
+ _recursions = 0 ;
+ _waiters ++ ;
+ if (THREAD->is_Java_thread()) {
+ guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
+ ((JavaThread *)THREAD)->set_suspend_equivalent();
+ }
+ int rv = SimpleWait (THREAD, millis) ;
+ _recursions = save ;
+ _waiters -- ;
+
+ guarantee (THREAD == _owner, "invariant") ;
+ if (THREAD->is_Java_thread()) {
+ JavaThread * jSelf = (JavaThread *) THREAD ;
+ for (;;) {
+ if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
+ SimpleExit (THREAD) ;
+ jSelf->java_suspend_self();
+ SimpleEnter (THREAD) ;
+ jSelf->set_suspend_equivalent() ;
+ }
+ }
+ guarantee (THREAD == _owner, "invariant") ;
+
+ if (interruptible && Thread::is_interrupted(THREAD, true)) {
+ return OM_INTERRUPTED;
+ }
+ return OM_OK ;
+}
+
+int ObjectMonitor::raw_notify(TRAPS) {
+ TEVENT (raw_notify) ;
+ if (THREAD != _owner) {
+ return OM_ILLEGAL_MONITOR_STATE;
+ }
+ SimpleNotify (THREAD, false) ;
+ return OM_OK;
+}
+
+int ObjectMonitor::raw_notifyAll(TRAPS) {
+ TEVENT (raw_notifyAll) ;
+ if (THREAD != _owner) {
+ return OM_ILLEGAL_MONITOR_STATE;
+ }
+ SimpleNotify (THREAD, true) ;
+ return OM_OK;
+}
+
+#ifndef PRODUCT
+void ObjectMonitor::verify() {
+}
+
+void ObjectMonitor::print() {
+}
+#endif
+
+//------------------------------------------------------------------------------
+// Non-product code
+
+#ifndef PRODUCT
+
+void ObjectSynchronizer::trace_locking(Handle locking_obj, bool is_compiled,
+ bool is_method, bool is_locking) {
+ // Don't know what to do here
+}
+
+// Verify all monitors in the monitor cache, the verification is weak.
+void ObjectSynchronizer::verify() {
+ ObjectMonitor* block = gBlockList;
+ ObjectMonitor* mid;
+ while (block) {
+ assert(block->object() == CHAINMARKER, "must be a block header");
+ for (int i = 1; i < _BLOCKSIZE; i++) {
+ mid = block + i;
+ oop object = (oop) mid->object();
+ if (object != NULL) {
+ mid->verify();
+ }
+ }
+ block = (ObjectMonitor*) block->FreeNext;
+ }
+}
+
+// Check if monitor belongs to the monitor cache
+// The list is grow-only so it's *relatively* safe to traverse
+// the list of extant blocks without taking a lock.
+
+int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
+ ObjectMonitor* block = gBlockList;
+
+ while (block) {
+ assert(block->object() == CHAINMARKER, "must be a block header");
+ if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
+ address mon = (address) monitor;
+ address blk = (address) block;
+ size_t diff = mon - blk;
+ assert((diff % sizeof(ObjectMonitor)) == 0, "check");
+ return 1;
+ }
+ block = (ObjectMonitor*) block->FreeNext;
+ }
+ return 0;
+}
+
+#endif
diff --git a/src/share/vm/runtime/synchronizer.hpp b/src/share/vm/runtime/synchronizer.hpp
new file mode 100644
index 000000000..7fcd29cdd
--- /dev/null
+++ b/src/share/vm/runtime/synchronizer.hpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class BasicLock VALUE_OBJ_CLASS_SPEC {
+ friend class VMStructs;
+ private:
+ volatile markOop _displaced_header;
+ public:
+ markOop displaced_header() const { return _displaced_header; }
+ void set_displaced_header(markOop header) { _displaced_header = header; }
+
+ void print_on(outputStream* st) const;
+
+ // Move a basic lock (used during deoptimization).
+ void move_to(oop obj, BasicLock* dest);
+
+ static int displaced_header_offset_in_bytes() { return offset_of(BasicLock, _displaced_header); }
+};
+
+// A BasicObjectLock associates a specific Java object with a BasicLock.
+// It is currently embedded in an interpreter frame.
+
+// Because some machines have alignment restrictions on the control stack,
+// the actual space allocated by the interpreter may include padding words
+// after the end of the BasicObjectLock. Also, in order to guarantee
+// alignment of the embedded BasicLock objects on such machines, we
+// put the embedded BasicLock at the beginning of the struct.
+
+class BasicObjectLock VALUE_OBJ_CLASS_SPEC {
+ friend class VMStructs;
+ private:
+ BasicLock _lock; // the lock, must be double word aligned
+ oop _obj; // object that holds the lock
+
+ public:
+ // Manipulation
+ oop obj() const { return _obj; }
+ void set_obj(oop obj) { _obj = obj; }
+ BasicLock* lock() { return &_lock; }
+
+ // Note: Use frame::interpreter_frame_monitor_size() for the size of BasicObjectLocks
+ // in interpreter activation frames since it includes machine-specific padding.
+ static int size() { return sizeof(BasicObjectLock)/wordSize; }
+
+ // GC support
+ void oops_do(OopClosure* f) { f->do_oop(&_obj); }
+
+ static int obj_offset_in_bytes() { return offset_of(BasicObjectLock, _obj); }
+ static int lock_offset_in_bytes() { return offset_of(BasicObjectLock, _lock); }
+};
+
+class ObjectMonitor;
+
+class ObjectSynchronizer : AllStatic {
+ friend class VMStructs;
+ public:
+ typedef enum {
+ owner_self,
+ owner_none,
+ owner_other
+ } LockOwnership;
+ // exit must be implemented non-blocking, since the compiler cannot easily handle
+ // deoptimization at monitor exit. Hence, it does not take a Handle argument.
+
+ // This is the full version of monitor enter and exit.  We chose not
+ // to name them enter() and exit() in order to make sure the user is aware
+ // of the performance and semantic differences.  They are normally
+ // used by ObjectLocker etc.  The interpreter and compiler use
+ // assembly copies of these routines.  Please keep them synchronized.
+ //
+ // attempt_rebias flag is used by UseBiasedLocking implementation
+ static void fast_enter (Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS);
+ static void fast_exit (oop obj, BasicLock* lock, Thread* THREAD);
+
+ // WARNING: They are ONLY used to handle the slow cases. They should
+ // only be used when the fast cases failed. Use of these functions
+ // without a previous fast-case check may cause a fatal error.
+ static void slow_enter (Handle obj, BasicLock* lock, TRAPS);
+ static void slow_exit (oop obj, BasicLock* lock, Thread* THREAD);
+
+ // Used only to handle jni locks or other unmatched monitor enter/exit
+ // Internally they will use a heavy-weight monitor.
+ static void jni_enter (Handle obj, TRAPS);
+ static bool jni_try_enter(Handle obj, Thread* THREAD); // Implements Unsafe.tryMonitorEnter
+ static void jni_exit (oop obj, Thread* THREAD);
+
+ // Handle all interpreter, compiler and jni cases
+ static void wait (Handle obj, jlong millis, TRAPS);
+ static void notify (Handle obj, TRAPS);
+ static void notifyall (Handle obj, TRAPS);
+
+ // Special internal-use-only method for use by JVM infrastructure
+ // that needs to wait() on a java-level object but that can't risk
+ // throwing unexpected InterruptedExceptions.
+ static void waitUninterruptibly (Handle obj, jlong Millis, Thread * THREAD) ;
+
+ // used by classloading to free classloader object lock,
+ // wait on an internal lock, and reclaim original lock
+ // with original recursion count
+ static intptr_t complete_exit (Handle obj, TRAPS);
+ static void reenter (Handle obj, intptr_t recursion, TRAPS);
+
+ // thread-specific and global objectMonitor free list accessors
+ static ObjectMonitor * omAlloc (Thread * Self) ;
+ static void omRelease (Thread * Self, ObjectMonitor * m) ;
+ static void omFlush (Thread * Self) ;
+
+ // Inflate light weight monitor to heavy weight monitor
+ static ObjectMonitor* inflate(Thread * Self, oop obj);
+ // This version is only for internal use
+ static ObjectMonitor* inflate_helper(oop obj);
+
+ // Returns the identity hash value for an oop
+ // NOTE: It may cause monitor inflation
+ static intptr_t identity_hash_value_for(Handle obj);
+ static intptr_t FastHashCode (Thread * Self, oop obj) ;
+
+ // java.lang.Thread support
+ static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
+ static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj);
+
+ static JavaThread* get_lock_owner(Handle h_obj, bool doLock);
+
+ // JNI detach support
+ static void release_monitors_owned_by_thread(TRAPS);
+ static void monitors_iterate(MonitorClosure* m);
+
+ // GC: we currently use an aggressive monitor deflation policy.
+ // Basically we deflate all monitors that are not busy.
+ // An adaptive profile-based deflation policy could be used if needed
+ static void deflate_idle_monitors();
+ static void oops_do(OopClosure* f);
+
+ // debugging
+ static void trace_locking(Handle obj, bool is_compiled, bool is_method, bool is_locking) PRODUCT_RETURN;
+ static void verify() PRODUCT_RETURN;
+ static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
+
+ private:
+ enum { _BLOCKSIZE = 128 };
+ static ObjectMonitor* gBlockList;
+ static ObjectMonitor * volatile gFreeList;
+
+ public:
+ static void Initialize () ;
+ static PerfCounter * _sync_ContendedLockAttempts ;
+ static PerfCounter * _sync_FutileWakeups ;
+ static PerfCounter * _sync_Parks ;
+ static PerfCounter * _sync_EmptyNotifications ;
+ static PerfCounter * _sync_Notifications ;
+ static PerfCounter * _sync_SlowEnter ;
+ static PerfCounter * _sync_SlowExit ;
+ static PerfCounter * _sync_SlowNotify ;
+ static PerfCounter * _sync_SlowNotifyAll ;
+ static PerfCounter * _sync_FailedSpins ;
+ static PerfCounter * _sync_SuccessfulSpins ;
+ static PerfCounter * _sync_PrivateA ;
+ static PerfCounter * _sync_PrivateB ;
+ static PerfCounter * _sync_MonInCirculation ;
+ static PerfCounter * _sync_MonScavenged ;
+ static PerfCounter * _sync_Inflations ;
+ static PerfCounter * _sync_Deflations ;
+ static PerfLongVariable * _sync_MonExtant ;
+
+ public:
+ static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
+
+};
+
+// ObjectLocker enforces balanced locking and can never throw an
+// IllegalMonitorStateException.  However, a pending exception may
+// have to pass through, and we must also be able to deal with
+// asynchronous exceptions.  The caller is responsible for checking
+// the thread's pending exception if needed.
+// doLock was added to support classloading with UnsyncloadClass, which
+// requires a flag-based choice of locking the classloader lock.
+class ObjectLocker : public StackObj {
+ private:
+ Thread* _thread;
+ Handle _obj;
+ BasicLock _lock;
+ bool _dolock; // default true
+ public:
+ ObjectLocker(Handle obj, Thread* thread, bool doLock = true);
+ ~ObjectLocker();
+
+ // Monitor behavior
+ void wait (TRAPS) { ObjectSynchronizer::wait (_obj, 0, CHECK); } // wait forever
+ void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); }
+ void waitUninterruptibly (TRAPS) { ObjectSynchronizer::waitUninterruptibly (_obj, 0, CHECK);}
+ // complete_exit gives up lock completely, returning recursion count
+ // reenter reclaims lock with original recursion count
+ intptr_t complete_exit(TRAPS) { return ObjectSynchronizer::complete_exit(_obj, CHECK_0); }
+ void reenter(intptr_t recursion, TRAPS) { ObjectSynchronizer::reenter(_obj, recursion, CHECK); }
+};
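+
+// Hypothetical usage sketch (the helper name and calling context below are
+// assumptions for illustration, not taken from this file): because the
+// destructor releases the lock, a VM-internal wait stays balanced even when
+// an exception pends:
+//
+//   static void wait_on_object(Handle h_obj, Thread* thread, TRAPS) {
+//     ObjectLocker ol(h_obj, thread);   // locks h_obj; destructor unlocks it
+//     ol.wait(CHECK);                   // waits forever; exceptions pass through
+//   }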
diff --git a/src/share/vm/runtime/task.cpp b/src/share/vm/runtime/task.cpp
new file mode 100644
index 000000000..0fd1bed8a
--- /dev/null
+++ b/src/share/vm/runtime/task.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_task.cpp.incl"
+
+int PeriodicTask::_num_tasks = 0;
+PeriodicTask* PeriodicTask::_tasks[PeriodicTask::max_tasks];
+#ifndef PRODUCT
+elapsedTimer PeriodicTask::_timer;
+int PeriodicTask::_intervalHistogram[PeriodicTask::max_interval];
+int PeriodicTask::_ticks;
+
+void PeriodicTask::print_intervals() {
+ if (ProfilerCheckIntervals) {
+ for (int i = 0; i < PeriodicTask::max_interval; i++) {
+ int n = _intervalHistogram[i];
+ if (n > 0) tty->print_cr("%3d: %5d (%4.1f%%)", i, n, 100.0 * n / _ticks);
+ }
+ }
+}
+#endif
+
+void PeriodicTask::real_time_tick(size_t delay_time) {
+#ifndef PRODUCT
+ if (ProfilerCheckIntervals) {
+ _ticks++;
+ _timer.stop();
+ int ms = (int)(_timer.seconds() * 1000.0);
+ _timer.reset();
+ _timer.start();
+ if (ms >= PeriodicTask::max_interval) ms = PeriodicTask::max_interval - 1;
+ _intervalHistogram[ms]++;
+ }
+#endif
+ int orig_num_tasks = _num_tasks;
+ for(int index = 0; index < _num_tasks; index++) {
+ _tasks[index]->execute_if_pending(delay_time);
+ if (_num_tasks < orig_num_tasks) { // task dis-enrolled itself
+ index--; // re-do current slot as it has changed
+ orig_num_tasks = _num_tasks;
+ }
+ }
+}
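+
+// Worked example (the numbers are illustrative): a task with _interval == 30 ms
+// that receives real_time_tick(10) three times accumulates _counter = 10, 20,
+// 30; on the third tick execute_if_pending() resets _counter and runs task().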
+
+
+PeriodicTask::PeriodicTask(size_t interval_time) :
+ _counter(0), _interval(interval_time) {
+ assert(is_init_completed(), "Periodic tasks should not start during VM initialization");
+ // Sanity check the interval time
+ assert(_interval >= PeriodicTask::min_interval &&
+ _interval <= PeriodicTask::max_interval &&
+ _interval % PeriodicTask::interval_gran == 0,
+ "improper PeriodicTask interval time");
+}
+
+PeriodicTask::~PeriodicTask() {
+ if (is_enrolled())
+ disenroll();
+}
+
+bool PeriodicTask::is_enrolled() const {
+ for(int index = 0; index < _num_tasks; index++)
+ if (_tasks[index] == this) return true;
+ return false;
+}
+
+void PeriodicTask::enroll() {
+ assert(WatcherThread::watcher_thread() == NULL, "dynamic enrollment of tasks not yet supported");
+
+ if (_num_tasks == PeriodicTask::max_tasks)
+ fatal("Overflow in PeriodicTask table");
+ _tasks[_num_tasks++] = this;
+}
+
+void PeriodicTask::disenroll() {
+ assert(WatcherThread::watcher_thread() == NULL ||
+ Thread::current() == WatcherThread::watcher_thread(),
+ "dynamic disenrollment currently only handled from WatcherThread from within task() method");
+
+ int index;
+ for(index = 0; index < _num_tasks && _tasks[index] != this; index++);
+ if (index == _num_tasks) return;
+ _num_tasks--;
+ for (; index < _num_tasks; index++) {
+ _tasks[index] = _tasks[index+1];
+ }
+}
+
+TimeMillisUpdateTask* TimeMillisUpdateTask::_task = NULL;
+
+void TimeMillisUpdateTask::task() {
+ os::update_global_time();
+}
+
+void TimeMillisUpdateTask::engage() {
+ assert(_task == NULL, "init twice?");
+ os::update_global_time(); // initial update
+ os::enable_global_time();
+ _task = new TimeMillisUpdateTask(CacheTimeMillisGranularity);
+ _task->enroll();
+}
+
+void TimeMillisUpdateTask::disengage() {
+ assert(_task != NULL, "uninit twice?");
+ os::disable_global_time();
+ _task->disenroll();
+ delete _task;
+ _task = NULL;
+}
diff --git a/src/share/vm/runtime/task.hpp b/src/share/vm/runtime/task.hpp
new file mode 100644
index 000000000..924562cf0
--- /dev/null
+++ b/src/share/vm/runtime/task.hpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// A PeriodicTask has the sole purpose of executing its task
+// function at regular intervals.
+// Usage:
+// PeriodicTask pf(10);
+// pf.enroll();
+// ...
+// pf.disenroll();
+
+class PeriodicTask: public CHeapObj {
+ public:
+ // Useful constants.
+ // The interval constants are used to ensure the declared interval
+ // is appropriate; it must be between min_interval and max_interval,
+ // and have a granularity of interval_gran (all in millis).
+ enum { max_tasks = 10, // Max number of periodic tasks in system
+ interval_gran = 10,
+ min_interval = 10,
+ max_interval = 10000 };
+
+ static int num_tasks() { return _num_tasks; }
+
+ private:
+ size_t _counter;
+ const size_t _interval;
+
+ static int _num_tasks;
+ static PeriodicTask* _tasks[PeriodicTask::max_tasks];
+ static void real_time_tick(size_t delay_time);
+
+#ifndef PRODUCT
+ static elapsedTimer _timer; // measures time between ticks
+ static int _ticks; // total number of ticks
+ static int _intervalHistogram[max_interval]; // to check spacing of timer interrupts
+ public:
+ static void print_intervals();
+#endif
+ // Only the WatcherThread can cause us to execute PeriodicTasks
+ friend class WatcherThread;
+ public:
+ PeriodicTask(size_t interval_time); // interval is in milliseconds of elapsed time
+ ~PeriodicTask();
+
+ // Tells whether this task is enrolled
+ bool is_enrolled() const;
+
+ // Make the task active
+ // NOTE: this may only be called before the WatcherThread has been started
+ void enroll();
+
+ // Make the task inactive
+ // NOTE: this may only be called either while the WatcherThread is
+ // inactive or by a task from within its task() method. One-shot or
+ // several-shot tasks may be implemented this way.
+ void disenroll();
+
+ void execute_if_pending(size_t delay_time) {
+ _counter += delay_time;
+ if (_counter >= _interval) {
+ _counter = 0;
+ task();
+ }
+ }
+
+ // Returns how long (in milliseconds) until the next time we should
+ // execute this task.
+ size_t time_to_next_interval() const {
+ assert(_interval > _counter, "task counter greater than interval?");
+ return _interval - _counter;
+ }
+
+ // Calculate when the next periodic task will fire.
+ // Called by the WatcherThread's run method.
+ // This assumes that periodic tasks aren't entering the system
+ // dynamically, except during startup.
+ static size_t time_to_wait() {
+ if (_num_tasks == 0) {
+ // Don't wait any more; shut down the thread since we don't
+ // currently support dynamic enrollment.
+ return 0;
+ }
+
+ size_t delay = _tasks[0]->time_to_next_interval();
+ for (int index = 1; index < _num_tasks; index++) {
+ delay = MIN2(delay, _tasks[index]->time_to_next_interval());
+ }
+ return delay;
+ }
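+
+ // Worked example (the two intervals are illustrative): with two enrolled
+ // tasks whose time_to_next_interval() values are 40 ms and 120 ms,
+ // time_to_wait() returns 40, so the WatcherThread sleeps just long enough
+ // for the earliest task to become due.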
+
+ // The task to perform at each period
+ virtual void task() = 0;
+};
+
+class TimeMillisUpdateTask : public PeriodicTask {
+ private:
+ static TimeMillisUpdateTask* _task;
+ public:
+ TimeMillisUpdateTask(int interval) : PeriodicTask(interval) {}
+ void task();
+ static void engage();
+ static void disengage();
+};
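+
+// Hypothetical sketch (the class name and the 100 ms interval are assumptions
+// for illustration): a new periodic activity only needs to subclass
+// PeriodicTask, implement task(), and enroll before the WatcherThread starts:
+//
+//   class HeartbeatTask : public PeriodicTask {
+//    public:
+//     HeartbeatTask() : PeriodicTask(100) {}      // 100 ms, a multiple of interval_gran
+//     void task()     { tty->print_cr("tick"); }  // runs on the WatcherThread
+//   };
+//
+//   // during VM startup, before WatcherThread::start():
+//   (new HeartbeatTask())->enroll();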
diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp
new file mode 100644
index 000000000..607772a02
--- /dev/null
+++ b/src/share/vm/runtime/thread.cpp
@@ -0,0 +1,3972 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_thread.cpp.incl"
+
+#ifdef DTRACE_ENABLED
+
+// Only bother with this argument setup if dtrace is available
+
+HS_DTRACE_PROBE_DECL(hotspot, vm__init__begin);
+HS_DTRACE_PROBE_DECL(hotspot, vm__init__end);
+HS_DTRACE_PROBE_DECL5(hotspot, thread__start, char*, intptr_t,
+ intptr_t, intptr_t, bool);
+HS_DTRACE_PROBE_DECL5(hotspot, thread__stop, char*, intptr_t,
+ intptr_t, intptr_t, bool);
+
+#define DTRACE_THREAD_PROBE(probe, javathread) \
+ { \
+ ResourceMark rm(this); \
+ int len = 0; \
+ const char* name = (javathread)->get_thread_name(); \
+ len = strlen(name); \
+ HS_DTRACE_PROBE5(hotspot, thread__##probe, \
+ name, len, \
+ java_lang_Thread::thread_id((javathread)->threadObj()), \
+ (javathread)->osthread()->thread_id(), \
+ java_lang_Thread::is_daemon((javathread)->threadObj())); \
+ }
+
+#else // ndef DTRACE_ENABLED
+
+#define DTRACE_THREAD_PROBE(probe, javathread)
+
+#endif // ndef DTRACE_ENABLED
+
+// Class hierarchy
+// - Thread
+// - VMThread
+// - WatcherThread
+// - ConcurrentMarkSweepThread
+// - JavaThread
+// - CompilerThread
+
+// ======= Thread ========
+
+// Support for forcing alignment of thread objects for biased locking
+void* Thread::operator new(size_t size) {
+ if (UseBiasedLocking) {
+ const int alignment = markOopDesc::biased_lock_alignment;
+ size_t aligned_size = size + (alignment - sizeof(intptr_t));
+ void* real_malloc_addr = CHeapObj::operator new(aligned_size);
+ void* aligned_addr = (void*) align_size_up((intptr_t) real_malloc_addr, alignment);
+ assert(((uintptr_t) aligned_addr + (uintptr_t) size) <=
+ ((uintptr_t) real_malloc_addr + (uintptr_t) aligned_size),
+ "JavaThread alignment code overflowed allocated storage");
+ if (TraceBiasedLocking) {
+ if (aligned_addr != real_malloc_addr)
+ tty->print_cr("Aligned thread " INTPTR_FORMAT " to " INTPTR_FORMAT,
+ real_malloc_addr, aligned_addr);
+ }
+ ((Thread*) aligned_addr)->_real_malloc_address = real_malloc_addr;
+ return aligned_addr;
+ } else {
+ return CHeapObj::operator new(size);
+ }
+}
+
+void Thread::operator delete(void* p) {
+ if (UseBiasedLocking) {
+ void* real_malloc_addr = ((Thread*) p)->_real_malloc_address;
+ CHeapObj::operator delete(real_malloc_addr);
+ } else {
+ CHeapObj::operator delete(p);
+ }
+}
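+
+// Worked example of the alignment arithmetic above (the concrete numbers are
+// illustrative; the real alignment is markOopDesc::biased_lock_alignment):
+// with alignment == 256 and sizeof(intptr_t) == 8,
+//   aligned_size = size + (256 - 8)
+//   aligned_addr = align_size_up(real_malloc_addr, 256)
+// so the Thread always starts on a 256-byte boundary and still fits entirely
+// inside the block returned by CHeapObj::operator new, which is what the
+// assert in operator new checks.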
+
+
+// Base class for all threads: VMThread, WatcherThread, ConcurrentMarkSweepThread,
+// JavaThread
+
+
+Thread::Thread() {
+ // stack
+ _stack_base = NULL;
+ _stack_size = 0;
+ _self_raw_id = 0;
+ _lgrp_id = -1;
+ _osthread = NULL;
+
+ // allocated data structures
+ set_resource_area(new ResourceArea());
+ set_handle_area(new HandleArea(NULL));
+ set_active_handles(NULL);
+ set_free_handle_block(NULL);
+ set_last_handle_mark(NULL);
+ set_osthread(NULL);
+
+ // This initial value ==> never claimed.
+ _oops_do_parity = 0;
+
+ // the handle mark links itself to last_handle_mark
+ new HandleMark(this);
+
+ // plain initialization
+ debug_only(_owned_locks = NULL;)
+ debug_only(_allow_allocation_count = 0;)
+ NOT_PRODUCT(_allow_safepoint_count = 0;)
+ CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
+ _highest_lock = NULL;
+ _jvmti_env_iteration_count = 0;
+ _vm_operation_started_count = 0;
+ _vm_operation_completed_count = 0;
+ _current_pending_monitor = NULL;
+ _current_pending_monitor_is_from_java = true;
+ _current_waiting_monitor = NULL;
+ _num_nested_signal = 0;
+ omFreeList = NULL ;
+ omFreeCount = 0 ;
+ omFreeProvision = 32 ;
+
+ _SR_lock = new Monitor(Mutex::suspend_resume, "SR_lock", true);
+ _suspend_flags = 0;
+
+ // thread-specific hashCode stream generator state - Marsaglia shift-xor form
+ _hashStateX = os::random() ;
+ _hashStateY = 842502087 ;
+ _hashStateZ = 0x8767 ; // (int)(3579807591LL & 0xffff) ;
+ _hashStateW = 273326509 ;
+
+ _OnTrap = 0 ;
+ _schedctl = NULL ;
+ _Stalled = 0 ;
+ _TypeTag = 0x2BAD ;
+
+ // Many of the following fields are effectively final - immutable
+ // Note that nascent threads can't use the Native Monitor-Mutex
+ // construct until the _MutexEvent is initialized ...
+ // CONSIDER: instead of using a fixed set of purpose-dedicated ParkEvents
+ // we might instead use a stack of ParkEvents that we could provision on-demand.
+ // The stack would act as a cache to avoid calls to ParkEvent::Allocate()
+ // and ::Release()
+ _ParkEvent = ParkEvent::Allocate (this) ;
+ _SleepEvent = ParkEvent::Allocate (this) ;
+ _MutexEvent = ParkEvent::Allocate (this) ;
+ _MuxEvent = ParkEvent::Allocate (this) ;
+
+#ifdef CHECK_UNHANDLED_OOPS
+ if (CheckUnhandledOops) {
+ _unhandled_oops = new UnhandledOops(this);
+ }
+#endif // CHECK_UNHANDLED_OOPS
+#ifdef ASSERT
+ if (UseBiasedLocking) {
+ assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
+ assert(this == _real_malloc_address ||
+ this == (void*) align_size_up((intptr_t) _real_malloc_address, markOopDesc::biased_lock_alignment),
+ "bug in forced alignment of thread objects");
+ }
+#endif /* ASSERT */
+}
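+
+// Illustrative sketch of the Marsaglia shift-xor step that the
+// _hashStateX.._hashStateW seeds above feed (the actual update is performed
+// on the hash-code path in the synchronizer code; this is just the classic
+// xor-shift recurrence, shown for reference):
+//
+//   unsigned t = x;  t ^= (t << 11);
+//   x = y; y = z; z = w;
+//   w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));   // w is the next pseudo-random value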
+
+void Thread::initialize_thread_local_storage() {
+ // Note: Make sure this method only calls
+ // non-blocking operations. Otherwise, it might not work
+ // with the thread-startup/safepoint interaction.
+
+ // During Java thread startup, safepoint code should allow this
+ // method to complete because it may need to allocate memory to
+ // store information for the new thread.
+
+ // initialize structure dependent on thread local storage
+ ThreadLocalStorage::set_thread(this);
+
+ // set up any platform-specific state.
+ os::initialize_thread();
+
+}
+
+void Thread::record_stack_base_and_size() {
+ set_stack_base(os::current_stack_base());
+ set_stack_size(os::current_stack_size());
+}
+
+
+Thread::~Thread() {
+ // Reclaim the objectmonitors from the omFreeList of the moribund thread.
+ ObjectSynchronizer::omFlush (this) ;
+
+ // deallocate data structures
+ delete resource_area();
+ // Since the handle marks use the handle area, we have to deallocate the root
+ // handle mark before deallocating the thread's handle area.
+ assert(last_handle_mark() != NULL, "check we have an element");
+ delete last_handle_mark();
+ assert(last_handle_mark() == NULL, "check we have reached the end");
+
+ // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads.
+ // We NULL out the fields for good hygiene.
+ ParkEvent::Release (_ParkEvent) ; _ParkEvent = NULL ;
+ ParkEvent::Release (_SleepEvent) ; _SleepEvent = NULL ;
+ ParkEvent::Release (_MutexEvent) ; _MutexEvent = NULL ;
+ ParkEvent::Release (_MuxEvent) ; _MuxEvent = NULL ;
+
+ delete handle_area();
+
+ // osthread() can be NULL, if creation of thread failed.
+ if (osthread() != NULL) os::free_thread(osthread());
+
+ delete _SR_lock;
+
+ // clear thread local storage if the Thread is deleting itself
+ if (this == Thread::current()) {
+ ThreadLocalStorage::set_thread(NULL);
+ } else {
+ // In the case where we're not the current thread, invalidate all the
+ // caches in case some code tries to get the current thread or the
+ // thread that was destroyed, and gets stale information.
+ ThreadLocalStorage::invalidate_all();
+ }
+ CHECK_UNHANDLED_OOPS_ONLY(if (CheckUnhandledOops) delete unhandled_oops();)
+}
+
+// NOTE: dummy function for assertion purposes.
+void Thread::run() {
+ ShouldNotReachHere();
+}
+
+#ifdef ASSERT
+// Private method to check for dangling thread pointer
+void check_for_dangling_thread_pointer(Thread *thread) {
+ assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
+ "possibility of dangling Thread pointer");
+}
+#endif
+
+
+#ifndef PRODUCT
+// Tracing method for basic thread operations
+void Thread::trace(const char* msg, const Thread* const thread) {
+ if (!TraceThreadEvents) return;
+ ResourceMark rm;
+ ThreadCritical tc;
+ const char *name = "non-Java thread";
+ int prio = -1;
+ if (thread->is_Java_thread()
+ && !thread->is_Compiler_thread()) {
+ // The Threads_lock must be held to get information about
+ // this thread but may not be in some situations when
+ // tracing thread events.
+ bool release_Threads_lock = false;
+ if (!Threads_lock->owned_by_self()) {
+ Threads_lock->lock();
+ release_Threads_lock = true;
+ }
+ JavaThread* jt = (JavaThread *)thread;
+ name = (char *)jt->get_thread_name();
+ oop thread_oop = jt->threadObj();
+ if (thread_oop != NULL) {
+ prio = java_lang_Thread::priority(thread_oop);
+ }
+ if (release_Threads_lock) {
+ Threads_lock->unlock();
+ }
+ }
+ tty->print_cr("Thread::%s " INTPTR_FORMAT " [%lx] %s (prio: %d)", msg, thread, thread->osthread()->thread_id(), name, prio);
+}
+#endif
+
+
+ThreadPriority Thread::get_priority(const Thread* const thread) {
+ trace("get priority", thread);
+ ThreadPriority priority;
+ // Can return an error!
+ (void)os::get_priority(thread, priority);
+ assert(MinPriority <= priority && priority <= MaxPriority, "non-Java priority found");
+ return priority;
+}
+
+void Thread::set_priority(Thread* thread, ThreadPriority priority) {
+ trace("set priority", thread);
+ debug_only(check_for_dangling_thread_pointer(thread);)
+ // Can return an error!
+ (void)os::set_priority(thread, priority);
+}
+
+
+void Thread::start(Thread* thread) {
+ trace("start", thread);
+ // Start is different from resume in that its safety is guaranteed by context or
+ // being called from a Java method synchronized on the Thread object.
+ if (!DisableStartThread) {
+ if (thread->is_Java_thread()) {
+ // Initialize the thread state to RUNNABLE before starting this thread.
+ // We cannot set it after the thread has started because we do not know the
+ // exact thread state at that time. It could be in MONITOR_WAIT or
+ // in SLEEPING or some other state.
+ java_lang_Thread::set_thread_status(((JavaThread*)thread)->threadObj(),
+ java_lang_Thread::RUNNABLE);
+ }
+ os::start_thread(thread);
+ }
+}
+
+// Enqueue a VM_Operation to do the job for us - sometime later
+void Thread::send_async_exception(oop java_thread, oop java_throwable) {
+ VM_ThreadStop* vm_stop = new VM_ThreadStop(java_thread, java_throwable);
+ VMThread::execute(vm_stop);
+}
+
+
+//
+// Check if an external suspend request has completed (or has been
+// cancelled). Returns true if the thread is externally suspended and
+// false otherwise.
+//
+// The bits parameter returns information about the code path through
+// the routine. Useful for debugging:
+//
+// set in is_ext_suspend_completed():
+// 0x00000001 - routine was entered
+// 0x00000010 - routine returns false at end
+// 0x00000100 - thread exited (return false)
+// 0x00000200 - suspend request cancelled (return false)
+// 0x00000400 - thread suspended (return true)
+// 0x00001000 - thread is in a suspend equivalent state (return true)
+// 0x00002000 - thread is native and walkable (return true)
+// 0x00004000 - thread is native_trans and walkable (needed retry)
+//
+// set in wait_for_ext_suspend_completion():
+// 0x00010000 - routine was entered
+// 0x00020000 - suspend request cancelled before loop (return false)
+// 0x00040000 - thread suspended before loop (return true)
+// 0x00080000 - suspend request cancelled in loop (return false)
+// 0x00100000 - thread suspended in loop (return true)
+// 0x00200000 - suspend not completed during retry loop (return false)
+//
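+// Hypothetical example of reading these bits (the value is made up for
+// illustration): a wait that entered both routines, exhausted its retries,
+// and never saw the thread suspend would report
+//   debug_bits == 0x00210011
+// i.e. 0x00010000 | 0x00000001 | 0x00000010 | 0x00200000.
+//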
+
+// Helper class for tracing suspend wait debug bits.
+//
+// 0x00000100 indicates that the target thread exited before it could
+// self-suspend which is not a wait failure. 0x00000200, 0x00020000 and
+// 0x00080000 each indicate a cancelled suspend request so they don't
+// count as wait failures either.
+#define DEBUG_FALSE_BITS (0x00000010 | 0x00200000)
+
+class TraceSuspendDebugBits : public StackObj {
+ private:
+ JavaThread * jt;
+ bool is_wait;
+ bool called_by_wait; // meaningful when !is_wait
+ uint32_t * bits;
+
+ public:
+ TraceSuspendDebugBits(JavaThread *_jt, bool _is_wait, bool _called_by_wait,
+ uint32_t *_bits) {
+ jt = _jt;
+ is_wait = _is_wait;
+ called_by_wait = _called_by_wait;
+ bits = _bits;
+ }
+
+ ~TraceSuspendDebugBits() {
+ if (!is_wait) {
+#if 1
+ // By default, don't trace bits for is_ext_suspend_completed() calls.
+ // That trace is very chatty.
+ return;
+#else
+ if (!called_by_wait) {
+ // If tracing for is_ext_suspend_completed() is enabled, then only
+ // trace calls to it from wait_for_ext_suspend_completion()
+ return;
+ }
+#endif
+ }
+
+ if (AssertOnSuspendWaitFailure || TraceSuspendWaitFailures) {
+ if (bits != NULL && (*bits & DEBUG_FALSE_BITS) != 0) {
+ MutexLocker ml(Threads_lock); // needed for get_thread_name()
+ ResourceMark rm;
+
+ tty->print_cr(
+ "Failed wait_for_ext_suspend_completion(thread=%s, debug_bits=%x)",
+ jt->get_thread_name(), *bits);
+
+ guarantee(!AssertOnSuspendWaitFailure, "external suspend wait failed");
+ }
+ }
+ }
+};
+#undef DEBUG_FALSE_BITS
+
+
+bool JavaThread::is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits) {
+ TraceSuspendDebugBits tsdb(this, false /* !is_wait */, called_by_wait, bits);
+
+ bool did_trans_retry = false; // only do thread_in_native_trans retry once
+ bool do_trans_retry; // flag to force the retry
+
+ *bits |= 0x00000001;
+
+ do {
+ do_trans_retry = false;
+
+ if (is_exiting()) {
+ // Thread is in the process of exiting. This is always checked
+ // first to reduce the risk of dereferencing a freed JavaThread.
+ *bits |= 0x00000100;
+ return false;
+ }
+
+ if (!is_external_suspend()) {
+ // Suspend request is cancelled. This is always checked before
+ // is_ext_suspended() to reduce the risk of a rogue resume
+ // confusing the thread that made the suspend request.
+ *bits |= 0x00000200;
+ return false;
+ }
+
+ if (is_ext_suspended()) {
+ // thread is suspended
+ *bits |= 0x00000400;
+ return true;
+ }
+
+ // Now that we no longer do hard suspends of threads running
+ // native code, the target thread can be changing thread state
+ // while we are in this routine:
+ //
+ // _thread_in_native -> _thread_in_native_trans -> _thread_blocked
+ //
+ // We save a copy of the thread state as observed at this moment
+ // and make our decision about suspend completeness based on the
+ // copy. This closes the race where the thread state is seen as
+ // _thread_in_native_trans in the if-thread_blocked check, but is
+ // seen as _thread_blocked in if-thread_in_native_trans check.
+ JavaThreadState save_state = thread_state();
+
+ if (save_state == _thread_blocked && is_suspend_equivalent()) {
+ // If the thread's state is _thread_blocked and this blocking
+ // condition is known to be equivalent to a suspend, then we can
+ // consider the thread to be externally suspended. This means that
+ // the code that sets _thread_blocked has been modified to do
+ // self-suspension if the blocking condition releases. We also
+ // used to check for CONDVAR_WAIT here, but that is now covered by
+ // the _thread_blocked with self-suspension check.
+ //
+ // Return true since we wouldn't be here unless there was still an
+ // external suspend request.
+ *bits |= 0x00001000;
+ return true;
+ } else if (save_state == _thread_in_native && frame_anchor()->walkable()) {
+ // Threads running native code will self-suspend on native==>VM/Java
+ // transitions. If its stack is walkable (should always be the case
+ // unless this function is called before the actual java_suspend()
+ // call), then the wait is done.
+ *bits |= 0x00002000;
+ return true;
+ } else if (!called_by_wait && !did_trans_retry &&
+ save_state == _thread_in_native_trans &&
+ frame_anchor()->walkable()) {
+ // The thread is transitioning from thread_in_native to another
+ // thread state. check_safepoint_and_suspend_for_native_trans()
+ // will force the thread to self-suspend. If it hasn't gotten
+ // there yet we may have caught the thread in-between the native
+ // code check above and the self-suspend. Lucky us. If we were
+ // called by wait_for_ext_suspend_completion(), then it
+ // will be doing the retries so we don't have to.
+ //
+ // Since we use the saved thread state in the if-statement above,
+ // there is a chance that the thread has already transitioned to
+ // _thread_blocked by the time we get here. In that case, we will
+ // make a single unnecessary pass through the logic below. This
+ // doesn't hurt anything since we still do the trans retry.
+
+ *bits |= 0x00004000;
+
+ // Once the thread leaves thread_in_native_trans for another
+ // thread state, we break out of this retry loop. We shouldn't
+ // need this flag to prevent us from getting back here, but
+ // sometimes paranoia is good.
+ did_trans_retry = true;
+
+ // We wait for the thread to transition to a more usable state.
+ for (int i = 1; i <= SuspendRetryCount; i++) {
+ // We used to do an "os::yield_all(i)" call here with the intention
+ // that yielding would increase on each retry. However, the parameter
+ // is ignored on Linux which means the yield didn't scale up. Waiting
+ // on the SR_lock below provides a much more predictable scale up for
+ // the delay. It also provides a simple/direct point to check for any
+ // safepoint requests from the VMThread
+
+ // temporarily drops SR_lock while doing wait with safepoint check
+ // (if we're a JavaThread - the WatcherThread can also call this)
+ // and increase delay with each retry
+ SR_lock()->wait(!Thread::current()->is_Java_thread(), i * delay);
+
+ // check the actual thread state instead of what we saved above
+ if (thread_state() != _thread_in_native_trans) {
+ // the thread has transitioned to another thread state so
+ // try all the checks (except this one) one more time.
+ do_trans_retry = true;
+ break;
+ }
+ } // end retry loop
+
+
+ }
+ } while (do_trans_retry);
+
+ *bits |= 0x00000010;
+ return false;
+}
+
+//
+// Wait for an external suspend request to complete (or be cancelled).
+// Returns true if the thread is externally suspended and false otherwise.
+//
+bool JavaThread::wait_for_ext_suspend_completion(int retries, int delay,
+ uint32_t *bits) {
+ TraceSuspendDebugBits tsdb(this, true /* is_wait */,
+ false /* !called_by_wait */, bits);
+
+ // local flag copies to minimize SR_lock hold time
+ bool is_suspended;
+ bool pending;
+ uint32_t reset_bits;
+
+ // set a marker so is_ext_suspend_completed() knows we are the caller
+ *bits |= 0x00010000;
+
+ // We use reset_bits to reinitialize the bits value at the top of
+ // each retry loop. This allows the caller to make use of any
+ // unused bits for their own marking purposes.
+ reset_bits = *bits;
+
+ {
+ MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
+ is_suspended = is_ext_suspend_completed(true /* called_by_wait */,
+ delay, bits);
+ pending = is_external_suspend();
+ }
+ // must release SR_lock to allow suspension to complete
+
+ if (!pending) {
+ // A cancelled suspend request is the only false return from
+ // is_ext_suspend_completed() that keeps us from entering the
+ // retry loop.
+ *bits |= 0x00020000;
+ return false;
+ }
+
+ if (is_suspended) {
+ *bits |= 0x00040000;
+ return true;
+ }
+
+ for (int i = 1; i <= retries; i++) {
+ *bits = reset_bits; // reinit to only track last retry
+
+ // We used to do an "os::yield_all(i)" call here with the intention
+ // that yielding would increase on each retry. However, the parameter
+ // is ignored on Linux which means the yield didn't scale up. Waiting
+ // on the SR_lock below provides a much more predictable scale up for
+ // the delay. It also provides a simple/direct point to check for any
+ // safepoint requests from the VMThread
+
+ {
+ MutexLocker ml(SR_lock());
+ // wait with safepoint check (if we're a JavaThread - the WatcherThread
+ // can also call this) and increase delay with each retry
+ SR_lock()->wait(!Thread::current()->is_Java_thread(), i * delay);
+
+ is_suspended = is_ext_suspend_completed(true /* called_by_wait */,
+ delay, bits);
+
+ // It is possible for the external suspend request to be cancelled
+ // (by a resume) before the actual suspend operation is completed.
+ // Refresh our local copy to see if we still need to wait.
+ pending = is_external_suspend();
+ }
+
+ if (!pending) {
+ // A cancelled suspend request is the only false return from
+ // is_ext_suspend_completed() that keeps us from staying in the
+ // retry loop.
+ *bits |= 0x00080000;
+ return false;
+ }
+
+ if (is_suspended) {
+ *bits |= 0x00100000;
+ return true;
+ }
+ } // end retry loop
+
+ // thread did not suspend after all our retries
+ *bits |= 0x00200000;
+ return false;
+}
+
+#ifndef PRODUCT
+void JavaThread::record_jump(address target, address instr, const char* file, int line) {
+
+ // This should not need to be atomic, as the only way for simultaneous
+ // updates is via interrupts. Even then this should be rare or non-existent,
+ // and we don't care that much anyway.
+
+ int index = _jmp_ring_index;
+ _jmp_ring_index = (index + 1 ) & (jump_ring_buffer_size - 1);
+ _jmp_ring[index]._target = (intptr_t) target;
+ _jmp_ring[index]._instruction = (intptr_t) instr;
+ _jmp_ring[index]._file = file;
+ _jmp_ring[index]._line = line;
+}
+#endif /* PRODUCT */
+
+// Called by the flat profiler.
+// Callers have already called wait_for_ext_suspend_completion.
+// The assertion for that is currently too complex to put here.
+bool JavaThread::profile_last_Java_frame(frame* _fr) {
+ bool gotframe = false;
+ // self suspension saves needed state.
+ if (has_last_Java_frame() && _anchor.walkable()) {
+ *_fr = pd_last_frame();
+ gotframe = true;
+ }
+ return gotframe;
+}
+
+void Thread::interrupt(Thread* thread) {
+ trace("interrupt", thread);
+ debug_only(check_for_dangling_thread_pointer(thread);)
+ os::interrupt(thread);
+}
+
+bool Thread::is_interrupted(Thread* thread, bool clear_interrupted) {
+ trace("is_interrupted", thread);
+ debug_only(check_for_dangling_thread_pointer(thread);)
+ // Note: If clear_interrupted==false, this simply fetches and
+ // returns the value of the field osthread()->interrupted().
+ return os::is_interrupted(thread, clear_interrupted);
+}
+
+
+// GC Support
+bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
+ jint thread_parity = _oops_do_parity;
+ if (thread_parity != strong_roots_parity) {
+ jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
+ if (res == thread_parity) return true;
+ else {
+ guarantee(res == strong_roots_parity, "Or else what?");
+ assert(SharedHeap::heap()->n_par_threads() > 0,
+ "Should only fail when parallel.");
+ return false;
+ }
+ }
+ assert(SharedHeap::heap()->n_par_threads() > 0,
+ "Should only fail when parallel.");
+ return false;
+}
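+
+// Illustrative walk-through of the claiming protocol above (the two-worker
+// scenario is hypothetical): if two parallel GC workers both observe
+// _oops_do_parity != strong_roots_parity, only one cmpxchg returns the old
+// parity and thereby "claims" this thread; the other sees strong_roots_parity
+// come back and skips it, so each thread's roots are scanned exactly once per
+// strong-roots phase.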
+
+void Thread::oops_do(OopClosure* f) {
+ active_handles()->oops_do(f);
+ // Do oop for ThreadShadow
+ f->do_oop((oop*)&_pending_exception);
+ handle_area()->oops_do(f);
+}
+
+void Thread::nmethods_do() {
+}
+
+void Thread::print_on(outputStream* st) const {
+ // get_priority assumes osthread initialized
+ if (osthread() != NULL) {
+ st->print("prio=%d tid=" INTPTR_FORMAT " ", get_priority(this), this);
+ osthread()->print_on(st);
+ }
+ debug_only(if (WizardMode) print_owned_locks_on(st);)
+}
+
+// Thread::print_on_error() is called by fatal error handler. Don't use
+// any lock or allocate memory.
+void Thread::print_on_error(outputStream* st, char* buf, int buflen) const {
+ if (is_VM_thread()) st->print("VMThread");
+ else if (is_Compiler_thread()) st->print("CompilerThread");
+ else if (is_Java_thread()) st->print("JavaThread");
+ else if (is_GC_task_thread()) st->print("GCTaskThread");
+ else if (is_Watcher_thread()) st->print("WatcherThread");
+ else if (is_ConcurrentGC_thread()) st->print("ConcurrentGCThread");
+ else st->print("Thread");
+
+ st->print(" [stack: " PTR_FORMAT "," PTR_FORMAT "]",
+ _stack_base - _stack_size, _stack_base);
+
+ if (osthread()) {
+ st->print(" [id=%d]", osthread()->thread_id());
+ }
+}
+
+#ifdef ASSERT
+void Thread::print_owned_locks_on(outputStream* st) const {
+ Monitor *cur = _owned_locks;
+ if (cur == NULL) {
+ st->print(" (no locks) ");
+ } else {
+ st->print_cr(" Locks owned:");
+ while(cur) {
+ cur->print_on(st);
+ cur = cur->next();
+ }
+ }
+}
+
+static int ref_use_count = 0;
+
+bool Thread::owns_locks_but_compiled_lock() const {
+ for(Monitor *cur = _owned_locks; cur; cur = cur->next()) {
+ if (cur != Compile_lock) return true;
+ }
+ return false;
+}
+
+
+#endif
+
+#ifndef PRODUCT
+
+// The potential_vm_operation flag indicates whether this particular safepoint state could
+// potentially invoke the VM thread (i.e., an oop allocation). In that case, we also have to make
+// sure that no locks for which allow_vm_block is set are held.
+void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
+ // Check if current thread is allowed to block at a safepoint
+ if (!(_allow_safepoint_count == 0))
+ fatal("Possible safepoint reached by thread that does not allow it");
+ if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
+ fatal("LEAF method calling lock?");
+ }
+
+#ifdef ASSERT
+ if (potential_vm_operation && is_Java_thread()
+ && !Universe::is_bootstrapping()) {
+ // Make sure we do not hold any locks that the VM thread also uses.
+ // This could potentially lead to deadlocks
+ for(Monitor *cur = _owned_locks; cur; cur = cur->next()) {
+ // Threads_lock is special, since the safepoint synchronization will not start before this is
+ // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
+ // since it is used to transfer control between JavaThreads and the VMThread
+ // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
+ if ( (cur->allow_vm_block() &&
+ cur != Threads_lock &&
+ cur != Compile_lock && // Temporary: should not be necessary when we get separate compilation
+ cur != VMOperationRequest_lock &&
+ cur != VMOperationQueue_lock) ||
+ cur->rank() == Mutex::special) {
+ warning("Thread holding lock at safepoint that vm can block on: %s", cur->name());
+ }
+ }
+ }
+
+ if (GCALotAtAllSafepoints) {
+ // We could enter a safepoint here and thus have a gc
+ InterfaceSupport::check_gc_alot();
+ }
+
+#endif
+}
+#endif
+
+bool Thread::lock_is_in_stack(address adr) const {
+ assert(Thread::current() == this, "lock_is_in_stack can only be called from current thread");
+ // High limit: highest_lock is set during thread execution
+ // Low limit: address of the local variable dummy, rounded to 4K boundary.
+ // (The rounding helps finding threads in unsafe mode, even if the particular stack
+ // frame has been popped already. Correct as long as stacks are at least 4K long and aligned.)
+ address end = os::current_stack_pointer();
+ if (_highest_lock >= adr && adr >= end) return true;
+
+ return false;
+}
+
+
+bool Thread::is_in_stack(address adr) const {
+ assert(Thread::current() == this, "is_in_stack can only be called from current thread");
+ address end = os::current_stack_pointer();
+ if (stack_base() >= adr && adr >= end) return true;
+
+ return false;
+}
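+
+// Worked example (the addresses are made up): on a downward-growing stack with
+// stack_base() == 0x7f0000100000 and a current sp of 0x7f00000ff000,
+// is_in_stack(adr) returns true exactly for adr in [0x7f00000ff000,
+// 0x7f0000100000], i.e. the currently used portion of this thread's stack.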
+
+
+// We had to move these methods here, because vm threads get into ObjectSynchronizer::enter
+// However, there is a note in JavaThread::is_lock_owned() about the VM threads not being
+// used for compilation in the future. If that change is made, the need for these methods
+// should be revisited, and they should be removed if possible.
+
+bool Thread::is_lock_owned(address adr) const {
+ if (lock_is_in_stack(adr) ) return true;
+ return false;
+}
+
+bool Thread::set_as_starting_thread() {
+ // NOTE: this must be called inside the main thread.
+ return os::create_main_thread((JavaThread*)this);
+}
+
+static void initialize_class(symbolHandle class_name, TRAPS) {
+ klassOop klass = SystemDictionary::resolve_or_fail(class_name, true, CHECK);
+ instanceKlass::cast(klass)->initialize(CHECK);
+}
+
+
+// Creates the initial ThreadGroup
+static Handle create_initial_thread_group(TRAPS) {
+ klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_ThreadGroup(), true, CHECK_NH);
+ instanceKlassHandle klass (THREAD, k);
+
+ Handle system_instance = klass->allocate_instance_handle(CHECK_NH);
+ {
+ JavaValue result(T_VOID);
+ JavaCalls::call_special(&result,
+ system_instance,
+ klass,
+ vmSymbolHandles::object_initializer_name(),
+ vmSymbolHandles::void_method_signature(),
+ CHECK_NH);
+ }
+ Universe::set_system_thread_group(system_instance());
+
+ Handle main_instance = klass->allocate_instance_handle(CHECK_NH);
+ {
+ JavaValue result(T_VOID);
+ Handle string = java_lang_String::create_from_str("main", CHECK_NH);
+ JavaCalls::call_special(&result,
+ main_instance,
+ klass,
+ vmSymbolHandles::object_initializer_name(),
+ vmSymbolHandles::threadgroup_string_void_signature(),
+ system_instance,
+ string,
+ CHECK_NH);
+ }
+ return main_instance;
+}
+
+// Creates the initial Thread
+static oop create_initial_thread(Handle thread_group, JavaThread* thread, TRAPS) {
+ klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_Thread(), true, CHECK_NULL);
+ instanceKlassHandle klass (THREAD, k);
+ instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_NULL);
+
+ java_lang_Thread::set_thread(thread_oop(), thread);
+ java_lang_Thread::set_priority(thread_oop(), NormPriority);
+ thread->set_threadObj(thread_oop());
+
+ Handle string = java_lang_String::create_from_str("main", CHECK_NULL);
+
+ JavaValue result(T_VOID);
+ JavaCalls::call_special(&result, thread_oop,
+ klass,
+ vmSymbolHandles::object_initializer_name(),
+ vmSymbolHandles::threadgroup_string_void_signature(),
+ thread_group,
+ string,
+ CHECK_NULL);
+ return thread_oop();
+}
+
+static void call_initializeSystemClass(TRAPS) {
+ klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_System(), true, CHECK);
+ instanceKlassHandle klass (THREAD, k);
+
+ JavaValue result(T_VOID);
+ JavaCalls::call_static(&result, klass, vmSymbolHandles::initializeSystemClass_name(),
+ vmSymbolHandles::void_method_signature(), CHECK);
+}
+
+static void reset_vm_info_property(TRAPS) {
+ // the vm info string
+ ResourceMark rm(THREAD);
+ const char *vm_info = VM_Version::vm_info_string();
+
+ // java.lang.System class
+ klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_System(), true, CHECK);
+ instanceKlassHandle klass (THREAD, k);
+
+ // setProperty arguments
+ Handle key_str = java_lang_String::create_from_str("java.vm.info", CHECK);
+ Handle value_str = java_lang_String::create_from_str(vm_info, CHECK);
+
+ // return value
+ JavaValue r(T_OBJECT);
+
+ // public static String setProperty(String key, String value);
+ JavaCalls::call_static(&r,
+ klass,
+ vmSymbolHandles::setProperty_name(),
+ vmSymbolHandles::string_string_string_signature(),
+ key_str,
+ value_str,
+ CHECK);
+}
+
+
+void JavaThread::allocate_threadObj(Handle thread_group, char* thread_name, bool daemon, TRAPS) {
+ assert(thread_group.not_null(), "thread group should be specified");
+ assert(threadObj() == NULL, "should only create Java thread object once");
+
+ klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_Thread(), true, CHECK);
+ instanceKlassHandle klass (THREAD, k);
+ instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
+
+ java_lang_Thread::set_thread(thread_oop(), this);
+ java_lang_Thread::set_priority(thread_oop(), NormPriority);
+ set_threadObj(thread_oop());
+
+ JavaValue result(T_VOID);
+ if (thread_name != NULL) {
+ Handle name = java_lang_String::create_from_str(thread_name, CHECK);
+ // Thread gets assigned specified name and null target
+ JavaCalls::call_special(&result,
+ thread_oop,
+ klass,
+ vmSymbolHandles::object_initializer_name(),
+ vmSymbolHandles::threadgroup_string_void_signature(),
+ thread_group, // Argument 1
+ name, // Argument 2
+ THREAD);
+ } else {
+ // Thread gets assigned name "Thread-nnn" and null target
+ // (java.lang.Thread doesn't have a constructor taking only a ThreadGroup argument)
+ JavaCalls::call_special(&result,
+ thread_oop,
+ klass,
+ vmSymbolHandles::object_initializer_name(),
+ vmSymbolHandles::threadgroup_runnable_void_signature(),
+ thread_group, // Argument 1
+ Handle(), // Argument 2
+ THREAD);
+ }
+
+
+ if (daemon) {
+ java_lang_Thread::set_daemon(thread_oop());
+ }
+
+ if (HAS_PENDING_EXCEPTION) {
+ return;
+ }
+
+ KlassHandle group(this, SystemDictionary::threadGroup_klass());
+ Handle threadObj(this, this->threadObj());
+
+ JavaCalls::call_special(&result,
+ thread_group,
+ group,
+ vmSymbolHandles::add_method_name(),
+ vmSymbolHandles::thread_void_signature(),
+ threadObj, // Arg 1
+ THREAD);
+
+
+}
+
+// NamedThread -- non-JavaThread subclasses with multiple
+// uniquely named instances should derive from this.
+NamedThread::NamedThread() : Thread() {
+ _name = NULL;
+}
+
+NamedThread::~NamedThread() {
+ if (_name != NULL) {
+ FREE_C_HEAP_ARRAY(char, _name);
+ _name = NULL;
+ }
+}
+
+void NamedThread::set_name(const char* format, ...) {
+ guarantee(_name == NULL, "Only get to set name once.");
+ _name = NEW_C_HEAP_ARRAY(char, max_name_len);
+ guarantee(_name != NULL, "alloc failure");
+ va_list ap;
+ va_start(ap, format);
+ jio_vsnprintf(_name, max_name_len, format, ap);
+ va_end(ap);
+}
+
+// ======= WatcherThread ========
+
+// The watcher thread exists to simulate timer interrupts. It should
+// be replaced by an abstraction over whatever native support for
+// timer interrupts exists on the platform.
+
+WatcherThread* WatcherThread::_watcher_thread = NULL;
+bool WatcherThread::_should_terminate = false;
+
+WatcherThread::WatcherThread() : Thread() {
+ assert(watcher_thread() == NULL, "we can only allocate one WatcherThread");
+ if (os::create_thread(this, os::watcher_thread)) {
+ _watcher_thread = this;
+
+ // Set the watcher thread to the highest OS priority, which should not be
+ // used unless a Java thread with priority java.lang.Thread.MAX_PRIORITY
+ // is created. The only normal thread using this priority is the reference
+ // handler thread, which runs for very short intervals only.
+ // If the VMThread's priority is not lower than the WatcherThread's,
+ // profiling will be inaccurate.
+ os::set_priority(this, MaxPriority);
+ if (!DisableStartThread) {
+ os::start_thread(this);
+ }
+ }
+}
+
+void WatcherThread::run() {
+ assert(this == watcher_thread(), "just checking");
+
+ this->record_stack_base_and_size();
+ this->initialize_thread_local_storage();
+ this->set_active_handles(JNIHandleBlock::allocate_block());
+ while(!_should_terminate) {
+ assert(watcher_thread() == Thread::current(), "thread consistency check");
+ assert(watcher_thread() == this, "thread consistency check");
+
+ // Calculate how long it'll be until the next PeriodicTask work
+ // should be done, and sleep that amount of time.
+ const size_t time_to_wait = PeriodicTask::time_to_wait();
+ os::sleep(this, time_to_wait, false);
+
+ if (is_error_reported()) {
+ // A fatal error has happened; the error handler (VMError::report_and_die)
+ // should abort the JVM after creating an error log file. However, in some
+ // rare cases, the error handler itself might deadlock. Here we try to
+ // kill the JVM if the fatal error handler fails to abort within 2 minutes.
+ //
+ // This code is in WatcherThread because WatcherThread wakes up
+ // periodically so the fatal error handler doesn't need to do anything;
+ // also because the WatcherThread is less likely to crash than other
+ // threads.
+
+ for (;;) {
+ if (!ShowMessageBoxOnError
+ && (OnError == NULL || OnError[0] == '\0')
+ && Arguments::abort_hook() == NULL) {
+ os::sleep(this, 2 * 60 * 1000, false);
+ fdStream err(defaultStream::output_fd());
+ err.print_raw_cr("# [ timer expired, abort... ]");
+ // skip atexit/vm_exit/vm_abort hooks
+ os::die();
+ }
+
+ // Wake up 5 seconds later; the fatal handler may reset OnError or
+ // ShowMessageBoxOnError when it is ready to abort.
+ os::sleep(this, 5 * 1000, false);
+ }
+ }
+
+ PeriodicTask::real_time_tick(time_to_wait);
+
+ // If we have no more tasks left due to dynamic disenrollment,
+ // shut down the thread since we don't currently support dynamic enrollment
+ if (PeriodicTask::num_tasks() == 0) {
+ _should_terminate = true;
+ }
+ }
+
+ // Signal that it is terminated
+ {
+ MutexLockerEx mu(Terminator_lock, Mutex::_no_safepoint_check_flag);
+ _watcher_thread = NULL;
+ Terminator_lock->notify();
+ }
+
+ // Thread destructor usually does this..
+ ThreadLocalStorage::set_thread(NULL);
+}
+
+void WatcherThread::start() {
+ if (watcher_thread() == NULL) {
+ _should_terminate = false;
+ // Create the single instance of WatcherThread
+ new WatcherThread();
+ }
+}
+
+void WatcherThread::stop() {
+ // it is ok to take late safepoints here, if needed
+ MutexLocker mu(Terminator_lock);
+ _should_terminate = true;
+ while(watcher_thread() != NULL) {
+ // This wait should make safepoint checks, wait without a timeout,
+ // and wait as a suspend-equivalent condition.
+ //
+ // Note: If the FlatProfiler is running, then this thread is waiting
+ // for the WatcherThread to terminate and the WatcherThread, via the
+ // FlatProfiler task, is waiting for the external suspend request on
+ // this thread to complete. wait_for_ext_suspend_completion() will
+ // eventually timeout, but that takes time. Making this wait a
+ // suspend-equivalent condition solves that timeout problem.
+ //
+ Terminator_lock->wait(!Mutex::_no_safepoint_check_flag, 0,
+ Mutex::_as_suspend_equivalent_flag);
+ }
+}
+
+void WatcherThread::print_on(outputStream* st) const {
+ st->print("\"%s\" ", name());
+ Thread::print_on(st);
+ st->cr();
+}
+
+// ======= JavaThread ========
+
+// A JavaThread is a normal Java thread
+
+void JavaThread::initialize() {
+ // Initialize fields
+ set_saved_exception_pc(NULL);
+ set_threadObj(NULL);
+ _anchor.clear();
+ set_entry_point(NULL);
+ set_jni_functions(jni_functions());
+ set_callee_target(NULL);
+ set_vm_result(NULL);
+ set_vm_result_2(NULL);
+ set_vframe_array_head(NULL);
+ set_vframe_array_last(NULL);
+ set_deferred_locals(NULL);
+ set_deopt_mark(NULL);
+ clear_must_deopt_id();
+ set_monitor_chunks(NULL);
+ set_next(NULL);
+ set_thread_state(_thread_new);
+ _terminated = _not_terminated;
+ _privileged_stack_top = NULL;
+ _array_for_gc = NULL;
+ _suspend_equivalent = false;
+ _in_deopt_handler = 0;
+ _doing_unsafe_access = false;
+ _stack_guard_state = stack_guard_unused;
+ _exception_oop = NULL;
+ _exception_pc = 0;
+ _exception_handler_pc = 0;
+ _exception_stack_size = 0;
+ _jvmti_thread_state= NULL;
+ _jvmti_get_loaded_classes_closure = NULL;
+ _interp_only_mode = 0;
+ _special_runtime_exit_condition = _no_async_condition;
+ _pending_async_exception = NULL;
+ _is_compiling = false;
+ _thread_stat = NULL;
+ _thread_stat = new ThreadStatistics();
+ _blocked_on_compilation = false;
+ _jni_active_critical = 0;
+ _do_not_unlock_if_synchronized = false;
+ _cached_monitor_info = NULL;
+ _parker = Parker::Allocate(this) ;
+
+#ifndef PRODUCT
+ _jmp_ring_index = 0;
+ for (int ji = 0 ; ji < jump_ring_buffer_size ; ji++ ) {
+ record_jump(NULL, NULL, NULL, 0);
+ }
+#endif /* PRODUCT */
+
+ set_thread_profiler(NULL);
+ if (FlatProfiler::is_active()) {
+ // This is where we would decide to either give each thread its own profiler
+ // or use one global one from FlatProfiler,
+ // or cap the number of profiled threads at some count, etc.
+ ThreadProfiler* pp = new ThreadProfiler();
+ pp->engage();
+ set_thread_profiler(pp);
+ }
+
+ // Setup safepoint state info for this thread
+ ThreadSafepointState::create(this);
+
+ debug_only(_java_call_counter = 0);
+
+ // JVMTI PopFrame support
+ _popframe_condition = popframe_inactive;
+ _popframe_preserved_args = NULL;
+ _popframe_preserved_args_size = 0;
+
+ pd_initialize();
+}
+
+JavaThread::JavaThread(bool is_attaching) : Thread() {
+ initialize();
+ _is_attaching = is_attaching;
+}
+
+bool JavaThread::reguard_stack(address cur_sp) {
+ if (_stack_guard_state != stack_guard_yellow_disabled) {
+ return true; // Stack already guarded or guard pages not needed.
+ }
+
+ if (register_stack_overflow()) {
+ // For those architectures which have separate register and
+ // memory stacks, we must check the register stack to see if
+ // it has overflowed.
+ return false;
+ }
+
+ // Java code never executes within the yellow zone: the latter is only
+ // there to provoke an exception during stack banging. If java code
+ // is executing there, either StackShadowPages should be larger, or
+ // some exception code in c1, c2 or the interpreter isn't unwinding
+ // when it should.
+ guarantee(cur_sp > stack_yellow_zone_base(), "not enough space to reguard - increase StackShadowPages");
+
+ enable_stack_yellow_zone();
+ return true;
+}
+
+bool JavaThread::reguard_stack(void) {
+ return reguard_stack(os::current_stack_pointer());
+}
+
+
+void JavaThread::block_if_vm_exited() {
+ if (_terminated == _vm_exited) {
+ // _vm_exited is set at safepoint, and Threads_lock is never released
+ // we will block here forever
+ Threads_lock->lock_without_safepoint_check();
+ ShouldNotReachHere();
+ }
+}
+
+
+// Remove this ifdef when C1 is ported to the compiler interface.
+static void compiler_thread_entry(JavaThread* thread, TRAPS);
+
+JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) : Thread() {
+ if (TraceThreadEvents) {
+ tty->print_cr("creating thread %p", this);
+ }
+ initialize();
+ _is_attaching = false;
+ set_entry_point(entry_point);
+ // Create the native thread itself.
+ // %note runtime_23
+ os::ThreadType thr_type = os::java_thread;
+ thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread :
+ os::java_thread;
+ os::create_thread(this, thr_type, stack_sz);
+
+ // The _osthread may be NULL here because we ran out of memory (too many threads active).
+ // We need to throw an OutOfMemoryError - however we cannot do this here because the caller
+ // may hold a lock and all locks must be unlocked before throwing the exception (throwing
+ // the exception consists of creating the exception object & initializing it; initialization
+ // will leave the VM via a JavaCall and then all locks must be unlocked).
+ //
+ // The thread is still suspended when we reach here. The thread must be explicitly started
+ // by its creator! Furthermore, the thread must also explicitly be added to the Threads list
+ // by calling Threads::add. The reason this is not done here is that the thread
+ // object must be fully initialized (take a look at JVM_Start).
+}
+
+JavaThread::~JavaThread() {
+ if (TraceThreadEvents) {
+ tty->print_cr("terminate thread %p", this);
+ }
+
+ // JSR166 -- return the parker to the free list
+ Parker::Release(_parker);
+ _parker = NULL ;
+
+ // Free any remaining previous UnrollBlock
+ vframeArray* old_array = vframe_array_last();
+
+ if (old_array != NULL) {
+ Deoptimization::UnrollBlock* old_info = old_array->unroll_block();
+ old_array->set_unroll_block(NULL);
+ delete old_info;
+ delete old_array;
+ }
+
+ GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = deferred_locals();
+ if (deferred != NULL) {
+ // This can only happen if thread is destroyed before deoptimization occurs.
+ assert(deferred->length() != 0, "empty array!");
+ do {
+ jvmtiDeferredLocalVariableSet* dlv = deferred->at(0);
+ deferred->remove_at(0);
+ // individual jvmtiDeferredLocalVariableSet are CHeapObj's
+ delete dlv;
+ } while (deferred->length() != 0);
+ delete deferred;
+ }
+
+ // All Java related clean up happens in exit
+ ThreadSafepointState::destroy(this);
+ if (_thread_profiler != NULL) delete _thread_profiler;
+ if (_thread_stat != NULL) delete _thread_stat;
+
+ if (jvmti_thread_state() != NULL) {
+ JvmtiExport::cleanup_thread(this);
+ }
+}
+
+
+// The first routine called by a new Java thread
+void JavaThread::run() {
+ // initialize thread-local alloc buffer related fields
+ this->initialize_tlab();
+
+ // used to test validity of stack tracebacks
+ this->record_base_of_stack_pointer();
+
+ // Record real stack base and size.
+ this->record_stack_base_and_size();
+
+ // Initialize thread local storage; set before calling MutexLocker
+ this->initialize_thread_local_storage();
+
+ this->create_stack_guard_pages();
+
+ // The thread is now sufficiently initialized to be handled by the safepoint code as being
+ // in the VM. Change thread state from _thread_new to _thread_in_vm.
+ ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm);
+
+ assert(JavaThread::current() == this, "sanity check");
+ assert(!Thread::current()->owns_locks(), "sanity check");
+
+ DTRACE_THREAD_PROBE(start, this);
+
+ // This operation might block. We call it after all safepoint checks for a new thread have
+ // been completed.
+ this->set_active_handles(JNIHandleBlock::allocate_block());
+
+ if (JvmtiExport::should_post_thread_life()) {
+ JvmtiExport::post_thread_start(this);
+ }
+
+ // We call another function to do the rest so we are sure that the stack addresses used
+ // from there will be lower than the stack base just computed
+ thread_main_inner();
+
+ // Note, thread is no longer valid at this point!
+}
+
+
+void JavaThread::thread_main_inner() {
+ assert(JavaThread::current() == this, "sanity check");
+ assert(this->threadObj() != NULL, "just checking");
+
+ // Execute thread entry point. If this thread is being asked to restart,
+ // or has been stopped before starting, do not reexecute entry point.
+ // Note: Due to JVM_StopThread we can have pending exceptions already!
+ if (!this->has_pending_exception() && !java_lang_Thread::is_stillborn(this->threadObj())) {
+ // enter the thread's entry point only if we have no pending exceptions
+ HandleMark hm(this);
+ this->entry_point()(this, this);
+ }
+
+ DTRACE_THREAD_PROBE(stop, this);
+
+ this->exit(false);
+ delete this;
+}
+
+
+static void ensure_join(JavaThread* thread) {
+ // We do not need to grab the Threads_lock, since we are operating on ourselves.
+ Handle threadObj(thread, thread->threadObj());
+ assert(threadObj.not_null(), "java thread object must exist");
+ ObjectLocker lock(threadObj, thread);
+ // Ignore pending exception (ThreadDeath), since we are exiting anyway
+ thread->clear_pending_exception();
+ // It is of profound importance that we set the stillborn bit and reset the thread object
+ // before we do the notify, since changing these two variables will make JVM_IsAlive return
+ // false. So if another thread is doing a join on this thread, it will detect that the thread
+ // is dead when it gets notified.
+ java_lang_Thread::set_stillborn(threadObj());
+ // Thread is exiting. So set thread_status field in java.lang.Thread class to TERMINATED.
+ java_lang_Thread::set_thread_status(threadObj(), java_lang_Thread::TERMINATED);
+ java_lang_Thread::set_thread(threadObj(), NULL);
+ lock.notify_all(thread);
+ // Ignore pending exception (ThreadDeath), since we are exiting anyway
+ thread->clear_pending_exception();
+}
+
+// For any new cleanup additions, please check to see if they need to be applied to
+// cleanup_failed_attach_current_thread as well.
+void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
+ assert(this == JavaThread::current(), "thread consistency check");
+ if (!InitializeJavaLangSystem) return;
+
+ HandleMark hm(this);
+ Handle uncaught_exception(this, this->pending_exception());
+ this->clear_pending_exception();
+ Handle threadObj(this, this->threadObj());
+ assert(threadObj.not_null(), "Java thread object should be created");
+
+ if (get_thread_profiler() != NULL) {
+ get_thread_profiler()->disengage();
+ ResourceMark rm;
+ get_thread_profiler()->print(get_thread_name());
+ }
+
+
+ // FIXIT: This code should be moved into the else part, once a reliable 1.2/1.3 check is in place
+ {
+ EXCEPTION_MARK;
+
+ CLEAR_PENDING_EXCEPTION;
+ }
+ // FIXIT: The is_null check is only so it works better on JDK 1.2 VMs. This
+ // has to be fixed by a runtime query method
+ if (!destroy_vm || JDK_Version::is_jdk12x_version()) {
+ // JSR-166: change call from ThreadGroup.uncaughtException to
+ // java.lang.Thread.dispatchUncaughtException
+ if (uncaught_exception.not_null()) {
+ Handle group(this, java_lang_Thread::threadGroup(threadObj()));
+ Events::log("uncaught exception INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT",
+ (address)uncaught_exception(), (address)threadObj(), (address)group());
+ {
+ EXCEPTION_MARK;
+ // Check if the method Thread.dispatchUncaughtException() exists. If so
+ // call it. Otherwise we have an older library without the JSR-166 changes,
+ // so call ThreadGroup.uncaughtException()
+ KlassHandle recvrKlass(THREAD, threadObj->klass());
+ CallInfo callinfo;
+ KlassHandle thread_klass(THREAD, SystemDictionary::thread_klass());
+ LinkResolver::resolve_virtual_call(callinfo, threadObj, recvrKlass, thread_klass,
+ vmSymbolHandles::dispatchUncaughtException_name(),
+ vmSymbolHandles::throwable_void_signature(),
+ KlassHandle(), false, false, THREAD);
+ CLEAR_PENDING_EXCEPTION;
+ methodHandle method = callinfo.selected_method();
+ if (method.not_null()) {
+ JavaValue result(T_VOID);
+ JavaCalls::call_virtual(&result,
+ threadObj, thread_klass,
+ vmSymbolHandles::dispatchUncaughtException_name(),
+ vmSymbolHandles::throwable_void_signature(),
+ uncaught_exception,
+ THREAD);
+ } else {
+ KlassHandle thread_group(THREAD, SystemDictionary::threadGroup_klass());
+ JavaValue result(T_VOID);
+ JavaCalls::call_virtual(&result,
+ group, thread_group,
+ vmSymbolHandles::uncaughtException_name(),
+ vmSymbolHandles::thread_throwable_void_signature(),
+ threadObj, // Arg 1
+ uncaught_exception, // Arg 2
+ THREAD);
+ }
+ CLEAR_PENDING_EXCEPTION;
+ }
+ }
+
+ // Call Thread.exit(). We try 3 times in case we got another Thread.stop during
+ // the execution of the method. If that is not enough, then we don't really care. Thread.stop
+ // is deprecated anyhow.
+ { int count = 3;
+ while (java_lang_Thread::threadGroup(threadObj()) != NULL && (count-- > 0)) {
+ EXCEPTION_MARK;
+ JavaValue result(T_VOID);
+ KlassHandle thread_klass(THREAD, SystemDictionary::thread_klass());
+ JavaCalls::call_virtual(&result,
+ threadObj, thread_klass,
+ vmSymbolHandles::exit_method_name(),
+ vmSymbolHandles::void_method_signature(),
+ THREAD);
+ CLEAR_PENDING_EXCEPTION;
+ }
+ }
+
+ // notify JVMTI
+ if (JvmtiExport::should_post_thread_life()) {
+ JvmtiExport::post_thread_end(this);
+ }
+
+ // We have notified the agents that we are exiting, before we go on,
+ // we must check for a pending external suspend request and honor it
+ // in order to not surprise the thread that made the suspend request.
+ while (true) {
+ {
+ MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
+ if (!is_external_suspend()) {
+ set_terminated(_thread_exiting);
+ ThreadService::current_thread_exiting(this);
+ break;
+ }
+ // Implied else:
+ // Things get a little tricky here. We have a pending external
+ // suspend request, but we are holding the SR_lock so we
+ // can't just self-suspend. So we temporarily drop the lock
+ // and then self-suspend.
+ }
+
+ ThreadBlockInVM tbivm(this);
+ java_suspend_self();
+
+ // We're done with this suspend request, but we have to loop around
+ // and check again. Eventually we will get SR_lock without a pending
+ // external suspend request and will be able to mark ourselves as
+ // exiting.
+ }
+ // no more external suspends are allowed at this point
+ } else {
+ // before_exit() has already posted JVMTI THREAD_END events
+ }
+
+ // Notify waiters on thread object. This has to be done after exit() is called
+ // on the thread (if the thread is the last thread in a daemon ThreadGroup the
+ // group should have the destroyed bit set before waiters are notified).
+ ensure_join(this);
+ assert(!this->has_pending_exception(), "ensure_join should have cleared");
+
+ // 6282335 JNI DetachCurrentThread spec states that all Java monitors
+ // held by this thread must be released. A detach operation must only
+ // get here if there are no Java frames on the stack. Therefore, any
+ // owned monitors at this point MUST be JNI-acquired monitors which are
+ // pre-inflated and in the monitor cache.
+ //
+ // ensure_join() ignores IllegalThreadStateExceptions, and so does this.
+ if (exit_type == jni_detach && JNIDetachReleasesMonitors) {
+ assert(!this->has_last_Java_frame(), "detaching with Java frames?");
+ ObjectSynchronizer::release_monitors_owned_by_thread(this);
+ assert(!this->has_pending_exception(), "release_monitors should have cleared");
+ }
+
+ // These things need to be done while we are still a Java thread. Make sure the thread
+ // is in a consistent state, in case a GC happens
+ assert(_privileged_stack_top == NULL, "must be NULL when we get here");
+
+ if (active_handles() != NULL) {
+ JNIHandleBlock* block = active_handles();
+ set_active_handles(NULL);
+ JNIHandleBlock::release_block(block);
+ }
+
+ if (free_handle_block() != NULL) {
+ JNIHandleBlock* block = free_handle_block();
+ set_free_handle_block(NULL);
+ JNIHandleBlock::release_block(block);
+ }
+
+ // These have to be removed while this is still a valid thread.
+ remove_stack_guard_pages();
+
+ if (UseTLAB) {
+ tlab().make_parsable(true); // retire TLAB
+ }
+
+ // Remove from list of active threads list, and notify VM thread if we are the last non-daemon thread
+ Threads::remove(this);
+}
+
+void JavaThread::cleanup_failed_attach_current_thread() {
+
+ if (get_thread_profiler() != NULL) {
+ get_thread_profiler()->disengage();
+ ResourceMark rm;
+ get_thread_profiler()->print(get_thread_name());
+ }
+
+ if (active_handles() != NULL) {
+ JNIHandleBlock* block = active_handles();
+ set_active_handles(NULL);
+ JNIHandleBlock::release_block(block);
+ }
+
+ if (free_handle_block() != NULL) {
+ JNIHandleBlock* block = free_handle_block();
+ set_free_handle_block(NULL);
+ JNIHandleBlock::release_block(block);
+ }
+
+ if (UseTLAB) {
+ tlab().make_parsable(true); // retire TLAB, if any
+ }
+
+ Threads::remove(this);
+ delete this;
+}
+
+
+JavaThread* JavaThread::active() {
+ Thread* thread = ThreadLocalStorage::thread();
+ assert(thread != NULL, "just checking");
+ if (thread->is_Java_thread()) {
+ return (JavaThread*) thread;
+ } else {
+ assert(thread->is_VM_thread(), "this must be a vm thread");
+ VM_Operation* op = ((VMThread*) thread)->vm_operation();
+ JavaThread *ret = op == NULL ? NULL : (JavaThread *)op->calling_thread();
+ assert(ret->is_Java_thread(), "must be a Java thread");
+ return ret;
+ }
+}
+
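+ // Note for is_lock_owned() below (a brief descriptive sketch): with stack locking,
+ // the displaced header of a locked object points at a BasicLock sitting in the
+ // owning thread's stack frame, so an address inside this thread's stack implies
+ // ownership. The monitor chunks cover locks that have been copied off the stack
+ // (e.g., by deoptimization).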
+bool JavaThread::is_lock_owned(address adr) const {
+ if (lock_is_in_stack(adr)) return true;
+
+ for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) {
+ if (chunk->contains(adr)) return true;
+ }
+
+ return false;
+}
+
+
+void JavaThread::add_monitor_chunk(MonitorChunk* chunk) {
+ chunk->set_next(monitor_chunks());
+ set_monitor_chunks(chunk);
+}
+
+void JavaThread::remove_monitor_chunk(MonitorChunk* chunk) {
+ guarantee(monitor_chunks() != NULL, "must be non empty");
+ if (monitor_chunks() == chunk) {
+ set_monitor_chunks(chunk->next());
+ } else {
+ MonitorChunk* prev = monitor_chunks();
+ while (prev->next() != chunk) prev = prev->next();
+ prev->set_next(chunk->next());
+ }
+}
+
+// JVM support.
+
+// Note: this function shouldn't block if it's called in
+// _thread_in_native_trans state (such as from
+// check_special_condition_for_native_trans()).
+void JavaThread::check_and_handle_async_exceptions(bool check_unsafe_error) {
+
+ if (has_last_Java_frame() && has_async_condition()) {
+ // If we are at a polling page safepoint (not a poll return)
+ // then we must defer async exception because live registers
+ // will be clobbered by the exception path. Poll return is
+ // ok because the call we are returning from already collides
+ // with exception handling registers and so there is no issue.
+ // (The exception handling path kills call result registers but
+ // this is ok since the exception kills the result anyway).
+
+ if (is_at_poll_safepoint()) {
+ // if the code we are returning to has deoptimized we must defer
+ // the exception otherwise live registers get clobbered on the
+ // exception path before deoptimization is able to retrieve them.
+ //
+ RegisterMap map(this, false);
+ frame caller_fr = last_frame().sender(&map);
+ assert(caller_fr.is_compiled_frame(), "what?");
+ if (caller_fr.is_deoptimized_frame()) {
+ if (TraceExceptions) {
+ ResourceMark rm;
+ tty->print_cr("deferred async exception at compiled safepoint");
+ }
+ return;
+ }
+ }
+ }
+
+ JavaThread::AsyncRequests condition = clear_special_runtime_exit_condition();
+ if (condition == _no_async_condition) {
+ // Conditions have changed since has_special_runtime_exit_condition()
+ // was called:
+ // - if we were here only because of an external suspend request,
+ // then that was taken care of above (or cancelled) so we are done
+ // - if we were here because of another async request, then it has
+ // been cleared between the has_special_runtime_exit_condition()
+ // and now so again we are done
+ return;
+ }
+
+ // Check for pending async. exception
+ if (_pending_async_exception != NULL) {
+ // Only overwrite an already pending exception if it is not a ThreadDeath.
+ if (!has_pending_exception() || !pending_exception()->is_a(SystemDictionary::threaddeath_klass())) {
+
+ // We cannot call Exceptions::_throw(...) here because we cannot block
+ set_pending_exception(_pending_async_exception, __FILE__, __LINE__);
+
+ if (TraceExceptions) {
+ ResourceMark rm;
+ tty->print("Async. exception installed at runtime exit (" INTPTR_FORMAT ")", this);
+ if (has_last_Java_frame() ) {
+ frame f = last_frame();
+ tty->print(" (pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " )", f.pc(), f.sp());
+ }
+ tty->print_cr(" of type: %s", instanceKlass::cast(_pending_async_exception->klass())->external_name());
+ }
+ _pending_async_exception = NULL;
+ clear_has_async_exception();
+ }
+ }
+
+ if (check_unsafe_error &&
+ condition == _async_unsafe_access_error && !has_pending_exception()) {
+ condition = _no_async_condition; // done
+ switch (thread_state()) {
+ case _thread_in_vm:
+ {
+ JavaThread* THREAD = this;
+ THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
+ }
+ case _thread_in_native:
+ {
+ ThreadInVMfromNative tiv(this);
+ JavaThread* THREAD = this;
+ THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
+ }
+ case _thread_in_Java:
+ {
+ ThreadInVMfromJava tiv(this);
+ JavaThread* THREAD = this;
+ THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in a recent unsafe memory access operation in compiled Java code");
+ }
+ default:
+ ShouldNotReachHere();
+ }
+ }
+
+ assert(condition == _no_async_condition || has_pending_exception() ||
+ (!check_unsafe_error && condition == _async_unsafe_access_error),
+ "must have handled the async condition, if no exception");
+}
+
+void JavaThread::handle_special_runtime_exit_condition(bool check_asyncs) {
+ //
+ // Check for pending external suspend. Internal suspend requests do
+ // not use handle_special_runtime_exit_condition().
+ // If JNIEnv proxies are allowed, don't self-suspend if the target
+ // thread is not the current thread. In older versions of jdbx, jdbx
+ // threads could call into the VM with another thread's JNIEnv so we
+ // can be here operating on behalf of a suspended thread (4432884).
+ bool do_self_suspend = is_external_suspend_with_lock();
+ if (do_self_suspend && (!AllowJNIEnvProxy || this == JavaThread::current())) {
+ //
+ // Because thread is external suspended the safepoint code will count
+ // thread as at a safepoint. This can be odd because we can be here
+ // as _thread_in_Java which would normally transition to _thread_blocked
+ // at a safepoint. We would like to mark the thread as _thread_blocked
+ // before calling java_suspend_self like all other callers of it but
+ // we must then observe proper safepoint protocol. (We can't leave
+ // _thread_blocked with a safepoint in progress). However we can be
+ // here as _thread_in_native_trans so we can't use a normal transition
+ // constructor/destructor pair because they assert on that type of
+ // transition. We could do something like:
+ //
+ // JavaThreadState state = thread_state();
+ // set_thread_state(_thread_in_vm);
+ // {
+ // ThreadBlockInVM tbivm(this);
+ // java_suspend_self()
+ // }
+ // set_thread_state(_thread_in_vm_trans);
+ // if (safepoint) block;
+ // set_thread_state(state);
+ //
+ // but that is pretty messy. Instead we just go with the way the
+ // code has worked before and note that this is the only path to
+ // java_suspend_self that doesn't put the thread in _thread_blocked
+ // mode.
+
+ frame_anchor()->make_walkable(this);
+ java_suspend_self();
+
+ // We might be here for reasons in addition to the self-suspend request
+ // so check for other async requests.
+ }
+
+ if (check_asyncs) {
+ check_and_handle_async_exceptions();
+ }
+}
+
+void JavaThread::send_thread_stop(oop java_throwable) {
+ assert(Thread::current()->is_VM_thread(), "should be in the vm thread");
+ assert(Threads_lock->is_locked(), "Threads_lock should be locked by safepoint code");
+ assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
+
+ // Do not throw asynchronous exceptions against the compiler thread
+ // (the compiler thread should not be a Java thread -- fix in 1.4.2)
+ if (is_Compiler_thread()) return;
+
+ // This is a change from JDK 1.1, but JDK 1.2 will also do it:
+ if (java_throwable->is_a(SystemDictionary::threaddeath_klass())) {
+ java_lang_Thread::set_stillborn(threadObj());
+ }
+
+ {
+ // Actually throw the Throwable against the target Thread - however
+ // only if there is no thread death exception installed already.
+ if (_pending_async_exception == NULL || !_pending_async_exception->is_a(SystemDictionary::threaddeath_klass())) {
+ // If the topmost frame is a runtime stub, then we are calling into
+ // OptoRuntime from compiled code. Some runtime stubs (new, monitor_exit..)
+ // must deoptimize the caller before continuing, as the compiled exception handler table
+ // may not be valid
+ if (has_last_Java_frame()) {
+ frame f = last_frame();
+ if (f.is_runtime_frame() || f.is_safepoint_blob_frame()) {
+ // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
+ RegisterMap reg_map(this, UseBiasedLocking);
+ frame compiled_frame = f.sender(&reg_map);
+ if (compiled_frame.can_be_deoptimized()) {
+ Deoptimization::deoptimize(this, compiled_frame, &reg_map);
+ }
+ }
+ }
+
+ // Set async. pending exception in thread.
+ set_pending_async_exception(java_throwable);
+
+ if (TraceExceptions) {
+ ResourceMark rm;
+ tty->print_cr("Pending Async. exception installed of type: %s", instanceKlass::cast(_pending_async_exception->klass())->external_name());
+ }
+ // for AbortVMOnException flag
+ NOT_PRODUCT(Exceptions::debug_check_abort(instanceKlass::cast(_pending_async_exception->klass())->external_name()));
+ }
+ }
+
+
+ // Interrupt thread so it will wake up from a potential wait()
+ Thread::interrupt(this);
+}
+
+// External suspension mechanism.
+//
+ // Tell the VM to suspend a thread whenever it knows that it does not hold on
+ // to any VM_locks and it is at a transition.
+ // Self-suspension will happen on the transition out of the VM.
+ // Catch "this" coming in from JNIEnv pointers when the thread has been freed.
+//
+// Guarantees on return:
+// + Target thread will not execute any new bytecode (that's why we need to
+// force a safepoint)
+// + Target thread will not enter any new monitors
+//
+void JavaThread::java_suspend() {
+ { MutexLocker mu(Threads_lock);
+ if (!Threads::includes(this) || is_exiting() || this->threadObj() == NULL) {
+ return;
+ }
+ }
+
+ { MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
+ if (!is_external_suspend()) {
+ // a racing resume has cancelled us; bail out now
+ return;
+ }
+
+ // suspend is done
+ uint32_t debug_bits = 0;
+ // Warning: is_ext_suspend_completed() may temporarily drop the
+ // SR_lock to allow the thread to reach a stable thread state if
+ // it is currently in a transient thread state.
+ if (is_ext_suspend_completed(false /* !called_by_wait */,
+ SuspendRetryDelay, &debug_bits) ) {
+ return;
+ }
+ }
+
+ VM_ForceSafepoint vm_suspend;
+ VMThread::execute(&vm_suspend);
+}
+
+// Part II of external suspension.
+// A JavaThread self suspends when it detects a pending external suspend
+// request. This is usually on transitions. It is also done in places
+// where continuing to the next transition would surprise the caller,
+// e.g., monitor entry.
+//
+// Returns the number of times that the thread self-suspended.
+//
+// Note: DO NOT call java_suspend_self() when you just want to block current
+// thread. java_suspend_self() is the second stage of cooperative
+// suspension for external suspend requests and should only be used
+// to complete an external suspend request.
+//
+int JavaThread::java_suspend_self() {
+ int ret = 0;
+
+ // we are in the process of exiting so don't suspend
+ if (is_exiting()) {
+ clear_external_suspend();
+ return ret;
+ }
+
+ assert(_anchor.walkable() ||
+ (is_Java_thread() && !((JavaThread*)this)->has_last_Java_frame()),
+ "must have walkable stack");
+
+ MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
+
+ assert(!this->is_any_suspended(),
+ "a thread trying to self-suspend should not already be suspended");
+
+ if (this->is_suspend_equivalent()) {
+ // If we are self-suspending as a result of the lifting of a
+ // suspend equivalent condition, then the suspend_equivalent
+ // flag is not cleared until we set the ext_suspended flag so
+ // that wait_for_ext_suspend_completion() returns consistent
+ // results.
+ this->clear_suspend_equivalent();
+ }
+
+ // A racing resume may have cancelled us before we grabbed SR_lock
+ // above. Or another external suspend request could be waiting for us
+ // by the time we return from SR_lock()->wait(). The thread
+ // that requested the suspension may already be trying to walk our
+ // stack and if we return now, we can change the stack out from under
+ // it. This would be a "bad thing (TM)" and cause the stack walker
+ // to crash. We stay self-suspended until there are no more pending
+ // external suspend requests.
+ while (is_external_suspend()) {
+ ret++;
+ this->set_ext_suspended();
+
+ // _ext_suspended flag is cleared by java_resume()
+ while (is_ext_suspended()) {
+ this->SR_lock()->wait(Mutex::_no_safepoint_check_flag);
+ }
+ }
+
+ return ret;
+}
+
+#ifdef ASSERT
+// verify the JavaThread has not yet been published in the Threads::list, and
+// hence doesn't need protection from concurrent access at this stage
+void JavaThread::verify_not_published() {
+ if (!Threads_lock->owned_by_self()) {
+ MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
+ assert( !Threads::includes(this),
+ "java thread shouldn't have been published yet!");
+ }
+ else {
+ assert( !Threads::includes(this),
+ "java thread shouldn't have been published yet!");
+ }
+}
+#endif
+
+// Slow path when the native==>VM/Java barriers detect a safepoint is in
+// progress or when _suspend_flags is non-zero.
+// Current thread needs to self-suspend if there is a suspend request and/or
+// block if a safepoint is in progress.
+// Async exception ISN'T checked.
+// Note only the ThreadInVMfromNative transition can call this function
+// directly and when thread state is _thread_in_native_trans
+void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread) {
+ assert(thread->thread_state() == _thread_in_native_trans, "wrong state");
+
+ JavaThread *curJT = JavaThread::current();
+ bool do_self_suspend = thread->is_external_suspend();
+
+ assert(!curJT->has_last_Java_frame() || curJT->frame_anchor()->walkable(), "Unwalkable stack in native->vm transition");
+
+ // If JNIEnv proxies are allowed, don't self-suspend if the target
+ // thread is not the current thread. In older versions of jdbx, jdbx
+ // threads could call into the VM with another thread's JNIEnv so we
+ // can be here operating on behalf of a suspended thread (4432884).
+ if (do_self_suspend && (!AllowJNIEnvProxy || curJT == thread)) {
+ JavaThreadState state = thread->thread_state();
+
+ // We mark this thread_blocked state as a suspend-equivalent so
+ // that a caller to is_ext_suspend_completed() won't be confused.
+ // The suspend-equivalent state is cleared by java_suspend_self().
+ thread->set_suspend_equivalent();
+
+ // If the safepoint code sees the _thread_in_native_trans state, it will
+ // wait until the thread changes to other thread state. There is no
+ // guarantee on how soon we can obtain the SR_lock and complete the
+ // self-suspend request. It would be a bad idea to let safepoint wait for
+ // too long. Temporarily change the state to _thread_blocked to
+ // let the VM thread know that this thread is ready for GC. The problem
+ // of changing thread state is that safepoint could happen just after
+ // java_suspend_self() returns after being resumed, and VM thread will
+ // see the _thread_blocked state. We must check for safepoint
+ // after restoring the state and make sure we won't leave while a safepoint
+ // is in progress.
+ thread->set_thread_state(_thread_blocked);
+ thread->java_suspend_self();
+ thread->set_thread_state(state);
+ // Make sure new state is seen by VM thread
+ if (os::is_MP()) {
+ if (UseMembar) {
+ // Force a fence between the write above and read below
+ OrderAccess::fence();
+ } else {
+ // Must use this rather than serialization page in particular on Windows
+ InterfaceSupport::serialize_memory(thread);
+ }
+ }
+ }
+
+ if (SafepointSynchronize::do_call_back()) {
+ // If we are safepointing, then block the caller which may not be
+ // the same as the target thread (see above).
+ SafepointSynchronize::block(curJT);
+ }
+
+ if (thread->is_deopt_suspend()) {
+ thread->clear_deopt_suspend();
+ RegisterMap map(thread, false);
+ frame f = thread->last_frame();
+ while ( f.id() != thread->must_deopt_id() && ! f.is_first_frame()) {
+ f = f.sender(&map);
+ }
+ if (f.id() == thread->must_deopt_id()) {
+ thread->clear_must_deopt_id();
+ // Since we know we're safe to deopt, the current state is a safe state
+ f.deoptimize(thread, true);
+ } else {
+ fatal("missed deoptimization!");
+ }
+ }
+}
+
+// Slow path when the native==>VM/Java barriers detect a safepoint is in
+// progress or when _suspend_flags is non-zero.
+// Current thread needs to self-suspend if there is a suspend request and/or
+// block if a safepoint is in progress.
+// Also check for pending async exception (not including unsafe access error).
+// Note only the native==>VM/Java barriers can call this function and when
+// thread state is _thread_in_native_trans.
+void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) {
+ check_safepoint_and_suspend_for_native_trans(thread);
+
+ if (thread->has_async_exception()) {
+ // We are in _thread_in_native_trans state, don't handle unsafe
+ // access error since that may block.
+ thread->check_and_handle_async_exceptions(false);
+ }
+}
+
+// We need to guarantee the Threads_lock here, since resumes are not
+// allowed during safepoint synchronization
+// Can only resume from an external suspension
+void JavaThread::java_resume() {
+ assert_locked_or_safepoint(Threads_lock);
+
+ // Sanity check: thread is gone, has started exiting or the thread
+ // was not externally suspended.
+ if (!Threads::includes(this) || is_exiting() || !is_external_suspend()) {
+ return;
+ }
+
+ MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
+
+ clear_external_suspend();
+
+ if (is_ext_suspended()) {
+ clear_ext_suspended();
+ SR_lock()->notify_all();
+ }
+}
+
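+ // Rough guard-page layout used by the functions below (the stack grows toward
+ // lower addresses; zone sizes are StackYellowPages / StackRedPages pages):
+ //
+ //   stack_base()                  high address
+ //   ... usable stack ...
+ //   yellow zone                   recoverable overflow; can be disabled/re-enabled
+ //   red zone                      fatal overflow
+ //   stack_base() - stack_size()   low address, bottom of the guarded region
+ //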
+void JavaThread::create_stack_guard_pages() {
+ if (! os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) return;
+ address low_addr = stack_base() - stack_size();
+ size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();
+
+ int allocate = os::allocate_stack_guard_pages();
+ // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);
+
+ if (allocate && !os::commit_memory((char *) low_addr, len)) {
+ warning("Attempt to allocate stack guard pages failed.");
+ return;
+ }
+
+ if (os::guard_memory((char *) low_addr, len)) {
+ _stack_guard_state = stack_guard_enabled;
+ } else {
+ warning("Attempt to protect stack guard pages failed.");
+ if (os::uncommit_memory((char *) low_addr, len)) {
+ warning("Attempt to deallocate stack guard pages failed.");
+ }
+ }
+}
+
+void JavaThread::remove_stack_guard_pages() {
+ if (_stack_guard_state == stack_guard_unused) return;
+ address low_addr = stack_base() - stack_size();
+ size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();
+
+ if (os::allocate_stack_guard_pages()) {
+ if (os::uncommit_memory((char *) low_addr, len)) {
+ _stack_guard_state = stack_guard_unused;
+ } else {
+ warning("Attempt to deallocate stack guard pages failed.");
+ }
+ } else {
+ if (_stack_guard_state == stack_guard_unused) return;
+ if (os::unguard_memory((char *) low_addr, len)) {
+ _stack_guard_state = stack_guard_unused;
+ } else {
+ warning("Attempt to unprotect stack guard pages failed.");
+ }
+ }
+}
+
+void JavaThread::enable_stack_yellow_zone() {
+ assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
+ assert(_stack_guard_state != stack_guard_enabled, "already enabled");
+
+ // The base notation is from the stack's point of view, growing downward.
+ // We need to adjust it to work correctly with guard_memory()
+ address base = stack_yellow_zone_base() - stack_yellow_zone_size();
+
+ guarantee(base < stack_base(),"Error calculating stack yellow zone");
+ guarantee(base < os::current_stack_pointer(),"Error calculating stack yellow zone");
+
+ if (os::guard_memory((char *) base, stack_yellow_zone_size())) {
+ _stack_guard_state = stack_guard_enabled;
+ } else {
+ warning("Attempt to guard stack yellow zone failed.");
+ }
+ enable_register_stack_guard();
+}
+
+void JavaThread::disable_stack_yellow_zone() {
+ assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
+ assert(_stack_guard_state != stack_guard_yellow_disabled, "already disabled");
+
+ // Simply return if called for a thread that does not use guard pages.
+ if (_stack_guard_state == stack_guard_unused) return;
+
+ // The base notation is from the stack's point of view, growing downward.
+ // We need to adjust it to work correctly with guard_memory()
+ address base = stack_yellow_zone_base() - stack_yellow_zone_size();
+
+ if (os::unguard_memory((char *)base, stack_yellow_zone_size())) {
+ _stack_guard_state = stack_guard_yellow_disabled;
+ } else {
+ warning("Attempt to unguard stack yellow zone failed.");
+ }
+ disable_register_stack_guard();
+}
+
+void JavaThread::enable_stack_red_zone() {
+ // The base notation is from the stack's point of view, growing downward.
+ // We need to adjust it to work correctly with guard_memory()
+ assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
+ address base = stack_red_zone_base() - stack_red_zone_size();
+
+ guarantee(base < stack_base(),"Error calculating stack red zone");
+ guarantee(base < os::current_stack_pointer(),"Error calculating stack red zone");
+
+ if(!os::guard_memory((char *) base, stack_red_zone_size())) {
+ warning("Attempt to guard stack red zone failed.");
+ }
+}
+
+void JavaThread::disable_stack_red_zone() {
+ // The base notation is from the stack's point of view, growing downward.
+ // We need to adjust it to work correctly with guard_memory()
+ assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
+ address base = stack_red_zone_base() - stack_red_zone_size();
+ if (!os::unguard_memory((char *)base, stack_red_zone_size())) {
+ warning("Attempt to unguard stack red zone failed.");
+ }
+}
+
+void JavaThread::frames_do(void f(frame*, const RegisterMap* map)) {
+ // ignore if there is no stack
+ if (!has_last_Java_frame()) return;
+ // traverse the stack frames. Starts from top frame.
+ for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
+ frame* fr = fst.current();
+ f(fr, fst.register_map());
+ }
+}
+
+
+#ifndef PRODUCT
+// Deoptimization
+// Function for testing deoptimization
+void JavaThread::deoptimize() {
+ // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
+ StackFrameStream fst(this, UseBiasedLocking);
+ bool deopt = false; // Dump stack only if a deopt actually happens.
+ bool only_at = strlen(DeoptimizeOnlyAt) > 0;
+ // Iterate over all frames in the thread and deoptimize
+ for(; !fst.is_done(); fst.next()) {
+ if(fst.current()->can_be_deoptimized()) {
+
+ if (only_at) {
+ // Deoptimize only at particular bcis. DeoptimizeOnlyAt
+ // consists of comma or carriage return separated numbers so
+ // search for the current bci in that string.
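+ // For example (illustrative only), -XX:DeoptimizeOnlyAt=7,13 restricts this
+ // debugging deopt to frames whose current bci is 7 or 13.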
+ address pc = fst.current()->pc();
+ nmethod* nm = (nmethod*) fst.current()->cb();
+ ScopeDesc* sd = nm->scope_desc_at( pc);
+ char buffer[8];
+ jio_snprintf(buffer, sizeof(buffer), "%d", sd->bci());
+ size_t len = strlen(buffer);
+ const char * found = strstr(DeoptimizeOnlyAt, buffer);
+ while (found != NULL) {
+ if ((found[len] == ',' || found[len] == '\n' || found[len] == '\0') &&
+ (found == DeoptimizeOnlyAt || found[-1] == ',' || found[-1] == '\n')) {
+ // Check that the bci found is bracketed by terminators.
+ break;
+ }
+ found = strstr(found + 1, buffer);
+ }
+ if (!found) {
+ continue;
+ }
+ }
+
+ if (DebugDeoptimization && !deopt) {
+ deopt = true; // One-time only print before deopt
+ tty->print_cr("[BEFORE Deoptimization]");
+ trace_frames();
+ trace_stack();
+ }
+ Deoptimization::deoptimize(this, *fst.current(), fst.register_map());
+ }
+ }
+
+ if (DebugDeoptimization && deopt) {
+ tty->print_cr("[AFTER Deoptimization]");
+ trace_frames();
+ }
+}
+
+
+// Make zombies
+void JavaThread::make_zombies() {
+ for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
+ if (fst.current()->can_be_deoptimized()) {
+ // it is a Java nmethod
+ nmethod* nm = CodeCache::find_nmethod(fst.current()->pc());
+ nm->make_not_entrant();
+ }
+ }
+}
+#endif // PRODUCT
+
+
+void JavaThread::deoptimized_wrt_marked_nmethods() {
+ if (!has_last_Java_frame()) return;
+ // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
+ StackFrameStream fst(this, UseBiasedLocking);
+ for(; !fst.is_done(); fst.next()) {
+ if (fst.current()->should_be_deoptimized()) {
+ Deoptimization::deoptimize(this, *fst.current(), fst.register_map());
+ }
+ }
+}
+
+
+// GC support
+static void frame_gc_epilogue(frame* f, const RegisterMap* map) { f->gc_epilogue(); }
+
+void JavaThread::gc_epilogue() {
+ frames_do(frame_gc_epilogue);
+}
+
+
+static void frame_gc_prologue(frame* f, const RegisterMap* map) { f->gc_prologue(); }
+
+void JavaThread::gc_prologue() {
+ frames_do(frame_gc_prologue);
+}
+
+
+void JavaThread::oops_do(OopClosure* f) {
+ // The ThreadProfiler oops_do is done from FlatProfiler::oops_do
+ // since there may be more than one thread using each ThreadProfiler.
+
+ // Traverse the GCHandles
+ Thread::oops_do(f);
+
+ assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
+ (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
+
+ if (has_last_Java_frame()) {
+
+ // Traverse the privileged stack
+ if (_privileged_stack_top != NULL) {
+ _privileged_stack_top->oops_do(f);
+ }
+
+ // traverse the registered growable array
+ if (_array_for_gc != NULL) {
+ for (int index = 0; index < _array_for_gc->length(); index++) {
+ f->do_oop(_array_for_gc->adr_at(index));
+ }
+ }
+
+ // Traverse the monitor chunks
+ for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) {
+ chunk->oops_do(f);
+ }
+
+ // Traverse the execution stack
+ for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
+ fst.current()->oops_do(f, fst.register_map());
+ }
+ }
+
+ // callee_target is never live across a gc point so NULL it here should
+ // it still contain a methodOop.
+
+ set_callee_target(NULL);
+
+ assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!");
+ // If we have deferred set_locals there might be oops waiting to be
+ // written
+ GrowableArray<jvmtiDeferredLocalVariableSet*>* list = deferred_locals();
+ if (list != NULL) {
+ for (int i = 0; i < list->length(); i++) {
+ list->at(i)->oops_do(f);
+ }
+ }
+
+ // Traverse instance variables at the end since the GC may be moving things
+ // around using this function
+ f->do_oop((oop*) &_threadObj);
+ f->do_oop((oop*) &_vm_result);
+ f->do_oop((oop*) &_vm_result_2);
+ f->do_oop((oop*) &_exception_oop);
+ f->do_oop((oop*) &_pending_async_exception);
+
+ if (jvmti_thread_state() != NULL) {
+ jvmti_thread_state()->oops_do(f);
+ }
+}
+
+void JavaThread::nmethods_do() {
+ // Traverse the GCHandles
+ Thread::nmethods_do();
+
+ assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
+ (has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
+
+ if (has_last_Java_frame()) {
+ // Traverse the execution stack
+ for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
+ fst.current()->nmethods_do();
+ }
+ }
+}
+
+// Printing
+const char* _get_thread_state_name(JavaThreadState _thread_state) {
+ switch (_thread_state) {
+ case _thread_uninitialized: return "_thread_uninitialized";
+ case _thread_new: return "_thread_new";
+ case _thread_new_trans: return "_thread_new_trans";
+ case _thread_in_native: return "_thread_in_native";
+ case _thread_in_native_trans: return "_thread_in_native_trans";
+ case _thread_in_vm: return "_thread_in_vm";
+ case _thread_in_vm_trans: return "_thread_in_vm_trans";
+ case _thread_in_Java: return "_thread_in_Java";
+ case _thread_in_Java_trans: return "_thread_in_Java_trans";
+ case _thread_blocked: return "_thread_blocked";
+ case _thread_blocked_trans: return "_thread_blocked_trans";
+ default: return "unknown thread state";
+ }
+}
+
+#ifndef PRODUCT
+void JavaThread::print_thread_state_on(outputStream *st) const {
+ st->print_cr(" JavaThread state: %s", _get_thread_state_name(_thread_state));
+};
+void JavaThread::print_thread_state() const {
+ print_thread_state_on(tty);
+};
+#endif // PRODUCT
+
+// Called by Threads::print() for VM_PrintThreads operation
+void JavaThread::print_on(outputStream *st) const {
+ st->print("\"%s\" ", get_thread_name());
+ oop thread_oop = threadObj();
+ if (thread_oop != NULL && java_lang_Thread::is_daemon(thread_oop)) st->print("daemon ");
+ Thread::print_on(st);
+ // print guess for valid stack memory region (assume 4K pages); helps lock debugging
+ st->print_cr("[" INTPTR_FORMAT ".." INTPTR_FORMAT "]", (intptr_t)last_Java_sp() & ~right_n_bits(12), highest_lock());
+ if (thread_oop != NULL && JDK_Version::is_gte_jdk15x_version()) {
+ st->print_cr(" java.lang.Thread.State: %s", java_lang_Thread::thread_status_name(thread_oop));
+ }
+#ifndef PRODUCT
+ print_thread_state_on(st);
+ _safepoint_state->print_on(st);
+#endif // PRODUCT
+}
+
+// Called by fatal error handler. The difference between this and
+ // JavaThread::print() is that we can't grab a lock or allocate memory.
+void JavaThread::print_on_error(outputStream* st, char *buf, int buflen) const {
+ st->print("JavaThread \"%s\"", get_thread_name_string(buf, buflen));
+ oop thread_obj = threadObj();
+ if (thread_obj != NULL) {
+ if (java_lang_Thread::is_daemon(thread_obj)) st->print(" daemon");
+ }
+ st->print(" [");
+ st->print("%s", _get_thread_state_name(_thread_state));
+ if (osthread()) {
+ st->print(", id=%d", osthread()->thread_id());
+ }
+ st->print(", stack(" PTR_FORMAT "," PTR_FORMAT ")",
+ _stack_base - _stack_size, _stack_base);
+ st->print("]");
+ return;
+}
+
+// Verification
+
+static void frame_verify(frame* f, const RegisterMap *map) { f->verify(map); }
+
+void JavaThread::verify() {
+ // Verify oops in the thread.
+ oops_do(&VerifyOopClosure::verify_oop);
+
+ // Verify the stack frames.
+ frames_do(frame_verify);
+}
+
+// CR 6300358 (sub-CR 2137150)
+// Most callers of this method assume that it can't return NULL but a
+// thread may not have a name whilst it is in the process of attaching to
+// the VM - see CR 6412693, and there are places where a JavaThread can be
+ // seen prior to having its threadObj set (eg JNI attaching threads and
+// if vm exit occurs during initialization). These cases can all be accounted
+// for such that this method never returns NULL.
+const char* JavaThread::get_thread_name() const {
+#ifdef ASSERT
+ // early safepoints can hit while current thread does not yet have TLS
+ if (!SafepointSynchronize::is_at_safepoint()) {
+ Thread *cur = Thread::current();
+ if (!(cur->is_Java_thread() && cur == this)) {
+ // Current JavaThreads are allowed to get their own name without
+ // the Threads_lock.
+ assert_locked_or_safepoint(Threads_lock);
+ }
+ }
+#endif // ASSERT
+ return get_thread_name_string();
+}
+
+// Returns a non-NULL representation of this thread's name, or a suitable
+// descriptive string if there is no set name
+const char* JavaThread::get_thread_name_string(char* buf, int buflen) const {
+ const char* name_str;
+ oop thread_obj = threadObj();
+ if (thread_obj != NULL) {
+ typeArrayOop name = java_lang_Thread::name(thread_obj);
+ if (name != NULL) {
+ if (buf == NULL) {
+ name_str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
+ }
+ else {
+ name_str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length(), buf, buflen);
+ }
+ }
+ else if (is_attaching()) { // workaround for 6412693 - see 6404306
+ name_str = "<no-name - thread is attaching>";
+ }
+ else {
+ name_str = Thread::name();
+ }
+ }
+ else {
+ name_str = Thread::name();
+ }
+ assert(name_str != NULL, "unexpected NULL thread name");
+ return name_str;
+}
+
+
+const char* JavaThread::get_threadgroup_name() const {
+ debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
+ oop thread_obj = threadObj();
+ if (thread_obj != NULL) {
+ oop thread_group = java_lang_Thread::threadGroup(thread_obj);
+ if (thread_group != NULL) {
+ typeArrayOop name = java_lang_ThreadGroup::name(thread_group);
+ // ThreadGroup.name can be null
+ if (name != NULL) {
+ const char* str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
+ return str;
+ }
+ }
+ }
+ return NULL;
+}
+
+const char* JavaThread::get_parent_name() const {
+ debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
+ oop thread_obj = threadObj();
+ if (thread_obj != NULL) {
+ oop thread_group = java_lang_Thread::threadGroup(thread_obj);
+ if (thread_group != NULL) {
+ oop parent = java_lang_ThreadGroup::parent(thread_group);
+ if (parent != NULL) {
+ typeArrayOop name = java_lang_ThreadGroup::name(parent);
+ // ThreadGroup.name can be null
+ if (name != NULL) {
+ const char* str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
+ return str;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+ThreadPriority JavaThread::java_priority() const {
+ oop thr_oop = threadObj();
+ if (thr_oop == NULL) return NormPriority; // Bootstrapping
+ ThreadPriority priority = java_lang_Thread::priority(thr_oop);
+ assert(MinPriority <= priority && priority <= MaxPriority, "sanity check");
+ return priority;
+}
+
+void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) {
+
+ assert(Threads_lock->owner() == Thread::current(), "must have threads lock");
+ // Link Java Thread object <-> C++ Thread
+
+ // Get the C++ thread object (an oop) from the JNI handle (a jthread)
+ // and put it into a new Handle. The Handle "thread_oop" can then
+ // be used to pass the C++ thread object to other methods.
+
+ // Set the Java level thread object (jthread) field of the
+ // new thread (a JavaThread *) to C++ thread object using the
+ // "thread_oop" handle.
+
+ // Set the thread field (a JavaThread *) of the
+ // oop representing the java_lang_Thread to the new thread (a JavaThread *).
+
+ Handle thread_oop(Thread::current(),
+ JNIHandles::resolve_non_null(jni_thread));
+ assert(instanceKlass::cast(thread_oop->klass())->is_linked(),
+ "must be initialized");
+ set_threadObj(thread_oop());
+ java_lang_Thread::set_thread(thread_oop(), this);
+
+ if (prio == NoPriority) {
+ prio = java_lang_Thread::priority(thread_oop());
+ assert(prio != NoPriority, "A valid priority should be present");
+ }
+
+ // Push the Java priority down to the native thread; needs Threads_lock
+ Thread::set_priority(this, prio);
+
+ // Add the new thread to the Threads list and set it in motion.
+ // We must have threads lock in order to call Threads::add.
+ // It is crucial that we do not block before the thread is
+ // added to the Threads list; if a GC happens before then, the java_thread oop
+ // will not be visited by the GC.
+ Threads::add(this);
+}
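+
+ // Illustrative only: the usual caller of prepare() is the thread-start path
+ // (JVM_StartThread in jvm.cpp), which roughly does
+ //
+ //   JavaThread* native_thread = new JavaThread(&thread_entry, stack_size);
+ //   { MutexLocker mu(Threads_lock);
+ //     native_thread->prepare(jthread);
+ //   }
+ //   Thread::start(native_thread);
+ //
+ // so the Threads_lock assertion above holds by construction.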
+
+oop JavaThread::current_park_blocker() {
+ // Support for JSR-166 locks
+ oop thread_oop = threadObj();
+ if (thread_oop != NULL && JDK_Version::supports_thread_park_blocker()) {
+ return java_lang_Thread::park_blocker(thread_oop);
+ }
+ return NULL;
+}
+
+
+void JavaThread::print_stack_on(outputStream* st) {
+ if (!has_last_Java_frame()) return;
+ ResourceMark rm;
+ HandleMark hm;
+
+ RegisterMap reg_map(this);
+ vframe* start_vf = last_java_vframe(&reg_map);
+ int count = 0;
+ for (vframe* f = start_vf; f; f = f->sender() ) {
+ if (f->is_java_frame()) {
+ javaVFrame* jvf = javaVFrame::cast(f);
+ java_lang_Throwable::print_stack_element(st, jvf->method(), jvf->bci());
+
+ // Print out lock information
+ if (JavaMonitorsInStackTrace) {
+ jvf->print_lock_info_on(st, count);
+ }
+ } else {
+ // Ignore non-Java frames
+ }
+
+ // Bail-out case for too deep stacks
+ count++;
+ if (MaxJavaStackTraceDepth == count) return;
+ }
+}
+
+
+// JVMTI PopFrame support
+void JavaThread::popframe_preserve_args(ByteSize size_in_bytes, void* start) {
+ assert(_popframe_preserved_args == NULL, "should not wipe out old PopFrame preserved arguments");
+ if (in_bytes(size_in_bytes) != 0) {
+ _popframe_preserved_args = NEW_C_HEAP_ARRAY(char, in_bytes(size_in_bytes));
+ _popframe_preserved_args_size = in_bytes(size_in_bytes);
+ Copy::conjoint_bytes(start, _popframe_preserved_args, _popframe_preserved_args_size);
+ }
+}
+
+void* JavaThread::popframe_preserved_args() {
+ return _popframe_preserved_args;
+}
+
+ByteSize JavaThread::popframe_preserved_args_size() {
+ return in_ByteSize(_popframe_preserved_args_size);
+}
+
+WordSize JavaThread::popframe_preserved_args_size_in_words() {
+ int sz = in_bytes(popframe_preserved_args_size());
+ assert(sz % wordSize == 0, "argument size must be multiple of wordSize");
+ return in_WordSize(sz / wordSize);
+}
+
+void JavaThread::popframe_free_preserved_args() {
+ assert(_popframe_preserved_args != NULL, "should not free PopFrame preserved arguments twice");
+ FREE_C_HEAP_ARRAY(char, (char*) _popframe_preserved_args);
+ _popframe_preserved_args = NULL;
+ _popframe_preserved_args_size = 0;
+}
+
+#ifndef PRODUCT
+
+void JavaThread::trace_frames() {
+ tty->print_cr("[Describe stack]");
+ int frame_no = 1;
+ for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
+ tty->print(" %d. ", frame_no++);
+ fst.current()->print_value_on(tty,this);
+ tty->cr();
+ }
+}
+
+
+void JavaThread::trace_stack_from(vframe* start_vf) {
+ ResourceMark rm;
+ int vframe_no = 1;
+ for (vframe* f = start_vf; f; f = f->sender() ) {
+ if (f->is_java_frame()) {
+ javaVFrame::cast(f)->print_activation(vframe_no++);
+ } else {
+ f->print();
+ }
+ if (vframe_no > StackPrintLimit) {
+ tty->print_cr("...<more frames>...");
+ return;
+ }
+ }
+}
+
+
+void JavaThread::trace_stack() {
+ if (!has_last_Java_frame()) return;
+ ResourceMark rm;
+ HandleMark hm;
+ RegisterMap reg_map(this);
+ trace_stack_from(last_java_vframe(&reg_map));
+}
+
+
+#endif // PRODUCT
+
+
+javaVFrame* JavaThread::last_java_vframe(RegisterMap *reg_map) {
+ assert(reg_map != NULL, "a map must be given");
+ frame f = last_frame();
+ for (vframe* vf = vframe::new_vframe(&f, reg_map, this); vf; vf = vf->sender() ) {
+ if (vf->is_java_frame()) return javaVFrame::cast(vf);
+ }
+ return NULL;
+}
+
+
+klassOop JavaThread::security_get_caller_class(int depth) {
+ vframeStream vfst(this);
+ vfst.security_get_caller_frame(depth);
+ if (!vfst.at_end()) {
+ return vfst.method()->method_holder();
+ }
+ return NULL;
+}
+
+static void compiler_thread_entry(JavaThread* thread, TRAPS) {
+ assert(thread->is_Compiler_thread(), "must be compiler thread");
+ CompileBroker::compiler_thread_loop();
+}
+
+// Create a CompilerThread
+CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters)
+: JavaThread(&compiler_thread_entry) {
+ _env = NULL;
+ _log = NULL;
+ _task = NULL;
+ _queue = queue;
+ _counters = counters;
+
+#ifndef PRODUCT
+ _ideal_graph_printer = NULL;
+#endif
+}
+
+
+// ======= Threads ========
+
+ // The Threads class links together all active threads, and provides
+ // operations over all threads. It is protected by its own Mutex
+ // lock, which is also used in other contexts to keep the thread
+ // being operated on from exiting and going away unexpectedly
+ // (e.g., during safepoint synchronization).
+
+JavaThread* Threads::_thread_list = NULL;
+int Threads::_number_of_threads = 0;
+int Threads::_number_of_non_daemon_threads = 0;
+int Threads::_return_code = 0;
+size_t JavaThread::_stack_size_at_create = 0;
+
+// All JavaThreads
+#define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
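+ // Illustrative use of the macro above (callers must hold the Threads_lock or be
+ // at a safepoint, as threads_do() below asserts):
+ //
+ //   ALL_JAVA_THREADS(p) {
+ //     p->oops_do(f);
+ //   }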
+
+void os_stream();
+
+// All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
+void Threads::threads_do(ThreadClosure* tc) {
+ assert_locked_or_safepoint(Threads_lock);
+ // ALL_JAVA_THREADS iterates through all JavaThreads
+ ALL_JAVA_THREADS(p) {
+ tc->do_thread(p);
+ }
+ // Someday we could have a table or list of all non-JavaThreads.
+ // For now, just manually iterate through them.
+ tc->do_thread(VMThread::vm_thread());
+ Universe::heap()->gc_threads_do(tc);
+ tc->do_thread(WatcherThread::watcher_thread());
+ // If CompilerThreads ever become non-JavaThreads, add them here
+}
+
+jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
+
+ // Check version
+ if (!is_supported_jni_version(args->version)) return JNI_EVERSION;
+
+ // Initialize the output stream module
+ ostream_init();
+
+ // Process java launcher properties.
+ Arguments::process_sun_java_launcher_properties(args);
+
+ // Initialize the os module before using TLS
+ os::init();
+
+ // Initialize system properties.
+ Arguments::init_system_properties();
+
+ // Parse arguments
+ jint parse_result = Arguments::parse(args);
+ if (parse_result != JNI_OK) return parse_result;
+
+ if (PauseAtStartup) {
+ os::pause();
+ }
+
+ HS_DTRACE_PROBE(hotspot, vm__init__begin);
+
+ // Record VM creation timing statistics
+ TraceVmCreationTime create_vm_timer;
+ create_vm_timer.start();
+
+ // Timing (must come after argument parsing)
+ TraceTime timer("Create VM", TraceStartupTime);
+
+ // Initialize the os module after parsing the args
+ jint os_init_2_result = os::init_2();
+ if (os_init_2_result != JNI_OK) return os_init_2_result;
+
+ // Initialize output stream logging
+ ostream_init_log();
+
+ // Convert -Xrun to -agentlib: if there is no JVM_OnLoad
+ // Must be before create_vm_init_agents()
+ if (Arguments::init_libraries_at_startup()) {
+ convert_vm_init_libraries_to_agents();
+ }
+
+ // Launch -agentlib/-agentpath and converted -Xrun agents
+ if (Arguments::init_agents_at_startup()) {
+ create_vm_init_agents();
+ }
+
+ // Initialize Threads state
+ _thread_list = NULL;
+ _number_of_threads = 0;
+ _number_of_non_daemon_threads = 0;
+
+ // Initialize TLS
+ ThreadLocalStorage::init();
+
+ // Initialize global data structures and create system classes in heap
+ vm_init_globals();
+
+ // Attach the main thread to this os thread
+ JavaThread* main_thread = new JavaThread();
+ main_thread->set_thread_state(_thread_in_vm);
+ // must do this before set_active_handles and initialize_thread_local_storage
+ // Note: on Solaris initialize_thread_local_storage() will (indirectly)
+ // change the stack size recorded here to one based on the Java thread
+ // stack size. This adjusted size is what is used to figure the placement
+ // of the guard pages.
+ main_thread->record_stack_base_and_size();
+ main_thread->initialize_thread_local_storage();
+
+ main_thread->set_active_handles(JNIHandleBlock::allocate_block());
+
+ if (!main_thread->set_as_starting_thread()) {
+ vm_shutdown_during_initialization(
+ "Failed necessary internal allocation. Out of swap space");
+ delete main_thread;
+ *canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
+ return JNI_ENOMEM;
+ }
+
+ // Enable guard page *after* os::create_main_thread(), otherwise it would
+ // crash Linux VM, see notes in os_linux.cpp.
+ main_thread->create_stack_guard_pages();
+
+ // Initialize the Java-level synchronization subsystem
+ ObjectSynchronizer::Initialize() ;
+
+ // Initialize global modules
+ jint status = init_globals();
+ if (status != JNI_OK) {
+ delete main_thread;
+ *canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
+ return status;
+ }
+
+ HandleMark hm;
+
+ { MutexLocker mu(Threads_lock);
+ Threads::add(main_thread);
+ }
+
+ // Any JVMTI raw monitors entered in onload will transition into
+ // real raw monitors. The VM is set up enough here for raw monitor enter.
+ JvmtiExport::transition_pending_onload_raw_monitors();
+
+ if (VerifyBeforeGC &&
+ Universe::heap()->total_collections() >= VerifyGCStartAt) {
+ Universe::heap()->prepare_for_verify();
+ Universe::verify(); // make sure we're starting with a clean slate
+ }
+
+ // Create the VMThread
+ { TraceTime timer("Start VMThread", TraceStartupTime);
+ VMThread::create();
+ Thread* vmthread = VMThread::vm_thread();
+
+ if (!os::create_thread(vmthread, os::vm_thread))
+ vm_exit_during_initialization("Cannot create VM thread. Out of system resources.");
+
+ // Wait for the VM thread to become ready, and VMThread::run to initialize.
+ // Monitors can have spurious returns, so always check another state flag.
+ {
+ MutexLocker ml(Notify_lock);
+ os::start_thread(vmthread);
+ while (vmthread->active_handles() == NULL) {
+ Notify_lock->wait();
+ }
+ }
+ }
+
+ assert (Universe::is_fully_initialized(), "not initialized");
+ EXCEPTION_MARK;
+
+ // At this point, the Universe is initialized, but we have not executed
+ // any byte code. Now is a good time (the only time) to dump out the
+ // internal state of the JVM for sharing.
+
+ if (DumpSharedSpaces) {
+ Universe::heap()->preload_and_dump(CHECK_0);
+ ShouldNotReachHere();
+ }
+
+ // Always call even when there are no JVMTI environments yet, since environments
+ // may be attached late and JVMTI must track phases of VM execution
+ JvmtiExport::enter_start_phase();
+
+ // Notify JVMTI agents that VM has started (JNI is up) - nop if no agents.
+ JvmtiExport::post_vm_start();
+
+ {
+ TraceTime timer("Initialize java.lang classes", TraceStartupTime);
+
+ if (EagerXrunInit && Arguments::init_libraries_at_startup()) {
+ create_vm_init_libraries();
+ }
+
+ if (InitializeJavaLangString) {
+ initialize_class(vmSymbolHandles::java_lang_String(), CHECK_0);
+ } else {
+ warning("java.lang.String not initialized");
+ }
+
+ // Initialize java_lang.System (needed before creating the thread)
+ if (InitializeJavaLangSystem) {
+ initialize_class(vmSymbolHandles::java_lang_System(), CHECK_0);
+ initialize_class(vmSymbolHandles::java_lang_ThreadGroup(), CHECK_0);
+ Handle thread_group = create_initial_thread_group(CHECK_0);
+ Universe::set_main_thread_group(thread_group());
+ initialize_class(vmSymbolHandles::java_lang_Thread(), CHECK_0);
+ oop thread_object = create_initial_thread(thread_group, main_thread, CHECK_0);
+ main_thread->set_threadObj(thread_object);
+ // Set thread status to running since main thread has
+ // been started and is running.
+ java_lang_Thread::set_thread_status(thread_object,
+ java_lang_Thread::RUNNABLE);
+
+ // The VM preresolves methods to these classes. Make sure they get initialized
+ initialize_class(vmSymbolHandles::java_lang_reflect_Method(), CHECK_0);
+ initialize_class(vmSymbolHandles::java_lang_ref_Finalizer(), CHECK_0);
+ // The VM creates & returns objects of this class. Make sure it's initialized.
+ initialize_class(vmSymbolHandles::java_lang_Class(), CHECK_0);
+ call_initializeSystemClass(CHECK_0);
+ } else {
+ warning("java.lang.System not initialized");
+ }
+
+ // an instance of the OutOfMemoryError exception has been allocated earlier
+ if (InitializeJavaLangExceptionsErrors) {
+ initialize_class(vmSymbolHandles::java_lang_OutOfMemoryError(), CHECK_0);
+ initialize_class(vmSymbolHandles::java_lang_NullPointerException(), CHECK_0);
+ initialize_class(vmSymbolHandles::java_lang_ClassCastException(), CHECK_0);
+ initialize_class(vmSymbolHandles::java_lang_ArrayStoreException(), CHECK_0);
+ initialize_class(vmSymbolHandles::java_lang_ArithmeticException(), CHECK_0);
+ initialize_class(vmSymbolHandles::java_lang_StackOverflowError(), CHECK_0);
+ initialize_class(vmSymbolHandles::java_lang_IllegalMonitorStateException(), CHECK_0);
+ } else {
+ warning("java.lang.OutOfMemoryError has not been initialized");
+ warning("java.lang.NullPointerException has not been initialized");
+ warning("java.lang.ClassCastException has not been initialized");
+ warning("java.lang.ArrayStoreException has not been initialized");
+ warning("java.lang.ArithmeticException has not been initialized");
+ warning("java.lang.StackOverflowError has not been initialized");
+ }
+ }
+
+ // See : bugid 4211085.
+ // Background : the static initializer of java.lang.Compiler tries to read
+ // property"java.compiler" and read & write property "java.vm.info".
+ // When a security manager is installed through the command line
+ // option "-Djava.security.manager", the above properties are not
+ // readable and the static initializer for java.lang.Compiler fails
+ // resulting in a NoClassDefFoundError. This can happen in any
+ // user code which calls methods in java.lang.Compiler.
+ // Hack : the hack is to pre-load and initialize this class, so that only
+ // system domains are on the stack when the properties are read.
+ // Currently even the AWT code has calls to methods in java.lang.Compiler.
+ // On the classic VM, java.lang.Compiler is loaded very early to load the JIT.
+ // Future Fix : the best fix is to grant everyone permissions to read "java.compiler" and
+ // read and write"java.vm.info" in the default policy file. See bugid 4211383
+ // Once that is done, we should remove this hack.
+ initialize_class(vmSymbolHandles::java_lang_Compiler(), CHECK_0);
+
+ // More hackery - the static initializer of java.lang.Compiler adds the string "nojit" to
+ // the java.vm.info property if no jit gets loaded through java.lang.Compiler (the hotspot
+ // compiler does not get loaded through java.lang.Compiler). "java -version" with the
+ // hotspot vm says "nojit" all the time which is confusing. So, we reset it here.
+ // This should also be taken out as soon as 4211383 gets fixed.
+ reset_vm_info_property(CHECK_0);
+
+ quicken_jni_functions();
+
+ // Set flag that basic initialization has completed. Used by exceptions and various
+ // debug stuff that does not work until all basic classes have been initialized.
+ set_init_completed();
+
+ HS_DTRACE_PROBE(hotspot, vm__init__end);
+
+ // record VM initialization completion time
+ Management::record_vm_init_completed();
+
+ // Compute system loader. Note that this has to occur after set_init_completed, since
+ // valid exceptions may be thrown in the process.
+ // Note that we do not use CHECK_0 here since we are inside an EXCEPTION_MARK and
+ // set_init_completed has just been called, causing exceptions not to be shortcut
+ // anymore. We call vm_exit_during_initialization directly instead.
+ SystemDictionary::compute_java_system_loader(THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
+ }
+
+#ifndef SERIALGC
+ // Support for ConcurrentMarkSweep. This should be cleaned up
+ // and better encapsulated. XXX YSR
+ if (UseConcMarkSweepGC) {
+ ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
+ }
+ }
+#endif // SERIALGC
+
+ // Always call this even when there are no JVMTI environments yet, since environments
+ // may be attached late and JVMTI must track phases of VM execution
+ JvmtiExport::enter_live_phase();
+
+ // Signal Dispatcher needs to be started before VMInit event is posted
+ os::signal_init();
+
+ // Start Attach Listener if +StartAttachListener or it can't be started lazily
+ if (!DisableAttachMechanism) {
+ if (StartAttachListener || AttachListener::init_at_startup()) {
+ AttachListener::init();
+ }
+ }
+
+ // Launch -Xrun agents
+ // Must be done in the JVMTI live phase so that for backward compatibility the JDWP
+ // back-end can launch with -Xdebug -Xrunjdwp.
+ if (!EagerXrunInit && Arguments::init_libraries_at_startup()) {
+ create_vm_init_libraries();
+ }
+
+ // Notify JVMTI agents that VM initialization is complete - nop if no agents.
+ JvmtiExport::post_vm_initialized();
+
+ Chunk::start_chunk_pool_cleaner_task();
+
+ // initialize compiler(s)
+ CompileBroker::compilation_init();
+
+ Management::initialize(THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ // The management agent failed to start, possibly due to a
+ // configuration problem; it is responsible for printing a
+ // stack trace if appropriate. Simply exit the VM.
+ vm_exit(1);
+ }
+
+ if (Arguments::has_profile()) FlatProfiler::engage(main_thread, true);
+ if (Arguments::has_alloc_profile()) AllocationProfiler::engage();
+ if (MemProfiling) MemProfiler::engage();
+ StatSampler::engage();
+ if (CheckJNICalls) JniPeriodicChecker::engage();
+ if (CacheTimeMillis) TimeMillisUpdateTask::engage();
+
+ BiasedLocking::init();
+
+
+ // Start up the WatcherThread if there are any periodic tasks
+ // NOTE: All PeriodicTasks should be registered by now. If they
+ // aren't, late joiners might appear to start slowly (we might
+ // take a while to process their first tick).
+ if (PeriodicTask::num_tasks() > 0) {
+ WatcherThread::start();
+ }
+
+ create_vm_timer.end();
+ return JNI_OK;
+}
+
+// type for the Agent_OnLoad and JVM_OnLoad entry points
+extern "C" {
+ typedef jint (JNICALL *OnLoadEntry_t)(JavaVM *, char *, void *);
+}
+// Find a command line agent library and return its entry point for
+// -agentlib: -agentpath: -Xrun
+// num_symbol_entries must be passed-in since only the caller knows the number of symbols in the array.
+static OnLoadEntry_t lookup_on_load(AgentLibrary* agent, const char *on_load_symbols[], size_t num_symbol_entries) {
+ OnLoadEntry_t on_load_entry = NULL;
+ void *library = agent->os_lib(); // check if we have looked it up before
+
+ if (library == NULL) {
+ char buffer[JVM_MAXPATHLEN];
+ char ebuf[1024];
+ const char *name = agent->name();
+
+ if (agent->is_absolute_path()) {
+ library = hpi::dll_load(name, ebuf, sizeof ebuf);
+ if (library == NULL) {
+ // If we can't find the agent, exit.
+ vm_exit_during_initialization("Could not find agent library in absolute path", name);
+ }
+ } else {
+ // Try to load the agent from the standard dll directory
+ hpi::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(), name);
+ library = hpi::dll_load(buffer, ebuf, sizeof ebuf);
+#ifdef KERNEL
+ // Download instrument dll
+ if (library == NULL && strcmp(name, "instrument") == 0) {
+ char *props = Arguments::get_kernel_properties();
+ char *home = Arguments::get_java_home();
+ const char *fmt = "%s/bin/java %s -Dkernel.background.download=false"
+ " sun.jkernel.DownloadManager -download client_jvm";
+ int length = strlen(props) + strlen(home) + strlen(fmt) + 1;
+ char *cmd = AllocateHeap(length);
+ jio_snprintf(cmd, length, fmt, home, props);
+ int status = os::fork_and_exec(cmd);
+ FreeHeap(props);
+ FreeHeap(cmd);
+ if (status == -1) {
+ warning(cmd);
+ vm_exit_during_initialization("fork_and_exec failed: %s",
+ strerror(errno));
+ }
+ // when this comes back the instrument.dll should be where it belongs.
+ library = hpi::dll_load(buffer, ebuf, sizeof ebuf);
+ }
+#endif // KERNEL
+ if (library == NULL) { // Try the local directory
+ char ns[1] = {0};
+ hpi::dll_build_name(buffer, sizeof(buffer), ns, name);
+ library = hpi::dll_load(buffer, ebuf, sizeof ebuf);
+ if (library == NULL) {
+ // If we can't find the agent, exit.
+ vm_exit_during_initialization("Could not find agent library on the library path or in the local directory", name);
+ }
+ }
+ }
+ agent->set_os_lib(library);
+ }
+
+ // Find the OnLoad function.
+ for (size_t symbol_index = 0; symbol_index < num_symbol_entries; symbol_index++) {
+ on_load_entry = CAST_TO_FN_PTR(OnLoadEntry_t, hpi::dll_lookup(library, on_load_symbols[symbol_index]));
+ if (on_load_entry != NULL) break;
+ }
+ return on_load_entry;
+}
+
+// Find the JVM_OnLoad entry point
+static OnLoadEntry_t lookup_jvm_on_load(AgentLibrary* agent) {
+ const char *on_load_symbols[] = JVM_ONLOAD_SYMBOLS;
+ return lookup_on_load(agent, on_load_symbols, sizeof(on_load_symbols) / sizeof(char*));
+}
+
+// Find the Agent_OnLoad entry point
+static OnLoadEntry_t lookup_agent_on_load(AgentLibrary* agent) {
+ const char *on_load_symbols[] = AGENT_ONLOAD_SYMBOLS;
+ return lookup_on_load(agent, on_load_symbols, sizeof(on_load_symbols) / sizeof(char*));
+}
+
+// For backwards compatibility with -Xrun
+// Convert libraries with no JVM_OnLoad, but which have Agent_OnLoad to be
+// treated like -agentpath:
+// Must be called before agent libraries are created
+void Threads::convert_vm_init_libraries_to_agents() {
+ AgentLibrary* agent;
+ AgentLibrary* next;
+
+ for (agent = Arguments::libraries(); agent != NULL; agent = next) {
+ next = agent->next(); // cache the next agent now as this agent may get moved off this list
+ OnLoadEntry_t on_load_entry = lookup_jvm_on_load(agent);
+
+ // If there is a JVM_OnLoad function it will get called later;
+ // otherwise see if there is an Agent_OnLoad
+ if (on_load_entry == NULL) {
+ on_load_entry = lookup_agent_on_load(agent);
+ if (on_load_entry != NULL) {
+ // switch it to the agent list -- so that Agent_OnLoad will be called,
+ // JVM_OnLoad won't be attempted and Agent_OnUnload will
+ Arguments::convert_library_to_agent(agent);
+ } else {
+ vm_exit_during_initialization("Could not find JVM_OnLoad or Agent_OnLoad function in the library", agent->name());
+ }
+ }
+ }
+}
+
+// Create agents for -agentlib: -agentpath: and converted -Xrun
+// Invokes Agent_OnLoad
+// Called very early -- before JavaThreads exist
+void Threads::create_vm_init_agents() {
+ extern struct JavaVM_ main_vm;
+ AgentLibrary* agent;
+
+ JvmtiExport::enter_onload_phase();
+ for (agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
+ OnLoadEntry_t on_load_entry = lookup_agent_on_load(agent);
+
+ if (on_load_entry != NULL) {
+ // Invoke the Agent_OnLoad function
+ jint err = (*on_load_entry)(&main_vm, agent->options(), NULL);
+ if (err != JNI_OK) {
+ vm_exit_during_initialization("agent library failed to init", agent->name());
+ }
+ } else {
+ vm_exit_during_initialization("Could not find Agent_OnLoad function in the agent library", agent->name());
+ }
+ }
+ JvmtiExport::enter_primordial_phase();
+}
+
+extern "C" {
+ typedef void (JNICALL *Agent_OnUnload_t)(JavaVM *);
+}
+
+void Threads::shutdown_vm_agents() {
+ // Send any Agent_OnUnload notifications
+ const char *on_unload_symbols[] = AGENT_ONUNLOAD_SYMBOLS;
+ extern struct JavaVM_ main_vm;
+ for (AgentLibrary* agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
+
+ // Find the Agent_OnUnload function.
+ for (uint symbol_index = 0; symbol_index < ARRAY_SIZE(on_unload_symbols); symbol_index++) {
+ Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t,
+ hpi::dll_lookup(agent->os_lib(), on_unload_symbols[symbol_index]));
+
+ // Invoke the Agent_OnUnload function
+ if (unload_entry != NULL) {
+ JavaThread* thread = JavaThread::current();
+ ThreadToNativeFromVM ttn(thread);
+ HandleMark hm(thread);
+ (*unload_entry)(&main_vm);
+ break;
+ }
+ }
+ }
+}
+
+// Called after the VM is initialized, for -Xrun libraries which have not been converted to agent libraries
+// Invokes JVM_OnLoad
+void Threads::create_vm_init_libraries() {
+ extern struct JavaVM_ main_vm;
+ AgentLibrary* agent;
+
+ for (agent = Arguments::libraries(); agent != NULL; agent = agent->next()) {
+ OnLoadEntry_t on_load_entry = lookup_jvm_on_load(agent);
+
+ if (on_load_entry != NULL) {
+ // Invoke the JVM_OnLoad function
+ JavaThread* thread = JavaThread::current();
+ ThreadToNativeFromVM ttn(thread);
+ HandleMark hm(thread);
+ jint err = (*on_load_entry)(&main_vm, agent->options(), NULL);
+ if (err != JNI_OK) {
+ vm_exit_during_initialization("-Xrun library failed to init", agent->name());
+ }
+ } else {
+ vm_exit_during_initialization("Could not find JVM_OnLoad function in -Xrun library", agent->name());
+ }
+ }
+}
+
+// Last thread running calls java.lang.Shutdown.shutdown()
+void JavaThread::invoke_shutdown_hooks() {
+ HandleMark hm(this);
+
+ // We could get here with a pending exception, if so clear it now.
+ if (this->has_pending_exception()) {
+ this->clear_pending_exception();
+ }
+
+ EXCEPTION_MARK;
+ klassOop k =
+ SystemDictionary::resolve_or_null(vmSymbolHandles::java_lang_Shutdown(),
+ THREAD);
+ if (k != NULL) {
+ // SystemDictionary::resolve_or_null will return null if there was
+ // an exception. If we cannot load the Shutdown class, just don't
+ // call Shutdown.shutdown() at all. This will mean the shutdown hooks
+ // and finalizers (if runFinalizersOnExit is set) won't be run.
+ // Note that if a shutdown hook was registered or runFinalizersOnExit
+ // was called, the Shutdown class would have already been loaded
+ // (Runtime.addShutdownHook and runFinalizersOnExit will load it).
+ instanceKlassHandle shutdown_klass (THREAD, k);
+ JavaValue result(T_VOID);
+ JavaCalls::call_static(&result,
+ shutdown_klass,
+ vmSymbolHandles::shutdown_method_name(),
+ vmSymbolHandles::void_method_signature(),
+ THREAD);
+ }
+ CLEAR_PENDING_EXCEPTION;
+}
+
+// Threads::destroy_vm() is normally called from jni_DestroyJavaVM() when
+// the program falls off the end of main(). Another VM exit path is through
+// vm_exit() when the program calls System.exit() to return a value or when
+// there is a serious error in the VM. The two shutdown paths are not exactly
+// the same, but they share Shutdown.shutdown() at Java level and before_exit()
+// and VM_Exit op at VM level.
+//
+// Shutdown sequence:
+// + Wait until we are the last non-daemon thread to execute
+// <-- everything is still working at this moment -->
+// + Call java.lang.Shutdown.shutdown(), which will invoke Java level
+// shutdown hooks, run finalizers if finalization-on-exit
+// + Call before_exit(), prepare for VM exit
+// > run VM level shutdown hooks (they are registered through JVM_OnExit(),
+// currently the only user of this mechanism is File.deleteOnExit())
+// > stop flat profiler, StatSampler, watcher thread, CMS threads,
+// post thread end and vm death events to JVMTI,
+// stop signal thread
+// + Call JavaThread::exit(), it will:
+// > release JNI handle blocks, remove stack guard pages
+// > remove this thread from Threads list
+// <-- no more Java code from this thread after this point -->
+// + Stop VM thread, it will bring the remaining VM to a safepoint and stop
+// the compiler threads at safepoint
+// <-- do not use anything that could get blocked by Safepoint -->
+// + Disable tracing at JNI/JVM barriers
+// + Set _vm_exited flag for threads that are still running native code
+// + Delete this thread
+// + Call exit_globals()
+// > deletes tty
+// > deletes PerfMemory resources
+// + Return to caller
+
+bool Threads::destroy_vm() {
+ JavaThread* thread = JavaThread::current();
+
+ // Wait until we are the last non-daemon thread to execute
+ { MutexLocker nu(Threads_lock);
+ while (Threads::number_of_non_daemon_threads() > 1 )
+ // This wait should make safepoint checks, wait without a timeout,
+ // and wait as a suspend-equivalent condition.
+ //
+ // Note: If the FlatProfiler is running and this thread is waiting
+ // for another non-daemon thread to finish, then the FlatProfiler
+ // is waiting for the external suspend request on this thread to
+ // complete. wait_for_ext_suspend_completion() will eventually
+ // timeout, but that takes time. Making this wait a suspend-
+ // equivalent condition solves that timeout problem.
+ //
+ Threads_lock->wait(!Mutex::_no_safepoint_check_flag, 0,
+ Mutex::_as_suspend_equivalent_flag);
+ }
+
+ // Hang forever on exit if we are reporting an error.
+ if (ShowMessageBoxOnError && is_error_reported()) {
+ os::infinite_sleep();
+ }
+
+ if (JDK_Version::is_jdk12x_version()) {
+ // We are the last thread running, so check if finalizers should be run.
+ // For 1.3 or later this is done in thread->invoke_shutdown_hooks()
+ HandleMark rm(thread);
+ Universe::run_finalizers_on_exit();
+ } else {
+ // run Java level shutdown hooks
+ thread->invoke_shutdown_hooks();
+ }
+
+ before_exit(thread);
+
+ thread->exit(true);
+
+ // Stop VM thread.
+ {
+ // 4945125 The vm thread comes to a safepoint during exit.
+ // GC vm_operations can get caught at the safepoint, and the
+ // heap is unparseable if they are caught. Grab the Heap_lock
+ // to prevent this. The GC vm_operations will not be able to
+ // queue until after the vm thread is dead.
+ MutexLocker ml(Heap_lock);
+
+ VMThread::wait_for_vm_thread_exit();
+ assert(SafepointSynchronize::is_at_safepoint(), "VM thread should exit at Safepoint");
+ VMThread::destroy();
+ }
+
+ // clean up ideal graph printers
+#if defined(COMPILER2) && !defined(PRODUCT)
+ IdealGraphPrinter::clean_up();
+#endif
+
+ // Now, all Java threads are gone except daemon threads. Daemon threads
+ // running Java code or in VM are stopped by the Safepoint. However,
+ // daemon threads executing native code are still running. But they
+ // will be stopped at native=>Java/VM barriers. Note that we can't
+ // simply kill or suspend them, as it is inherently deadlock-prone.
+
+#ifndef PRODUCT
+ // disable function tracing at JNI/JVM barriers
+ TraceHPI = false;
+ TraceJNICalls = false;
+ TraceJVMCalls = false;
+ TraceRuntimeCalls = false;
+#endif
+
+ VM_Exit::set_vm_exited();
+
+ notify_vm_shutdown();
+
+ delete thread;
+
+ // exit_globals() will delete tty
+ exit_globals();
+
+ return true;
+}
+
+
+jboolean Threads::is_supported_jni_version_including_1_1(jint version) {
+ if (version == JNI_VERSION_1_1) return JNI_TRUE;
+ return is_supported_jni_version(version);
+}
+
+
+jboolean Threads::is_supported_jni_version(jint version) {
+ if (version == JNI_VERSION_1_2) return JNI_TRUE;
+ if (version == JNI_VERSION_1_4) return JNI_TRUE;
+ if (version == JNI_VERSION_1_6) return JNI_TRUE;
+ return JNI_FALSE;
+}
+
+
+void Threads::add(JavaThread* p, bool force_daemon) {
+ // The threads lock must be owned at this point
+ assert_locked_or_safepoint(Threads_lock);
+ p->set_next(_thread_list);
+ _thread_list = p;
+ _number_of_threads++;
+ oop threadObj = p->threadObj();
+ bool daemon = true;
+ // Bootstrapping problem: threadObj can be null for initial
+ // JavaThread (or for threads attached via JNI)
+ if ((!force_daemon) && (threadObj == NULL || !java_lang_Thread::is_daemon(threadObj))) {
+ _number_of_non_daemon_threads++;
+ daemon = false;
+ }
+
+ ThreadService::add_thread(p, daemon);
+
+ // Possible GC point.
+ Events::log("Thread added: " INTPTR_FORMAT, p);
+}
+
+void Threads::remove(JavaThread* p) {
+ // Extra scope needed for Threads_lock, so we can check
+ // that we do not remove a thread without the safepoint code noticing
+ { MutexLocker ml(Threads_lock);
+
+ assert(includes(p), "p must be present");
+
+ JavaThread* current = _thread_list;
+ JavaThread* prev = NULL;
+
+ while (current != p) {
+ prev = current;
+ current = current->next();
+ }
+
+ if (prev) {
+ prev->set_next(current->next());
+ } else {
+ _thread_list = p->next();
+ }
+ _number_of_threads--;
+ oop threadObj = p->threadObj();
+ bool daemon = true;
+ if (threadObj == NULL || !java_lang_Thread::is_daemon(threadObj)) {
+ _number_of_non_daemon_threads--;
+ daemon = false;
+
+ // Only one thread left, do a notify on the Threads_lock so a thread waiting
+ // on destroy_vm will wake up.
+ if (number_of_non_daemon_threads() == 1)
+ Threads_lock->notify_all();
+ }
+ ThreadService::remove_thread(p, daemon);
+
+ // Make sure that the safepoint code disregards this thread. This is needed since
+ // the thread might mess around with locks after this point. This can cause it
+ // to do callbacks into the safepoint code. However, the safepoint code is not aware
+ // of this thread since it is removed from the queue.
+ p->set_terminated_value();
+ } // unlock Threads_lock
+
+ // Since Events::log uses a lock, we grab it outside the Threads_lock
+ Events::log("Thread exited: " INTPTR_FORMAT, p);
+}
+
+// Threads_lock must be held when this is called (or must be called during a safepoint)
+bool Threads::includes(JavaThread* p) {
+ assert(Threads_lock->is_locked(), "sanity check");
+ ALL_JAVA_THREADS(q) {
+ if (q == p ) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Operations on the Threads list for GC. These are not explicitly locked,
+// but the garbage collector must provide a safe context for them to run.
+// In particular, these things should never be called when the Threads_lock
+// is held by some other thread. (Note: the Safepoint abstraction also
+// uses the Threads_lock to guarantee this property. It also makes sure that
+// all threads get blocked when exiting or starting).
+
+void Threads::oops_do(OopClosure* f) {
+ ALL_JAVA_THREADS(p) {
+ p->oops_do(f);
+ }
+ VMThread::vm_thread()->oops_do(f);
+}
+
+void Threads::possibly_parallel_oops_do(OopClosure* f) {
+ // Introduce a mechanism allowing parallel threads to claim threads as
+ // root groups. Overhead should be small enough to use all the time,
+ // even in sequential code.
+ SharedHeap* sh = SharedHeap::heap();
+ bool is_par = (sh->n_par_threads() > 0);
+ int cp = SharedHeap::heap()->strong_roots_parity();
+ ALL_JAVA_THREADS(p) {
+ if (p->claim_oops_do(is_par, cp)) {
+ p->oops_do(f);
+ }
+ }
+ VMThread* vmt = VMThread::vm_thread();
+ if (vmt->claim_oops_do(is_par, cp))
+ vmt->oops_do(f);
+}
+
+#ifndef SERIALGC
+// Used by ParallelScavenge
+void Threads::create_thread_roots_tasks(GCTaskQueue* q) {
+ ALL_JAVA_THREADS(p) {
+ q->enqueue(new ThreadRootsTask(p));
+ }
+ q->enqueue(new ThreadRootsTask(VMThread::vm_thread()));
+}
+
+// Used by Parallel Old
+void Threads::create_thread_roots_marking_tasks(GCTaskQueue* q) {
+ ALL_JAVA_THREADS(p) {
+ q->enqueue(new ThreadRootsMarkingTask(p));
+ }
+ q->enqueue(new ThreadRootsMarkingTask(VMThread::vm_thread()));
+}
+#endif // SERIALGC
+
+void Threads::nmethods_do() {
+ ALL_JAVA_THREADS(p) {
+ p->nmethods_do();
+ }
+ VMThread::vm_thread()->nmethods_do();
+}
+
+void Threads::gc_epilogue() {
+ ALL_JAVA_THREADS(p) {
+ p->gc_epilogue();
+ }
+}
+
+void Threads::gc_prologue() {
+ ALL_JAVA_THREADS(p) {
+ p->gc_prologue();
+ }
+}
+
+void Threads::deoptimized_wrt_marked_nmethods() {
+ ALL_JAVA_THREADS(p) {
+ p->deoptimized_wrt_marked_nmethods();
+ }
+}
+
+
+// Get up to "count" Java threads that are waiting to enter the specified monitor.
+GrowableArray<JavaThread*>* Threads::get_pending_threads(int count,
+ address monitor, bool doLock) {
+ assert(doLock || SafepointSynchronize::is_at_safepoint(),
+ "must grab Threads_lock or be at safepoint");
+ GrowableArray<JavaThread*>* result = new GrowableArray<JavaThread*>(count);
+
+ int i = 0;
+ {
+ MutexLockerEx ml(doLock ? Threads_lock : NULL);
+ ALL_JAVA_THREADS(p) {
+ if (p->is_Compiler_thread()) continue;
+
+ address pending = (address)p->current_pending_monitor();
+ if (pending == monitor) { // found a match
+ if (i < count) result->append(p); // save the first count matches
+ i++;
+ }
+ }
+ }
+ return result;
+}
+
+
+JavaThread *Threads::owning_thread_from_monitor_owner(address owner, bool doLock) {
+ assert(doLock ||
+ Threads_lock->owned_by_self() ||
+ SafepointSynchronize::is_at_safepoint(),
+ "must grab Threads_lock or be at safepoint");
+
+ // NULL owner means not locked so we can skip the search
+ if (owner == NULL) return NULL;
+
+ {
+ MutexLockerEx ml(doLock ? Threads_lock : NULL);
+ ALL_JAVA_THREADS(p) {
+ // first, see if owner is the address of a Java thread
+ if (owner == (address)p) return p;
+ }
+ }
+ assert(UseHeavyMonitors == false, "Did not find owning Java thread with UseHeavyMonitors enabled");
+ if (UseHeavyMonitors) return NULL;
+
+ //
+ // If we didn't find a matching Java thread and we didn't force use of
+ // heavyweight monitors, then the owner is the stack address of the
+ // Lock Word in the owning Java thread's stack.
+ //
+ // We can't use Thread::is_lock_owned() or Thread::lock_is_in_stack() because
+ // those routines rely on the "current" stack pointer. That would be our
+ // stack pointer which is not relevant to the question. Instead we use the
+ // highest lock ever entered by the thread and find the thread that is
+ // higher than and closest to our target stack address.
+ //
+ address least_diff = 0;
+ bool least_diff_initialized = false;
+ JavaThread* the_owner = NULL;
+ {
+ MutexLockerEx ml(doLock ? Threads_lock : NULL);
+ ALL_JAVA_THREADS(q) {
+ address addr = q->highest_lock();
+ if (addr == NULL || addr < owner) continue; // thread has entered no monitors or is too low
+ address diff = (address)(addr - owner);
+ if (!least_diff_initialized || diff < least_diff) {
+ least_diff_initialized = true;
+ least_diff = diff;
+ the_owner = q;
+ }
+ }
+ }
+ assert(the_owner != NULL, "Did not find owning Java thread for lock word address");
+ return the_owner;
+}
+
+// Threads::print_on() is called at safepoint by VM_PrintThreads operation.
+void Threads::print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks) {
+ char buf[32];
+ st->print_cr(os::local_time_string(buf, sizeof(buf)));
+
+ st->print_cr("Full thread dump %s (%s %s):",
+ Abstract_VM_Version::vm_name(),
+ Abstract_VM_Version::vm_release(),
+ Abstract_VM_Version::vm_info_string()
+ );
+ st->cr();
+
+#ifndef SERIALGC
+ // Dump concurrent locks
+ ConcurrentLocksDump concurrent_locks;
+ if (print_concurrent_locks) {
+ concurrent_locks.dump_at_safepoint();
+ }
+#endif // SERIALGC
+
+ ALL_JAVA_THREADS(p) {
+ ResourceMark rm;
+ p->print_on(st);
+ if (print_stacks) {
+ if (internal_format) {
+ p->trace_stack();
+ } else {
+ p->print_stack_on(st);
+ }
+ }
+ st->cr();
+#ifndef SERIALGC
+ if (print_concurrent_locks) {
+ concurrent_locks.print_locks_on(p, st);
+ }
+#endif // SERIALGC
+ }
+
+ VMThread::vm_thread()->print_on(st);
+ st->cr();
+ Universe::heap()->print_gc_threads_on(st);
+ WatcherThread* wt = WatcherThread::watcher_thread();
+ if (wt != NULL) wt->print_on(st);
+ st->cr();
+ CompileBroker::print_compiler_threads_on(st);
+ st->flush();
+}
+
+// Threads::print_on_error() is called by fatal error handler. It's possible
+// that the VM is not at a safepoint and/or the current thread is inside a signal handler.
+// Don't print stack trace, as the stack may not be walkable. Don't allocate
+// memory (even in resource area), it might deadlock the error handler.
+void Threads::print_on_error(outputStream* st, Thread* current, char* buf, int buflen) {
+ bool found_current = false;
+ st->print_cr("Java Threads: ( => current thread )");
+ ALL_JAVA_THREADS(thread) {
+ bool is_current = (current == thread);
+ found_current = found_current || is_current;
+
+ st->print("%s", is_current ? "=>" : " ");
+
+ st->print(PTR_FORMAT, thread);
+ st->print(" ");
+ thread->print_on_error(st, buf, buflen);
+ st->cr();
+ }
+ st->cr();
+
+ st->print_cr("Other Threads:");
+ if (VMThread::vm_thread()) {
+ bool is_current = (current == VMThread::vm_thread());
+ found_current = found_current || is_current;
+ st->print("%s", current == VMThread::vm_thread() ? "=>" : " ");
+
+ st->print(PTR_FORMAT, VMThread::vm_thread());
+ st->print(" ");
+ VMThread::vm_thread()->print_on_error(st, buf, buflen);
+ st->cr();
+ }
+ WatcherThread* wt = WatcherThread::watcher_thread();
+ if (wt != NULL) {
+ bool is_current = (current == wt);
+ found_current = found_current || is_current;
+ st->print("%s", is_current ? "=>" : " ");
+
+ st->print(PTR_FORMAT, wt);
+ st->print(" ");
+ wt->print_on_error(st, buf, buflen);
+ st->cr();
+ }
+ if (!found_current) {
+ st->cr();
+ st->print("=>" PTR_FORMAT " (exited) ", current);
+ current->print_on_error(st, buf, buflen);
+ st->cr();
+ }
+}
+
+
+// Lifecycle management for TSM ParkEvents.
+// ParkEvents are type-stable (TSM).
+// In our particular implementation they happen to be immortal.
+//
+// We manage concurrency on the FreeList with a CAS-based
+// detach-modify-reattach idiom that avoids the ABA problems
+// that would otherwise be present in a simple CAS-based
+// push-pop implementation. (push-one and pop-all)
+//
+// Caveat: Allocate() and Release() may be called from threads
+// other than the thread associated with the Event!
+// If we need to call Allocate() when running as the thread in
+// question then look for the PD calls to initialize native TLS.
+// Native TLS (Win32/Linux/Solaris) can only be initialized or
+// accessed by the associated thread.
+// See also pd_initialize().
+//
+// Note that we could defer associating a ParkEvent with a thread
+// until the 1st time the thread calls park(). unpark() calls to
+// an unprovisioned thread would be ignored. The first park() call
+// for a thread would allocate and associate a ParkEvent and return
+// immediately.
+
+volatile int ParkEvent::ListLock = 0 ;
+ParkEvent * volatile ParkEvent::FreeList = NULL ;
+
+ParkEvent * ParkEvent::Allocate (Thread * t) {
+ // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
+ ParkEvent * ev ;
+
+ // Start by trying to recycle an existing but unassociated
+ // ParkEvent from the global free list.
+ for (;;) {
+ ev = FreeList ;
+ if (ev == NULL) break ;
+ // 1: Detach - sequester or privatize the list
+ // Tantamount to ev = Swap (&FreeList, NULL)
+ if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
+ continue ;
+ }
+
+ // We've detached the list. The list in-hand is now
+ // local to this thread. This thread can operate on the
+ // list without risk of interference from other threads.
+ // 2: Extract -- pop the 1st element from the list.
+ ParkEvent * List = ev->FreeNext ;
+ if (List == NULL) break ;
+ for (;;) {
+ // 3: Try to reattach the residual list
+ guarantee (List != NULL, "invariant") ;
+ ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
+ if (Arv == NULL) break ;
+
+ // New nodes arrived. Try to detach the recent arrivals.
+ if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
+ continue ;
+ }
+ guarantee (Arv != NULL, "invariant") ;
+ // 4: Merge Arv into List
+ ParkEvent * Tail = List ;
+ while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
+ Tail->FreeNext = Arv ;
+ }
+ break ;
+ }
+
+ if (ev != NULL) {
+ guarantee (ev->AssociatedWith == NULL, "invariant") ;
+ } else {
+ // Do this the hard way -- materialize a new ParkEvent.
+ // In rare cases an allocating thread might detach a long list --
+ // installing null into FreeList -- and then stall or be obstructed.
+ // A 2nd thread calling Allocate() would see FreeList == null.
+ // The list held privately by the 1st thread is unavailable to the 2nd thread.
+ // In that case the 2nd thread would have to materialize a new ParkEvent,
+ // even though free ParkEvents existed in the system. In this case we end up
+ // with more ParkEvents in circulation than we need, but the race is
+ // rare and the outcome is benign. Ideally, the # of extant ParkEvents
+ // is equal to the maximum # of threads that existed at any one time.
+ // Because of the race mentioned above, segments of the freelist
+ // can be transiently inaccessible. At worst we may end up with the
+ // # of ParkEvents in circulation slightly above the ideal.
+ // Note that if we didn't have the TSM/immortal constraint, then
+ // when reattaching, above, we could trim the list.
+ ev = new ParkEvent () ;
+ guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
+ }
+ ev->reset() ; // courtesy to caller
+ ev->AssociatedWith = t ; // Associate ev with t
+ ev->FreeNext = NULL ;
+ return ev ;
+}
+
+void ParkEvent::Release (ParkEvent * ev) {
+ if (ev == NULL) return ;
+ guarantee (ev->FreeNext == NULL , "invariant") ;
+ ev->AssociatedWith = NULL ;
+ for (;;) {
+ // Push ev onto FreeList
+ // The mechanism is "half" lock-free.
+ ParkEvent * List = FreeList ;
+ ev->FreeNext = List ;
+ if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
+ }
+}
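+
+// Illustrative usage sketch (an assumption for clarity, not part of the VM sources):
+// a thread typically obtains its event once and returns it when it terminates, e.g.
+//   ParkEvent * ev = ParkEvent::Allocate(Thread::current()); // associate with this thread
+//   ...                                                      // park()/unpark() traffic
+//   ParkEvent::Release(ev);                                  // back onto the global FreeList
+// Release() expects ev->FreeNext == NULL, which Allocate() guarantees on return.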
+
+// Override operator new and delete so we can ensure that the
+// least significant byte of ParkEvent addresses is 0.
+// Beware that excessive address alignment is undesirable
+// as it can result in D$ index usage imbalance as
+// well as bank access imbalance on Niagara-like platforms,
+// although Niagara's hash function should help.
+
+void * ParkEvent::operator new (size_t sz) {
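+ // Over-allocate by 256 bytes, then mask (base + 256) down to a 256-byte
+ // boundary. The result always lies inside the over-allocated block and
+ // has its least significant byte equal to zero, as required above.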
+ return (void *) ((intptr_t (CHeapObj::operator new (sz + 256)) + 256) & -256) ;
+}
+
+void ParkEvent::operator delete (void * a) {
+ // ParkEvents are type-stable and immortal ...
+ ShouldNotReachHere();
+}
+
+
+// 6399321 As a temporary measure we copied & modified the ParkEvent::
+// allocate() and release() code for use by Parkers. The Parker:: forms
+// will eventually be removed as we consolidate and shift over to ParkEvents
+// for both builtin synchronization and JSR166 operations.
+
+volatile int Parker::ListLock = 0 ;
+Parker * volatile Parker::FreeList = NULL ;
+
+Parker * Parker::Allocate (JavaThread * t) {
+ guarantee (t != NULL, "invariant") ;
+ Parker * p ;
+
+ // Start by trying to recycle an existing but unassociated
+ // Parker from the global free list.
+ for (;;) {
+ p = FreeList ;
+ if (p == NULL) break ;
+ // 1: Detach
+ // Tantamount to p = Swap (&FreeList, NULL)
+ if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
+ continue ;
+ }
+
+ // We've detached the list. The list in-hand is now
+ // local to this thread. This thread can operate on the
+ // list without risk of interference from other threads.
+ // 2: Extract -- pop the 1st element from the list.
+ Parker * List = p->FreeNext ;
+ if (List == NULL) break ;
+ for (;;) {
+ // 3: Try to reattach the residual list
+ guarantee (List != NULL, "invariant") ;
+ Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
+ if (Arv == NULL) break ;
+
+ // New nodes arrived. Try to detach the recent arrivals.
+ if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
+ continue ;
+ }
+ guarantee (Arv != NULL, "invariant") ;
+ // 4: Merge Arv into List
+ Parker * Tail = List ;
+ while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
+ Tail->FreeNext = Arv ;
+ }
+ break ;
+ }
+
+ if (p != NULL) {
+ guarantee (p->AssociatedWith == NULL, "invariant") ;
+ } else {
+ // Do this the hard way -- materialize a new Parker.
+ // In rare cases an allocating thread might detach
+ // a long list -- installing null into FreeList -- and
+ // then stall. Another thread calling Allocate() would see
+ // FreeList == null and then invoke the ctor. In this case we
+ // end up with more Parkers in circulation than we need, but
+ // the race is rare and the outcome is benign.
+ // Ideally, the # of extant Parkers is equal to the
+ // maximum # of threads that existed at any one time.
+ // Because of the race mentioned above, segments of the
+ // freelist can be transiently inaccessible. At worst
+ // we may end up with the # of Parkers in circulation
+ // slightly above the ideal.
+ p = new Parker() ;
+ }
+ p->AssociatedWith = t ; // Associate p with t
+ p->FreeNext = NULL ;
+ return p ;
+}
+
+
+void Parker::Release (Parker * p) {
+ if (p == NULL) return ;
+ guarantee (p->AssociatedWith != NULL, "invariant") ;
+ guarantee (p->FreeNext == NULL , "invariant") ;
+ p->AssociatedWith = NULL ;
+ for (;;) {
+ // Push p onto FreeList
+ Parker * List = FreeList ;
+ p->FreeNext = List ;
+ if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
+ }
+}
+
+void Threads::verify() {
+ ALL_JAVA_THREADS(p) {
+ p->verify();
+ }
+ VMThread* thread = VMThread::vm_thread();
+ if (thread != NULL) thread->verify();
+}
diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp
new file mode 100644
index 000000000..72277e039
--- /dev/null
+++ b/src/share/vm/runtime/thread.hpp
@@ -0,0 +1,1757 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class ThreadSafepointState;
+class ThreadProfiler;
+
+class JvmtiThreadState;
+class JvmtiGetLoadedClassesClosure;
+class ThreadStatistics;
+class ConcurrentLocksDump;
+class ParkEvent ;
+
+class ciEnv;
+class CompileThread;
+class CompileLog;
+class CompileTask;
+class CompileQueue;
+class CompilerCounters;
+class vframeArray;
+
+class DeoptResourceMark;
+class jvmtiDeferredLocalVariableSet;
+
+class GCTaskQueue;
+class ThreadClosure;
+class IdealGraphPrinter;
+
+// Class hierarchy
+// - Thread
+// - VMThread
+// - JavaThread
+// - WatcherThread
+
+class Thread: public ThreadShadow {
+ friend class VMStructs;
+ private:
+ // Exception handling
+ // (Note: _pending_exception and friends are in ThreadShadow)
+ //oop _pending_exception; // pending exception for current thread
+ // const char* _exception_file; // file information for exception (debugging only)
+ // int _exception_line; // line information for exception (debugging only)
+
+ // Support for forcing alignment of thread objects for biased locking
+ void* _real_malloc_address;
+ public:
+ void* operator new(size_t size);
+ void operator delete(void* p);
+ private:
+
+ // ***************************************************************
+ // Suspend and resume support
+ // ***************************************************************
+ //
+ // VM suspend/resume no longer exists - it was once used for various
+ // things including safepoints but was deprecated and finally removed
+ // in Java 7. Because VM suspension was considered "internal" and Java-level
+ // suspension was considered "external", this legacy naming scheme remains.
+ //
+ // External suspend/resume requests come from JVM_SuspendThread,
+ // JVM_ResumeThread, JVMTI SuspendThread, and finally JVMTI
+ // ResumeThread. External
+ // suspend requests cause _external_suspend to be set and external
+ // resume requests cause _external_suspend to be cleared.
+ // External suspend requests do not nest on top of other external
+ // suspend requests. The higher level APIs reject suspend requests
+ // for already suspended threads.
+ //
+ // The external_suspend
+ // flag is checked by has_special_runtime_exit_condition(), and the java thread
+ // will self-suspend when handle_special_runtime_exit_condition() is
+ // called. Most uses of the _thread_blocked state in JavaThreads are
+ // considered the same as being externally suspended; if the blocking
+ // condition lifts, the JavaThread will self-suspend. Other places
+ // where VM checks for external_suspend include:
+ // + mutex granting (do not enter monitors when thread is suspended)
+ // + state transitions from _thread_in_native
+ //
+ // In general, java_suspend() does not wait for an external suspend
+ // request to complete. When it returns, the only guarantee is that
+ // the _external_suspend field is true.
+ //
+ // wait_for_ext_suspend_completion() is used to wait for an external
+ // suspend request to complete. External suspend requests are usually
+ // followed by some other interface call that requires the thread to
+ // be quiescent, e.g., GetCallTrace(). By moving the "wait time" into
+ // the interface that requires quiescence, we give the JavaThread a
+ // chance to self-suspend before we need it to be quiescent. This
+ // improves overall suspend/query performance.
+ //
+ // _suspend_flags controls the behavior of java_suspend/java_resume.
+ // It must be set under the protection of SR_lock. Reading the flag
+ // without SR_lock is OK as long as the value is only used as a hint.
+ // (e.g., check _external_suspend first without lock and then recheck
+ // inside SR_lock and finish the suspension)
+ //
+ // _suspend_flags is also overloaded for other "special conditions" so
+ // that a single check indicates whether any special action is needed
+ // eg. for async exceptions.
+ // -------------------------------------------------------------------
+ // Notes:
+ // 1. The suspend/resume logic no longer uses ThreadState in OSThread
+ // but we still update its value to keep other parts of the system (mainly
+ // JVMTI) happy. ThreadState is legacy code (see notes in
+ // osThread.hpp).
+ //
+ // 2. It would be more natural if set_external_suspend() were private and
+ // part of java_suspend(), but that probably would affect the suspend/query
+ // performance. Need more investigation on this.
+ //
+
+ // suspend/resume lock: used for self-suspend
+ Monitor* _SR_lock;
+
+ protected:
+ enum SuspendFlags {
+ // NOTE: avoid using the sign-bit as cc generates different test code
+ // when the sign-bit is used, and sometimes incorrectly - see CR 6398077
+
+ _external_suspend = 0x20000000U, // thread is asked to self suspend
+ _ext_suspended = 0x40000000U, // thread has self-suspended
+ _deopt_suspend = 0x10000000U, // thread needs to self suspend for deopt
+
+ _has_async_exception = 0x00000001U // there is a pending async exception
+ };
+
+ // various suspension related flags - atomically updated
+ // overloaded for async exception checking in check_special_condition_for_native_trans.
+ volatile uint32_t _suspend_flags;
+
+ private:
+ int _num_nested_signal;
+
+ public:
+ void enter_signal_handler() { _num_nested_signal++; }
+ void leave_signal_handler() { _num_nested_signal--; }
+ bool is_inside_signal_handler() const { return _num_nested_signal > 0; }
+
+ private:
+ // Debug tracing
+ static void trace(const char* msg, const Thread* const thread) PRODUCT_RETURN;
+
+ // Active_handles points to a block of handles
+ JNIHandleBlock* _active_handles;
+
+ // One-element thread local free list
+ JNIHandleBlock* _free_handle_block;
+
+ // Point to the last handle mark
+ HandleMark* _last_handle_mark;
+
+ // The parity of the last strong_roots iteration in which this thread was
+ // claimed as a task.
+ jint _oops_do_parity;
+
+ public:
+ void set_last_handle_mark(HandleMark* mark) { _last_handle_mark = mark; }
+ HandleMark* last_handle_mark() const { return _last_handle_mark; }
+ private:
+
+ // debug support for checking if code does allow safepoints or not
+ // GC points in the VM can happen because of allocation, invoking a VM operation, blocking on
+ // a mutex, or blocking on an object synchronizer (Java locking).
+ // If !allow_safepoint(), then an assertion failure will happen in any of the above cases
+ // If !allow_allocation(), then an assertion failure will happen during allocation
+ // (Hence, !allow_safepoint() => !allow_allocation()).
+ //
+ // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
+ //
+ NOT_PRODUCT(int _allow_safepoint_count;) // If 0, the thread allows a safepoint to happen
+ debug_only (int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops.
+
+ // Record when GC is locked out via the GC_locker mechanism
+ CHECK_UNHANDLED_OOPS_ONLY(int _gc_locked_out_count;)
+
+ friend class No_Alloc_Verifier;
+ friend class No_Safepoint_Verifier;
+ friend class Pause_No_Safepoint_Verifier;
+ friend class ThreadLocalStorage;
+ friend class GC_locker;
+
+ // In order for all threads to be able to use fast locking, we need to know the highest stack
+ // address of where a lock is on the stack (stacks normally grow towards lower addresses). This
+ // variable is initially set to NULL, indicating no locks are used by the thread. During the thread's
+ // execution, it will be set whenever locking can happen, i.e., when we call out to Java code or use
+ // an ObjectLocker. The value is never decreased; hence, over the lifetime of a thread it will
+ // approximate the real stack base.
+ address _highest_lock; // Highest stack address where a JavaLock exists
+
+ ThreadLocalAllocBuffer _tlab; // Thread-local eden
+
+ int _vm_operation_started_count; // VM_Operation support
+ int _vm_operation_completed_count; // VM_Operation support
+
+ ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread
+ // is waiting to lock
+ bool _current_pending_monitor_is_from_java; // locking is from Java code
+
+ // ObjectMonitor on which this thread called Object.wait()
+ ObjectMonitor* _current_waiting_monitor;
+
+ // Private thread-local objectmonitor list - a simple cache organized as a SLL.
+ public:
+ ObjectMonitor * omFreeList ;
+ int omFreeCount ; // length of omFreeList
+ int omFreeProvision ; // reload chunk size
+
+ public:
+ enum {
+ is_definitely_current_thread = true
+ };
+
+ // Constructor
+ Thread();
+ virtual ~Thread();
+
+ // initialization
+ void initialize_thread_local_storage();
+
+ // thread entry point
+ virtual void run();
+
+ // Testers
+ virtual bool is_VM_thread() const { return false; }
+ virtual bool is_Java_thread() const { return false; }
+ // Remove this ifdef when C1 is ported to the compiler interface.
+ virtual bool is_Compiler_thread() const { return false; }
+ virtual bool is_hidden_from_external_view() const { return false; }
+ virtual bool is_jvmti_agent_thread() const { return false; }
+ // True iff the thread can perform GC operations at a safepoint.
+ // Generally will be true only of VM thread and parallel GC WorkGang
+ // threads.
+ virtual bool is_GC_task_thread() const { return false; }
+ virtual bool is_Watcher_thread() const { return false; }
+ virtual bool is_ConcurrentGC_thread() const { return false; }
+
+ virtual char* name() const { return (char*)"Unknown thread"; }
+
+ // Returns the current thread
+ static inline Thread* current();
+
+ // Common thread operations
+ static void set_priority(Thread* thread, ThreadPriority priority);
+ static ThreadPriority get_priority(const Thread* const thread);
+ static void start(Thread* thread);
+ static void interrupt(Thread* thr);
+ static bool is_interrupted(Thread* thr, bool clear_interrupted);
+
+ Monitor* SR_lock() const { return _SR_lock; }
+
+ bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }
+
+ void set_suspend_flag(SuspendFlags f) {
+ assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
+ uint32_t flags;
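+ // CAS retry loop: re-read the flag word and retry the compare-and-exchange
+ // until the updated value is installed without losing a concurrent update.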
+ do {
+ flags = _suspend_flags;
+ }
+ while (Atomic::cmpxchg((jint)(flags | f),
+ (volatile jint*)&_suspend_flags,
+ (jint)flags) != (jint)flags);
+ }
+ void clear_suspend_flag(SuspendFlags f) {
+ assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
+ uint32_t flags;
+ do {
+ flags = _suspend_flags;
+ }
+ while (Atomic::cmpxchg((jint)(flags & ~f),
+ (volatile jint*)&_suspend_flags,
+ (jint)flags) != (jint)flags);
+ }
+
+ void set_has_async_exception() {
+ set_suspend_flag(_has_async_exception);
+ }
+ void clear_has_async_exception() {
+ clear_suspend_flag(_has_async_exception);
+ }
+
+ // Support for Unhandled Oop detection
+#ifdef CHECK_UNHANDLED_OOPS
+ private:
+ UnhandledOops *_unhandled_oops;
+ public:
+ UnhandledOops* unhandled_oops() { return _unhandled_oops; }
+ // Mark oop safe for gc. It may be stack allocated but won't move.
+ void allow_unhandled_oop(oop *op) {
+ if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
+ }
+ // Clear oops at safepoint so crashes point to unhandled oop violator
+ void clear_unhandled_oops() {
+ if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
+ }
+ bool is_gc_locked_out() { return _gc_locked_out_count > 0; }
+#endif // CHECK_UNHANDLED_OOPS
+
+ public:
+ // Installs a pending exception to be inserted later
+ static void send_async_exception(oop thread_oop, oop java_throwable);
+
+ // Resource area
+ ResourceArea* resource_area() const { return _resource_area; }
+ void set_resource_area(ResourceArea* area) { _resource_area = area; }
+
+ OSThread* osthread() const { return _osthread; }
+ void set_osthread(OSThread* thread) { _osthread = thread; }
+
+ // JNI handle support
+ JNIHandleBlock* active_handles() const { return _active_handles; }
+ void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
+ JNIHandleBlock* free_handle_block() const { return _free_handle_block; }
+ void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }
+
+ // Internal handle support
+ HandleArea* handle_area() const { return _handle_area; }
+ void set_handle_area(HandleArea* area) { _handle_area = area; }
+
+ // Thread-Local Allocation Buffer (TLAB) support
+ ThreadLocalAllocBuffer& tlab() { return _tlab; }
+ void initialize_tlab() {
+ if (UseTLAB) {
+ tlab().initialize();
+ }
+ }
+
+ // VM operation support
+ int vm_operation_ticket() { return ++_vm_operation_started_count; }
+ int vm_operation_completed_count() { return _vm_operation_completed_count; }
+ void increment_vm_operation_completed_count() { _vm_operation_completed_count++; }
+
+ // For tracking the heavyweight monitor the thread is pending on.
+ ObjectMonitor* current_pending_monitor() {
+ return _current_pending_monitor;
+ }
+ void set_current_pending_monitor(ObjectMonitor* monitor) {
+ _current_pending_monitor = monitor;
+ }
+ void set_current_pending_monitor_is_from_java(bool from_java) {
+ _current_pending_monitor_is_from_java = from_java;
+ }
+ bool current_pending_monitor_is_from_java() {
+ return _current_pending_monitor_is_from_java;
+ }
+
+ // For tracking the ObjectMonitor on which this thread called Object.wait()
+ ObjectMonitor* current_waiting_monitor() {
+ return _current_waiting_monitor;
+ }
+ void set_current_waiting_monitor(ObjectMonitor* monitor) {
+ _current_waiting_monitor = monitor;
+ }
+
+ // GC support
+ // Apply "f->do_oop" to all root oops in "this".
+ void oops_do(OopClosure* f);
+
+ // Handles the parallel case for the method below.
+private:
+ bool claim_oops_do_par_case(int collection_parity);
+public:
+ // Requires that "collection_parity" is that of the current strong roots
+ // iteration. If "is_par" is false, sets the parity of "this" to
+ // "collection_parity", and returns "true". If "is_par" is true,
+ // uses an atomic instruction to set the current thread's parity to
+ // "collection_parity", if it is not already. Returns "true" iff the
+ // calling thread does the update, this indicates that the calling thread
+ // has claimed the thread's stack as a root group in the current
+ // collection.
+ bool claim_oops_do(bool is_par, int collection_parity) {
+ if (!is_par) {
+ _oops_do_parity = collection_parity;
+ return true;
+ } else {
+ return claim_oops_do_par_case(collection_parity);
+ }
+ }
+
+ // Sweeper support
+ void nmethods_do();
+
+ // Fast-locking support
+ address highest_lock() const { return _highest_lock; }
+ void update_highest_lock(address base) { if (base > _highest_lock) _highest_lock = base; }
+
+ // Tells if adr belongs to this thread. This is used
+ // for checking if a lock is owned by the running thread.
+ // Warning: the method can only be used on the running thread
+ // Fast lock support uses these methods
+ virtual bool lock_is_in_stack(address adr) const;
+ virtual bool is_lock_owned(address adr) const;
+
+ // Check if address is in the stack of the thread (not just for locks).
+ bool is_in_stack(address adr) const;
+
+ // Sets this thread as starting thread. Returns failure if thread
+ // creation fails due to lack of memory, too many threads etc.
+ bool set_as_starting_thread();
+
+ protected:
+ // OS data associated with the thread
+ OSThread* _osthread; // Platform-specific thread information
+
+ // Thread local resource area for temporary allocation within the VM
+ ResourceArea* _resource_area;
+
+ // Thread local handle area for allocation of handles within the VM
+ HandleArea* _handle_area;
+
+ // Support for stack overflow handling, get_thread, etc.
+ address _stack_base;
+ size_t _stack_size;
+ uintptr_t _self_raw_id; // used by get_thread (mutable)
+ int _lgrp_id;
+
+ public:
+ // Stack overflow support
+ address stack_base() const { assert(_stack_base != NULL,"Sanity check"); return _stack_base; }
+
+ void set_stack_base(address base) { _stack_base = base; }
+ size_t stack_size() const { return _stack_size; }
+ void set_stack_size(size_t size) { _stack_size = size; }
+ void record_stack_base_and_size();
+
+ int lgrp_id() const { return _lgrp_id; }
+ void set_lgrp_id(int value) { _lgrp_id = value; }
+
+ // Printing
+ void print_on(outputStream* st) const;
+ void print() const { print_on(tty); }
+ virtual void print_on_error(outputStream* st, char* buf, int buflen) const;
+
+ // Debug-only code
+
+#ifdef ASSERT
+ private:
+ // Deadlock detection support for Mutex locks. List of locks owned by the thread.
+ Monitor *_owned_locks;
+ // Mutex::set_owner_implementation is the only place where _owned_locks is modified,
+ // thus the friendship
+ friend class Mutex;
+ friend class Monitor;
+
+ public:
+ void print_owned_locks_on(outputStream* st) const;
+ void print_owned_locks() const { print_owned_locks_on(tty); }
+ Monitor * owned_locks() const { return _owned_locks; }
+ bool owns_locks() const { return owned_locks() != NULL; }
+ bool owns_locks_but_compiled_lock() const;
+
+ // Deadlock detection
+ bool allow_allocation() { return _allow_allocation_count == 0; }
+#endif
+
+ void check_for_valid_safepoint_state(bool potential_vm_operation) PRODUCT_RETURN;
+
+ private:
+ volatile int _jvmti_env_iteration_count;
+
+ public:
+ void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; }
+ void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; }
+ bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; }
+
+ // Code generation
+ static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file ); }
+ static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line ); }
+ static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles ); }
+
+ static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base ); }
+ static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size ); }
+ static ByteSize omFreeList_offset() { return byte_offset_of(Thread, omFreeList); }
+
+#define TLAB_FIELD_OFFSET(name) \
+ static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
+
+ TLAB_FIELD_OFFSET(start)
+ TLAB_FIELD_OFFSET(end)
+ TLAB_FIELD_OFFSET(top)
+ TLAB_FIELD_OFFSET(pf_top)
+ TLAB_FIELD_OFFSET(size) // desired_size
+ TLAB_FIELD_OFFSET(refill_waste_limit)
+ TLAB_FIELD_OFFSET(number_of_refills)
+ TLAB_FIELD_OFFSET(fast_refill_waste)
+ TLAB_FIELD_OFFSET(slow_allocations)
+
+#undef TLAB_FIELD_OFFSET
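+
+ // For illustration only: TLAB_FIELD_OFFSET(top) above expands to
+ //   static ByteSize tlab_top_offset() {
+ //     return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset();
+ //   }
+ // presumably so that generated code can address a TLAB field at a fixed offset from the Thread pointer.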
+
+ public:
+ volatile intptr_t _Stalled ;
+ volatile int _TypeTag ;
+ ParkEvent * _ParkEvent ; // for synchronized()
+ ParkEvent * _SleepEvent ; // for Thread.sleep
+ ParkEvent * _MutexEvent ; // for native internal Mutex/Monitor
+ ParkEvent * _MuxEvent ; // for low-level muxAcquire-muxRelease
+ int NativeSyncRecursion ; // diagnostic
+
+ volatile int _OnTrap ; // Resume-at IP delta
+ jint _hashStateW ; // Marsaglia Shift-XOR thread-local RNG
+ jint _hashStateX ; // thread-specific hashCode generator state
+ jint _hashStateY ;
+ jint _hashStateZ ;
+ void * _schedctl ;
+
+ intptr_t _ScratchA, _ScratchB ; // Scratch locations for fast-path sync code
+ static ByteSize ScratchA_offset() { return byte_offset_of(Thread, _ScratchA ); }
+ static ByteSize ScratchB_offset() { return byte_offset_of(Thread, _ScratchB ); }
+
+ volatile jint rng [4] ; // RNG for spin loop
+
+ // Low-level leaf-lock primitives used to implement synchronization
+ // and native monitor-mutex infrastructure.
+ // Not for general synchronization use.
+ static void SpinAcquire (volatile int * Lock, const char * Name) ;
+ static void SpinRelease (volatile int * Lock) ;
+ static void muxAcquire (volatile intptr_t * Lock, const char * Name) ;
+ static void muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) ;
+ static void muxRelease (volatile intptr_t * Lock) ;
+
+};
+
+// Inline implementation of Thread::current()
+// Thread::current is "hot": it's called > 128K times in the 1st 500 msecs of
+// startup.
+// ThreadLocalStorage::thread is warm -- it's called > 16K times in the same
+// period. This is inlined in thread_<os_family>.inline.hpp.
+
+inline Thread* Thread::current() {
+#ifdef ASSERT
+// This function is very high traffic. Define PARANOID to enable expensive
+// asserts.
+#ifdef PARANOID
+ // Signal handler should call ThreadLocalStorage::get_thread_slow()
+ Thread* t = ThreadLocalStorage::get_thread_slow();
+ assert(t != NULL && !t->is_inside_signal_handler(),
+ "Don't use Thread::current() inside signal handler");
+#endif
+#endif
+ Thread* thread = ThreadLocalStorage::thread();
+ assert(thread != NULL, "just checking");
+ return thread;
+}
+
+// Name support for threads. Non-JavaThread subclasses with multiple
+// uniquely named instances should derive from this.
+class NamedThread: public Thread {
+ friend class VMStructs;
+ enum {
+ max_name_len = 64
+ };
+ private:
+ char* _name;
+ public:
+ NamedThread();
+ ~NamedThread();
+ // May only be called once per thread.
+ void set_name(const char* format, ...);
+ virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; }
+};
+
+// Worker threads are named and have an id for their assigned work.
+class WorkerThread: public NamedThread {
+private:
+ uint _id;
+public:
+ WorkerThread() : _id(0) { }
+ void set_id(uint work_id) { _id = work_id; }
+ uint id() const { return _id; }
+};
+
+// A single WatcherThread is used for simulating timer interrupts.
+class WatcherThread: public Thread {
+ friend class VMStructs;
+ public:
+ virtual void run();
+
+ private:
+ static WatcherThread* _watcher_thread;
+
+ static bool _should_terminate;
+ public:
+ enum SomeConstants {
+ delay_interval = 10 // interrupt delay in milliseconds
+ };
+
+ // Constructor
+ WatcherThread();
+
+ // Tester
+ bool is_Watcher_thread() const { return true; }
+
+ // Printing
+ char* name() const { return (char*)"VM Periodic Task Thread"; }
+ void print_on(outputStream* st) const;
+ void print() const { print_on(tty); }
+
+ // Returns the single instance of WatcherThread
+ static WatcherThread* watcher_thread() { return _watcher_thread; }
+
+ // Create and start the single instance of WatcherThread, or stop it on shutdown
+ static void start();
+ static void stop();
+};
+
+
+class CompilerThread;
+
+typedef void (*ThreadFunction)(JavaThread*, TRAPS);
+
+class JavaThread: public Thread {
+ friend class VMStructs;
+ private:
+ JavaThread* _next; // The next thread in the Threads list
+ oop _threadObj; // The Java level thread object
+
+#ifdef ASSERT
+ private:
+ int _java_call_counter;
+
+ public:
+ int java_call_counter() { return _java_call_counter; }
+ void inc_java_call_counter() { _java_call_counter++; }
+ void dec_java_call_counter() {
+ assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper");
+ _java_call_counter--;
+ }
+ private: // restore original namespace restriction
+#endif // ifdef ASSERT
+
+#ifndef PRODUCT
+ public:
+ enum {
+ jump_ring_buffer_size = 16
+ };
+ private: // restore original namespace restriction
+#endif
+
+ JavaFrameAnchor _anchor; // Encapsulation of the current java frame and its state
+
+ ThreadFunction _entry_point;
+
+ JNIEnv _jni_environment;
+
+ // Deopt support
+ DeoptResourceMark* _deopt_mark; // Holds special ResourceMark for deoptimization
+
+ intptr_t* _must_deopt_id; // id of frame that needs to be deopted once we
+ // transition out of native
+
+ vframeArray* _vframe_array_head; // Holds the head of the list of active vframeArrays
+ vframeArray* _vframe_array_last; // Holds last vFrameArray we popped
+ // Because deoptimization is lazy we must save jvmti requests to set locals
+ // in compiled frames until we deoptimize and we have an interpreter frame.
+ // This holds the pointer to an array (yeah, like there might be more than one) of
+ // descriptions of compiled vframes that have locals that need to be updated.
+ GrowableArray<jvmtiDeferredLocalVariableSet*>* _deferred_locals_updates;
+
+ // Handshake value for fixing 6243940. We need a place for the i2c
+ // adapter to store the callee methodOop. This value is NEVER live
+ // across a gc point so it does NOT have to be gc'd
+ // The handshake is open ended since we can't be certain that it will
+ // be NULLed. This is because we rarely ever see the race and end up
+ // in handle_wrong_method which is the backend of the handshake. See
+ // code in i2c adapters and handle_wrong_method.
+
+ methodOop _callee_target;
+
+ // Oop results of VM runtime calls
+ oop _vm_result; // Used to pass back an oop result into Java code, GC-preserved
+ oop _vm_result_2; // Used to pass back an oop result into Java code, GC-preserved
+
+ MonitorChunk* _monitor_chunks; // Contains the off stack monitors
+ // allocated during deoptimization
+ // and by JNI_MonitorEnter/Exit
+
+ // Async. requests support
+ enum AsyncRequests {
+ _no_async_condition = 0,
+ _async_exception,
+ _async_unsafe_access_error
+ };
+ AsyncRequests _special_runtime_exit_condition; // Enum indicating pending async. request
+ oop _pending_async_exception;
+
+ // Safepoint support
+ public: // Expose _thread_state for SafeFetchInt()
+ volatile JavaThreadState _thread_state;
+ private:
+ ThreadSafepointState *_safepoint_state; // Holds information about a thread during a safepoint
+ address _saved_exception_pc; // Saved pc of instruction where last implicit exception happened
+
+ // JavaThread termination support
+ enum TerminatedTypes {
+ _not_terminated = 0xDEAD - 2,
+ _thread_exiting, // JavaThread::exit() has been called for this thread
+ _thread_terminated, // JavaThread is removed from thread list
+ _vm_exited // JavaThread is still executing native code, but VM is terminated
+ // only VM_Exit can set _vm_exited
+ };
+
+ // In general a JavaThread's _terminated field transitions as follows:
+ //
+ // _not_terminated => _thread_exiting => _thread_terminated
+ //
+ // _vm_exited is a special value to cover the case of a JavaThread
+ // executing native code after the VM itself is terminated.
+ TerminatedTypes _terminated;
+ // suspend/resume support
+ volatile bool _suspend_equivalent; // Suspend equivalent condition
+ jint _in_deopt_handler; // count of deoptimization
+ // handlers thread is in
+ volatile bool _doing_unsafe_access; // Thread may fault due to unsafe access
+ bool _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was
+ // never locked) when throwing an exception. Used by interpreter only.
+
+ // Flag to mark a JNI thread in the process of attaching - See CR 6404306
+ // This flag is never set true other than at construction, and in that case
+ // is shortly thereafter set false
+ volatile bool _is_attaching;
+
+ public:
+ // State of the stack guard pages for this thread.
+ enum StackGuardState {
+ stack_guard_unused, // not needed
+ stack_guard_yellow_disabled,// disabled (temporarily) after stack overflow
+ stack_guard_enabled // enabled
+ };
+
+ private:
+
+ StackGuardState _stack_guard_state;
+
+ // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
+ // used to temporarily pass values into and out of the runtime system during exception handling for
+ // compiled code)
+ volatile oop _exception_oop; // Exception thrown in compiled code
+ volatile address _exception_pc; // PC where exception happened
+ volatile address _exception_handler_pc; // PC for handler of exception
+ volatile int _exception_stack_size; // Size of frame where exception happened
+
+ // support for compilation
+ bool _is_compiling; // is true if a compilation is active in this thread (one compilation per thread possible)
+
+ // support for JNI critical regions
+ jint _jni_active_critical; // count of entries into JNI critical region
+
+ // For deadlock detection.
+ int _depth_first_number;
+
+ // JVMTI PopFrame support
+ // This is set to popframe_pending to signal that top Java frame should be popped immediately
+ int _popframe_condition;
+
+#ifndef PRODUCT
+ int _jmp_ring_index;
+ struct {
+ // We use intptr_t instead of address so debugger doesn't try and display strings
+ intptr_t _target;
+ intptr_t _instruction;
+ const char* _file;
+ int _line;
+ } _jmp_ring[ jump_ring_buffer_size ];
+#endif /* PRODUCT */
+
+ friend class VMThread;
+ friend class ThreadWaitTransition;
+ friend class VM_Exit;
+
+ void initialize(); // Initializes the instance variables
+
+ public:
+ // Constructor
+ JavaThread(bool is_attaching = false); // for main thread and JNI attached threads
+ JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
+ ~JavaThread();
+
+#ifdef ASSERT
+ // verify this JavaThread hasn't been published in the Threads::list yet
+ void verify_not_published();
+#endif
+
+ // JNI function table getter/setter for the JVMTI JNI function table interception API.
+ void set_jni_functions(struct JNINativeInterface_* functionTable) {
+ _jni_environment.functions = functionTable;
+ }
+ struct JNINativeInterface_* get_jni_functions() {
+ return (struct JNINativeInterface_ *)_jni_environment.functions;
+ }
+
+ // Executes Shutdown.shutdown()
+ void invoke_shutdown_hooks();
+
+ // Cleanup on thread exit
+ enum ExitType {
+ normal_exit,
+ jni_detach
+ };
+ void exit(bool destroy_vm, ExitType exit_type = normal_exit);
+
+ void cleanup_failed_attach_current_thread();
+
+ // Testers
+ virtual bool is_Java_thread() const { return true; }
+
+ // compilation
+ void set_is_compiling(bool f) { _is_compiling = f; }
+ bool is_compiling() const { return _is_compiling; }
+
+ // Thread chain operations
+ JavaThread* next() const { return _next; }
+ void set_next(JavaThread* p) { _next = p; }
+
+ // Thread oop. threadObj() can be NULL for initial JavaThread
+ // (or for threads attached via JNI)
+ oop threadObj() const { return _threadObj; }
+ void set_threadObj(oop p) { _threadObj = p; }
+
+ ThreadPriority java_priority() const; // Read from threadObj()
+
+ // Prepare thread and add to priority queue. If a priority is
+ // not specified, use the priority of the thread object. Threads_lock
+ // must be held while this function is called.
+ void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);
+
+ void set_saved_exception_pc(address pc) { _saved_exception_pc = pc; }
+ address saved_exception_pc() { return _saved_exception_pc; }
+
+
+ ThreadFunction entry_point() const { return _entry_point; }
+
+ // Allocates a new Java level thread object for this thread. thread_name may be NULL.
+ void allocate_threadObj(Handle thread_group, char* thread_name, bool daemon, TRAPS);
+
+ // Last frame anchor routines
+
+ JavaFrameAnchor* frame_anchor(void) { return &_anchor; }
+
+ // last_Java_sp
+ bool has_last_Java_frame() const { return _anchor.has_last_Java_frame(); }
+ intptr_t* last_Java_sp() const { return _anchor.last_Java_sp(); }
+
+ // last_Java_pc
+
+ address last_Java_pc(void) { return _anchor.last_Java_pc(); }
+
+ // Safepoint support
+ JavaThreadState thread_state() const { return _thread_state; }
+ void set_thread_state(JavaThreadState s) { _thread_state=s; }
+ ThreadSafepointState *safepoint_state() const { return _safepoint_state; }
+ void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
+ bool is_at_poll_safepoint() { return _safepoint_state->is_at_poll_safepoint(); }
+
+ // thread has called JavaThread::exit() or is terminated
+ bool is_exiting() { return _terminated == _thread_exiting || is_terminated(); }
+ // thread is terminated (no longer on the threads list); we compare
+ // against the two non-terminated values so that a freed JavaThread
+ // will also be considered terminated.
+ bool is_terminated() { return _terminated != _not_terminated && _terminated != _thread_exiting; }
+ void set_terminated(TerminatedTypes t) { _terminated = t; }
+ // special for Threads::remove() which is static:
+ void set_terminated_value() { _terminated = _thread_terminated; }
+ void block_if_vm_exited();
+
+ bool doing_unsafe_access() { return _doing_unsafe_access; }
+ void set_doing_unsafe_access(bool val) { _doing_unsafe_access = val; }
+
+ bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; }
+ void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
+
+
+ // Suspend/resume support for JavaThread
+
+ private:
+ void set_ext_suspended() { set_suspend_flag (_ext_suspended); }
+ void clear_ext_suspended() { clear_suspend_flag(_ext_suspended); }
+
+ public:
+ void java_suspend();
+ void java_resume();
+ int java_suspend_self();
+
+ void check_and_wait_while_suspended() {
+ assert(JavaThread::current() == this, "sanity check");
+
+ bool do_self_suspend;
+ do {
+ // were we externally suspended while we were waiting?
+ do_self_suspend = handle_special_suspend_equivalent_condition();
+ if (do_self_suspend) {
+ // don't surprise the thread that suspended us by returning
+ java_suspend_self();
+ set_suspend_equivalent();
+ }
+ } while (do_self_suspend);
+ }
+ static void check_safepoint_and_suspend_for_native_trans(JavaThread *thread);
+ // Check for async exception in addition to safepoint and suspend request.
+ static void check_special_condition_for_native_trans(JavaThread *thread);
+
+ bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits);
+ bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
+ MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
+ // Warning: is_ext_suspend_completed() may temporarily drop the
+ // SR_lock to allow the thread to reach a stable thread state if
+ // it is currently in a transient thread state.
+ return is_ext_suspend_completed(false /*!called_by_wait */,
+ SuspendRetryDelay, bits);
+ }
+
+ // We cannot allow wait_for_ext_suspend_completion() to run forever or
+ // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
+ // passed as the count and delay parameters. Experiments with specific
+ // calls to wait_for_ext_suspend_completion() can be done by passing
+ // other values in the code. Experiments with all calls can be done
+ // via the appropriate -XX options.
+ bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits);
+
+ void set_external_suspend() { set_suspend_flag (_external_suspend); }
+ void clear_external_suspend() { clear_suspend_flag(_external_suspend); }
+
+ void set_deopt_suspend() { set_suspend_flag (_deopt_suspend); }
+ void clear_deopt_suspend() { clear_suspend_flag(_deopt_suspend); }
+ bool is_deopt_suspend() { return (_suspend_flags & _deopt_suspend) != 0; }
+
+ bool is_external_suspend() const {
+ return (_suspend_flags & _external_suspend) != 0;
+ }
+ // Whenever a thread transitions from native to vm/java it must suspend
+ // if external|deopt suspend is present.
+ bool is_suspend_after_native() const {
+ return (_suspend_flags & (_external_suspend | _deopt_suspend) ) != 0;
+ }
+
+ // external suspend request is completed
+ bool is_ext_suspended() const {
+ return (_suspend_flags & _ext_suspended) != 0;
+ }
+
+ // legacy method that checked for either external suspension or vm suspension
+ bool is_any_suspended() const {
+ return is_ext_suspended();
+ }
+
+ bool is_external_suspend_with_lock() const {
+ MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
+ return is_external_suspend();
+ }
+
+ // Special method to handle a pending external suspend request
+ // when a suspend equivalent condition lifts.
+ bool handle_special_suspend_equivalent_condition() {
+ assert(is_suspend_equivalent(),
+ "should only be called in a suspend equivalence condition");
+ MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
+ bool ret = is_external_suspend();
+ if (!ret) {
+ // not about to self-suspend so clear suspend equivalence
+ clear_suspend_equivalent();
+ }
+ // implied else:
+ // We have a pending external suspend request so we leave the
+ // suspend_equivalent flag set until java_suspend_self() sets
+ // the ext_suspended flag and clears the suspend_equivalent
+ // flag. This ensures that wait_for_ext_suspend_completion()
+ // will return consistent values.
+ return ret;
+ }
+
+ bool is_any_suspended_with_lock() const {
+ MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
+ return is_any_suspended();
+ }
+ // utility methods to see if we are doing some kind of suspension
+ bool is_being_ext_suspended() const {
+ MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
+ return is_ext_suspended() || is_external_suspend();
+ }
+
+ bool is_suspend_equivalent() const { return _suspend_equivalent; }
+
+ void set_suspend_equivalent() { _suspend_equivalent = true; };
+ void clear_suspend_equivalent() { _suspend_equivalent = false; };
+
+ // Thread.stop support
+ void send_thread_stop(oop throwable);
+ AsyncRequests clear_special_runtime_exit_condition() {
+ AsyncRequests x = _special_runtime_exit_condition;
+ _special_runtime_exit_condition = _no_async_condition;
+ return x;
+ }
+
+ // Are any async conditions present?
+ bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); }
+
+ void check_and_handle_async_exceptions(bool check_unsafe_error = true);
+
+ // these next two are also used for self-suspension and async exception support
+ void handle_special_runtime_exit_condition(bool check_asyncs = true);
+
+ // Return true if JavaThread has an asynchronous condition or
+ // if external suspension is requested.
+ bool has_special_runtime_exit_condition() {
+ // We call is_external_suspend() last since external suspend should
+ // be less common. Because we don't use is_external_suspend_with_lock
+ // it is possible that we won't see an asynchronous external suspend
+ // request that has just gotten started, i.e., SR_lock grabbed but
+ // _external_suspend field change either not made yet or not visible
+ // yet. However, this is okay because the request is asynchronous and
+ // we will see the new flag value the next time through. It's also
+ // possible that the external suspend request is dropped after
+ // we have checked is_external_suspend(); we will recheck its value
+ // under SR_lock in java_suspend_self().
+ return (_special_runtime_exit_condition != _no_async_condition) ||
+ is_external_suspend() || is_deopt_suspend();
+ }
+
+ void set_pending_unsafe_access_error() { _special_runtime_exit_condition = _async_unsafe_access_error; }
+
+ void set_pending_async_exception(oop e) {
+ _pending_async_exception = e;
+ _special_runtime_exit_condition = _async_exception;
+ set_has_async_exception();
+ }
+
+ // Fast-locking support
+ bool is_lock_owned(address adr) const;
+
+ // Accessors for vframe array top
+ // The linked list of vframe arrays is sorted on sp. This means that when we
+ // unpack, the head must contain the vframe array to unpack.
+ void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
+ vframeArray* vframe_array_head() const { return _vframe_array_head; }
+
+ // Side structure for deferring the update of java frame locals until deopt occurs
+ GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; }
+ void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; }
+
+ // These only really exist to make debugging deopt problems simpler
+
+ void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
+ vframeArray* vframe_array_last() const { return _vframe_array_last; }
+
+ // The special resourceMark used during deoptimization
+
+ void set_deopt_mark(DeoptResourceMark* value) { _deopt_mark = value; }
+ DeoptResourceMark* deopt_mark(void) { return _deopt_mark; }
+
+ intptr_t* must_deopt_id() { return _must_deopt_id; }
+ void set_must_deopt_id(intptr_t* id) { _must_deopt_id = id; }
+ void clear_must_deopt_id() { _must_deopt_id = NULL; }
+
+ methodOop callee_target() const { return _callee_target; }
+ void set_callee_target (methodOop x) { _callee_target = x; }
+
+ // Oop results of vm runtime calls
+ oop vm_result() const { return _vm_result; }
+ void set_vm_result (oop x) { _vm_result = x; }
+
+ oop vm_result_2() const { return _vm_result_2; }
+ void set_vm_result_2 (oop x) { _vm_result_2 = x; }
+
+ // Exception handling for compiled methods
+ oop exception_oop() const { return _exception_oop; }
+ int exception_stack_size() const { return _exception_stack_size; }
+ address exception_pc() const { return _exception_pc; }
+ address exception_handler_pc() const { return _exception_handler_pc; }
+
+ void set_exception_oop(oop o) { _exception_oop = o; }
+ void set_exception_pc(address a) { _exception_pc = a; }
+ void set_exception_handler_pc(address a) { _exception_handler_pc = a; }
+ void set_exception_stack_size(int size) { _exception_stack_size = size; }
+
+ // Stack overflow support
+ inline size_t stack_available(address cur_sp);
+ address stack_yellow_zone_base()
+ { return (address)(stack_base() - (stack_size() - (stack_red_zone_size() + stack_yellow_zone_size()))); }
+ size_t stack_yellow_zone_size()
+ { return StackYellowPages * os::vm_page_size(); }
+ address stack_red_zone_base()
+ { return (address)(stack_base() - (stack_size() - stack_red_zone_size())); }
+ size_t stack_red_zone_size()
+ { return StackRedPages * os::vm_page_size(); }
+ bool in_stack_yellow_zone(address a)
+ { return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base()); }
+ bool in_stack_red_zone(address a)
+ { return (a <= stack_red_zone_base()) && (a >= (address)((intptr_t)stack_base() - stack_size())); }
+
+ void create_stack_guard_pages();
+ void remove_stack_guard_pages();
+
+ void enable_stack_yellow_zone();
+ void disable_stack_yellow_zone();
+ void enable_stack_red_zone();
+ void disable_stack_red_zone();
+
+ inline bool stack_yellow_zone_disabled();
+ inline bool stack_yellow_zone_enabled();
+
+ // Attempt to reguard the stack after a stack overflow may have occurred.
+ // Returns true if (a) guard pages are not needed on this thread, (b) the
+ // pages are already guarded, or (c) the pages were successfully reguarded.
+ // Returns false if there is not enough stack space to reguard the pages, in
+ // which case the caller should unwind a frame and try again. The argument
+ // should be the caller's (approximate) sp.
+ bool reguard_stack(address cur_sp);
+ // Similar to the above but checks whether the current stack pointer is out of the guard area
+ // and reguard if possible.
+ bool reguard_stack(void);
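+ // Usage sketch (illustrative; 'cur_sp' stands for the caller's approximate stack pointer):
+ //
+ //   if (!thread->reguard_stack(cur_sp)) {
+ //     // not enough headroom to restore the guard pages yet:
+ //     // unwind a frame and try again from the caller's caller
+ //   }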
+
+ // Misc. accessors/mutators
+ void set_do_not_unlock(void) { _do_not_unlock_if_synchronized = true; }
+ void clr_do_not_unlock(void) { _do_not_unlock_if_synchronized = false; }
+ bool do_not_unlock(void) { return _do_not_unlock_if_synchronized; }
+
+#ifndef PRODUCT
+ void record_jump(address target, address instr, const char* file, int line);
+#endif /* PRODUCT */
+
+ // For assembly stub generation
+ static ByteSize threadObj_offset() { return byte_offset_of(JavaThread, _threadObj ); }
+#ifndef PRODUCT
+ static ByteSize jmp_ring_index_offset() { return byte_offset_of(JavaThread, _jmp_ring_index ); }
+ static ByteSize jmp_ring_offset() { return byte_offset_of(JavaThread, _jmp_ring ); }
+#endif /* PRODUCT */
+ static ByteSize jni_environment_offset() { return byte_offset_of(JavaThread, _jni_environment ); }
+ static ByteSize last_Java_sp_offset() {
+ return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
+ }
+ static ByteSize last_Java_pc_offset() {
+ return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset();
+ }
+ static ByteSize frame_anchor_offset() {
+ return byte_offset_of(JavaThread, _anchor);
+ }
+ static ByteSize callee_target_offset() { return byte_offset_of(JavaThread, _callee_target ); }
+ static ByteSize vm_result_offset() { return byte_offset_of(JavaThread, _vm_result ); }
+ static ByteSize vm_result_2_offset() { return byte_offset_of(JavaThread, _vm_result_2 ); }
+ static ByteSize thread_state_offset() { return byte_offset_of(JavaThread, _thread_state ); }
+ static ByteSize saved_exception_pc_offset() { return byte_offset_of(JavaThread, _saved_exception_pc ); }
+ static ByteSize osthread_offset() { return byte_offset_of(JavaThread, _osthread ); }
+ static ByteSize exception_oop_offset() { return byte_offset_of(JavaThread, _exception_oop ); }
+ static ByteSize exception_pc_offset() { return byte_offset_of(JavaThread, _exception_pc ); }
+ static ByteSize exception_handler_pc_offset() { return byte_offset_of(JavaThread, _exception_handler_pc); }
+ static ByteSize exception_stack_size_offset() { return byte_offset_of(JavaThread, _exception_stack_size); }
+ static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state ); }
+ static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags ); }
+
+ static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
+
+ // Returns the jni environment for this thread
+ JNIEnv* jni_environment() { return &_jni_environment; }
+
+ static JavaThread* thread_from_jni_environment(JNIEnv* env) {
+ JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
+ // Only return NULL if thread is off the thread list; starting to
+ // exit should not return NULL.
+ if (thread_from_jni_env->is_terminated()) {
+ thread_from_jni_env->block_if_vm_exited();
+ return NULL;
+ } else {
+ return thread_from_jni_env;
+ }
+ }
+
+ // JNI critical regions. These can nest.
+ bool in_critical() { return _jni_active_critical > 0; }
+ void enter_critical() { assert(Thread::current() == this,
+ "this must be current thread");
+ _jni_active_critical++; }
+ void exit_critical() { assert(Thread::current() == this,
+ "this must be current thread");
+ _jni_active_critical--;
+ assert(_jni_active_critical >= 0,
+ "JNI critical nesting problem?"); }
+
+ // For deadlock detection
+ int depth_first_number() { return _depth_first_number; }
+ void set_depth_first_number(int dfn) { _depth_first_number = dfn; }
+
+ private:
+ void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; }
+
+ public:
+ MonitorChunk* monitor_chunks() const { return _monitor_chunks; }
+ void add_monitor_chunk(MonitorChunk* chunk);
+ void remove_monitor_chunk(MonitorChunk* chunk);
+ bool in_deopt_handler() const { return _in_deopt_handler > 0; }
+ void inc_in_deopt_handler() { _in_deopt_handler++; }
+ void dec_in_deopt_handler() {
+ assert(_in_deopt_handler > 0, "mismatched deopt nesting");
+ if (_in_deopt_handler > 0) { // robustness
+ _in_deopt_handler--;
+ }
+ }
+
+ private:
+ void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; }
+
+ public:
+
+ // Frame iteration; calls the function f for all frames on the stack
+ void frames_do(void f(frame*, const RegisterMap*));
+
+ // Memory operations
+ void oops_do(OopClosure* f);
+
+ // Sweeper operations
+ void nmethods_do();
+
+ // Memory management operations
+ void gc_epilogue();
+ void gc_prologue();
+
+ // Misc. operations
+ char* name() const { return (char*)get_thread_name(); }
+ void print_on(outputStream* st) const;
+ void print() const { print_on(tty); }
+ void print_value();
+ void print_thread_state_on(outputStream* ) const PRODUCT_RETURN;
+ void print_thread_state() const PRODUCT_RETURN;
+ void print_on_error(outputStream* st, char* buf, int buflen) const;
+ void verify();
+ const char* get_thread_name() const;
+private:
+ // factor out low-level mechanics for use in both normal and error cases
+ const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const;
+public:
+ const char* get_threadgroup_name() const;
+ const char* get_parent_name() const;
+
+ // Accessing frames
+ frame last_frame() {
+ _anchor.make_walkable(this);
+ return pd_last_frame();
+ }
+ javaVFrame* last_java_vframe(RegisterMap* reg_map);
+
+ // Returns method at 'depth' java or native frames down the stack
+ // Used for security checks
+ klassOop security_get_caller_class(int depth);
+
+ // Print stack trace in external format
+ void print_stack_on(outputStream* st);
+ void print_stack() { print_stack_on(tty); }
+
+ // Print stack traces in various internal formats
+ void trace_stack() PRODUCT_RETURN;
+ void trace_stack_from(vframe* start_vf) PRODUCT_RETURN;
+ void trace_frames() PRODUCT_RETURN;
+
+ // Returns the number of stack frames on the stack
+ int depth() const;
+
+ // Function for testing deoptimization
+ void deoptimize();
+ void make_zombies();
+
+ void deoptimized_wrt_marked_nmethods();
+
+ // Profiling operation (see fprofile.cpp)
+ public:
+ bool profile_last_Java_frame(frame* fr);
+
+ private:
+ ThreadProfiler* _thread_profiler;
+ private:
+ friend class FlatProfiler; // uses both [gs]et_thread_profiler.
+ friend class FlatProfilerTask; // uses get_thread_profiler.
+ friend class ThreadProfilerMark; // uses get_thread_profiler.
+ ThreadProfiler* get_thread_profiler() { return _thread_profiler; }
+ ThreadProfiler* set_thread_profiler(ThreadProfiler* tp) {
+ ThreadProfiler* result = _thread_profiler;
+ _thread_profiler = tp;
+ return result;
+ }
+
+ // Static operations
+ public:
+ // Returns the running thread as a JavaThread
+ static inline JavaThread* current();
+
+ // Returns the active Java thread. Do not use this if you know you are calling
+ // from a JavaThread, as it's slower than JavaThread::current. If called from
+ // the VMThread, it also returns the JavaThread that instigated the VMThread's
+ // operation. You may not want that either.
+ static JavaThread* active();
+
+ inline CompilerThread* as_CompilerThread();
+
+ public:
+ virtual void run();
+ void thread_main_inner();
+
+ private:
+ // PRIVILEGED STACK
+ PrivilegedElement* _privileged_stack_top;
+ GrowableArray<oop>* _array_for_gc;
+ public:
+
+ // Returns the privileged_stack information.
+ PrivilegedElement* privileged_stack_top() const { return _privileged_stack_top; }
+ void set_privileged_stack_top(PrivilegedElement *e) { _privileged_stack_top = e; }
+ void register_array_for_gc(GrowableArray<oop>* array) { _array_for_gc = array; }
+
+ public:
+ // Thread local information maintained by JVMTI.
+ void set_jvmti_thread_state(JvmtiThreadState *value) { _jvmti_thread_state = value; }
+ JvmtiThreadState *jvmti_thread_state() const { return _jvmti_thread_state; }
+ static ByteSize jvmti_thread_state_offset() { return byte_offset_of(JavaThread, _jvmti_thread_state); }
+ void set_jvmti_get_loaded_classes_closure(JvmtiGetLoadedClassesClosure* value) { _jvmti_get_loaded_classes_closure = value; }
+ JvmtiGetLoadedClassesClosure* get_jvmti_get_loaded_classes_closure() const { return _jvmti_get_loaded_classes_closure; }
+
+ // JVMTI PopFrame support
+ // Setting and clearing popframe_condition
+ // All of these enumerated values are bits. popframe_pending
+ // indicates that a PopFrame() has been requested and not yet been
+ // completed. popframe_processing indicates that the PopFrame() is in
+ // the process of being completed. popframe_force_deopt_reexecution_bit
+ // indicates that special handling is required when returning to a
+ // deoptimized caller.
+ enum PopCondition {
+ popframe_inactive = 0x00,
+ popframe_pending_bit = 0x01,
+ popframe_processing_bit = 0x02,
+ popframe_force_deopt_reexecution_bit = 0x04
+ };
+ PopCondition popframe_condition() { return (PopCondition) _popframe_condition; }
+ void set_popframe_condition(PopCondition c) { _popframe_condition = c; }
+ void set_popframe_condition_bit(PopCondition c) { _popframe_condition |= c; }
+ void clear_popframe_condition() { _popframe_condition = popframe_inactive; }
+ static ByteSize popframe_condition_offset() { return byte_offset_of(JavaThread, _popframe_condition); }
+ bool has_pending_popframe() { return (popframe_condition() & popframe_pending_bit) != 0; }
+ bool popframe_forcing_deopt_reexecution() { return (popframe_condition() & popframe_force_deopt_reexecution_bit) != 0; }
+ void clear_popframe_forcing_deopt_reexecution() { _popframe_condition &= ~popframe_force_deopt_reexecution_bit; }
+#ifdef CC_INTERP
+ bool pop_frame_pending(void) { return ((_popframe_condition & popframe_pending_bit) != 0); }
+ void clr_pop_frame_pending(void) { _popframe_condition = popframe_inactive; }
+ bool pop_frame_in_process(void) { return ((_popframe_condition & popframe_processing_bit) != 0); }
+ void set_pop_frame_in_process(void) { _popframe_condition |= popframe_processing_bit; }
+ void clr_pop_frame_in_process(void) { _popframe_condition &= ~popframe_processing_bit; }
+#endif
+
+ private:
+ // Saved incoming arguments to popped frame.
+ // Used only when popped interpreted frame returns to deoptimized frame.
+ void* _popframe_preserved_args;
+ int _popframe_preserved_args_size;
+
+ public:
+ void popframe_preserve_args(ByteSize size_in_bytes, void* start);
+ void* popframe_preserved_args();
+ ByteSize popframe_preserved_args_size();
+ WordSize popframe_preserved_args_size_in_words();
+ void popframe_free_preserved_args();
+
+
+ private:
+ JvmtiThreadState *_jvmti_thread_state;
+ JvmtiGetLoadedClassesClosure* _jvmti_get_loaded_classes_closure;
+
+ // Used by the interpreter in fullspeed mode for frame pop, method
+ // entry, method exit and single stepping support. This field is
+ // only set to non-zero by the VM_EnterInterpOnlyMode VM operation.
+ // It can be set to zero asynchronously (i.e., without a VM operation
+ // or a lock) so we have to be very careful.
+ int _interp_only_mode;
+
+ public:
+ // used by the interpreter for fullspeed debugging support (see above)
+ static ByteSize interp_only_mode_offset() { return byte_offset_of(JavaThread, _interp_only_mode); }
+ bool is_interp_only_mode() { return (_interp_only_mode != 0); }
+ int get_interp_only_mode() { return _interp_only_mode; }
+ void increment_interp_only_mode() { ++_interp_only_mode; }
+ void decrement_interp_only_mode() { --_interp_only_mode; }
+
+ private:
+ ThreadStatistics *_thread_stat;
+
+ public:
+ ThreadStatistics* get_thread_stat() const { return _thread_stat; }
+
+ // Return a blocker object for which this thread is blocked parking.
+ oop current_park_blocker();
+
+ private:
+ static size_t _stack_size_at_create;
+
+ public:
+ static inline size_t stack_size_at_create(void) {
+ return _stack_size_at_create;
+ }
+ static inline void set_stack_size_at_create(size_t value) {
+ _stack_size_at_create = value;
+ }
+
+ // Machine dependent stuff
+ #include "incls/_thread_pd.hpp.incl"
+
+ public:
+ void set_blocked_on_compilation(bool value) {
+ _blocked_on_compilation = value;
+ }
+
+ bool blocked_on_compilation() {
+ return _blocked_on_compilation;
+ }
+ protected:
+ bool _blocked_on_compilation;
+
+
+ // JSR166 per-thread parker
+private:
+ Parker* _parker;
+public:
+ Parker* parker() { return _parker; }
+
+ // Biased locking support
+private:
+ GrowableArray<MonitorInfo*>* _cached_monitor_info;
+public:
+ GrowableArray<MonitorInfo*>* cached_monitor_info() { return _cached_monitor_info; }
+ void set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; }
+
+ // clearing/querying jni attach status
+ bool is_attaching() const { return _is_attaching; }
+ void set_attached() { _is_attaching = false; OrderAccess::fence(); }
+};
+
+// Inline implementation of JavaThread::current
+inline JavaThread* JavaThread::current() {
+ Thread* thread = ThreadLocalStorage::thread();
+ assert(thread != NULL && thread->is_Java_thread(), "just checking");
+ return (JavaThread*)thread;
+}
+
+inline CompilerThread* JavaThread::as_CompilerThread() {
+ assert(is_Compiler_thread(), "just checking");
+ return (CompilerThread*)this;
+}
+
+inline bool JavaThread::stack_yellow_zone_disabled() {
+ return _stack_guard_state == stack_guard_yellow_disabled;
+}
+
+inline bool JavaThread::stack_yellow_zone_enabled() {
+#ifdef ASSERT
+ if (os::uses_stack_guard_pages()) {
+ assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
+ }
+#endif
+ return _stack_guard_state == stack_guard_enabled;
+}
+
+inline size_t JavaThread::stack_available(address cur_sp) {
+ // This code assumes java stacks grow down
+ address low_addr; // Limit on the address for deepest stack depth
+ if ( _stack_guard_state == stack_guard_unused) {
+ low_addr = stack_base() - stack_size();
+ } else {
+ low_addr = stack_yellow_zone_base();
+ }
+ return cur_sp > low_addr ? cur_sp - low_addr : 0;
+}
+
+// A JavaThread for low memory detection support
+class LowMemoryDetectorThread : public JavaThread {
+ friend class VMStructs;
+public:
+ LowMemoryDetectorThread(ThreadFunction entry_point) : JavaThread(entry_point) {};
+
+ // Hide this thread from external view.
+ bool is_hidden_from_external_view() const { return true; }
+};
+
+// A thread used for Compilation.
+class CompilerThread : public JavaThread {
+ friend class VMStructs;
+ private:
+ CompilerCounters* _counters;
+
+ ciEnv* _env;
+ CompileLog* _log;
+ CompileTask* _task;
+ CompileQueue* _queue;
+
+ public:
+
+ static CompilerThread* current();
+
+ CompilerThread(CompileQueue* queue, CompilerCounters* counters);
+
+ bool is_Compiler_thread() const { return true; }
+ // Hide this compiler thread from external view.
+ bool is_hidden_from_external_view() const { return true; }
+
+ CompileQueue* queue() { return _queue; }
+ CompilerCounters* counters() { return _counters; }
+
+ // Get/set the thread's compilation environment.
+ ciEnv* env() { return _env; }
+ void set_env(ciEnv* env) { _env = env; }
+
+ // Get/set the thread's logging information
+ CompileLog* log() { return _log; }
+ void init_log(CompileLog* log) {
+ // Set once, for good.
+ assert(_log == NULL, "set only once");
+ _log = log;
+ }
+
+#ifndef PRODUCT
+private:
+ IdealGraphPrinter *_ideal_graph_printer;
+public:
+ IdealGraphPrinter *ideal_graph_printer() { return _ideal_graph_printer; }
+ void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; }
+#endif
+
+ // Get/set the thread's current task
+ CompileTask* task() { return _task; }
+ void set_task(CompileTask* task) { _task = task; }
+};
+
+inline CompilerThread* CompilerThread::current() {
+ return JavaThread::current()->as_CompilerThread();
+}
+
+
+// The active thread queue. It also keeps track of the currently used
+// thread priorities.
+class Threads: AllStatic {
+ friend class VMStructs;
+ private:
+ static JavaThread* _thread_list;
+ static int _number_of_threads;
+ static int _number_of_non_daemon_threads;
+ static int _return_code;
+
+ public:
+ // Thread management
+ // force_daemon is a concession to JNI, where we may need to add a
+ // thread to the thread list before allocating its thread object
+ static void add(JavaThread* p, bool force_daemon = false);
+ static void remove(JavaThread* p);
+ static bool includes(JavaThread* p);
+ static JavaThread* first() { return _thread_list; }
+ static void threads_do(ThreadClosure* tc);
+
+ // Initializes the vm and creates the vm thread
+ static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
+ static void convert_vm_init_libraries_to_agents();
+ static void create_vm_init_libraries();
+ static void create_vm_init_agents();
+ static void shutdown_vm_agents();
+ static bool destroy_vm();
+ // Supported VM versions via JNI
+ // Includes JNI_VERSION_1_1
+ static jboolean is_supported_jni_version_including_1_1(jint version);
+ // Does not include JNI_VERSION_1_1
+ static jboolean is_supported_jni_version(jint version);
+
+ // Garbage collection
+ static void follow_other_roots(void f(oop*));
+
+ // Apply "f->do_oop" to all root oops in all threads.
+ // This version may only be called by sequential code.
+ static void oops_do(OopClosure* f);
+ // This version may be called by sequential or parallel code.
+ static void possibly_parallel_oops_do(OopClosure* f);
+ // This creates a list of GCTasks, one per thread.
+ static void create_thread_roots_tasks(GCTaskQueue* q);
+ // This creates a list of GCTasks, one per thread, for marking objects.
+ static void create_thread_roots_marking_tasks(GCTaskQueue* q);
+
+ // Apply "f->do_oop" to roots in all threads that
+ // are part of compiled frames
+ static void compiled_frame_oops_do(OopClosure* f);
+
+ static void convert_hcode_pointers();
+ static void restore_hcode_pointers();
+
+ // Sweeper
+ static void nmethods_do();
+
+ static void gc_epilogue();
+ static void gc_prologue();
+
+ // Verification
+ static void verify();
+ static void print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks);
+ static void print(bool print_stacks, bool internal_format) {
+ // this function is only used by debug.cpp
+ print_on(tty, print_stacks, internal_format, false /* no concurrent lock printed */);
+ }
+ static void print_on_error(outputStream* st, Thread* current, char* buf, int buflen);
+
+ // Get Java threads that are waiting to enter a monitor. If doLock
+ // is true, then Threads_lock is grabbed as needed. Otherwise, the
+ // VM needs to be at a safepoint.
+ static GrowableArray<JavaThread*>* get_pending_threads(int count,
+ address monitor, bool doLock);
+
+ // Get owning Java thread from the monitor's owner field. If doLock
+ // is true, then Threads_lock is grabbed as needed. Otherwise, the
+ // VM needs to be at a safepoint.
+ static JavaThread *owning_thread_from_monitor_owner(address owner,
+ bool doLock);
+
+ // Number of threads on the active threads list
+ static int number_of_threads() { return _number_of_threads; }
+ // Number of non-daemon threads on the active threads list
+ static int number_of_non_daemon_threads() { return _number_of_non_daemon_threads; }
+
+ // Deoptimizes all frames tied to marked nmethods
+ static void deoptimized_wrt_marked_nmethods();
+
+};
+
+
+// Thread iterator
+class ThreadClosure: public StackObj {
+ public:
+ virtual void do_thread(Thread* thread) = 0;
+};
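+
+// Usage sketch (illustrative; "PrintNameClosure" is hypothetical):
+//
+//   class PrintNameClosure : public ThreadClosure {
+//    public:
+//     void do_thread(Thread* thread) { tty->print_cr("%s", thread->name()); }
+//   };
+//
+//   PrintNameClosure cl;
+//   Threads::threads_do(&cl);   // typically called with Threads_lock held
+//                               // or at a safepoint, as appropriate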
+
+class SignalHandlerMark: public StackObj {
+private:
+ Thread* _thread;
+public:
+ SignalHandlerMark(Thread* t) {
+ _thread = t;
+ if (_thread) _thread->enter_signal_handler();
+ }
+ ~SignalHandlerMark() {
+ if (_thread) _thread->leave_signal_handler();
+ _thread = NULL;
+ }
+};
+
+// ParkEvents are type-stable and immortal.
+//
+// Lifecycle: Once a ParkEvent is associated with a thread, that ParkEvent remains
+// associated with the thread for the thread's entire lifetime - the relationship is
+// stable. A thread will be associated with at most one ParkEvent. When the thread
+// expires, the ParkEvent moves to the EventFreeList. New threads attempt to allocate from
+// the EventFreeList before creating a new Event. Type-stability frees us from
+// worrying about stale Event or Thread references in the objectMonitor subsystem.
+// (A reference to ParkEvent is always valid, even though the event may no longer be associated
+// with the desired or expected thread. A key aspect of this design is that the callers of
+// park, unpark, etc must tolerate stale references and spurious wakeups).
+//
+// Only the "associated" thread can block (park) on the ParkEvent, although
+// any other thread can unpark a reachable ParkEvent. Park() is allowed to
+// return spuriously. In fact park-unpark is really just an optimization to
+// avoid unbounded spinning and to surrender the CPU so as to be a polite system citizen.
+// A degenerate albeit "impolite" park-unpark implementation could simply return.
+// See http://blogs.sun.com/dave for more details.
+//
+// Eventually I'd like to eliminate Events and ObjectWaiters, both of which serve as
+// thread proxies, and simply make the THREAD structure type-stable and persistent.
+// Currently, we unpark events associated with threads, but ideally we'd just
+// unpark threads.
+//
+// The base-class, PlatformEvent, is platform-specific while the ParkEvent is
+// platform-independent. PlatformEvent provides park(), unpark(), etc., and
+// is abstract -- that is, a PlatformEvent should never be instantiated except
+// as part of a ParkEvent.
+// Equivalently we could have defined a platform-independent base-class that
+// exported Allocate(), Release(), etc. The platform-specific class would extend
+// that base-class, adding park(), unpark(), etc.
+//
+// A word of caution: The JVM uses 2 very similar constructs:
+// 1. ParkEvents are used for Java-level "monitor" synchronization.
+// 2. Parkers are used by JSR166-JUC park-unpark.
+//
+// We'll want to eventually merge these redundant facilities and use ParkEvent.
+
+
+class ParkEvent : public os::PlatformEvent {
+ private:
+ ParkEvent * FreeNext ;
+
+ // Current association
+ Thread * AssociatedWith ;
+ intptr_t RawThreadIdentity ; // LWPID etc
+ volatile int Incarnation ;
+
+ // diagnostic : keep track of last thread to wake this thread.
+ // this is useful for construction of dependency graphs.
+ void * LastWaker ;
+
+ public:
+ // MCS-CLH list linkage and Native Mutex/Monitor
+ ParkEvent * volatile ListNext ;
+ ParkEvent * volatile ListPrev ;
+ volatile intptr_t OnList ;
+ volatile int TState ;
+ volatile int Notified ; // for native monitor construct
+ volatile int IsWaiting ; // Enqueued on WaitSet
+
+
+ private:
+ static ParkEvent * volatile FreeList ;
+ static volatile int ListLock ;
+
+ // It's prudent to mark the dtor as "private"
+ // ensuring that it's not visible outside the package.
+ // Unfortunately gcc warns about such usage, so
+ // we revert to the less desirable "protected" visibility.
+ // The other compilers accept private dtors.
+
+ protected: // Ensure dtor is never invoked
+ ~ParkEvent() { guarantee (0, "invariant") ; }
+
+ ParkEvent() : PlatformEvent() {
+ AssociatedWith = NULL ;
+ FreeNext = NULL ;
+ ListNext = NULL ;
+ ListPrev = NULL ;
+ OnList = 0 ;
+ TState = 0 ;
+ Notified = 0 ;
+ IsWaiting = 0 ;
+ }
+
+ // We use placement-new to force ParkEvent instances to be
+ // aligned on 256-byte address boundaries. This ensures that the least
+ // significant byte of a ParkEvent address is always 0.
+
+ void * operator new (size_t sz) ;
+ void operator delete (void * a) ;
+
+ public:
+ static ParkEvent * Allocate (Thread * t) ;
+ static void Release (ParkEvent * e) ;
+} ;
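+
+// Usage sketch (illustrative):
+//
+//   ParkEvent * ev = ParkEvent::Allocate(thread);   // drawn from the FreeList when possible
+//   ...                                             // ev stays associated with 'thread'
+//   ParkEvent::Release(ev);                         // back onto the FreeList
+//
+// park()/unpark() themselves are provided by the platform-specific
+// os::PlatformEvent base class and are not declared here.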
diff --git a/src/share/vm/runtime/threadCritical.hpp b/src/share/vm/runtime/threadCritical.hpp
new file mode 100644
index 000000000..6f8529512
--- /dev/null
+++ b/src/share/vm/runtime/threadCritical.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2001-2002 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// ThreadCritical is used to protect short non-blocking critical sections.
+// This class must use no vm facilities that require initialization.
+// It is used very early in the vm's initialization, in allocation
+// code and other areas. ThreadCritical regions are reentrant.
+//
+// Due to race conditions during vm exit, some of the os level
+// synchronization primitives may not be deallocated at exit. It
+// is a good plan to implement the platform dependent sections of
+// code with resources that are recoverable during process
+// cleanup by the os. Calling the initialize method before use
+// is also problematic; it is best to use preinitialized primitives
+// if possible. As an example:
+//
+// mutex_t mp = DEFAULTMUTEX;
+//
+// Also note that this class is declared as a StackObj to enforce
+// block structured short locks. It cannot be declared a ResourceObj
+// or CHeapObj, due to initialization issues.
+
+class ThreadCritical : public StackObj {
+ friend class os;
+ private:
+ static void initialize();
+ static void release();
+
+ public:
+ ThreadCritical();
+ ~ThreadCritical();
+};
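+
+// Usage sketch (illustrative): the lock is held for the lifetime of the
+// stack-allocated object, in the usual StackObj style:
+//
+//   {
+//     ThreadCritical tc;
+//     ... short, non-blocking critical section ...
+//   }   // released when 'tc' goes out of scope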
diff --git a/src/share/vm/runtime/threadLocalStorage.cpp b/src/share/vm/runtime/threadLocalStorage.cpp
new file mode 100644
index 000000000..e00c7fe25
--- /dev/null
+++ b/src/share/vm/runtime/threadLocalStorage.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 1997-2003 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_threadLocalStorage.cpp.incl"
+
+// static member initialization
+int ThreadLocalStorage::_thread_index = -1;
+
+Thread* ThreadLocalStorage::get_thread_slow() {
+ return (Thread*) os::thread_local_storage_at(ThreadLocalStorage::thread_index());
+}
+
+void ThreadLocalStorage::set_thread(Thread* thread) {
+ pd_set_thread(thread);
+
+ // The following ensure that any optimization tricks we have tried
+ // did not backfire on us:
+ guarantee(get_thread() == thread, "must be the same thread, quickly");
+ guarantee(get_thread_slow() == thread, "must be the same thread, slowly");
+}
+
+void ThreadLocalStorage::init() {
+ assert(ThreadLocalStorage::thread_index() == -1, "More than one attempt to initialize threadLocalStorage");
+ pd_init();
+ set_thread_index(os::allocate_thread_local_storage());
+ generate_code_for_get_thread();
+}
diff --git a/src/share/vm/runtime/threadLocalStorage.hpp b/src/share/vm/runtime/threadLocalStorage.hpp
new file mode 100644
index 000000000..e522d2791
--- /dev/null
+++ b/src/share/vm/runtime/threadLocalStorage.hpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright 1997-2003 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Interface for thread local storage
+
+// Fast variant of ThreadLocalStorage::get_thread_slow
+extern "C" Thread* get_thread();
+
+// Get raw thread id: e.g., %g7 on sparc, fs or gs on x86
+extern "C" uintptr_t _raw_thread_id();
+
+class ThreadLocalStorage : AllStatic {
+ public:
+ static void set_thread(Thread* thread);
+ static Thread* get_thread_slow();
+ static void invalidate_all() { pd_invalidate_all(); }
+
+ // Machine dependent stuff
+ #include "incls/_threadLS_pd.hpp.incl"
+
+ public:
+ // Accessor
+ static inline int thread_index() { return _thread_index; }
+ static inline void set_thread_index(int index) { _thread_index = index; }
+
+ // Initialization
+ // Called explicitly from VMThread::activate_system instead of init_globals.
+ static void init();
+
+ private:
+ static int _thread_index;
+
+ static void generate_code_for_get_thread();
+
+ // Processor dependent parts of set_thread and initialization
+ static void pd_set_thread(Thread* thread);
+ static void pd_init();
+ // Invalidate any thread caching or optimization schemes.
+ static void pd_invalidate_all();
+
+};
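+
+// Usage sketch (illustrative): a starting thread registers itself once, after
+// which the fast accessors are valid for that thread:
+//
+//   ThreadLocalStorage::set_thread(thread);    // also sanity-checks get_thread()
+//   ...
+//   Thread* t = ThreadLocalStorage::thread();  // fast path, see thread_<os_family>.inline.hpp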
diff --git a/src/share/vm/runtime/timer.cpp b/src/share/vm/runtime/timer.cpp
new file mode 100644
index 000000000..42f598dd9
--- /dev/null
+++ b/src/share/vm/runtime/timer.cpp
@@ -0,0 +1,209 @@
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_timer.cpp.incl"
+
+
+void elapsedTimer::add(elapsedTimer t) {
+ _counter += t._counter;
+}
+
+void elapsedTimer::start() {
+ if (!_active) {
+ _active = true;
+ _start_counter = os::elapsed_counter();
+ }
+}
+
+void elapsedTimer::stop() {
+ if (_active) {
+ _counter += os::elapsed_counter() - _start_counter;
+ _active = false;
+ }
+}
+
+double elapsedTimer::seconds() const {
+ double count = (double) _counter;
+ double freq = (double) os::elapsed_frequency();
+ return count/freq;
+}
+
+jlong elapsedTimer::milliseconds() const {
+ jlong ticks_per_ms = os::elapsed_frequency() / 1000;
+ return _counter / ticks_per_ms;
+}
+
+jlong elapsedTimer::active_ticks() const {
+ if (!_active) {
+ return ticks();
+ }
+ jlong counter = _counter + os::elapsed_counter() - _start_counter;
+ return counter;
+}
+
+void TimeStamp::update_to(jlong ticks) {
+ _counter = ticks;
+ if (_counter == 0) _counter = 1;
+ assert(is_updated(), "must not look clear");
+}
+
+void TimeStamp::update() {
+ update_to(os::elapsed_counter());
+}
+
+double TimeStamp::seconds() const {
+ assert(is_updated(), "must not be clear");
+ jlong new_count = os::elapsed_counter();
+ double count = (double) new_count - _counter;
+ double freq = (double) os::elapsed_frequency();
+ return count/freq;
+}
+
+jlong TimeStamp::milliseconds() const {
+ assert(is_updated(), "must not be clear");
+
+ jlong new_count = os::elapsed_counter();
+ jlong count = new_count - _counter;
+ jlong ticks_per_ms = os::elapsed_frequency() / 1000;
+ return count / ticks_per_ms;
+}
+
+jlong TimeStamp::ticks_since_update() const {
+ assert(is_updated(), "must not be clear");
+ return os::elapsed_counter() - _counter;
+}
+
+TraceTime::TraceTime(const char* title,
+ bool doit,
+ bool print_cr,
+ outputStream* logfile) {
+ _active = doit;
+ _verbose = true;
+ _print_cr = print_cr;
+ _logfile = (logfile != NULL) ? logfile : tty;
+
+ if (_active) {
+ _accum = NULL;
+ if (PrintGCTimeStamps) {
+ _logfile->stamp();
+ _logfile->print(": ");
+ }
+ _logfile->print("[%s", title);
+ _logfile->flush();
+ _t.start();
+ }
+}
+
+TraceTime::TraceTime(const char* title,
+ elapsedTimer* accumulator,
+ bool doit,
+ bool verbose,
+ outputStream* logfile) {
+ _active = doit;
+ _verbose = verbose;
+ _print_cr = true;
+ _logfile = (logfile != NULL) ? logfile : tty;
+ if (_active) {
+ if (_verbose) {
+ if (PrintGCTimeStamps) {
+ _logfile->stamp();
+ _logfile->print(": ");
+ }
+ _logfile->print("[%s", title);
+ _logfile->flush();
+ }
+ _accum = accumulator;
+ _t.start();
+ }
+}
+
+TraceTime::~TraceTime() {
+ if (_active) {
+ _t.stop();
+ if (_accum!=NULL) _accum->add(_t);
+ if (_verbose) {
+ if (_print_cr) {
+ _logfile->print_cr(", %3.7f secs]", _t.seconds());
+ } else {
+ _logfile->print(", %3.7f secs]", _t.seconds());
+ }
+ _logfile->flush();
+ }
+ }
+}
+
+TraceCPUTime::TraceCPUTime(bool doit,
+ bool print_cr,
+ outputStream *logfile) :
+ _active(doit),
+ _print_cr(print_cr),
+ _starting_user_time(0.0),
+ _starting_system_time(0.0),
+ _starting_real_time(0.0),
+ _logfile(logfile),
+ _error(false) {
+ if (_active) {
+ if (logfile != NULL) {
+ _logfile = logfile;
+ } else {
+ _logfile = tty;
+ }
+
+ _error = !os::getTimesSecs(&_starting_real_time,
+ &_starting_user_time,
+ &_starting_system_time);
+ }
+}
+
+TraceCPUTime::~TraceCPUTime() {
+ if (_active) {
+ bool valid = false;
+ if (!_error) {
+ double real_secs; // wall clock time
+ double system_secs; // system time
+ double user_secs; // user time for all threads
+
+ double real_time, user_time, system_time;
+ valid = os::getTimesSecs(&real_time, &user_time, &system_time);
+ if (valid) {
+
+ user_secs = user_time - _starting_user_time;
+ system_secs = system_time - _starting_system_time;
+ real_secs = real_time - _starting_real_time;
+
+ _logfile->print(" [Times: user=%3.2f sys=%3.2f, real=%3.2f secs] ",
+ user_secs, system_secs, real_secs);
+
+ } else {
+ _logfile->print("[Invalid result in TraceCPUTime]");
+ }
+ } else {
+ _logfile->print("[Error in TraceCPUTime]");
+ }
+ if (_print_cr) {
+ _logfile->print_cr("");
+ }
+ }
+}
diff --git a/src/share/vm/runtime/timer.hpp b/src/share/vm/runtime/timer.hpp
new file mode 100644
index 000000000..bddbd04f1
--- /dev/null
+++ b/src/share/vm/runtime/timer.hpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Timers for simple measurement.
+
+class elapsedTimer VALUE_OBJ_CLASS_SPEC {
+ friend class VMStructs;
+ private:
+ jlong _counter;
+ jlong _start_counter;
+ bool _active;
+ public:
+ elapsedTimer() { _active = false; reset(); }
+ void add(elapsedTimer t);
+ void start();
+ void stop();
+ void reset() { _counter = 0; }
+ double seconds() const;
+ jlong milliseconds() const;
+ jlong ticks() const { return _counter; }
+ jlong active_ticks() const;
+ bool is_active() const { return _active; }
+};
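+
+// Usage sketch (illustrative):
+//
+//   elapsedTimer t;
+//   t.start();
+//   ... work ...
+//   t.stop();
+//   tty->print_cr("took %3.7f secs", t.seconds());
+//
+// Repeated start()/stop() pairs accumulate into the same counter until reset().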
+
+// TimeStamp is used for recording when an event took place.
+class TimeStamp VALUE_OBJ_CLASS_SPEC {
+ private:
+ jlong _counter;
+ public:
+ TimeStamp() { _counter = 0; }
+ void clear() { _counter = 0; }
+ // has the timestamp been updated since being created or cleared?
+ bool is_updated() const { return _counter != 0; }
+ // update to current elapsed time
+ void update();
+ // update to given elapsed time
+ void update_to(jlong ticks);
+ // returns seconds since updated
+ // (must not be in a cleared state: must have been previously updated)
+ double seconds() const;
+ jlong milliseconds() const;
+ // ticks elapsed between VM start and last update
+ jlong ticks() const { return _counter; }
+ // ticks elapsed since last update
+ jlong ticks_since_update() const;
+};
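+
+// Illustrative sketch (not part of the original source): recording an event
+// with a TimeStamp and later asking how long ago it happened.
+//
+//   TimeStamp last_event;
+//   last_event.update();                 // record the current elapsed time
+//   ...
+//   if (last_event.is_updated()) {
+//     tty->print_cr("%3.7f secs since the event", last_event.seconds());
+//   }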
+
+// TraceTime is used for tracing the execution time of a block.
+// Usage:
+//  { TraceTime t("block time");
+// some_code();
+// }
+//
+
+class TraceTime: public StackObj {
+ private:
+ bool _active; // do timing
+ bool _verbose; // report every timing
+ bool _print_cr; // add a CR to the end of the timer report
+ elapsedTimer _t; // timer
+ elapsedTimer* _accum; // accumulator
+ outputStream* _logfile; // output log file
+ public:
+  // Constructors
+ TraceTime(const char* title,
+ bool doit = true,
+ bool print_cr = true,
+ outputStream *logfile = NULL);
+ TraceTime(const char* title,
+ elapsedTimer* accumulator,
+ bool doit = true,
+ bool verbose = false,
+ outputStream *logfile = NULL );
+ ~TraceTime();
+
+ // Accessors
+ void set_verbose(bool verbose) { _verbose = verbose; }
+ bool verbose() const { return _verbose; }
+
+ // Activation
+ void suspend() { if (_active) _t.stop(); }
+ void resume() { if (_active) _t.start(); }
+};
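+
+// Illustrative sketch (not part of the original source): the accumulator
+// variant of TraceTime, adding each timed block to a shared elapsedTimer;
+// accum and timed_work() are hypothetical names.
+//
+//   static elapsedTimer accum;
+//   {
+//     TraceTime t("timed work", &accum, true /* doit */, false /* verbose */);
+//     timed_work();
+//   }   // the destructor stops the timer and adds it to accum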
+
+class TraceCPUTime: public StackObj {
+ private:
+ bool _active; // true if times will be measured and printed
+ bool _print_cr; // if true print carriage return at end
+ double _starting_user_time; // user time at start of measurement
+ double _starting_system_time; // system time at start of measurement
+ double _starting_real_time; // real time at start of measurement
+ outputStream* _logfile; // output is printed to this stream
+ bool _error; // true if an error occurred, turns off output
+
+ public:
+ TraceCPUTime(bool doit = true,
+ bool print_cr = true,
+ outputStream *logfile = NULL);
+ ~TraceCPUTime();
+};
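+
+// Illustrative sketch (not part of the original source): scoping a block with
+// TraceCPUTime so that user/system/real times are printed when it exits;
+// do_work() is a hypothetical piece of work.
+//
+//   {
+//     TraceCPUTime tcpu(true /* doit */, true /* print_cr */, tty);
+//     do_work();
+//   }   // the destructor prints "[Times: user=... sys=..., real=... secs]"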
diff --git a/src/share/vm/runtime/unhandledOops.cpp b/src/share/vm/runtime/unhandledOops.cpp
new file mode 100644
index 000000000..fdaf43061
--- /dev/null
+++ b/src/share/vm/runtime/unhandledOops.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_unhandledOops.cpp.incl"
+
+#ifdef CHECK_UNHANDLED_OOPS
+const int free_list_size = 256;
+
+
+UnhandledOops::UnhandledOops(Thread* thread) {
+ _thread = thread;
+ _oop_list = new (ResourceObj::C_HEAP)
+ GrowableArray<UnhandledOopEntry>(free_list_size, true);
+ _level = 0;
+}
+
+UnhandledOops::~UnhandledOops() {
+ delete _oop_list;
+}
+
+
+void UnhandledOops::dump_oops(UnhandledOops *list) {
+ for (int k = 0; k < list->_oop_list->length(); k++) {
+ UnhandledOopEntry entry = list->_oop_list->at(k);
+ tty->print(" " INTPTR_FORMAT, entry._oop_ptr);
+ }
+ tty->cr();
+}
+
+// For debugging the unhandled oop detector _in the debugger_.
+// You don't want to turn it on in compiled code here.
+static bool unhandled_oop_print = false;
+
+void UnhandledOops::register_unhandled_oop(oop* op, address pc) {
+ if (!_thread->is_in_stack((address)op))
+ return;
+
+ _level ++;
+ if (unhandled_oop_print) {
+ for (int i=0; i<_level; i++) tty->print(" ");
+ tty->print_cr("r " INTPTR_FORMAT, op);
+ }
+ UnhandledOopEntry entry(op, pc);
+ _oop_list->push(entry);
+}
+
+
+bool match_oop_entry(void *op, UnhandledOopEntry e) {
+ return (e.oop_ptr() == op);
+}
+
+// Mark an unhandled oop as okay for GC - the containing struct has an oops_do and
+// for some reason the oop has to be on the stack.
+// May be called for a thread other than the current thread, as in the case of
+// VM_GetOrSetLocal in jvmti.
+void UnhandledOops::allow_unhandled_oop(oop* op) {
+ assert (CheckUnhandledOops, "should only be called with checking option");
+
+ int i = _oop_list->find_at_end(op, match_oop_entry);
+ assert(i!=-1, "safe for gc oop not in unhandled_oop_list");
+
+ UnhandledOopEntry entry = _oop_list->at(i);
+ assert(!entry._ok_for_gc, "duplicate entry");
+ entry._ok_for_gc = true;
+ _oop_list->at_put(i, entry);
+}
+
+
+// Called by the oop destructor to remove unhandled oop from the thread's
+// oop list. All oops given are assumed to be on the list. If not,
+// there's a bug in the unhandled oop detector.
+void UnhandledOops::unregister_unhandled_oop(oop* op) {
+ if (!_thread->is_in_stack((address)op)) return;
+
+ _level --;
+ if (unhandled_oop_print) {
+ for (int i=0; i<_level; i++) tty->print(" ");
+    tty->print_cr("u " INTPTR_FORMAT, op);
+ }
+
+ int i = _oop_list->find_at_end(op, match_oop_entry);
+ assert(i!=-1, "oop not in unhandled_oop_list");
+ _oop_list->remove_at(i);
+}
+
+void UnhandledOops::clear_unhandled_oops() {
+ assert (CheckUnhandledOops, "should only be called with checking option");
+ if (_thread->is_gc_locked_out()) {
+ return;
+ }
+ for (int k = 0; k < _oop_list->length(); k++) {
+ UnhandledOopEntry entry = _oop_list->at(k);
+ // If an entry is on the unhandled oop list but isn't on the stack
+ // anymore, it must not have gotten unregistered properly and it's a bug
+ // in the unhandled oop generator.
+ if(!_thread->is_in_stack((address)entry._oop_ptr)) {
+ tty->print_cr("oop_ptr is " INTPTR_FORMAT, (address)entry._oop_ptr);
+ tty->print_cr("thread is " INTPTR_FORMAT " from pc " INTPTR_FORMAT,
+ (address)_thread, (address)entry._pc);
+ assert(false, "heap is corrupted by the unhandled oop detector");
+ }
+ // Set unhandled oops to a pattern that will crash distinctively
+ if (!entry._ok_for_gc) *(intptr_t*)(entry._oop_ptr) = BAD_OOP_ADDR;
+ }
+}
+#endif // CHECK_UNHANDLED_OOPS
diff --git a/src/share/vm/runtime/unhandledOops.hpp b/src/share/vm/runtime/unhandledOops.hpp
new file mode 100644
index 000000000..c2cd95ee4
--- /dev/null
+++ b/src/share/vm/runtime/unhandledOops.hpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+#ifdef CHECK_UNHANDLED_OOPS
+
+// Detect unhandled oops in VM code
+
+// The design is that when an oop is declared on the stack as a local
+// variable, the oop is actually a C++ struct with constructor and
+// destructor.  The constructor adds the oop address to a list on the
+// owning thread and the destructor removes the oop.  At a potential
+// safepoint, the stack addresses of the local variable oops are trashed
+// with a recognizable value.  If the local variable is used again, it
+// will segfault, indicating an unsafe use of that oop.
+// e.g.:
+//    oop o;              // register &o on the list
+//    funct();            // a potential safepoint causes clear_naked_oops(),
+//                        // which trashes o above.
+//    o->do_something();  // Crashes because o is unsafe.
+//
+// This code implements the details of the unhandled oop list on the thread.
+//
+
+class oop;
+class Thread;
+
+class UnhandledOopEntry {
+ friend class UnhandledOops;
+ private:
+ oop* _oop_ptr;
+ bool _ok_for_gc;
+ address _pc;
+ public:
+ oop* oop_ptr() { return _oop_ptr; }
+ UnhandledOopEntry() : _oop_ptr(NULL), _ok_for_gc(false), _pc(NULL) {}
+ UnhandledOopEntry(oop* op, address pc) :
+ _oop_ptr(op), _ok_for_gc(false), _pc(pc) {}
+};
+
+
+class UnhandledOops {
+ friend class Thread;
+ private:
+ Thread* _thread;
+ int _level;
+ GrowableArray<UnhandledOopEntry> *_oop_list;
+ void allow_unhandled_oop(oop* op);
+ void clear_unhandled_oops();
+ UnhandledOops(Thread* thread);
+ ~UnhandledOops();
+
+ public:
+ static void dump_oops(UnhandledOops* list);
+ void register_unhandled_oop(oop* op, address pc);
+ void unregister_unhandled_oop(oop* op);
+};
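+
+// Illustrative sketch (an assumption about the callers, not defined in this
+// file): the checked oop wrapper type is expected to pair these calls up in
+// its constructor and destructor, along the lines of
+//
+//   thread->unhandled_oops()->register_unhandled_oop(&_o, pc);   // constructor
+//   thread->unhandled_oops()->unregister_unhandled_oop(&_o);     // destructor
+//
+// where unhandled_oops() is assumed to be the thread's accessor for this list.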
+
+#ifdef _LP64
+const intptr_t BAD_OOP_ADDR = 0xfffffffffffffff1;
+#else
+const intptr_t BAD_OOP_ADDR = 0xfffffff1;
+#endif // _LP64
+#endif // CHECK_UNHANDLED_OOPS
diff --git a/src/share/vm/runtime/vframe.cpp b/src/share/vm/runtime/vframe.cpp
new file mode 100644
index 000000000..fdb2864df
--- /dev/null
+++ b/src/share/vm/runtime/vframe.cpp
@@ -0,0 +1,636 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_vframe.cpp.incl"
+
+vframe::vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread)
+: _reg_map(reg_map), _thread(thread) {
+ assert(fr != NULL, "must have frame");
+ _fr = *fr;
+}
+
+vframe::vframe(const frame* fr, JavaThread* thread)
+: _reg_map(thread), _thread(thread) {
+ assert(fr != NULL, "must have frame");
+ _fr = *fr;
+}
+
+vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThread* thread) {
+ // Interpreter frame
+ if (f->is_interpreted_frame()) {
+ return new interpretedVFrame(f, reg_map, thread);
+ }
+
+ // Compiled frame
+ CodeBlob* cb = f->cb();
+ if (cb != NULL) {
+ if (cb->is_nmethod()) {
+ nmethod* nm = (nmethod*)cb;
+ return new compiledVFrame(f, reg_map, thread, nm);
+ }
+
+ if (f->is_runtime_frame()) {
+ // Skip this frame and try again.
+ RegisterMap temp_map = *reg_map;
+ frame s = f->sender(&temp_map);
+ return new_vframe(&s, &temp_map, thread);
+ }
+ }
+
+ // External frame
+ return new externalVFrame(f, reg_map, thread);
+}
+
+vframe* vframe::sender() const {
+ RegisterMap temp_map = *register_map();
+ assert(is_top(), "just checking");
+ if (_fr.is_entry_frame() && _fr.is_first_frame()) return NULL;
+ frame s = _fr.real_sender(&temp_map);
+ if (s.is_first_frame()) return NULL;
+ return vframe::new_vframe(&s, &temp_map, thread());
+}
+
+vframe* vframe::top() const {
+ vframe* vf = (vframe*) this;
+ while (!vf->is_top()) vf = vf->sender();
+ return vf;
+}
+
+
+javaVFrame* vframe::java_sender() const {
+ vframe* f = sender();
+ while (f != NULL) {
+ if (f->is_java_frame()) return javaVFrame::cast(f);
+ f = f->sender();
+ }
+ return NULL;
+}
+
+// ------------- javaVFrame --------------
+
+GrowableArray<MonitorInfo*>* javaVFrame::locked_monitors() {
+ assert(SafepointSynchronize::is_at_safepoint() || JavaThread::current() == thread(),
+ "must be at safepoint or it's a java frame of the current thread");
+
+ GrowableArray<MonitorInfo*>* mons = monitors();
+ GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(mons->length());
+ if (mons->is_empty()) return result;
+
+ bool found_first_monitor = false;
+ ObjectMonitor *pending_monitor = thread()->current_pending_monitor();
+ ObjectMonitor *waiting_monitor = thread()->current_waiting_monitor();
+ oop pending_obj = (pending_monitor != NULL ? (oop) pending_monitor->object() : NULL);
+ oop waiting_obj = (waiting_monitor != NULL ? (oop) waiting_monitor->object() : NULL);
+
+ for (int index = (mons->length()-1); index >= 0; index--) {
+ MonitorInfo* monitor = mons->at(index);
+ oop obj = monitor->owner();
+ if (obj == NULL) continue; // skip unowned monitor
+    //
+    // Skip the monitor that the thread is blocked trying to enter or is waiting on
+    //
+ if (!found_first_monitor && (obj == pending_obj || obj == waiting_obj)) {
+ continue;
+ }
+ found_first_monitor = true;
+ result->append(monitor);
+ }
+ return result;
+}
+
+static void print_locked_object_class_name(outputStream* st, Handle obj, const char* lock_state) {
+ if (obj.not_null()) {
+ st->print("\t- %s <" INTPTR_FORMAT "> ", lock_state, (address)obj());
+ if (obj->klass() == SystemDictionary::class_klass()) {
+ klassOop target_klass = java_lang_Class::as_klassOop(obj());
+ st->print_cr("(a java.lang.Class for %s)", instanceKlass::cast(target_klass)->external_name());
+ } else {
+ Klass* k = Klass::cast(obj->klass());
+ st->print_cr("(a %s)", k->external_name());
+ }
+ }
+}
+
+void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
+ ResourceMark rm;
+
+ // If this is the first frame, and java.lang.Object.wait(...) then print out the receiver.
+ if (frame_count == 0) {
+ if (method()->name() == vmSymbols::wait_name() &&
+ instanceKlass::cast(method()->method_holder())->name() == vmSymbols::java_lang_Object()) {
+ StackValueCollection* locs = locals();
+ if (!locs->is_empty()) {
+ StackValue* sv = locs->at(0);
+ if (sv->type() == T_OBJECT) {
+ Handle o = locs->at(0)->get_obj();
+ print_locked_object_class_name(st, o, "waiting on");
+ }
+ }
+ } else if (thread()->current_park_blocker() != NULL) {
+ oop obj = thread()->current_park_blocker();
+ Klass* k = Klass::cast(obj->klass());
+ st->print_cr("\t- %s <" INTPTR_FORMAT "> (a %s)", "parking to wait for ", (address)obj, k->external_name());
+ }
+ }
+
+
+ // Print out all monitors that we have locked or are trying to lock
+ GrowableArray<MonitorInfo*>* mons = monitors();
+ if (!mons->is_empty()) {
+ bool found_first_monitor = false;
+ for (int index = (mons->length()-1); index >= 0; index--) {
+ MonitorInfo* monitor = mons->at(index);
+ if (monitor->owner() != NULL) {
+
+ // First, assume we have the monitor locked. If we haven't found an
+ // owned monitor before and this is the first frame, then we need to
+ // see if we have completed the lock or we are blocked trying to
+ // acquire it - we can only be blocked if the monitor is inflated
+
+ const char *lock_state = "locked"; // assume we have the monitor locked
+ if (!found_first_monitor && frame_count == 0) {
+ markOop mark = monitor->owner()->mark();
+ if (mark->has_monitor() &&
+ mark->monitor() == thread()->current_pending_monitor()) {
+ lock_state = "waiting to lock";
+ }
+ }
+
+ found_first_monitor = true;
+ print_locked_object_class_name(st, monitor->owner(), lock_state);
+ }
+ }
+ }
+}
+
+// ------------- interpretedVFrame --------------
+
+u_char* interpretedVFrame::bcp() const {
+ return fr().interpreter_frame_bcp();
+}
+
+void interpretedVFrame::set_bcp(u_char* bcp) {
+ fr().interpreter_frame_set_bcp(bcp);
+}
+
+intptr_t* interpretedVFrame::locals_addr_at(int offset) const {
+ assert(fr().is_interpreted_frame(), "frame should be an interpreted frame");
+ return fr().interpreter_frame_local_at(offset);
+}
+
+
+GrowableArray<MonitorInfo*>* interpretedVFrame::monitors() const {
+ GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(5);
+ for (BasicObjectLock* current = (fr().previous_monitor_in_interpreter_frame(fr().interpreter_frame_monitor_begin()));
+ current >= fr().interpreter_frame_monitor_end();
+ current = fr().previous_monitor_in_interpreter_frame(current)) {
+ result->push(new MonitorInfo(current->obj(), current->lock()));
+ }
+ return result;
+}
+
+int interpretedVFrame::bci() const {
+ return method()->bci_from(bcp());
+}
+
+methodOop interpretedVFrame::method() const {
+ return fr().interpreter_frame_method();
+}
+
+StackValueCollection* interpretedVFrame::locals() const {
+ int length = method()->max_locals();
+
+ if (method()->is_native()) {
+    // If the method is native, max_locals is not telling the truth;
+    // it then equals the size of the parameters.
+ length = method()->size_of_parameters();
+ }
+
+ StackValueCollection* result = new StackValueCollection(length);
+
+ // Get oopmap describing oops and int for current bci
+ if (TaggedStackInterpreter) {
+ for(int i=0; i < length; i++) {
+ // Find stack location
+ intptr_t *addr = locals_addr_at(i);
+
+ // Depending on oop/int put it in the right package
+ StackValue *sv;
+ frame::Tag tag = fr().interpreter_frame_local_tag(i);
+ if (tag == frame::TagReference) {
+ // oop value
+ Handle h(*(oop *)addr);
+ sv = new StackValue(h);
+ } else {
+ // integer
+ sv = new StackValue(*addr);
+ }
+ assert(sv != NULL, "sanity check");
+ result->add(sv);
+ }
+ } else {
+ InterpreterOopMap oop_mask;
+ if (TraceDeoptimization && Verbose) {
+ methodHandle m_h(thread(), method());
+ OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
+ } else {
+ method()->mask_for(bci(), &oop_mask);
+ }
+ // handle locals
+ for(int i=0; i < length; i++) {
+ // Find stack location
+ intptr_t *addr = locals_addr_at(i);
+
+ // Depending on oop/int put it in the right package
+ StackValue *sv;
+ if (oop_mask.is_oop(i)) {
+ // oop value
+ Handle h(*(oop *)addr);
+ sv = new StackValue(h);
+ } else {
+ // integer
+ sv = new StackValue(*addr);
+ }
+ assert(sv != NULL, "sanity check");
+ result->add(sv);
+ }
+ }
+ return result;
+}
+
+void interpretedVFrame::set_locals(StackValueCollection* values) const {
+ if (values == NULL || values->size() == 0) return;
+
+ int length = method()->max_locals();
+ if (method()->is_native()) {
+    // If the method is native, max_locals is not telling the truth;
+    // it then equals the size of the parameters.
+ length = method()->size_of_parameters();
+ }
+
+ assert(length == values->size(), "Mismatch between actual stack format and supplied data");
+
+ // handle locals
+ for (int i = 0; i < length; i++) {
+ // Find stack location
+ intptr_t *addr = locals_addr_at(i);
+
+ // Depending on oop/int put it in the right package
+ StackValue *sv = values->at(i);
+ assert(sv != NULL, "sanity check");
+ if (sv->type() == T_OBJECT) {
+ *(oop *) addr = (sv->get_obj())();
+ } else { // integer
+ *addr = sv->get_int();
+ }
+ }
+}
+
+StackValueCollection* interpretedVFrame::expressions() const {
+ int length = fr().interpreter_frame_expression_stack_size();
+ if (method()->is_native()) {
+ // If the method is native, there is no expression stack
+ length = 0;
+ }
+
+ int nof_locals = method()->max_locals();
+ StackValueCollection* result = new StackValueCollection(length);
+
+ if (TaggedStackInterpreter) {
+ // handle expressions
+ for(int i=0; i < length; i++) {
+ // Find stack location
+ intptr_t *addr = fr().interpreter_frame_expression_stack_at(i);
+ frame::Tag tag = fr().interpreter_frame_expression_stack_tag(i);
+
+ // Depending on oop/int put it in the right package
+ StackValue *sv;
+ if (tag == frame::TagReference) {
+ // oop value
+ Handle h(*(oop *)addr);
+ sv = new StackValue(h);
+ } else {
+ // otherwise
+ sv = new StackValue(*addr);
+ }
+ assert(sv != NULL, "sanity check");
+ result->add(sv);
+ }
+ } else {
+ InterpreterOopMap oop_mask;
+ // Get oopmap describing oops and int for current bci
+ if (TraceDeoptimization && Verbose) {
+ methodHandle m_h(method());
+ OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
+ } else {
+ method()->mask_for(bci(), &oop_mask);
+ }
+ // handle expressions
+ for(int i=0; i < length; i++) {
+ // Find stack location
+ intptr_t *addr = fr().interpreter_frame_expression_stack_at(i);
+
+ // Depending on oop/int put it in the right package
+ StackValue *sv;
+ if (oop_mask.is_oop(i + nof_locals)) {
+ // oop value
+ Handle h(*(oop *)addr);
+ sv = new StackValue(h);
+ } else {
+ // integer
+ sv = new StackValue(*addr);
+ }
+ assert(sv != NULL, "sanity check");
+ result->add(sv);
+ }
+ }
+ return result;
+}
+
+
+// ------------- cChunk --------------
+
+entryVFrame::entryVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread)
+: externalVFrame(fr, reg_map, thread) {}
+
+
+void vframeStreamCommon::found_bad_method_frame() {
+ // 6379830 Cut point for an assertion that occasionally fires when
+ // we are using the performance analyzer.
+ // Disable this assert when testing the analyzer with fastdebug.
+ // -XX:SuppressErrorAt=vframe.cpp:XXX (XXX=following line number)
+ assert(false, "invalid bci or invalid scope desc");
+}
+
+// top-frame will be skipped
+vframeStream::vframeStream(JavaThread* thread, frame top_frame,
+ bool stop_at_java_call_stub) : vframeStreamCommon(thread) {
+ _stop_at_java_call_stub = stop_at_java_call_stub;
+
+ // skip top frame, as it may not be at safepoint
+ _frame = top_frame.sender(&_reg_map);
+ while (!fill_from_frame()) {
+ _frame = _frame.sender(&_reg_map);
+ }
+}
+
+
+// Step back n frames, skip any pseudo frames in between.
+// This function is used in Class.forName, Class.newInstance, Method.Invoke,
+// AccessController.doPrivileged.
+//
+// NOTE that in JDK 1.4 this has been exposed to Java as
+// sun.reflect.Reflection.getCallerClass(), which can be inlined.
+// Inlined versions must match this routine's logic.
+// Native method prefixing logic does not need to match since
+// the method names don't match and inlining will not occur.
+// See, for example,
+// Parse::inline_native_Reflection_getCallerClass in
+// opto/library_call.cpp.
+void vframeStreamCommon::security_get_caller_frame(int depth) {
+ bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
+
+ while (!at_end()) {
+ if (Universe::reflect_invoke_cache()->is_same_method(method())) {
+ // This is Method.invoke() -- skip it
+ } else if (use_new_reflection &&
+ Klass::cast(method()->method_holder())
+ ->is_subclass_of(SystemDictionary::reflect_method_accessor_klass())) {
+      // This is an auxiliary frame -- skip it
+ } else {
+      // This is a non-excluded frame; we need to count it against the depth
+ if (depth-- <= 0) {
+ // we have reached the desired depth, we are done
+ break;
+ }
+ }
+ if (method()->is_prefixed_native()) {
+ skip_prefixed_method_and_wrappers();
+ } else {
+ next();
+ }
+ }
+}
+
+
+void vframeStreamCommon::skip_prefixed_method_and_wrappers() {
+ ResourceMark rm;
+ HandleMark hm;
+
+ int method_prefix_count = 0;
+ char** method_prefixes = JvmtiExport::get_all_native_method_prefixes(&method_prefix_count);
+ KlassHandle prefixed_klass(method()->method_holder());
+ const char* prefixed_name = method()->name()->as_C_string();
+ size_t prefixed_name_len = strlen(prefixed_name);
+ int prefix_index = method_prefix_count-1;
+
+ while (!at_end()) {
+ next();
+ if (method()->method_holder() != prefixed_klass()) {
+ break; // classes don't match, can't be a wrapper
+ }
+ const char* name = method()->name()->as_C_string();
+ size_t name_len = strlen(name);
+ size_t prefix_len = prefixed_name_len - name_len;
+    if (name_len >= prefixed_name_len || strcmp(name, prefixed_name + prefix_len) != 0) {
+ break; // prefixed name isn't prefixed version of method name, can't be a wrapper
+ }
+ for (; prefix_index >= 0; --prefix_index) {
+ const char* possible_prefix = method_prefixes[prefix_index];
+ size_t possible_prefix_len = strlen(possible_prefix);
+ if (possible_prefix_len == prefix_len &&
+ strncmp(possible_prefix, prefixed_name, prefix_len) == 0) {
+ break; // matching prefix found
+ }
+ }
+ if (prefix_index < 0) {
+ break; // didn't find the prefix, can't be a wrapper
+ }
+ prefixed_name = name;
+ prefixed_name_len = name_len;
+ }
+}
+
+
+void vframeStreamCommon::skip_reflection_related_frames() {
+ while (!at_end() &&
+ (JDK_Version::is_gte_jdk14x_version() && UseNewReflection &&
+ (Klass::cast(method()->method_holder())->is_subclass_of(SystemDictionary::reflect_method_accessor_klass()) ||
+ Klass::cast(method()->method_holder())->is_subclass_of(SystemDictionary::reflect_constructor_accessor_klass())))) {
+ next();
+ }
+}
+
+
+#ifndef PRODUCT
+void vframe::print() {
+ if (WizardMode) _fr.print_value_on(tty,NULL);
+}
+
+
+void vframe::print_value() const {
+ ((vframe*)this)->print();
+}
+
+
+void entryVFrame::print_value() const {
+ ((entryVFrame*)this)->print();
+}
+
+void entryVFrame::print() {
+ vframe::print();
+ tty->print_cr("C Chunk inbetween Java");
+ tty->print_cr("C link " INTPTR_FORMAT, _fr.link());
+}
+
+
+// ------------- javaVFrame --------------
+
+static void print_stack_values(const char* title, StackValueCollection* values) {
+ if (values->is_empty()) return;
+ tty->print_cr("\t%s:", title);
+ values->print();
+}
+
+
+void javaVFrame::print() {
+ ResourceMark rm;
+ vframe::print();
+ tty->print("\t");
+ method()->print_value();
+ tty->cr();
+ tty->print_cr("\tbci: %d", bci());
+
+ print_stack_values("locals", locals());
+ print_stack_values("expressions", expressions());
+
+ GrowableArray<MonitorInfo*>* list = monitors();
+ if (list->is_empty()) return;
+ tty->print_cr("\tmonitor list:");
+ for (int index = (list->length()-1); index >= 0; index--) {
+ MonitorInfo* monitor = list->at(index);
+ tty->print("\t obj\t"); monitor->owner()->print_value();
+ tty->print("(" INTPTR_FORMAT ")", (address)monitor->owner());
+ tty->cr();
+ tty->print("\t ");
+ monitor->lock()->print_on(tty);
+ tty->cr();
+ }
+}
+
+
+void javaVFrame::print_value() const {
+ methodOop m = method();
+ klassOop k = m->method_holder();
+ tty->print_cr("frame( sp=" INTPTR_FORMAT ", unextended_sp=" INTPTR_FORMAT ", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT ")",
+ _fr.sp(), _fr.unextended_sp(), _fr.fp(), _fr.pc());
+ tty->print("%s.%s", Klass::cast(k)->internal_name(), m->name()->as_C_string());
+
+ if (!m->is_native()) {
+ symbolOop source_name = instanceKlass::cast(k)->source_file_name();
+ int line_number = m->line_number_from_bci(bci());
+ if (source_name != NULL && (line_number != -1)) {
+ tty->print("(%s:%d)", source_name->as_C_string(), line_number);
+ }
+ } else {
+ tty->print("(Native Method)");
+ }
+ // Check frame size and print warning if it looks suspiciously large
+ if (fr().sp() != NULL) {
+ uint size = fr().frame_size();
+#ifdef _LP64
+ if (size > 8*K) warning("SUSPICIOUSLY LARGE FRAME (%d)", size);
+#else
+ if (size > 4*K) warning("SUSPICIOUSLY LARGE FRAME (%d)", size);
+#endif
+ }
+}
+
+
+bool javaVFrame::structural_compare(javaVFrame* other) {
+ // Check static part
+ if (method() != other->method()) return false;
+ if (bci() != other->bci()) return false;
+
+ // Check locals
+ StackValueCollection *locs = locals();
+ StackValueCollection *other_locs = other->locals();
+ assert(locs->size() == other_locs->size(), "sanity check");
+ int i;
+ for(i = 0; i < locs->size(); i++) {
+    // It might happen that the compiler reports a conflict and
+    // the interpreter reports a bogus int.
+ if ( is_compiled_frame() && locs->at(i)->type() == T_CONFLICT) continue;
+ if (other->is_compiled_frame() && other_locs->at(i)->type() == T_CONFLICT) continue;
+
+ if (!locs->at(i)->equal(other_locs->at(i)))
+ return false;
+ }
+
+ // Check expressions
+ StackValueCollection* exprs = expressions();
+ StackValueCollection* other_exprs = other->expressions();
+ assert(exprs->size() == other_exprs->size(), "sanity check");
+ for(i = 0; i < exprs->size(); i++) {
+ if (!exprs->at(i)->equal(other_exprs->at(i)))
+ return false;
+ }
+
+ return true;
+}
+
+
+void javaVFrame::print_activation(int index) const {
+ // frame number and method
+ tty->print("%2d - ", index);
+ ((vframe*)this)->print_value();
+ tty->cr();
+
+ if (WizardMode) {
+ ((vframe*)this)->print();
+ tty->cr();
+ }
+}
+
+
+void javaVFrame::verify() const {
+}
+
+
+void interpretedVFrame::verify() const {
+}
+
+
+// ------------- externalVFrame --------------
+
+void externalVFrame::print() {
+ _fr.print_value_on(tty,NULL);
+}
+
+
+void externalVFrame::print_value() const {
+ ((vframe*)this)->print();
+}
+#endif // PRODUCT
diff --git a/src/share/vm/runtime/vframe.hpp b/src/share/vm/runtime/vframe.hpp
new file mode 100644
index 000000000..b62a6f76f
--- /dev/null
+++ b/src/share/vm/runtime/vframe.hpp
@@ -0,0 +1,447 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// vframes are virtual stack frames representing source level activations.
+// A single frame may hold several source level activations in the case of
+// optimized code. The debugging information stored with the optimized code
+// enables us to unfold a frame as a stack of vframes.
+// A cVFrame represents an activation of a non-Java method.
+
+// The vframe inheritance hierarchy:
+// - vframe
+// - javaVFrame
+// - interpretedVFrame
+// - compiledVFrame ; (used for both compiled Java methods and native stubs)
+// - externalVFrame
+// - entryVFrame ; special frame created when calling Java from C
+
+// - BasicLock
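+//
+// Illustrative sketch (not part of the original source): unfolding a raw frame
+// into its source-level activations; thread, fr and reg_map are assumed to
+// describe a walkable Java frame.
+//
+//   for (vframe* vf = vframe::new_vframe(&fr, &reg_map, thread);
+//        vf != NULL;
+//        vf = vf->sender()) {
+//     if (vf->is_java_frame()) {
+//       javaVFrame::cast(vf)->print_lock_info(0);
+//     }
+//   }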
+
+class vframe: public ResourceObj {
+ protected:
+ frame _fr; // Raw frame behind the virtual frame.
+ RegisterMap _reg_map; // Register map for the raw frame (used to handle callee-saved registers).
+ JavaThread* _thread; // The thread owning the raw frame.
+
+ vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread);
+ vframe(const frame* fr, JavaThread* thread);
+ public:
+ // Factory method for creating vframes
+ static vframe* new_vframe(const frame* f, const RegisterMap *reg_map, JavaThread* thread);
+
+ // Accessors
+ frame fr() const { return _fr; }
+ CodeBlob* cb() const { return _fr.cb(); }
+ nmethod* nm() const {
+ assert( cb() != NULL && cb()->is_nmethod(), "usage");
+ return (nmethod*) cb();
+ }
+
+// ???? Does this need to be a copy?
+ frame* frame_pointer() { return &_fr; }
+ const RegisterMap* register_map() const { return &_reg_map; }
+ JavaThread* thread() const { return _thread; }
+
+ // Returns the sender vframe
+ virtual vframe* sender() const;
+
+ // Returns the next javaVFrame on the stack (skipping all other kinds of frame)
+ javaVFrame *java_sender() const;
+
+  // Answers whether this is the top vframe in the frame, i.e., whether the
+  // sender vframe is in the caller frame
+ virtual bool is_top() const { return true; }
+
+ // Returns top vframe within same frame (see is_top())
+ virtual vframe* top() const;
+
+ // Type testing operations
+ virtual bool is_entry_frame() const { return false; }
+ virtual bool is_java_frame() const { return false; }
+ virtual bool is_interpreted_frame() const { return false; }
+ virtual bool is_compiled_frame() const { return false; }
+
+#ifndef PRODUCT
+ // printing operations
+ virtual void print_value() const;
+ virtual void print();
+#endif
+};
+
+
+class javaVFrame: public vframe {
+ public:
+ // JVM state
+ virtual methodOop method() const = 0;
+ virtual int bci() const = 0;
+ virtual StackValueCollection* locals() const = 0;
+ virtual StackValueCollection* expressions() const = 0;
+  // the order returned by monitors() is from oldest -> youngest (see 4418568)
+ virtual GrowableArray<MonitorInfo*>* monitors() const = 0;
+
+ // Debugging support via JVMTI.
+ // NOTE that this is not guaranteed to give correct results for compiled vframes.
+ // Deoptimize first if necessary.
+ virtual void set_locals(StackValueCollection* values) const = 0;
+
+ // Test operation
+ bool is_java_frame() const { return true; }
+
+ protected:
+ javaVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : vframe(fr, reg_map, thread) {}
+ javaVFrame(const frame* fr, JavaThread* thread) : vframe(fr, thread) {}
+
+ public:
+ // casting
+ static javaVFrame* cast(vframe* vf) {
+ assert(vf == NULL || vf->is_java_frame(), "must be java frame");
+ return (javaVFrame*) vf;
+ }
+
+ // Return an array of monitors locked by this frame in the youngest to oldest order
+ GrowableArray<MonitorInfo*>* locked_monitors();
+
+ // printing used during stack dumps
+ void print_lock_info_on(outputStream* st, int frame_count);
+ void print_lock_info(int frame_count) { print_lock_info_on(tty, frame_count); }
+
+#ifndef PRODUCT
+ public:
+ // printing operations
+ void print();
+ void print_value() const;
+ void print_activation(int index) const;
+
+ // verify operations
+ virtual void verify() const;
+
+ // Structural compare
+ bool structural_compare(javaVFrame* other);
+#endif
+ friend class vframe;
+};
+
+class interpretedVFrame: public javaVFrame {
+ public:
+ // JVM state
+ methodOop method() const;
+ int bci() const;
+ StackValueCollection* locals() const;
+ StackValueCollection* expressions() const;
+ GrowableArray<MonitorInfo*>* monitors() const;
+
+ void set_locals(StackValueCollection* values) const;
+
+ // Test operation
+ bool is_interpreted_frame() const { return true; }
+
+ protected:
+ interpretedVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : javaVFrame(fr, reg_map, thread) {};
+
+ public:
+ // Accessors for Byte Code Pointer
+ u_char* bcp() const;
+ void set_bcp(u_char* bcp);
+
+ // casting
+ static interpretedVFrame* cast(vframe* vf) {
+ assert(vf == NULL || vf->is_interpreted_frame(), "must be interpreted frame");
+ return (interpretedVFrame*) vf;
+ }
+
+ private:
+ static const int bcp_offset;
+ intptr_t* locals_addr_at(int offset) const;
+
+  // returns where the parameters start relative to the frame pointer
+ int start_of_parameters() const;
+
+#ifndef PRODUCT
+ public:
+ // verify operations
+ void verify() const;
+#endif
+ friend class vframe;
+};
+
+
+class externalVFrame: public vframe {
+ protected:
+ externalVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : vframe(fr, reg_map, thread) {}
+
+#ifndef PRODUCT
+ public:
+ // printing operations
+ void print_value() const;
+ void print();
+#endif
+ friend class vframe;
+};
+
+class entryVFrame: public externalVFrame {
+ public:
+ bool is_entry_frame() const { return true; }
+
+ protected:
+ entryVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread);
+
+ public:
+ // casting
+ static entryVFrame* cast(vframe* vf) {
+ assert(vf == NULL || vf->is_entry_frame(), "must be entry frame");
+ return (entryVFrame*) vf;
+ }
+
+#ifndef PRODUCT
+ public:
+ // printing
+ void print_value() const;
+ void print();
+#endif
+ friend class vframe;
+};
+
+
+// A MonitorInfo is a ResourceObj that describes the pair:
+// 1) the owner of the monitor
+// 2) the monitor lock
+class MonitorInfo : public ResourceObj {
+ private:
+ oop _owner; // the object owning the monitor
+ BasicLock* _lock;
+ public:
+ // Constructor
+ MonitorInfo(oop owner, BasicLock* lock) {
+ _owner = owner;
+ _lock = lock;
+ }
+ // Accessors
+ oop owner() const { return _owner; }
+ BasicLock* lock() const { return _lock; }
+};
+
+class vframeStreamCommon : StackObj {
+ protected:
+ // common
+ frame _frame;
+ JavaThread* _thread;
+ RegisterMap _reg_map;
+ enum { interpreted_mode, compiled_mode, at_end_mode } _mode;
+
+ int _sender_decode_offset;
+
+ // Cached information
+ methodOop _method;
+ int _bci;
+
+ // Should VM activations be ignored or not
+ bool _stop_at_java_call_stub;
+
+ bool fill_in_compiled_inlined_sender();
+ void fill_from_compiled_frame(int decode_offset);
+ void fill_from_compiled_native_frame();
+
+ void found_bad_method_frame();
+
+ void fill_from_interpreter_frame();
+ bool fill_from_frame();
+
+ // Helper routine for security_get_caller_frame
+ void skip_prefixed_method_and_wrappers();
+
+ public:
+ // Constructor
+ vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
+ _thread = thread;
+ }
+
+ // Accessors
+ methodOop method() const { return _method; }
+ int bci() const { return _bci; }
+ intptr_t* frame_id() const { return _frame.id(); }
+ address frame_pc() const { return _frame.pc(); }
+
+ CodeBlob* cb() const { return _frame.cb(); }
+ nmethod* nm() const {
+ assert( cb() != NULL && cb()->is_nmethod(), "usage");
+ return (nmethod*) cb();
+ }
+
+ // Frame type
+ bool is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
+ bool is_entry_frame() const { return _frame.is_entry_frame(); }
+
+ // Iteration
+ void next() {
+ // handle frames with inlining
+ if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) return;
+
+ // handle general case
+ do {
+ _frame = _frame.sender(&_reg_map);
+ } while (!fill_from_frame());
+ }
+
+ bool at_end() const { return _mode == at_end_mode; }
+
+  // Implements security traversal. Skips the given depth of frames, including
+  // special security frames and prefixed native methods.
+ void security_get_caller_frame(int depth);
+
+ // Helper routine for JVM_LatestUserDefinedLoader -- needed for 1.4
+ // reflection implementation
+ void skip_reflection_related_frames();
+};
+
+class vframeStream : public vframeStreamCommon {
+ public:
+ // Constructors
+ vframeStream(JavaThread* thread, bool stop_at_java_call_stub = false)
+ : vframeStreamCommon(thread) {
+ _stop_at_java_call_stub = stop_at_java_call_stub;
+
+ if (!thread->has_last_Java_frame()) {
+ _mode = at_end_mode;
+ return;
+ }
+
+ _frame = _thread->last_frame();
+ while (!fill_from_frame()) {
+ _frame = _frame.sender(&_reg_map);
+ }
+ }
+
+ // top_frame may not be at safepoint, start with sender
+ vframeStream(JavaThread* thread, frame top_frame, bool stop_at_java_call_stub = false);
+};
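+
+// Illustrative sketch (not part of the original source): the typical iteration
+// pattern over a thread's Java activations with vframeStream.
+//
+//   for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
+//     methodOop m   = vfst.method();   // cached method for this activation
+//     int       bci = vfst.bci();      // cached bci for this activation
+//     // ... inspect m and bci ...
+//   }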
+
+
+inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
+ if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
+ return false;
+ }
+ fill_from_compiled_frame(_sender_decode_offset);
+ return true;
+}
+
+
+inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
+ _mode = compiled_mode;
+
+ // Range check to detect ridiculous offsets.
+ if (decode_offset == DebugInformationRecorder::serialized_null ||
+ decode_offset < 0 ||
+ decode_offset >= nm()->scopes_data_size()) {
+ // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
+ // If we attempt to read nmethod::scopes_data at serialized_null (== 0),
+ // or if we read some at other crazy offset,
+ // we will decode garbage and make wild references into the heap,
+ // leading to crashes in product mode.
+ // (This isn't airtight, of course, since there are internal
+ // offsets which are also crazy.)
+#ifdef ASSERT
+ if (WizardMode) {
+ tty->print_cr("Error in fill_from_frame: pc_desc for "
+ INTPTR_FORMAT " not found or invalid at %d",
+ _frame.pc(), decode_offset);
+ nm()->print();
+ nm()->method()->print_codes();
+ nm()->print_code();
+ nm()->print_pcs();
+ }
+#endif
+ // Provide a cheap fallback in product mode. (See comment above.)
+ found_bad_method_frame();
+ fill_from_compiled_native_frame();
+ return;
+ }
+
+ // Decode first part of scopeDesc
+ DebugInfoReadStream buffer(nm(), decode_offset);
+ _sender_decode_offset = buffer.read_int();
+ _method = methodOop(buffer.read_oop());
+ _bci = buffer.read_bci();
+
+ assert(_method->is_method(), "checking type of decoded method");
+}
+
+// The native frames are handled specially. We do not rely on ScopeDesc info
+// since the pc might not be exact due to the _last_native_pc trick.
+inline void vframeStreamCommon::fill_from_compiled_native_frame() {
+ _mode = compiled_mode;
+ _sender_decode_offset = DebugInformationRecorder::serialized_null;
+ _method = nm()->method();
+ _bci = 0;
+}
+
+inline bool vframeStreamCommon::fill_from_frame() {
+ // Interpreted frame
+ if (_frame.is_interpreted_frame()) {
+ fill_from_interpreter_frame();
+ return true;
+ }
+
+ // Compiled frame
+
+ if (cb() != NULL && cb()->is_nmethod()) {
+ if (nm()->is_native_method()) {
+      // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
+ fill_from_compiled_native_frame();
+ } else {
+ PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
+ int decode_offset;
+ if (pc_desc == NULL) {
+ // Should not happen, but let fill_from_compiled_frame handle it.
+ decode_offset = DebugInformationRecorder::serialized_null;
+ } else {
+ decode_offset = pc_desc->scope_decode_offset();
+ }
+ fill_from_compiled_frame(decode_offset);
+ }
+ return true;
+ }
+
+ // End of stack?
+ if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
+ _mode = at_end_mode;
+ return true;
+ }
+
+ return false;
+}
+
+
+inline void vframeStreamCommon::fill_from_interpreter_frame() {
+ methodOop method = _frame.interpreter_frame_method();
+ intptr_t bcx = _frame.interpreter_frame_bcx();
+ int bci = method->validate_bci_from_bcx(bcx);
+ // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
+ if (bci < 0) {
+ found_bad_method_frame();
+ bci = 0; // pretend it's on the point of entering
+ }
+ _mode = interpreted_mode;
+ _method = method;
+ _bci = bci;
+}
diff --git a/src/share/vm/runtime/vframeArray.cpp b/src/share/vm/runtime/vframeArray.cpp
new file mode 100644
index 000000000..841306366
--- /dev/null
+++ b/src/share/vm/runtime/vframeArray.cpp
@@ -0,0 +1,585 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_vframeArray.cpp.incl"
+
+
+int vframeArrayElement:: bci(void) const { return (_bci == SynchronizationEntryBCI ? 0 : _bci); }
+
+void vframeArrayElement::free_monitors(JavaThread* jt) {
+ if (_monitors != NULL) {
+ MonitorChunk* chunk = _monitors;
+ _monitors = NULL;
+ jt->remove_monitor_chunk(chunk);
+ delete chunk;
+ }
+}
+
+void vframeArrayElement::fill_in(compiledVFrame* vf) {
+
+// Copy the information from the compiled vframe to the
+// interpreter frame we will be creating to replace vf
+
+ _method = vf->method();
+ _bci = vf->raw_bci();
+
+ int index;
+
+ // Get the monitors off-stack
+
+ GrowableArray<MonitorInfo*>* list = vf->monitors();
+ if (list->is_empty()) {
+ _monitors = NULL;
+ } else {
+
+ // Allocate monitor chunk
+ _monitors = new MonitorChunk(list->length());
+ vf->thread()->add_monitor_chunk(_monitors);
+
+ // Migrate the BasicLocks from the stack to the monitor chunk
+ for (index = 0; index < list->length(); index++) {
+ MonitorInfo* monitor = list->at(index);
+ assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
+ BasicObjectLock* dest = _monitors->at(index);
+ dest->set_obj(monitor->owner());
+ monitor->lock()->move_to(monitor->owner(), dest->lock());
+ }
+ }
+
+ // Convert the vframe locals and expressions to off stack
+ // values. Because we will not gc all oops can be converted to
+ // intptr_t (i.e. a stack slot) and we are fine. This is
+ // good since we are inside a HandleMark and the oops in our
+ // collection would go away between packing them here and
+ // unpacking them in unpack_on_stack.
+
+ // First the locals go off-stack
+
+  // FIXME: this seems silly; it creates a StackValueCollection
+  // just to get the size, then copies the values and converts
+  // the types to intptr_t-sized slots.  It seems like it could
+  // do this in place...  Still uses less memory than the old
+  // way, though.
+
+ StackValueCollection *locs = vf->locals();
+ _locals = new StackValueCollection(locs->size());
+ for(index = 0; index < locs->size(); index++) {
+ StackValue* value = locs->at(index);
+ switch(value->type()) {
+ case T_OBJECT:
+ // preserve object type
+ _locals->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
+ break;
+ case T_CONFLICT:
+ // A dead local. Will be initialized to null/zero.
+ _locals->add( new StackValue());
+ break;
+ case T_INT:
+ _locals->add( new StackValue(value->get_int()));
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ }
+
+ // Now the expressions off-stack
+ // Same silliness as above
+
+ StackValueCollection *exprs = vf->expressions();
+ _expressions = new StackValueCollection(exprs->size());
+ for(index = 0; index < exprs->size(); index++) {
+ StackValue* value = exprs->at(index);
+ switch(value->type()) {
+ case T_OBJECT:
+ // preserve object type
+ _expressions->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
+ break;
+ case T_CONFLICT:
+ // A dead stack element. Will be initialized to null/zero.
+ // This can occur when the compiler emits a state in which stack
+ // elements are known to be dead (because of an imminent exception).
+ _expressions->add( new StackValue());
+ break;
+ case T_INT:
+ _expressions->add( new StackValue(value->get_int()));
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ }
+}
+
+int unpack_counter = 0;
+
+void vframeArrayElement::unpack_on_stack(int callee_parameters,
+ int callee_locals,
+ frame* caller,
+ bool is_top_frame,
+ int exec_mode) {
+ JavaThread* thread = (JavaThread*) Thread::current();
+
+ // Look at bci and decide on bcp and continuation pc
+ address bcp;
+ // C++ interpreter doesn't need a pc since it will figure out what to do when it
+ // begins execution
+ address pc;
+ bool use_next_mdp; // true if we should use the mdp associated with the next bci
+ // rather than the one associated with bcp
+ if (raw_bci() == SynchronizationEntryBCI) {
+ // We are deoptimizing while hanging in prologue code for synchronized method
+ bcp = method()->bcp_from(0); // first byte code
+ pc = Interpreter::deopt_entry(vtos, 0); // step = 0 since we don't skip current bytecode
+ use_next_mdp = false;
+ } else {
+ bcp = method()->bcp_from(bci());
+ pc = Interpreter::continuation_for(method(), bcp, callee_parameters, is_top_frame, use_next_mdp);
+ }
+ assert(Bytecodes::is_defined(*bcp), "must be a valid bytecode");
+
+ // Monitorenter and pending exceptions:
+ //
+ // For Compiler2, there should be no pending exception when deoptimizing at monitorenter
+ // because there is no safepoint at the null pointer check (it is either handled explicitly
+ // or prior to the monitorenter) and asynchronous exceptions are not made "pending" by the
+ // runtime interface for the slow case (see JRT_ENTRY_FOR_MONITORENTER). If an asynchronous
+ // exception was processed, the bytecode pointer would have to be extended one bytecode beyond
+ // the monitorenter to place it in the proper exception range.
+ //
+ // For Compiler1, deoptimization can occur while throwing a NullPointerException at monitorenter,
+ // in which case bcp should point to the monitorenter since it is within the exception's range.
+
+ assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame");
+ // TIERED Must know the compiler of the deoptee QQQ
+ COMPILER2_PRESENT(guarantee(*bcp != Bytecodes::_monitorenter || exec_mode != Deoptimization::Unpack_exception,
+ "shouldn't get exception during monitorenter");)
+
+ int popframe_preserved_args_size_in_bytes = 0;
+ int popframe_preserved_args_size_in_words = 0;
+ if (is_top_frame) {
+ JvmtiThreadState *state = thread->jvmti_thread_state();
+ if (JvmtiExport::can_pop_frame() &&
+ (thread->has_pending_popframe() || thread->popframe_forcing_deopt_reexecution())) {
+ if (thread->has_pending_popframe()) {
+ // Pop top frame after deoptimization
+#ifndef CC_INTERP
+ pc = Interpreter::remove_activation_preserving_args_entry();
+#else
+ // Do an uncommon trap type entry. c++ interpreter will know
+ // to pop frame and preserve the args
+ pc = Interpreter::deopt_entry(vtos, 0);
+ use_next_mdp = false;
+#endif
+ } else {
+ // Reexecute invoke in top frame
+ pc = Interpreter::deopt_entry(vtos, 0);
+ use_next_mdp = false;
+ popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
+ // Note: the PopFrame-related extension of the expression stack size is done in
+ // Deoptimization::fetch_unroll_info_helper
+ popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
+ }
+ } else if (JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
+ // Force early return from top frame after deoptimization
+#ifndef CC_INTERP
+ pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
+#else
+ // TBD: Need to implement ForceEarlyReturn for CC_INTERP (ia64)
+#endif
+ } else {
+ // Possibly override the previous pc computation of the top (youngest) frame
+ switch (exec_mode) {
+ case Deoptimization::Unpack_deopt:
+ // use what we've got
+ break;
+ case Deoptimization::Unpack_exception:
+ // exception is pending
+ pc = SharedRuntime::raw_exception_handler_for_return_address(pc);
+ // [phh] We're going to end up in some handler or other, so it doesn't
+ // matter what mdp we point to. See exception_handler_for_exception()
+ // in interpreterRuntime.cpp.
+ break;
+ case Deoptimization::Unpack_uncommon_trap:
+ case Deoptimization::Unpack_reexecute:
+ // redo last byte code
+ pc = Interpreter::deopt_entry(vtos, 0);
+ use_next_mdp = false;
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ }
+ }
+
+ // Setup the interpreter frame
+
+ assert(method() != NULL, "method must exist");
+ int temps = expressions()->size();
+
+ int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
+
+ Interpreter::layout_activation(method(),
+ temps + callee_parameters,
+ popframe_preserved_args_size_in_words,
+ locks,
+ callee_parameters,
+ callee_locals,
+ caller,
+ iframe(),
+ is_top_frame);
+
+ // Update the pc in the frame object and overwrite the temporary pc
+ // we placed in the skeletal frame now that we finally know the
+ // exact interpreter address we should use.
+
+ _frame.patch_pc(thread, pc);
+
+ assert (!method()->is_synchronized() || locks > 0, "synchronized methods must have monitors");
+
+ BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
+ for (int index = 0; index < locks; index++) {
+ top = iframe()->previous_monitor_in_interpreter_frame(top);
+ BasicObjectLock* src = _monitors->at(index);
+ top->set_obj(src->obj());
+ src->lock()->move_to(src->obj(), top->lock());
+ }
+ if (ProfileInterpreter) {
+ iframe()->interpreter_frame_set_mdx(0); // clear out the mdp.
+ }
+ iframe()->interpreter_frame_set_bcx((intptr_t)bcp); // cannot use bcp because frame is not initialized yet
+ if (ProfileInterpreter) {
+ methodDataOop mdo = method()->method_data();
+ if (mdo != NULL) {
+ int bci = iframe()->interpreter_frame_bci();
+ if (use_next_mdp) ++bci;
+ address mdp = mdo->bci_to_dp(bci);
+ iframe()->interpreter_frame_set_mdp(mdp);
+ }
+ }
+
+ // Unpack expression stack
+ // If this is an intermediate frame (i.e. not top frame) then this
+ // only unpacks the part of the expression stack not used by callee
+ // as parameters. The callee parameters are unpacked as part of the
+ // callee locals.
+ int i;
+ for(i = 0; i < expressions()->size(); i++) {
+ StackValue *value = expressions()->at(i);
+ intptr_t* addr = iframe()->interpreter_frame_expression_stack_at(i);
+ switch(value->type()) {
+ case T_INT:
+ *addr = value->get_int();
+ break;
+ case T_OBJECT:
+ *addr = value->get_int(T_OBJECT);
+ break;
+ case T_CONFLICT:
+ // A dead stack slot. Initialize to null in case it is an oop.
+ *addr = NULL_WORD;
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ if (TaggedStackInterpreter) {
+ // Write tag to the stack
+ iframe()->interpreter_frame_set_expression_stack_tag(i,
+ frame::tag_for_basic_type(value->type()));
+ }
+ }
+
+
+ // Unpack the locals
+ for(i = 0; i < locals()->size(); i++) {
+ StackValue *value = locals()->at(i);
+ intptr_t* addr = iframe()->interpreter_frame_local_at(i);
+ switch(value->type()) {
+ case T_INT:
+ *addr = value->get_int();
+ break;
+ case T_OBJECT:
+ *addr = value->get_int(T_OBJECT);
+ break;
+ case T_CONFLICT:
+ // A dead location. If it is an oop then we need a NULL to prevent GC from following it
+ *addr = NULL_WORD;
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ if (TaggedStackInterpreter) {
+ // Write tag to stack
+ iframe()->interpreter_frame_set_local_tag(i,
+ frame::tag_for_basic_type(value->type()));
+ }
+ }
+
+ if (is_top_frame && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
+ // An interpreted frame was popped but it returns to a deoptimized
+ // frame. The incoming arguments to the interpreted activation
+ // were preserved in thread-local storage by the
+ // remove_activation_preserving_args_entry in the interpreter; now
+ // we put them back into the just-unpacked interpreter frame.
+ // Note that this assumes that the locals arena grows toward lower
+ // addresses.
+ if (popframe_preserved_args_size_in_words != 0) {
+ void* saved_args = thread->popframe_preserved_args();
+ assert(saved_args != NULL, "must have been saved by interpreter");
+#ifdef ASSERT
+ int stack_words = Interpreter::stackElementWords();
+ assert(popframe_preserved_args_size_in_words <=
+ iframe()->interpreter_frame_expression_stack_size()*stack_words,
+ "expression stack size should have been extended");
+#endif // ASSERT
+ int top_element = iframe()->interpreter_frame_expression_stack_size()-1;
+ intptr_t* base;
+ if (frame::interpreter_frame_expression_stack_direction() < 0) {
+ base = iframe()->interpreter_frame_expression_stack_at(top_element);
+ } else {
+ base = iframe()->interpreter_frame_expression_stack();
+ }
+ Copy::conjoint_bytes(saved_args,
+ base,
+ popframe_preserved_args_size_in_bytes);
+ thread->popframe_free_preserved_args();
+ }
+ }
+
+#ifndef PRODUCT
+ if (TraceDeoptimization && Verbose) {
+ ttyLocker ttyl;
+ tty->print_cr("[%d Interpreted Frame]", ++unpack_counter);
+ iframe()->print_on(tty);
+ RegisterMap map(thread);
+ vframe* f = vframe::new_vframe(iframe(), &map, thread);
+ f->print();
+ iframe()->interpreter_frame_print_on(tty);
+
+ tty->print_cr("locals size %d", locals()->size());
+ tty->print_cr("expression size %d", expressions()->size());
+
+ method()->print_value();
+ tty->cr();
+ // method()->print_codes();
+ } else if (TraceDeoptimization) {
+ tty->print(" ");
+ method()->print_value();
+ Bytecodes::Code code = Bytecodes::java_code_at(bcp);
+ int bci = method()->bci_from(bcp);
+ tty->print(" - %s", Bytecodes::name(code));
+ tty->print(" @ bci %d ", bci);
+ tty->print_cr("sp = " PTR_FORMAT, iframe()->sp());
+ }
+#endif // PRODUCT
+
+  // The expression stack and locals are in the resource area; don't leave
+  // a dangling pointer in the vframeArray that we keep around for debugging
+  // purposes.
+
+ _locals = _expressions = NULL;
+
+}
+
+int vframeArrayElement::on_stack_size(int callee_parameters,
+ int callee_locals,
+ bool is_top_frame,
+ int popframe_extra_stack_expression_els) const {
+ assert(method()->max_locals() == locals()->size(), "just checking");
+ int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
+ int temps = expressions()->size();
+ return Interpreter::size_activation(method(),
+ temps + callee_parameters,
+ popframe_extra_stack_expression_els,
+ locks,
+ callee_parameters,
+ callee_locals,
+ is_top_frame);
+}
+
+
+
+vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
+ RegisterMap *reg_map, frame sender, frame caller, frame self) {
+
+ // Allocate the vframeArray
+ vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
+ sizeof(vframeArrayElement) * (chunk->length() - 1), // variable part
+ "vframeArray::allocate");
+ result->_frames = chunk->length();
+ result->_owner_thread = thread;
+ result->_sender = sender;
+ result->_caller = caller;
+ result->_original = self;
+ result->set_unroll_block(NULL); // initialize it
+ result->fill_in(thread, frame_size, chunk, reg_map);
+ return result;
+}
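+
+// Illustrative sketch (not part of the original source): allocate() above sizes a
+// single heap block as the fixed vframeArray header plus (length - 1) extra
+// vframeArrayElement slots, relying on the one-element trailing array declared in
+// the class. A minimal standalone C++ analogue of that over-allocation pattern,
+// using hypothetical names, looks like this:
+//
+//   #include <cstdlib>
+//   #include <cstdio>
+//
+//   struct Element { int value; };
+//
+//   struct ElementArray {
+//     int     length;
+//     Element elements[1];              // first slot of the variable-length tail
+//
+//     static ElementArray* allocate(int length) {
+//       // One block: fixed header plus (length - 1) additional trailing elements.
+//       size_t bytes = sizeof(ElementArray) + sizeof(Element) * (length - 1);
+//       ElementArray* a = (ElementArray*) malloc(bytes);
+//       if (a != NULL) a->length = length;
+//       return a;
+//     }
+//     Element* element(int i) { return &elements[i]; }
+//   };
+//
+//   int main() {
+//     ElementArray* a = ElementArray::allocate(4);
+//     for (int i = 0; i < a->length; i++) a->element(i)->value = i * i;
+//     printf("%d\n", a->element(3)->value);   // prints 9
+//     free(a);
+//     return 0;
+//   }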
+
+void vframeArray::fill_in(JavaThread* thread,
+ int frame_size,
+ GrowableArray<compiledVFrame*>* chunk,
+ const RegisterMap *reg_map) {
+  // Set owner first; it is used when adding monitor chunks
+
+ _frame_size = frame_size;
+ for(int i = 0; i < chunk->length(); i++) {
+ element(i)->fill_in(chunk->at(i));
+ }
+
+ // Copy registers for callee-saved registers
+ if (reg_map != NULL) {
+ for(int i = 0; i < RegisterMap::reg_count; i++) {
+#ifdef AMD64
+ // The register map has one entry for every int (32-bit value), so
+ // 64-bit physical registers have two entries in the map, one for
+ // each half. Ignore the high halves of 64-bit registers, just like
+ // frame::oopmapreg_to_location does.
+ //
+ // [phh] FIXME: this is a temporary hack! This code *should* work
+ // correctly w/o this hack, possibly by changing RegisterMap::pd_location
+ // in frame_amd64.cpp and the values of the phantom high half registers
+ // in amd64.ad.
+ // if (VMReg::Name(i) < SharedInfo::stack0 && is_even(i)) {
+ intptr_t* src = (intptr_t*) reg_map->location(VMRegImpl::as_VMReg(i));
+ _callee_registers[i] = src != NULL ? *src : NULL_WORD;
+ // } else {
+ // jint* src = (jint*) reg_map->location(VMReg::Name(i));
+ // _callee_registers[i] = src != NULL ? *src : NULL_WORD;
+ // }
+#else
+ jint* src = (jint*) reg_map->location(VMRegImpl::as_VMReg(i));
+ _callee_registers[i] = src != NULL ? *src : NULL_WORD;
+#endif
+ if (src == NULL) {
+ set_location_valid(i, false);
+ } else {
+ set_location_valid(i, true);
+ jint* dst = (jint*) register_location(i);
+ *dst = *src;
+ }
+ }
+ }
+}
+
+void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode) {
+ // stack picture
+ // unpack_frame
+ // [new interpreter frames ] (frames are skeletal but walkable)
+ // caller_frame
+ //
+ // This routine fills in the missing data for the skeletal interpreter frames
+ // in the above picture.
+
+ // Find the skeletal interpreter frames to unpack into
+ RegisterMap map(JavaThread::current(), false);
+ // Get the youngest frame we will unpack (last to be unpacked)
+ frame me = unpack_frame.sender(&map);
+ int index;
+ for (index = 0; index < frames(); index++ ) {
+ *element(index)->iframe() = me;
+ // Get the caller frame (possibly skeletal)
+ me = me.sender(&map);
+ }
+
+ frame caller_frame = me;
+
+ // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee
+
+ // Unpack the frames from the oldest (frames() -1) to the youngest (0)
+
+ for (index = frames() - 1; index >= 0 ; index--) {
+ int callee_parameters = index == 0 ? 0 : element(index-1)->method()->size_of_parameters();
+ int callee_locals = index == 0 ? 0 : element(index-1)->method()->max_locals();
+ element(index)->unpack_on_stack(callee_parameters,
+ callee_locals,
+ &caller_frame,
+ index == 0,
+ exec_mode);
+ if (index == frames() - 1) {
+ Deoptimization::unwind_callee_save_values(element(index)->iframe(), this);
+ }
+ caller_frame = *element(index)->iframe();
+ }
+
+
+ deallocate_monitor_chunks();
+}
+
+void vframeArray::deallocate_monitor_chunks() {
+ JavaThread* jt = JavaThread::current();
+ for (int index = 0; index < frames(); index++ ) {
+ element(index)->free_monitors(jt);
+ }
+}
+
+#ifndef PRODUCT
+
+bool vframeArray::structural_compare(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk) {
+ if (owner_thread() != thread) return false;
+ int index = 0;
+#if 0 // FIXME can't do this comparison
+
+ // Compare only within vframe array.
+ for (deoptimizedVFrame* vf = deoptimizedVFrame::cast(vframe_at(first_index())); vf; vf = vf->deoptimized_sender_or_null()) {
+ if (index >= chunk->length() || !vf->structural_compare(chunk->at(index))) return false;
+ index++;
+ }
+ if (index != chunk->length()) return false;
+#endif
+
+ return true;
+}
+
+#endif
+
+address vframeArray::register_location(int i) const {
+ assert(0 <= i && i < RegisterMap::reg_count, "index out of bounds");
+ return (address) & _callee_registers[i];
+}
+
+
+#ifndef PRODUCT
+
+// Printing
+
+// Note: we cannot have print_on as const, as we allocate inside the method
+void vframeArray::print_on_2(outputStream* st) {
+ st->print_cr(" - sp: " INTPTR_FORMAT, sp());
+ st->print(" - thread: ");
+ Thread::current()->print();
+ st->print_cr(" - frame size: %d", frame_size());
+ for (int index = 0; index < frames() ; index++ ) {
+ element(index)->print(st);
+ }
+}
+
+void vframeArrayElement::print(outputStream* st) {
+ st->print_cr(" - interpreter_frame -> sp: ", INTPTR_FORMAT, iframe()->sp());
+}
+
+void vframeArray::print_value_on(outputStream* st) const {
+ st->print_cr("vframeArray [%d] ", frames());
+}
+
+
+#endif
diff --git a/src/share/vm/runtime/vframeArray.hpp b/src/share/vm/runtime/vframeArray.hpp
new file mode 100644
index 000000000..767b98907
--- /dev/null
+++ b/src/share/vm/runtime/vframeArray.hpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// A vframeArray is an array used for momentarily storing off-stack Java method activations
+// during deoptimization. Essentially it is an array of vframes whose data is stored
+// off the stack. This structure never exists across a safepoint, so there is no need
+// to GC any oops that are stored in it.
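+//
+// Rough usage sketch (illustrative only, not part of the original source): the
+// deoptimization code captures the compiled activations into a vframeArray and
+// later unpacks it into the skeletal interpreter frames that replace the
+// compiled frame, using only the interfaces declared below:
+//
+//   GrowableArray<compiledVFrame*>* chunk = ...;   // vframes of the deoptee
+//   vframeArray* array =
+//       vframeArray::allocate(thread, frame_size, chunk, reg_map,
+//                             sender, caller, self);
+//   ...                                            // skeletal frames are laid out
+//   array->unpack_to_stack(unpack_frame, exec_mode);
+//   // unpack_to_stack() also frees the monitor chunks when it is done.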
+
+
+class LocalsClosure;
+class ExpressionStackClosure;
+class MonitorStackClosure;
+class MonitorArrayElement;
+class StackValueCollection;
+
+// A vframeArrayElement is an element of a vframeArray. Each element
+// represents an interpreter frame that will eventually be created.
+
+class vframeArrayElement : public _ValueObj {
+ private:
+
+ frame _frame; // the interpreter frame we will unpack into
+ int _bci; // raw bci for this vframe
+ methodOop _method; // the method for this vframe
+ MonitorChunk* _monitors; // active monitors for this vframe
+ StackValueCollection* _locals;
+ StackValueCollection* _expressions;
+
+ public:
+
+ frame* iframe(void) { return &_frame; }
+
+ int bci(void) const;
+
+ int raw_bci(void) const { return _bci; }
+
+ methodOop method(void) const { return _method; }
+
+ MonitorChunk* monitors(void) const { return _monitors; }
+
+ void free_monitors(JavaThread* jt);
+
+ StackValueCollection* locals(void) const { return _locals; }
+
+ StackValueCollection* expressions(void) const { return _expressions; }
+
+ void fill_in(compiledVFrame* vf);
+
+ // Formerly part of deoptimizedVFrame
+
+
+ // Returns the on stack word size for this frame
+ // callee_parameters is the number of callee locals residing inside this frame
+ int on_stack_size(int callee_parameters,
+ int callee_locals,
+ bool is_top_frame,
+ int popframe_extra_stack_expression_els) const;
+
+  // Unpacks the element into a skeletal interpreter frame
+ void unpack_on_stack(int callee_parameters,
+ int callee_locals,
+ frame* caller,
+ bool is_top_frame,
+ int exec_mode);
+
+#ifndef PRODUCT
+ void print(outputStream* st);
+#endif /* PRODUCT */
+};
+
+// this can be a ResourceObj if we don't save the last one...
+// but it does make debugging easier even if we can't look
+// at the data in each vframeElement
+
+class vframeArray: public CHeapObj {
+ private:
+
+
+ // Here is what a vframeArray looks like in memory
+
+ /*
+ fixed part
+ description of the original frame
+ _frames - number of vframes in this array
+ adapter info
+ callee register save area
+ variable part
+ vframeArrayElement [ 0 ]
+ ...
+ vframeArrayElement [_frames - 1]
+
+ */
+
+ JavaThread* _owner_thread;
+ vframeArray* _next;
+ frame _original; // the original frame of the deoptee
+ frame _caller; // caller of root frame in vframeArray
+ frame _sender;
+
+ Deoptimization::UnrollBlock* _unroll_block;
+ int _frame_size;
+
+ int _frames; // number of javavframes in the array (does not count any adapter)
+
+ intptr_t _callee_registers[RegisterMap::reg_count];
+ unsigned char _valid[RegisterMap::reg_count];
+
+ vframeArrayElement _elements[1]; // First variable section.
+
+ void fill_in_element(int index, compiledVFrame* vf);
+
+ bool is_location_valid(int i) const { return _valid[i] != 0; }
+ void set_location_valid(int i, bool valid) { _valid[i] = valid; }
+
+ public:
+
+
+ // Tells whether index is within bounds.
+ bool is_within_bounds(int index) const { return 0 <= index && index < frames(); }
+
+  // Accessors for instance variables
+ int frames() const { return _frames; }
+
+ static vframeArray* allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
+ RegisterMap* reg_map, frame sender, frame caller, frame self);
+
+
+ vframeArrayElement* element(int index) { assert(is_within_bounds(index), "Bad index"); return &_elements[index]; }
+
+  // Fills in the array with the vframe information in chunk
+ void fill_in(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, const RegisterMap *reg_map);
+
+ // Returns the owner of this vframeArray
+ JavaThread* owner_thread() const { return _owner_thread; }
+
+ // Accessors for next
+ vframeArray* next() const { return _next; }
+ void set_next(vframeArray* value) { _next = value; }
+
+ // Accessors for sp
+ intptr_t* sp() const { return _original.sp(); }
+
+ intptr_t* unextended_sp() const { return _original.unextended_sp(); }
+
+ address original_pc() const { return _original.pc(); }
+
+ frame original() const { return _original; }
+
+ frame caller() const { return _caller; }
+
+ frame sender() const { return _sender; }
+
+ // Accessors for unroll block
+ Deoptimization::UnrollBlock* unroll_block() const { return _unroll_block; }
+ void set_unroll_block(Deoptimization::UnrollBlock* block) { _unroll_block = block; }
+
+ // Returns the size of the frame that got deoptimized
+ int frame_size() const { return _frame_size; }
+
+  // Unpack the array onto the stack within the stack interval passed in
+ void unpack_to_stack(frame &unpack_frame, int exec_mode);
+
+ // Deallocates monitor chunks allocated during deoptimization.
+ // This should be called when the array is not used anymore.
+ void deallocate_monitor_chunks();
+
+
+
+ // Accessor for register map
+ address register_location(int i) const;
+
+ void print_on_2(outputStream* st) PRODUCT_RETURN;
+ void print_value_on(outputStream* st) const PRODUCT_RETURN;
+
+#ifndef PRODUCT
+ // Comparing
+ bool structural_compare(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk);
+#endif
+
+};
diff --git a/src/share/vm/runtime/vframe_hp.cpp b/src/share/vm/runtime/vframe_hp.cpp
new file mode 100644
index 000000000..3399e23a5
--- /dev/null
+++ b/src/share/vm/runtime/vframe_hp.cpp
@@ -0,0 +1,337 @@
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_vframe_hp.cpp.incl"
+
+
+// ------------- compiledVFrame --------------
+
+StackValueCollection* compiledVFrame::locals() const {
+  // Natives have no scope
+ if (scope() == NULL) return new StackValueCollection(0);
+ GrowableArray<ScopeValue*>* scv_list = scope()->locals();
+ if (scv_list == NULL) return new StackValueCollection(0);
+
+ // scv_list is the list of ScopeValues describing the JVM stack state.
+ // There is one scv_list entry for every JVM stack state in use.
+ int length = scv_list->length();
+ StackValueCollection* result = new StackValueCollection(length);
+  // In rare instances set_locals may have occurred, in which case there
+  // are local values that are no longer described by the ScopeValues.
+ GrowableArray<jvmtiDeferredLocalVariable*>* deferred = NULL;
+ GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread()->deferred_locals();
+ if (list != NULL ) {
+    // In practice this never happens, or is at most a single-element search
+ for (int i = 0; i < list->length(); i++) {
+ if (list->at(i)->matches((vframe*)this)) {
+ deferred = list->at(i)->locals();
+ break;
+ }
+ }
+ }
+
+ for( int i = 0; i < length; i++ ) {
+ result->add( create_stack_value(scv_list->at(i)) );
+ }
+
+ // Replace specified locals with any deferred writes that are present
+ if (deferred != NULL) {
+ for ( int l = 0; l < deferred->length() ; l ++) {
+ jvmtiDeferredLocalVariable* val = deferred->at(l);
+ switch (val->type()) {
+ case T_BOOLEAN:
+ result->set_int_at(val->index(), val->value().z);
+ break;
+ case T_CHAR:
+ result->set_int_at(val->index(), val->value().c);
+ break;
+ case T_FLOAT:
+ result->set_float_at(val->index(), val->value().f);
+ break;
+ case T_DOUBLE:
+ result->set_double_at(val->index(), val->value().d);
+ break;
+ case T_BYTE:
+ result->set_int_at(val->index(), val->value().b);
+ break;
+ case T_SHORT:
+ result->set_int_at(val->index(), val->value().s);
+ break;
+ case T_INT:
+ result->set_int_at(val->index(), val->value().i);
+ break;
+ case T_LONG:
+ result->set_long_at(val->index(), val->value().j);
+ break;
+ case T_OBJECT:
+ {
+ Handle obj((oop)val->value().l);
+ result->set_obj_at(val->index(), obj);
+ }
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ }
+ }
+
+ return result;
+}
+
+
+void compiledVFrame::set_locals(StackValueCollection* values) const {
+
+ fatal("Should use update_local for each local update");
+}
+
+void compiledVFrame::update_local(BasicType type, int index, jvalue value) {
+
+#ifdef ASSERT
+
+ assert(fr().is_deoptimized_frame(), "frame must be scheduled for deoptimization");
+#endif /* ASSERT */
+ GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = thread()->deferred_locals();
+ if (deferred != NULL ) {
+ // See if this vframe has already had locals with deferred writes
+ int f;
+ for ( f = 0 ; f < deferred->length() ; f++ ) {
+ if (deferred->at(f)->matches(this)) {
+        // Matching vframe; now see if the local already had a deferred write
+ GrowableArray<jvmtiDeferredLocalVariable*>* locals = deferred->at(f)->locals();
+ int l;
+ for (l = 0 ; l < locals->length() ; l++ ) {
+ if (locals->at(l)->index() == index) {
+ locals->at(l)->set_value(value);
+ return;
+ }
+ }
+ // No matching local already present. Push a new value onto the deferred collection
+ locals->push(new jvmtiDeferredLocalVariable(index, type, value));
+ return;
+ }
+ }
+    // No matching vframe; must push a new one
+ } else {
+ // No deferred updates pending for this thread.
+ // allocate in C heap
+ deferred = new(ResourceObj::C_HEAP) GrowableArray<jvmtiDeferredLocalVariableSet*> (1, true);
+ thread()->set_deferred_locals(deferred);
+ }
+ deferred->push(new jvmtiDeferredLocalVariableSet(method(), bci(), fr().id()));
+ assert(deferred->top()->id() == fr().id(), "Huh? Must match");
+ deferred->top()->set_local_at(index, type, value);
+}
+
+StackValueCollection* compiledVFrame::expressions() const {
+  // Natives have no scope
+ if (scope() == NULL) return new StackValueCollection(0);
+ GrowableArray<ScopeValue*>* scv_list = scope()->expressions();
+ if (scv_list == NULL) return new StackValueCollection(0);
+
+ // scv_list is the list of ScopeValues describing the JVM stack state.
+ // There is one scv_list entry for every JVM stack state in use.
+ int length = scv_list->length();
+ StackValueCollection* result = new StackValueCollection(length);
+ for( int i = 0; i < length; i++ )
+ result->add( create_stack_value(scv_list->at(i)) );
+
+ return result;
+}
+
+
+// The implementation of the following two methods was factored out into the
+// class StackValue because it is also used from within deoptimization.cpp for
+// rematerialization and relocking of non-escaping objects.
+
+StackValue *compiledVFrame::create_stack_value(ScopeValue *sv) const {
+ return StackValue::create_stack_value(&_fr, register_map(), sv);
+}
+
+BasicLock* compiledVFrame::resolve_monitor_lock(Location location) const {
+ return StackValue::resolve_monitor_lock(&_fr, location);
+}
+
+
+GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const {
+  // Natives have no scope
+ if (scope() == NULL) {
+ nmethod* nm = code();
+ methodOop method = nm->method();
+ assert(method->is_native(), "");
+ if (!method->is_synchronized()) {
+ return new GrowableArray<MonitorInfo*>(0);
+ }
+ // This monitor is really only needed for UseBiasedLocking, but
+ // return it in all cases for now as it might be useful for stack
+ // traces and tools as well
+ GrowableArray<MonitorInfo*> *monitors = new GrowableArray<MonitorInfo*>(1);
+ // Casting away const
+ frame& fr = (frame&) _fr;
+ MonitorInfo* info = new MonitorInfo(fr.compiled_synchronized_native_monitor_owner(nm),
+ fr.compiled_synchronized_native_monitor(nm));
+ monitors->push(info);
+ return monitors;
+ }
+ GrowableArray<MonitorValue*>* monitors = scope()->monitors();
+ if (monitors == NULL) {
+ return new GrowableArray<MonitorInfo*>(0);
+ }
+ GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(monitors->length());
+ for (int index = 0; index < monitors->length(); index++) {
+ MonitorValue* mv = monitors->at(index);
+ StackValue *owner_sv = create_stack_value(mv->owner()); // it is an oop
+ result->push(new MonitorInfo(owner_sv->get_obj()(), resolve_monitor_lock(mv->basic_lock())));
+ }
+ return result;
+}
+
+
+compiledVFrame::compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, nmethod* nm)
+: javaVFrame(fr, reg_map, thread) {
+ _scope = NULL;
+ // Compiled method (native stub or Java code)
+  // native wrappers have no scope data; it is implied
+ if (!nm->is_native_method()) {
+ _scope = nm->scope_desc_at(_fr.pc());
+ }
+}
+
+compiledVFrame::compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, ScopeDesc* scope)
+: javaVFrame(fr, reg_map, thread) {
+ _scope = scope;
+ guarantee(_scope != NULL, "scope must be present");
+}
+
+
+bool compiledVFrame::is_top() const {
+ // FIX IT: Remove this when new native stubs are in place
+ if (scope() == NULL) return true;
+ return scope()->is_top();
+}
+
+
+nmethod* compiledVFrame::code() const {
+ return CodeCache::find_nmethod(_fr.pc());
+}
+
+
+methodOop compiledVFrame::method() const {
+ if (scope() == NULL) {
+    // native nmethods have no scope; the method is implied
+ nmethod* nm = code();
+ assert(nm->is_native_method(), "must be native");
+ return nm->method();
+ }
+ return scope()->method()();
+}
+
+
+int compiledVFrame::bci() const {
+ int raw = raw_bci();
+ return raw == SynchronizationEntryBCI ? 0 : raw;
+}
+
+
+int compiledVFrame::raw_bci() const {
+ if (scope() == NULL) {
+    // native nmethods have no scope; the method/bci is implied
+ nmethod* nm = code();
+ assert(nm->is_native_method(), "must be native");
+ return 0;
+ }
+ return scope()->bci();
+}
+
+
+vframe* compiledVFrame::sender() const {
+ const frame f = fr();
+ if (scope() == NULL) {
+    // native nmethods have no scope; the method/bci is implied
+ nmethod* nm = code();
+ assert(nm->is_native_method(), "must be native");
+ return vframe::sender();
+ } else {
+ return scope()->is_top()
+ ? vframe::sender()
+ : new compiledVFrame(&f, register_map(), thread(), scope()->sender());
+ }
+}
+
+jvmtiDeferredLocalVariableSet::jvmtiDeferredLocalVariableSet(methodOop method, int bci, intptr_t* id) {
+ _method = method;
+ _bci = bci;
+ _id = id;
+  // Always will need at least one; must be on the C heap
+ _locals = new(ResourceObj::C_HEAP) GrowableArray<jvmtiDeferredLocalVariable*> (1, true);
+}
+
+jvmtiDeferredLocalVariableSet::~jvmtiDeferredLocalVariableSet() {
+ for (int i = 0; i < _locals->length() ; i++ ) {
+ delete _locals->at(i);
+ }
+  // Free the GrowableArray and the C-heap storage for its elements
+ delete _locals;
+}
+
+bool jvmtiDeferredLocalVariableSet::matches(vframe* vf) {
+ if (!vf->is_compiled_frame()) return false;
+ compiledVFrame* cvf = (compiledVFrame*)vf;
+ return cvf->fr().id() == id() && cvf->method() == method() && cvf->bci() == bci();
+}
+
+void jvmtiDeferredLocalVariableSet::set_local_at(int idx, BasicType type, jvalue val) {
+ int i;
+ for ( i = 0 ; i < locals()->length() ; i++ ) {
+ if ( locals()->at(i)->index() == idx) {
+ assert(locals()->at(i)->type() == type, "Wrong type");
+ locals()->at(i)->set_value(val);
+ return;
+ }
+ }
+ locals()->push(new jvmtiDeferredLocalVariable(idx, type, val));
+}
+
+void jvmtiDeferredLocalVariableSet::oops_do(OopClosure* f) {
+
+ f->do_oop((oop*) &_method);
+ for ( int i = 0; i < locals()->length(); i++ ) {
+ if ( locals()->at(i)->type() == T_OBJECT) {
+ f->do_oop(locals()->at(i)->oop_addr());
+ }
+ }
+}
+
+jvmtiDeferredLocalVariable::jvmtiDeferredLocalVariable(int index, BasicType type, jvalue value) {
+ _index = index;
+ _type = type;
+ _value = value;
+}
+
+
+#ifndef PRODUCT
+void compiledVFrame::verify() const {
+ Unimplemented();
+}
+#endif // PRODUCT
diff --git a/src/share/vm/runtime/vframe_hp.hpp b/src/share/vm/runtime/vframe_hp.hpp
new file mode 100644
index 000000000..a0dd0be2f
--- /dev/null
+++ b/src/share/vm/runtime/vframe_hp.hpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class compiledVFrame: public javaVFrame {
+ public:
+ // JVM state
+ methodOop method() const;
+ int bci() const;
+ StackValueCollection* locals() const;
+ StackValueCollection* expressions() const;
+ GrowableArray<MonitorInfo*>* monitors() const;
+
+ void set_locals(StackValueCollection* values) const;
+
+ // Virtuals defined in vframe
+ bool is_compiled_frame() const { return true; }
+ vframe* sender() const;
+ bool is_top() const;
+
+ // Casting
+ static compiledVFrame* cast(vframe* vf) {
+ assert(vf == NULL || vf->is_compiled_frame(), "must be compiled frame");
+ return (compiledVFrame*) vf;
+ }
+
+ public:
+ // Constructors
+ compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, nmethod* nm);
+
+ // Update a local in a compiled frame. Update happens when deopt occurs
+ void update_local(BasicType type, int index, jvalue value);
+
+ // Returns the active nmethod
+ nmethod* code() const;
+
+ // Returns the scopeDesc
+ ScopeDesc* scope() const { return _scope; }
+
+ // Returns SynchronizationEntryBCI or bci() (used for synchronization)
+ int raw_bci() const;
+
+ protected:
+ ScopeDesc* _scope;
+
+
+ //StackValue resolve(ScopeValue* sv) const;
+ BasicLock* resolve_monitor_lock(Location location) const;
+ StackValue *create_stack_value(ScopeValue *sv) const;
+
+ private:
+ compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, ScopeDesc* scope);
+
+#ifndef PRODUCT
+ public:
+ void verify() const;
+#endif
+};
+
+// In order to implement set_locals for compiled vframes we must
+// store updated locals in a data structure that contains enough
+// information to recognize equality with a vframe and to store
+// any updated locals.
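+//
+// Minimal standalone sketch (illustrative only, with hypothetical names) of the
+// idea described above: remember enough frame identity to match a vframe later
+// (the real class also keys on the method), plus one deferred value per local
+// index, overwriting any earlier deferred write to the same index.
+//
+//   #include <stdint.h>
+//   #include <vector>
+//
+//   struct DeferredWrite { int index; long value; };
+//
+//   struct DeferredWriteSet {
+//     intptr_t id;                                   // frame id
+//     int      bci;                                  // bytecode index
+//     std::vector<DeferredWrite> writes;
+//
+//     bool matches(intptr_t fid, int b) const { return id == fid && bci == b; }
+//
+//     void set_local_at(int index, long value) {
+//       for (size_t i = 0; i < writes.size(); i++) {
+//         if (writes[i].index == index) { writes[i].value = value; return; }
+//       }
+//       DeferredWrite w = { index, value };          // first write to this local
+//       writes.push_back(w);
+//     }
+//   };
+//
+//   int main() {
+//     DeferredWriteSet s; s.id = 0x1000; s.bci = 7;
+//     s.set_local_at(2, 42);
+//     s.set_local_at(2, 43);                         // replaces the value for local 2
+//     return (s.matches(0x1000, 7) && s.writes.size() == 1) ? 0 : 1;
+//   }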
+
+class jvmtiDeferredLocalVariable;
+class jvmtiDeferredLocalVariableSet : public CHeapObj {
+private:
+
+ methodOop _method; // must be GC'd
+ int _bci;
+ intptr_t* _id;
+ GrowableArray<jvmtiDeferredLocalVariable*>* _locals;
+
+ public:
+ // JVM state
+ methodOop method() const { return _method; }
+ int bci() const { return _bci; }
+ intptr_t* id() const { return _id; }
+ GrowableArray<jvmtiDeferredLocalVariable*>* locals() const { return _locals; }
+ void set_local_at(int idx, BasicType typ, jvalue val);
+
+ // Does the vframe match this jvmtiDeferredLocalVariableSet
+ bool matches(vframe* vf);
+ // GC
+ void oops_do(OopClosure* f);
+
+ // constructor
+ jvmtiDeferredLocalVariableSet(methodOop method, int bci, intptr_t* id);
+
+ // destructor
+ ~jvmtiDeferredLocalVariableSet();
+
+
+};
+
+class jvmtiDeferredLocalVariable : public CHeapObj {
+ public:
+
+ jvmtiDeferredLocalVariable(int index, BasicType type, jvalue value);
+
+ BasicType type(void) { return _type; }
+ int index(void) { return _index; }
+ jvalue value(void) { return _value; }
+  // The only mutator is for the value, since it is the only field that can change
+ void set_value(jvalue value) { _value = value; }
+ // For gc
+ oop* oop_addr(void) { return (oop*) &_value.l; }
+
+ private:
+
+ BasicType _type;
+ jvalue _value;
+ int _index;
+
+};
diff --git a/src/share/vm/runtime/virtualspace.cpp b/src/share/vm/runtime/virtualspace.cpp
new file mode 100644
index 000000000..23b75dc9d
--- /dev/null
+++ b/src/share/vm/runtime/virtualspace.cpp
@@ -0,0 +1,704 @@
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_virtualspace.cpp.incl"
+
+
+// ReservedSpace
+ReservedSpace::ReservedSpace(size_t size) {
+ initialize(size, 0, false, NULL);
+}
+
+ReservedSpace::ReservedSpace(size_t size, size_t alignment,
+ bool large, char* requested_address) {
+ initialize(size, alignment, large, requested_address);
+}
+
+char *
+ReservedSpace::align_reserved_region(char* addr, const size_t len,
+ const size_t prefix_size,
+ const size_t prefix_align,
+ const size_t suffix_size,
+ const size_t suffix_align)
+{
+ assert(addr != NULL, "sanity");
+ const size_t required_size = prefix_size + suffix_size;
+ assert(len >= required_size, "len too small");
+
+ const size_t s = size_t(addr);
+ const size_t beg_ofs = s + prefix_size & suffix_align - 1;
+ const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
+
+ if (len < beg_delta + required_size) {
+ return NULL; // Cannot do proper alignment.
+ }
+ const size_t end_delta = len - (beg_delta + required_size);
+
+ if (beg_delta != 0) {
+ os::release_memory(addr, beg_delta);
+ }
+
+ if (end_delta != 0) {
+ char* release_addr = (char*) (s + beg_delta + required_size);
+ os::release_memory(release_addr, end_delta);
+ }
+
+ return (char*) (s + beg_delta);
+}
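+
+// Illustrative sketch (not part of the original source) of the arithmetic above,
+// with explicit parentheses and made-up numbers: beg_delta is how far the region
+// start must be advanced so that (start + prefix_size) lands on a suffix_align
+// boundary (suffix_align being a power of two).
+//
+//   #include <cstddef>
+//   #include <cstdio>
+//
+//   static size_t begin_delta(size_t start, size_t prefix_size, size_t suffix_align) {
+//     size_t beg_ofs = (start + prefix_size) & (suffix_align - 1);
+//     return beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
+//   }
+//
+//   int main() {
+//     size_t start        = 0x10001000;   // raw reservation start
+//     size_t prefix_size  = 0x1000;       // 4 KB prefix
+//     size_t suffix_align = 0x10000;      // suffix aligned to 64 KB
+//     size_t delta = begin_delta(start, prefix_size, suffix_align);   // 0xE000
+//     // (start + delta + prefix_size) is now a multiple of suffix_align.
+//     printf("advance start by %#zx bytes\n", delta);
+//     return 0;
+//   }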
+
+char* ReservedSpace::reserve_and_align(const size_t reserve_size,
+ const size_t prefix_size,
+ const size_t prefix_align,
+ const size_t suffix_size,
+ const size_t suffix_align)
+{
+ assert(reserve_size > prefix_size + suffix_size, "should not be here");
+
+ char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
+ if (raw_addr == NULL) return NULL;
+
+ char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
+ prefix_align, suffix_size,
+ suffix_align);
+ if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
+ fatal("os::release_memory failed");
+ }
+
+#ifdef ASSERT
+ if (result != NULL) {
+ const size_t raw = size_t(raw_addr);
+ const size_t res = size_t(result);
+ assert(res >= raw, "alignment decreased start addr");
+ assert(res + prefix_size + suffix_size <= raw + reserve_size,
+ "alignment increased end addr");
+ assert((res & prefix_align - 1) == 0, "bad alignment of prefix");
+ assert((res + prefix_size & suffix_align - 1) == 0,
+ "bad alignment of suffix");
+ }
+#endif
+
+ return result;
+}
+
+ReservedSpace::ReservedSpace(const size_t prefix_size,
+ const size_t prefix_align,
+ const size_t suffix_size,
+ const size_t suffix_align)
+{
+ assert(prefix_size != 0, "sanity");
+ assert(prefix_align != 0, "sanity");
+ assert(suffix_size != 0, "sanity");
+ assert(suffix_align != 0, "sanity");
+ assert((prefix_size & prefix_align - 1) == 0,
+ "prefix_size not divisible by prefix_align");
+ assert((suffix_size & suffix_align - 1) == 0,
+ "suffix_size not divisible by suffix_align");
+ assert((suffix_align & prefix_align - 1) == 0,
+ "suffix_align not divisible by prefix_align");
+
+ // On systems where the entire region has to be reserved and committed up
+ // front, the compound alignment normally done by this method is unnecessary.
+ const bool try_reserve_special = UseLargePages &&
+ prefix_align == os::large_page_size();
+ if (!os::can_commit_large_page_memory() && try_reserve_special) {
+ initialize(prefix_size + suffix_size, prefix_align, true);
+ return;
+ }
+
+ _base = NULL;
+ _size = 0;
+ _alignment = 0;
+ _special = false;
+
+ // Optimistically try to reserve the exact size needed.
+ const size_t size = prefix_size + suffix_size;
+ char* addr = os::reserve_memory(size, NULL, prefix_align);
+ if (addr == NULL) return;
+
+ // Check whether the result has the needed alignment (unlikely unless
+ // prefix_align == suffix_align).
+ const size_t ofs = size_t(addr) + prefix_size & suffix_align - 1;
+ if (ofs != 0) {
+ // Wrong alignment. Release, allocate more space and do manual alignment.
+ //
+ // On most operating systems, another allocation with a somewhat larger size
+ // will return an address "close to" that of the previous allocation. The
+ // result is often the same address (if the kernel hands out virtual
+ // addresses from low to high), or an address that is offset by the increase
+ // in size. Exploit that to minimize the amount of extra space requested.
+ if (!os::release_memory(addr, size)) {
+ fatal("os::release_memory failed");
+ }
+
+ const size_t extra = MAX2(ofs, suffix_align - ofs);
+ addr = reserve_and_align(size + extra, prefix_size, prefix_align,
+ suffix_size, suffix_align);
+ if (addr == NULL) {
+ // Try an even larger region. If this fails, address space is exhausted.
+ addr = reserve_and_align(size + suffix_align, prefix_size,
+ prefix_align, suffix_size, suffix_align);
+ }
+ }
+
+ _base = addr;
+ _size = size;
+ _alignment = prefix_align;
+}
+
+void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
+ char* requested_address) {
+ const size_t granularity = os::vm_allocation_granularity();
+ assert((size & granularity - 1) == 0,
+ "size not aligned to os::vm_allocation_granularity()");
+ assert((alignment & granularity - 1) == 0,
+ "alignment not aligned to os::vm_allocation_granularity()");
+ assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
+ "not a power of 2");
+
+ _base = NULL;
+ _size = 0;
+ _special = false;
+ _alignment = 0;
+ if (size == 0) {
+ return;
+ }
+
+ // If OS doesn't support demand paging for large page memory, we need
+ // to use reserve_memory_special() to reserve and pin the entire region.
+ bool special = large && !os::can_commit_large_page_memory();
+ char* base = NULL;
+
+ if (special) {
+    // It's not hard to implement reserve_memory_special() such that it can
+    // allocate at a fixed address, but there seems to be no use for this
+    // feature for now, so it's not implemented.
+ assert(requested_address == NULL, "not implemented");
+
+ base = os::reserve_memory_special(size);
+
+ if (base != NULL) {
+ // Check alignment constraints
+ if (alignment > 0) {
+ assert((uintptr_t) base % alignment == 0,
+ "Large pages returned a non-aligned address");
+ }
+ _special = true;
+ } else {
+ // failed; try to reserve regular memory below
+ }
+ }
+
+ if (base == NULL) {
+    // Optimistically assume that the OS returns an aligned base pointer.
+ // When reserving a large address range, most OSes seem to align to at
+ // least 64K.
+
+ // If the memory was requested at a particular address, use
+ // os::attempt_reserve_memory_at() to avoid over mapping something
+ // important. If available space is not detected, return NULL.
+
+ if (requested_address != 0) {
+ base = os::attempt_reserve_memory_at(size, requested_address);
+ } else {
+ base = os::reserve_memory(size, NULL, alignment);
+ }
+
+ if (base == NULL) return;
+
+ // Check alignment constraints
+ if (alignment > 0 && ((size_t)base & alignment - 1) != 0) {
+ // Base not aligned, retry
+ if (!os::release_memory(base, size)) fatal("os::release_memory failed");
+ // Reserve size large enough to do manual alignment and
+ // increase size to a multiple of the desired alignment
+ size = align_size_up(size, alignment);
+ size_t extra_size = size + alignment;
+ char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
+ if (extra_base == NULL) return;
+      // Do manual alignment
+ base = (char*) align_size_up((uintptr_t) extra_base, alignment);
+ assert(base >= extra_base, "just checking");
+ // Release unused areas
+ size_t unused_bottom_size = base - extra_base;
+ size_t unused_top_size = extra_size - size - unused_bottom_size;
+ assert(unused_bottom_size % os::vm_allocation_granularity() == 0,
+ "size not allocation aligned");
+ assert(unused_top_size % os::vm_allocation_granularity() == 0,
+ "size not allocation aligned");
+ if (unused_bottom_size > 0) {
+ os::release_memory(extra_base, unused_bottom_size);
+ }
+ if (unused_top_size > 0) {
+ os::release_memory(base + size, unused_top_size);
+ }
+ }
+ }
+ // Done
+ _base = base;
+ _size = size;
+ _alignment = MAX2(alignment, (size_t) os::vm_page_size());
+
+  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
+         "area must be distinguishable from marks for mark-sweep");
+  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
+         "area must be distinguishable from marks for mark-sweep");
+}
+
+
+ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
+ bool special) {
+ assert((size % os::vm_allocation_granularity()) == 0,
+ "size not allocation aligned");
+ _base = base;
+ _size = size;
+ _alignment = alignment;
+ _special = special;
+}
+
+
+ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
+ bool split, bool realloc) {
+ assert(partition_size <= size(), "partition failed");
+ if (split) {
+ os::split_reserved_memory(_base, _size, partition_size, realloc);
+ }
+ ReservedSpace result(base(), partition_size, alignment, special());
+ return result;
+}
+
+
+ReservedSpace
+ReservedSpace::last_part(size_t partition_size, size_t alignment) {
+ assert(partition_size <= size(), "partition failed");
+ ReservedSpace result(base() + partition_size, size() - partition_size,
+ alignment, special());
+ return result;
+}
+
+
+size_t ReservedSpace::page_align_size_up(size_t size) {
+ return align_size_up(size, os::vm_page_size());
+}
+
+
+size_t ReservedSpace::page_align_size_down(size_t size) {
+ return align_size_down(size, os::vm_page_size());
+}
+
+
+size_t ReservedSpace::allocation_align_size_up(size_t size) {
+ return align_size_up(size, os::vm_allocation_granularity());
+}
+
+
+size_t ReservedSpace::allocation_align_size_down(size_t size) {
+ return align_size_down(size, os::vm_allocation_granularity());
+}
+
+
+void ReservedSpace::release() {
+ if (is_reserved()) {
+ if (special()) {
+ os::release_memory_special(_base, _size);
+ } else{
+ os::release_memory(_base, _size);
+ }
+ _base = NULL;
+ _size = 0;
+ _special = false;
+ }
+}
+
+
+// VirtualSpace
+
+VirtualSpace::VirtualSpace() {
+ _low_boundary = NULL;
+ _high_boundary = NULL;
+ _low = NULL;
+ _high = NULL;
+ _lower_high = NULL;
+ _middle_high = NULL;
+ _upper_high = NULL;
+ _lower_high_boundary = NULL;
+ _middle_high_boundary = NULL;
+ _upper_high_boundary = NULL;
+ _lower_alignment = 0;
+ _middle_alignment = 0;
+ _upper_alignment = 0;
+}
+
+
+bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
+ if(!rs.is_reserved()) return false; // allocation failed.
+ assert(_low_boundary == NULL, "VirtualSpace already initialized");
+ _low_boundary = rs.base();
+ _high_boundary = low_boundary() + rs.size();
+
+ _low = low_boundary();
+ _high = low();
+
+ _special = rs.special();
+
+ // When a VirtualSpace begins life at a large size, make all future expansion
+ // and shrinking occur aligned to a granularity of large pages. This avoids
+ // fragmentation of physical addresses that inhibits the use of large pages
+ // by the OS virtual memory system. Empirically, we see that with a 4MB
+ // page size, the only spaces that get handled this way are codecache and
+ // the heap itself, both of which provide a substantial performance
+ // boost in many benchmarks when covered by large pages.
+ //
+ // No attempt is made to force large page alignment at the very top and
+ // bottom of the space if they are not aligned so already.
+ _lower_alignment = os::vm_page_size();
+ _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
+ _upper_alignment = os::vm_page_size();
+
+ // End of each region
+ _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
+ _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
+ _upper_high_boundary = high_boundary();
+
+ // High address of each region
+ _lower_high = low_boundary();
+ _middle_high = lower_high_boundary();
+ _upper_high = middle_high_boundary();
+
+ // commit to initial size
+ if (committed_size > 0) {
+ if (!expand_by(committed_size)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+VirtualSpace::~VirtualSpace() {
+ release();
+}
+
+
+void VirtualSpace::release() {
+ (void)os::release_memory(low_boundary(), reserved_size());
+ _low_boundary = NULL;
+ _high_boundary = NULL;
+ _low = NULL;
+ _high = NULL;
+ _lower_high = NULL;
+ _middle_high = NULL;
+ _upper_high = NULL;
+ _lower_high_boundary = NULL;
+ _middle_high_boundary = NULL;
+ _upper_high_boundary = NULL;
+ _lower_alignment = 0;
+ _middle_alignment = 0;
+ _upper_alignment = 0;
+ _special = false;
+}
+
+
+size_t VirtualSpace::committed_size() const {
+ return pointer_delta(high(), low(), sizeof(char));
+}
+
+
+size_t VirtualSpace::reserved_size() const {
+ return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
+}
+
+
+size_t VirtualSpace::uncommitted_size() const {
+ return reserved_size() - committed_size();
+}
+
+
+bool VirtualSpace::contains(const void* p) const {
+ return low() <= (const char*) p && (const char*) p < high();
+}
+
+/*
+  First we need to determine whether a particular virtual space is using large
+  pages. This is done in the initialize function, and only virtual spaces
+  that are larger than LargePageSizeInBytes use large pages. Once we
+  have determined this, all expand_by and shrink_by calls must grow and
+  shrink by large-page-size chunks. If a particular request
+  is within the current large page, the call to commit or uncommit memory
+  can be ignored. In the case that the low and high boundaries of this
+  space are not large-page aligned, the pages leading up to the first large
+  page address and the pages after the last large page address must be
+  allocated with default pages.
+*/
+bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
+ if (uncommitted_size() < bytes) return false;
+
+ if (special()) {
+ // don't commit memory if the entire space is pinned in memory
+ _high += bytes;
+ return true;
+ }
+
+ char* previous_high = high();
+ char* unaligned_new_high = high() + bytes;
+ assert(unaligned_new_high <= high_boundary(),
+ "cannot expand by more than upper boundary");
+
+ // Calculate where the new high for each of the regions should be. If
+ // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
+ // then the unaligned lower and upper new highs would be the
+ // lower_high() and upper_high() respectively.
+ char* unaligned_lower_new_high =
+ MIN2(unaligned_new_high, lower_high_boundary());
+ char* unaligned_middle_new_high =
+ MIN2(unaligned_new_high, middle_high_boundary());
+ char* unaligned_upper_new_high =
+ MIN2(unaligned_new_high, upper_high_boundary());
+
+  // Align the new highs based on each region's alignment. Lower and upper
+  // alignment will always be the default page size; middle alignment will be
+ // LargePageSizeInBytes if the actual size of the virtual space is in
+ // fact larger than LargePageSizeInBytes.
+ char* aligned_lower_new_high =
+ (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
+ char* aligned_middle_new_high =
+ (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
+ char* aligned_upper_new_high =
+ (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
+
+  // Determine which regions need to grow in this expand_by call.
+  // If you are growing in the lower region, high() must be in that
+  // region, so calculate the size based on high(). For the middle and
+  // upper regions, determine the starting point of growth based on the
+  // location of high(). By getting the MAX of the region's low address
+  // (or the previous region's high address) and high(), we can tell if it
+  // is an intra- or inter-region growth.
+ size_t lower_needs = 0;
+ if (aligned_lower_new_high > lower_high()) {
+ lower_needs =
+ pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
+ }
+ size_t middle_needs = 0;
+ if (aligned_middle_new_high > middle_high()) {
+ middle_needs =
+ pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
+ }
+ size_t upper_needs = 0;
+ if (aligned_upper_new_high > upper_high()) {
+ upper_needs =
+ pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
+ }
+
+ // Check contiguity.
+ assert(low_boundary() <= lower_high() &&
+ lower_high() <= lower_high_boundary(),
+ "high address must be contained within the region");
+ assert(lower_high_boundary() <= middle_high() &&
+ middle_high() <= middle_high_boundary(),
+ "high address must be contained within the region");
+ assert(middle_high_boundary() <= upper_high() &&
+ upper_high() <= upper_high_boundary(),
+ "high address must be contained within the region");
+
+ // Commit regions
+ if (lower_needs > 0) {
+ assert(low_boundary() <= lower_high() &&
+ lower_high() + lower_needs <= lower_high_boundary(),
+ "must not expand beyond region");
+ if (!os::commit_memory(lower_high(), lower_needs)) {
+ debug_only(warning("os::commit_memory failed"));
+ return false;
+ } else {
+ _lower_high += lower_needs;
+ }
+ }
+ if (middle_needs > 0) {
+ assert(lower_high_boundary() <= middle_high() &&
+ middle_high() + middle_needs <= middle_high_boundary(),
+ "must not expand beyond region");
+ if (!os::commit_memory(middle_high(), middle_needs, middle_alignment())) {
+ debug_only(warning("os::commit_memory failed"));
+ return false;
+ }
+ _middle_high += middle_needs;
+ }
+ if (upper_needs > 0) {
+ assert(middle_high_boundary() <= upper_high() &&
+ upper_high() + upper_needs <= upper_high_boundary(),
+ "must not expand beyond region");
+ if (!os::commit_memory(upper_high(), upper_needs)) {
+ debug_only(warning("os::commit_memory failed"));
+ return false;
+ } else {
+ _upper_high += upper_needs;
+ }
+ }
+
+ if (pre_touch || AlwaysPreTouch) {
+ int vm_ps = os::vm_page_size();
+ for (char* curr = previous_high;
+ curr < unaligned_new_high;
+ curr += vm_ps) {
+ // Note the use of a write here; originally we tried just a read, but
+ // since the value read was unused, the optimizer removed the read.
+ // If we ever have a concurrent touchahead thread, we'll want to use
+ // a read, to avoid the potential of overwriting data (if a mutator
+ // thread beats the touchahead thread to a page). There are various
+ // ways of making sure this read is not optimized away: for example,
+ // generating the code for a read procedure at runtime.
+ *curr = 0;
+ }
+ }
+
+ _high += bytes;
+ return true;
+}
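+
+// Illustrative sketch (not part of the original source), with made-up numbers, of
+// how the aligned new highs above are derived: the requested new high is clamped
+// to each region's boundary and then rounded up to that region's alignment; the
+// per-region commit size is then the distance from that region's current high.
+//
+//   #include <stdint.h>
+//   #include <cstdio>
+//
+//   static uintptr_t round_up(uintptr_t x, uintptr_t align) {   // align is a power of two
+//     return (x + align - 1) & ~(align - 1);
+//   }
+//   static uintptr_t min2(uintptr_t a, uintptr_t b) { return a < b ? a : b; }
+//
+//   int main() {
+//     uintptr_t lower_boundary  = 0x40400000;   // first large-page boundary
+//     uintptr_t middle_boundary = 0x4FC00000;   // last large-page boundary
+//     uintptr_t high_boundary   = 0x4FFFF000;   // end of the reserved space
+//     uintptr_t default_page    = 0x1000;       // 4 KB
+//     uintptr_t large_page      = 0x400000;     // 4 MB
+//
+//     uintptr_t new_high = 0x40802000;          // current high + requested bytes
+//
+//     uintptr_t lower  = round_up(min2(new_high, lower_boundary),  default_page);
+//     uintptr_t middle = round_up(min2(new_high, middle_boundary), large_page);
+//     uintptr_t upper  = round_up(min2(new_high, high_boundary),   default_page);
+//
+//     printf("lower %#lx  middle %#lx  upper %#lx\n",
+//            (unsigned long) lower, (unsigned long) middle, (unsigned long) upper);
+//     return 0;                                 // 0x40400000, 0x40c00000, 0x40802000
+//   }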
+
+// A page is uncommitted only if the contents of the entire page are deemed unusable.
+// Continue to decrement the high() pointer until it reaches a page boundary,
+// at which point that particular page can be uncommitted.
+void VirtualSpace::shrink_by(size_t size) {
+ if (committed_size() < size)
+ fatal("Cannot shrink virtual space to negative size");
+
+ if (special()) {
+ // don't uncommit if the entire space is pinned in memory
+ _high -= size;
+ return;
+ }
+
+ char* unaligned_new_high = high() - size;
+ assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
+
+ // Calculate new unaligned address
+ char* unaligned_upper_new_high =
+ MAX2(unaligned_new_high, middle_high_boundary());
+ char* unaligned_middle_new_high =
+ MAX2(unaligned_new_high, lower_high_boundary());
+ char* unaligned_lower_new_high =
+ MAX2(unaligned_new_high, low_boundary());
+
+ // Align address to region's alignment
+ char* aligned_upper_new_high =
+ (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
+ char* aligned_middle_new_high =
+ (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
+ char* aligned_lower_new_high =
+ (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
+
+ // Determine which regions need to shrink
+ size_t upper_needs = 0;
+ if (aligned_upper_new_high < upper_high()) {
+ upper_needs =
+ pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
+ }
+ size_t middle_needs = 0;
+ if (aligned_middle_new_high < middle_high()) {
+ middle_needs =
+ pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
+ }
+ size_t lower_needs = 0;
+ if (aligned_lower_new_high < lower_high()) {
+ lower_needs =
+ pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
+ }
+
+ // Check contiguity.
+ assert(middle_high_boundary() <= upper_high() &&
+ upper_high() <= upper_high_boundary(),
+ "high address must be contained within the region");
+ assert(lower_high_boundary() <= middle_high() &&
+ middle_high() <= middle_high_boundary(),
+ "high address must be contained within the region");
+ assert(low_boundary() <= lower_high() &&
+ lower_high() <= lower_high_boundary(),
+ "high address must be contained within the region");
+
+ // Uncommit
+ if (upper_needs > 0) {
+ assert(middle_high_boundary() <= aligned_upper_new_high &&
+ aligned_upper_new_high + upper_needs <= upper_high_boundary(),
+ "must not shrink beyond region");
+ if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
+ debug_only(warning("os::uncommit_memory failed"));
+ return;
+ } else {
+ _upper_high -= upper_needs;
+ }
+ }
+ if (middle_needs > 0) {
+ assert(lower_high_boundary() <= aligned_middle_new_high &&
+ aligned_middle_new_high + middle_needs <= middle_high_boundary(),
+ "must not shrink beyond region");
+ if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
+ debug_only(warning("os::uncommit_memory failed"));
+ return;
+ } else {
+ _middle_high -= middle_needs;
+ }
+ }
+ if (lower_needs > 0) {
+ assert(low_boundary() <= aligned_lower_new_high &&
+ aligned_lower_new_high + lower_needs <= lower_high_boundary(),
+ "must not shrink beyond region");
+ if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
+ debug_only(warning("os::uncommit_memory failed"));
+ return;
+ } else {
+ _lower_high -= lower_needs;
+ }
+ }
+
+ _high -= size;
+}
+
+#ifndef PRODUCT
+void VirtualSpace::check_for_contiguity() {
+ // Check contiguity.
+ assert(low_boundary() <= lower_high() &&
+ lower_high() <= lower_high_boundary(),
+ "high address must be contained within the region");
+ assert(lower_high_boundary() <= middle_high() &&
+ middle_high() <= middle_high_boundary(),
+ "high address must be contained within the region");
+ assert(middle_high_boundary() <= upper_high() &&
+ upper_high() <= upper_high_boundary(),
+ "high address must be contained within the region");
+ assert(low() >= low_boundary(), "low");
+ assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
+ assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
+ assert(high() <= upper_high(), "upper high");
+}
+
+void VirtualSpace::print() {
+ tty->print ("Virtual space:");
+ if (special()) tty->print(" (pinned in memory)");
+ tty->cr();
+ tty->print_cr(" - committed: %ld", committed_size());
+ tty->print_cr(" - reserved: %ld", reserved_size());
+ tty->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
+ tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
+}
+
+#endif
diff --git a/src/share/vm/runtime/virtualspace.hpp b/src/share/vm/runtime/virtualspace.hpp
new file mode 100644
index 000000000..ad952e49a
--- /dev/null
+++ b/src/share/vm/runtime/virtualspace.hpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// ReservedSpace is a data structure for reserving a contiguous address range.
+
+class ReservedSpace VALUE_OBJ_CLASS_SPEC {
+ friend class VMStructs;
+ private:
+ char* _base;
+ size_t _size;
+ size_t _alignment;
+ bool _special;
+
+ // ReservedSpace
+ ReservedSpace(char* base, size_t size, size_t alignment, bool special);
+ void initialize(size_t size, size_t alignment, bool large,
+ char* requested_address = NULL);
+
+ // Release parts of an already-reserved memory region [addr, addr + len) to
+ // get a new region that has "compound alignment." Return the start of the
+ // resulting region, or NULL on failure.
+ //
+ // The region is logically divided into a prefix and a suffix. The prefix
+ // starts at the result address, which is aligned to prefix_align. The suffix
+ // starts at result address + prefix_size, which is aligned to suffix_align.
+  // The total size of the resulting region is prefix_size + suffix_size.
+ char* align_reserved_region(char* addr, const size_t len,
+ const size_t prefix_size,
+ const size_t prefix_align,
+ const size_t suffix_size,
+ const size_t suffix_align);
+
+  // Reserve memory, call align_reserved_region() to align it, and return the
+  // result.
+ char* reserve_and_align(const size_t reserve_size,
+ const size_t prefix_size,
+ const size_t prefix_align,
+ const size_t suffix_size,
+ const size_t suffix_align);
+
+ public:
+ // Constructor
+ ReservedSpace(size_t size);
+ ReservedSpace(size_t size, size_t alignment, bool large,
+ char* requested_address = NULL);
+ ReservedSpace(const size_t prefix_size, const size_t prefix_align,
+ const size_t suffix_size, const size_t suffix_align);
+
+ // Accessors
+ char* base() const { return _base; }
+ size_t size() const { return _size; }
+ size_t alignment() const { return _alignment; }
+ bool special() const { return _special; }
+
+ bool is_reserved() const { return _base != NULL; }
+ void release();
+
+ // Splitting
+ ReservedSpace first_part(size_t partition_size, size_t alignment,
+ bool split = false, bool realloc = true);
+ ReservedSpace last_part (size_t partition_size, size_t alignment);
+
+ // These simply call the above using the default alignment.
+ inline ReservedSpace first_part(size_t partition_size,
+ bool split = false, bool realloc = true);
+ inline ReservedSpace last_part (size_t partition_size);
+
+ // Alignment
+ static size_t page_align_size_up(size_t size);
+ static size_t page_align_size_down(size_t size);
+ static size_t allocation_align_size_up(size_t size);
+ static size_t allocation_align_size_down(size_t size);
+};
+
+ReservedSpace
+ReservedSpace::first_part(size_t partition_size, bool split, bool realloc)
+{
+ return first_part(partition_size, alignment(), split, realloc);
+}
+
+ReservedSpace ReservedSpace::last_part(size_t partition_size)
+{
+ return last_part(partition_size, alignment());
+}
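+
+// Usage sketch (illustrative only; total_size and part_size are hypothetical
+// values chosen by the caller, and partition_size is assumed to be the split
+// offset): a reserved range can be split into two adjacent pieces that are
+// then managed independently.
+//
+//   ReservedSpace rs(total_size);                 // reserve the whole range
+//   ReservedSpace lo = rs.first_part(part_size);  // [base, base + part_size)
+//   ReservedSpace hi = rs.last_part(part_size);   // [base + part_size, base + total_size)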
+
+// VirtualSpace is a data structure for committing a previously reserved address range in smaller chunks.
+
+class VirtualSpace VALUE_OBJ_CLASS_SPEC {
+ friend class VMStructs;
+ private:
+ // Reserved area
+ char* _low_boundary;
+ char* _high_boundary;
+
+ // Committed area
+ char* _low;
+ char* _high;
+
+ // If true, the entire space has been committed and pinned in memory, so no
+ // os::commit_memory() or os::uncommit_memory() calls are needed.
+ bool _special;
+
+ // MPSS Support
+ // Each VirtualSpace is logically divided into a lower, middle, and upper region.
+ // Each region has an end boundary and a high pointer, which is the
+ // high-water mark for the last allocated byte.
+ // The lower and upper regions, which are not aligned to LargePageSizeInBytes,
+ // use the default page size; the middle region uses the large page size.
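+ //
+ // Illustrative layout:
+ //
+ //   low_boundary()                                            high_boundary()
+ //   |--- lower region ---|-------- middle region --------|--- upper region ---|
+ //      default page size           large page size           default page size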
+ char* _lower_high;
+ char* _middle_high;
+ char* _upper_high;
+
+ char* _lower_high_boundary;
+ char* _middle_high_boundary;
+ char* _upper_high_boundary;
+
+ size_t _lower_alignment;
+ size_t _middle_alignment;
+ size_t _upper_alignment;
+
+ // MPSS Accessors
+ char* lower_high() const { return _lower_high; }
+ char* middle_high() const { return _middle_high; }
+ char* upper_high() const { return _upper_high; }
+
+ char* lower_high_boundary() const { return _lower_high_boundary; }
+ char* middle_high_boundary() const { return _middle_high_boundary; }
+ char* upper_high_boundary() const { return _upper_high_boundary; }
+
+ size_t lower_alignment() const { return _lower_alignment; }
+ size_t middle_alignment() const { return _middle_alignment; }
+ size_t upper_alignment() const { return _upper_alignment; }
+
+ public:
+ // Committed area
+ char* low() const { return _low; }
+ char* high() const { return _high; }
+
+ // Reserved area
+ char* low_boundary() const { return _low_boundary; }
+ char* high_boundary() const { return _high_boundary; }
+
+ bool special() const { return _special; }
+
+ public:
+ // Initialization
+ VirtualSpace();
+ bool initialize(ReservedSpace rs, size_t committed_byte_size);
+
+ // Destruction
+ ~VirtualSpace();
+
+ // Testers (all sizes are byte sizes)
+ size_t committed_size() const;
+ size_t reserved_size() const;
+ size_t uncommitted_size() const;
+ bool contains(const void* p) const;
+
+ // Operations
+ // returns true on success, false otherwise
+ bool expand_by(size_t bytes, bool pre_touch = false);
+ void shrink_by(size_t bytes);
+ void release();
+
+ void check_for_contiguity() PRODUCT_RETURN;
+
+ // Debugging
+ void print() PRODUCT_RETURN;
+};
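+
+// Usage sketch (illustrative only; the 64M/16M/8M figures are arbitrary and M
+// is assumed to be the VM's usual megabyte size constant): address space is
+// reserved once up front and then committed in chunks as the VM needs it.
+//
+//   ReservedSpace rs(64 * M);            // reserve 64M of contiguous addresses
+//   VirtualSpace  vs;
+//   if (vs.initialize(rs, 16 * M)) {     // commit the first 16M
+//     vs.expand_by(8 * M);               // later, commit another 8M
+//     vs.shrink_by(4 * M);               // or uncommit from the top
+//   }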
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
new file mode 100644
index 000000000..fbc424303
--- /dev/null
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -0,0 +1,2277 @@
+/*
+ * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_vmStructs.cpp.incl"
+
+// Note: the cross-product of (c1, c2, product, nonproduct, ...),
+// (nonstatic, static), and (unchecked, checked) has not been taken.
+// Only the macros currently needed have been defined.
+
+// A field whose type is not checked is given a null string as the
+// type name, indicating an "opaque" type to the serviceability agent.
+
+// NOTE: there is an interdependency between this file and
+// HotSpotTypeDataBase.java, which parses the type strings.
+
+#ifndef REG_COUNT
+ #define REG_COUNT 0
+#endif
+
+// The whole purpose of this function is to work around bug c++/27724 in gcc
+// 4.1.1; with optimization turned on it does not affect the produced code.
+static inline uint64_t cast_uint64_t(size_t x)
+{
+ return x;
+}
+
+
+//--------------------------------------------------------------------------------
+// VM_STRUCTS
+//
+// This list enumerates all of the fields the serviceability agent
+// needs to know about. Be sure to see also the type table below this one.
+// NOTE that there are platform-specific additions to this table in
+// vmStructs_<os>_<cpu>.hpp.
+
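+// Illustrative sketch only (not the actual HotSpot definitions): each
+// nonstatic_field(klass, field, type) line below expands to whatever macro
+// body the caller of VM_STRUCTS supplies for nonstatic_field; typically that
+// is one record of a table the serviceability agent reads out of the target
+// VM, conceptually along these lines:
+//
+//   struct ExampleFieldEntry {      // hypothetical name
+//     const char* typeName;         // e.g. "oopDesc"
+//     const char* fieldName;        // e.g. "_mark"
+//     const char* typeString;       // e.g. "markOop"; NULL for unchecked fields
+//     int         isStatic;         // static_field vs. nonstatic_field
+//     uint64_t    offset;           // field offset within the type (nonstatic)
+//     void*       address;          // field address (static)
+//   };
+//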
+#define VM_STRUCTS(nonstatic_field, \
+ static_field, \
+ unchecked_nonstatic_field, \
+ volatile_nonstatic_field, \
+ nonproduct_nonstatic_field, \
+ c1_nonstatic_field, \
+ c2_nonstatic_field, \
+ unchecked_c1_static_field, \
+ unchecked_c2_static_field, \
+ last_entry) \
+ \
+ /******************************************************************/ \
+ /* OopDesc and Klass hierarchies (NOTE: methodDataOop incomplete) */ \
+ /******************************************************************/ \
+ \
+ volatile_nonstatic_field(oopDesc, _mark, markOop) \
+ nonstatic_field(oopDesc, _klass, klassOop) \
+ static_field(oopDesc, _bs, BarrierSet*) \
+ nonstatic_field(arrayKlass, _dimension, int) \
+ nonstatic_field(arrayKlass, _higher_dimension, klassOop) \
+ nonstatic_field(arrayKlass, _lower_dimension, klassOop) \
+ nonstatic_field(arrayKlass, _vtable_len, int) \
+ nonstatic_field(arrayKlass, _alloc_size, juint) \
+ nonstatic_field(arrayKlass, _component_mirror, oop) \
+ nonstatic_field(arrayOopDesc, _length, int) \
+ nonstatic_field(compiledICHolderKlass, _alloc_size, juint) \
+ nonstatic_field(compiledICHolderOopDesc, _holder_method, methodOop) \
+ nonstatic_field(compiledICHolderOopDesc, _holder_klass, klassOop) \
+ nonstatic_field(constantPoolOopDesc, _tags, typeArrayOop) \
+ nonstatic_field(constantPoolOopDesc, _cache, constantPoolCacheOop) \
+ nonstatic_field(constantPoolOopDesc, _pool_holder, klassOop) \
+ nonstatic_field(constantPoolCacheOopDesc, _constant_pool, constantPoolOop) \
+ nonstatic_field(instanceKlass, _array_klasses, klassOop) \
+ nonstatic_field(instanceKlass, _methods, objArrayOop) \
+ nonstatic_field(instanceKlass, _method_ordering, typeArrayOop) \
+ nonstatic_field(instanceKlass, _local_interfaces, objArrayOop) \
+ nonstatic_field(instanceKlass, _transitive_interfaces, objArrayOop) \
+ nonstatic_field(instanceKlass, _nof_implementors, int) \
+ nonstatic_field(instanceKlass, _implementors[0], klassOop) \
+ nonstatic_field(instanceKlass, _fields, typeArrayOop) \
+ nonstatic_field(instanceKlass, _constants, constantPoolOop) \
+ nonstatic_field(instanceKlass, _class_loader, oop) \
+ nonstatic_field(instanceKlass, _protection_domain, oop) \
+ nonstatic_field(instanceKlass, _signers, objArrayOop) \
+ nonstatic_field(instanceKlass, _source_file_name, symbolOop) \
+ nonstatic_field(instanceKlass, _source_debug_extension, symbolOop) \
+ nonstatic_field(instanceKlass, _inner_classes, typeArrayOop) \
+ nonstatic_field(instanceKlass, _nonstatic_field_size, int) \
+ nonstatic_field(instanceKlass, _static_field_size, int) \
+ nonstatic_field(instanceKlass, _static_oop_field_size, int) \
+ nonstatic_field(instanceKlass, _nonstatic_oop_map_size, int) \
+ nonstatic_field(instanceKlass, _is_marked_dependent, bool) \
+ nonstatic_field(instanceKlass, _minor_version, u2) \
+ nonstatic_field(instanceKlass, _major_version, u2) \
+ nonstatic_field(instanceKlass, _init_state, instanceKlass::ClassState) \
+ nonstatic_field(instanceKlass, _init_thread, Thread*) \
+ nonstatic_field(instanceKlass, _vtable_len, int) \
+ nonstatic_field(instanceKlass, _itable_len, int) \
+ nonstatic_field(instanceKlass, _reference_type, ReferenceType) \
+ volatile_nonstatic_field(instanceKlass, _oop_map_cache, OopMapCache*) \
+ nonstatic_field(instanceKlass, _jni_ids, JNIid*) \
+ nonstatic_field(instanceKlass, _osr_nmethods_head, nmethod*) \
+ nonstatic_field(instanceKlass, _breakpoints, BreakpointInfo*) \
+ nonstatic_field(instanceKlass, _generic_signature, symbolOop) \
+ nonstatic_field(instanceKlass, _methods_jmethod_ids, jmethodID*) \
+ nonstatic_field(instanceKlass, _methods_cached_itable_indices, int*) \
+ volatile_nonstatic_field(instanceKlass, _idnum_allocated_count, u2) \
+ nonstatic_field(instanceKlass, _class_annotations, typeArrayOop) \
+ nonstatic_field(instanceKlass, _fields_annotations, objArrayOop) \
+ nonstatic_field(instanceKlass, _methods_annotations, objArrayOop) \
+ nonstatic_field(instanceKlass, _methods_parameter_annotations, objArrayOop) \
+ nonstatic_field(instanceKlass, _methods_default_annotations, objArrayOop) \
+ nonstatic_field(Klass, _super_check_offset, juint) \
+ nonstatic_field(Klass, _secondary_super_cache, klassOop) \
+ nonstatic_field(Klass, _secondary_supers, objArrayOop) \
+ nonstatic_field(Klass, _primary_supers[0], klassOop) \
+ nonstatic_field(Klass, _java_mirror, oop) \
+ nonstatic_field(Klass, _modifier_flags, jint) \
+ nonstatic_field(Klass, _super, klassOop) \
+ nonstatic_field(Klass, _layout_helper, jint) \
+ nonstatic_field(Klass, _name, symbolOop) \
+ nonstatic_field(Klass, _access_flags, AccessFlags) \
+ nonstatic_field(Klass, _subklass, klassOop) \
+ nonstatic_field(Klass, _next_sibling, klassOop) \
+ nonproduct_nonstatic_field(Klass, _verify_count, int) \
+ nonstatic_field(Klass, _alloc_count, juint) \
+ nonstatic_field(klassKlass, _alloc_size, juint) \
+ nonstatic_field(methodKlass, _alloc_size, juint) \
+ nonstatic_field(methodDataOopDesc, _size, int) \
+ nonstatic_field(methodDataOopDesc, _method, methodOop) \
+ nonstatic_field(methodOopDesc, _constMethod, constMethodOop) \
+ nonstatic_field(methodOopDesc, _constants, constantPoolOop) \
+ c2_nonstatic_field(methodOopDesc, _method_data, methodDataOop) \
+ c2_nonstatic_field(methodOopDesc, _interpreter_invocation_count, int) \
+ nonstatic_field(methodOopDesc, _access_flags, AccessFlags) \
+ nonstatic_field(methodOopDesc, _vtable_index, int) \
+ nonstatic_field(methodOopDesc, _method_size, u2) \
+ nonstatic_field(methodOopDesc, _max_stack, u2) \
+ nonstatic_field(methodOopDesc, _max_locals, u2) \
+ nonstatic_field(methodOopDesc, _size_of_parameters, u2) \
+ c2_nonstatic_field(methodOopDesc, _interpreter_throwout_count, u2) \
+ nonstatic_field(methodOopDesc, _number_of_breakpoints, u2) \
+ nonstatic_field(methodOopDesc, _invocation_counter, InvocationCounter) \
+ nonstatic_field(methodOopDesc, _backedge_counter, InvocationCounter) \
+ nonproduct_nonstatic_field(methodOopDesc, _compiled_invocation_count, int) \
+ volatile_nonstatic_field(methodOopDesc, _code, nmethod*) \
+ nonstatic_field(methodOopDesc, _i2i_entry, address) \
+ nonstatic_field(methodOopDesc, _adapter, AdapterHandlerEntry*) \
+ volatile_nonstatic_field(methodOopDesc, _from_compiled_entry, address) \
+ volatile_nonstatic_field(methodOopDesc, _from_interpreted_entry, address) \
+ volatile_nonstatic_field(constMethodOopDesc, _fingerprint, uint64_t) \
+ nonstatic_field(constMethodOopDesc, _method, methodOop) \
+ nonstatic_field(constMethodOopDesc, _stackmap_data, typeArrayOop) \
+ nonstatic_field(constMethodOopDesc, _exception_table, typeArrayOop) \
+ nonstatic_field(constMethodOopDesc, _constMethod_size, int) \
+ nonstatic_field(constMethodOopDesc, _interpreter_kind, jbyte) \
+ nonstatic_field(constMethodOopDesc, _flags, jbyte) \
+ nonstatic_field(constMethodOopDesc, _code_size, u2) \
+ nonstatic_field(constMethodOopDesc, _name_index, u2) \
+ nonstatic_field(constMethodOopDesc, _signature_index, u2) \
+ nonstatic_field(constMethodOopDesc, _method_idnum, u2) \
+ nonstatic_field(constMethodOopDesc, _generic_signature_index, u2) \
+ nonstatic_field(objArrayKlass, _element_klass, klassOop) \
+ nonstatic_field(objArrayKlass, _bottom_klass, klassOop) \
+ nonstatic_field(symbolKlass, _alloc_size, juint) \
+ nonstatic_field(symbolOopDesc, _length, unsigned short) \
+ unchecked_nonstatic_field(symbolOopDesc, _body, sizeof(jbyte)) /* NOTE: no type */ \
+ nonstatic_field(typeArrayKlass, _max_length, int) \
+ \
+ /***********************/ \
+ /* Constant Pool Cache */ \
+ /***********************/ \
+ \
+ volatile_nonstatic_field(ConstantPoolCacheEntry, _indices, intx) \
+ volatile_nonstatic_field(ConstantPoolCacheEntry, _f1, oop) \
+ volatile_nonstatic_field(ConstantPoolCacheEntry, _f2, intx) \
+ volatile_nonstatic_field(ConstantPoolCacheEntry, _flags, intx) \
+ \
+ /********************************/ \
+ /* MethodOop-related structures */ \
+ /********************************/ \
+ \
+ nonstatic_field(CheckedExceptionElement, class_cp_index, u2) \
+ nonstatic_field(LocalVariableTableElement, start_bci, u2) \
+ nonstatic_field(LocalVariableTableElement, length, u2) \
+ nonstatic_field(LocalVariableTableElement, name_cp_index, u2) \
+ nonstatic_field(LocalVariableTableElement, descriptor_cp_index, u2) \
+ nonstatic_field(LocalVariableTableElement, signature_cp_index, u2) \
+ nonstatic_field(LocalVariableTableElement, slot, u2) \
+ nonstatic_field(BreakpointInfo, _orig_bytecode, Bytecodes::Code) \
+ nonstatic_field(BreakpointInfo, _bci, int) \
+ nonstatic_field(BreakpointInfo, _name_index, u2) \
+ nonstatic_field(BreakpointInfo, _signature_index, u2) \
+ nonstatic_field(BreakpointInfo, _next, BreakpointInfo*) \
+ /***********/ \
+ /* JNI IDs */ \
+ /***********/ \
+ \
+ nonstatic_field(JNIid, _holder, klassOop) \
+ nonstatic_field(JNIid, _next, JNIid*) \
+ nonstatic_field(JNIid, _offset, int) \
+ /************/ \
+ /* Universe */ \
+ /************/ \
+ \
+ static_field(Universe, _boolArrayKlassObj, klassOop) \
+ static_field(Universe, _byteArrayKlassObj, klassOop) \
+ static_field(Universe, _charArrayKlassObj, klassOop) \
+ static_field(Universe, _intArrayKlassObj, klassOop) \
+ static_field(Universe, _shortArrayKlassObj, klassOop) \
+ static_field(Universe, _longArrayKlassObj, klassOop) \
+ static_field(Universe, _singleArrayKlassObj, klassOop) \
+ static_field(Universe, _doubleArrayKlassObj, klassOop) \
+ static_field(Universe, _symbolKlassObj, klassOop) \
+ static_field(Universe, _methodKlassObj, klassOop) \
+ static_field(Universe, _constMethodKlassObj, klassOop) \
+ static_field(Universe, _methodDataKlassObj, klassOop) \
+ static_field(Universe, _klassKlassObj, klassOop) \
+ static_field(Universe, _arrayKlassKlassObj, klassOop) \
+ static_field(Universe, _objArrayKlassKlassObj, klassOop) \
+ static_field(Universe, _typeArrayKlassKlassObj, klassOop) \
+ static_field(Universe, _instanceKlassKlassObj, klassOop) \
+ static_field(Universe, _constantPoolKlassObj, klassOop) \
+ static_field(Universe, _constantPoolCacheKlassObj, klassOop) \
+ static_field(Universe, _compiledICHolderKlassObj, klassOop) \
+ static_field(Universe, _systemObjArrayKlassObj, klassOop) \
+ static_field(Universe, _mirrors[0], oop) \
+ static_field(Universe, _main_thread_group, oop) \
+ static_field(Universe, _system_thread_group, oop) \
+ static_field(Universe, _the_empty_byte_array, typeArrayOop) \
+ static_field(Universe, _the_empty_short_array, typeArrayOop) \
+ static_field(Universe, _the_empty_int_array, typeArrayOop) \
+ static_field(Universe, _the_empty_system_obj_array, objArrayOop) \
+ static_field(Universe, _the_empty_class_klass_array, objArrayOop) \
+ static_field(Universe, _out_of_memory_error_java_heap, oop) \
+ static_field(Universe, _out_of_memory_error_perm_gen, oop) \
+ static_field(Universe, _out_of_memory_error_array_size, oop) \
+ static_field(Universe, _out_of_memory_error_gc_overhead_limit, oop) \
+ static_field(Universe, _null_ptr_exception_instance, oop) \
+ static_field(Universe, _arithmetic_exception_instance, oop) \
+ static_field(Universe, _vm_exception, oop) \
+ static_field(Universe, _collectedHeap, CollectedHeap*) \
+ static_field(Universe, _base_vtable_size, int) \
+ static_field(Universe, _bootstrapping, bool) \
+ static_field(Universe, _fully_initialized, bool) \
+ static_field(Universe, _verify_count, int) \
+ \
+ /**********************************************************************************/ \
+ /* Generation and Space hierarchies */ \
+ /**********************************************************************************/ \
+ \
+ unchecked_nonstatic_field(ageTable, sizes, sizeof(ageTable::sizes)) \
+ \
+ nonstatic_field(BarrierSet, _max_covered_regions, int) \
+ nonstatic_field(BlockOffsetTable, _bottom, HeapWord*) \
+ nonstatic_field(BlockOffsetTable, _end, HeapWord*) \
+ \
+ nonstatic_field(BlockOffsetSharedArray, _reserved, MemRegion) \
+ nonstatic_field(BlockOffsetSharedArray, _end, HeapWord*) \
+ nonstatic_field(BlockOffsetSharedArray, _vs, VirtualSpace) \
+ nonstatic_field(BlockOffsetSharedArray, _offset_array, u_char*) \
+ \
+ nonstatic_field(BlockOffsetArray, _array, BlockOffsetSharedArray*) \
+ nonstatic_field(BlockOffsetArray, _sp, Space*) \
+ nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_threshold, HeapWord*) \
+ nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_index, size_t) \
+ \
+ nonstatic_field(BlockOffsetArrayNonContigSpace, _unallocated_block, HeapWord*) \
+ \
+ nonstatic_field(CardGeneration, _rs, GenRemSet*) \
+ nonstatic_field(CardGeneration, _bts, BlockOffsetSharedArray*) \
+ \
+ nonstatic_field(CardTableModRefBS, _whole_heap, const MemRegion) \
+ nonstatic_field(CardTableModRefBS, _guard_index, const size_t) \
+ nonstatic_field(CardTableModRefBS, _last_valid_index, const size_t) \
+ nonstatic_field(CardTableModRefBS, _page_size, const size_t) \
+ nonstatic_field(CardTableModRefBS, _byte_map_size, const size_t) \
+ nonstatic_field(CardTableModRefBS, _byte_map, jbyte*) \
+ nonstatic_field(CardTableModRefBS, _cur_covered_regions, int) \
+ nonstatic_field(CardTableModRefBS, _covered, MemRegion*) \
+ nonstatic_field(CardTableModRefBS, _committed, MemRegion*) \
+ nonstatic_field(CardTableModRefBS, _guard_region, MemRegion) \
+ nonstatic_field(CardTableModRefBS, byte_map_base, jbyte*) \
+ \
+ nonstatic_field(CardTableRS, _ct_bs, CardTableModRefBS) \
+ \
+ nonstatic_field(CollectedHeap, _reserved, MemRegion) \
+ nonstatic_field(SharedHeap, _perm_gen, PermGen*) \
+ nonstatic_field(CollectedHeap, _barrier_set, BarrierSet*) \
+ nonstatic_field(CollectedHeap, _is_gc_active, bool) \
+ nonstatic_field(CollectedHeap, _max_heap_capacity, size_t) \
+ \
+ nonstatic_field(CompactibleSpace, _compaction_top, HeapWord*) \
+ nonstatic_field(CompactibleSpace, _first_dead, HeapWord*) \
+ nonstatic_field(CompactibleSpace, _end_of_live, HeapWord*) \
+ \
+ nonstatic_field(CompactingPermGen, _gen, OneContigSpaceCardGeneration*) \
+ \
+ nonstatic_field(ContiguousSpace, _top, HeapWord*) \
+ nonstatic_field(ContiguousSpace, _concurrent_iteration_safe_limit, HeapWord*) \
+ nonstatic_field(ContiguousSpace, _saved_mark_word, HeapWord*) \
+ \
+ nonstatic_field(DefNewGeneration, _next_gen, Generation*) \
+ nonstatic_field(DefNewGeneration, _tenuring_threshold, int) \
+ nonstatic_field(DefNewGeneration, _age_table, ageTable) \
+ nonstatic_field(DefNewGeneration, _eden_space, EdenSpace*) \
+ nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
+ nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*) \
+ \
+ nonstatic_field(EdenSpace, _gen, DefNewGeneration*) \
+ \
+ nonstatic_field(Generation, _reserved, MemRegion) \
+ nonstatic_field(Generation, _virtual_space, VirtualSpace) \
+ nonstatic_field(Generation, _level, int) \
+ nonstatic_field(Generation, _stat_record, Generation::StatRecord) \
+ \
+ nonstatic_field(Generation::StatRecord, invocations, int) \
+ nonstatic_field(Generation::StatRecord, accumulated_time, elapsedTimer) \
+ \
+ nonstatic_field(GenerationSpec, _name, Generation::Name) \
+ nonstatic_field(GenerationSpec, _init_size, size_t) \
+ nonstatic_field(GenerationSpec, _max_size, size_t) \
+ \
+ static_field(GenCollectedHeap, _gch, GenCollectedHeap*) \
+ nonstatic_field(GenCollectedHeap, _n_gens, int) \
+ unchecked_nonstatic_field(GenCollectedHeap, _gens, sizeof(GenCollectedHeap::_gens)) /* NOTE: no type */ \
+ nonstatic_field(GenCollectedHeap, _gen_specs, GenerationSpec**) \
+ \
+ nonstatic_field(HeapWord, i, char*) \
+ \
+ nonstatic_field(MemRegion, _start, HeapWord*) \
+ nonstatic_field(MemRegion, _word_size, size_t) \
+ \
+ nonstatic_field(OffsetTableContigSpace, _offsets, BlockOffsetArray) \
+ \
+ nonstatic_field(OneContigSpaceCardGeneration, _min_heap_delta_bytes, size_t) \
+ nonstatic_field(OneContigSpaceCardGeneration, _the_space, ContiguousSpace*) \
+ nonstatic_field(OneContigSpaceCardGeneration, _last_gc, WaterMark) \
+ \
+ nonstatic_field(CompactingPermGenGen, _ro_vs, VirtualSpace) \
+ nonstatic_field(CompactingPermGenGen, _rw_vs, VirtualSpace) \
+ nonstatic_field(CompactingPermGenGen, _md_vs, VirtualSpace) \
+ nonstatic_field(CompactingPermGenGen, _mc_vs, VirtualSpace) \
+ nonstatic_field(CompactingPermGenGen, _ro_space, OffsetTableContigSpace*) \
+ nonstatic_field(CompactingPermGenGen, _rw_space, OffsetTableContigSpace*) \
+ static_field(CompactingPermGenGen, unshared_bottom, HeapWord*) \
+ static_field(CompactingPermGenGen, unshared_end, HeapWord*) \
+ static_field(CompactingPermGenGen, shared_bottom, HeapWord*) \
+ static_field(CompactingPermGenGen, readonly_bottom, HeapWord*) \
+ static_field(CompactingPermGenGen, readonly_end, HeapWord*) \
+ static_field(CompactingPermGenGen, readwrite_bottom, HeapWord*) \
+ static_field(CompactingPermGenGen, readwrite_end, HeapWord*) \
+ static_field(CompactingPermGenGen, miscdata_bottom, HeapWord*) \
+ static_field(CompactingPermGenGen, miscdata_end, HeapWord*) \
+ static_field(CompactingPermGenGen, misccode_bottom, HeapWord*) \
+ static_field(CompactingPermGenGen, misccode_end, HeapWord*) \
+ static_field(CompactingPermGenGen, shared_end, HeapWord*) \
+ \
+ nonstatic_field(PermGen, _capacity_expansion_limit, size_t) \
+ \
+ nonstatic_field(PermanentGenerationSpec, _name, PermGen::Name) \
+ nonstatic_field(PermanentGenerationSpec, _init_size, size_t) \
+ nonstatic_field(PermanentGenerationSpec, _max_size, size_t) \
+ \
+ nonstatic_field(Space, _bottom, HeapWord*) \
+ nonstatic_field(Space, _end, HeapWord*) \
+ \
+ nonstatic_field(TenuredGeneration, _shrink_factor, size_t) \
+ nonstatic_field(TenuredGeneration, _capacity_at_prologue, size_t) \
+ nonstatic_field(ThreadLocalAllocBuffer, _start, HeapWord*) \
+ nonstatic_field(ThreadLocalAllocBuffer, _top, HeapWord*) \
+ nonstatic_field(ThreadLocalAllocBuffer, _end, HeapWord*) \
+ nonstatic_field(ThreadLocalAllocBuffer, _desired_size, size_t) \
+ nonstatic_field(ThreadLocalAllocBuffer, _refill_waste_limit, size_t) \
+ static_field(ThreadLocalAllocBuffer, _target_refills, unsigned) \
+ nonstatic_field(VirtualSpace, _low_boundary, char*) \
+ nonstatic_field(VirtualSpace, _high_boundary, char*) \
+ nonstatic_field(VirtualSpace, _low, char*) \
+ nonstatic_field(VirtualSpace, _high, char*) \
+ nonstatic_field(VirtualSpace, _lower_high, char*) \
+ nonstatic_field(VirtualSpace, _middle_high, char*) \
+ nonstatic_field(VirtualSpace, _upper_high, char*) \
+ nonstatic_field(WaterMark, _point, HeapWord*) \
+ nonstatic_field(WaterMark, _space, Space*) \
+ \
+ /************************/ \
+ /* PerfMemory - jvmstat */ \
+ /************************/ \
+ \
+ nonstatic_field(PerfDataPrologue, magic, jint) \
+ nonstatic_field(PerfDataPrologue, byte_order, jbyte) \
+ nonstatic_field(PerfDataPrologue, major_version, jbyte) \
+ nonstatic_field(PerfDataPrologue, minor_version, jbyte) \
+ nonstatic_field(PerfDataPrologue, accessible, jbyte) \
+ nonstatic_field(PerfDataPrologue, used, jint) \
+ nonstatic_field(PerfDataPrologue, overflow, jint) \
+ nonstatic_field(PerfDataPrologue, mod_time_stamp, jlong) \
+ nonstatic_field(PerfDataPrologue, entry_offset, jint) \
+ nonstatic_field(PerfDataPrologue, num_entries, jint) \
+ \
+ nonstatic_field(PerfDataEntry, entry_length, jint) \
+ nonstatic_field(PerfDataEntry, name_offset, jint) \
+ nonstatic_field(PerfDataEntry, vector_length, jint) \
+ nonstatic_field(PerfDataEntry, data_type, jbyte) \
+ nonstatic_field(PerfDataEntry, flags, jbyte) \
+ nonstatic_field(PerfDataEntry, data_units, jbyte) \
+ nonstatic_field(PerfDataEntry, data_variability, jbyte) \
+ nonstatic_field(PerfDataEntry, data_offset, jint) \
+ \
+ static_field(PerfMemory, _start, char*) \
+ static_field(PerfMemory, _end, char*) \
+ static_field(PerfMemory, _top, char*) \
+ static_field(PerfMemory, _capacity, size_t) \
+ static_field(PerfMemory, _prologue, PerfDataPrologue*) \
+ static_field(PerfMemory, _initialized, jint) \
+ \
+ /***************/ \
+ /* SymbolTable */ \
+ /***************/ \
+ \
+ static_field(SymbolTable, _the_table, SymbolTable*) \
+ \
+ /***************/ \
+ /* StringTable */ \
+ /***************/ \
+ \
+ static_field(StringTable, _the_table, StringTable*) \
+ \
+ /********************/ \
+ /* SystemDictionary */ \
+ /********************/ \
+ \
+ static_field(SystemDictionary, _dictionary, Dictionary*) \
+ static_field(SystemDictionary, _placeholders, PlaceholderTable*) \
+ static_field(SystemDictionary, _shared_dictionary, Dictionary*) \
+ static_field(SystemDictionary, _system_loader_lock_obj, oop) \
+ static_field(SystemDictionary, _loader_constraints, LoaderConstraintTable*) \
+ static_field(SystemDictionary, _object_klass, klassOop) \
+ static_field(SystemDictionary, _string_klass, klassOop) \
+ static_field(SystemDictionary, _class_klass, klassOop) \
+ static_field(SystemDictionary, _cloneable_klass, klassOop) \
+ static_field(SystemDictionary, _classloader_klass, klassOop) \
+ static_field(SystemDictionary, _serializable_klass, klassOop) \
+ static_field(SystemDictionary, _system_klass, klassOop) \
+ static_field(SystemDictionary, _throwable_klass, klassOop) \
+ static_field(SystemDictionary, _threaddeath_klass, klassOop) \
+ static_field(SystemDictionary, _error_klass, klassOop) \
+ static_field(SystemDictionary, _exception_klass, klassOop) \
+ static_field(SystemDictionary, _runtime_exception_klass, klassOop) \
+ static_field(SystemDictionary, _classNotFoundException_klass, klassOop) \
+ static_field(SystemDictionary, _noClassDefFoundError_klass, klassOop) \
+ static_field(SystemDictionary, _linkageError_klass, klassOop) \
+ static_field(SystemDictionary, _classCastException_klass, klassOop) \
+ static_field(SystemDictionary, _arrayStoreException_klass, klassOop) \
+ static_field(SystemDictionary, _virtualMachineError_klass, klassOop) \
+ static_field(SystemDictionary, _outOfMemoryError_klass, klassOop) \
+ static_field(SystemDictionary, _StackOverflowError_klass, klassOop) \
+ static_field(SystemDictionary, _protectionDomain_klass, klassOop) \
+ static_field(SystemDictionary, _AccessControlContext_klass, klassOop) \
+ static_field(SystemDictionary, _reference_klass, klassOop) \
+ static_field(SystemDictionary, _soft_reference_klass, klassOop) \
+ static_field(SystemDictionary, _weak_reference_klass, klassOop) \
+ static_field(SystemDictionary, _final_reference_klass, klassOop) \
+ static_field(SystemDictionary, _phantom_reference_klass, klassOop) \
+ static_field(SystemDictionary, _finalizer_klass, klassOop) \
+ static_field(SystemDictionary, _thread_klass, klassOop) \
+ static_field(SystemDictionary, _threadGroup_klass, klassOop) \
+ static_field(SystemDictionary, _properties_klass, klassOop) \
+ static_field(SystemDictionary, _stringBuffer_klass, klassOop) \
+ static_field(SystemDictionary, _vector_klass, klassOop) \
+ static_field(SystemDictionary, _hashtable_klass, klassOop) \
+ static_field(SystemDictionary, _box_klasses[0], klassOop) \
+ static_field(SystemDictionary, _java_system_loader, oop) \
+ \
+ /*******************/ \
+ /* HashtableBucket */ \
+ /*******************/ \
+ \
+ nonstatic_field(HashtableBucket, _entry, BasicHashtableEntry*) \
+ \
+ /******************/ \
+ /* HashtableEntry */ \
+ /******************/ \
+ \
+ nonstatic_field(BasicHashtableEntry, _next, BasicHashtableEntry*) \
+ nonstatic_field(BasicHashtableEntry, _hash, unsigned int) \
+ nonstatic_field(HashtableEntry, _literal, oop) \
+ \
+ /*************/ \
+ /* Hashtable */ \
+ /*************/ \
+ \
+ nonstatic_field(BasicHashtable, _table_size, int) \
+ nonstatic_field(BasicHashtable, _buckets, HashtableBucket*) \
+ nonstatic_field(BasicHashtable, _free_list, BasicHashtableEntry*) \
+ nonstatic_field(BasicHashtable, _first_free_entry, char*) \
+ nonstatic_field(BasicHashtable, _end_block, char*) \
+ nonstatic_field(BasicHashtable, _entry_size, int) \
+ \
+ /*******************/ \
+ /* DictionaryEntry */ \
+ /*******************/ \
+ \
+ nonstatic_field(DictionaryEntry, _loader, oop) \
+ nonstatic_field(DictionaryEntry, _pd_set, ProtectionDomainEntry*) \
+ \
+ /********************/ \
+ /* PlaceholderEntry */ \
+ /********************/ \
+ \
+ nonstatic_field(PlaceholderEntry, _loader, oop) \
+ \
+ /**************************/ \
+ /* ProtectionDomainEntry  */ \
+ /**************************/ \
+ \
+ nonstatic_field(ProtectionDomainEntry, _next, ProtectionDomainEntry*) \
+ nonstatic_field(ProtectionDomainEntry, _protection_domain, oop) \
+ \
+ /*************************/ \
+ /* LoaderConstraintEntry */ \
+ /*************************/ \
+ \
+ nonstatic_field(LoaderConstraintEntry, _name, symbolOop) \
+ nonstatic_field(LoaderConstraintEntry, _num_loaders, int) \
+ nonstatic_field(LoaderConstraintEntry, _max_loaders, int) \
+ nonstatic_field(LoaderConstraintEntry, _loaders, oop*) \
+ \
+ /********************************/ \
+ /* CodeCache (NOTE: incomplete) */ \
+ /********************************/ \
+ \
+ static_field(CodeCache, _heap, CodeHeap*) \
+ \
+ /*******************************/ \
+ /* CodeHeap (NOTE: incomplete) */ \
+ /*******************************/ \
+ \
+ nonstatic_field(CodeHeap, _memory, VirtualSpace) \
+ nonstatic_field(CodeHeap, _segmap, VirtualSpace) \
+ nonstatic_field(CodeHeap, _log2_segment_size, int) \
+ nonstatic_field(HeapBlock, _header, HeapBlock::Header) \
+ nonstatic_field(HeapBlock::Header, _length, size_t) \
+ nonstatic_field(HeapBlock::Header, _used, bool) \
+ \
+ /**********************************/ \
+ /* Interpreter (NOTE: incomplete) */ \
+ /**********************************/ \
+ \
+ static_field(AbstractInterpreter, _code, StubQueue*) \
+ \
+ /****************************/ \
+ /* Stubs (NOTE: incomplete) */ \
+ /****************************/ \
+ \
+ nonstatic_field(StubQueue, _stub_buffer, address) \
+ nonstatic_field(StubQueue, _buffer_limit, int) \
+ nonstatic_field(StubQueue, _queue_begin, int) \
+ nonstatic_field(StubQueue, _queue_end, int) \
+ nonstatic_field(StubQueue, _number_of_stubs, int) \
+ nonstatic_field(InterpreterCodelet, _size, int) \
+ nonstatic_field(InterpreterCodelet, _description, const char*) \
+ nonstatic_field(InterpreterCodelet, _bytecode, Bytecodes::Code) \
+ \
+ /***********************************/ \
+ /* StubRoutines (NOTE: incomplete) */ \
+ /***********************************/ \
+ \
+ static_field(StubRoutines, _call_stub_return_address, address) \
+ IA32_ONLY(static_field(StubRoutines::i486, _call_stub_compiled_return, address)) \
+ \
+ /***************************************/ \
+ /* PcDesc and other compiled code info */ \
+ /***************************************/ \
+ \
+ nonstatic_field(PcDesc, _pc_offset, int) \
+ nonstatic_field(PcDesc, _scope_decode_offset, int) \
+ \
+ /***************************************************/ \
+ /* CodeBlobs (NOTE: incomplete, but only a little) */ \
+ /***************************************************/ \
+ \
+ nonstatic_field(CodeBlob, _name, const char*) \
+ nonstatic_field(CodeBlob, _size, int) \
+ nonstatic_field(CodeBlob, _header_size, int) \
+ nonstatic_field(CodeBlob, _relocation_size, int) \
+ nonstatic_field(CodeBlob, _instructions_offset, int) \
+ nonstatic_field(CodeBlob, _frame_complete_offset, int) \
+ nonstatic_field(CodeBlob, _data_offset, int) \
+ nonstatic_field(CodeBlob, _oops_offset, int) \
+ nonstatic_field(CodeBlob, _oops_length, int) \
+ nonstatic_field(CodeBlob, _frame_size, int) \
+ nonstatic_field(CodeBlob, _oop_maps, OopMapSet*) \
+ \
+ /**************************************************/ \
+ /* NMethods (NOTE: incomplete, but only a little) */ \
+ /**************************************************/ \
+ \
+ static_field(nmethod, _zombie_instruction_size, int) \
+ nonstatic_field(nmethod, _method, methodOop) \
+ nonstatic_field(nmethod, _entry_bci, int) \
+ nonstatic_field(nmethod, _link, nmethod*) \
+ nonstatic_field(nmethod, _exception_offset, int) \
+ nonstatic_field(nmethod, _deoptimize_offset, int) \
+ nonstatic_field(nmethod, _orig_pc_offset, int) \
+ nonstatic_field(nmethod, _stub_offset, int) \
+ nonstatic_field(nmethod, _scopes_data_offset, int) \
+ nonstatic_field(nmethod, _scopes_pcs_offset, int) \
+ nonstatic_field(nmethod, _dependencies_offset, int) \
+ nonstatic_field(nmethod, _handler_table_offset, int) \
+ nonstatic_field(nmethod, _nul_chk_table_offset, int) \
+ nonstatic_field(nmethod, _nmethod_end_offset, int) \
+ nonstatic_field(nmethod, _entry_point, address) \
+ nonstatic_field(nmethod, _verified_entry_point, address) \
+ nonstatic_field(nmethod, _osr_entry_point, address) \
+ nonstatic_field(nmethod, _lock_count, jint) \
+ nonstatic_field(nmethod, _stack_traversal_mark, long) \
+ \
+ /********************************/ \
+ /* JavaCalls (NOTE: incomplete) */ \
+ /********************************/ \
+ \
+ nonstatic_field(JavaCallWrapper, _anchor, JavaFrameAnchor) \
+ \
+ /**************************************/ \
+ /* JavaFrameAnchor (NOTE: incomplete) */ \
+ /**************************************/ \
+ volatile_nonstatic_field(JavaFrameAnchor, _last_Java_sp, intptr_t*) \
+ volatile_nonstatic_field(JavaFrameAnchor, _last_Java_pc, address) \
+ \
+ /******************************/ \
+ /* Threads (NOTE: incomplete) */ \
+ /******************************/ \
+ \
+ static_field(Threads, _thread_list, JavaThread*) \
+ static_field(Threads, _number_of_threads, int) \
+ static_field(Threads, _number_of_non_daemon_threads, int) \
+ static_field(Threads, _return_code, int) \
+ \
+ volatile_nonstatic_field(Thread, _suspend_flags, uint32_t) \
+ nonstatic_field(Thread, _active_handles, JNIHandleBlock*) \
+ nonstatic_field(Thread, _highest_lock, address) \
+ nonstatic_field(Thread, _tlab, ThreadLocalAllocBuffer) \
+ nonstatic_field(Thread, _current_pending_monitor, ObjectMonitor*) \
+ nonstatic_field(Thread, _current_pending_monitor_is_from_java, bool) \
+ nonstatic_field(Thread, _current_waiting_monitor, ObjectMonitor*) \
+ nonstatic_field(NamedThread, _name, char*) \
+ nonstatic_field(JavaThread, _next, JavaThread*) \
+ nonstatic_field(JavaThread, _threadObj, oop) \
+ nonstatic_field(JavaThread, _anchor, JavaFrameAnchor) \
+ volatile_nonstatic_field(JavaThread, _thread_state, JavaThreadState) \
+ nonstatic_field(JavaThread, _osthread, OSThread*) \
+ nonstatic_field(JavaThread, _stack_base, address) \
+ nonstatic_field(JavaThread, _stack_size, size_t) \
+ \
+ /************/ \
+ /* OSThread */ \
+ /************/ \
+ \
+ nonstatic_field(OSThread, _interrupted, jint) \
+ \
+ /************************/ \
+ /* OopMap and OopMapSet */ \
+ /************************/ \
+ \
+ nonstatic_field(OopMap, _pc_offset, int) \
+ nonstatic_field(OopMap, _omv_count, int) \
+ nonstatic_field(OopMap, _omv_data_size, int) \
+ nonstatic_field(OopMap, _omv_data, unsigned char*) \
+ nonstatic_field(OopMap, _write_stream, CompressedWriteStream*) \
+ nonstatic_field(OopMapSet, _om_count, int) \
+ nonstatic_field(OopMapSet, _om_size, int) \
+ nonstatic_field(OopMapSet, _om_data, OopMap**) \
+ \
+ /*********************************/ \
+ /* JNIHandles and JNIHandleBlock */ \
+ /*********************************/ \
+ static_field(JNIHandles, _global_handles, JNIHandleBlock*) \
+ static_field(JNIHandles, _weak_global_handles, JNIHandleBlock*) \
+ static_field(JNIHandles, _deleted_handle, oop) \
+ \
+ unchecked_nonstatic_field(JNIHandleBlock, _handles, JNIHandleBlock::block_size_in_oops * sizeof(Oop)) /* Note: no type */ \
+ nonstatic_field(JNIHandleBlock, _top, int) \
+ nonstatic_field(JNIHandleBlock, _next, JNIHandleBlock*) \
+ \
+ /********************/ \
+ /* CompressedStream */ \
+ /********************/ \
+ \
+ nonstatic_field(CompressedStream, _buffer, u_char*) \
+ nonstatic_field(CompressedStream, _position, int) \
+ \
+ /*********************************/ \
+ /* VMRegImpl (NOTE: incomplete) */ \
+ /*********************************/ \
+ \
+ static_field(VMRegImpl, regName[0], const char*) \
+ static_field(VMRegImpl, stack0, VMReg) \
+ \
+ /*******************************/ \
+ /* Runtime1 (NOTE: incomplete) */ \
+ /*******************************/ \
+ \
+ unchecked_c1_static_field(Runtime1, _blobs, sizeof(Runtime1::_blobs)) /* NOTE: no type */ \
+ \
+ /************/ \
+ /* Monitors */ \
+ /************/ \
+ \
+ volatile_nonstatic_field(ObjectMonitor, _header, markOop) \
+ unchecked_nonstatic_field(ObjectMonitor, _object, sizeof(void *)) /* NOTE: no type */ \
+ unchecked_nonstatic_field(ObjectMonitor, _owner, sizeof(void *)) /* NOTE: no type */ \
+ volatile_nonstatic_field(ObjectMonitor, _count, intptr_t) \
+ volatile_nonstatic_field(ObjectMonitor, _waiters, intptr_t) \
+ volatile_nonstatic_field(ObjectMonitor, _recursions, intptr_t) \
+ nonstatic_field(ObjectMonitor, FreeNext, ObjectMonitor*) \
+ volatile_nonstatic_field(BasicLock, _displaced_header, markOop) \
+ nonstatic_field(BasicObjectLock, _lock, BasicLock) \
+ nonstatic_field(BasicObjectLock, _obj, oop) \
+ static_field(ObjectSynchronizer, gBlockList, ObjectMonitor*) \
+ \
+ /*********************/ \
+ /* Matcher (C2 only) */ \
+ /*********************/ \
+ \
+ unchecked_c2_static_field(Matcher, _regEncode, sizeof(Matcher::_regEncode)) /* NOTE: no type */ \
+ \
+ /*********************/ \
+ /* -XX flags */ \
+ /*********************/ \
+ \
+ nonstatic_field(Flag, type, const char*) \
+ nonstatic_field(Flag, name, const char*) \
+ unchecked_nonstatic_field(Flag, addr, sizeof(void*)) /* NOTE: no type */ \
+ nonstatic_field(Flag, kind, const char*) \
+ static_field(Flag, flags, Flag*) \
+ static_field(Flag, numFlags, size_t) \
+ \
+ /*************************/ \
+ /* JDK / VM version info */ \
+ /*************************/ \
+ \
+ static_field(Abstract_VM_Version, _s_vm_release, const char*) \
+ static_field(Abstract_VM_Version, _s_internal_vm_info_string, const char*) \
+ static_field(Abstract_VM_Version, _vm_major_version, int) \
+ static_field(Abstract_VM_Version, _vm_minor_version, int) \
+ static_field(Abstract_VM_Version, _vm_build_number, int) \
+ \
+ static_field(JDK_Version, _pre_jdk16_version, bool) \
+ static_field(JDK_Version, _jdk_version, int) \
+ \
+ \
+ \
+ /*************/ \
+ /* Arguments */ \
+ /*************/ \
+ \
+ static_field(Arguments, _jvm_flags_array, char**) \
+ static_field(Arguments, _num_jvm_flags, int) \
+ static_field(Arguments, _jvm_args_array, char**) \
+ static_field(Arguments, _num_jvm_args, int) \
+ static_field(Arguments, _java_command, char*) \
+ \
+ \
+ /************************/ \
+ /* Miscellaneous fields */ \
+ /************************/ \
+ \
+ nonstatic_field(AccessFlags, _flags, jint) \
+ nonstatic_field(elapsedTimer, _counter, jlong) \
+ nonstatic_field(elapsedTimer, _active, bool) \
+ nonstatic_field(InvocationCounter, _counter, unsigned int)
+
+ /* NOTE that we do not use the last_entry() macro here; it is used */
+ /* in vmStructs_<os>_<cpu>.hpp's VM_STRUCTS_OS_CPU macro (and must */
+ /* be present there) */
+
+//--------------------------------------------------------------------------------
+// VM_TYPES
+//
+// This list must enumerate at least all of the types in the above
+// list. For the types in the above list, the entry below must have
+// exactly the same spacing since string comparisons are done in the
+// code which verifies the consistency of these tables (in the debug
+// build).
+//
+// In addition to the above types, this list is required to enumerate
+// the JNI's java types, which are used to indicate the size of Java
+// fields in this VM to the SA. Further, oop types are currently
+// distinguished by name (i.e., ends with "oop") over in the SA.
+//
+// The declare_toplevel_type macro should be used to declare types
+// which do not have a superclass.
+//
+// The declare_integer_type and declare_unsigned_integer_type macros
+// are required in order to properly identify C integer types over in
+// the SA. They should be used for any type which is otherwise opaque
+// and which it is necessary to coerce into an integer value. This
+// includes, for example, the type uintptr_t. Note that while they
+// will properly identify the type's size regardless of the platform,
+// since it does not seem possible to deduce or check signedness at
+// compile time using the pointer comparison tricks, it is currently
+// required that the given types have the same signedness across all
+// platforms.
+//
+// NOTE that there are platform-specific additions to this table in
+// vmStructs_<os>_<cpu>.hpp.
+
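+// Illustrative sketch only (not the actual HotSpot definitions): as with
+// VM_STRUCTS above, each declare_* line expands to whatever macro body the
+// caller of VM_TYPES supplies; conceptually a type entry carries something
+// like the following:
+//
+//   struct ExampleTypeEntry {       // hypothetical name
+//     const char* typeName;         // e.g. "instanceKlass"
+//     const char* superclassName;   // e.g. "Klass"; NULL for toplevel types
+//     int         isOopType;        // set by declare_oop_type
+//     int         isIntegerType;    // set by declare_integer_type
+//     int         isUnsigned;       // set by declare_unsigned_integer_type
+//     uint64_t    size;             // sizeof(type), for consistency checks in the SA
+//   };
+//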
+#define VM_TYPES(declare_type, \
+ declare_toplevel_type, \
+ declare_oop_type, \
+ declare_integer_type, \
+ declare_unsigned_integer_type, \
+ declare_c1_toplevel_type, \
+ declare_c2_type, \
+ declare_c2_toplevel_type, \
+ last_entry) \
+ \
+ /*************************************************************/ \
+ /* Java primitive types -- required by the SA implementation */ \
+ /* in order to determine the size of Java fields in this VM */ \
+ /* (the implementation looks up these names specifically) */ \
+ /* NOTE: since we fetch these sizes from the remote VM, we */ \
+ /* have a bootstrapping sequence during which it is not */ \
+ /* valid to fetch Java values from the remote process, only */ \
+ /* C integer values (of known size). NOTE also that we do */ \
+ /* NOT include "Java unsigned" types like juint here; since */ \
+ /* Java does not have unsigned primitive types, those can */ \
+ /* not be mapped directly and are considered to be C integer */ \
+ /* types in this system (see the "other types" section, */ \
+ /* below.) */ \
+ /*************************************************************/ \
+ \
+ declare_toplevel_type(jboolean) \
+ declare_toplevel_type(jbyte) \
+ declare_toplevel_type(jchar) \
+ declare_toplevel_type(jdouble) \
+ declare_toplevel_type(jfloat) \
+ declare_toplevel_type(jint) \
+ declare_toplevel_type(jlong) \
+ declare_toplevel_type(jshort) \
+ \
+ /*********************************************************************/ \
+ /* C integer types. User-defined typedefs (like "size_t" or */ \
+ /* "intptr_t") are guaranteed to be present with the same names over */ \
+ /* in the SA's type database. Names like "unsigned short" are not */ \
+ /* guaranteed to be visible through the SA's type database lookup */ \
+ /* mechanism, though they will have a Type object created for them */ \
+ /* and are valid types for Fields. */ \
+ /*********************************************************************/ \
+ declare_integer_type(bool) \
+ declare_integer_type(int) \
+ declare_integer_type(long) \
+ declare_integer_type(char) \
+ declare_unsigned_integer_type(unsigned char) \
+ declare_unsigned_integer_type(unsigned int) \
+ declare_unsigned_integer_type(unsigned short) \
+ declare_unsigned_integer_type(unsigned long) \
+ /* The compiler thinks this is a different type than */ \
+ /* unsigned short on Win32 */ \
+ declare_unsigned_integer_type(u2) \
+ declare_unsigned_integer_type(unsigned) \
+ \
+ /*****************************/ \
+ /* C primitive pointer types */ \
+ /*****************************/ \
+ \
+ declare_toplevel_type(int*) \
+ declare_toplevel_type(char*) \
+ declare_toplevel_type(char**) \
+ declare_toplevel_type(const char*) \
+ declare_toplevel_type(u_char*) \
+ declare_toplevel_type(unsigned char*) \
+ \
+ /*******************************************************************/ \
+ /* Types which it will be handy to have available over in the SA */ \
+ /* in order to do platform-independent address -> integer coercion */ \
+ /* (note: these will be looked up by name) */ \
+ /*******************************************************************/ \
+ \
+ declare_unsigned_integer_type(size_t) \
+ declare_unsigned_integer_type(const size_t) \
+ declare_integer_type(intx) \
+ declare_integer_type(intptr_t) \
+ declare_unsigned_integer_type(uintx) \
+ declare_unsigned_integer_type(uintptr_t) \
+ declare_unsigned_integer_type(uint32_t) \
+ declare_unsigned_integer_type(uint64_t) \
+ declare_integer_type(const int) \
+ \
+ /*******************************************************************************/ \
+ /* OopDesc and Klass hierarchies (NOTE: missing methodDataOop-related classes) */ \
+ /*******************************************************************************/ \
+ \
+ declare_toplevel_type(oopDesc) \
+ declare_toplevel_type(Klass_vtbl) \
+ declare_type(Klass, Klass_vtbl) \
+ declare_type(arrayKlass, Klass) \
+ declare_type(arrayKlassKlass, klassKlass) \
+ declare_type(arrayOopDesc, oopDesc) \
+ declare_type(compiledICHolderKlass, Klass) \
+ declare_type(compiledICHolderOopDesc, oopDesc) \
+ declare_type(constantPoolKlass, arrayKlass) \
+ declare_type(constantPoolOopDesc, arrayOopDesc) \
+ declare_type(constantPoolCacheKlass, arrayKlass) \
+ declare_type(constantPoolCacheOopDesc, arrayOopDesc) \
+ declare_type(instanceKlass, Klass) \
+ declare_type(instanceKlassKlass, klassKlass) \
+ declare_type(instanceOopDesc, oopDesc) \
+ declare_type(instanceRefKlass, instanceKlass) \
+ declare_type(klassKlass, Klass) \
+ declare_type(klassOopDesc, oopDesc) \
+ declare_type(markOopDesc, oopDesc) \
+ declare_type(methodDataKlass, Klass) \
+ declare_type(methodDataOopDesc, oopDesc) \
+ declare_type(methodKlass, Klass) \
+ declare_type(constMethodKlass, Klass) \
+ declare_type(methodOopDesc, oopDesc) \
+ declare_type(objArrayKlass, arrayKlass) \
+ declare_type(objArrayKlassKlass, arrayKlassKlass) \
+ declare_type(objArrayOopDesc, arrayOopDesc) \
+ declare_type(constMethodOopDesc, oopDesc) \
+ declare_type(symbolKlass, Klass) \
+ declare_type(symbolOopDesc, oopDesc) \
+ declare_type(typeArrayKlass, arrayKlass) \
+ declare_type(typeArrayKlassKlass, arrayKlassKlass) \
+ declare_type(typeArrayOopDesc, arrayOopDesc) \
+ \
+ /********/ \
+ /* Oops */ \
+ /********/ \
+ \
+ declare_oop_type(constantPoolOop) \
+ declare_oop_type(constantPoolCacheOop) \
+ declare_oop_type(klassOop) \
+ declare_oop_type(markOop) \
+ declare_oop_type(methodOop) \
+ declare_oop_type(methodDataOop) \
+ declare_oop_type(objArrayOop) \
+ declare_oop_type(oop) \
+ declare_oop_type(constMethodOop) \
+ declare_oop_type(symbolOop) \
+ declare_oop_type(typeArrayOop) \
+ \
+ /*************************************/ \
+ /* MethodOop-related data structures */ \
+ /*************************************/ \
+ \
+ declare_toplevel_type(CheckedExceptionElement) \
+ declare_toplevel_type(LocalVariableTableElement) \
+ \
+ /******************************************/ \
+ /* Generation and space hierarchies */ \
+ /* (needed for run-time type information) */ \
+ /******************************************/ \
+ \
+ declare_toplevel_type(CollectedHeap) \
+ declare_type(SharedHeap, CollectedHeap) \
+ declare_type(GenCollectedHeap, SharedHeap) \
+ declare_toplevel_type(Generation) \
+ declare_type(DefNewGeneration, Generation) \
+ declare_type(CardGeneration, Generation) \
+ declare_type(OneContigSpaceCardGeneration, CardGeneration) \
+ declare_type(TenuredGeneration, OneContigSpaceCardGeneration) \
+ declare_type(CompactingPermGenGen, OneContigSpaceCardGeneration) \
+ declare_toplevel_type(Space) \
+ declare_toplevel_type(BitMap) \
+ declare_type(CompactibleSpace, Space) \
+ declare_type(ContiguousSpace, CompactibleSpace) \
+ declare_type(EdenSpace, ContiguousSpace) \
+ declare_type(OffsetTableContigSpace, ContiguousSpace) \
+ declare_type(TenuredSpace, OffsetTableContigSpace) \
+ declare_type(ContigPermSpace, OffsetTableContigSpace) \
+ declare_toplevel_type(PermGen) \
+ declare_type(CompactingPermGen, PermGen) \
+ declare_toplevel_type(BarrierSet) \
+ declare_type(ModRefBarrierSet, BarrierSet) \
+ declare_type(CardTableModRefBS, ModRefBarrierSet) \
+ declare_toplevel_type(GenRemSet) \
+ declare_type(CardTableRS, GenRemSet) \
+ declare_toplevel_type(BlockOffsetSharedArray) \
+ declare_toplevel_type(BlockOffsetTable) \
+ declare_type(BlockOffsetArray, BlockOffsetTable) \
+ declare_type(BlockOffsetArrayContigSpace, BlockOffsetArray) \
+ declare_type(BlockOffsetArrayNonContigSpace, BlockOffsetArray) \
+ \
+ /* Miscellaneous other GC types */ \
+ \
+ declare_toplevel_type(ageTable) \
+ declare_toplevel_type(Generation::StatRecord) \
+ declare_toplevel_type(GenerationSpec) \
+ declare_toplevel_type(HeapWord) \
+ declare_toplevel_type(MemRegion) \
+ declare_toplevel_type(const MemRegion) \
+ declare_toplevel_type(PermanentGenerationSpec) \
+ declare_toplevel_type(ThreadLocalAllocBuffer) \
+ declare_toplevel_type(VirtualSpace) \
+ declare_toplevel_type(WaterMark) \
+ \
+ /* Pointers to Garbage Collection types */ \
+ \
+ declare_toplevel_type(BarrierSet*) \
+ declare_toplevel_type(BlockOffsetSharedArray*) \
+ declare_toplevel_type(GenRemSet*) \
+ declare_toplevel_type(CardTableRS*) \
+ declare_toplevel_type(CollectedHeap*) \
+ declare_toplevel_type(ContiguousSpace*) \
+ declare_toplevel_type(DefNewGeneration*) \
+ declare_toplevel_type(EdenSpace*) \
+ declare_toplevel_type(GenCollectedHeap*) \
+ declare_toplevel_type(Generation*) \
+ declare_toplevel_type(GenerationSpec**) \
+ declare_toplevel_type(HeapWord*) \
+ declare_toplevel_type(MemRegion*) \
+ declare_toplevel_type(OffsetTableContigSpace*) \
+ declare_toplevel_type(OneContigSpaceCardGeneration*) \
+ declare_toplevel_type(PermGen*) \
+ declare_toplevel_type(Space*) \
+ declare_toplevel_type(ThreadLocalAllocBuffer*) \
+ \
+ /************************/ \
+ /* PerfMemory - jvmstat */ \
+ /************************/ \
+ \
+ declare_toplevel_type(PerfDataPrologue) \
+ declare_toplevel_type(PerfDataPrologue*) \
+ declare_toplevel_type(PerfDataEntry) \
+ declare_toplevel_type(PerfMemory) \
+ \
+ /*********************************/ \
+ /* SymbolTable, SystemDictionary */ \
+ /*********************************/ \
+ \
+ declare_toplevel_type(BasicHashtable) \
+ declare_type(Hashtable, BasicHashtable) \
+ declare_type(SymbolTable, Hashtable) \
+ declare_type(StringTable, Hashtable) \
+ declare_type(LoaderConstraintTable, Hashtable) \
+ declare_type(TwoOopHashtable, Hashtable) \
+ declare_type(Dictionary, TwoOopHashtable) \
+ declare_type(PlaceholderTable, TwoOopHashtable) \
+ declare_toplevel_type(Hashtable*) \
+ declare_toplevel_type(SymbolTable*) \
+ declare_toplevel_type(StringTable*) \
+ declare_toplevel_type(LoaderConstraintTable*) \
+ declare_toplevel_type(TwoOopHashtable*) \
+ declare_toplevel_type(Dictionary*) \
+ declare_toplevel_type(PlaceholderTable*) \
+ declare_toplevel_type(BasicHashtableEntry) \
+ declare_toplevel_type(BasicHashtableEntry*) \
+ declare_type(HashtableEntry, BasicHashtableEntry) \
+ declare_type(DictionaryEntry, HashtableEntry) \
+ declare_type(PlaceholderEntry, HashtableEntry) \
+ declare_type(LoaderConstraintEntry, HashtableEntry) \
+ declare_toplevel_type(HashtableEntry*) \
+ declare_toplevel_type(DictionaryEntry*) \
+ declare_toplevel_type(HashtableBucket) \
+ declare_toplevel_type(HashtableBucket*) \
+ declare_toplevel_type(SystemDictionary) \
+ declare_toplevel_type(ProtectionDomainEntry) \
+ declare_toplevel_type(ProtectionDomainEntry*) \
+ \
+ /***********************************************************/ \
+ /* Thread hierarchy (needed for run-time type information) */ \
+ /***********************************************************/ \
+ \
+ declare_toplevel_type(Threads) \
+ declare_toplevel_type(ThreadShadow) \
+ declare_type(Thread, ThreadShadow) \
+ declare_type(NamedThread, Thread) \
+ declare_type(WatcherThread, Thread) \
+ declare_type(JavaThread, Thread) \
+ declare_type(JvmtiAgentThread, JavaThread) \
+ declare_type(LowMemoryDetectorThread, JavaThread) \
+ declare_type(CompilerThread, JavaThread) \
+ declare_toplevel_type(OSThread) \
+ declare_toplevel_type(JavaFrameAnchor) \
+ \
+ /***************/ \
+ /* Interpreter */ \
+ /***************/ \
+ \
+ declare_toplevel_type(AbstractInterpreter) \
+ \
+ /*********/ \
+ /* Stubs */ \
+ /*********/ \
+ \
+ declare_toplevel_type(StubQueue) \
+ declare_toplevel_type(StubRoutines) \
+ IA32_ONLY(declare_toplevel_type(StubRoutines::i486)) \
+ declare_toplevel_type(Stub) \
+ declare_type(InterpreterCodelet, Stub) \
+ \
+ /*************/ \
+ /* JavaCalls */ \
+ /*************/ \
+ \
+ declare_toplevel_type(JavaCallWrapper) \
+ \
+ /*************/ \
+ /* CodeCache */ \
+ /*************/ \
+ \
+ declare_toplevel_type(CodeCache) \
+ \
+ /************/ \
+ /* CodeHeap */ \
+ /************/ \
+ \
+ declare_toplevel_type(CodeHeap) \
+ declare_toplevel_type(CodeHeap*) \
+ declare_toplevel_type(HeapBlock) \
+ declare_toplevel_type(HeapBlock::Header) \
+ declare_type(FreeBlock, HeapBlock) \
+ \
+ /*************************************************************/ \
+ /* CodeBlob hierarchy (needed for run-time type information) */ \
+ /*************************************************************/ \
+ \
+ declare_toplevel_type(CodeBlob) \
+ declare_type(BufferBlob, CodeBlob) \
+ declare_type(nmethod, CodeBlob) \
+ declare_type(RuntimeStub, CodeBlob) \
+ declare_type(SingletonBlob, CodeBlob) \
+ declare_type(SafepointBlob, SingletonBlob) \
+ declare_type(DeoptimizationBlob, SingletonBlob) \
+ declare_c2_type(ExceptionBlob, SingletonBlob) \
+ declare_c2_type(UncommonTrapBlob, CodeBlob) \
+ \
+ /***************************************/ \
+ /* PcDesc and other compiled code info */ \
+ /***************************************/ \
+ \
+ declare_toplevel_type(PcDesc) \
+ \
+ /************************/ \
+ /* OopMap and OopMapSet */ \
+ /************************/ \
+ \
+ declare_toplevel_type(OopMap) \
+ declare_toplevel_type(OopMapSet) \
+ \
+ /********************/ \
+ /* CompressedStream */ \
+ /********************/ \
+ \
+ declare_toplevel_type(CompressedStream) \
+ \
+ /**************/ \
+ /* VMRegImpl */ \
+ /**************/ \
+ \
+ declare_toplevel_type(VMRegImpl) \
+ \
+ /*********************************/ \
+ /* JNIHandles and JNIHandleBlock */ \
+ /*********************************/ \
+ \
+ declare_toplevel_type(JNIHandles) \
+ declare_toplevel_type(JNIHandleBlock) \
+ \
+ /**********************/ \
+ /* Runtime1 (C1 only) */ \
+ /**********************/ \
+ \
+ declare_c1_toplevel_type(Runtime1) \
+ \
+ /************/ \
+ /* Monitors */ \
+ /************/ \
+ \
+ declare_toplevel_type(ObjectMonitor) \
+ declare_toplevel_type(ObjectSynchronizer) \
+ declare_toplevel_type(BasicLock) \
+ declare_toplevel_type(BasicObjectLock) \
+ \
+ /*********************/ \
+ /* Matcher (C2 only) */ \
+ /*********************/ \
+ \
+ /* NOTE: this is not really a toplevel type, but we only need */ \
+ /* this one -- FIXME later if necessary */ \
+ declare_c2_toplevel_type(Matcher) \
+ \
+ /************************/ \
+ /* Adapter Blob Entries */ \
+ /************************/ \
+ declare_toplevel_type(AdapterHandlerEntry) \
+ declare_toplevel_type(AdapterHandlerEntry*) \
+ \
+ /********************/ \
+ /* -XX flags */ \
+ /********************/ \
+ \
+ declare_toplevel_type(Flag) \
+ declare_toplevel_type(Flag*) \
+ \
+ /********************/ \
+ /* JDK/VM version */ \
+ /********************/ \
+ \
+ declare_toplevel_type(Abstract_VM_Version) \
+ declare_toplevel_type(JDK_Version) \
+ \
+ /*************/ \
+ /* Arguments */ \
+ /*************/ \
+ \
+ declare_toplevel_type(Arguments) \
+ \
+ /***************/ \
+ /* Other types */ \
+ /***************/ \
+ \
+ /* all enum types */ \
+ \
+ declare_integer_type(Bytecodes::Code) \
+ declare_integer_type(Generation::Name) \
+ declare_integer_type(instanceKlass::ClassState) \
+ declare_integer_type(JavaThreadState) \
+ declare_integer_type(Location::Type) \
+ declare_integer_type(Location::Where) \
+ declare_integer_type(PermGen::Name) \
+ \
+ declare_integer_type(AccessFlags) /* FIXME: wrong type (not integer) */\
+ declare_toplevel_type(address) /* FIXME: should this be an integer type? */\
+ declare_toplevel_type(BreakpointInfo) \
+ declare_toplevel_type(BreakpointInfo*) \
+ declare_toplevel_type(CodeBlob*) \
+ declare_toplevel_type(CompressedWriteStream*) \
+ declare_toplevel_type(ConstantPoolCacheEntry) \
+ declare_toplevel_type(elapsedTimer) \
+ declare_toplevel_type(intptr_t*) \
+ declare_unsigned_integer_type(InvocationCounter) /* FIXME: wrong type (not integer) */ \
+ declare_toplevel_type(JavaThread*) \
+ declare_toplevel_type(jbyte*) \
+ declare_toplevel_type(jbyte**) \
+ declare_toplevel_type(jint*) \
+ declare_toplevel_type(jniIdMapBase*) \
+ declare_unsigned_integer_type(juint) \
+ declare_unsigned_integer_type(julong) \
+ declare_toplevel_type(JNIHandleBlock*) \
+ declare_toplevel_type(JNIid) \
+ declare_toplevel_type(JNIid*) \
+ declare_toplevel_type(jmethodID*) \
+ declare_toplevel_type(Mutex*) \
+ declare_toplevel_type(nmethod*) \
+ declare_toplevel_type(ObjectMonitor*) \
+ declare_toplevel_type(oop*) \
+ declare_toplevel_type(OopMap**) \
+ declare_toplevel_type(OopMapCache*) \
+ declare_toplevel_type(OopMapSet*) \
+ declare_toplevel_type(VMReg) \
+ declare_toplevel_type(OSThread*) \
+ declare_integer_type(ReferenceType) \
+ declare_toplevel_type(StubQueue*) \
+ declare_toplevel_type(Thread*) \
+ declare_toplevel_type(Universe)
+
+ /* NOTE that we do not use the last_entry() macro here; it is used */
+ /* in vmStructs_<os>_<cpu>.hpp's VM_TYPES_OS_CPU macro (and must be */
+ /* present there) */
+
+//--------------------------------------------------------------------------------
+// VM_INT_CONSTANTS
+//
+// This table contains integer constants required over in the
+// serviceability agent. The "declare_constant" macro is used for all
+// enums, etc., while "declare_preprocessor_constant" must be used for
+// all #defined constants.
+
+#define VM_INT_CONSTANTS(declare_constant, \
+ declare_preprocessor_constant, \
+ declare_c1_constant, \
+ declare_c2_constant, \
+ declare_c2_preprocessor_constant, \
+ last_entry) \
+ \
+ /******************/ \
+ /* Useful globals */ \
+ /******************/ \
+ \
+ declare_constant(UseTLAB) \
+ \
+ /**************/ \
+ /* Stack bias */ \
+ /**************/ \
+ \
+ declare_preprocessor_constant("STACK_BIAS", STACK_BIAS) \
+ \
+ /****************/ \
+ /* Object sizes */ \
+ /****************/ \
+ \
+ declare_constant(LogBytesPerWord) \
+ declare_constant(BytesPerLong) \
+ \
+ /********************/ \
+ /* Object alignment */ \
+ /********************/ \
+ \
+ declare_constant(MinObjAlignmentInBytes) \
+ \
+ /********************************************/ \
+ /* Generation and Space Hierarchy Constants */ \
+ /********************************************/ \
+ \
+ declare_constant(ageTable::table_size) \
+ \
+ declare_constant(BarrierSet::ModRef) \
+ declare_constant(BarrierSet::CardTableModRef) \
+ declare_constant(BarrierSet::Other) \
+ \
+ declare_constant(BlockOffsetSharedArray::LogN) \
+ declare_constant(BlockOffsetSharedArray::LogN_words) \
+ declare_constant(BlockOffsetSharedArray::N_bytes) \
+ declare_constant(BlockOffsetSharedArray::N_words) \
+ \
+ declare_constant(BlockOffsetArray::N_words) \
+ \
+ declare_constant(CardTableModRefBS::clean_card) \
+ declare_constant(CardTableModRefBS::last_card) \
+ declare_constant(CardTableModRefBS::dirty_card) \
+ declare_constant(CardTableModRefBS::Precise) \
+ declare_constant(CardTableModRefBS::ObjHeadPreciseArray) \
+ declare_constant(CardTableModRefBS::card_shift) \
+ declare_constant(CardTableModRefBS::card_size) \
+ declare_constant(CardTableModRefBS::card_size_in_words) \
+ \
+ declare_constant(CardTableRS::youngergen_card) \
+ \
+ declare_constant(CollectedHeap::Abstract) \
+ declare_constant(CollectedHeap::SharedHeap) \
+ declare_constant(CollectedHeap::GenCollectedHeap) \
+ \
+ declare_constant(GenCollectedHeap::max_gens) \
+ \
+ /* constants from Generation::Name enum */ \
+ \
+ declare_constant(Generation::DefNew) \
+ declare_constant(Generation::MarkSweepCompact) \
+ declare_constant(Generation::Other) \
+ \
+ declare_constant(Generation::LogOfGenGrain) \
+ declare_constant(Generation::GenGrain) \
+ \
+ declare_constant(HeapWordSize) \
+ declare_constant(LogHeapWordSize) \
+ declare_constant(HeapWordsPerOop) \
+ \
+ /* constants from PermGen::Name enum */ \
+ \
+ declare_constant(PermGen::MarkSweepCompact) \
+ declare_constant(PermGen::MarkSweep) \
+ \
+ /************************/ \
+ /* PerfMemory - jvmstat */ \
+ /************************/ \
+ \
+ declare_preprocessor_constant("PERFDATA_MAJOR_VERSION", PERFDATA_MAJOR_VERSION) \
+ declare_preprocessor_constant("PERFDATA_MINOR_VERSION", PERFDATA_MINOR_VERSION) \
+ declare_preprocessor_constant("PERFDATA_BIG_ENDIAN", PERFDATA_BIG_ENDIAN) \
+ declare_preprocessor_constant("PERFDATA_LITTLE_ENDIAN", PERFDATA_LITTLE_ENDIAN) \
+ \
+ /***************/ \
+ /* SymbolTable */ \
+ /***************/ \
+ \
+ declare_constant(SymbolTable::symbol_table_size) \
+ \
+ /***************/ \
+ /* StringTable */ \
+ /***************/ \
+ \
+ declare_constant(StringTable::string_table_size) \
+ \
+ /********************/ \
+ /* SystemDictionary */ \
+ /********************/ \
+ \
+ declare_constant(SystemDictionary::_loader_constraint_size) \
+ declare_constant(SystemDictionary::_nof_buckets) \
+ \
+ /***********************************/ \
+ /* LoaderConstraintTable constants */ \
+ /***********************************/ \
+ \
+ declare_constant(LoaderConstraintTable::_loader_constraint_size) \
+ declare_constant(LoaderConstraintTable::_nof_buckets) \
+ \
+ /************************************************************/ \
+ /* HotSpot specific JVM_ACC constants from global anon enum */ \
+ /************************************************************/ \
+ \
+ declare_constant(JVM_ACC_WRITTEN_FLAGS) \
+ declare_constant(JVM_ACC_MONITOR_MATCH) \
+ declare_constant(JVM_ACC_HAS_MONITOR_BYTECODES) \
+ declare_constant(JVM_ACC_HAS_LOOPS) \
+ declare_constant(JVM_ACC_LOOPS_FLAG_INIT) \
+ declare_constant(JVM_ACC_QUEUED) \
+ declare_constant(JVM_ACC_NOT_OSR_COMPILABLE) \
+ declare_constant(JVM_ACC_HAS_LINE_NUMBER_TABLE) \
+ declare_constant(JVM_ACC_HAS_CHECKED_EXCEPTIONS) \
+ declare_constant(JVM_ACC_HAS_JSRS) \
+ declare_constant(JVM_ACC_IS_OLD) \
+ declare_constant(JVM_ACC_IS_OBSOLETE) \
+ declare_constant(JVM_ACC_IS_PREFIXED_NATIVE) \
+ declare_constant(JVM_ACC_HAS_MIRANDA_METHODS) \
+ declare_constant(JVM_ACC_HAS_VANILLA_CONSTRUCTOR) \
+ declare_constant(JVM_ACC_HAS_FINALIZER) \
+ declare_constant(JVM_ACC_IS_CLONEABLE) \
+ declare_constant(JVM_ACC_HAS_LOCAL_VARIABLE_TABLE) \
+ declare_constant(JVM_ACC_PROMOTED_FLAGS) \
+ declare_constant(JVM_ACC_FIELD_ACCESS_WATCHED) \
+ declare_constant(JVM_ACC_FIELD_MODIFICATION_WATCHED) \
+ \
+ /*****************************/ \
+ /* Thread::SuspendFlags enum */ \
+ /*****************************/ \
+ \
+ declare_constant(Thread::_external_suspend) \
+ declare_constant(Thread::_ext_suspended) \
+ declare_constant(Thread::_has_async_exception) \
+ \
+ /*******************/ \
+ /* JavaThreadState */ \
+ /*******************/ \
+ \
+ declare_constant(_thread_uninitialized) \
+ declare_constant(_thread_new) \
+ declare_constant(_thread_new_trans) \
+ declare_constant(_thread_in_native) \
+ declare_constant(_thread_in_native_trans) \
+ declare_constant(_thread_in_vm) \
+ declare_constant(_thread_in_vm_trans) \
+ declare_constant(_thread_in_Java) \
+ declare_constant(_thread_in_Java_trans) \
+ declare_constant(_thread_blocked) \
+ declare_constant(_thread_blocked_trans) \
+ \
+ /******************************/ \
+ /* Klass misc. enum constants */ \
+ /******************************/ \
+ \
+ declare_constant(Klass::_primary_super_limit) \
+ declare_constant(Klass::_lh_instance_slow_path_bit) \
+ declare_constant(Klass::_lh_log2_element_size_shift) \
+ declare_constant(Klass::_lh_element_type_shift) \
+ declare_constant(Klass::_lh_header_size_shift) \
+ declare_constant(Klass::_lh_array_tag_shift) \
+ declare_constant(Klass::_lh_array_tag_type_value) \
+ declare_constant(Klass::_lh_array_tag_obj_value) \
+ \
+ /********************************/ \
+ /* constMethodOopDesc anon-enum */ \
+ /********************************/ \
+ \
+ declare_constant(constMethodOopDesc::_has_linenumber_table) \
+ declare_constant(constMethodOopDesc::_has_checked_exceptions) \
+ declare_constant(constMethodOopDesc::_has_localvariable_table) \
+ \
+ /*************************************/ \
+ /* instanceKlass FieldOffset enum */ \
+ /*************************************/ \
+ \
+ declare_constant(instanceKlass::access_flags_offset) \
+ declare_constant(instanceKlass::name_index_offset) \
+ declare_constant(instanceKlass::signature_index_offset) \
+ declare_constant(instanceKlass::initval_index_offset) \
+ declare_constant(instanceKlass::low_offset) \
+ declare_constant(instanceKlass::high_offset) \
+ declare_constant(instanceKlass::generic_signature_offset) \
+ declare_constant(instanceKlass::next_offset) \
+ declare_constant(instanceKlass::implementors_limit) \
+ \
+ /************************************************/ \
+ /* instanceKlass InnerClassAttributeOffset enum */ \
+ /************************************************/ \
+ \
+ declare_constant(instanceKlass::inner_class_inner_class_info_offset) \
+ declare_constant(instanceKlass::inner_class_outer_class_info_offset) \
+ declare_constant(instanceKlass::inner_class_inner_name_offset) \
+ declare_constant(instanceKlass::inner_class_access_flags_offset) \
+ declare_constant(instanceKlass::inner_class_next_offset) \
+ \
+ /*********************************/ \
+ /* instanceKlass ClassState enum */ \
+ /*********************************/ \
+ \
+ declare_constant(instanceKlass::unparsable_by_gc) \
+ declare_constant(instanceKlass::allocated) \
+ declare_constant(instanceKlass::loaded) \
+ declare_constant(instanceKlass::linked) \
+ declare_constant(instanceKlass::being_initialized) \
+ declare_constant(instanceKlass::fully_initialized) \
+ declare_constant(instanceKlass::initialization_error) \
+ \
+ /*********************************/ \
+ /* symbolOop - symbol max length */ \
+ /*********************************/ \
+ \
+ declare_constant(symbolOopDesc::max_symbol_length) \
+ \
+ /*********************************************/ \
+ /* ConstantPoolCacheEntry FlagBitValues enum */ \
+ /*********************************************/ \
+ \
+ declare_constant(ConstantPoolCacheEntry::hotSwapBit) \
+ declare_constant(ConstantPoolCacheEntry::methodInterface) \
+ declare_constant(ConstantPoolCacheEntry::volatileField) \
+ declare_constant(ConstantPoolCacheEntry::vfinalMethod) \
+ declare_constant(ConstantPoolCacheEntry::finalField) \
+ \
+ /******************************************/ \
+ /* ConstantPoolCacheEntry FlagValues enum */ \
+ /******************************************/ \
+ \
+ declare_constant(ConstantPoolCacheEntry::tosBits) \
+ \
+ /*********************************/ \
+ /* java_lang_Class field offsets */ \
+ /*********************************/ \
+ \
+ declare_constant(java_lang_Class::hc_klass_offset) \
+ declare_constant(java_lang_Class::hc_array_klass_offset) \
+ declare_constant(java_lang_Class::hc_resolved_constructor_offset) \
+ declare_constant(java_lang_Class::hc_number_of_fake_oop_fields) \
+ \
+ /***************************************/ \
+ /* java_lang_Thread::ThreadStatus enum */ \
+ /***************************************/ \
+ \
+ declare_constant(java_lang_Thread::NEW) \
+ declare_constant(java_lang_Thread::RUNNABLE) \
+ declare_constant(java_lang_Thread::SLEEPING) \
+ declare_constant(java_lang_Thread::IN_OBJECT_WAIT) \
+ declare_constant(java_lang_Thread::IN_OBJECT_WAIT_TIMED) \
+ declare_constant(java_lang_Thread::PARKED) \
+ declare_constant(java_lang_Thread::PARKED_TIMED) \
+ declare_constant(java_lang_Thread::BLOCKED_ON_MONITOR_ENTER) \
+ declare_constant(java_lang_Thread::TERMINATED) \
+ \
+ /******************************/ \
+ /* Debug info */ \
+ /******************************/ \
+ \
+ declare_constant(Location::OFFSET_MASK) \
+ declare_constant(Location::OFFSET_SHIFT) \
+ declare_constant(Location::TYPE_MASK) \
+ declare_constant(Location::TYPE_SHIFT) \
+ declare_constant(Location::WHERE_MASK) \
+ declare_constant(Location::WHERE_SHIFT) \
+ \
+ /* constants from Location::Type enum */ \
+ \
+ declare_constant(Location::normal) \
+ declare_constant(Location::oop) \
+ declare_constant(Location::int_in_long) \
+ declare_constant(Location::lng) \
+ declare_constant(Location::float_in_dbl) \
+ declare_constant(Location::dbl) \
+ declare_constant(Location::addr) \
+ declare_constant(Location::invalid) \
+ \
+ /* constants from Location::Where enum */ \
+ \
+ declare_constant(Location::on_stack) \
+ declare_constant(Location::in_register) \
+ \
+ /*********************/ \
+ /* Matcher (C2 only) */ \
+ /*********************/ \
+ \
+ declare_c2_preprocessor_constant("Matcher::interpreter_frame_pointer_reg", Matcher::interpreter_frame_pointer_reg()) \
+ \
+ /*********************************************/ \
+ /* MethodCompilation (globalDefinitions.hpp) */ \
+ /*********************************************/ \
+ \
+ declare_constant(InvocationEntryBci) \
+ declare_constant(InvalidOSREntryBci) \
+ \
+ /***************/ \
+ /* OopMapValue */ \
+ /***************/ \
+ \
+ declare_constant(OopMapValue::type_bits) \
+ declare_constant(OopMapValue::register_bits) \
+ declare_constant(OopMapValue::type_shift) \
+ declare_constant(OopMapValue::register_shift) \
+ declare_constant(OopMapValue::type_mask) \
+ declare_constant(OopMapValue::type_mask_in_place) \
+ declare_constant(OopMapValue::register_mask) \
+ declare_constant(OopMapValue::register_mask_in_place) \
+ declare_constant(OopMapValue::unused_value) \
+ declare_constant(OopMapValue::oop_value) \
+ declare_constant(OopMapValue::value_value) \
+ declare_constant(OopMapValue::dead_value) \
+ declare_constant(OopMapValue::callee_saved_value) \
+ declare_constant(OopMapValue::derived_oop_value) \
+ \
+ /******************/ \
+ /* JNIHandleBlock */ \
+ /******************/ \
+ \
+ declare_constant(JNIHandleBlock::block_size_in_oops) \
+ \
+ /**********************/ \
+ /* ObjectSynchronizer */ \
+ /**********************/ \
+ \
+ declare_constant(ObjectSynchronizer::_BLOCKSIZE) \
+ \
+ /********************************/ \
+ /* Calling convention constants */ \
+ /********************************/ \
+ \
+ declare_constant(RegisterImpl::number_of_registers) \
+ declare_constant(ConcreteRegisterImpl::number_of_registers) \
+ declare_preprocessor_constant("REG_COUNT", REG_COUNT) \
+ declare_c2_preprocessor_constant("SAVED_ON_ENTRY_REG_COUNT", SAVED_ON_ENTRY_REG_COUNT) \
+ declare_c2_preprocessor_constant("C_SAVED_ON_ENTRY_REG_COUNT", C_SAVED_ON_ENTRY_REG_COUNT)
+
+ /* NOTE that we do not use the last_entry() macro here; it is used */
+ /* in vmStructs_<os>_<cpu>.hpp's VM_INT_CONSTANTS_OS_CPU macro (and */
+ /* must be present there) */
+
+//--------------------------------------------------------------------------------
+// VM_LONG_CONSTANTS
+//
+// This table contains long constants required over in the
+// serviceability agent. The "declare_constant" macro is used for all
+// enums, etc., while "declare_preprocessor_constant" must be used for
+// all #defined constants.
+
+#define VM_LONG_CONSTANTS(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
+ \
+ /*********************/ \
+ /* MarkOop constants */ \
+ /*********************/ \
+ \
+ /* Note: some of these are declared as long constants just for */ \
+ /* consistency. The mask constants are the only ones requiring */ \
+ /* 64 bits (on 64-bit platforms). */ \
+ \
+ declare_constant(markOopDesc::age_bits) \
+ declare_constant(markOopDesc::lock_bits) \
+ declare_constant(markOopDesc::biased_lock_bits) \
+ declare_constant(markOopDesc::max_hash_bits) \
+ declare_constant(markOopDesc::hash_bits) \
+ \
+ declare_constant(markOopDesc::lock_shift) \
+ declare_constant(markOopDesc::biased_lock_shift) \
+ declare_constant(markOopDesc::age_shift) \
+ declare_constant(markOopDesc::hash_shift) \
+ \
+ declare_constant(markOopDesc::lock_mask) \
+ declare_constant(markOopDesc::lock_mask_in_place) \
+ declare_constant(markOopDesc::biased_lock_mask) \
+ declare_constant(markOopDesc::biased_lock_mask_in_place) \
+ declare_constant(markOopDesc::biased_lock_bit_in_place) \
+ declare_constant(markOopDesc::age_mask) \
+ declare_constant(markOopDesc::age_mask_in_place) \
+ declare_constant(markOopDesc::hash_mask) \
+ declare_constant(markOopDesc::hash_mask_in_place) \
+ declare_constant(markOopDesc::biased_lock_alignment) \
+ \
+ declare_constant(markOopDesc::locked_value) \
+ declare_constant(markOopDesc::unlocked_value) \
+ declare_constant(markOopDesc::monitor_value) \
+ declare_constant(markOopDesc::marked_value) \
+ declare_constant(markOopDesc::biased_lock_pattern) \
+ \
+ declare_constant(markOopDesc::no_hash) \
+ declare_constant(markOopDesc::no_hash_in_place) \
+ declare_constant(markOopDesc::no_lock_in_place) \
+ declare_constant(markOopDesc::max_age)
+
+ /* NOTE that we do not use the last_entry() macro here; it is used */
+ /* in vmStructs_<os>_<cpu>.hpp's VM_LONG_CONSTANTS_OS_CPU macro (and */
+ /* must be present there) */
+
+
+//--------------------------------------------------------------------------------
+// Macros operating on the above lists
+//--------------------------------------------------------------------------------
+
+// This utility macro quotes the passed string
+#define QUOTE(x) #x
+
+//--------------------------------------------------------------------------------
+// VMStructEntry macros
+//
+
+// This macro generates a VMStructEntry line for a nonstatic field
+#define GENERATE_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \
+ { QUOTE(typeName), QUOTE(fieldName), QUOTE(type), 0, cast_uint64_t(offset_of(typeName, fieldName)), NULL },
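+// For illustration only (Foo and _bar are hypothetical, not entries in the
+// tables above): GENERATE_NONSTATIC_VM_STRUCT_ENTRY(Foo, _bar, int) expands to
+//   { "Foo", "_bar", "int", 0, cast_uint64_t(offset_of(Foo, _bar)), NULL },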
+
+// This macro generates a VMStructEntry line for a static field
+#define GENERATE_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \
+ { QUOTE(typeName), QUOTE(fieldName), QUOTE(type), 1, 0, &typeName::fieldName },
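+// For illustration only (again with the hypothetical Foo): a static field
+// GENERATE_STATIC_VM_STRUCT_ENTRY(Foo, _count, int) expands to
+//   { "Foo", "_count", "int", 1, 0, &Foo::_count },
+// i.e. the entry records an address rather than an offset.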
+
+// This macro generates a VMStructEntry line for an unchecked
+// nonstatic field, in which the size of the type is also specified.
+// The type string is given as NULL, indicating an "opaque" type.
+#define GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, size) \
+ { QUOTE(typeName), QUOTE(fieldName), NULL, 0, cast_uint64_t(offset_of(typeName, fieldName)), NULL },
+
+// This macro generates a VMStructEntry line for an unchecked
+// static field, in which the size of the type is also specified.
+// The type string is given as NULL, indicating an "opaque" type.
+#define GENERATE_UNCHECKED_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, size) \
+ { QUOTE(typeName), QUOTE(fieldName), NULL, 1, 0, (void*) &typeName::fieldName },
+
+// This macro generates the sentinel value indicating the end of the list
+#define GENERATE_VM_STRUCT_LAST_ENTRY() \
+ { NULL, NULL, NULL, 0, 0, NULL }
+
+// This macro checks the type of a VMStructEntry by comparing pointer types
+#define CHECK_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \
+ {typeName *dummyObj = NULL; type* dummy = &dummyObj->fieldName; }
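+// The assignment above compiles only if a pointer to the field's actual type
+// converts to "type*"; e.g. declaring the hypothetical Foo::_bar above as
+// "char" when it is really an "int" fails with a pointer conversion error,
+// so type mismatches in the table are caught at build time.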
+
+// This macro checks the type of a volatile VMStructEntry by comparing pointer types
+#define CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \
+ {typedef type dummyvtype; typeName *dummyObj = NULL; volatile dummyvtype* dummy = &dummyObj->fieldName; }
+
+// This macro checks the type of a VMStructEntry by comparing pointer types
+#define CHECK_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \
+ {type* dummy = &typeName::fieldName; }
+
+// This macro ensures the type of a field and its containing type are
+// present in the type table. The assertion string is shorter than
+// preferable because, incredibly, a bug in the Solstice NFS client
+// seems to prevent very long lines from compiling. A failure of this
+// assertion means that an entry in VMStructs::localHotSpotVMStructs[]
+// was not found in VMStructs::localHotSpotVMTypes[].
+#define ENSURE_FIELD_TYPE_PRESENT(typeName, fieldName, type) \
+ { assert(findType(QUOTE(typeName)) != 0, "type \"" QUOTE(typeName) "\" not found in type table"); \
+ assert(findType(QUOTE(type)) != 0, "type \"" QUOTE(type) "\" not found in type table"); }
+
+// This is a no-op macro for unchecked fields
+#define CHECK_NO_OP(a, b, c)
+
+// This is a no-op macro for the sentinel value
+#define CHECK_SENTINEL()
+
+//
+// Build-specific macros:
+//
+
+// Generate and check a nonstatic field in non-product builds
+#ifndef PRODUCT
+# define GENERATE_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY(a, b, c) GENERATE_NONSTATIC_VM_STRUCT_ENTRY(a, b, c)
+# define CHECK_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY(a, b, c) CHECK_NONSTATIC_VM_STRUCT_ENTRY(a, b, c)
+# define ENSURE_NONPRODUCT_FIELD_TYPE_PRESENT(a, b, c) ENSURE_FIELD_TYPE_PRESENT(a, b, c)
+#else
+# define GENERATE_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY(a, b, c)
+# define CHECK_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY(a, b, c)
+# define ENSURE_NONPRODUCT_FIELD_TYPE_PRESENT(a, b, c)
+#endif /* PRODUCT */
+
+// Generate and check a nonstatic field in C1 builds
+#ifdef COMPILER1
+# define GENERATE_C1_NONSTATIC_VM_STRUCT_ENTRY(a, b, c) GENERATE_NONSTATIC_VM_STRUCT_ENTRY(a, b, c)
+# define CHECK_C1_NONSTATIC_VM_STRUCT_ENTRY(a, b, c) CHECK_NONSTATIC_VM_STRUCT_ENTRY(a, b, c)
+# define ENSURE_C1_FIELD_TYPE_PRESENT(a, b, c) ENSURE_FIELD_TYPE_PRESENT(a, b, c)
+#else
+# define GENERATE_C1_NONSTATIC_VM_STRUCT_ENTRY(a, b, c)
+# define CHECK_C1_NONSTATIC_VM_STRUCT_ENTRY(a, b, c)
+# define ENSURE_C1_FIELD_TYPE_PRESENT(a, b, c)
+#endif /* COMPILER1 */
+// Generate and check a nonstatic field in C2 builds
+#ifdef COMPILER2
+# define GENERATE_C2_NONSTATIC_VM_STRUCT_ENTRY(a, b, c) GENERATE_NONSTATIC_VM_STRUCT_ENTRY(a, b, c)
+# define CHECK_C2_NONSTATIC_VM_STRUCT_ENTRY(a, b, c) CHECK_NONSTATIC_VM_STRUCT_ENTRY(a, b, c)
+# define ENSURE_C2_FIELD_TYPE_PRESENT(a, b, c) ENSURE_FIELD_TYPE_PRESENT(a, b, c)
+#else
+# define GENERATE_C2_NONSTATIC_VM_STRUCT_ENTRY(a, b, c)
+# define CHECK_C2_NONSTATIC_VM_STRUCT_ENTRY(a, b, c)
+# define ENSURE_C2_FIELD_TYPE_PRESENT(a, b, c)
+#endif /* COMPILER2 */
+
+// Generate but do not check a static field in C1 builds
+#ifdef COMPILER1
+# define GENERATE_C1_UNCHECKED_STATIC_VM_STRUCT_ENTRY(a, b, c) GENERATE_UNCHECKED_STATIC_VM_STRUCT_ENTRY(a, b, c)
+#else
+# define GENERATE_C1_UNCHECKED_STATIC_VM_STRUCT_ENTRY(a, b, c)
+#endif /* COMPILER1 */
+
+// Generate but do not check a static field in C2 builds
+#ifdef COMPILER2
+# define GENERATE_C2_UNCHECKED_STATIC_VM_STRUCT_ENTRY(a, b, c) GENERATE_UNCHECKED_STATIC_VM_STRUCT_ENTRY(a, b, c)
+#else
+# define GENERATE_C2_UNCHECKED_STATIC_VM_STRUCT_ENTRY(a, b, c)
+#endif /* COMPILER2 */
+
+//--------------------------------------------------------------------------------
+// VMTypeEntry macros
+//
+
+#define GENERATE_VM_TYPE_ENTRY(type, superclass) \
+ { QUOTE(type), QUOTE(superclass), 0, 0, 0, sizeof(type) },
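+// For example, the declare_type(nmethod, CodeBlob) entry in the table above is
+// expanded through this macro into an entry like
+//   { "nmethod", "CodeBlob", 0, 0, 0, sizeof(nmethod) },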
+
+#define GENERATE_TOPLEVEL_VM_TYPE_ENTRY(type) \
+ { QUOTE(type), NULL, 0, 0, 0, sizeof(type) },
+
+#define GENERATE_OOP_VM_TYPE_ENTRY(type) \
+ { QUOTE(type), NULL, 1, 0, 0, sizeof(type) },
+
+#define GENERATE_INTEGER_VM_TYPE_ENTRY(type) \
+ { QUOTE(type), NULL, 0, 1, 0, sizeof(type) },
+
+#define GENERATE_UNSIGNED_INTEGER_VM_TYPE_ENTRY(type) \
+ { QUOTE(type), NULL, 0, 1, 1, sizeof(type) },
+
+#define GENERATE_VM_TYPE_LAST_ENTRY() \
+ { NULL, NULL, 0, 0, 0, 0 }
+
+#define CHECK_VM_TYPE_ENTRY(type, superclass) \
+ { type* dummyObj = NULL; superclass* dummySuperObj = dummyObj; }
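+// The implicit conversion above compiles only if "type" really derives from
+// "superclass", so a wrong parent in a declare_type() entry (for example, if
+// nmethod were incorrectly declared as a subtype of BufferBlob) is rejected
+// by the compiler.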
+
+#define CHECK_VM_TYPE_NO_OP(a)
+#define CHECK_SINGLE_ARG_VM_TYPE_NO_OP(a)
+
+//
+// Build-specific macros:
+//
+
+#ifdef COMPILER1
+# define GENERATE_C1_TOPLEVEL_VM_TYPE_ENTRY(a) GENERATE_TOPLEVEL_VM_TYPE_ENTRY(a)
+# define CHECK_C1_TOPLEVEL_VM_TYPE_ENTRY(a)
+#else
+# define GENERATE_C1_TOPLEVEL_VM_TYPE_ENTRY(a)
+# define CHECK_C1_TOPLEVEL_VM_TYPE_ENTRY(a)
+#endif /* COMPILER1 */
+
+#ifdef COMPILER2
+# define GENERATE_C2_VM_TYPE_ENTRY(a, b) GENERATE_VM_TYPE_ENTRY(a, b)
+# define CHECK_C2_VM_TYPE_ENTRY(a, b) CHECK_VM_TYPE_ENTRY(a, b)
+# define GENERATE_C2_TOPLEVEL_VM_TYPE_ENTRY(a) GENERATE_TOPLEVEL_VM_TYPE_ENTRY(a)
+# define CHECK_C2_TOPLEVEL_VM_TYPE_ENTRY(a)
+#else
+# define GENERATE_C2_VM_TYPE_ENTRY(a, b)
+# define CHECK_C2_VM_TYPE_ENTRY(a, b)
+# define GENERATE_C2_TOPLEVEL_VM_TYPE_ENTRY(a)
+# define CHECK_C2_TOPLEVEL_VM_TYPE_ENTRY(a)
+#endif /* COMPILER2 */
+
+
+//--------------------------------------------------------------------------------
+// VMIntConstantEntry macros
+//
+
+#define GENERATE_VM_INT_CONSTANT_ENTRY(name) \
+ { QUOTE(name), (int32_t) name },
+
+#define GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY(name, value) \
+ { name, (int32_t) value },
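+// For example, the declare_constant(UseTLAB) and
+// declare_preprocessor_constant("STACK_BIAS", STACK_BIAS) entries above
+// become, respectively,
+//   { "UseTLAB", (int32_t) UseTLAB },
+//   { "STACK_BIAS", (int32_t) STACK_BIAS },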
+
+// This macro generates the sentinel value indicating the end of the list
+#define GENERATE_VM_INT_CONSTANT_LAST_ENTRY() \
+ { NULL, 0 }
+
+
+// Generate an int constant for a C1 build
+#ifdef COMPILER1
+# define GENERATE_C1_VM_INT_CONSTANT_ENTRY(name) GENERATE_VM_INT_CONSTANT_ENTRY(name)
+#else
+# define GENERATE_C1_VM_INT_CONSTANT_ENTRY(name)
+#endif /* COMPILER1 */
+
+// Generate an int constant for a C2 build
+#ifdef COMPILER2
+# define GENERATE_C2_VM_INT_CONSTANT_ENTRY(name) GENERATE_VM_INT_CONSTANT_ENTRY(name)
+# define GENERATE_C2_PREPROCESSOR_VM_INT_CONSTANT_ENTRY(name, value) GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY(name, value)
+#else
+# define GENERATE_C2_VM_INT_CONSTANT_ENTRY(name)
+# define GENERATE_C2_PREPROCESSOR_VM_INT_CONSTANT_ENTRY(name, value)
+#endif /* COMPILER2 */
+
+//--------------------------------------------------------------------------------
+// VMLongConstantEntry macros
+//
+
+#define GENERATE_VM_LONG_CONSTANT_ENTRY(name) \
+ { QUOTE(name), cast_uint64_t(name) },
+
+#define GENERATE_PREPROCESSOR_VM_LONG_CONSTANT_ENTRY(name, value) \
+ { name, cast_uint64_t(value) },
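+// For example, declare_constant(markOopDesc::hash_mask_in_place) above becomes
+//   { "markOopDesc::hash_mask_in_place", cast_uint64_t(markOopDesc::hash_mask_in_place) },
+// keeping the full 64-bit value on 64-bit platforms.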
+
+// This macro generates the sentinel value indicating the end of the list
+#define GENERATE_VM_LONG_CONSTANT_LAST_ENTRY() \
+ { NULL, 0 }
+
+// Generate a long constant for a C1 build
+#ifdef COMPILER1
+# define GENERATE_C1_VM_LONG_CONSTANT_ENTRY(name) GENERATE_VM_LONG_CONSTANT_ENTRY(name)
+#else
+# define GENERATE_C1_VM_LONG_CONSTANT_ENTRY(name)
+#endif /* COMPILER1 */
+
+// Generate a long constant for a C2 build
+#ifdef COMPILER2
+# define GENERATE_C2_VM_LONG_CONSTANT_ENTRY(name) GENERATE_VM_LONG_CONSTANT_ENTRY(name)
+# define GENERATE_C2_PREPROCESSOR_VM_LONG_CONSTANT_ENTRY(name, value) GENERATE_PREPROCESSOR_VM_LONG_CONSTANT_ENTRY(name, value)
+#else
+# define GENERATE_C2_VM_LONG_CONSTANT_ENTRY(name)
+# define GENERATE_C2_PREPROCESSOR_VM_LONG_CONSTANT_ENTRY(name, value)
+#endif /* COMPILER2 */
+
+//
+// Instantiation of VMStructEntries, VMTypeEntries and VMIntConstantEntries
+//
+
+// These initializers are allowed to access private fields in classes
+// as long as class VMStructs is a friend
+VMStructEntry VMStructs::localHotSpotVMStructs[] = {
+
+ VM_STRUCTS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_STATIC_VM_STRUCT_ENTRY, \
+ GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_C1_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_C2_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_C1_UNCHECKED_STATIC_VM_STRUCT_ENTRY, \
+ GENERATE_C2_UNCHECKED_STATIC_VM_STRUCT_ENTRY, \
+ GENERATE_VM_STRUCT_LAST_ENTRY)
+
+#ifndef SERIALGC
+ VM_STRUCTS_PARALLELGC(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_STATIC_VM_STRUCT_ENTRY)
+
+ VM_STRUCTS_CMS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_STATIC_VM_STRUCT_ENTRY)
+#endif // SERIALGC
+
+ VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_STATIC_VM_STRUCT_ENTRY, \
+ GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_C2_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_C1_UNCHECKED_STATIC_VM_STRUCT_ENTRY, \
+ GENERATE_C2_UNCHECKED_STATIC_VM_STRUCT_ENTRY, \
+ GENERATE_VM_STRUCT_LAST_ENTRY)
+
+ VM_STRUCTS_OS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_STATIC_VM_STRUCT_ENTRY, \
+ GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_C2_NONSTATIC_VM_STRUCT_ENTRY, \
+ GENERATE_C1_UNCHECKED_STATIC_VM_STRUCT_ENTRY, \
+ GENERATE_C2_UNCHECKED_STATIC_VM_STRUCT_ENTRY, \
+ GENERATE_VM_STRUCT_LAST_ENTRY)
+};
+
+VMTypeEntry VMStructs::localHotSpotVMTypes[] = {
+
+ VM_TYPES(GENERATE_VM_TYPE_ENTRY,
+ GENERATE_TOPLEVEL_VM_TYPE_ENTRY,
+ GENERATE_OOP_VM_TYPE_ENTRY,
+ GENERATE_INTEGER_VM_TYPE_ENTRY,
+ GENERATE_UNSIGNED_INTEGER_VM_TYPE_ENTRY,
+ GENERATE_C1_TOPLEVEL_VM_TYPE_ENTRY,
+ GENERATE_C2_VM_TYPE_ENTRY,
+ GENERATE_C2_TOPLEVEL_VM_TYPE_ENTRY,
+ GENERATE_VM_TYPE_LAST_ENTRY)
+
+#ifndef SERIALGC
+ VM_TYPES_PARALLELGC(GENERATE_VM_TYPE_ENTRY,
+ GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
+
+ VM_TYPES_CMS(GENERATE_VM_TYPE_ENTRY,
+ GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
+
+ VM_TYPES_PARNEW(GENERATE_VM_TYPE_ENTRY)
+#endif // SERIALGC
+
+ VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY,
+ GENERATE_TOPLEVEL_VM_TYPE_ENTRY,
+ GENERATE_OOP_VM_TYPE_ENTRY,
+ GENERATE_INTEGER_VM_TYPE_ENTRY,
+ GENERATE_UNSIGNED_INTEGER_VM_TYPE_ENTRY,
+ GENERATE_C1_TOPLEVEL_VM_TYPE_ENTRY,
+ GENERATE_C2_VM_TYPE_ENTRY,
+ GENERATE_C2_TOPLEVEL_VM_TYPE_ENTRY,
+ GENERATE_VM_TYPE_LAST_ENTRY)
+
+ VM_TYPES_OS_CPU(GENERATE_VM_TYPE_ENTRY,
+ GENERATE_TOPLEVEL_VM_TYPE_ENTRY,
+ GENERATE_OOP_VM_TYPE_ENTRY,
+ GENERATE_INTEGER_VM_TYPE_ENTRY,
+ GENERATE_UNSIGNED_INTEGER_VM_TYPE_ENTRY,
+ GENERATE_C1_TOPLEVEL_VM_TYPE_ENTRY,
+ GENERATE_C2_VM_TYPE_ENTRY,
+ GENERATE_C2_TOPLEVEL_VM_TYPE_ENTRY,
+ GENERATE_VM_TYPE_LAST_ENTRY)
+};
+
+VMIntConstantEntry VMStructs::localHotSpotVMIntConstants[] = {
+
+ VM_INT_CONSTANTS(GENERATE_VM_INT_CONSTANT_ENTRY,
+ GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY,
+ GENERATE_C1_VM_INT_CONSTANT_ENTRY,
+ GENERATE_C2_VM_INT_CONSTANT_ENTRY,
+ GENERATE_C2_PREPROCESSOR_VM_INT_CONSTANT_ENTRY,
+ GENERATE_VM_INT_CONSTANT_LAST_ENTRY)
+
+#ifndef SERIALGC
+ VM_INT_CONSTANTS_CMS(GENERATE_VM_INT_CONSTANT_ENTRY)
+
+ VM_INT_CONSTANTS_PARNEW(GENERATE_VM_INT_CONSTANT_ENTRY)
+#endif // SERIALGC
+
+ VM_INT_CONSTANTS_CPU(GENERATE_VM_INT_CONSTANT_ENTRY,
+ GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY,
+ GENERATE_C1_VM_INT_CONSTANT_ENTRY,
+ GENERATE_C2_VM_INT_CONSTANT_ENTRY,
+ GENERATE_C2_PREPROCESSOR_VM_INT_CONSTANT_ENTRY,
+ GENERATE_VM_INT_CONSTANT_LAST_ENTRY)
+
+ VM_INT_CONSTANTS_OS_CPU(GENERATE_VM_INT_CONSTANT_ENTRY,
+ GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY,
+ GENERATE_C1_VM_INT_CONSTANT_ENTRY,
+ GENERATE_C2_VM_INT_CONSTANT_ENTRY,
+ GENERATE_C2_PREPROCESSOR_VM_INT_CONSTANT_ENTRY,
+ GENERATE_VM_INT_CONSTANT_LAST_ENTRY)
+};
+
+VMLongConstantEntry VMStructs::localHotSpotVMLongConstants[] = {
+
+ VM_LONG_CONSTANTS(GENERATE_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_PREPROCESSOR_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_C1_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_C2_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_C2_PREPROCESSOR_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_VM_LONG_CONSTANT_LAST_ENTRY)
+
+ VM_LONG_CONSTANTS_CPU(GENERATE_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_PREPROCESSOR_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_C1_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_C2_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_C2_PREPROCESSOR_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_VM_LONG_CONSTANT_LAST_ENTRY)
+
+ VM_LONG_CONSTANTS_OS_CPU(GENERATE_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_PREPROCESSOR_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_C1_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_C2_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_C2_PREPROCESSOR_VM_LONG_CONSTANT_ENTRY,
+ GENERATE_VM_LONG_CONSTANT_LAST_ENTRY)
+};
+
+// This is used both to check the types of referenced fields and, in
+// debug builds, to ensure that all of the field types are present.
+void
+VMStructs::init() {
+ VM_STRUCTS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_STATIC_VM_STRUCT_ENTRY,
+ CHECK_NO_OP,
+ CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_C1_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_C2_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_NO_OP,
+ CHECK_NO_OP,
+ CHECK_SENTINEL);
+
+#ifndef SERIALGC
+ VM_STRUCTS_PARALLELGC(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_STATIC_VM_STRUCT_ENTRY);
+
+ VM_STRUCTS_CMS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_STATIC_VM_STRUCT_ENTRY);
+#endif // SERIALGC
+
+ VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_STATIC_VM_STRUCT_ENTRY,
+ CHECK_NO_OP,
+ CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_C2_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_NO_OP,
+ CHECK_NO_OP,
+ CHECK_SENTINEL);
+
+ VM_STRUCTS_OS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_STATIC_VM_STRUCT_ENTRY,
+ CHECK_NO_OP,
+ CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_NONPRODUCT_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_C2_NONSTATIC_VM_STRUCT_ENTRY,
+ CHECK_NO_OP,
+ CHECK_NO_OP,
+ CHECK_SENTINEL);
+
+ VM_TYPES(CHECK_VM_TYPE_ENTRY,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
+ CHECK_C1_TOPLEVEL_VM_TYPE_ENTRY,
+ CHECK_C2_VM_TYPE_ENTRY,
+ CHECK_C2_TOPLEVEL_VM_TYPE_ENTRY,
+ CHECK_SENTINEL);
+
+#ifndef SERIALGC
+ VM_TYPES_PARALLELGC(CHECK_VM_TYPE_ENTRY,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
+
+ VM_TYPES_CMS(CHECK_VM_TYPE_ENTRY,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
+
+ VM_TYPES_PARNEW(CHECK_VM_TYPE_ENTRY)
+#endif // SERIALGC
+
+ VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
+ CHECK_C1_TOPLEVEL_VM_TYPE_ENTRY,
+ CHECK_C2_VM_TYPE_ENTRY,
+ CHECK_C2_TOPLEVEL_VM_TYPE_ENTRY,
+ CHECK_SENTINEL);
+
+ VM_TYPES_OS_CPU(CHECK_VM_TYPE_ENTRY,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
+ CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
+ CHECK_C1_TOPLEVEL_VM_TYPE_ENTRY,
+ CHECK_C2_VM_TYPE_ENTRY,
+ CHECK_C2_TOPLEVEL_VM_TYPE_ENTRY,
+ CHECK_SENTINEL);
+
+ //
+ // Split VM_STRUCTS() invocation into two parts to allow MS VC++ 6.0
+ // to build with the source mounted over SNC3.2. Symptom was that
+ // debug build failed with an internal compiler error. Has been seen
+ // mounting sources from Solaris 2.6 and 2.7 hosts, but so far not
+ // 2.8 hosts. Appears to occur because line is too long.
+ //
+ // If an assertion failure is triggered here it means that an entry
+ // in VMStructs::localHotSpotVMStructs[] was not found in
+ // VMStructs::localHotSpotVMTypes[]. (The assertion itself had to be
+ // made less descriptive because of this above bug -- see the
+ // definition of ENSURE_FIELD_TYPE_PRESENT.)
+ //
+ // NOTE: taken out because this was just not working on everyone's
+ // Solstice NFS setup. If everyone switches to local workspaces on
+ // Win32, we can put this back in.
+#ifndef _WINDOWS
+ debug_only(VM_STRUCTS(ENSURE_FIELD_TYPE_PRESENT, \
+ CHECK_NO_OP, \
+ CHECK_NO_OP, \
+ CHECK_NO_OP, \
+ CHECK_NO_OP, \
+ CHECK_NO_OP, \
+ CHECK_NO_OP, \
+ CHECK_NO_OP, \
+ CHECK_NO_OP, \
+ CHECK_SENTINEL));
+ debug_only(VM_STRUCTS(CHECK_NO_OP, \
+ ENSURE_FIELD_TYPE_PRESENT, \
+ CHECK_NO_OP, \
+ ENSURE_FIELD_TYPE_PRESENT, \
+ ENSURE_NONPRODUCT_FIELD_TYPE_PRESENT, \
+ ENSURE_C1_FIELD_TYPE_PRESENT, \
+ ENSURE_C2_FIELD_TYPE_PRESENT, \
+ CHECK_NO_OP, \
+ CHECK_NO_OP, \
+ CHECK_SENTINEL));
+#ifndef SERIALGC
+ debug_only(VM_STRUCTS_PARALLELGC(ENSURE_FIELD_TYPE_PRESENT, \
+ ENSURE_FIELD_TYPE_PRESENT));
+ debug_only(VM_STRUCTS_CMS(ENSURE_FIELD_TYPE_PRESENT, \
+ ENSURE_FIELD_TYPE_PRESENT));
+#endif // SERIALGC
+ debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT, \
+ ENSURE_FIELD_TYPE_PRESENT, \
+ CHECK_NO_OP, \
+ ENSURE_FIELD_TYPE_PRESENT, \
+ ENSURE_NONPRODUCT_FIELD_TYPE_PRESENT, \
+ ENSURE_C2_FIELD_TYPE_PRESENT, \
+ CHECK_NO_OP, \
+ CHECK_NO_OP, \
+ CHECK_SENTINEL));
+ debug_only(VM_STRUCTS_OS_CPU(ENSURE_FIELD_TYPE_PRESENT, \
+ ENSURE_FIELD_TYPE_PRESENT, \
+ CHECK_NO_OP, \
+ ENSURE_FIELD_TYPE_PRESENT, \
+ ENSURE_NONPRODUCT_FIELD_TYPE_PRESENT, \
+ ENSURE_C2_FIELD_TYPE_PRESENT, \
+ CHECK_NO_OP, \
+ CHECK_NO_OP, \
+ CHECK_SENTINEL));
+#endif
+}
+
+extern "C" {
+
+// see comments on cast_uint64_t at the top of this file
+#define ASSIGN_CONST_TO_64BIT_VAR(var, expr) \
+ JNIEXPORT uint64_t var = cast_uint64_t(expr);
+#define ASSIGN_OFFSET_TO_64BIT_VAR(var, type, field) \
+ ASSIGN_CONST_TO_64BIT_VAR(var, offset_of(type, field))
+#define ASSIGN_STRIDE_TO_64BIT_VAR(var, array) \
+ ASSIGN_CONST_TO_64BIT_VAR(var, (char*)&array[1] - (char*)&array[0])
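+// For illustration: the stride exported below for gHotSpotVMStructs is the byte
+// distance between two consecutive table entries (i.e. sizeof(VMStructEntry)),
+// which lets the serviceability agent step through the table without knowing
+// the struct layout in advance.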
+
+JNIEXPORT VMStructEntry* gHotSpotVMStructs = VMStructs::localHotSpotVMStructs;
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMStructEntryTypeNameOffset, VMStructEntry, typeName);
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMStructEntryFieldNameOffset, VMStructEntry, fieldName);
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMStructEntryTypeStringOffset, VMStructEntry, typeString);
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMStructEntryIsStaticOffset, VMStructEntry, isStatic);
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMStructEntryOffsetOffset, VMStructEntry, offset);
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMStructEntryAddressOffset, VMStructEntry, address);
+ASSIGN_STRIDE_TO_64BIT_VAR(gHotSpotVMStructEntryArrayStride, gHotSpotVMStructs);
+JNIEXPORT VMTypeEntry* gHotSpotVMTypes = VMStructs::localHotSpotVMTypes;
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMTypeEntryTypeNameOffset, VMTypeEntry, typeName);
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMTypeEntrySuperclassNameOffset, VMTypeEntry, superclassName);
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMTypeEntryIsOopTypeOffset, VMTypeEntry, isOopType);
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMTypeEntryIsIntegerTypeOffset, VMTypeEntry, isIntegerType);
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMTypeEntryIsUnsignedOffset, VMTypeEntry, isUnsigned);
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMTypeEntrySizeOffset, VMTypeEntry, size);
+ASSIGN_STRIDE_TO_64BIT_VAR(gHotSpotVMTypeEntryArrayStride, gHotSpotVMTypes);
+JNIEXPORT VMIntConstantEntry* gHotSpotVMIntConstants = VMStructs::localHotSpotVMIntConstants;
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMIntConstantEntryNameOffset, VMIntConstantEntry, name);
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMIntConstantEntryValueOffset, VMIntConstantEntry, value);
+ASSIGN_STRIDE_TO_64BIT_VAR(gHotSpotVMIntConstantEntryArrayStride, gHotSpotVMIntConstants);
+JNIEXPORT VMLongConstantEntry* gHotSpotVMLongConstants = VMStructs::localHotSpotVMLongConstants;
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMLongConstantEntryNameOffset, VMLongConstantEntry, name);
+ASSIGN_OFFSET_TO_64BIT_VAR(gHotSpotVMLongConstantEntryValueOffset, VMLongConstantEntry, value);
+ASSIGN_STRIDE_TO_64BIT_VAR(gHotSpotVMLongConstantEntryArrayStride, gHotSpotVMLongConstants);
+}
+
+#ifdef ASSERT
+int
+VMStructs::findType(const char* typeName) {
+ VMTypeEntry* types = gHotSpotVMTypes;
+
+ while (types->typeName != NULL) {
+ if (!strcmp(typeName, types->typeName)) {
+ return 1;
+ }
+ ++types;
+ }
+ return 0;
+}
+#endif
+
+void vmStructs_init() {
+ debug_only(VMStructs::init());
+}
diff --git a/src/share/vm/runtime/vmStructs.hpp b/src/share/vm/runtime/vmStructs.hpp
new file mode 100644
index 000000000..4e2670da6
--- /dev/null
+++ b/src/share/vm/runtime/vmStructs.hpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2000-2001 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// This table encapsulates the debugging information required by the
+// serviceability agent in order to run. Specifically, we need to
+// understand the layout of certain C data structures (offsets, in
+// bytes, of their fields.)
+//
+// There are alternatives for the design of this mechanism, including
+// parsing platform-specific debugging symbols from a debug build into
+// a program database. While this current mechanism can be considered
+// to be a workaround for the inability to debug arbitrary C and C++
+// programs at the present time, it does have certain advantages.
+// First, it is platform-independent, which will vastly simplify the
+// initial bringup of the system both now and on future platforms.
+// Second, it is embedded within the VM, as opposed to being in a
+// separate program database; experience has shown that whenever
+// portions of a system are decoupled, version skew is problematic.
+// Third, generating a program database, for example for a product
+// build, would probably require two builds to be done: the desired
+// product build as well as an intermediary build with the PRODUCT
+// flag turned on but also compiled with -g, leading to a doubling of
+// the time required to get a serviceability agent-debuggable product
+// build. Fourth, and very significantly, this table probably
+// preserves more information about field types than stabs do; for
+// example, it preserves the fact that a field is a "jlong" rather
+// than transforming the type according to the typedef in jni_md.h,
+// which allows the Java-side code to identify "Java-sized" fields in
+// C++ data structures. If the symbol parsing mechanism were redone
+// using stabs, it might still be necessary to have a table somewhere
+// containing this information.
+//
+// Do not change the sizes or signedness of the integer values in
+// these data structures; they are fixed over in the serviceability
+// agent's Java code (for bootstrapping).
+
+typedef struct {
+ const char* typeName; // The type name containing the given field (example: "Klass")
+ const char* fieldName; // The field name within the type (example: "_name")
+ const char* typeString; // Quoted name of the type of this field (example: "symbolOopDesc*";
+ // parsed in Java to ensure type correctness)
+ int32_t isStatic; // Indicates whether following field is an offset or an address
+ uint64_t offset; // Offset of field within structure; only used for nonstatic fields
+ void* address; // Address of field; only used for static fields
+ // ("offset" can not be reused because of apparent SparcWorks compiler bug
+ // in generation of initializer data)
+} VMStructEntry;
+
+typedef struct {
+ const char* typeName; // Type name (example: "methodOopDesc")
+ const char* superclassName; // Superclass name, or null if none (example: "oopDesc")
+ int32_t isOopType; // Does this type represent an oop typedef? (i.e., "methodOop" or
+ // "klassOop", but NOT "methodOopDesc")
+ int32_t isIntegerType; // Does this type represent an integer type (of arbitrary size)?
+ int32_t isUnsigned; // If so, is it unsigned?
+ uint64_t size; // Size, in bytes, of the type
+} VMTypeEntry;
+
+typedef struct {
+ const char* name; // Name of constant (example: "_thread_in_native")
+ int32_t value; // Value of constant
+} VMIntConstantEntry;
+
+typedef struct {
+ const char* name; // Name of constant (example: "_thread_in_native")
+ uint64_t value; // Value of constant
+} VMLongConstantEntry;
+
+// This class is a friend of most classes, to be able to access
+// private fields
+class VMStructs {
+public:
+ // The last entry is identified over in the serviceability agent by
+ // the fact that it has a NULL fieldName
+ static VMStructEntry localHotSpotVMStructs[];
+
+ // The last entry is identified over in the serviceability agent by
+ // the fact that it has a NULL typeName
+ static VMTypeEntry localHotSpotVMTypes[];
+
+ // Table of integer constants required by the serviceability agent.
+ // The last entry is identified over in the serviceability agent by
+ // the fact that it has a NULL name
+ static VMIntConstantEntry localHotSpotVMIntConstants[];
+
+ // Table of long constants required by the serviceability agent.
+ // The last entry is identified over in the serviceability agent by
+ // the fact that it has a NULL name
+ static VMLongConstantEntry localHotSpotVMLongConstants[];
+
+ // This is used to run any checking code necessary for validation of
+ // the data structure (debug build only)
+ static void init();
+
+private:
+ // Look up a type in localHotSpotVMTypes using strcmp() (debug build only).
+ // Returns 1 if found, 0 if not.
+ // debug_only(static int findType(const char* typeName);)
+ static int findType(const char* typeName);
+};
diff --git a/src/share/vm/runtime/vmThread.cpp b/src/share/vm/runtime/vmThread.cpp
new file mode 100644
index 000000000..56b54c381
--- /dev/null
+++ b/src/share/vm/runtime/vmThread.cpp
@@ -0,0 +1,655 @@
+/*
+ * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_vmThread.cpp.incl"
+
+// Dummy VM operation to act as first element in our circular doubly-linked list
+class VM_Dummy: public VM_Operation {
+ VMOp_Type type() const { return VMOp_Dummy; }
+ void doit() {};
+};
+
+VMOperationQueue::VMOperationQueue() {
+ // The queue is a circular doubly-linked list, which always contains
+ // one element (i.e., one element means empty).
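+ // With only the VM_Dummy element present, _queue[i]->next() == _queue[i],
+ // which is exactly the test queue_empty() relies on below.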
+ for(int i = 0; i < nof_priorities; i++) {
+ _queue_length[i] = 0;
+ _queue_counter = 0;
+ _queue[i] = new VM_Dummy();
+ _queue[i]->set_next(_queue[i]);
+ _queue[i]->set_prev(_queue[i]);
+ }
+ _drain_list = NULL;
+}
+
+
+bool VMOperationQueue::queue_empty(int prio) {
+ // It is empty if there is exactly one element
+ bool empty = (_queue[prio] == _queue[prio]->next());
+ assert( (_queue_length[prio] == 0 && empty) ||
+ (_queue_length[prio] > 0 && !empty), "sanity check");
+ return _queue_length[prio] == 0;
+}
+
+// Inserts an element to the right of the q element
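+// i.e. links n between q and the old q->next(): q <-> n <-> old q->next()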
+void VMOperationQueue::insert(VM_Operation* q, VM_Operation* n) {
+ assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
+ n->set_prev(q);
+ n->set_next(q->next());
+ q->next()->set_prev(n);
+ q->set_next(n);
+}
+
+void VMOperationQueue::queue_add_front(int prio, VM_Operation *op) {
+ _queue_length[prio]++;
+ insert(_queue[prio]->next(), op);
+}
+
+void VMOperationQueue::queue_add_back(int prio, VM_Operation *op) {
+ _queue_length[prio]++;
+ insert(_queue[prio]->prev(), op);
+}
+
+
+void VMOperationQueue::unlink(VM_Operation* q) {
+ assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
+ q->prev()->set_next(q->next());
+ q->next()->set_prev(q->prev());
+}
+
+VM_Operation* VMOperationQueue::queue_remove_front(int prio) {
+ if (queue_empty(prio)) return NULL;
+ assert(_queue_length[prio] >= 0, "sanity check");
+ _queue_length[prio]--;
+ VM_Operation* r = _queue[prio]->next();
+ assert(r != _queue[prio], "cannot remove base element");
+ unlink(r);
+ return r;
+}
+
+VM_Operation* VMOperationQueue::queue_drain(int prio) {
+ if (queue_empty(prio)) return NULL;
+ DEBUG_ONLY(int length = _queue_length[prio];);
+ assert(length >= 0, "sanity check");
+ _queue_length[prio] = 0;
+ VM_Operation* r = _queue[prio]->next();
+ assert(r != _queue[prio], "cannot remove base element");
+ // remove links to base element from head and tail
+ r->set_prev(NULL);
+ _queue[prio]->prev()->set_next(NULL);
+ // restore queue to empty state
+ _queue[prio]->set_next(_queue[prio]);
+ _queue[prio]->set_prev(_queue[prio]);
+ assert(queue_empty(prio), "drain corrupted queue")
+#ifdef DEBUG
+ int len = 0;
+ VM_Operation* cur;
+ for(cur = r; cur != NULL; cur=cur->next()) len++;
+ assert(len == length, "drain lost some ops");
+#endif
+ return r;
+}
+
+void VMOperationQueue::queue_oops_do(int queue, OopClosure* f) {
+ VM_Operation* cur = _queue[queue];
+ cur = cur->next();
+ while (cur != _queue[queue]) {
+ cur->oops_do(f);
+ cur = cur->next();
+ }
+}
+
+void VMOperationQueue::drain_list_oops_do(OopClosure* f) {
+ VM_Operation* cur = _drain_list;
+ while (cur != NULL) {
+ cur->oops_do(f);
+ cur = cur->next();
+ }
+}
+
+//-----------------------------------------------------------------
+// High-level interface
+bool VMOperationQueue::add(VM_Operation *op) {
+ // Encapsulates VM queue policy. Currently, that
+ // only involves putting the operation on the right list
+ if (op->evaluate_at_safepoint()) {
+ queue_add_back(SafepointPriority, op);
+ return true;
+ }
+
+ queue_add_back(MediumPriority, op);
+ return true;
+}
+
+VM_Operation* VMOperationQueue::remove_next() {
+ // Assuming VMOperation queue is two-level priority queue. If there are
+ // more than two priorities, we need a different scheduling algorithm.
+ assert(SafepointPriority == 0 && MediumPriority == 1 && nof_priorities == 2,
+ "current algorithm does not work");
+
+ // simple counter based scheduling to prevent starvation of lower priority
+ // queue. -- see 4390175
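+ // For example: starting from a zeroed counter, ten consecutive calls prefer
+ // the SafepointPriority queue; the eleventh call resets the counter and
+ // prefers the MediumPriority queue instead, so low-priority ops cannot be
+ // starved indefinitely.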
+ int high_prio, low_prio;
+ if (_queue_counter++ < 10) {
+ high_prio = SafepointPriority;
+ low_prio = MediumPriority;
+ } else {
+ _queue_counter = 0;
+ high_prio = MediumPriority;
+ low_prio = SafepointPriority;
+ }
+
+ return queue_remove_front(queue_empty(high_prio) ? low_prio : high_prio);
+}
+
+void VMOperationQueue::oops_do(OopClosure* f) {
+ for(int i = 0; i < nof_priorities; i++) {
+ queue_oops_do(i, f);
+ }
+ drain_list_oops_do(f);
+}
+
+
+//------------------------------------------------------------------------------------------------------------------
+// Implementation of VMThread stuff
+
+bool VMThread::_should_terminate = false;
+bool VMThread::_terminated = false;
+Monitor* VMThread::_terminate_lock = NULL;
+VMThread* VMThread::_vm_thread = NULL;
+VM_Operation* VMThread::_cur_vm_operation = NULL;
+VMOperationQueue* VMThread::_vm_queue = NULL;
+PerfCounter* VMThread::_perf_accumulated_vm_operation_time = NULL;
+
+
+void VMThread::create() {
+ assert(vm_thread() == NULL, "we can only allocate one VMThread");
+ _vm_thread = new VMThread();
+
+ // Create VM operation queue
+ _vm_queue = new VMOperationQueue();
+ guarantee(_vm_queue != NULL, "just checking");
+
+ _terminate_lock = new Monitor(Mutex::safepoint, "VMThread::_terminate_lock", true);
+
+ if (UsePerfData) {
+ // jvmstat performance counters
+ Thread* THREAD = Thread::current();
+ _perf_accumulated_vm_operation_time =
+ PerfDataManager::create_counter(SUN_THREADS, "vmOperationTime",
+ PerfData::U_Ticks, CHECK);
+ }
+}
+
+
+VMThread::VMThread() : Thread() {
+ // nothing to do
+}
+
+void VMThread::destroy() {
+ if (_vm_thread != NULL) {
+ delete _vm_thread;
+ _vm_thread = NULL; // VM thread is gone
+ }
+}
+
+void VMThread::run() {
+ assert(this == vm_thread(), "check");
+
+ this->initialize_thread_local_storage();
+ this->record_stack_base_and_size();
+ // The Notify_lock wait checks active_handles() to re-wait in
+ // case of a spurious wakeup; it should wait on the last
+ // value set prior to the notify.
+ this->set_active_handles(JNIHandleBlock::allocate_block());
+
+ {
+ MutexLocker ml(Notify_lock);
+ Notify_lock->notify();
+ }
+ // Notify_lock is destroyed by Threads::create_vm()
+
+ int prio = (VMThreadPriority == -1)
+ ? os::java_to_os_priority[NearMaxPriority]
+ : VMThreadPriority;
+ // Note that I cannot call os::set_priority because it expects Java
+ // priorities and I am *explicitly* using OS priorities so that it's
+ // possible to set the VM thread priority higher than any Java thread.
+ os::set_native_priority( this, prio );
+
+ // Wait for VM_Operations until termination
+ this->loop();
+
+ // Note the intention to exit before safepointing.
+ // 6295565 This has the effect of waiting for any large tty
+ // outputs to finish.
+ if (xtty != NULL) {
+ ttyLocker ttyl;
+ xtty->begin_elem("destroy_vm");
+ xtty->stamp();
+ xtty->end_elem();
+ assert(should_terminate(), "termination flag must be set");
+ }
+
+ // 4526887 let VM thread exit at Safepoint
+ SafepointSynchronize::begin();
+
+ if (VerifyBeforeExit) {
+ HandleMark hm(VMThread::vm_thread());
+ // Among other things, this ensures that Eden top is correct.
+ Universe::heap()->prepare_for_verify();
+ os::check_heap();
+ Universe::verify(true, true); // Silent verification to not pollute normal output
+ }
+
+ CompileBroker::set_should_block();
+
+ // wait for threads (compiler threads or daemon threads) in the
+ // _thread_in_native state to block.
+ VM_Exit::wait_for_threads_in_native_to_block();
+
+ // signal other threads that VM process is gone
+ {
+ // Note: we must have the _no_safepoint_check_flag. Mutex::lock() allows
+ // VM thread to enter any lock at Safepoint as long as its _owner is NULL.
+ // If that happens after _terminate_lock->wait() has unset _owner
+ // but before it actually drops the lock and waits, the notification below
+ // may get lost and we will have a hang. To avoid this, we need to use
+ // Mutex::lock_without_safepoint_check().
+ MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
+ _terminated = true;
+ _terminate_lock->notify();
+ }
+
+ // Deletion must be done synchronously by the JNI DestroyJavaVM thread
+ // so that the VMThread deletion completes before the main thread frees
+ // up the CodeHeap.
+
+}
+
+
+// Notify the VMThread that the last non-daemon JavaThread has terminated,
+// and wait until operation is performed.
+void VMThread::wait_for_vm_thread_exit() {
+ { MutexLocker mu(VMOperationQueue_lock);
+ _should_terminate = true;
+ VMOperationQueue_lock->notify();
+ }
+
+ // Note: VM thread leaves at Safepoint. We are not stopped by Safepoint
+ // because this thread has been removed from the threads list. But anything
+ // that could get blocked by Safepoint should not be used after this point,
+ // otherwise we will hang, since no one can end the safepoint.
+
+ // Wait until VM thread is terminated
+ // Note: it should be OK to use Terminator_lock here. But this is called
+ // at a very delicate time (VM shutdown) and we are operating in a non-VM
+ // thread at Safepoint. It's safer not to share a lock with other threads.
+ { MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
+ while(!VMThread::is_terminated()) {
+ _terminate_lock->wait(Mutex::_no_safepoint_check_flag);
+ }
+ }
+}
+
+void VMThread::print_on(outputStream* st) const {
+ st->print("\"%s\" ", name());
+ Thread::print_on(st);
+ st->cr();
+}
+
+void VMThread::evaluate_operation(VM_Operation* op) {
+ ResourceMark rm;
+
+ {
+ PerfTraceTime vm_op_timer(perf_accumulated_vm_operation_time());
+ op->evaluate();
+ }
+
+ // Last access of info in _cur_vm_operation!
+ bool c_heap_allocated = op->is_cheap_allocated();
+
+ // Mark as completed
+ if (!op->evaluate_concurrently()) {
+ op->calling_thread()->increment_vm_operation_completed_count();
+ }
+ // It is unsafe to access _cur_vm_operation after the 'increment_vm_operation_completed_count'
+ // call, since if the operation is stack allocated the calling thread might already have deallocated it.
+ if (c_heap_allocated) {
+ delete _cur_vm_operation;
+ }
+}
+
+
+void VMThread::loop() {
+ assert(_cur_vm_operation == NULL, "no current one should be executing");
+
+ while(true) {
+ VM_Operation* safepoint_ops = NULL;
+ //
+ // Wait for VM operation
+ //
+ // use no_safepoint_check to get lock without attempting to "sneak"
+ { MutexLockerEx mu_queue(VMOperationQueue_lock,
+ Mutex::_no_safepoint_check_flag);
+
+ // Look for new operation
+ assert(_cur_vm_operation == NULL, "no current one should be executing");
+ _cur_vm_operation = _vm_queue->remove_next();
+
+ // Stall time tracking code
+ if (PrintVMQWaitTime && _cur_vm_operation != NULL &&
+ !_cur_vm_operation->evaluate_concurrently()) {
+ long stall = os::javaTimeMillis() - _cur_vm_operation->timestamp();
+ if (stall > 0)
+ tty->print_cr("%s stall: %Ld", _cur_vm_operation->name(), stall);
+ }
+
+ while (!should_terminate() && _cur_vm_operation == NULL) {
+ // wait with a timeout to guarantee safepoints at regular intervals
+ bool timedout =
+ VMOperationQueue_lock->wait(Mutex::_no_safepoint_check_flag,
+ GuaranteedSafepointInterval);
+
+ // Support for self destruction
+ if ((SelfDestructTimer != 0) && !is_error_reported() &&
+ (os::elapsedTime() > SelfDestructTimer * 60)) {
+ tty->print_cr("VM self-destructed");
+ exit(-1);
+ }
+
+ if (timedout && (SafepointALot ||
+ SafepointSynchronize::is_cleanup_needed())) {
+ MutexUnlockerEx mul(VMOperationQueue_lock,
+ Mutex::_no_safepoint_check_flag);
+ // Force a safepoint since we have not had one for at least
+ // 'GuaranteedSafepointInterval' milliseconds. This will run all
+ // the clean-up processing that needs to be done regularly at a
+ // safepoint
+ SafepointSynchronize::begin();
+ #ifdef ASSERT
+ if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
+ #endif
+ SafepointSynchronize::end();
+ }
+ _cur_vm_operation = _vm_queue->remove_next();
+
+ // If we are at a safepoint we will evaluate all the operations that
+ // follow that also require a safepoint
+ if (_cur_vm_operation != NULL &&
+ _cur_vm_operation->evaluate_at_safepoint()) {
+ safepoint_ops = _vm_queue->drain_at_safepoint_priority();
+ }
+ }
+
+ if (should_terminate()) break;
+ } // Release mu_queue_lock
+
+ //
+ // Execute VM operation
+ //
+ { HandleMark hm(VMThread::vm_thread());
+
+ EventMark em("Executing VM operation: %s", vm_operation()->name());
+ assert(_cur_vm_operation != NULL, "we should have found an operation to execute");
+
+ // Give the VM thread an extra quantum. Jobs tend to be bursty and this
+ // helps the VM thread to finish up the job.
+ // FIXME: When this is enabled and there are many threads, this can degrade
+ // performance significantly.
+ if( VMThreadHintNoPreempt )
+ os::hint_no_preempt();
+
+ // If we are at a safepoint we will evaluate all the operations that
+ // follow that also require a safepoint
+ if (_cur_vm_operation->evaluate_at_safepoint()) {
+
+ if (PrintGCApplicationConcurrentTime) {
+ gclog_or_tty->print_cr("Application time: %3.7f seconds",
+ RuntimeService::last_application_time_sec());
+ }
+
+ _vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned
+
+ SafepointSynchronize::begin();
+ evaluate_operation(_cur_vm_operation);
+ // now process all queued safepoint ops, iteratively draining
+ // the queue until there are none left
+ do {
+ _cur_vm_operation = safepoint_ops;
+ if (_cur_vm_operation != NULL) {
+ do {
+ // evaluate_operation deletes the op object so we have
+ // to grab the next op now
+ VM_Operation* next = _cur_vm_operation->next();
+ _vm_queue->set_drain_list(next);
+ evaluate_operation(_cur_vm_operation);
+ _cur_vm_operation = next;
+ if (PrintSafepointStatistics) {
+ SafepointSynchronize::inc_vmop_coalesced_count();
+ }
+ } while (_cur_vm_operation != NULL);
+ }
+ // There is a chance that a thread enqueued a safepoint op
+ // since we released the op-queue lock and initiated the safepoint.
+ // So we drain the queue again if there is anything there, as an
+ // optimization to try and reduce the number of safepoints.
+ // As the safepoint synchronizes us with JavaThreads, we will see
+ // any enqueue made by a JavaThread. The peek will not necessarily
+ // detect a concurrent enqueue by a GC thread, but that simply means
+ // the op will wait for the next major cycle of the VMThread - just
+ // as it would if the GC thread lost the race for the lock.
+ if (_vm_queue->peek_at_safepoint_priority()) {
+ // must hold lock while draining queue
+ MutexLockerEx mu_queue(VMOperationQueue_lock,
+ Mutex::_no_safepoint_check_flag);
+ safepoint_ops = _vm_queue->drain_at_safepoint_priority();
+ } else {
+ safepoint_ops = NULL;
+ }
+ } while(safepoint_ops != NULL);
+
+ _vm_queue->set_drain_list(NULL);
+
+ // Complete safepoint synchronization
+ SafepointSynchronize::end();
+
+ if (PrintGCApplicationStoppedTime) {
+ gclog_or_tty->print_cr("Total time for which application threads "
+ "were stopped: %3.7f seconds",
+ RuntimeService::last_safepoint_time_sec());
+ }
+
+ } else { // not a safepoint operation
+ if (TraceLongCompiles) {
+ elapsedTimer t;
+ t.start();
+ evaluate_operation(_cur_vm_operation);
+ t.stop();
+ double secs = t.seconds();
+ if (secs * 1e3 > LongCompileThreshold) {
+ // XXX - _cur_vm_operation should not be accessed after
+ // the completed count has been incremented; the waiting
+ // thread may have already freed this memory.
+ tty->print_cr("vm %s: %3.7f secs]", _cur_vm_operation->name(), secs);
+ }
+ } else {
+ evaluate_operation(_cur_vm_operation);
+ }
+
+ _cur_vm_operation = NULL;
+ }
+ }
+
+ //
+ // Notify (potential) waiting Java thread(s) - lock without safepoint
+ // check so that sneaking is not possible
+ { MutexLockerEx mu(VMOperationRequest_lock,
+ Mutex::_no_safepoint_check_flag);
+ VMOperationRequest_lock->notify_all();
+ }
+
+ //
+ // We want to make sure that we get to a safepoint regularly.
+ //
+ if (SafepointALot || SafepointSynchronize::is_cleanup_needed()) {
+ long interval = SafepointSynchronize::last_non_safepoint_interval();
+ bool max_time_exceeded = GuaranteedSafepointInterval != 0 && (interval > GuaranteedSafepointInterval);
+ if (SafepointALot || max_time_exceeded) {
+ HandleMark hm(VMThread::vm_thread());
+ SafepointSynchronize::begin();
+ SafepointSynchronize::end();
+ }
+ }
+ }
+}
+
+void VMThread::execute(VM_Operation* op) {
+ Thread* t = Thread::current();
+
+ if (!t->is_VM_thread()) {
+ // JavaThread or WatcherThread
+ t->check_for_valid_safepoint_state(true);
+
+ // New request from Java thread, evaluate prologue
+ if (!op->doit_prologue()) {
+ return; // op was cancelled
+ }
+
+ // Setup VM_operations for execution
+ op->set_calling_thread(t, Thread::get_priority(t));
+
+ // It does not make sense to execute the epilogue, if the VM operation object is getting
+ // deallocated by the VM thread.
+ bool concurrent = op->evaluate_concurrently();
+ bool execute_epilog = !op->is_cheap_allocated();
+ assert(!concurrent || op->is_cheap_allocated(), "concurrent => cheap_allocated");
+
+ // Get ticket number for non-concurrent VM operations
+ int ticket = 0;
+ if (!concurrent) {
+ ticket = t->vm_operation_ticket();
+ }
+
+ // Add VM operation to list of waiting threads. We are guaranteed not to block while holding the
+ // VMOperationQueue_lock, so we can block without a safepoint check. This allows vm operation requests
+ // to be queued up during a safepoint synchronization.
+ {
+ VMOperationQueue_lock->lock_without_safepoint_check();
+ bool ok = _vm_queue->add(op);
+ op->set_timestamp(os::javaTimeMillis());
+ VMOperationQueue_lock->notify();
+ VMOperationQueue_lock->unlock();
+ // VM_Operation got skipped
+ if (!ok) {
+ assert(concurrent, "can only skip concurrent tasks");
+ if (op->is_cheap_allocated()) delete op;
+ return;
+ }
+ }
+
+ if (!concurrent) {
+ // Wait for completion of request (non-concurrent)
+ // Note: only a JavaThread triggers the safepoint check when locking
+ MutexLocker mu(VMOperationRequest_lock);
+ while(t->vm_operation_completed_count() < ticket) {
+ VMOperationRequest_lock->wait(!t->is_Java_thread());
+ }
+ }
+
+ if (execute_epilog) {
+ op->doit_epilogue();
+ }
+ } else {
+ // invoked by VM thread; usually nested VM operation
+ assert(t->is_VM_thread(), "must be a VM thread");
+ VM_Operation* prev_vm_operation = vm_operation();
+ if (prev_vm_operation != NULL) {
+ // Check that the VM operation allows nested VM operations. This is normally not the case,
+ // e.g., the compiler does not allow nested scavenges or compiles.
+ if (!prev_vm_operation->allow_nested_vm_operations()) {
+ fatal2("Nested VM operation %s requested by operation %s", op->name(), vm_operation()->name());
+ }
+ op->set_calling_thread(prev_vm_operation->calling_thread(), prev_vm_operation->priority());
+ }
+
+ EventMark em("Executing %s VM operation: %s", prev_vm_operation ? "nested" : "", op->name());
+
+ // Release all internal handles after operation is evaluated
+ HandleMark hm(t);
+ _cur_vm_operation = op;
+
+ if (op->evaluate_at_safepoint() && !SafepointSynchronize::is_at_safepoint()) {
+ SafepointSynchronize::begin();
+ op->evaluate();
+ SafepointSynchronize::end();
+ } else {
+ op->evaluate();
+ }
+
+ // Free memory if needed
+ if (op->is_cheap_allocated()) delete op;
+
+ _cur_vm_operation = prev_vm_operation;
+ }
+}
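+
+// Worked example of the ticket handshake above, assuming vm_operation_ticket()
+// returns the running count of non-concurrent operations this thread has issued:
+// if a JavaThread submits its 3rd such operation, ticket == 3, and the thread
+// blocks on VMOperationRequest_lock until its vm_operation_completed_count()
+// reaches 3, i.e. until evaluate_operation() has bumped the count for this
+// particular request.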
+
+
+void VMThread::oops_do(OopClosure* f) {
+ Thread::oops_do(f);
+ _vm_queue->oops_do(f);
+}
+
+//------------------------------------------------------------------------------------------------------------------
+#ifndef PRODUCT
+
+void VMOperationQueue::verify_queue(int prio) {
+ // Check that list is correctly linked
+ int length = _queue_length[prio];
+ VM_Operation *cur = _queue[prio];
+ int i;
+
+ // Check forward links
+ for(i = 0; i < length; i++) {
+ cur = cur->next();
+ assert(cur != _queue[prio], "list too short (forward)");
+ }
+ assert(cur->next() == _queue[prio], "list too long (forward)");
+
+ // Check backwards links
+ cur = _queue[prio];
+ for(i = 0; i < length; i++) {
+ cur = cur->prev();
+ assert(cur != _queue[prio], "list too short (backwards)");
+ }
+ assert(cur->prev() == _queue[prio], "list too long (backwards)");
+}
+
+#endif
+
+void VMThread::verify() {
+ oops_do(&VerifyOopClosure::verify_oop);
+}
diff --git a/src/share/vm/runtime/vmThread.hpp b/src/share/vm/runtime/vmThread.hpp
new file mode 100644
index 000000000..b196d0fc5
--- /dev/null
+++ b/src/share/vm/runtime/vmThread.hpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+//
+// Prioritized queue of VM operations.
+//
+// Encapsulates both queue management and
+// priority policy
+//
+class VMOperationQueue : public CHeapObj {
+ private:
+ enum Priorities {
+ SafepointPriority, // Highest priority (operation executed at a safepoint)
+ MediumPriority, // Medium priority
+ nof_priorities
+ };
+
+ // We maintain a doubly-linked list, with explicit count.
+ int _queue_length[nof_priorities];
+ int _queue_counter;
+ VM_Operation* _queue [nof_priorities];
+ // we also allow the vmThread to register the ops it has drained so we
+ // can scan them from oops_do
+ VM_Operation* _drain_list;
+
+ // Double-linked non-empty list insert.
+ void insert(VM_Operation* q,VM_Operation* n);
+ void unlink(VM_Operation* q);
+
+ // Basic queue manipulation
+ bool queue_empty (int prio);
+ void queue_add_front (int prio, VM_Operation *op);
+ void queue_add_back (int prio, VM_Operation *op);
+ VM_Operation* queue_remove_front(int prio);
+ void queue_oops_do(int queue, OopClosure* f);
+ void drain_list_oops_do(OopClosure* f);
+ VM_Operation* queue_drain(int prio);
+ // lock-free query: may return the wrong answer but must not break
+ bool queue_peek(int prio) { return _queue_length[prio] > 0; }
+
+ public:
+ VMOperationQueue();
+
+ // High-level operations that encapsulate the queueing policy
+ bool add(VM_Operation *op);
+ VM_Operation* remove_next(); // Returns next or null
+ VM_Operation* remove_next_at_safepoint_priority() { return queue_remove_front(SafepointPriority); }
+ VM_Operation* drain_at_safepoint_priority() { return queue_drain(SafepointPriority); }
+ void set_drain_list(VM_Operation* list) { _drain_list = list; }
+ bool peek_at_safepoint_priority() { return queue_peek(SafepointPriority); }
+
+ // GC support
+ void oops_do(OopClosure* f);
+
+ void verify_queue(int prio) PRODUCT_RETURN;
+};
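+
+// Sketch of the representation these declarations suggest, consistent with the
+// invariants checked by VMOperationQueue::verify_queue() in vmThread.cpp: each
+// _queue[prio] entry is a dummy header of a circular doubly-linked list, so an
+// empty queue is a header whose next()/prev() point back at itself. The bodies
+// below are illustrative only; the real insert()/unlink() live in vmThread.cpp
+// and may differ in detail.
+//
+//   // Link n into the list immediately before element q.
+//   void VMOperationQueue::insert(VM_Operation* q, VM_Operation* n) {
+//     n->set_prev(q->prev());
+//     n->set_next(q);
+//     q->prev()->set_next(n);
+//     q->set_prev(n);
+//   }
+//
+//   // Unlink q from whatever list it is currently on.
+//   void VMOperationQueue::unlink(VM_Operation* q) {
+//     q->prev()->set_next(q->next());
+//     q->next()->set_prev(q->prev());
+//   }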
+
+
+//
+// A single VMThread (the primordial thread) spawns all other threads
+// and is itself used by other threads to offload heavy vm operations
+// like scavenge, garbage_collect etc.
+//
+
+class VMThread: public Thread {
+ private:
+ static ThreadPriority _current_priority;
+
+ static bool _should_terminate;
+ static bool _terminated;
+ static Monitor * _terminate_lock;
+ static PerfCounter* _perf_accumulated_vm_operation_time;
+
+ void evaluate_operation(VM_Operation* op);
+ public:
+ // Constructor
+ VMThread();
+
+ // Tester
+ bool is_VM_thread() const { return true; }
+ bool is_GC_thread() const { return true; }
+
+ char* name() const { return (char*)"VM Thread"; }
+
+ // The ever running loop for the VMThread
+ void loop();
+
+ // Called to stop the VM thread
+ static void wait_for_vm_thread_exit();
+ static bool should_terminate() { return _should_terminate; }
+ static bool is_terminated() { return _terminated == true; }
+
+ // Execution of vm operation
+ static void execute(VM_Operation* op);
+
+ // Returns the current vm operation if any.
+ static VM_Operation* vm_operation() { return _cur_vm_operation; }
+
+ // Returns the single instance of VMThread.
+ static VMThread* vm_thread() { return _vm_thread; }
+
+ // GC support
+ void oops_do(OopClosure* f);
+
+ // Debugging
+ void print_on(outputStream* st) const;
+ void print() const { print_on(tty); }
+ void verify();
+
+ // Performance measurement
+ static PerfCounter* perf_accumulated_vm_operation_time() { return _perf_accumulated_vm_operation_time; }
+
+ // Entry for starting vm thread
+ virtual void run();
+
+ // Creations/Destructions
+ static void create();
+ static void destroy();
+
+ private:
+ // VM_Operation support
+ static VM_Operation* _cur_vm_operation; // Current VM operation
+ static VMOperationQueue* _vm_queue; // Queue (w/ policy) of VM operations
+
+ // Pointer to single-instance of VM thread
+ static VMThread* _vm_thread;
+};
diff --git a/src/share/vm/runtime/vm_operations.cpp b/src/share/vm/runtime/vm_operations.cpp
new file mode 100644
index 000000000..9733aed03
--- /dev/null
+++ b/src/share/vm/runtime/vm_operations.cpp
@@ -0,0 +1,450 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_vm_operations.cpp.incl"
+
+#define VM_OP_NAME_INITIALIZE(name) #name,
+
+const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
+ { VM_OPS_DO(VM_OP_NAME_INITIALIZE) };
+
+void VM_Operation::set_calling_thread(Thread* thread, ThreadPriority priority) {
+ _calling_thread = thread;
+ assert(MinPriority <= priority && priority <= MaxPriority, "sanity check");
+ _priority = priority;
+}
+
+
+void VM_Operation::evaluate() {
+ ResourceMark rm;
+ if (TraceVMOperation) {
+ tty->print("[");
+ NOT_PRODUCT(print();)
+ }
+ doit();
+ if (TraceVMOperation) {
+ tty->print_cr("]");
+ }
+}
+
+// Called by fatal error handler.
+void VM_Operation::print_on_error(outputStream* st) const {
+ st->print("VM_Operation (" PTR_FORMAT "): ", this);
+ st->print("%s", name());
+
+ const char* mode;
+ switch(evaluation_mode()) {
+ case _safepoint : mode = "safepoint"; break;
+ case _no_safepoint : mode = "no safepoint"; break;
+ case _concurrent : mode = "concurrent"; break;
+ case _async_safepoint: mode = "async safepoint"; break;
+ default : mode = "unknown"; break;
+ }
+ st->print(", mode: %s", mode);
+
+ if (calling_thread()) {
+ st->print(", requested by thread " PTR_FORMAT, calling_thread());
+ }
+}
+
+void VM_ThreadStop::doit() {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
+ JavaThread* target = java_lang_Thread::thread(target_thread());
+ // Note that this now allows multiple ThreadDeath exceptions to be
+ // thrown at a thread.
+ if (target != NULL) {
+ // the thread has run and is not already in the process of exiting
+ target->send_thread_stop(throwable());
+ }
+}
+
+void VM_Deoptimize::doit() {
+ // We do not want any GCs to happen while we are in the middle of this VM operation
+ ResourceMark rm;
+ DeoptimizationMarker dm;
+
+ // Deoptimize all activations depending on marked nmethods
+ Deoptimization::deoptimize_dependents();
+
+ // Make the dependent methods zombies
+ CodeCache::make_marked_nmethods_zombies();
+}
+
+
+VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id) {
+ _thread = thread;
+ _id = id;
+}
+
+
+void VM_DeoptimizeFrame::doit() {
+ Deoptimization::deoptimize_frame(_thread, _id);
+}
+
+
+#ifndef PRODUCT
+
+void VM_DeoptimizeAll::doit() {
+ DeoptimizationMarker dm;
+ // deoptimize all java threads in the system
+ if (DeoptimizeALot) {
+ for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
+ if (thread->has_last_Java_frame()) {
+ thread->deoptimize();
+ }
+ }
+ } else if (DeoptimizeRandom) {
+
+ // Deoptimize some selected threads and frames
+ int tnum = os::random() & 0x3;
+ int fnum = os::random() & 0x3;
+ int tcount = 0;
+ for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
+ if (thread->has_last_Java_frame()) {
+ if (tcount++ == tnum) {
+ tcount = 0;
+ int fcount = 0;
+ // Deoptimize some selected frames.
+ // Biased locking wants an updated register map
+ for(StackFrameStream fst(thread, UseBiasedLocking); !fst.is_done(); fst.next()) {
+ if (fst.current()->can_be_deoptimized()) {
+ if (fcount++ == fnum) {
+ fcount = 0;
+ Deoptimization::deoptimize(thread, *fst.current(), fst.register_map());
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+
+void VM_ZombieAll::doit() {
+ JavaThread *thread = (JavaThread *)calling_thread();
+ assert(thread->is_Java_thread(), "must be a Java thread");
+ thread->make_zombies();
+}
+
+#endif // !PRODUCT
+
+void VM_Verify::doit() {
+ Universe::verify();
+}
+
+bool VM_PrintThreads::doit_prologue() {
+ assert(Thread::current()->is_Java_thread(), "just checking");
+
+ // Make sure AbstractOwnableSynchronizer is loaded
+ if (JDK_Version::is_gte_jdk16x_version()) {
+ java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(JavaThread::current());
+ }
+
+ // Get Heap_lock if concurrent locks will be dumped
+ if (_print_concurrent_locks) {
+ Heap_lock->lock();
+ }
+ return true;
+}
+
+void VM_PrintThreads::doit() {
+ Threads::print_on(_out, true, false, _print_concurrent_locks);
+}
+
+void VM_PrintThreads::doit_epilogue() {
+ if (_print_concurrent_locks) {
+ // Release Heap_lock
+ Heap_lock->unlock();
+ }
+}
+
+void VM_PrintJNI::doit() {
+ JNIHandles::print_on(_out);
+}
+
+VM_FindDeadlocks::~VM_FindDeadlocks() {
+ if (_deadlocks != NULL) {
+ DeadlockCycle* cycle = _deadlocks;
+ while (cycle != NULL) {
+ DeadlockCycle* d = cycle;
+ cycle = cycle->next();
+ delete d;
+ }
+ }
+}
+
+bool VM_FindDeadlocks::doit_prologue() {
+ assert(Thread::current()->is_Java_thread(), "just checking");
+
+ // Load AbstractOwnableSynchronizer class
+ if (_concurrent_locks && JDK_Version::is_gte_jdk16x_version()) {
+ java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(JavaThread::current());
+ }
+
+ return true;
+}
+
+void VM_FindDeadlocks::doit() {
+ _deadlocks = ThreadService::find_deadlocks_at_safepoint(_concurrent_locks);
+ if (_out != NULL) {
+ int num_deadlocks = 0;
+ for (DeadlockCycle* cycle = _deadlocks; cycle != NULL; cycle = cycle->next()) {
+ num_deadlocks++;
+ cycle->print_on(_out);
+ }
+
+ if (num_deadlocks == 1) {
+ _out->print_cr("\nFound 1 deadlock.\n");
+ _out->flush();
+ } else if (num_deadlocks > 1) {
+ _out->print_cr("\nFound %d deadlocks.\n", num_deadlocks);
+ _out->flush();
+ }
+ }
+}
+
+VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
+ int max_depth,
+ bool with_locked_monitors,
+ bool with_locked_synchronizers) {
+ _result = result;
+ _num_threads = 0; // 0 indicates all threads
+ _threads = NULL;
+ _max_depth = max_depth;
+ _with_locked_monitors = with_locked_monitors;
+ _with_locked_synchronizers = with_locked_synchronizers;
+}
+
+VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result,
+ GrowableArray<instanceHandle>* threads,
+ int num_threads,
+ int max_depth,
+ bool with_locked_monitors,
+ bool with_locked_synchronizers) {
+ _result = result;
+ _num_threads = num_threads;
+ _threads = threads;
+ _max_depth = max_depth;
+ _with_locked_monitors = with_locked_monitors;
+ _with_locked_synchronizers = with_locked_synchronizers;
+}
+
+bool VM_ThreadDump::doit_prologue() {
+ assert(Thread::current()->is_Java_thread(), "just checking");
+
+ // Load AbstractOwnableSynchronizer class before taking thread snapshots
+ if (JDK_Version::is_gte_jdk16x_version()) {
+ java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(JavaThread::current());
+ }
+
+ if (_with_locked_synchronizers) {
+ // Acquire Heap_lock to dump concurrent locks
+ Heap_lock->lock();
+ }
+
+ return true;
+}
+
+void VM_ThreadDump::doit_epilogue() {
+ if (_with_locked_synchronizers) {
+ // Release Heap_lock
+ Heap_lock->unlock();
+ }
+}
+
+void VM_ThreadDump::doit() {
+ ResourceMark rm;
+
+ ConcurrentLocksDump concurrent_locks(true);
+ if (_with_locked_synchronizers) {
+ concurrent_locks.dump_at_safepoint();
+ }
+
+ if (_num_threads == 0) {
+ // Snapshot all live threads
+ for (JavaThread* jt = Threads::first(); jt != NULL; jt = jt->next()) {
+ if (jt->is_exiting() ||
+ jt->is_hidden_from_external_view()) {
+ // skip terminating threads and hidden threads
+ continue;
+ }
+ ThreadConcurrentLocks* tcl = NULL;
+ if (_with_locked_synchronizers) {
+ tcl = concurrent_locks.thread_concurrent_locks(jt);
+ }
+ ThreadSnapshot* ts = snapshot_thread(jt, tcl);
+ _result->add_thread_snapshot(ts);
+ }
+ } else {
+ // Snapshot threads in the given _threads array
+ // A dummy snapshot is created if a thread doesn't exist
+ for (int i = 0; i < _num_threads; i++) {
+ instanceHandle th = _threads->at(i);
+ if (th() == NULL) {
+ // skip if the thread doesn't exist
+ // Add a dummy snapshot
+ _result->add_thread_snapshot(new ThreadSnapshot());
+ continue;
+ }
+
+ // Dump thread stack only if the thread is alive and not exiting
+ // and not VM internal thread.
+ JavaThread* jt = java_lang_Thread::thread(th());
+ if (jt == NULL || /* thread not alive */
+ jt->is_exiting() ||
+ jt->is_hidden_from_external_view()) {
+ // add a NULL snapshot if skipped
+ _result->add_thread_snapshot(new ThreadSnapshot());
+ continue;
+ }
+ ThreadConcurrentLocks* tcl = NULL;
+ if (_with_locked_synchronizers) {
+ tcl = concurrent_locks.thread_concurrent_locks(jt);
+ }
+ ThreadSnapshot* ts = snapshot_thread(jt, tcl);
+ _result->add_thread_snapshot(ts);
+ }
+ }
+}
+
+ThreadSnapshot* VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl) {
+ ThreadSnapshot* snapshot = new ThreadSnapshot(java_thread);
+ snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors);
+ snapshot->set_concurrent_locks(tcl);
+ return snapshot;
+}
+
+volatile bool VM_Exit::_vm_exited = false;
+Thread * VM_Exit::_shutdown_thread = NULL;
+
+int VM_Exit::set_vm_exited() {
+ Thread * thr_cur = ThreadLocalStorage::get_thread_slow();
+
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");
+
+ int num_active = 0;
+
+ _shutdown_thread = thr_cur;
+ _vm_exited = true; // global flag
+ for(JavaThread *thr = Threads::first(); thr != NULL; thr = thr->next())
+ if (thr!=thr_cur && thr->thread_state() == _thread_in_native) {
+ ++num_active;
+ thr->set_terminated(JavaThread::_vm_exited); // per-thread flag
+ }
+
+ return num_active;
+}
+
+int VM_Exit::wait_for_threads_in_native_to_block() {
+ // VM exits at safepoint. This function must be called at the final safepoint
+ // to wait for threads in _thread_in_native state to be quiescent.
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");
+
+ Thread * thr_cur = ThreadLocalStorage::get_thread_slow();
+ Monitor timer(Mutex::leaf, "VM_Exit timer", true);
+
+ // Compiler threads need longer wait because they can access VM data directly
+ // while in native. If they are active and some structures being used are
+ // deleted by the shutdown sequence, they will crash. On the other hand, user
+ // threads must go through native=>Java/VM transitions first to access VM
+ // data, and they will be stopped during state transition. In theory, we
+ // don't have to wait for user threads to be quiescent, but it's always
+ // better to terminate VM when current thread is the only active thread, so
+ // wait for user threads too. Numbers are in 10 milliseconds.
+ int max_wait_user_thread = 30; // at least 300 milliseconds
+ int max_wait_compiler_thread = 1000; // at least 10 seconds
+
+ int max_wait = max_wait_compiler_thread;
+
+ int attempts = 0;
+ while (true) {
+ int num_active = 0;
+ int num_active_compiler_thread = 0;
+
+ for(JavaThread *thr = Threads::first(); thr != NULL; thr = thr->next()) {
+ if (thr!=thr_cur && thr->thread_state() == _thread_in_native) {
+ num_active++;
+ if (thr->is_Compiler_thread()) {
+ num_active_compiler_thread++;
+ }
+ }
+ }
+
+ if (num_active == 0) {
+ return 0;
+ } else if (attempts > max_wait) {
+ return num_active;
+ } else if (num_active_compiler_thread == 0 && attempts > max_wait_user_thread) {
+ return num_active;
+ }
+
+ attempts++;
+
+ MutexLockerEx ml(&timer, Mutex::_no_safepoint_check_flag);
+ timer.wait(Mutex::_no_safepoint_check_flag, 10);
+ }
+}
+
+void VM_Exit::doit() {
+ CompileBroker::set_should_block();
+
+ // Wait for a short period for threads in native to block. Any thread
+ // still executing native code after the wait will be stopped at
+ // native==>Java/VM barriers.
+ // Among 16276 JCK tests, 94% of them come here without any threads still
+ // running in native; the other 6% are quiescent within 250ms (Ultra 80).
+ wait_for_threads_in_native_to_block();
+
+ set_vm_exited();
+
+ // Clean up global resources before exiting. exit_globals() currently
+ // cleans up outputStream resources and PerfMemory resources.
+ exit_globals();
+
+ // Check for exit hook
+ exit_hook_t exit_hook = Arguments::exit_hook();
+ if (exit_hook != NULL) {
+ // exit hook should exit.
+ exit_hook(_exit_code);
+ // ... but if it didn't, we must do it here
+ vm_direct_exit(_exit_code);
+ } else {
+ vm_direct_exit(_exit_code);
+ }
+}
+
+
+void VM_Exit::wait_if_vm_exited() {
+ if (_vm_exited &&
+ ThreadLocalStorage::get_thread_slow() != _shutdown_thread) {
+ // _vm_exited is set at a safepoint, and the Threads_lock is never released
+ // afterwards, so we will block here until the process dies.
+ Threads_lock->lock_without_safepoint_check();
+ ShouldNotReachHere();
+ }
+}
diff --git a/src/share/vm/runtime/vm_operations.hpp b/src/share/vm/runtime/vm_operations.hpp
new file mode 100644
index 000000000..778a46dd7
--- /dev/null
+++ b/src/share/vm/runtime/vm_operations.hpp
@@ -0,0 +1,366 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// The following classes are used for operations
+// initiated by a Java thread but that must
+// take place in the VMThread.
+
+#define VM_OP_ENUM(type) VMOp_##type,
+
+// Note: When a new VM_XXX operation is added, add 'XXX' to the template table below.
+#define VM_OPS_DO(template) \
+ template(Dummy) \
+ template(ThreadStop) \
+ template(ThreadDump) \
+ template(PrintThreads) \
+ template(FindDeadlocks) \
+ template(ForceSafepoint) \
+ template(ForceAsyncSafepoint) \
+ template(Deoptimize) \
+ template(DeoptimizeFrame) \
+ template(DeoptimizeAll) \
+ template(ZombieAll) \
+ template(Verify) \
+ template(PrintJNI) \
+ template(HeapDumper) \
+ template(DeoptimizeTheWorld) \
+ template(GC_HeapInspection) \
+ template(GenCollectFull) \
+ template(GenCollectFullConcurrent) \
+ template(GenCollectForAllocation) \
+ template(ParallelGCFailedAllocation) \
+ template(ParallelGCFailedPermanentAllocation) \
+ template(ParallelGCSystemGC) \
+ template(CMS_Initial_Mark) \
+ template(CMS_Final_Remark) \
+ template(EnableBiasedLocking) \
+ template(RevokeBias) \
+ template(BulkRevokeBias) \
+ template(PopulateDumpSharedSpace) \
+ template(JNIFunctionTableCopier) \
+ template(RedefineClasses) \
+ template(GetOwnedMonitorInfo) \
+ template(GetObjectMonitorUsage) \
+ template(GetCurrentContendedMonitor) \
+ template(GetStackTrace) \
+ template(GetMultipleStackTraces) \
+ template(GetAllStackTraces) \
+ template(GetThreadListStackTraces) \
+ template(GetFrameCount) \
+ template(GetFrameLocation) \
+ template(ChangeBreakpoints) \
+ template(GetOrSetLocal) \
+ template(GetCurrentLocation) \
+ template(EnterInterpOnlyMode) \
+ template(ChangeSingleStep) \
+ template(HeapWalkOperation) \
+ template(HeapIterateOperation) \
+ template(ReportJavaOutOfMemory) \
+ template(Exit) \
+
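+// For illustration, expanding this single table with the two expanders used in
+// this pair of files yields parallel definitions (first entries shown only):
+//
+//   VM_OPS_DO(VM_OP_ENUM)             =>  VMOp_Dummy, VMOp_ThreadStop, VMOp_ThreadDump, ...
+//   VM_OPS_DO(VM_OP_NAME_INITIALIZE)  =>  "Dummy", "ThreadStop", "ThreadDump", ...
+//
+// which is why VM_Operation::name() below can index _names[] directly with type().
+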
+class VM_Operation: public CHeapObj {
+ public:
+ enum Mode {
+ _safepoint, // blocking, safepoint, vm_op C-heap allocated
+ _no_safepoint, // blocking, no safepoint, vm_op C-Heap allocated
+ _concurrent, // non-blocking, no safepoint, vm_op C-Heap allocated
+ _async_safepoint // non-blocking, safepoint, vm_op C-Heap allocated
+ };
+
+ enum VMOp_Type {
+ VM_OPS_DO(VM_OP_ENUM)
+ VMOp_Terminating
+ };
+
+ private:
+ Thread* _calling_thread;
+ ThreadPriority _priority;
+ long _timestamp;
+ VM_Operation* _next;
+ VM_Operation* _prev;
+
+ // The VM operation name array
+ static const char* _names[];
+
+ public:
+ VM_Operation() { _calling_thread = NULL; _next = NULL; _prev = NULL; }
+ virtual ~VM_Operation() {}
+
+ // VM operation support (used by VM thread)
+ Thread* calling_thread() const { return _calling_thread; }
+ ThreadPriority priority() { return _priority; }
+ void set_calling_thread(Thread* thread, ThreadPriority priority);
+
+ long timestamp() const { return _timestamp; }
+ void set_timestamp(long timestamp) { _timestamp = timestamp; }
+
+ // Called by the VM thread; in turn invokes doit(). Do not override this.
+ void evaluate();
+
+ // evaluate() is called by the VMThread and in turn calls doit().
+ // If the thread invoking VMThread::execute(VM_Operation*) is a JavaThread,
+ // doit_prologue() is called in that thread before transferring control to
+ // the VMThread.
+ // If doit_prologue() returns true the VM operation will proceed, and
+ // doit_epilogue() will be called by the JavaThread once the VM operation
+ // completes. If doit_prologue() returns false the VM operation is cancelled.
+ virtual void doit() = 0;
+ virtual bool doit_prologue() { return true; };
+ virtual void doit_epilogue() {}; // Note: Not called if mode is: _concurrent
+
+ // Type test
+ virtual bool is_methodCompiler() const { return false; }
+
+ // Linking
+ VM_Operation *next() const { return _next; }
+ VM_Operation *prev() const { return _prev; }
+ void set_next(VM_Operation *next) { _next = next; }
+ void set_prev(VM_Operation *prev) { _prev = prev; }
+
+ // Configuration. Override these appropriately in subclasses.
+ virtual VMOp_Type type() const = 0;
+ virtual Mode evaluation_mode() const { return _safepoint; }
+ virtual bool allow_nested_vm_operations() const { return false; }
+ virtual bool is_cheap_allocated() const { return false; }
+ virtual void oops_do(OopClosure* f) { /* do nothing */ };
+
+ // CAUTION: <don't hang yourself with following rope>
+ // If you override these methods, make sure that the evaluation
+ // of these methods is race-free and non-blocking, since these
+ // methods may be evaluated either by the mutators or by the
+ // vm thread, either concurrently with mutators or with the mutators
+ // stopped. In other words, taking locks is verboten, and if there
+ // are any races in evaluating the conditions, they'd better be benign.
+ virtual bool evaluate_at_safepoint() const {
+ return evaluation_mode() == _safepoint ||
+ evaluation_mode() == _async_safepoint;
+ }
+ virtual bool evaluate_concurrently() const {
+ return evaluation_mode() == _concurrent ||
+ evaluation_mode() == _async_safepoint;
+ }
+
+ // Debugging
+ void print_on_error(outputStream* st) const;
+ const char* name() const { return _names[type()]; }
+ static const char* name(int type) {
+ assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type");
+ return _names[type];
+ }
+#ifndef PRODUCT
+ void print_on(outputStream* st) const { print_on_error(st); }
+#endif
+};
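+
+// Summary of the evaluation modes above, as derived by evaluate_at_safepoint()
+// and evaluate_concurrently():
+//
+//   mode               caller blocks?   evaluated at a safepoint?
+//   _safepoint         yes              yes
+//   _no_safepoint      yes              no
+//   _concurrent        no               no
+//   _async_safepoint   no               yes
+//
+// A minimal sketch of the prologue/doit/epilogue protocol, using a hypothetical
+// operation (VM_PrintHello is not an existing class; it reuses VMOp_Dummy purely
+// for illustration):
+//
+//   class VM_PrintHello: public VM_Operation {
+//    public:
+//     VMOp_Type type() const { return VMOp_Dummy; }
+//     bool doit_prologue()   { return true; }  // runs in the requesting JavaThread
+//     void doit()            { tty->print_cr("hello from the VM thread"); }
+//     void doit_epilogue()   { }               // runs again in the requesting JavaThread
+//   };
+//
+//   // From a JavaThread: blocks until the VM thread has evaluated doit() at a safepoint.
+//   VM_PrintHello op;
+//   VMThread::execute(&op);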
+
+class VM_ThreadStop: public VM_Operation {
+ private:
+ oop _thread; // The Thread that the Throwable is thrown against
+ oop _throwable; // The Throwable thrown at the target Thread
+ public:
+ // All oops are passed as JNI handles, since a GC may happen before the
+ // VM operation is executed.
+ VM_ThreadStop(oop thread, oop throwable) {
+ _thread = thread;
+ _throwable = throwable;
+ }
+ VMOp_Type type() const { return VMOp_ThreadStop; }
+ oop target_thread() const { return _thread; }
+ oop throwable() const { return _throwable;}
+ void doit();
+ // We deoptimize if top-most frame is compiled - this might require a C2I adapter to be generated
+ bool allow_nested_vm_operations() const { return true; }
+ Mode evaluation_mode() const { return _async_safepoint; }
+ bool is_cheap_allocated() const { return true; }
+
+ // GC support
+ void oops_do(OopClosure* f) {
+ f->do_oop(&_thread); f->do_oop(&_throwable);
+ }
+};
+
+// dummy vm op, evaluated just to force a safepoint
+class VM_ForceSafepoint: public VM_Operation {
+ public:
+ VM_ForceSafepoint() {}
+ void doit() {}
+ VMOp_Type type() const { return VMOp_ForceSafepoint; }
+};
+
+// dummy vm op, evaluated just to force a safepoint
+class VM_ForceAsyncSafepoint: public VM_Operation {
+ public:
+ VM_ForceAsyncSafepoint() {}
+ void doit() {}
+ VMOp_Type type() const { return VMOp_ForceAsyncSafepoint; }
+ Mode evaluation_mode() const { return _async_safepoint; }
+ bool is_cheap_allocated() const { return true; }
+};
+
+class VM_Deoptimize: public VM_Operation {
+ public:
+ VM_Deoptimize() {}
+ VMOp_Type type() const { return VMOp_Deoptimize; }
+ void doit();
+ bool allow_nested_vm_operations() const { return true; }
+};
+
+class VM_DeoptimizeFrame: public VM_Operation {
+ private:
+ JavaThread* _thread;
+ intptr_t* _id;
+ public:
+ VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id);
+ VMOp_Type type() const { return VMOp_DeoptimizeFrame; }
+ void doit();
+ bool allow_nested_vm_operations() const { return true; }
+};
+
+#ifndef PRODUCT
+class VM_DeoptimizeAll: public VM_Operation {
+ private:
+ KlassHandle _dependee;
+ public:
+ VM_DeoptimizeAll() {}
+ VMOp_Type type() const { return VMOp_DeoptimizeAll; }
+ void doit();
+ bool allow_nested_vm_operations() const { return true; }
+};
+
+
+class VM_ZombieAll: public VM_Operation {
+ public:
+ VM_ZombieAll() {}
+ VMOp_Type type() const { return VMOp_ZombieAll; }
+ void doit();
+ bool allow_nested_vm_operations() const { return true; }
+};
+#endif // PRODUCT
+
+class VM_Verify: public VM_Operation {
+ private:
+ KlassHandle _dependee;
+ public:
+ VM_Verify() {}
+ VMOp_Type type() const { return VMOp_Verify; }
+ void doit();
+};
+
+
+class VM_PrintThreads: public VM_Operation {
+ private:
+ outputStream* _out;
+ bool _print_concurrent_locks;
+ public:
+ VM_PrintThreads() { _out = tty; _print_concurrent_locks = PrintConcurrentLocks; }
+ VM_PrintThreads(outputStream* out, bool print_concurrent_locks) { _out = out; _print_concurrent_locks = print_concurrent_locks; }
+ VMOp_Type type() const { return VMOp_PrintThreads; }
+ void doit();
+ bool doit_prologue();
+ void doit_epilogue();
+};
+
+class VM_PrintJNI: public VM_Operation {
+ private:
+ outputStream* _out;
+ public:
+ VM_PrintJNI() { _out = tty; }
+ VM_PrintJNI(outputStream* out) { _out = out; }
+ VMOp_Type type() const { return VMOp_PrintJNI; }
+ void doit();
+};
+
+class DeadlockCycle;
+class VM_FindDeadlocks: public VM_Operation {
+ private:
+ bool _concurrent_locks;
+ DeadlockCycle* _deadlocks;
+ outputStream* _out;
+
+ public:
+ VM_FindDeadlocks(bool concurrent_locks) : _concurrent_locks(concurrent_locks), _out(NULL), _deadlocks(NULL) {};
+ VM_FindDeadlocks(outputStream* st) : _concurrent_locks(true), _out(st), _deadlocks(NULL) {};
+ ~VM_FindDeadlocks();
+
+ DeadlockCycle* result() { return _deadlocks; };
+ VMOp_Type type() const { return VMOp_FindDeadlocks; }
+ void doit();
+ bool doit_prologue();
+};
+
+class ThreadDumpResult;
+class ThreadSnapshot;
+class ThreadConcurrentLocks;
+
+class VM_ThreadDump : public VM_Operation {
+ private:
+ ThreadDumpResult* _result;
+ int _num_threads;
+ GrowableArray<instanceHandle>* _threads;
+ int _max_depth;
+ bool _with_locked_monitors;
+ bool _with_locked_synchronizers;
+
+ ThreadSnapshot* snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl);
+
+ public:
+ VM_ThreadDump(ThreadDumpResult* result,
+ int max_depth, // -1 indicates entire stack
+ bool with_locked_monitors,
+ bool with_locked_synchronizers);
+
+ VM_ThreadDump(ThreadDumpResult* result,
+ GrowableArray<instanceHandle>* threads,
+ int num_threads, // number of entries in the threads array
+ int max_depth,
+ bool with_locked_monitors,
+ bool with_locked_synchronizers);
+
+ VMOp_Type type() const { return VMOp_ThreadDump; }
+ void doit();
+ bool doit_prologue();
+ void doit_epilogue();
+};
+
+
+class VM_Exit: public VM_Operation {
+ private:
+ int _exit_code;
+ static volatile bool _vm_exited;
+ static Thread * _shutdown_thread;
+ static void wait_if_vm_exited();
+ public:
+ VM_Exit(int exit_code) {
+ _exit_code = exit_code;
+ }
+ static int wait_for_threads_in_native_to_block();
+ static int set_vm_exited();
+ static bool vm_exited() { return _vm_exited; }
+ static void block_if_vm_exited() {
+ if (_vm_exited) {
+ wait_if_vm_exited();
+ }
+ }
+ VMOp_Type type() const { return VMOp_Exit; }
+ void doit();
+};
diff --git a/src/share/vm/runtime/vm_version.cpp b/src/share/vm/runtime/vm_version.cpp
new file mode 100644
index 000000000..f5ee15ade
--- /dev/null
+++ b/src/share/vm/runtime/vm_version.cpp
@@ -0,0 +1,212 @@
+/*
+ * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_vm_version.cpp.incl"
+
+const char* Abstract_VM_Version::_s_vm_release = Abstract_VM_Version::vm_release();
+const char* Abstract_VM_Version::_s_internal_vm_info_string = Abstract_VM_Version::internal_vm_info_string();
+bool Abstract_VM_Version::_supports_cx8 = false;
+unsigned int Abstract_VM_Version::_logical_processors_per_package = 1U;
+
+#ifndef HOTSPOT_RELEASE_VERSION
+ #error HOTSPOT_RELEASE_VERSION must be defined
+#endif
+#ifndef JRE_RELEASE_VERSION
+ #error JRE_RELEASE_VERSION must be defined
+#endif
+#ifndef HOTSPOT_BUILD_TARGET
+ #error HOTSPOT_BUILD_TARGET must be defined
+#endif
+
+#ifdef PRODUCT
+ #define VM_RELEASE HOTSPOT_RELEASE_VERSION
+#else
+ #define VM_RELEASE HOTSPOT_RELEASE_VERSION "-" HOTSPOT_BUILD_TARGET
+#endif
+
+// HOTSPOT_RELEASE_VERSION must follow the release version naming convention
+// <major_ver>.<minor_ver>-b<nn>[-<identifier>][-<debug_target>]
+int Abstract_VM_Version::_vm_major_version = 0;
+int Abstract_VM_Version::_vm_minor_version = 0;
+int Abstract_VM_Version::_vm_build_number = 0;
+bool Abstract_VM_Version::_initialized = false;
+
+void Abstract_VM_Version::initialize() {
+ if (_initialized) {
+ return;
+ }
+ char* vm_version = os::strdup(HOTSPOT_RELEASE_VERSION);
+
+ // Expected vm_version format:
+ // <major_ver>.<minor_ver>-b<nn>[-<identifier>]
+ char* vm_major_ver = vm_version;
+ assert(isdigit(vm_major_ver[0]),"wrong vm major version number");
+ char* vm_minor_ver = strchr(vm_major_ver, '.');
+ assert(vm_minor_ver != NULL && isdigit(vm_minor_ver[1]),"wrong vm minor version number");
+ vm_minor_ver[0] = '\0'; // terminate vm_major_ver
+ vm_minor_ver += 1;
+ char* vm_build_num = strchr(vm_minor_ver, '-');
+ assert(vm_build_num != NULL && vm_build_num[1] == 'b' && isdigit(vm_build_num[2]),"wrong vm build number");
+ vm_build_num[0] = '\0'; // terminate vm_minor_ver
+ vm_build_num += 2;
+
+ _vm_major_version = atoi(vm_major_ver);
+ _vm_minor_version = atoi(vm_minor_ver);
+ _vm_build_number = atoi(vm_build_num);
+
+ os::free(vm_version);
+ _initialized = true;
+}
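+
+// Worked example of the parse above: for a hypothetical HOTSPOT_RELEASE_VERSION
+// of "11.3-b22-internal", the first '.' and the first '-' split the copy into
+// "11", "3" and "b22-internal", so _vm_major_version == 11, _vm_minor_version == 3
+// and _vm_build_number == 22 (atoi stops at the first non-digit character).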
+
+#if defined(_LP64)
+ #define VMLP "64-Bit "
+#else
+ #define VMLP ""
+#endif
+
+#ifdef KERNEL
+ #define VMTYPE "Kernel"
+#else // KERNEL
+#ifdef TIERED
+ #define VMTYPE "Server"
+#else
+ #define VMTYPE COMPILER1_PRESENT("Client") \
+ COMPILER2_PRESENT("Server")
+#endif // TIERED
+#endif // KERNEL
+
+#ifndef HOTSPOT_VM_DISTRO
+ #error HOTSPOT_VM_DISTRO must be defined
+#endif
+#define VMNAME HOTSPOT_VM_DISTRO " " VMLP VMTYPE " VM"
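+// For example, with HOTSPOT_VM_DISTRO defined as "Java HotSpot(TM)" (the value
+// used in Sun builds), an _LP64 COMPILER2 product build expands VMNAME to
+// "Java HotSpot(TM) 64-Bit Server VM".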
+
+const char* Abstract_VM_Version::vm_name() {
+ return VMNAME;
+}
+
+
+const char* Abstract_VM_Version::vm_vendor() {
+#ifdef VENDOR
+ return XSTR(VENDOR);
+#else
+ return "Sun Microsystems Inc.";
+#endif
+}
+
+
+const char* Abstract_VM_Version::vm_info_string() {
+ switch (Arguments::mode()) {
+ case Arguments::_int:
+ return UseSharedSpaces ? "interpreted mode, sharing" : "interpreted mode";
+ case Arguments::_mixed:
+ return UseSharedSpaces ? "mixed mode, sharing" : "mixed mode";
+ case Arguments::_comp:
+ return UseSharedSpaces ? "compiled mode, sharing" : "compiled mode";
+ };
+ ShouldNotReachHere();
+ return "";
+}
+
+// NOTE: do *not* use stringStream. This function is called by the
+// fatal error handler. If the crash is in a native thread,
+// stringStream cannot get resources allocated and will SEGV.
+const char* Abstract_VM_Version::vm_release() {
+ return VM_RELEASE;
+}
+
+#define OS LINUX_ONLY("linux") \
+ WINDOWS_ONLY("windows") \
+ SOLARIS_ONLY("solaris")
+
+#define CPU IA32_ONLY("x86") \
+ IA64_ONLY("ia64") \
+ AMD64_ONLY("amd64") \
+ SPARC_ONLY("sparc")
+
+const char *Abstract_VM_Version::vm_platform_string() {
+ return OS "-" CPU;
+}
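+
+// vm_platform_string() above therefore yields strings such as "linux-amd64" or
+// "solaris-sparc", depending on which OS/CPU macro pair is active for the build.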
+
+const char* Abstract_VM_Version::internal_vm_info_string() {
+ #ifndef HOTSPOT_BUILD_USER
+ #define HOTSPOT_BUILD_USER unknown
+ #endif
+
+ #ifndef HOTSPOT_BUILD_COMPILER
+ #ifdef _MSC_VER
+ #if _MSC_VER == 1100
+ #define HOTSPOT_BUILD_COMPILER "MS VC++ 5.0"
+ #elif _MSC_VER == 1200
+ #define HOTSPOT_BUILD_COMPILER "MS VC++ 6.0"
+ #elif _MSC_VER == 1310
+ #define HOTSPOT_BUILD_COMPILER "MS VC++ 7.1"
+ #elif _MSC_VER == 1400
+ #define HOTSPOT_BUILD_COMPILER "MS VC++ 8.0"
+ #else
+ #define HOTSPOT_BUILD_COMPILER "unknown MS VC++:" XSTR(_MSC_VER)
+ #endif
+ #elif defined(__SUNPRO_CC)
+ #if __SUNPRO_CC == 0x420
+ #define HOTSPOT_BUILD_COMPILER "Workshop 4.2"
+ #elif __SUNPRO_CC == 0x500
+ #define HOTSPOT_BUILD_COMPILER "Workshop 5.0 compat=" XSTR(__SUNPRO_CC_COMPAT)
+ #elif __SUNPRO_CC == 0x520
+ #define HOTSPOT_BUILD_COMPILER "Workshop 5.2 compat=" XSTR(__SUNPRO_CC_COMPAT)
+ #elif __SUNPRO_CC == 0x580
+ #define HOTSPOT_BUILD_COMPILER "Workshop 5.8"
+ #elif __SUNPRO_CC == 0x590
+ #define HOTSPOT_BUILD_COMPILER "Workshop 5.9"
+ #else
+ #define HOTSPOT_BUILD_COMPILER "unknown Workshop:" XSTR(__SUNPRO_CC)
+ #endif
+ #elif defined(__GNUC__)
+ #define HOTSPOT_BUILD_COMPILER "gcc " __VERSION__
+ #else
+ #define HOTSPOT_BUILD_COMPILER "unknown compiler"
+ #endif
+ #endif
+
+
+ return VMNAME " (" VM_RELEASE ") for " OS "-" CPU
+ " JRE (" JRE_RELEASE_VERSION "), built on " __DATE__ " " __TIME__
+ " by " XSTR(HOTSPOT_BUILD_USER) " with " HOTSPOT_BUILD_COMPILER;
+}
+
+unsigned int Abstract_VM_Version::jvm_version() {
+ return ((Abstract_VM_Version::vm_major_version() & 0xFF) << 24) |
+ ((Abstract_VM_Version::vm_minor_version() & 0xFF) << 16) |
+ (Abstract_VM_Version::vm_build_number() & 0xFF);
+}
+
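+// Worked example of the packing above: a hypothetical 11.3-b22 release packs to
+// (11 << 24) | (3 << 16) | 22 == 0x0B030016; bits 8..15 are simply left zero.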
+
+void VM_Version_init() {
+ VM_Version::initialize();
+
+#ifndef PRODUCT
+ if (PrintMiscellaneous && Verbose) {
+ os::print_cpu_info(tty);
+ }
+#endif
+}
diff --git a/src/share/vm/runtime/vm_version.hpp b/src/share/vm/runtime/vm_version.hpp
new file mode 100644
index 000000000..2f708e39d
--- /dev/null
+++ b/src/share/vm/runtime/vm_version.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// VM_Version provides information about the VM.
+
+class Abstract_VM_Version: AllStatic {
+ protected:
+ friend class VMStructs;
+ static const char* _s_vm_release;
+ static const char* _s_internal_vm_info_string;
+ // These are set by machine-dependent initializations
+ static bool _supports_cx8;
+ static unsigned int _logical_processors_per_package;
+ static int _vm_major_version;
+ static int _vm_minor_version;
+ static int _vm_build_number;
+ static bool _initialized;
+ public:
+ static void initialize();
+
+ // Name
+ static const char* vm_name();
+ // Vendor
+ static const char* vm_vendor();
+ // VM version information string printed by launcher (java -version)
+ static const char* vm_info_string();
+ static const char* vm_release();
+ static const char* vm_platform_string();
+
+ static int vm_major_version() { assert(_initialized, "not initialized"); return _vm_major_version; }
+ static int vm_minor_version() { assert(_initialized, "not initialized"); return _vm_minor_version; }
+ static int vm_build_number() { assert(_initialized, "not initialized"); return _vm_build_number; }
+
+ // Gets the jvm_version_info.jvm_version defined in jvm.h
+ static unsigned int jvm_version();
+
+ // Internal version providing additional build information
+ static const char* internal_vm_info_string();
+
+ // does HW support an 8-byte compare-exchange operation?
+ static bool supports_cx8() {return _supports_cx8;}
+ static unsigned int logical_processors_per_package() {
+ return _logical_processors_per_package;
+ }
+
+ // Number of page sizes efficiently supported by the hardware. Most chips now
+ // support two sizes, thus this default implementation. Processor-specific
+ // subclasses should define new versions to hide this one as needed. Note
+ // that the O/S may support more sizes, but at most this many are used.
+ static uint page_size_count() { return 2; }
+};
diff --git a/src/share/vm/runtime/vtune.hpp b/src/share/vm/runtime/vtune.hpp
new file mode 100644
index 000000000..3d6187baa
--- /dev/null
+++ b/src/share/vm/runtime/vtune.hpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Interface to Intel's VTune profiler.
+
+class VTune : AllStatic {
+ public:
+ static void create_nmethod(nmethod* nm); // register newly created nmethod
+ static void delete_nmethod(nmethod* nm); // unregister nmethod before discarding it
+
+ static void register_stub(const char* name, address start, address end);
+ // register internal VM stub
+ static void start_GC(); // start/end of GC or scavenge
+ static void end_GC();
+
+ static void start_class_load(); // start/end of class loading
+ static void end_class_load();
+
+ static void exit(); // VM exit
+};
+
+
+// helper objects
+class VTuneGCMarker : StackObj {
+ public:
+ VTuneGCMarker() { VTune::start_GC(); }
+ ~VTuneGCMarker() { VTune::end_GC(); }
+};
+
+class VTuneClassLoadMarker : StackObj {
+ public:
+ VTuneClassLoadMarker() { VTune::start_class_load(); }
+ ~VTuneClassLoadMarker() { VTune::end_class_load(); }
+};