aboutsummaryrefslogtreecommitdiff
path: root/src/share/vm/services
diff options
context:
space:
mode:
authorduke <none@none>2007-12-01 00:00:00 +0000
committerduke <none@none>2007-12-01 00:00:00 +0000
commitfa6b5a8027b86d2f8a200e72b4ef6a0d3f9189d3 (patch)
tree8376f6e5c41e70162b5867d9e1fea3f17f540473 /src/share/vm/services
Initial loadjdk7-b24
Diffstat (limited to 'src/share/vm/services')
-rw-r--r--src/share/vm/services/attachListener.cpp451
-rw-r--r--src/share/vm/services/attachListener.hpp147
-rw-r--r--src/share/vm/services/classLoadingService.cpp212
-rw-r--r--src/share/vm/services/classLoadingService.hpp135
-rw-r--r--src/share/vm/services/dtraceAttacher.cpp138
-rw-r--r--src/share/vm/services/dtraceAttacher.hpp44
-rw-r--r--src/share/vm/services/heapDumper.cpp1773
-rw-r--r--src/share/vm/services/heapDumper.hpp69
-rw-r--r--src/share/vm/services/jmm.h287
-rw-r--r--src/share/vm/services/lowMemoryDetector.cpp422
-rw-r--r--src/share/vm/services/lowMemoryDetector.hpp285
-rw-r--r--src/share/vm/services/management.cpp2019
-rw-r--r--src/share/vm/services/management.hpp106
-rw-r--r--src/share/vm/services/memoryManager.cpp245
-rw-r--r--src/share/vm/services/memoryManager.hpp233
-rw-r--r--src/share/vm/services/memoryPool.cpp249
-rw-r--r--src/share/vm/services/memoryPool.hpp212
-rw-r--r--src/share/vm/services/memoryService.cpp548
-rw-r--r--src/share/vm/services/memoryService.hpp162
-rw-r--r--src/share/vm/services/memoryUsage.hpp77
-rw-r--r--src/share/vm/services/psMemoryPool.cpp96
-rw-r--r--src/share/vm/services/psMemoryPool.hpp81
-rw-r--r--src/share/vm/services/runtimeService.cpp176
-rw-r--r--src/share/vm/services/runtimeService.hpp59
-rw-r--r--src/share/vm/services/serviceUtil.hpp89
-rw-r--r--src/share/vm/services/threadService.cpp885
-rw-r--r--src/share/vm/services/threadService.hpp566
27 files changed, 9766 insertions, 0 deletions
diff --git a/src/share/vm/services/attachListener.cpp b/src/share/vm/services/attachListener.cpp
new file mode 100644
index 000000000..2361f200a
--- /dev/null
+++ b/src/share/vm/services/attachListener.cpp
@@ -0,0 +1,451 @@
+/*
+ * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_attachListener.cpp.incl"
+
+volatile bool AttachListener::_initialized;
+
+// Implementation of "properties" command.
+//
+// Invokes sun.misc.VMSupport.serializePropertiesToByteArray to serialize
+// the system properties into a byte array.
+
+static klassOop load_and_initialize_klass(symbolHandle sh, TRAPS) {
+ klassOop k = SystemDictionary::resolve_or_fail(sh, true, CHECK_NULL);
+ instanceKlassHandle ik (THREAD, k);
+ if (ik->should_be_initialized()) {
+ ik->initialize(CHECK_NULL);
+ }
+ return ik();
+}
+
+static jint get_properties(AttachOperation* op, outputStream* out, symbolHandle serializePropertiesMethod) {
+ Thread* THREAD = Thread::current();
+ HandleMark hm;
+
+ // load sun.misc.VMSupport
+ symbolHandle klass = vmSymbolHandles::sun_misc_VMSupport();
+ klassOop k = load_and_initialize_klass(klass, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ java_lang_Throwable::print(PENDING_EXCEPTION, out);
+ CLEAR_PENDING_EXCEPTION;
+ return JNI_ERR;
+ }
+ instanceKlassHandle ik(THREAD, k);
+
+ // invoke the serializePropertiesToByteArray method
+ JavaValue result(T_OBJECT);
+ JavaCallArguments args;
+
+
+ symbolHandle signature = vmSymbolHandles::serializePropertiesToByteArray_signature();
+ JavaCalls::call_static(&result,
+ ik,
+ serializePropertiesMethod,
+ signature,
+ &args,
+ THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ java_lang_Throwable::print(PENDING_EXCEPTION, out);
+ CLEAR_PENDING_EXCEPTION;
+ return JNI_ERR;
+ }
+
+ // The result should be a [B
+ oop res = (oop)result.get_jobject();
+ assert(res->is_typeArray(), "just checking");
+ assert(typeArrayKlass::cast(res->klass())->element_type() == T_BYTE, "just checking");
+
+ // copy the bytes to the output stream
+ typeArrayOop ba = typeArrayOop(res);
+ jbyte* addr = typeArrayOop(res)->byte_at_addr(0);
+ out->print_raw((const char*)addr, ba->length());
+
+ return JNI_OK;
+}
+
+// Implementation of "properties" command.
+static jint get_system_properties(AttachOperation* op, outputStream* out) {
+ return get_properties(op, out, vmSymbolHandles::serializePropertiesToByteArray_name());
+}
+
+// Implementation of "agent_properties" command.
+static jint get_agent_properties(AttachOperation* op, outputStream* out) {
+ return get_properties(op, out, vmSymbolHandles::serializeAgentPropertiesToByteArray_name());
+}
+
+// Implementation of "datadump" command.
+//
+// Raises a SIGBREAK signal so that the VM dumps threads, does deadlock detection,
+// etc. In theory this command should only post a DataDumpRequest to any
+// JVMTI environment that has enabled this event. However it's useful to
+// trigger the SIGBREAK handler.
+
+static jint data_dump(AttachOperation* op, outputStream* out) {
+ if (!ReduceSignalUsage) {
+ AttachListener::pd_data_dump();
+ } else {
+ if (JvmtiExport::should_post_data_dump()) {
+ JvmtiExport::post_data_dump();
+ }
+ }
+ return JNI_OK;
+}
+
+// Implementation of "threaddump" command - essentially a remote ctrl-break
+//
+static jint thread_dump(AttachOperation* op, outputStream* out) {
+ bool print_concurrent_locks = false;
+ if (op->arg(0) != NULL && strcmp(op->arg(0), "-l") == 0) {
+ print_concurrent_locks = true;
+ }
+
+ // thread stacks
+ VM_PrintThreads op1(out, print_concurrent_locks);
+ VMThread::execute(&op1);
+
+ // JNI global handles
+ VM_PrintJNI op2(out);
+ VMThread::execute(&op2);
+
+ // Deadlock detection
+ VM_FindDeadlocks op3(out);
+ VMThread::execute(&op3);
+
+ return JNI_OK;
+}
+
+#ifndef SERVICES_KERNEL // Heap dumping not supported
+// Implementation of "dumpheap" command.
+//
+// Input arguments :-
+// arg0: Name of the dump file
+// arg1: "-live" or "-all"
+jint dump_heap(AttachOperation* op, outputStream* out) {
+ const char* path = op->arg(0);
+ if (path == NULL || path[0] == '\0') {
+ out->print_cr("No dump file specified");
+ } else {
+ bool live_objects_only = true; // default is true to retain the behavior before this change is made
+ const char* arg1 = op->arg(1);
+ if (arg1 != NULL && (strlen(arg1) > 0)) {
+ if (strcmp(arg1, "-all") != 0 && strcmp(arg1, "-live") != 0) {
+ out->print_cr("Invalid argument to dumpheap operation: %s", arg1);
+ return JNI_ERR;
+ }
+ live_objects_only = strcmp(arg1, "-live") == 0;
+ }
+
+ // Request a full GC before heap dump if live_objects_only = true
+    // This helps reduce the number of unreachable objects in the dump
+ // and makes it easier to browse.
+ HeapDumper dumper(live_objects_only /* request GC */);
+ int res = dumper.dump(op->arg(0));
+ if (res == 0) {
+ out->print_cr("Heap dump file created");
+ } else {
+ // heap dump failed
+ ResourceMark rm;
+ char* error = dumper.error_as_C_string();
+ if (error == NULL) {
+ out->print_cr("Dump failed - reason unknown");
+ } else {
+ out->print_cr("%s", error);
+ }
+ }
+ }
+ return JNI_OK;
+}
+#endif // SERVICES_KERNEL
+
+// Implementation of "inspectheap" command
+//
+// Input arguments :-
+// arg0: "-live" or "-all"
+static jint heap_inspection(AttachOperation* op, outputStream* out) {
+ bool live_objects_only = true; // default is true to retain the behavior before this change is made
+ const char* arg0 = op->arg(0);
+ if (arg0 != NULL && (strlen(arg0) > 0)) {
+ if (strcmp(arg0, "-all") != 0 && strcmp(arg0, "-live") != 0) {
+ out->print_cr("Invalid argument to inspectheap operation: %s", arg0);
+ return JNI_ERR;
+ }
+ live_objects_only = strcmp(arg0, "-live") == 0;
+ }
+ VM_GC_HeapInspection heapop(out, live_objects_only /* request gc */);
+ VMThread::execute(&heapop);
+ return JNI_OK;
+}
+
+// set a boolean global flag using value from AttachOperation
+static jint set_bool_flag(const char* name, AttachOperation* op, outputStream* out) {
+ bool value = true;
+ const char* arg1;
+ if ((arg1 = op->arg(1)) != NULL) {
+ int tmp;
+ int n = sscanf(arg1, "%d", &tmp);
+ if (n != 1) {
+ out->print_cr("flag value has to be boolean (1 or 0)");
+ return JNI_ERR;
+ }
+ value = (tmp != 0);
+ }
+ bool res = CommandLineFlags::boolAtPut((char*)name, &value, ATTACH_ON_DEMAND);
+ if (! res) {
+ out->print_cr("setting flag %s failed", name);
+ }
+ return res? JNI_OK : JNI_ERR;
+}
+
+// set a intx global flag using value from AttachOperation
+static jint set_intx_flag(const char* name, AttachOperation* op, outputStream* out) {
+ intx value;
+ const char* arg1;
+ if ((arg1 = op->arg(1)) != NULL) {
+ int n = sscanf(arg1, INTX_FORMAT, &value);
+ if (n != 1) {
+ out->print_cr("flag value has to be integer");
+ return JNI_ERR;
+ }
+ }
+ bool res = CommandLineFlags::intxAtPut((char*)name, &value, ATTACH_ON_DEMAND);
+ if (! res) {
+ out->print_cr("setting flag %s failed", name);
+ }
+
+ return res? JNI_OK : JNI_ERR;
+}
+
+// set a uintx global flag using value from AttachOperation
+static jint set_uintx_flag(const char* name, AttachOperation* op, outputStream* out) {
+ uintx value;
+ const char* arg1;
+ if ((arg1 = op->arg(1)) != NULL) {
+ int n = sscanf(arg1, UINTX_FORMAT, &value);
+ if (n != 1) {
+ out->print_cr("flag value has to be integer");
+ return JNI_ERR;
+ }
+ }
+ bool res = CommandLineFlags::uintxAtPut((char*)name, &value, ATTACH_ON_DEMAND);
+ if (! res) {
+ out->print_cr("setting flag %s failed", name);
+ }
+
+ return res? JNI_OK : JNI_ERR;
+}
+
+// set a string global flag using value from AttachOperation
+static jint set_ccstr_flag(const char* name, AttachOperation* op, outputStream* out) {
+ const char* value;
+ if ((value = op->arg(1)) == NULL) {
+ out->print_cr("flag value has to be a string");
+ return JNI_ERR;
+ }
+ bool res = CommandLineFlags::ccstrAtPut((char*)name, &value, ATTACH_ON_DEMAND);
+ if (res) {
+ FREE_C_HEAP_ARRAY(char, value);
+ } else {
+ out->print_cr("setting flag %s failed", name);
+ }
+
+ return res? JNI_OK : JNI_ERR;
+}
+
+// Implementation of "setflag" command
+static jint set_flag(AttachOperation* op, outputStream* out) {
+
+ const char* name = NULL;
+ if ((name = op->arg(0)) == NULL) {
+ out->print_cr("flag name is missing");
+ return JNI_ERR;
+ }
+
+ Flag* f = Flag::find_flag((char*)name, strlen(name));
+ if (f && f->is_external() && f->is_writeable()) {
+ if (f->is_bool()) {
+ return set_bool_flag(name, op, out);
+ } else if (f->is_intx()) {
+ return set_intx_flag(name, op, out);
+ } else if (f->is_uintx()) {
+ return set_uintx_flag(name, op, out);
+ } else if (f->is_ccstr()) {
+ return set_ccstr_flag(name, op, out);
+ } else {
+ ShouldNotReachHere();
+ return JNI_ERR;
+ }
+ } else {
+ return AttachListener::pd_set_flag(op, out);
+ }
+}
+
+// Implementation of "printflag" command
+static jint print_flag(AttachOperation* op, outputStream* out) {
+ const char* name = NULL;
+ if ((name = op->arg(0)) == NULL) {
+ out->print_cr("flag name is missing");
+ return JNI_ERR;
+ }
+ Flag* f = Flag::find_flag((char*)name, strlen(name));
+ if (f) {
+ f->print_as_flag(out);
+ out->print_cr("");
+ } else {
+ out->print_cr("no such flag '%s'", name);
+ }
+ return JNI_OK;
+}
+
+// Table to map operation names to functions.
+
+// names must be of length <= AttachOperation::name_length_max
+static AttachOperationFunctionInfo funcs[] = {
+ { "agentProperties", get_agent_properties },
+ { "datadump", data_dump },
+#ifndef SERVICES_KERNEL
+ { "dumpheap", dump_heap },
+#endif // SERVICES_KERNEL
+ { "load", JvmtiExport::load_agent_library },
+ { "properties", get_system_properties },
+ { "threaddump", thread_dump },
+ { "inspectheap", heap_inspection },
+ { "setflag", set_flag },
+ { "printflag", print_flag },
+ { NULL, NULL }
+};
+
+
+
+// The Attach Listener threads services a queue. It dequeues an operation
+// from the queue, examines the operation name (command), and dispatches
+// to the corresponding function to perform the operation.
+
+static void attach_listener_thread_entry(JavaThread* thread, TRAPS) {
+ os::set_priority(thread, NearMaxPriority);
+
+ if (AttachListener::pd_init() != 0) {
+ return;
+ }
+ AttachListener::set_initialized();
+
+ for (;;) {
+ AttachOperation* op = AttachListener::dequeue();
+ if (op == NULL) {
+ return; // dequeue failed or shutdown
+ }
+
+ ResourceMark rm;
+ bufferedStream st;
+ jint res = JNI_OK;
+
+ // handle special detachall operation
+ if (strcmp(op->name(), AttachOperation::detachall_operation_name()) == 0) {
+ AttachListener::detachall();
+ } else {
+      // find the function to dispatch to
+ AttachOperationFunctionInfo* info = NULL;
+ for (int i=0; funcs[i].name != NULL; i++) {
+ const char* name = funcs[i].name;
+ assert(strlen(name) <= AttachOperation::name_length_max, "operation <= name_length_max");
+ if (strcmp(op->name(), name) == 0) {
+ info = &(funcs[i]);
+ break;
+ }
+ }
+
+ // check for platform dependent attach operation
+ if (info == NULL) {
+ info = AttachListener::pd_find_operation(op->name());
+ }
+
+ if (info != NULL) {
+ // dispatch to the function that implements this operation
+ res = (info->func)(op, &st);
+ } else {
+ st.print("Operation %s not recognized!", op->name());
+ res = JNI_ERR;
+ }
+ }
+
+ // operation complete - send result and output to client
+ op->complete(res, &st);
+ }
+}
+
+// Starts the Attach Listener thread
+void AttachListener::init() {
+ EXCEPTION_MARK;
+ klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_Thread(), true, CHECK);
+ instanceKlassHandle klass (THREAD, k);
+ instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
+
+ const char thread_name[] = "Attach Listener";
+ Handle string = java_lang_String::create_from_str(thread_name, CHECK);
+
+ // Initialize thread_oop to put it into the system threadGroup
+ Handle thread_group (THREAD, Universe::system_thread_group());
+ JavaValue result(T_VOID);
+ JavaCalls::call_special(&result, thread_oop,
+ klass,
+ vmSymbolHandles::object_initializer_name(),
+ vmSymbolHandles::threadgroup_string_void_signature(),
+ thread_group,
+ string,
+ CHECK);
+
+ KlassHandle group(THREAD, SystemDictionary::threadGroup_klass());
+ JavaCalls::call_special(&result,
+ thread_group,
+ group,
+ vmSymbolHandles::add_method_name(),
+ vmSymbolHandles::thread_void_signature(),
+ thread_oop, // ARG 1
+ CHECK);
+
+ { MutexLocker mu(Threads_lock);
+ JavaThread* listener_thread = new JavaThread(&attach_listener_thread_entry);
+
+ // Check that thread and osthread were created
+ if (listener_thread == NULL || listener_thread->osthread() == NULL) {
+ vm_exit_during_initialization("java.lang.OutOfMemoryError",
+ "unable to create new native thread");
+ }
+
+ java_lang_Thread::set_thread(thread_oop(), listener_thread);
+ java_lang_Thread::set_daemon(thread_oop());
+
+ listener_thread->set_threadObj(thread_oop());
+ Threads::add(listener_thread);
+ Thread::start(listener_thread);
+ }
+}
+
+// Performs clean-up tasks on platforms where we can detect that the last
+// client has detached
+void AttachListener::detachall() {
+ // call the platform dependent clean-up
+ pd_detachall();
+}
diff --git a/src/share/vm/services/attachListener.hpp b/src/share/vm/services/attachListener.hpp
new file mode 100644
index 000000000..7ae0c233d
--- /dev/null
+++ b/src/share/vm/services/attachListener.hpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// The AttachListener thread services a queue of operations that are enqueued
+// by client tools. Each operation is identified by a name and has up to 3
+// arguments. The operation name is mapped to a function which performs the
+// operation. The function is called with an outputStream which it can use to
+// write any result data (for example, the properties command serializes
+// property names and values to the output stream). When the function
+// completes, the result value and any result data are returned to the client
+// tool.
+
+#ifndef SERVICES_KERNEL
+
+class AttachOperation;
+
+typedef jint (*AttachOperationFunction)(AttachOperation* op, outputStream* out);
+
+struct AttachOperationFunctionInfo {
+ const char* name;
+ AttachOperationFunction func;
+};
+#endif // SERVICES_KERNEL
+
+class AttachListener: AllStatic {
+ public:
+ static void init() KERNEL_RETURN;
+ static void abort() KERNEL_RETURN;
+
+ // invoke to perform clean-up tasks when all clients detach
+ static void detachall() KERNEL_RETURN;
+
+ // indicates if the Attach Listener needs to be created at startup
+ static bool init_at_startup() KERNEL_RETURN_(return false;);
+
+ // indicates if we have a trigger to start the Attach Listener
+ static bool is_init_trigger() KERNEL_RETURN_(return false;);
+
+#ifdef SERVICES_KERNEL
+ static bool is_attach_supported() { return false; }
+#else // SERVICES_KERNEL
+ private:
+ static volatile bool _initialized;
+
+ public:
+ static bool is_initialized() { return _initialized; }
+ static void set_initialized() { _initialized = true; }
+
+ // indicates if this VM supports attach-on-demand
+ static bool is_attach_supported() { return !DisableAttachMechanism; }
+
+ // platform specific initialization
+ static int pd_init();
+
+ // platform specific operation
+ static AttachOperationFunctionInfo* pd_find_operation(const char* name);
+
+ // platform specific flag change
+ static jint pd_set_flag(AttachOperation* op, outputStream* out);
+
+ // platform specific detachall
+ static void pd_detachall();
+
+ // platform specific data dump
+ static void pd_data_dump();
+
+ // dequeue the next operation
+ static AttachOperation* dequeue();
+#endif // SERVICES_KERNEL
+};
+
+#ifndef SERVICES_KERNEL
+class AttachOperation: public CHeapObj {
+ public:
+ enum {
+ name_length_max = 16, // maximum length of name
+ arg_length_max = 1024, // maximum length of argument
+ arg_count_max = 3 // maximum number of arguments
+ };
+
+ // name of special operation that can be enqueued when all
+ // clients detach
+ static char* detachall_operation_name() { return (char*)"detachall"; }
+
+ private:
+ char _name[name_length_max+1];
+ char _arg[arg_count_max][arg_length_max+1];
+
+ public:
+ const char* name() const { return _name; }
+
+ // set the operation name
+ void set_name(char* name) {
+ assert(strlen(name) <= name_length_max, "exceeds maximum name length");
+ strcpy(_name, name);
+ }
+
+ // get an argument value
+ const char* arg(int i) const {
+ assert(i>=0 && i<arg_count_max, "invalid argument index");
+ return _arg[i];
+ }
+
+ // set an argument value
+ void set_arg(int i, char* arg) {
+ assert(i>=0 && i<arg_count_max, "invalid argument index");
+ if (arg == NULL) {
+ _arg[i][0] = '\0';
+ } else {
+ assert(strlen(arg) <= arg_length_max, "exceeds maximum argument length");
+ strcpy(_arg[i], arg);
+ }
+ }
+
+ // create an operation of a given name
+ AttachOperation(char* name) {
+ set_name(name);
+ for (int i=0; i<arg_count_max; i++) {
+ set_arg(i, NULL);
+ }
+ }
+
+ // complete operation by sending result code and any result data to the client
+ virtual void complete(jint result, bufferedStream* result_stream) = 0;
+};
+#endif // SERVICES_KERNEL
diff --git a/src/share/vm/services/classLoadingService.cpp b/src/share/vm/services/classLoadingService.cpp
new file mode 100644
index 000000000..d8f107579
--- /dev/null
+++ b/src/share/vm/services/classLoadingService.cpp
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_classLoadingService.cpp.incl"
+
+#ifdef DTRACE_ENABLED
+
+// Only bother with this argument setup if dtrace is available
+
+HS_DTRACE_PROBE_DECL4(hotspot, class__loaded, char*, int, oop, bool);
+HS_DTRACE_PROBE_DECL4(hotspot, class__unloaded, char*, int, oop, bool);
+
+#define DTRACE_CLASSLOAD_PROBE(type, clss, shared) \
+ { \
+ char* data = NULL; \
+ int len = 0; \
+ symbolOop name = (clss)->name(); \
+ if (name != NULL) { \
+ data = (char*)name->bytes(); \
+ len = name->utf8_length(); \
+ } \
+ HS_DTRACE_PROBE4(hotspot, class__##type, \
+ data, len, (clss)->class_loader(), (shared)); \
+ }
+
+#else // ndef DTRACE_ENABLED
+
+#define DTRACE_CLASSLOAD_PROBE(type, clss, shared)
+
+#endif
+
+// counters for classes loaded from class files
+PerfCounter* ClassLoadingService::_classes_loaded_count = NULL;
+PerfCounter* ClassLoadingService::_classes_unloaded_count = NULL;
+PerfCounter* ClassLoadingService::_classbytes_loaded = NULL;
+PerfCounter* ClassLoadingService::_classbytes_unloaded = NULL;
+
+// counters for classes loaded from shared archive
+PerfCounter* ClassLoadingService::_shared_classes_loaded_count = NULL;
+PerfCounter* ClassLoadingService::_shared_classes_unloaded_count = NULL;
+PerfCounter* ClassLoadingService::_shared_classbytes_loaded = NULL;
+PerfCounter* ClassLoadingService::_shared_classbytes_unloaded = NULL;
+PerfVariable* ClassLoadingService::_class_methods_size = NULL;
+
+void ClassLoadingService::init() {
+ EXCEPTION_MARK;
+
+ // These counters are for java.lang.management API support.
+ // They are created even if -XX:-UsePerfData is set and in
+ // that case, they will be allocated on C heap.
+ _classes_loaded_count =
+ PerfDataManager::create_counter(JAVA_CLS, "loadedClasses",
+ PerfData::U_Events, CHECK);
+
+ _classes_unloaded_count =
+ PerfDataManager::create_counter(JAVA_CLS, "unloadedClasses",
+ PerfData::U_Events, CHECK);
+
+ _shared_classes_loaded_count =
+ PerfDataManager::create_counter(JAVA_CLS, "sharedLoadedClasses",
+ PerfData::U_Events, CHECK);
+
+ _shared_classes_unloaded_count =
+ PerfDataManager::create_counter(JAVA_CLS, "sharedUnloadedClasses",
+ PerfData::U_Events, CHECK);
+
+ if (UsePerfData) {
+ _classbytes_loaded =
+ PerfDataManager::create_counter(SUN_CLS, "loadedBytes",
+ PerfData::U_Bytes, CHECK);
+
+ _classbytes_unloaded =
+ PerfDataManager::create_counter(SUN_CLS, "unloadedBytes",
+ PerfData::U_Bytes, CHECK);
+ _shared_classbytes_loaded =
+ PerfDataManager::create_counter(SUN_CLS, "sharedLoadedBytes",
+ PerfData::U_Bytes, CHECK);
+
+ _shared_classbytes_unloaded =
+ PerfDataManager::create_counter(SUN_CLS, "sharedUnloadedBytes",
+ PerfData::U_Bytes, CHECK);
+ _class_methods_size =
+ PerfDataManager::create_variable(SUN_CLS, "methodBytes",
+ PerfData::U_Bytes, CHECK);
+ }
+}
+
+void ClassLoadingService::notify_class_unloaded(instanceKlass* k) {
+ DTRACE_CLASSLOAD_PROBE(unloaded, k, false);
+ // Classes that can be unloaded must be non-shared
+ _classes_unloaded_count->inc();
+
+ if (UsePerfData) {
+ // add the class size
+ size_t size = compute_class_size(k);
+ _classbytes_unloaded->inc(size);
+
+ // Compute method size & subtract from running total.
+ // We are called during phase 1 of mark sweep, so it's
+ // still ok to iterate through methodOops here.
+ objArrayOop methods = k->methods();
+ for (int i = 0; i < methods->length(); i++) {
+ _class_methods_size->inc(-methods->obj_at(i)->size());
+ }
+ }
+
+ if (TraceClassUnloading) {
+ ResourceMark rm;
+ tty->print_cr("[Unloading class %s]", k->external_name());
+ }
+}
+
+void ClassLoadingService::notify_class_loaded(instanceKlass* k, bool shared_class) {
+ DTRACE_CLASSLOAD_PROBE(loaded, k, shared_class);
+ PerfCounter* classes_counter = (shared_class ? _shared_classes_loaded_count
+ : _classes_loaded_count);
+ // increment the count
+ classes_counter->inc();
+
+ if (UsePerfData) {
+ PerfCounter* classbytes_counter = (shared_class ? _shared_classbytes_loaded
+ : _classbytes_loaded);
+ // add the class size
+ size_t size = compute_class_size(k);
+ classbytes_counter->inc(size);
+ }
+}
+
+size_t ClassLoadingService::compute_class_size(instanceKlass* k) {
+ // lifted from ClassStatistics.do_class(klassOop k)
+
+ size_t class_size = 0;
+
+ class_size += k->as_klassOop()->size();
+
+ if (k->oop_is_instance()) {
+ class_size += k->methods()->size();
+ class_size += k->constants()->size();
+ class_size += k->local_interfaces()->size();
+ class_size += k->transitive_interfaces()->size();
+ // We do not have to count implementors, since we only store one!
+ class_size += k->fields()->size();
+ }
+ return class_size * oopSize;
+}
+
+
+bool ClassLoadingService::set_verbose(bool verbose) {
+ MutexLocker m(Management_lock);
+
+ // verbose will be set to the previous value
+ bool succeed = CommandLineFlags::boolAtPut((char*)"TraceClassLoading", &verbose, MANAGEMENT);
+ assert(succeed, "Setting TraceClassLoading flag fails");
+ reset_trace_class_unloading();
+
+ return verbose;
+}
+
+// Caller to this function must own Management_lock
+void ClassLoadingService::reset_trace_class_unloading() {
+ assert(Management_lock->owned_by_self(), "Must own the Management_lock");
+ bool value = MemoryService::get_verbose() || ClassLoadingService::get_verbose();
+ bool succeed = CommandLineFlags::boolAtPut((char*)"TraceClassUnloading", &value, MANAGEMENT);
+ assert(succeed, "Setting TraceClassUnLoading flag fails");
+}
+
+GrowableArray<KlassHandle>* LoadedClassesEnumerator::_loaded_classes = NULL;
+Thread* LoadedClassesEnumerator::_current_thread = NULL;
+
+LoadedClassesEnumerator::LoadedClassesEnumerator(Thread* cur_thread) {
+ assert(cur_thread == Thread::current(), "Check current thread");
+
+ int init_size = ClassLoadingService::loaded_class_count();
+ _klass_handle_array = new GrowableArray<KlassHandle>(init_size);
+
+ // For consistency of the loaded classes, grab the SystemDictionary lock
+ MutexLocker sd_mutex(SystemDictionary_lock);
+
+ // Set _loaded_classes and _current_thread and begin enumerating all classes.
+ // Only one thread will do the enumeration at a time.
+ // These static variables are needed and they are used by the static method
+ // add_loaded_class called from classes_do().
+ _loaded_classes = _klass_handle_array;
+ _current_thread = cur_thread;
+
+ SystemDictionary::classes_do(&add_loaded_class);
+
+ // FIXME: Exclude array klasses for now
+ // Universe::basic_type_classes_do(&add_loaded_class);
+}
diff --git a/src/share/vm/services/classLoadingService.hpp b/src/share/vm/services/classLoadingService.hpp
new file mode 100644
index 000000000..c20824e29
--- /dev/null
+++ b/src/share/vm/services/classLoadingService.hpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class instanceKlass;
+
+// VM monitoring and management support for the Class Loading subsystem
+class ClassLoadingService : public AllStatic {
+private:
+ // Counters for classes loaded from class files
+ static PerfCounter* _classes_loaded_count;
+ static PerfCounter* _classes_unloaded_count;
+ static PerfCounter* _classbytes_loaded;
+ static PerfCounter* _classbytes_unloaded;
+
+ // Counters for classes loaded from shared archive
+ static PerfCounter* _shared_classes_loaded_count;
+ static PerfCounter* _shared_classes_unloaded_count;
+ static PerfCounter* _shared_classbytes_loaded;
+ static PerfCounter* _shared_classbytes_unloaded;
+
+ static PerfVariable* _class_methods_size;
+
+ static size_t compute_class_size(instanceKlass* k);
+
+public:
+ static void init();
+
+ static bool get_verbose() { return TraceClassLoading; }
+ static bool set_verbose(bool verbose);
+ static void reset_trace_class_unloading();
+
+ static jlong loaded_class_count() {
+ return _classes_loaded_count->get_value() + _shared_classes_loaded_count->get_value();
+ }
+ static jlong unloaded_class_count() {
+ return _classes_unloaded_count->get_value() + _shared_classes_unloaded_count->get_value();
+ }
+ static jlong loaded_class_bytes() {
+ if (UsePerfData) {
+ return _classbytes_loaded->get_value() + _shared_classbytes_loaded->get_value();
+ } else {
+ return -1;
+ }
+ }
+ static jlong unloaded_class_bytes() {
+ if (UsePerfData) {
+ return _classbytes_unloaded->get_value() + _shared_classbytes_unloaded->get_value();
+ } else {
+ return -1;
+ }
+ }
+
+ static jlong loaded_shared_class_count() {
+ return _shared_classes_loaded_count->get_value();
+ }
+ static jlong unloaded_shared_class_count() {
+ return _shared_classes_unloaded_count->get_value();
+ }
+ static jlong loaded_shared_class_bytes() {
+ if (UsePerfData) {
+ return _shared_classbytes_loaded->get_value();
+ } else {
+ return -1;
+ }
+ }
+ static jlong unloaded_shared_class_bytes() {
+ if (UsePerfData) {
+ return _shared_classbytes_unloaded->get_value();
+ } else {
+ return -1;
+ }
+ }
+ static jlong class_method_data_size() {
+ return (UsePerfData ? _class_methods_size->get_value() : -1);
+ }
+
+ static void notify_class_loaded(instanceKlass* k, bool shared_class);
+ // All unloaded classes are non-shared
+ static void notify_class_unloaded(instanceKlass* k);
+ static void add_class_method_size(int size) {
+ if (UsePerfData) {
+ _class_methods_size->inc(size);
+ }
+ }
+};
+
+// FIXME: make this piece of code to be shared by M&M and JVMTI
+class LoadedClassesEnumerator : public StackObj {
+private:
+ static GrowableArray<KlassHandle>* _loaded_classes;
+ // _current_thread is for creating a KlassHandle with a faster version constructor
+ static Thread* _current_thread;
+
+ GrowableArray<KlassHandle>* _klass_handle_array;
+
+public:
+ LoadedClassesEnumerator(Thread* cur_thread);
+
+ int num_loaded_classes() { return _klass_handle_array->length(); }
+ KlassHandle get_klass(int index) { return _klass_handle_array->at(index); }
+
+ static void add_loaded_class(klassOop k) {
+ // FIXME: For now - don't include array klasses
+ // The spec is unclear at this point to count array klasses or not
+ // and also indirect creation of array of super class and secondaries
+ //
+ // for (klassOop l = k; l != NULL; l = Klass::cast(l)->array_klass_or_null()) {
+ // KlassHandle h(_current_thread, l);
+ // _loaded_classes->append(h);
+ // }
+ KlassHandle h(_current_thread, k);
+ _loaded_classes->append(h);
+ }
+};
diff --git a/src/share/vm/services/dtraceAttacher.cpp b/src/share/vm/services/dtraceAttacher.cpp
new file mode 100644
index 000000000..ec2c8c82b
--- /dev/null
+++ b/src/share/vm/services/dtraceAttacher.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2006-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_dtraceAttacher.cpp.incl"
+
+#ifdef SOLARIS
+
+// VM operation that deoptimizes all compiled code so that a change to the
+// DTrace probe flags takes effect in every running frame. Runs at a
+// safepoint via VMThread::execute().
+class VM_DeoptimizeTheWorld : public VM_Operation {
+ public:
+ VMOp_Type type() const {
+ return VMOp_DeoptimizeTheWorld;
+ }
+ void doit() {
+ // Mark every nmethod, then deoptimize everything that depends on them.
+ CodeCache::mark_all_nmethods_for_deoptimization();
+ ResourceMark rm;
+ DeoptimizationMarker dm;
+ // Deoptimize all activations depending on marked methods
+ Deoptimization::deoptimize_dependents();
+
+ // Mark the dependent methods non entrant
+ CodeCache::make_marked_nmethods_not_entrant();
+ }
+};
+
+// Sets the named boolean -XX flag, recording ATTACH_ON_DEMAND as the
+// origin of the change.
+static void set_bool_flag(const char* flag, bool value) {
+ CommandLineFlags::boolAtPut((char*)flag, strlen(flag), &value,
+ ATTACH_ON_DEMAND);
+}
+
+// Enable only the "fine grained" flags. Do *not* touch
+// the overall "ExtendedDTraceProbes" flag.
+// probes is an OR of the DTRACE_*_PROBES bits; only flags that are
+// currently off are switched on, and the world is deoptimized once at the
+// end if (and only if) anything actually changed.
+void DTrace::enable_dprobes(int probes) {
+ bool changed = false;
+ if (!DTraceAllocProbes && (probes & DTRACE_ALLOC_PROBES)) {
+ set_bool_flag("DTraceAllocProbes", true);
+ changed = true;
+ }
+ if (!DTraceMethodProbes && (probes & DTRACE_METHOD_PROBES)) {
+ set_bool_flag("DTraceMethodProbes", true);
+ changed = true;
+ }
+ if (!DTraceMonitorProbes && (probes & DTRACE_MONITOR_PROBES)) {
+ set_bool_flag("DTraceMonitorProbes", true);
+ changed = true;
+ }
+
+ if (changed) {
+ // one or more flags changed, need to deoptimize
+ VM_DeoptimizeTheWorld op;
+ VMThread::execute(&op);
+ }
+}
+
+// Disable only the "fine grained" flags. Do *not* touch
+// the overall "ExtendedDTraceProbes" flag.
+// Mirror image of enable_dprobes(): only flags that are currently on are
+// switched off, with a single deoptimization if anything changed.
+void DTrace::disable_dprobes(int probes) {
+ bool changed = false;
+ if (DTraceAllocProbes && (probes & DTRACE_ALLOC_PROBES)) {
+ set_bool_flag("DTraceAllocProbes", false);
+ changed = true;
+ }
+ if (DTraceMethodProbes && (probes & DTRACE_METHOD_PROBES)) {
+ set_bool_flag("DTraceMethodProbes", false);
+ changed = true;
+ }
+ if (DTraceMonitorProbes && (probes & DTRACE_MONITOR_PROBES)) {
+ set_bool_flag("DTraceMonitorProbes", false);
+ changed = true;
+ }
+ if (changed) {
+ // one or more flags changed, need to deoptimize
+ VM_DeoptimizeTheWorld op;
+ VMThread::execute(&op);
+ }
+}
+
+// Do clean-up on "all door clients detached" event.
+// Re-syncs the fine grained flags with the master switch; no-op flag-wise
+// if they already agree (enable/disable_dprobes only touch changed flags).
+void DTrace::detach_all_clients() {
+ /*
+ * We restore the state of the fine grained flags
+ * to be consistent with overall ExtendedDTraceProbes.
+ * This way, we will honour command line setting or the
+ * last explicit modification of ExtendedDTraceProbes by
+ * a call to set_extended_dprobes.
+ */
+ if (ExtendedDTraceProbes) {
+ enable_dprobes(DTRACE_ALL_PROBES);
+ } else {
+ disable_dprobes(DTRACE_ALL_PROBES);
+ }
+}
+
+// Sets the master ExtendedDTraceProbes flag and propagates the new state
+// to all three fine grained probe flags.
+void DTrace::set_extended_dprobes(bool flag) {
+ // explicit setting of ExtendedDTraceProbes flag
+ set_bool_flag("ExtendedDTraceProbes", flag);
+
+ // make sure that the fine grained flags reflect the change.
+ if (flag) {
+ enable_dprobes(DTRACE_ALL_PROBES);
+ } else {
+ /*
+ * FIXME: Revisit this: currently all-client-detach detection
+ * does not work and hence disabled. The following scheme does
+ * not work. So, we have to disable fine-grained flags here.
+ *
+ * disable_dprobes call has to be delayed till next "detach all "event.
+ * This is to be done so that concurrent DTrace clients that may
+ * have enabled one or more fine grained dprobes and may be running
+ * still. On "detach all" clients event, we would sync ExtendedDTraceProbes
+ * with fine grained flags which would take care of disabling fine grained flags.
+ */
+ disable_dprobes(DTRACE_ALL_PROBES);
+ }
+}
+
+#endif /* SOLARIS */
diff --git a/src/share/vm/services/dtraceAttacher.hpp b/src/share/vm/services/dtraceAttacher.hpp
new file mode 100644
index 000000000..089decd9d
--- /dev/null
+++ b/src/share/vm/services/dtraceAttacher.hpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#define DTRACE_ALLOC_PROBES 0x1
+#define DTRACE_METHOD_PROBES 0x2
+#define DTRACE_MONITOR_PROBES 0x4
+#define DTRACE_ALL_PROBES (DTRACE_ALLOC_PROBES | \
+ DTRACE_METHOD_PROBES | \
+ DTRACE_MONITOR_PROBES)
+
+// Static-only facade used by the attach mechanism (Solaris) to toggle the
+// DTrace probe flags; each state change deoptimizes compiled code so the
+// probes take effect immediately.
+class DTrace : public AllStatic {
+ private:
+ // disable one or more probes - OR above constants
+ static void disable_dprobes(int probe_types);
+
+ public:
+ // enable one or more probes - OR above constants
+ static void enable_dprobes(int probe_types);
+ // all clients detached, do any clean-up
+ static void detach_all_clients();
+ // set ExtendedDTraceProbes flag
+ static void set_extended_dprobes(bool value);
+};
diff --git a/src/share/vm/services/heapDumper.cpp b/src/share/vm/services/heapDumper.cpp
new file mode 100644
index 000000000..cdd79a618
--- /dev/null
+++ b/src/share/vm/services/heapDumper.cpp
@@ -0,0 +1,1773 @@
+/*
+ * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_heapDumper.cpp.incl"
+
+/*
+ * HPROF binary format - description copied from:
+ * src/share/demo/jvmti/hprof/hprof_io.c
+ *
+ *
+ * header "JAVA PROFILE 1.0.1" or "JAVA PROFILE 1.0.2"
+ * (0-terminated)
+ *
+ * u4 size of identifiers. Identifiers are used to represent
+ * UTF8 strings, objects, stack traces, etc. They usually
+ * have the same size as host pointers. For example, on
+ * Solaris and Win32, the size is 4.
+ * u4 high word
+ * u4 low word number of milliseconds since 0:00 GMT, 1/1/70
+ * [record]* a sequence of records.
+ *
+ *
+ * Record format:
+ *
+ * u1 a TAG denoting the type of the record
+ * u4 number of *microseconds* since the time stamp in the
+ * header. (wraps around in a little more than an hour)
+ * u4 number of bytes *remaining* in the record. Note that
+ * this number excludes the tag and the length field itself.
+ * [u1]* BODY of the record (a sequence of bytes)
+ *
+ *
+ * The following TAGs are supported:
+ *
+ * TAG BODY notes
+ *----------------------------------------------------------
+ * HPROF_UTF8 a UTF8-encoded name
+ *
+ * id name ID
+ * [u1]* UTF8 characters (no trailing zero)
+ *
+ * HPROF_LOAD_CLASS a newly loaded class
+ *
+ * u4 class serial number (> 0)
+ * id class object ID
+ * u4 stack trace serial number
+ * id class name ID
+ *
+ * HPROF_UNLOAD_CLASS an unloading class
+ *
+ * u4 class serial_number
+ *
+ * HPROF_FRAME a Java stack frame
+ *
+ * id stack frame ID
+ * id method name ID
+ * id method signature ID
+ * id source file name ID
+ * u4 class serial number
+ * i4 line number. >0: normal
+ * -1: unknown
+ * -2: compiled method
+ * -3: native method
+ *
+ * HPROF_TRACE a Java stack trace
+ *
+ * u4 stack trace serial number
+ * u4 thread serial number
+ * u4 number of frames
+ * [id]* stack frame IDs
+ *
+ *
+ * HPROF_ALLOC_SITES a set of heap allocation sites, obtained after GC
+ *
+ * u2 flags 0x0001: incremental vs. complete
+ * 0x0002: sorted by allocation vs. live
+ * 0x0004: whether to force a GC
+ * u4 cutoff ratio
+ * u4 total live bytes
+ * u4 total live instances
+ * u8 total bytes allocated
+ * u8 total instances allocated
+ * u4 number of sites that follow
+ * [u1 is_array: 0: normal object
+ * 2: object array
+ * 4: boolean array
+ * 5: char array
+ * 6: float array
+ * 7: double array
+ * 8: byte array
+ * 9: short array
+ * 10: int array
+ * 11: long array
+ * u4 class serial number (may be zero during startup)
+ * u4 stack trace serial number
+ * u4 number of bytes alive
+ * u4 number of instances alive
+ * u4 number of bytes allocated
+ * u4]* number of instances allocated
+ *
+ * HPROF_START_THREAD a newly started thread.
+ *
+ * u4 thread serial number (> 0)
+ * id thread object ID
+ * u4 stack trace serial number
+ * id thread name ID
+ * id thread group name ID
+ * id thread group parent name ID
+ *
+ * HPROF_END_THREAD a terminating thread.
+ *
+ * u4 thread serial number
+ *
+ * HPROF_HEAP_SUMMARY heap summary
+ *
+ * u4 total live bytes
+ * u4 total live instances
+ * u8 total bytes allocated
+ * u8 total instances allocated
+ *
+ * HPROF_HEAP_DUMP denote a heap dump
+ *
+ * [heap dump sub-records]*
+ *
+ * There are four kinds of heap dump sub-records:
+ *
+ * u1 sub-record type
+ *
+ * HPROF_GC_ROOT_UNKNOWN unknown root
+ *
+ * id object ID
+ *
+ * HPROF_GC_ROOT_THREAD_OBJ thread object
+ *
+ * id thread object ID (may be 0 for a
+ * thread newly attached through JNI)
+ * u4 thread sequence number
+ * u4 stack trace sequence number
+ *
+ * HPROF_GC_ROOT_JNI_GLOBAL JNI global ref root
+ *
+ * id object ID
+ * id JNI global ref ID
+ *
+ * HPROF_GC_ROOT_JNI_LOCAL JNI local ref
+ *
+ * id object ID
+ * u4 thread serial number
+ * u4 frame # in stack trace (-1 for empty)
+ *
+ * HPROF_GC_ROOT_JAVA_FRAME Java stack frame
+ *
+ * id object ID
+ * u4 thread serial number
+ * u4 frame # in stack trace (-1 for empty)
+ *
+ * HPROF_GC_ROOT_NATIVE_STACK Native stack
+ *
+ * id object ID
+ * u4 thread serial number
+ *
+ * HPROF_GC_ROOT_STICKY_CLASS System class
+ *
+ * id object ID
+ *
+ * HPROF_GC_ROOT_THREAD_BLOCK Reference from thread block
+ *
+ * id object ID
+ * u4 thread serial number
+ *
+ * HPROF_GC_ROOT_MONITOR_USED Busy monitor
+ *
+ * id object ID
+ *
+ * HPROF_GC_CLASS_DUMP dump of a class object
+ *
+ * id class object ID
+ * u4 stack trace serial number
+ * id super class object ID
+ * id class loader object ID
+ * id signers object ID
+ * id protection domain object ID
+ * id reserved
+ * id reserved
+ *
+ * u4 instance size (in bytes)
+ *
+ * u2 size of constant pool
+ * [u2, constant pool index,
+ * ty, type
+ * 2: object
+ * 4: boolean
+ * 5: char
+ * 6: float
+ * 7: double
+ * 8: byte
+ * 9: short
+ * 10: int
+ * 11: long
+ * vl]* and value
+ *
+ * u2 number of static fields
+ * [id, static field name,
+ * ty, type,
+ * vl]* and value
+ *
+ * u2 number of inst. fields (not inc. super)
+ * [id, instance field name,
+ * ty]* type
+ *
+ * HPROF_GC_INSTANCE_DUMP dump of a normal object
+ *
+ * id object ID
+ * u4 stack trace serial number
+ * id class object ID
+ * u4 number of bytes that follow
+ * [vl]* instance field values (class, followed
+ * by super, super's super ...)
+ *
+ * HPROF_GC_OBJ_ARRAY_DUMP dump of an object array
+ *
+ * id array object ID
+ * u4 stack trace serial number
+ * u4 number of elements
+ * id array class ID
+ * [id]* elements
+ *
+ * HPROF_GC_PRIM_ARRAY_DUMP dump of a primitive array
+ *
+ * id array object ID
+ * u4 stack trace serial number
+ * u4 number of elements
+ * u1 element type
+ * 4: boolean array
+ * 5: char array
+ * 6: float array
+ * 7: double array
+ * 8: byte array
+ * 9: short array
+ * 10: int array
+ * 11: long array
+ * [u1]* elements
+ *
+ * HPROF_CPU_SAMPLES a set of sample traces of running threads
+ *
+ * u4 total number of samples
+ * u4 # of traces
+ * [u4 # of samples
+ * u4]* stack trace serial number
+ *
+ * HPROF_CONTROL_SETTINGS the settings of on/off switches
+ *
+ * u4 0x00000001: alloc traces on/off
+ * 0x00000002: cpu sampling on/off
+ * u2 stack trace depth
+ *
+ *
+ * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
+ * be generated as a sequence of heap dump segments. This sequence is
+ * terminated by an end record. The additional tags allowed by format
+ * "JAVA PROFILE 1.0.2" are:
+ *
+ * HPROF_HEAP_DUMP_SEGMENT denote a heap dump segment
+ *
+ * [heap dump sub-records]*
+ * The same sub-record types allowed by HPROF_HEAP_DUMP
+ *
+ * HPROF_HEAP_DUMP_END denotes the end of a heap dump
+ *
+ */
+
+
+// HPROF tags
+// Note: the values below come from three distinct namespaces (top-level
+// record tags, field type codes, heap-dump sub-record tags); values can
+// repeat across namespaces because each is read in a different context.
+
+typedef enum {
+ // top-level records
+ HPROF_UTF8 = 0x01,
+ HPROF_LOAD_CLASS = 0x02,
+ HPROF_UNLOAD_CLASS = 0x03,
+ HPROF_FRAME = 0x04,
+ HPROF_TRACE = 0x05,
+ HPROF_ALLOC_SITES = 0x06,
+ HPROF_HEAP_SUMMARY = 0x07,
+ HPROF_START_THREAD = 0x0A,
+ HPROF_END_THREAD = 0x0B,
+ HPROF_HEAP_DUMP = 0x0C,
+ HPROF_CPU_SAMPLES = 0x0D,
+ HPROF_CONTROL_SETTINGS = 0x0E,
+
+ // 1.0.2 record types
+ HPROF_HEAP_DUMP_SEGMENT = 0x1C,
+ HPROF_HEAP_DUMP_END = 0x2C,
+
+ // field types
+ HPROF_ARRAY_OBJECT = 0x01,
+ HPROF_NORMAL_OBJECT = 0x02,
+ HPROF_BOOLEAN = 0x04,
+ HPROF_CHAR = 0x05,
+ HPROF_FLOAT = 0x06,
+ HPROF_DOUBLE = 0x07,
+ HPROF_BYTE = 0x08,
+ HPROF_SHORT = 0x09,
+ HPROF_INT = 0x0A,
+ HPROF_LONG = 0x0B,
+
+ // data-dump sub-records
+ HPROF_GC_ROOT_UNKNOWN = 0xFF,
+ HPROF_GC_ROOT_JNI_GLOBAL = 0x01,
+ HPROF_GC_ROOT_JNI_LOCAL = 0x02,
+ HPROF_GC_ROOT_JAVA_FRAME = 0x03,
+ HPROF_GC_ROOT_NATIVE_STACK = 0x04,
+ HPROF_GC_ROOT_STICKY_CLASS = 0x05,
+ HPROF_GC_ROOT_THREAD_BLOCK = 0x06,
+ HPROF_GC_ROOT_MONITOR_USED = 0x07,
+ HPROF_GC_ROOT_THREAD_OBJ = 0x08,
+ HPROF_GC_CLASS_DUMP = 0x20,
+ HPROF_GC_INSTANCE_DUMP = 0x21,
+ HPROF_GC_OBJ_ARRAY_DUMP = 0x22,
+ HPROF_GC_PRIM_ARRAY_DUMP = 0x23
+} hprofTag;
+
+// Default stack trace ID (used for dummy HPROF_TRACE record)
+// Every dumped object/class references this serial number since the
+// dumper records no per-object allocation traces.
+enum {
+ STACK_TRACE_ID = 1
+};
+
+
+// Supports I/O operations on a dump file
+// Buffered writer over a raw file descriptor. On any I/O failure the fd is
+// closed, is_open() becomes false and error() carries a strdup'd message;
+// subsequent writes are silently dropped.
+
+class DumpWriter : public StackObj {
+ private:
+ enum {
+ io_buffer_size = 8*M
+ };
+
+ int _fd; // file descriptor (-1 if dump file not open)
+ jlong _bytes_written; // number of byte written to dump file
+
+ char* _buffer; // internal buffer
+ int _size;
+ int _pos;
+
+ char* _error; // error message when I/O fails
+
+ void set_file_descriptor(int fd) { _fd = fd; }
+ int file_descriptor() const { return _fd; }
+
+ char* buffer() const { return _buffer; }
+ int buffer_size() const { return _size; }
+ int position() const { return _pos; }
+ void set_position(int pos) { _pos = pos; }
+
+ // note: strdup'd copy, freed in the destructor
+ void set_error(const char* error) { _error = (char*)os::strdup(error); }
+
+ // all I/O go through this function
+ void write_internal(void* s, int len);
+
+ public:
+ DumpWriter(const char* path);
+ ~DumpWriter();
+
+ void close();
+ bool is_open() const { return file_descriptor() >= 0; }
+ void flush();
+
+ // total number of bytes written to the disk
+ jlong bytes_written() const { return _bytes_written; }
+
+ // adjust the number of bytes written to disk (used to keep the count
+ // of the number of bytes written in case of rewrites)
+ void adjust_bytes_written(jlong n) { _bytes_written += n; }
+
+ // number of (buffered) bytes as yet unwritten to the dump file
+ jlong bytes_unwritten() const { return (jlong)position(); }
+
+ char* error() const { return _error; }
+
+ jlong current_offset();
+ void seek_to_offset(jlong pos);
+
+ // writer functions
+ // multi-byte values are written in big-endian (Java) byte order
+ void write_raw(void* s, int len);
+ void write_u1(u1 x) { write_raw((void*)&x, 1); }
+ void write_u2(u2 x);
+ void write_u4(u4 x);
+ void write_u8(u8 x);
+ void write_objectID(oop o);
+ void write_classID(Klass* k);
+};
+
+// Opens path for binary writing (fails rather than replacing an existing
+// file) and allocates the I/O buffer. Construction never fails outright:
+// on open failure the writer is left closed with error() set.
+DumpWriter::DumpWriter(const char* path) {
+ // try to allocate an I/O buffer of io_buffer_size. If there isn't
+ // sufficient memory then reduce size until we can allocate something.
+ _size = io_buffer_size;
+ do {
+ _buffer = (char*)os::malloc(_size);
+ if (_buffer == NULL) {
+ // halve and retry; _size == 0 means unbuffered operation
+ _size = _size >> 1;
+ }
+ } while (_buffer == NULL && _size > 0);
+ assert((_size > 0 && _buffer != NULL) || (_size == 0 && _buffer == NULL), "sanity check");
+ _pos = 0;
+ _error = NULL;
+ _bytes_written = 0L;
+ _fd = os::create_binary_file(path, false); // don't replace existing file
+
+ // if the open failed we record the error
+ if (_fd < 0) {
+ _error = (char*)os::strdup(strerror(errno));
+ }
+}
+
+// Flushes and closes the dump file (if still open) and releases the I/O
+// buffer and any recorded error message.
+DumpWriter::~DumpWriter() {
+ // flush and close dump file
+ if (file_descriptor() >= 0) {
+ close();
+ }
+ if (_buffer != NULL) os::free(_buffer);
+ if (_error != NULL) os::free(_error);
+}
+
+// closes dump file (if open)
+// Note: _fd is deliberately left unchanged here; the destructor guards
+// against double-close by checking file_descriptor() before calling this.
+void DumpWriter::close() {
+ // flush and close dump file
+ if (file_descriptor() >= 0) {
+ flush();
+ ::close(file_descriptor());
+ }
+}
+
+// write directly to the file
+// Single choke point for real I/O. A short or failed write records an
+// error message and closes the writer, so all later writes become no-ops.
+// NOTE(review): a partial write (0 < n < len) is treated as "file size
+// limit" and not retried -- confirm callers tolerate truncated dumps.
+void DumpWriter::write_internal(void* s, int len) {
+ if (is_open()) {
+ int n = ::write(file_descriptor(), s, len);
+ if (n > 0) {
+ _bytes_written += n;
+ }
+ if (n != len) {
+ if (n < 0) {
+ set_error(strerror(errno));
+ } else {
+ set_error("file size limit");
+ }
+ ::close(file_descriptor());
+ set_file_descriptor(-1);
+ }
+ }
+}
+
+// write raw bytes
+// Small writes are accumulated in the buffer; writes that don't fit (or
+// when no buffer could be allocated) bypass it and go straight to disk.
+void DumpWriter::write_raw(void* s, int len) {
+ if (is_open()) {
+ // flush buffer to make room
+ if ((position()+ len) >= buffer_size()) {
+ flush();
+ }
+
+ // buffer not available or too big to buffer it
+ if ((buffer() == NULL) || (len >= buffer_size())) {
+ write_internal(s, len);
+ } else {
+ // Should optimize this for u1/u2/u4/u8 sizes.
+ memcpy(buffer() + position(), s, len);
+ set_position(position() + len);
+ }
+ }
+}
+
+// flush any buffered bytes to the file
+void DumpWriter::flush() {
+ if (is_open() && position() > 0) {
+ write_internal(buffer(), position());
+ set_position(0);
+ }
+}
+
+
+// Returns the logical write position (file offset plus bytes still
+// buffered), or -1 if the writer is closed.
+jlong DumpWriter::current_offset() {
+ if (is_open()) {
+ // the offset is the file offset plus whatever we have buffered
+ jlong offset = os::current_file_offset(file_descriptor());
+ assert(offset >= 0, "lseek failed");
+ return offset + (jlong)position();
+ } else {
+ return (jlong)-1;
+ }
+}
+
+// Repositions the file to off (used to back-patch record lengths). The
+// buffer is flushed first so buffered bytes land at their real offset.
+void DumpWriter::seek_to_offset(jlong off) {
+ assert(off >= 0, "bad offset");
+
+ // need to flush before seeking
+ flush();
+
+ // may be closed due to I/O error
+ if (is_open()) {
+ jlong n = os::seek_to_file_offset(file_descriptor(), off);
+ assert(n >= 0, "lseek failed");
+ }
+}
+
+// The write_u2/u4/u8 writers convert to Java (big-endian) byte order via
+// Bytes::put_Java_* before emitting, as the HPROF format requires.
+void DumpWriter::write_u2(u2 x) {
+ u2 v;
+ Bytes::put_Java_u2((address)&v, x);
+ write_raw((void*)&v, 2);
+}
+
+void DumpWriter::write_u4(u4 x) {
+ u4 v;
+ Bytes::put_Java_u4((address)&v, x);
+ write_raw((void*)&v, 4);
+}
+
+void DumpWriter::write_u8(u8 x) {
+ u8 v;
+ Bytes::put_Java_u8((address)&v, x);
+ write_raw((void*)&v, 8);
+}
+
+// Writes the oop's address as the HPROF object ID; the ID width (u4/u8)
+// matches the platform pointer size declared in the dump header.
+void DumpWriter::write_objectID(oop o) {
+ address a = (address)((uintptr_t)o);
+#ifdef _LP64
+ write_u8((u8)a);
+#else
+ write_u4((u4)a);
+#endif
+}
+
+// We use java mirror as the class ID
+void DumpWriter::write_classID(Klass* k) {
+ write_objectID(k->java_mirror());
+}
+
+
+
+// Support class with a collection of functions used when dumping the heap
+// All functions are stateless helpers that emit HPROF sub-records through
+// the supplied DumpWriter.
+
+class DumperSupport : AllStatic {
+ public:
+
+ // write a header of the given type
+ static void write_header(DumpWriter* writer, hprofTag tag, u4 len);
+
+ // returns hprof tag for the given type signature
+ static hprofTag sig2tag(symbolOop sig);
+ // returns hprof tag for the given basic type
+ static hprofTag type2tag(BasicType type);
+
+ // returns the size of the instance of the given class
+ // (size as serialized in the dump, not the in-heap object size)
+ static u4 instance_size(klassOop k);
+
+ // dump a jfloat
+ static void dump_float(DumpWriter* writer, jfloat f);
+ // dump a jdouble
+ static void dump_double(DumpWriter* writer, jdouble d);
+ // dumps the raw value of the given field
+ static void dump_field_value(DumpWriter* writer, char type, address addr);
+ // dumps static fields of the given class
+ static void dump_static_fields(DumpWriter* writer, klassOop k);
+ // dump the raw values of the instance fields of the given object
+ static void dump_instance_fields(DumpWriter* writer, oop o);
+ // dumps the definition of the instance fields for a given class
+ static void dump_instance_field_descriptors(DumpWriter* writer, klassOop k);
+ // creates HPROF_GC_INSTANCE_DUMP record for the given object
+ static void dump_instance(DumpWriter* writer, oop o);
+ // creates HPROF_GC_CLASS_DUMP record for the given class and each of its
+ // array classes
+ static void dump_class_and_array_classes(DumpWriter* writer, klassOop k);
+ // creates HPROF_GC_CLASS_DUMP record for a given primitive array
+ // class (and each multi-dimensional array class too)
+ static void dump_basic_type_array_class(DumpWriter* writer, klassOop k);
+
+ // creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
+ static void dump_object_array(DumpWriter* writer, objArrayOop array);
+ // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
+ static void dump_prim_array(DumpWriter* writer, typeArrayOop array);
+};
+
+// write a header of the given type
+// Record header layout: u1 tag, u4 ticks (always 0 here), u4 body length.
+void DumperSupport:: write_header(DumpWriter* writer, hprofTag tag, u4 len) {
+ writer->write_u1((u1)tag);
+ writer->write_u4(0); // current ticks
+ writer->write_u4(len);
+}
+
+// returns hprof tag for the given type signature
+// Dispatches on the first character of the JVM type signature; both
+// classes and arrays map to HPROF_NORMAL_OBJECT (a reference field).
+hprofTag DumperSupport::sig2tag(symbolOop sig) {
+ switch (sig->byte_at(0)) {
+ case JVM_SIGNATURE_CLASS : return HPROF_NORMAL_OBJECT;
+ case JVM_SIGNATURE_ARRAY : return HPROF_NORMAL_OBJECT;
+ case JVM_SIGNATURE_BYTE : return HPROF_BYTE;
+ case JVM_SIGNATURE_CHAR : return HPROF_CHAR;
+ case JVM_SIGNATURE_FLOAT : return HPROF_FLOAT;
+ case JVM_SIGNATURE_DOUBLE : return HPROF_DOUBLE;
+ case JVM_SIGNATURE_INT : return HPROF_INT;
+ case JVM_SIGNATURE_LONG : return HPROF_LONG;
+ case JVM_SIGNATURE_SHORT : return HPROF_SHORT;
+ case JVM_SIGNATURE_BOOLEAN : return HPROF_BOOLEAN;
+ default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
+ }
+}
+
+// Maps a VM BasicType (primitives only) to the corresponding HPROF
+// element-type tag; reference types are not expected here.
+hprofTag DumperSupport::type2tag(BasicType type) {
+ switch (type) {
+ case T_BYTE : return HPROF_BYTE;
+ case T_CHAR : return HPROF_CHAR;
+ case T_FLOAT : return HPROF_FLOAT;
+ case T_DOUBLE : return HPROF_DOUBLE;
+ case T_INT : return HPROF_INT;
+ case T_LONG : return HPROF_LONG;
+ case T_SHORT : return HPROF_SHORT;
+ case T_BOOLEAN : return HPROF_BOOLEAN;
+ default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
+ }
+}
+
+// dump a jfloat
+// Writes the IEEE-754 bit pattern as a big-endian u4; all NaN payloads are
+// collapsed to the canonical quiet NaN 0x7fc00000.
+void DumperSupport::dump_float(DumpWriter* writer, jfloat f) {
+ if (g_isnan(f)) {
+ writer->write_u4(0x7fc00000); // collapsing NaNs
+ } else {
+ union {
+ int i;
+ float f;
+ } u;
+ u.f = (float)f;
+ writer->write_u4((u4)u.i);
+ }
+}
+
+// dump a jdouble
+// Writes the IEEE-754 bit pattern as a big-endian u8; all NaN payloads are
+// collapsed to the canonical quiet NaN 0x7ff8000000000000.
+void DumperSupport::dump_double(DumpWriter* writer, jdouble d) {
+ union {
+ jlong l;
+ double d;
+ } u;
+ if (g_isnan(d)) { // collapsing NaNs
+ u.l = (jlong)(0x7ff80000);
+ u.l = (u.l << 32);
+ } else {
+ u.d = (double)d;
+ }
+ writer->write_u8((u8)u.l);
+}
+
+// dumps the raw value of the given field
+// type is the first character of the field's JVM signature; addr is the
+// address of the field slot. References to klassOops and to shared-space
+// objects are written as NULL object IDs (see comments below).
+void DumperSupport::dump_field_value(DumpWriter* writer, char type, address addr) {
+ switch (type) {
+ case JVM_SIGNATURE_CLASS :
+ case JVM_SIGNATURE_ARRAY : {
+ oop* f = (oop*)addr;
+ oop o = *f;
+
+ // reflection and sun.misc.Unsafe classes may have a reference to a
+ // klassOop so filter it out.
+ if (o != NULL && o->is_klass()) {
+ o = NULL;
+ }
+
+ // FIXME: When sharing is enabled we don't emit field references to objects
+ // in shared spaces. We can remove this once we write records for the classes
+ // and strings that are shared.
+ if (o != NULL && o->is_shared()) {
+ o = NULL;
+ }
+ writer->write_objectID(o);
+ break;
+ }
+ case JVM_SIGNATURE_BYTE : {
+ jbyte* b = (jbyte*)addr;
+ writer->write_u1((u1)*b);
+ break;
+ }
+ case JVM_SIGNATURE_CHAR : {
+ jchar* c = (jchar*)addr;
+ writer->write_u2((u2)*c);
+ break;
+ }
+ case JVM_SIGNATURE_SHORT : {
+ jshort* s = (jshort*)addr;
+ writer->write_u2((u2)*s);
+ break;
+ }
+ case JVM_SIGNATURE_FLOAT : {
+ jfloat* f = (jfloat*)addr;
+ dump_float(writer, *f);
+ break;
+ }
+ case JVM_SIGNATURE_DOUBLE : {
+ jdouble* f = (jdouble*)addr;
+ dump_double(writer, *f);
+ break;
+ }
+ case JVM_SIGNATURE_INT : {
+ jint* i = (jint*)addr;
+ writer->write_u4((u4)*i);
+ break;
+ }
+ case JVM_SIGNATURE_LONG : {
+ jlong* l = (jlong*)addr;
+ writer->write_u8((u8)*l);
+ break;
+ }
+ case JVM_SIGNATURE_BOOLEAN : {
+ jboolean* b = (jboolean*)addr;
+ writer->write_u1((u1)*b);
+ break;
+ }
+ default : ShouldNotReachHere();
+ }
+}
+
+// returns the size of the instance of the given class
+// This is the HPROF-serialized size: the sum of the field widths as they
+// are written to the dump (reference fields count as oopSize), which may
+// differ from the in-heap object size (no header, no padding).
+u4 DumperSupport::instance_size(klassOop k) {
+ HandleMark hm;
+ instanceKlassHandle ikh = instanceKlassHandle(Thread::current(), k);
+
+ int size = 0;
+
+ // non-static fields of this class and all super classes
+ for (FieldStream fld(ikh, false, false); !fld.eos(); fld.next()) {
+ if (!fld.access_flags().is_static()) {
+ symbolOop sig = fld.signature();
+ switch (sig->byte_at(0)) {
+ case JVM_SIGNATURE_CLASS :
+ case JVM_SIGNATURE_ARRAY : size += oopSize; break;
+
+ case JVM_SIGNATURE_BYTE :
+ case JVM_SIGNATURE_BOOLEAN : size += 1; break;
+
+ case JVM_SIGNATURE_CHAR :
+ case JVM_SIGNATURE_SHORT : size += 2; break;
+
+ case JVM_SIGNATURE_INT :
+ case JVM_SIGNATURE_FLOAT : size += 4; break;
+
+ case JVM_SIGNATURE_LONG :
+ case JVM_SIGNATURE_DOUBLE : size += 8; break;
+
+ default : ShouldNotReachHere();
+ }
+ }
+ }
+ return (u4)size;
+}
+
+// dumps static fields of the given class
+// Emits the static-fields section of a HPROF_GC_CLASS_DUMP: a u2 count
+// followed by (name ID, type tag, value) triples. Two passes over the
+// field stream: count first, then emit.
+void DumperSupport::dump_static_fields(DumpWriter* writer, klassOop k) {
+ HandleMark hm;
+ instanceKlassHandle ikh = instanceKlassHandle(Thread::current(), k);
+
+ // pass 1 - count the static fields
+ u2 field_count = 0;
+ for (FieldStream fldc(ikh, true, true); !fldc.eos(); fldc.next()) {
+ if (fldc.access_flags().is_static()) field_count++;
+ }
+
+ writer->write_u2(field_count);
+
+ // pass 2 - dump the field descriptors and raw values
+ for (FieldStream fld(ikh, true, true); !fld.eos(); fld.next()) {
+ if (fld.access_flags().is_static()) {
+ symbolOop sig = fld.signature();
+
+ writer->write_objectID(fld.name()); // name
+ writer->write_u1(sig2tag(sig)); // type
+
+ // value: static fields live at an offset from the klassOop itself
+ int offset = fld.offset();
+ address addr = (address)k + offset;
+
+ dump_field_value(writer, sig->byte_at(0), addr);
+ }
+ }
+}
+
+// dump the raw values of the instance fields of the given object
+// Field order matches dump_instance_field_descriptors() so readers can
+// pair each value with its descriptor.
+void DumperSupport::dump_instance_fields(DumpWriter* writer, oop o) {
+ HandleMark hm;
+ instanceKlassHandle ikh = instanceKlassHandle(Thread::current(), o->klass());
+
+ for (FieldStream fld(ikh, false, false); !fld.eos(); fld.next()) {
+ if (!fld.access_flags().is_static()) {
+ symbolOop sig = fld.signature();
+ address addr = (address)o + fld.offset();
+
+ dump_field_value(writer, sig->byte_at(0), addr);
+ }
+ }
+}
+
+// dumps the definition of the instance fields for a given class
+// Emits the instance-fields section of a HPROF_GC_CLASS_DUMP: a u2 count
+// of this class's own (non-inherited) non-static fields followed by
+// (name ID, type tag) pairs.
+void DumperSupport::dump_instance_field_descriptors(DumpWriter* writer, klassOop k) {
+ HandleMark hm;
+ instanceKlassHandle ikh = instanceKlassHandle(Thread::current(), k);
+
+ // pass 1 - count the instance fields
+ u2 field_count = 0;
+ for (FieldStream fldc(ikh, true, true); !fldc.eos(); fldc.next()) {
+ if (!fldc.access_flags().is_static()) field_count++;
+ }
+
+ writer->write_u2(field_count);
+
+ // pass 2 - dump the field descriptors
+ for (FieldStream fld(ikh, true, true); !fld.eos(); fld.next()) {
+ if (!fld.access_flags().is_static()) {
+ symbolOop sig = fld.signature();
+
+ writer->write_objectID(fld.name()); // name
+ writer->write_u1(sig2tag(sig)); // type
+ }
+ }
+}
+
+// creates HPROF_GC_INSTANCE_DUMP record for the given object
+// Layout: object ID, stack trace serial, class ID, byte count, then the
+// raw field values (own class first, then supers).
+void DumperSupport::dump_instance(DumpWriter* writer, oop o) {
+ klassOop k = o->klass();
+
+ writer->write_u1(HPROF_GC_INSTANCE_DUMP);
+ writer->write_objectID(o);
+ writer->write_u4(STACK_TRACE_ID);
+
+ // class ID
+ writer->write_classID(Klass::cast(k));
+
+ // number of bytes that follow
+ writer->write_u4(instance_size(k) );
+
+ // field values
+ dump_instance_fields(writer, o);
+}
+
+// creates HPROF_GC_CLASS_DUMP record for the given class and each of
+// its array classes
+// First emits the full class dump for the instance class, then a minimal
+// class dump (no fields, zero instance size) for each object-array class
+// of increasing rank reachable via array_klass_or_null().
+void DumperSupport::dump_class_and_array_classes(DumpWriter* writer, klassOop k) {
+ Klass* klass = Klass::cast(k);
+ assert(klass->oop_is_instance(), "not an instanceKlass");
+ instanceKlass* ik = (instanceKlass*)klass;
+
+ writer->write_u1(HPROF_GC_CLASS_DUMP);
+
+ // class ID
+ writer->write_classID(ik);
+ writer->write_u4(STACK_TRACE_ID);
+
+ // super class ID
+ klassOop java_super = ik->java_super();
+ if (java_super == NULL) {
+ // only java.lang.Object has no super
+ writer->write_objectID(NULL);
+ } else {
+ writer->write_classID(Klass::cast(java_super));
+ }
+
+ writer->write_objectID(ik->class_loader());
+ writer->write_objectID(ik->signers());
+ writer->write_objectID(ik->protection_domain());
+
+ // reserved
+ writer->write_objectID(NULL);
+ writer->write_objectID(NULL);
+
+ // instance size
+ writer->write_u4(DumperSupport::instance_size(k));
+
+ // size of constant pool - ignored by HAT 1.1
+ writer->write_u2(0);
+
+ // number of static fields
+ dump_static_fields(writer, k);
+
+ // description of instance fields
+ dump_instance_field_descriptors(writer, k);
+
+ // array classes
+ k = klass->array_klass_or_null();
+ while (k != NULL) {
+ Klass* klass = Klass::cast(k);
+ assert(klass->oop_is_objArray(), "not an objArrayKlass");
+
+ writer->write_u1(HPROF_GC_CLASS_DUMP);
+ writer->write_classID(klass);
+ writer->write_u4(STACK_TRACE_ID);
+
+ // super class of array classes is java.lang.Object
+ java_super = klass->java_super();
+ assert(java_super != NULL, "checking");
+ writer->write_classID(Klass::cast(java_super));
+
+ // array classes share the element class's loader/signers/domain
+ writer->write_objectID(ik->class_loader());
+ writer->write_objectID(ik->signers());
+ writer->write_objectID(ik->protection_domain());
+
+ writer->write_objectID(NULL); // reserved
+ writer->write_objectID(NULL);
+ writer->write_u4(0); // instance size
+ writer->write_u2(0); // constant pool
+ writer->write_u2(0); // static fields
+ writer->write_u2(0); // instance fields
+
+ // get the array class for the next rank
+ k = klass->array_klass_or_null();
+ }
+}
+
+// creates HPROF_GC_CLASS_DUMP record for a given primitive array
+// class (and each multi-dimensional array class too)
+// Each rank gets a minimal class dump (no loader, no fields, zero
+// instance size); iteration follows array_klass_or_null() upward in rank.
+void DumperSupport::dump_basic_type_array_class(DumpWriter* writer, klassOop k) {
+ // array classes
+ while (k != NULL) {
+ Klass* klass = Klass::cast(k);
+
+ writer->write_u1(HPROF_GC_CLASS_DUMP);
+ writer->write_classID(klass);
+ writer->write_u4(STACK_TRACE_ID);
+
+ // super class of array classes is java.lang.Object
+ klassOop java_super = klass->java_super();
+ assert(java_super != NULL, "checking");
+ writer->write_classID(Klass::cast(java_super));
+
+ writer->write_objectID(NULL); // loader
+ writer->write_objectID(NULL); // signers
+ writer->write_objectID(NULL); // protection domain
+
+ writer->write_objectID(NULL); // reserved
+ writer->write_objectID(NULL);
+ writer->write_u4(0); // instance size
+ writer->write_u2(0); // constant pool
+ writer->write_u2(0); // static fields
+ writer->write_u2(0); // instance fields
+
+ // get the array class for the next rank
+ k = klass->array_klass_or_null();
+ }
+}
+
+// creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array
+void DumperSupport::dump_object_array(DumpWriter* writer, objArrayOop array) {
+
+  // filter this - the VM-internal system object array is not part of the
+  // Java heap as seen by tools reading the dump
+  if (array->klass() == Universe::systemObjArrayKlassObj()) return;
+
+  writer->write_u1(HPROF_GC_OBJ_ARRAY_DUMP);
+  writer->write_objectID(array);
+  writer->write_u4(STACK_TRACE_ID);
+  writer->write_u4((u4)array->length());
+
+  // array class ID
+  writer->write_classID(Klass::cast(array->klass()));
+
+  // [id]* elements (NULL elements are written as a 0 object ID)
+  for (int index=0; index<array->length(); index++) {
+    oop o = array->obj_at(index);
+    writer->write_objectID(o);
+  }
+}
+
+// Writes every element of 'Array' through the in-scope 'writer', casting each
+// element to the on-disk width 'Size'. The 'Array' parameter is now used
+// consistently: the original mixed the macro parameter (for length()) with a
+// hard-coded identifier 'array' (for element access), which only compiled
+// because every call site happened to pass a variable named 'array'.
+// Arguments are parenthesized to stay safe with non-trivial expressions.
+#define WRITE_ARRAY(Array, Type, Size) \
+  for (int i=0; i<(Array)->length(); i++) { writer->write_##Size((Size)(Array)->Type##_at(i)); }
+
+
+// creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array
+void DumperSupport::dump_prim_array(DumpWriter* writer, typeArrayOop array) {
+  BasicType type = typeArrayKlass::cast(array->klass())->element_type();
+
+  writer->write_u1(HPROF_GC_PRIM_ARRAY_DUMP);
+  writer->write_objectID(array);
+  writer->write_u4(STACK_TRACE_ID);
+  writer->write_u4((u4)array->length());
+  writer->write_u1(type2tag(type));
+
+  // nothing to copy
+  if (array->length() == 0) {
+    return;
+  }
+
+  // If the byte ordering is big endian then we can copy most types directly.
+  // Otherwise each element must be written individually so the bytes land in
+  // the (big-endian) order the HPROF format requires.
+  int length_in_bytes = array->length() * type2aelembytes[type];
+  assert(length_in_bytes > 0, "nothing to copy");
+
+  switch (type) {
+    case T_INT : {
+      if (Bytes::is_Java_byte_ordering_different()) {
+        WRITE_ARRAY(array, int, u4);
+      } else {
+        writer->write_raw((void*)(array->int_at_addr(0)), length_in_bytes);
+      }
+      break;
+    }
+    case T_BYTE : {
+      // single-byte elements have no ordering issue - always bulk copy
+      writer->write_raw((void*)(array->byte_at_addr(0)), length_in_bytes);
+      break;
+    }
+    case T_CHAR : {
+      if (Bytes::is_Java_byte_ordering_different()) {
+        WRITE_ARRAY(array, char, u2);
+      } else {
+        writer->write_raw((void*)(array->char_at_addr(0)), length_in_bytes);
+      }
+      break;
+    }
+    case T_SHORT : {
+      if (Bytes::is_Java_byte_ordering_different()) {
+        WRITE_ARRAY(array, short, u2);
+      } else {
+        writer->write_raw((void*)(array->short_at_addr(0)), length_in_bytes);
+      }
+      break;
+    }
+    case T_BOOLEAN : {
+      if (Bytes::is_Java_byte_ordering_different()) {
+        WRITE_ARRAY(array, bool, u1);
+      } else {
+        writer->write_raw((void*)(array->bool_at_addr(0)), length_in_bytes);
+      }
+      break;
+    }
+    case T_LONG : {
+      if (Bytes::is_Java_byte_ordering_different()) {
+        WRITE_ARRAY(array, long, u8);
+      } else {
+        writer->write_raw((void*)(array->long_at_addr(0)), length_in_bytes);
+      }
+      break;
+    }
+
+    // handle float/doubles in a special value to ensure than NaNs are
+    // written correctly. TO DO: Check if we can avoid this on processors that
+    // use IEEE 754.
+
+    case T_FLOAT : {
+      for (int i=0; i<array->length(); i++) {
+        dump_float( writer, array->float_at(i) );
+      }
+      break;
+    }
+    case T_DOUBLE : {
+      for (int i=0; i<array->length(); i++) {
+        dump_double( writer, array->double_at(i) );
+      }
+      break;
+    }
+    default : ShouldNotReachHere();
+  }
+}
+
+
+// Support class used to generate HPROF_UTF8 records from the entries in the
+// SymbolTable.
+
+class SymbolTableDumper : public OopClosure {
+ private:
+  DumpWriter* _writer;
+  DumpWriter* writer() const { return _writer; }
+ public:
+  SymbolTableDumper(DumpWriter* writer) { _writer = writer; }
+  // called for each symbol in the table; writes one HPROF_UTF8 record
+  void do_oop(oop* obj_p);
+};
+
+void SymbolTableDumper::do_oop(oop* obj_p) {
+  ResourceMark rm;   // as_utf8() resource-allocates the string
+  symbolOop sym = (symbolOop)*obj_p;
+
+  // zero-length symbols produce no record
+  int len = sym->utf8_length();
+  if (len > 0) {
+    char* s = sym->as_utf8();
+    // record body = symbol ID followed by the (unterminated) UTF8 bytes
+    DumperSupport::write_header(writer(), HPROF_UTF8, oopSize + len);
+    writer()->write_objectID(sym);
+    writer()->write_raw(s, len);
+  }
+}
+
+
+// Support class used to generate HPROF_GC_ROOT_JNI_LOCAL records
+
+class JNILocalsDumper : public OopClosure {
+ private:
+  DumpWriter* _writer;
+  u4 _thread_serial_num;     // serial number of the thread owning the locals
+  DumpWriter* writer() const { return _writer; }
+ public:
+  JNILocalsDumper(DumpWriter* writer, u4 thread_serial_num) {
+    _writer = writer;
+    _thread_serial_num = thread_serial_num;
+  }
+  void do_oop(oop* obj_p);
+};
+
+
+// writes a HPROF_GC_ROOT_JNI_LOCAL sub-record for each live JNI local handle
+void JNILocalsDumper::do_oop(oop* obj_p) {
+  // ignore null or deleted handles
+  oop o = *obj_p;
+  if (o != NULL && o != JNIHandles::deleted_handle()) {
+    writer()->write_u1(HPROF_GC_ROOT_JNI_LOCAL);
+    writer()->write_objectID(o);
+    writer()->write_u4(_thread_serial_num);
+    writer()->write_u4((u4)-1); // empty (frame number unknown)
+  }
+}
+
+
+// Support class used to generate HPROF_GC_ROOT_JNI_GLOBAL records
+
+class JNIGlobalsDumper : public OopClosure {
+ private:
+  DumpWriter* _writer;
+  DumpWriter* writer() const { return _writer; }
+
+ public:
+  JNIGlobalsDumper(DumpWriter* writer) {
+    _writer = writer;
+  }
+  void do_oop(oop* obj_p);
+};
+
+// writes a HPROF_GC_ROOT_JNI_GLOBAL sub-record for each live JNI global
+// handle that refers to a regular Java object (instance or array); global
+// refs to symbols and other internal objects are deliberately skipped
+void JNIGlobalsDumper::do_oop(oop* obj_p) {
+  oop o = *obj_p;
+
+  // ignore these
+  if (o == NULL || o == JNIHandles::deleted_handle()) return;
+
+  // we ignore global ref to symbols and other internal objects
+  if (o->is_instance() || o->is_objArray() || o->is_typeArray()) {
+    writer()->write_u1(HPROF_GC_ROOT_JNI_GLOBAL);
+    writer()->write_objectID(o);
+    writer()->write_objectID((oopDesc*)obj_p); // global ref ID (the handle address)
+  }
+}
+
+
+// Support class used to generate HPROF_GC_ROOT_MONITOR_USED records
+
+class MonitorUsedDumper : public OopClosure {
+ private:
+  DumpWriter* _writer;
+  DumpWriter* writer() const { return _writer; }
+ public:
+  MonitorUsedDumper(DumpWriter* writer) {
+    _writer = writer;
+  }
+  // writes one HPROF_GC_ROOT_MONITOR_USED sub-record per object
+  void do_oop(oop* obj_p) {
+    writer()->write_u1(HPROF_GC_ROOT_MONITOR_USED);
+    writer()->write_objectID(*obj_p);
+  }
+};
+
+
+// Support class used to generate HPROF_GC_ROOT_STICKY_CLASS records
+
+class StickyClassDumper : public OopClosure {
+ private:
+  DumpWriter* _writer;
+  DumpWriter* writer() const { return _writer; }
+ public:
+  StickyClassDumper(DumpWriter* writer) {
+    _writer = writer;
+  }
+  void do_oop(oop* obj_p);
+};
+
+// writes a HPROF_GC_ROOT_STICKY_CLASS sub-record for each instance class;
+// non-klass oops and non-instance klasses (e.g. array klasses) are skipped
+void StickyClassDumper::do_oop(oop* obj_p) {
+  if (*obj_p != NULL) {
+    oop o = *obj_p;
+    if (o->is_klass()) {
+      klassOop k = klassOop(o);
+      if (Klass::cast(k)->oop_is_instance()) {
+        instanceKlass* ik = instanceKlass::cast(k);
+        writer()->write_u1(HPROF_GC_ROOT_STICKY_CLASS);
+        writer()->write_classID(ik);
+      }
+    }
+  }
+}
+
+
+class VM_HeapDumper;
+
+// Support class using when iterating over the heap.
+// Writes one sub-record per object and notifies the VM operation after each
+// record so that segmented dumps can be split at record boundaries.
+
+class HeapObjectDumper : public ObjectClosure {
+ private:
+  VM_HeapDumper* _dumper;
+  DumpWriter* _writer;
+
+  VM_HeapDumper* dumper() { return _dumper; }
+  DumpWriter* writer() { return _writer; }
+
+  // used to indicate that a record has been written
+  void mark_end_of_record();
+
+ public:
+  HeapObjectDumper(VM_HeapDumper* dumper, DumpWriter* writer) {
+    _dumper = dumper;
+    _writer = writer;
+  }
+
+  // called for each object in the heap
+  void do_object(oop o);
+};
+
+// Emits the appropriate HPROF sub-record for one heap object:
+// HPROF_GC_INSTANCE_DUMP, HPROF_GC_OBJ_ARRAY_DUMP or HPROF_GC_PRIM_ARRAY_DUMP.
+// Klass objects, the deleted-handle sentinel, and non-primitive mirrors of
+// java.lang.Class (emitted separately as HPROF_GC_CLASS_DUMP) are skipped.
+void HeapObjectDumper::do_object(oop o) {
+  // hide the sentinel for deleted handles
+  if (o == JNIHandles::deleted_handle()) return;
+
+  // klass objects are VM metadata, not part of the Java heap view
+  if (o->is_klass()) return;
+
+  // class mirrors are emitted as HPROF_GC_CLASS_DUMP records; only the
+  // mirrors of primitive types are dumped as plain instances here
+  if (o->klass() == SystemDictionary::class_klass() &&
+      !java_lang_Class::is_primitive(o)) {
+    return;
+  }
+
+  if (o->is_instance()) {
+    // HPROF_GC_INSTANCE record
+    DumperSupport::dump_instance(writer(), o);
+    mark_end_of_record();
+  } else if (o->is_objArray()) {
+    // HPROF_GC_OBJ_ARRAY_DUMP record
+    DumperSupport::dump_object_array(writer(), objArrayOop(o));
+    mark_end_of_record();
+  } else if (o->is_typeArray()) {
+    // HPROF_GC_PRIM_ARRAY_DUMP record
+    DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
+    mark_end_of_record();
+  }
+}
+
+// The VM operation that performs the heap dump
+class VM_HeapDumper : public VM_GC_Operation {
+ private:
+  DumpWriter* _writer;             // destination for all dump output
+  bool _gc_before_heap_dump;       // request a full GC before dumping
+  bool _is_segmented_dump;         // true => 1.0.2 format with dump segments
+  jlong _dump_start;               // offset of current record's length field, -1 if none
+
+  // accessors
+  DumpWriter* writer() const { return _writer; }
+  bool is_segmented_dump() const { return _is_segmented_dump; }
+  void set_segmented_dump() { _is_segmented_dump = true; }
+  jlong dump_start() const { return _dump_start; }
+  void set_dump_start(jlong pos);
+
+  bool skip_operation() const;
+
+  // writes a HPROF_LOAD_CLASS record
+  static void do_load_class(klassOop k);
+
+  // writes a HPROF_GC_CLASS_DUMP record for the given class
+  // (and each array class too)
+  static void do_class_dump(klassOop k);
+
+  // writes a HPROF_GC_CLASS_DUMP records for a given basic type
+  // array (and each multi-dimensional array too)
+  static void do_basic_type_array_class_dump(klassOop k);
+
+  // HPROF_GC_ROOT_THREAD_OBJ records
+  void do_thread(JavaThread* thread, u4 thread_serial_num);
+  void do_threads();
+
+  // writes a HPROF_HEAP_DUMP or HPROF_HEAP_DUMP_SEGMENT record
+  void write_dump_header();
+
+  // fixes up the length of the current dump record
+  void write_current_dump_record_length();
+
+  // fixes up the current dump record (and writes HPROF_HEAP_DUMP_END
+  // record in the case of a segmented heap dump)
+  void end_of_dump();
+
+ public:
+  VM_HeapDumper(DumpWriter* writer, bool gc_before_heap_dump) :
+    VM_GC_Operation(0 /* total collections, dummy, ignored */,
+                    0 /* total full collections, dummy, ignored */,
+                    gc_before_heap_dump) {
+    _writer = writer;
+    _gc_before_heap_dump = gc_before_heap_dump;
+    _is_segmented_dump = false;
+    _dump_start = (jlong)-1;
+  }
+
+  VMOp_Type type() const { return VMOp_HeapDumper; }
+  // used to mark sub-record boundary
+  void check_segment_length();
+  void doit();
+};
+
+// heap dumps are never skipped (unlike some GC operations)
+bool VM_HeapDumper::skip_operation() const {
+  return false;
+}
+
+// sets the dump starting position
+void VM_HeapDumper::set_dump_start(jlong pos) {
+  _dump_start = pos;
+}
+
+// writes a HPROF_HEAP_DUMP or HPROF_HEAP_DUMP_SEGMENT record header.
+// The 4-byte length field is written as 0 and patched later by
+// write_current_dump_record_length() once the record is complete.
+void VM_HeapDumper::write_dump_header() {
+  if (writer()->is_open()) {
+    if (is_segmented_dump()) {
+      writer()->write_u1(HPROF_HEAP_DUMP_SEGMENT);
+    } else {
+      writer()->write_u1(HPROF_HEAP_DUMP);
+    }
+    writer()->write_u4(0); // current ticks
+
+    // record the starting position for the dump (its length will be fixed up later)
+    set_dump_start(writer()->current_offset());
+    writer()->write_u4(0);
+  }
+}
+
+// fixes up the length of the current dump record by seeking back to the
+// length field recorded in write_dump_header(), writing the actual length,
+// then seeking forward again to continue the dump
+void VM_HeapDumper::write_current_dump_record_length() {
+  if (writer()->is_open()) {
+    assert(dump_start() >= 0, "no dump start recorded");
+
+    // calculate the size of the dump record
+    jlong dump_end = writer()->current_offset();
+    jlong dump_len = (dump_end - dump_start() - 4);
+
+    // record length must fit in a u4
+    if (dump_len > (jlong)(4L*(jlong)G)) {
+      warning("record is too large");
+    }
+
+    // seek to the dump start and fix-up the length
+    writer()->seek_to_offset(dump_start());
+    writer()->write_u4((u4)dump_len);
+
+    // adjust the total size written to keep the bytes written correct.
+    // (the 4 bytes of the length field were already counted the first time)
+    writer()->adjust_bytes_written(-((long) sizeof(u4)));
+
+    // seek to dump end so we can continue
+    writer()->seek_to_offset(dump_end);
+
+    // no current dump record
+    set_dump_start((jlong)-1);
+  }
+}
+
+// used on a sub-record boundary to check if we need to start a
+// new segment. Only relevant for segmented (1.0.2) dumps; when the current
+// segment exceeds HeapDumpSegmentSize it is closed and a new one is opened.
+void VM_HeapDumper::check_segment_length() {
+  if (writer()->is_open()) {
+    if (is_segmented_dump()) {
+      // don't use current_offset that would be too expensive on a per record basis
+      jlong dump_end = writer()->bytes_written() + writer()->bytes_unwritten();
+      assert(dump_end == writer()->current_offset(), "checking");
+      jlong dump_len = (dump_end - dump_start() - 4);
+      assert(dump_len >= 0 && dump_len <= max_juint, "bad dump length");
+
+      if (dump_len > (jlong)HeapDumpSegmentSize) {
+        write_current_dump_record_length();
+        write_dump_header();
+      }
+    }
+  }
+}
+
+// fixes up the current dump record (and writes HPROF_HEAP_DUMP_END
+// record in the case of a segmented heap dump)
+void VM_HeapDumper::end_of_dump() {
+  if (writer()->is_open()) {
+    write_current_dump_record_length();
+
+    // for segmented dump we write the end record
+    if (is_segmented_dump()) {
+      writer()->write_u1(HPROF_HEAP_DUMP_END);
+      writer()->write_u4(0);   // ticks
+      writer()->write_u4(0);   // length (END record has no body)
+    }
+  }
+}
+
+// marks sub-record boundary - delegates to the VM operation so a segmented
+// dump can start a new segment here if the current one is large enough
+void HeapObjectDumper::mark_end_of_record() {
+  dumper()->check_segment_length();
+}
+
+// writes a HPROF_LOAD_CLASS record for the class (and each of its
+// array classes). Invoked as a SystemDictionary/Universe callback, so the
+// owning VM_HeapDumper is recovered from the current VM operation.
+void VM_HeapDumper::do_load_class(klassOop k) {
+  // serial numbers are simply handed out in visit order across all calls
+  static u4 class_serial_num = 0;
+
+  VM_HeapDumper* dumper = ((VM_HeapDumper*)VMThread::vm_operation());
+  DumpWriter* writer = dumper->writer();
+
+  // len of HPROF_LOAD_CLASS record
+  u4 remaining = 2*oopSize + 2*sizeof(u4);
+
+  // write a HPROF_LOAD_CLASS for the class and each array class
+  do {
+    DumperSupport::write_header(writer, HPROF_LOAD_CLASS, remaining);
+
+    // class serial number is just a number
+    writer->write_u4(++class_serial_num);
+
+    // class ID
+    Klass* klass = Klass::cast(k);
+    writer->write_classID(klass);
+
+    writer->write_u4(STACK_TRACE_ID);
+
+    // class name ID
+    symbolOop name = klass->name();
+    writer->write_objectID(name);
+
+    // write a LOAD_CLASS record for the array type (if it exists)
+    k = klass->array_klass_or_null();
+  } while (k != NULL);
+}
+
+// writes a HPROF_GC_CLASS_DUMP record for the given class.
+// Static callback - recovers the dumper from the current VM operation.
+void VM_HeapDumper::do_class_dump(klassOop k) {
+  VM_HeapDumper* dumper = ((VM_HeapDumper*)VMThread::vm_operation());
+  DumpWriter* writer = dumper->writer();
+  DumperSupport::dump_class_and_array_classes(writer, k);
+}
+
+// writes a HPROF_GC_CLASS_DUMP records for a given basic type
+// array (and each multi-dimensional array too)
+void VM_HeapDumper::do_basic_type_array_class_dump(klassOop k) {
+  VM_HeapDumper* dumper = ((VM_HeapDumper*)VMThread::vm_operation());
+  DumpWriter* writer = dumper->writer();
+  DumperSupport::dump_basic_type_array_class(writer, k);
+}
+
+// Walk the stack of the given thread.
+// Dumps a HPROF_GC_ROOT_JAVA_FRAME record for each local
+// Dumps a HPROF_GC_ROOT_JNI_LOCAL record for each JNI local
+void VM_HeapDumper::do_thread(JavaThread* java_thread, u4 thread_serial_num) {
+  JNILocalsDumper blk(writer(), thread_serial_num);
+
+  oop threadObj = java_thread->threadObj();
+  assert(threadObj != NULL, "sanity check");
+
+  // JNI locals for the top frame
+  java_thread->active_handles()->oops_do(&blk);
+
+  if (java_thread->has_last_Java_frame()) {
+
+    // vframes are resource allocated
+    Thread* current_thread = Thread::current();
+    ResourceMark rm(current_thread);
+    HandleMark hm(current_thread);
+
+    RegisterMap reg_map(java_thread);
+    frame f = java_thread->last_frame();
+    vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
+
+    // walk all frames, top-most first
+    while (vf != NULL) {
+      if (vf->is_java_frame()) {
+
+        // java frame (interpreted, compiled, ...)
+        javaVFrame *jvf = javaVFrame::cast(vf);
+
+        // native frames have no Java locals to report
+        if (!(jvf->method()->is_native())) {
+          StackValueCollection* locals = jvf->locals();
+          for (int slot=0; slot<locals->size(); slot++) {
+            if (locals->at(slot)->type() == T_OBJECT) {
+              oop o = locals->obj_at(slot)();
+
+              if (o != NULL) {
+                writer()->write_u1(HPROF_GC_ROOT_JAVA_FRAME);
+                writer()->write_objectID(o);
+                writer()->write_u4(thread_serial_num);
+                writer()->write_u4((u4)-1); // empty (frame number unknown)
+              }
+            }
+          }
+        }
+      } else {
+
+        // externalVFrame - if it's an entry frame then report any JNI locals
+        // as roots
+        frame* fr = vf->frame_pointer();
+        assert(fr != NULL, "sanity check");
+        if (fr->is_entry_frame()) {
+          fr->entry_frame_call_wrapper()->handles()->oops_do(&blk);
+        }
+      }
+
+      vf = vf->sender();
+    }
+  }
+}
+
+
+// write a HPROF_GC_ROOT_THREAD_OBJ record for each java thread. Then walk
+// the stack so that locals and JNI locals are dumped.
+// Threads that are exiting, hidden, or not fully started (no thread object
+// yet) are skipped and consume no serial number.
+void VM_HeapDumper::do_threads() {
+  u4 thread_serial_num = 0;
+  for (JavaThread* thread = Threads::first(); thread != NULL ; thread = thread->next()) {
+    oop threadObj = thread->threadObj();
+    if (threadObj != NULL && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
+      ++thread_serial_num;
+
+      writer()->write_u1(HPROF_GC_ROOT_THREAD_OBJ);
+      writer()->write_objectID(threadObj);
+      writer()->write_u4(thread_serial_num);
+      writer()->write_u4(STACK_TRACE_ID);
+
+      do_thread(thread, thread_serial_num);
+    }
+  }
+}
+
+
+// The VM operation that dumps the heap. The dump consists of the following
+// records:
+//
+//  HPROF_HEADER
+//  HPROF_TRACE
+//  [HPROF_UTF8]*
+//  [HPROF_LOAD_CLASS]*
+//  [HPROF_GC_CLASS_DUMP]*
+//  HPROF_HEAP_DUMP
+//
+// The HPROF_TRACE record after the header is "dummy trace" record which does
+// not include any frames. Other records which require a stack trace ID will
+// specify the trace ID of this record (1). It also means we can run HAT without
+// needing the -stack false option.
+//
+// The HPROF_HEAP_DUMP record has a length following by sub-records. To allow
+// the heap dump be generated in a single pass we remember the position of
+// the dump length and fix it up after all sub-records have been written.
+// To generate the sub-records we iterate over the heap, writing
+// HPROF_GC_INSTANCE_DUMP, HPROF_GC_OBJ_ARRAY_DUMP, and HPROF_GC_PRIM_ARRAY_DUMP
+// records as we go. Once that is done we write records for some of the GC
+// roots.
+
+void VM_HeapDumper::doit() {
+
+  HandleMark hm;
+  CollectedHeap* ch = Universe::heap();
+  if (_gc_before_heap_dump) {
+    ch->collect_as_vm_thread(GCCause::_heap_dump);
+  } else {
+    // make the heap parsable (no need to retire TLABs)
+    ch->ensure_parsability(false);
+  }
+
+  // Write the file header - use 1.0.2 for large heaps, otherwise 1.0.1
+  size_t used;
+  const char* header;
+#ifndef SERIALGC
+  if (Universe::heap()->kind() == CollectedHeap::GenCollectedHeap) {
+    used = GenCollectedHeap::heap()->used();
+  } else {
+    used = ParallelScavengeHeap::heap()->used();
+  }
+#else // SERIALGC
+  used = GenCollectedHeap::heap()->used();
+#endif // SERIALGC
+  if (used > (size_t)SegmentedHeapDumpThreshold) {
+    set_segmented_dump();
+    header = "JAVA PROFILE 1.0.2";
+  } else {
+    header = "JAVA PROFILE 1.0.1";
+  }
+  // header is few bytes long - no chance to overflow int
+  writer()->write_raw((void*)header, (int)strlen(header));
+  writer()->write_u1(0); // terminator
+  writer()->write_u4(oopSize);   // identifier size
+  writer()->write_u8(os::javaTimeMillis());
+
+  // HPROF_TRACE record without any frames (the dummy trace, ID 1)
+  DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4));
+  writer()->write_u4(STACK_TRACE_ID);
+  writer()->write_u4(0); // thread number
+  writer()->write_u4(0); // frame count
+
+  // HPROF_UTF8 records
+  SymbolTableDumper sym_dumper(writer());
+  SymbolTable::oops_do(&sym_dumper);
+
+  // write HPROF_LOAD_CLASS records
+  SystemDictionary::classes_do(&do_load_class);
+  Universe::basic_type_classes_do(&do_load_class);
+
+  // write HPROF_HEAP_DUMP or HPROF_HEAP_DUMP_SEGMENT
+  write_dump_header();
+
+  // Writes HPROF_GC_CLASS_DUMP records
+  SystemDictionary::classes_do(&do_class_dump);
+  Universe::basic_type_classes_do(&do_basic_type_array_class_dump);
+  check_segment_length();
+
+  // writes HPROF_GC_INSTANCE_DUMP records.
+  // After each sub-record is written check_segment_length will be invoked. When
+  // generated a segmented heap dump this allows us to check if the current
+  // segment exceeds a threshold and if so, then a new segment is started.
+  // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
+  // of the heap dump.
+  HeapObjectDumper obj_dumper(this, writer());
+  Universe::heap()->object_iterate(&obj_dumper);
+
+  // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
+  do_threads();
+  check_segment_length();
+
+  // HPROF_GC_ROOT_MONITOR_USED
+  MonitorUsedDumper mon_dumper(writer());
+  ObjectSynchronizer::oops_do(&mon_dumper);
+  check_segment_length();
+
+  // HPROF_GC_ROOT_JNI_GLOBAL
+  JNIGlobalsDumper jni_dumper(writer());
+  JNIHandles::oops_do(&jni_dumper);
+  check_segment_length();
+
+  // HPROF_GC_ROOT_STICKY_CLASS
+  StickyClassDumper class_dumper(writer());
+  SystemDictionary::always_strong_oops_do(&class_dumper);
+
+  // fixes up the length of the dump record. In the case of a segmented
+  // heap then the HPROF_HEAP_DUMP_END record is also written.
+  end_of_dump();
+}
+
+
+// dump the heap to given path. Returns 0 on success, -1 on failure (the
+// error message is then available via error_as_C_string()).
+int HeapDumper::dump(const char* path) {
+  assert(path != NULL && strlen(path) > 0, "path missing");
+
+  // print message in interactive case
+  if (print_to_tty()) {
+    tty->print_cr("Dumping heap to %s ...", path);
+    timer()->start();
+  }
+
+  // create the dump writer. If the file cannot be opened then bail
+  DumpWriter writer(path);
+  if (!writer.is_open()) {
+    set_error(writer.error());
+    if (print_to_tty()) {
+      tty->print_cr("Unable to create %s: %s", path,
+                    (error() != NULL) ? error() : "reason unknown");
+    }
+    return -1;
+  }
+
+  // generate the dump (runs in the VM thread)
+  VM_HeapDumper dumper(&writer, _gc_before_heap_dump);
+  VMThread::execute(&dumper);
+
+  // close dump file and record any error that the writer may have encountered
+  writer.close();
+  set_error(writer.error());
+
+  // print message in interactive case
+  if (print_to_tty()) {
+    timer()->stop();
+    if (error() == NULL) {
+      char msg[256];
+      sprintf(msg, "Heap dump file created [%s bytes in %3.3f secs]",
+        os::jlong_format_specifier(), timer()->seconds());
+      tty->print_cr(msg, writer.bytes_written());
+    } else {
+      tty->print_cr("Dump file is incomplete: %s", writer.error());
+    }
+  }
+
+  return (writer.error() == NULL) ? 0 : -1;
+}
+
+// stop timer (if still active), and free any error string we might be holding
+HeapDumper::~HeapDumper() {
+  if (timer()->is_active()) {
+    timer()->stop();
+  }
+  set_error(NULL);   // frees the strdup'ed error string, if any
+}
+
+
+// returns the error string (resource allocated), or NULL.
+// The caller must have a ResourceMark in place.
+char* HeapDumper::error_as_C_string() const {
+  if (error() != NULL) {
+    char* str = NEW_RESOURCE_ARRAY(char, strlen(error())+1);
+    strcpy(str, error());
+    return str;
+  } else {
+    return NULL;
+  }
+}
+
+// set the error string - frees any previously held error and stores a
+// strdup'ed copy of the new one; passing NULL clears the error.
+// NOTE(review): the previous string is freed before the new one is
+// duplicated, so callers must never pass this object's own error() here.
+void HeapDumper::set_error(char* error) {
+  if (_error != NULL) {
+    os::free(_error);
+  }
+  if (error == NULL) {
+    _error = NULL;
+  } else {
+    _error = os::strdup(error);
+    assert(_error != NULL, "allocation failure");
+  }
+}
+
+
+// Called by error reporting (e.g. OnOutOfMemoryError heap dumps).
+void HeapDumper::dump_heap() {
+  // static so the (possibly long) path survives without stack/heap allocation
+  static char path[JVM_MAXPATHLEN];
+
+  // The dump file defaults to java_pid<pid>.hprof in the current working
+  // directory. HeapDumpPath=<file> can be used to specify an alternative
+  // dump file name or a directory where dump file is created.
+  bool use_default_filename = true;
+  if (HeapDumpPath == NULL || HeapDumpPath[0] == '\0') {
+    path[0] = '\0'; // HeapDumpPath=<file> not specified
+  } else {
+    assert(strlen(HeapDumpPath) < sizeof(path), "HeapDumpPath too long");
+    strcpy(path, HeapDumpPath);
+    // check if the path is a directory (must exist)
+    DIR* dir = os::opendir(path);
+    if (dir == NULL) {
+      // not a directory: treat HeapDumpPath as the full file name
+      use_default_filename = false;
+    } else {
+      // HeapDumpPath specified a directory. We append a file separator
+      // (if needed).
+      os::closedir(dir);
+      size_t fs_len = strlen(os::file_separator());
+      if (strlen(path) >= fs_len) {
+        char* end = path;
+        end += (strlen(path) - fs_len);
+        if (strcmp(end, os::file_separator()) != 0) {
+          assert(strlen(path) + strlen(os::file_separator()) < sizeof(path),
+            "HeapDumpPath too long");
+          strcat(path, os::file_separator());
+        }
+      }
+    }
+  }
+  // If HeapDumpPath wasn't a file name then we append the default name
+  if (use_default_filename) {
+    char fn[32];
+    sprintf(fn, "java_pid%d.hprof", os::current_process_id());
+    assert(strlen(path) + strlen(fn) < sizeof(path), "HeapDumpPath too long");
+    strcat(path, fn);
+  }
+
+  HeapDumper dumper(false /* no GC before heap dump */,
+                    true  /* send to tty */);
+  dumper.dump(path);
+}
diff --git a/src/share/vm/services/heapDumper.hpp b/src/share/vm/services/heapDumper.hpp
new file mode 100644
index 000000000..247512e01
--- /dev/null
+++ b/src/share/vm/services/heapDumper.hpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// HeapDumper is used to dump the java heap to file in HPROF binary format:
+//
+//  { HeapDumper dumper(true /* full GC before heap dump */);
+//    if (dumper.dump("/export/java.hprof")) {      // non-zero return => error
+//      ResourceMark rm;
+//      tty->print_cr("Dump failed: %s", dumper.error_as_C_string());
+//    } else {
+//      // dump succeeded
+//    }
+//  }
+//
+
+class HeapDumper : public StackObj {
+ private:
+  char* _error;                // strdup'ed error message, NULL if none
+  bool _print_to_tty;          // emit progress/result messages to tty
+  bool _gc_before_heap_dump;   // request a full GC before dumping
+  elapsedTimer _t;
+
+  // string representation of error
+  char* error() const                   { return _error; }
+  void set_error(char* error);
+
+  // indicates if progress messages can be sent to tty
+  bool print_to_tty() const             { return _print_to_tty; }
+
+  // internal timer.
+  elapsedTimer* timer()                 { return &_t; }
+
+ public:
+  HeapDumper(bool gc_before_heap_dump) :
+    _gc_before_heap_dump(gc_before_heap_dump), _error(NULL), _print_to_tty(false)  { }
+  HeapDumper(bool gc_before_heap_dump, bool print_to_tty) :
+    _gc_before_heap_dump(gc_before_heap_dump), _error(NULL), _print_to_tty(print_to_tty) { }
+
+  ~HeapDumper();
+
+  // dumps the heap to the specified file, returns 0 if success.
+  int dump(const char* path);
+
+  // returns error message (resource allocated), or NULL if no error
+  char* error_as_C_string() const;
+
+  // called by error reporting to dump to the default/HeapDumpPath location
+  static void dump_heap()    KERNEL_RETURN;
+};
diff --git a/src/share/vm/services/jmm.h b/src/share/vm/services/jmm.h
new file mode 100644
index 000000000..b4777f9fe
--- /dev/null
+++ b/src/share/vm/services/jmm.h
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Sun designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Sun in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+#ifndef _JAVA_JMM_H_
+#define _JAVA_JMM_H_
+
+/*
+ * This is a private interface used by JDK for JVM monitoring
+ * and management.
+ *
+ * Bump the version number when either of the following happens:
+ *
+ * 1. There is a change in functions in JmmInterface.
+ *
+ * 2. There is a change in the contract between VM and Java classes.
+ */
+
+#include "jni.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum {
+ JMM_VERSION_1 = 0x20010000,
+ JMM_VERSION_1_0 = 0x20010000,
+ JMM_VERSION_1_1 = 0x20010100, // JDK 6
+ JMM_VERSION_1_2 = 0x20010200, // JDK 7
+ JMM_VERSION = 0x20010200
+};
+
+typedef struct {
+ unsigned int isLowMemoryDetectionSupported : 1;
+ unsigned int isCompilationTimeMonitoringSupported : 1;
+ unsigned int isThreadContentionMonitoringSupported : 1;
+ unsigned int isCurrentThreadCpuTimeSupported : 1;
+ unsigned int isOtherThreadCpuTimeSupported : 1;
+ unsigned int isBootClassPathSupported : 1;
+ unsigned int isObjectMonitorUsageSupported : 1;
+ unsigned int isSynchronizerUsageSupported : 1;
+ unsigned int : 24;
+} jmmOptionalSupport;
+
+typedef enum {
+ JMM_CLASS_LOADED_COUNT = 1, /* Total number of loaded classes */
+ JMM_CLASS_UNLOADED_COUNT = 2, /* Total number of unloaded classes */
+ JMM_THREAD_TOTAL_COUNT = 3, /* Total number of threads that have been started */
+ JMM_THREAD_LIVE_COUNT = 4, /* Current number of live threads */
+ JMM_THREAD_PEAK_COUNT = 5, /* Peak number of live threads */
+ JMM_THREAD_DAEMON_COUNT = 6, /* Current number of daemon threads */
+ JMM_JVM_INIT_DONE_TIME_MS = 7, /* Time when the JVM finished initialization */
+ JMM_COMPILE_TOTAL_TIME_MS = 8, /* Total accumulated time spent in compilation */
+ JMM_GC_TIME_MS = 9, /* Total accumulated time spent in collection */
+ JMM_GC_COUNT = 10, /* Total number of collections */
+
+ JMM_INTERNAL_ATTRIBUTE_INDEX = 100,
+ JMM_CLASS_LOADED_BYTES = 101, /* Number of bytes loaded instance classes */
+ JMM_CLASS_UNLOADED_BYTES = 102, /* Number of bytes unloaded instance classes */
+ JMM_TOTAL_CLASSLOAD_TIME_MS = 103, /* Accumulated VM class loader time (TraceClassLoadingTime) */
+ JMM_VM_GLOBAL_COUNT = 104, /* Number of VM internal flags */
+ JMM_SAFEPOINT_COUNT = 105, /* Total number of safepoints */
+ JMM_TOTAL_SAFEPOINTSYNC_TIME_MS = 106, /* Accumulated time spent getting to safepoints */
+ JMM_TOTAL_STOPPED_TIME_MS = 107, /* Accumulated time spent at safepoints */
+ JMM_TOTAL_APP_TIME_MS = 108, /* Accumulated time spent in Java application */
+ JMM_VM_THREAD_COUNT = 109, /* Current number of VM internal threads */
+ JMM_CLASS_INIT_TOTAL_COUNT = 110, /* Number of classes for which initializers were run */
+ JMM_CLASS_INIT_TOTAL_TIME_MS = 111, /* Accumulated time spent in class initializers */
+ JMM_METHOD_DATA_SIZE_BYTES = 112, /* Size of method data in memory */
+ JMM_CLASS_VERIFY_TOTAL_TIME_MS = 113, /* Accumulated time spent in class verifier */
+ JMM_SHARED_CLASS_LOADED_COUNT = 114, /* Number of shared classes loaded */
+ JMM_SHARED_CLASS_UNLOADED_COUNT = 115, /* Number of shared classes unloaded */
+ JMM_SHARED_CLASS_LOADED_BYTES = 116, /* Number of bytes loaded shared classes */
+ JMM_SHARED_CLASS_UNLOADED_BYTES = 117, /* Number of bytes unloaded shared classes */
+
+ JMM_OS_ATTRIBUTE_INDEX = 200,
+ JMM_OS_PROCESS_ID = 201, /* Process id of the JVM */
+ JMM_OS_MEM_TOTAL_PHYSICAL_BYTES = 202, /* Physical memory size */
+
+ JMM_GC_EXT_ATTRIBUTE_INFO_SIZE = 401 /* the size of the GC specific attributes for a given GC memory manager */
+} jmmLongAttribute;
+
+typedef enum {
+ JMM_VERBOSE_GC = 21,
+ JMM_VERBOSE_CLASS = 22,
+ JMM_THREAD_CONTENTION_MONITORING = 23,
+ JMM_THREAD_CPU_TIME = 24
+} jmmBoolAttribute;
+
+
+enum {
+ JMM_THREAD_STATE_FLAG_SUSPENDED = 0x00100000,
+ JMM_THREAD_STATE_FLAG_NATIVE = 0x00400000
+};
+
+#define JMM_THREAD_STATE_FLAG_MASK 0xFFF00000
+
+typedef enum {
+ JMM_STAT_PEAK_THREAD_COUNT = 801,
+ JMM_STAT_THREAD_CONTENTION_COUNT = 802,
+ JMM_STAT_THREAD_CONTENTION_TIME = 803,
+ JMM_STAT_THREAD_CONTENTION_STAT = 804,
+ JMM_STAT_PEAK_POOL_USAGE = 805,
+ JMM_STAT_GC_STAT = 806
+} jmmStatisticType;
+
+typedef enum {
+ JMM_USAGE_THRESHOLD_HIGH = 901,
+ JMM_USAGE_THRESHOLD_LOW = 902,
+ JMM_COLLECTION_USAGE_THRESHOLD_HIGH = 903,
+ JMM_COLLECTION_USAGE_THRESHOLD_LOW = 904
+} jmmThresholdType;
+
+/* Should match what is allowed in globals.hpp */
+typedef enum {
+ JMM_VMGLOBAL_TYPE_UNKNOWN = 0,
+ JMM_VMGLOBAL_TYPE_JBOOLEAN = 1,
+ JMM_VMGLOBAL_TYPE_JSTRING = 2,
+ JMM_VMGLOBAL_TYPE_JLONG = 3
+} jmmVMGlobalType;
+
+typedef enum {
+ JMM_VMGLOBAL_ORIGIN_DEFAULT = 1, /* Default value */
+ JMM_VMGLOBAL_ORIGIN_COMMAND_LINE = 2, /* Set at command line (or JNI invocation) */
+ JMM_VMGLOBAL_ORIGIN_MANAGEMENT = 3, /* Set via management interface */
+ JMM_VMGLOBAL_ORIGIN_ENVIRON_VAR = 4, /* Set via environment variables */
+ JMM_VMGLOBAL_ORIGIN_CONFIG_FILE = 5, /* Set via config file (such as .hotspotrc) */
+ JMM_VMGLOBAL_ORIGIN_ERGONOMIC = 6, /* Set via ergonomic */
+ JMM_VMGLOBAL_ORIGIN_OTHER = 99 /* Set via some other mechanism */
+} jmmVMGlobalOrigin;
+
+typedef struct {
+ jstring name;
+ jvalue value;
+ jmmVMGlobalType type; /* Data type */
+ jmmVMGlobalOrigin origin; /* Default or non-default value */
+ unsigned int writeable : 1; /* dynamically writeable */
+ unsigned int external : 1; /* external supported interface */
+ unsigned int reserved : 30;
+ void *reserved1;
+ void *reserved2;
+} jmmVMGlobal;
+
+typedef struct {
+ const char* name;
+ char type;
+ const char* description;
+} jmmExtAttributeInfo;
+
+/* Caller has to set the following fields before calling GetLastGCStat
+ *   o usage_before_gc - array of MemoryUsage objects
+ *   o usage_after_gc - array of MemoryUsage objects
+ *   o gc_ext_attribute_values_size - size of gc_ext_attribute_values array
+ *   o gc_ext_attribute_values - array of jvalues
+ */
+typedef struct {
+  jlong        gc_index;                       /* Index of the collections */
+  jlong        start_time;                     /* Start time of the GC */
+  jlong        end_time;                       /* End time of the GC */
+  jobjectArray usage_before_gc;                /* Memory usage array before GC */
+  jobjectArray usage_after_gc;                 /* Memory usage array after GC */
+  jint         gc_ext_attribute_values_size;   /* set by the caller of GetGCStat */
+  jvalue*      gc_ext_attribute_values;        /* Array of jvalue for GC extension attributes */
+  jint         num_gc_ext_attributes;          /* number of GC extension attribute values that are filled */
+                                               /* -1 indicates gc_ext_attribute_values is not big enough */
+} jmmGCStat;
+
+typedef struct jmmInterface_1_ {
+ void* reserved1;
+ void* reserved2;
+
+ jint (JNICALL *GetVersion) (JNIEnv *env);
+
+ jint (JNICALL *GetOptionalSupport) (JNIEnv *env,
+ jmmOptionalSupport* support_ptr);
+
+ /* This is used by JDK 6 and earlier.
+ * For JDK 7 and after, use GetInputArgumentArray.
+ */
+ jobject (JNICALL *GetInputArguments) (JNIEnv *env);
+
+ jint (JNICALL *GetThreadInfo) (JNIEnv *env,
+ jlongArray ids,
+ jint maxDepth,
+ jobjectArray infoArray);
+ jobjectArray (JNICALL *GetInputArgumentArray) (JNIEnv *env);
+
+ jobjectArray (JNICALL *GetMemoryPools) (JNIEnv* env, jobject mgr);
+
+ jobjectArray (JNICALL *GetMemoryManagers) (JNIEnv* env, jobject pool);
+
+ jobject (JNICALL *GetMemoryPoolUsage) (JNIEnv* env, jobject pool);
+ jobject (JNICALL *GetPeakMemoryPoolUsage) (JNIEnv* env, jobject pool);
+
+ void* reserved4;
+
+ jobject (JNICALL *GetMemoryUsage) (JNIEnv* env, jboolean heap);
+
+ jlong (JNICALL *GetLongAttribute) (JNIEnv *env, jobject obj, jmmLongAttribute att);
+ jboolean (JNICALL *GetBoolAttribute) (JNIEnv *env, jmmBoolAttribute att);
+ jboolean (JNICALL *SetBoolAttribute) (JNIEnv *env, jmmBoolAttribute att, jboolean flag);
+
+ jint (JNICALL *GetLongAttributes) (JNIEnv *env,
+ jobject obj,
+ jmmLongAttribute* atts,
+ jint count,
+ jlong* result);
+
+ jobjectArray (JNICALL *FindCircularBlockedThreads) (JNIEnv *env);
+ jlong (JNICALL *GetThreadCpuTime) (JNIEnv *env, jlong thread_id);
+
+ jobjectArray (JNICALL *GetVMGlobalNames) (JNIEnv *env);
+ jint (JNICALL *GetVMGlobals) (JNIEnv *env,
+ jobjectArray names,
+ jmmVMGlobal *globals,
+ jint count);
+
+ jint (JNICALL *GetInternalThreadTimes) (JNIEnv *env,
+ jobjectArray names,
+ jlongArray times);
+
+ jboolean (JNICALL *ResetStatistic) (JNIEnv *env,
+ jvalue obj,
+ jmmStatisticType type);
+
+ void (JNICALL *SetPoolSensor) (JNIEnv *env,
+ jobject pool,
+ jmmThresholdType type,
+ jobject sensor);
+
+ jlong (JNICALL *SetPoolThreshold) (JNIEnv *env,
+ jobject pool,
+ jmmThresholdType type,
+ jlong threshold);
+ jobject (JNICALL *GetPoolCollectionUsage) (JNIEnv* env, jobject pool);
+
+ jint (JNICALL *GetGCExtAttributeInfo) (JNIEnv *env,
+ jobject mgr,
+ jmmExtAttributeInfo *ext_info,
+ jint count);
+ void (JNICALL *GetLastGCStat) (JNIEnv *env,
+ jobject mgr,
+ jmmGCStat *gc_stat);
+ jlong (JNICALL *GetThreadCpuTimeWithKind) (JNIEnv *env,
+ jlong thread_id,
+ jboolean user_sys_cpu_time);
+ void* reserved5;
+ jint (JNICALL *DumpHeap0) (JNIEnv *env,
+ jstring outputfile,
+ jboolean live);
+ jobjectArray (JNICALL *FindDeadlocks) (JNIEnv *env, jboolean object_monitors_only);
+ void (JNICALL *SetVMGlobal) (JNIEnv *env,
+ jstring flag_name,
+ jvalue new_value);
+ void* reserved6;
+ jobjectArray (JNICALL *DumpThreads) (JNIEnv *env,
+ jlongArray ids,
+ jboolean lockedMonitors,
+ jboolean lockedSynchronizers);
+} JmmInterface;
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif /* __cplusplus */
+
+#endif /* !_JAVA_JMM_H_ */
diff --git a/src/share/vm/services/lowMemoryDetector.cpp b/src/share/vm/services/lowMemoryDetector.cpp
new file mode 100644
index 000000000..14ca5243c
--- /dev/null
+++ b/src/share/vm/services/lowMemoryDetector.cpp
@@ -0,0 +1,422 @@
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_lowMemoryDetector.cpp.incl"
+
+// Singleton detector thread, the cached "any collected pool has detection
+// enabled" flag, and the temporary-disable counter (bumped by
+// LowMemoryDetectorDisabler).
+LowMemoryDetectorThread* LowMemoryDetector::_detector_thread = NULL;
+volatile bool LowMemoryDetector::_enabled_for_collected_pools = false;
+volatile jint LowMemoryDetector::_disabled_count = 0;
+
+// Creates and starts the "Low Memory Detector" daemon thread: allocates the
+// java.lang.Thread object, places it in the system thread group, then (under
+// Threads_lock) creates the native JavaThread and aborts the VM if no OS
+// thread could be created for it.
+void LowMemoryDetector::initialize() {
+  EXCEPTION_MARK;
+
+  instanceKlassHandle klass (THREAD, SystemDictionary::thread_klass());
+  instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
+
+  const char thread_name[] = "Low Memory Detector";
+  Handle string = java_lang_String::create_from_str(thread_name, CHECK);
+
+  // Initialize thread_oop to put it into the system threadGroup
+  Handle thread_group (THREAD, Universe::system_thread_group());
+  JavaValue result(T_VOID);
+  JavaCalls::call_special(&result, thread_oop,
+                          klass,
+                          vmSymbolHandles::object_initializer_name(),
+                          vmSymbolHandles::threadgroup_string_void_signature(),
+                          thread_group,
+                          string,
+                          CHECK);
+
+  {
+    MutexLocker mu(Threads_lock);
+    _detector_thread = new LowMemoryDetectorThread(&low_memory_detector_thread_entry);
+
+    // At this point it may be possible that no osthread was created for the
+    // JavaThread due to lack of memory. We would have to throw an exception
+    // in that case. However, since this must work and we do not allow
+    // exceptions anyway, check and abort if this fails.
+    if (_detector_thread == NULL || _detector_thread->osthread() == NULL) {
+      vm_exit_during_initialization("java.lang.OutOfMemoryError",
+                                    "unable to create new native thread");
+    }
+
+    java_lang_Thread::set_thread(thread_oop(), _detector_thread);
+    java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
+    java_lang_Thread::set_daemon(thread_oop());
+    _detector_thread->set_threadObj(thread_oop());
+
+    Threads::add(_detector_thread);
+    Thread::start(_detector_thread);
+  }
+}
+
+// Returns true if any memory pool's usage or gc-usage sensor has pending
+// trigger/clear requests. Caller must hold LowMemory_lock so sensor state
+// cannot change underneath us. Returns as soon as the first pending request
+// is found instead of scanning every remaining pool (pure query; the early
+// exit cannot change the result).
+bool LowMemoryDetector::has_pending_requests() {
+  assert(LowMemory_lock->owned_by_self(), "Must own LowMemory_lock");
+  int num_memory_pools = MemoryService::num_memory_pools();
+  for (int i = 0; i < num_memory_pools; i++) {
+    MemoryPool* pool = MemoryService::get_memory_pool(i);
+    SensorInfo* sensor = pool->usage_sensor();
+    if (sensor != NULL && sensor->has_pending_requests()) {
+      return true;
+    }
+
+    SensorInfo* gc_sensor = pool->gc_usage_sensor();
+    if (gc_sensor != NULL && gc_sensor->has_pending_requests()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Entry point of the Low Memory Detector thread. Loops forever: blocks on
+// LowMemory_lock until some sensor has pending trigger/clear requests, then
+// calls out to Java to process them without holding the lock.
+void LowMemoryDetector::low_memory_detector_thread_entry(JavaThread* jt, TRAPS) {
+  while (true) {
+    bool sensors_changed = false;
+
+    {
+      // _no_safepoint_check_flag is used here as LowMemory_lock is a
+      // special lock and the VMThread may acquire this lock at safepoint.
+      // Need state transition ThreadBlockInVM so that this thread
+      // will be handled by safepoint correctly when this thread is
+      // notified at a safepoint.
+
+      // This ThreadBlockInVM object is not also considered to be
+      // suspend-equivalent because LowMemoryDetector threads are
+      // not visible to external suspension.
+
+      ThreadBlockInVM tbivm(jt);
+
+      MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
+      while (!(sensors_changed = has_pending_requests())) {
+        // wait until one of the sensors has pending requests
+        LowMemory_lock->wait(Mutex::_no_safepoint_check_flag);
+      }
+    }
+
+    {
+      ResourceMark rm(THREAD);
+      HandleMark hm(THREAD);
+
+      // No need to hold LowMemory_lock to call out to Java
+      int num_memory_pools = MemoryService::num_memory_pools();
+      for (int i = 0; i < num_memory_pools; i++) {
+        MemoryPool* pool = MemoryService::get_memory_pool(i);
+        SensorInfo* sensor = pool->usage_sensor();
+        SensorInfo* gc_sensor = pool->gc_usage_sensor();
+        if (sensor != NULL && sensor->has_pending_requests()) {
+          sensor->process_pending_requests(CHECK);
+        }
+        if (gc_sensor != NULL && gc_sensor->has_pending_requests()) {
+          gc_sensor->process_pending_requests(CHECK);
+        }
+      }
+    }
+  }
+}
+
+// This method could be called from any Java threads
+// and also VMThread.
+// Scans every memory pool with a usage sensor and a non-zero high threshold,
+// updates each sensor as a gauge attribute from the pool's current usage, and
+// wakes the detector thread if any update produced pending requests.
+void LowMemoryDetector::detect_low_memory() {
+  MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
+
+  bool has_pending_requests = false;
+  int num_memory_pools = MemoryService::num_memory_pools();
+  for (int i = 0; i < num_memory_pools; i++) {
+    MemoryPool* pool = MemoryService::get_memory_pool(i);
+    SensorInfo* sensor = pool->usage_sensor();
+    if (sensor != NULL &&
+        pool->usage_threshold()->is_high_threshold_supported() &&
+        pool->usage_threshold()->high_threshold() != 0) {
+      MemoryUsage usage = pool->get_memory_usage();
+      sensor->set_gauge_sensor_level(usage,
+                                     pool->usage_threshold());
+      has_pending_requests = has_pending_requests || sensor->has_pending_requests();
+    }
+  }
+
+  if (has_pending_requests) {
+    LowMemory_lock->notify_all();
+  }
+}
+
+// This method could be called from any Java threads
+// and also VMThread.
+// Per-pool variant: updates only the given pool's usage sensor and notifies
+// the detector thread if the update produced pending requests. Returns early
+// (no lock taken) when the pool has no sensor or detection is disabled.
+void LowMemoryDetector::detect_low_memory(MemoryPool* pool) {
+  SensorInfo* sensor = pool->usage_sensor();
+  if (sensor == NULL ||
+      !pool->usage_threshold()->is_high_threshold_supported() ||
+      pool->usage_threshold()->high_threshold() == 0) {
+    return;
+  }
+
+  {
+    MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
+
+    MemoryUsage usage = pool->get_memory_usage();
+    sensor->set_gauge_sensor_level(usage,
+                                   pool->usage_threshold());
+    if (sensor->has_pending_requests()) {
+      // notify sensor state update
+      LowMemory_lock->notify_all();
+    }
+  }
+}
+
+// Only called by VMThread at GC time
+// Updates the pool's after-GC (collection usage) sensor as a counter
+// attribute from the pool's last-collection usage, and notifies the detector
+// thread if the update produced pending requests.
+void LowMemoryDetector::detect_after_gc_memory(MemoryPool* pool) {
+  SensorInfo* sensor = pool->gc_usage_sensor();
+  if (sensor == NULL ||
+      !pool->gc_usage_threshold()->is_high_threshold_supported() ||
+      pool->gc_usage_threshold()->high_threshold() == 0) {
+    return;
+  }
+
+  {
+    MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
+
+    MemoryUsage usage = pool->get_last_collection_usage();
+    sensor->set_counter_sensor_level(usage, pool->gc_usage_threshold());
+
+    if (sensor->has_pending_requests()) {
+      // notify sensor state update
+      LowMemory_lock->notify_all();
+    }
+  }
+}
+
+// Re-evaluate whether any collected memory pool currently has low memory
+// detection enabled, and cache the answer in _enabled_for_collected_pools.
+void LowMemoryDetector::recompute_enabled_for_collected_pools() {
+  bool any_enabled = false;
+  const int pool_count = MemoryService::num_memory_pools();
+  // Stop scanning as soon as one enabled collected pool is found.
+  for (int idx = 0; !any_enabled && idx < pool_count; idx++) {
+    MemoryPool* p = MemoryService::get_memory_pool(idx);
+    any_enabled = p->is_collected_pool() && is_enabled(p);
+  }
+  _enabled_for_collected_pools = any_enabled;
+}
+
+// All sensor state starts out cleared; _sensor_obj is installed later via
+// set_sensor() once the Java-side Sensor object exists.
+SensorInfo::SensorInfo() {
+  _sensor_obj = NULL;
+  _sensor_on = false;
+  _sensor_count = 0;
+  _pending_trigger_count = 0;
+  _pending_clear_count = 0;
+}
+
+// When this method is used, the memory usage is monitored
+// as a gauge attribute.  Sensor notifications (trigger or
+// clear) is only emitted at the first time it crosses
+// a threshold.
+//
+// High and low thresholds are designed to provide a
+// hysteresis mechanism to avoid repeated triggering
+// of notifications when the attribute value makes small oscillations
+// around the high or low threshold value.
+//
+// The sensor will be triggered if:
+//  (1) the usage is crossing above the high threshold and
+//      the sensor is currently off and no pending
+//      trigger requests; or
+//  (2) the usage is crossing above the high threshold and
+//      the sensor will be off (i.e. sensor is currently on
+//      and has pending clear requests).
+//
+// Subsequent crossings of the high threshold value do not cause
+// any triggers unless the usage becomes less than the low threshold.
+//
+// The sensor will be cleared if:
+//  (1) the usage is crossing below the low threshold and
+//      the sensor is currently on and no pending
+//      clear requests; or
+//  (2) the usage is crossing below the low threshold and
+//      the sensor will be on (i.e. sensor is currently off
+//      and has pending trigger requests).
+//
+// Subsequent crossings of the low threshold value do not cause
+// any clears unless the usage becomes greater than or equal
+// to the high threshold.
+//
+// If the current level is between high and low threshold, no change.
+//
+// Caller is expected to hold LowMemory_lock; only the pending counts and
+// the recorded usage are updated here — the actual sensor flag flips in
+// trigger()/clear() when the requests are processed.
+void SensorInfo::set_gauge_sensor_level(MemoryUsage usage, ThresholdSupport* high_low_threshold) {
+  assert(high_low_threshold->is_high_threshold_supported(), "just checking");
+
+  bool is_over_high = high_low_threshold->is_high_threshold_crossed(usage);
+  bool is_below_low = high_low_threshold->is_low_threshold_crossed(usage);
+
+  assert(!(is_over_high && is_below_low), "Can't be both true");
+
+  if (is_over_high &&
+      ((!_sensor_on && _pending_trigger_count == 0) ||
+       _pending_clear_count > 0)) {
+    // low memory detected and need to increment the trigger pending count
+    // if the sensor is off or will be off due to _pending_clear_ > 0
+    // Request to trigger the sensor
+    _pending_trigger_count++;
+    _usage = usage;
+
+    if (_pending_clear_count > 0) {
+      // non-zero pending clear requests indicates that there are
+      // pending requests to clear this sensor.
+      // This trigger request needs to clear this clear count
+      // since the resulting sensor flag should be on.
+      _pending_clear_count = 0;
+    }
+  } else if (is_below_low &&
+             ((_sensor_on && _pending_clear_count == 0) ||
+              (_pending_trigger_count > 0 && _pending_clear_count == 0))) {
+    // memory usage returns below the threshold
+    // Request to clear the sensor if the sensor is on or will be on due to
+    // _pending_trigger_count > 0 and also no clear request
+    _pending_clear_count++;
+  }
+}
+
+// When this method is used, the memory usage is monitored as a
+// simple counter attribute.  The sensor will be triggered
+// whenever the usage is crossing the threshold to keep track
+// of the number of times the VM detects such a condition occurs.
+//
+// High and low thresholds are designed to provide a
+// hysteresis mechanism to avoid repeated triggering
+// of notifications when the attribute value makes small oscillations
+// around the high or low threshold value.
+//
+// The sensor will be triggered if:
+//   - the usage is crossing above the high threshold regardless
+//     of the current sensor state.
+//
+// The sensor will be cleared if:
+//  (1) the usage is crossing below the low threshold and
+//      the sensor is currently on; or
+//  (2) the usage is crossing below the low threshold and
+//      the sensor will be on (i.e. sensor is currently off
+//      and has pending trigger requests).
+//
+// Caller is expected to hold LowMemory_lock; only the pending counts and
+// the recorded usage are updated here.
+void SensorInfo::set_counter_sensor_level(MemoryUsage usage, ThresholdSupport* counter_threshold) {
+  assert(counter_threshold->is_high_threshold_supported(), "just checking");
+
+  bool is_over_high = counter_threshold->is_high_threshold_crossed(usage);
+  bool is_below_low = counter_threshold->is_low_threshold_crossed(usage);
+
+  assert(!(is_over_high && is_below_low), "Can't be both true");
+
+  if (is_over_high) {
+    _pending_trigger_count++;
+    _usage = usage;
+    _pending_clear_count = 0;
+  } else if (is_below_low && (_sensor_on || _pending_trigger_count > 0)) {
+    _pending_clear_count++;
+  }
+}
+
+// GC support: lets the collector visit (and possibly update) the reference
+// to the Java-level Sensor object held in _sensor_obj.
+void SensorInfo::oops_do(OopClosure* f) {
+  f->do_oop((oop*) &_sensor_obj);
+}
+
+// Dispatches this sensor's pending requests by calling out to Java.
+// A pending clear takes precedence over pending triggers: the resulting
+// sensor state must be off, so clear() is invoked instead of trigger().
+void SensorInfo::process_pending_requests(TRAPS) {
+  if (!has_pending_requests()) {
+    return;
+  }
+
+  int pending_count = pending_trigger_count();
+  if (pending_clear_count() > 0) {
+    clear(pending_count, CHECK);
+  } else {
+    trigger(pending_count, CHECK);
+  }
+
+}
+
+// Calls the Java-level Sensor.trigger(count, usage) method (when a sensor
+// object is installed), then — under LowMemory_lock — turns the native
+// sensor on, bumps the trigger count, and consumes `count` pending requests.
+void SensorInfo::trigger(int count, TRAPS) {
+  assert(count <= _pending_trigger_count, "just checking");
+
+  if (_sensor_obj != NULL) {
+    klassOop k = Management::sun_management_Sensor_klass(CHECK);
+    instanceKlassHandle sensorKlass (THREAD, k);
+    Handle sensor_h(THREAD, _sensor_obj);
+    Handle usage_h = MemoryService::create_MemoryUsage_obj(_usage, CHECK);
+
+    JavaValue result(T_VOID);
+    JavaCallArguments args(sensor_h);
+    args.push_int((int) count);
+    args.push_oop(usage_h);
+
+    JavaCalls::call_virtual(&result,
+                            sensorKlass,
+                            vmSymbolHandles::trigger_name(),
+                            vmSymbolHandles::trigger_method_signature(),
+                            &args,
+                            CHECK);
+  }
+
+  {
+    // Holds LowMemory_lock and update the sensor state
+    MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
+    _sensor_on = true;
+    _sensor_count += count;
+    _pending_trigger_count = _pending_trigger_count - count;
+  }
+}
+
+// Calls the Java-level Sensor.clear(count) method (when a sensor object is
+// installed), then — under LowMemory_lock — turns the native sensor off,
+// zeroes the pending clears, and consumes `count` pending trigger requests.
+void SensorInfo::clear(int count, TRAPS) {
+  if (_sensor_obj != NULL) {
+    klassOop k = Management::sun_management_Sensor_klass(CHECK);
+    instanceKlassHandle sensorKlass (THREAD, k);
+    Handle sensor(THREAD, _sensor_obj);
+
+    JavaValue result(T_VOID);
+    JavaCallArguments args(sensor);
+    args.push_int((int) count);
+    JavaCalls::call_virtual(&result,
+                            sensorKlass,
+                            vmSymbolHandles::clear_name(),
+                            vmSymbolHandles::int_void_signature(),
+                            &args,
+                            CHECK);
+  }
+
+  {
+    // Holds LowMemory_lock and update the sensor state
+    MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
+    _sensor_on = false;
+    _pending_clear_count = 0;
+    _pending_trigger_count = _pending_trigger_count - count;
+  }
+}
+
+//--------------------------------------------------------------
+// Non-product code
+
+#ifndef PRODUCT
+// Debug-only dump of the sensor state to the default output stream.
+void SensorInfo::print() {
+  // _sensor_count is a size_t and the pending counts are ints; the original
+  // "%ld" specifiers matched none of them (undefined behavior in varargs).
+  // Cast the size_t explicitly and print the ints with "%d".
+  tty->print_cr("%s count = %lu pending_triggers = %d pending_clears = %d",
+                (_sensor_on ? "on" : "off"),
+                (unsigned long) _sensor_count,
+                _pending_trigger_count, _pending_clear_count);
+}
+
+#endif // PRODUCT
diff --git a/src/share/vm/services/lowMemoryDetector.hpp b/src/share/vm/services/lowMemoryDetector.hpp
new file mode 100644
index 000000000..684a66a23
--- /dev/null
+++ b/src/share/vm/services/lowMemoryDetector.hpp
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Low Memory Detection Support
+// Two memory alarms in the JDK (we called them sensors).
+// - Heap memory sensor
+// - Non-heap memory sensor
+// When the VM detects if the memory usage of a memory pool has reached
+// or exceeded its threshold, it will trigger the sensor for the type
+// of the memory pool (heap or nonheap or both).
+//
+// If threshold == -1, no low memory detection is supported and
+// the threshold value is not allowed to be changed.
+// If threshold == 0, no low memory detection is performed for
+// that memory pool. The threshold can be set to any non-negative
+// value.
+//
+// The default threshold of the Hotspot memory pools are:
+// Eden space -1
+// Survivor space 1 -1
+// Survivor space 2 -1
+// Old generation 0
+// Perm generation 0
+// CodeCache 0
+//
+// For heap memory, detection will be performed when GC finishes
+// and also in the slow path allocation.
+// For Code cache, detection will be performed in the allocation
+// and deallocation.
+//
+// May need to deal with hysteresis effect.
+//
+
+class LowMemoryDetectorThread;
+class OopClosure;
+class MemoryPool;
+
+// Tracks the high/low usage thresholds for one memory pool and answers
+// whether a given MemoryUsage has crossed them. A threshold of 0 disables
+// detection for that bound. Query methods are const: they never mutate.
+class ThresholdSupport : public CHeapObj {
+ private:
+  bool   _support_high_threshold;
+  bool   _support_low_threshold;
+  size_t _high_threshold;
+  size_t _low_threshold;
+ public:
+  ThresholdSupport(bool support_high, bool support_low) {
+    _support_high_threshold = support_high;
+    _support_low_threshold = support_low;
+    _high_threshold = 0;
+    _low_threshold = 0;
+  }
+
+  size_t high_threshold() const { return _high_threshold; }
+  size_t low_threshold()  const { return _low_threshold; }
+  bool is_high_threshold_supported() const { return _support_high_threshold; }
+  bool is_low_threshold_supported()  const { return _support_low_threshold; }
+
+  // True when usage is at or above a non-zero, supported high threshold.
+  bool is_high_threshold_crossed(MemoryUsage usage) const {
+    if (_support_high_threshold && _high_threshold > 0) {
+      return (usage.used() >= _high_threshold);
+    }
+    return false;
+  }
+  // True when usage is strictly below a non-zero, supported low threshold.
+  bool is_low_threshold_crossed(MemoryUsage usage) const {
+    if (_support_low_threshold && _low_threshold > 0) {
+      return (usage.used() < _low_threshold);
+    }
+    return false;
+  }
+
+  // Sets the high threshold (must keep low <= high); returns previous value.
+  size_t set_high_threshold(size_t new_threshold) {
+    assert(_support_high_threshold, "can only be set if supported");
+    assert(new_threshold >= _low_threshold, "new_threshold must be >= _low_threshold");
+    size_t prev = _high_threshold;
+    _high_threshold = new_threshold;
+    return prev;
+  }
+
+  // Sets the low threshold (must keep low <= high); returns previous value.
+  size_t set_low_threshold(size_t new_threshold) {
+    assert(_support_low_threshold, "can only be set if supported");
+    assert(new_threshold <= _high_threshold, "new_threshold must be <= _high_threshold");
+    size_t prev = _low_threshold;
+    _low_threshold = new_threshold;
+    return prev;
+  }
+};
+
+// Native-side state for one Java-level sun.management.Sensor: the sensor
+// flag, its trigger count, and the pending trigger/clear requests queued for
+// the Low Memory Detector thread to process.
+class SensorInfo : public CHeapObj {
+private:
+  instanceOop     _sensor_obj;            // Java-level Sensor object, NULL until set_sensor()
+  bool            _sensor_on;             // current sensor flag
+  size_t          _sensor_count;          // total number of times triggered
+
+  // before the actual sensor on flag and sensor count are set
+  // we maintain the number of pending triggers and clears.
+  // _pending_trigger_count means the number of pending triggers
+  // and the sensor count should be incremented by the same number.
+
+  int             _pending_trigger_count;
+
+  // _pending_clear_count takes precedence if it's > 0 which
+  // indicates the resulting sensor will be off
+  // Sensor trigger requests will reset this clear count to
+  // indicate the resulting flag should be on.
+
+  int             _pending_clear_count;
+
+  MemoryUsage     _usage;                 // usage recorded at the last trigger request
+
+  void clear(int count, TRAPS);
+  void trigger(int count, TRAPS);
+public:
+  SensorInfo();
+  void set_sensor(instanceOop sensor) {
+    assert(_sensor_obj == NULL, "Should be set only once");
+    _sensor_obj = sensor;
+  }
+
+  bool has_pending_requests() {
+    return (_pending_trigger_count > 0 || _pending_clear_count > 0);
+  }
+
+  int pending_trigger_count()      { return _pending_trigger_count; }
+  int pending_clear_count()        { return _pending_clear_count; }
+
+  // When this method is used, the memory usage is monitored
+  // as a gauge attribute.  High and low thresholds are designed
+  // to provide a hysteresis mechanism to avoid repeated triggering
+  // of notifications when the attribute value makes small oscillations
+  // around the high or low threshold value.
+  //
+  // The sensor will be triggered if:
+  //  (1) the usage is crossing above the high threshold and
+  //      the sensor is currently off and no pending
+  //      trigger requests; or
+  //  (2) the usage is crossing above the high threshold and
+  //      the sensor will be off (i.e. sensor is currently on
+  //      and has pending clear requests).
+  //
+  // Subsequent crossings of the high threshold value do not cause
+  // any triggers unless the usage becomes less than the low threshold.
+  //
+  // The sensor will be cleared if:
+  //  (1) the usage is crossing below the low threshold and
+  //      the sensor is currently on and no pending
+  //      clear requests; or
+  //  (2) the usage is crossing below the low threshold and
+  //      the sensor will be on (i.e. sensor is currently off
+  //      and has pending trigger requests).
+  //
+  // Subsequent crossings of the low threshold value do not cause
+  // any clears unless the usage becomes greater than or equal
+  // to the high threshold.
+  //
+  // If the current level is between high and low threshold, no change.
+  //
+  void set_gauge_sensor_level(MemoryUsage usage, ThresholdSupport* high_low_threshold);
+
+  // When this method is used, the memory usage is monitored as a
+  // simple counter attribute.  The sensor will be triggered
+  // whenever the usage is crossing the threshold to keep track
+  // of the number of times the VM detects such a condition occurs.
+  //
+  // The sensor will be triggered if:
+  //   - the usage is crossing above the high threshold regardless
+  //     of the current sensor state.
+  //
+  // The sensor will be cleared if:
+  //  (1) the usage is crossing below the low threshold and
+  //      the sensor is currently on; or
+  //  (2) the usage is crossing below the low threshold and
+  //      the sensor will be on (i.e. sensor is currently off
+  //      and has pending trigger requests).
+  //
+  void set_counter_sensor_level(MemoryUsage usage, ThresholdSupport* counter_threshold);
+
+  void process_pending_requests(TRAPS);
+  void oops_do(OopClosure* f);
+
+#ifndef PRODUCT
+  // printing on default output stream;
+  void print();
+#endif // PRODUCT
+};
+
+// Static facade for the low memory detection subsystem: owns the detector
+// thread, the per-pool detection entry points, and the cached flag that
+// says whether any collected pool has detection enabled.
+class LowMemoryDetector : public AllStatic {
+friend class LowMemoryDetectorDisabler;
+private:
+  // true if any collected heap has low memory detection enabled
+  static volatile bool _enabled_for_collected_pools;
+  // > 0 if temporarily disabled (see LowMemoryDetectorDisabler)
+  static volatile jint _disabled_count;
+
+  static LowMemoryDetectorThread* _detector_thread;
+  static void low_memory_detector_thread_entry(JavaThread* thread, TRAPS);
+  static void check_memory_usage();
+  static bool has_pending_requests();
+  static bool temporary_disabled() { return _disabled_count > 0; }
+  static void disable() { Atomic::inc(&_disabled_count); }
+  static void enable() { Atomic::dec(&_disabled_count); }
+
+public:
+  static void initialize();
+  static void detect_low_memory();
+  static void detect_low_memory(MemoryPool* pool);
+  static void detect_after_gc_memory(MemoryPool* pool);
+
+  static bool is_enabled(MemoryPool* pool) {
+    // low memory detection is enabled for collected memory pools
+    // iff one of the collected memory pool has a sensor and the
+    // threshold set non-zero
+    if (pool->usage_sensor() == NULL) {
+      return false;
+    } else {
+      ThresholdSupport* threshold_support = pool->usage_threshold();
+      return (threshold_support->is_high_threshold_supported() ?
+               (threshold_support->high_threshold() > 0) : false);
+    }
+  }
+
+  // indicates if low memory detection is enabled for any collected
+  // memory pools
+  static inline bool is_enabled_for_collected_pools() {
+    return !temporary_disabled() && _enabled_for_collected_pools;
+  }
+
+  // recompute enabled flag
+  static void recompute_enabled_for_collected_pools();
+
+  // low memory detection for collected memory pools.
+  static inline void detect_low_memory_for_collected_pools() {
+    // no-op if low memory detection not enabled
+    if (!is_enabled_for_collected_pools()) {
+      return;
+    }
+    int num_memory_pools = MemoryService::num_memory_pools();
+    for (int i=0; i<num_memory_pools; i++) {
+      MemoryPool* pool = MemoryService::get_memory_pool(i);
+
+      // if low memory detection is enabled then check if the
+      // current used exceeds the high threshold
+      if (pool->is_collected_pool() && is_enabled(pool)) {
+        size_t used = pool->used_in_bytes();
+        size_t high = pool->usage_threshold()->high_threshold();
+        if (used > high) {
+          detect_low_memory(pool);
+        }
+      }
+    }
+  }
+
+};
+
+// RAII guard that temporarily disables low memory detection for its scope:
+// bumps the detector's disable counter on construction and releases it
+// (after asserting it is still held) on destruction.
+class LowMemoryDetectorDisabler: public StackObj {
+public:
+  LowMemoryDetectorDisabler() {
+    LowMemoryDetector::disable();
+  }
+  ~LowMemoryDetectorDisabler() {
+    assert(LowMemoryDetector::temporary_disabled(), "should be disabled!");
+    LowMemoryDetector::enable();
+  }
+};
diff --git a/src/share/vm/services/management.cpp b/src/share/vm/services/management.cpp
new file mode 100644
index 000000000..88e5f1e5b
--- /dev/null
+++ b/src/share/vm/services/management.cpp
@@ -0,0 +1,2019 @@
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_management.cpp.incl"
+
+// Performance counters recording VM creation milestones; set in
+// record_vm_startup_time() and exported via sun.rt.* perf counters.
+PerfVariable* Management::_begin_vm_creation_time = NULL;
+PerfVariable* Management::_end_vm_creation_time = NULL;
+PerfVariable* Management::_vm_init_done_time = NULL;
+
+// Lazily-resolved management classes, cached on first use and kept
+// alive as GC roots via Management::oops_do().
+klassOop Management::_sensor_klass = NULL;
+klassOop Management::_threadInfo_klass = NULL;
+klassOop Management::_memoryUsage_klass = NULL;
+klassOop Management::_memoryPoolMXBean_klass = NULL;
+klassOop Management::_memoryManagerMXBean_klass = NULL;
+klassOop Management::_garbageCollectorMXBean_klass = NULL;
+klassOop Management::_managementFactory_klass = NULL;
+
+// Optional-capability flags reported through jmm_GetOptionalSupport.
+jmmOptionalSupport Management::_optional_support = {0};
+// Base timestamp for Management::timestamp() deltas.
+TimeStamp Management::_stamp;
+
+// VM startup hook: initializes the management subsystem followed by the
+// services that depend on it (order is significant).
+void management_init() {
+  Management::init();
+  ThreadService::init();
+  RuntimeService::init();
+  ClassLoadingService::init();
+}
+
+// Creates the startup-time perf counters and fills in the static
+// jmmOptionalSupport capability flags reported to the JMM interface.
+void Management::init() {
+  EXCEPTION_MARK;
+
+  // These counters are for java.lang.management API support.
+  // They are created even if -XX:-UsePerfData is set and in
+  // that case, they will be allocated on C heap.
+
+  _begin_vm_creation_time =
+            PerfDataManager::create_variable(SUN_RT, "createVmBeginTime",
+                                             PerfData::U_None, CHECK);
+
+  _end_vm_creation_time =
+            PerfDataManager::create_variable(SUN_RT, "createVmEndTime",
+                                             PerfData::U_None, CHECK);
+
+  _vm_init_done_time =
+            PerfDataManager::create_variable(SUN_RT, "vmInitDoneTime",
+                                             PerfData::U_None, CHECK);
+
+  // Initialize optional support
+  _optional_support.isLowMemoryDetectionSupported = 1;
+  _optional_support.isCompilationTimeMonitoringSupported = 1;
+  _optional_support.isThreadContentionMonitoringSupported = 1;
+
+  // CPU-time capabilities depend on the OS.
+  if (os::is_thread_cpu_time_supported()) {
+    _optional_support.isCurrentThreadCpuTimeSupported = 1;
+    _optional_support.isOtherThreadCpuTimeSupported = 1;
+  } else {
+    _optional_support.isCurrentThreadCpuTimeSupported = 0;
+    _optional_support.isOtherThreadCpuTimeSupported = 0;
+  }
+  _optional_support.isBootClassPathSupported = 1;
+  _optional_support.isObjectMonitorUsageSupported = 1;
+#ifndef SERVICES_KERNEL
+  // This depends on the heap inspector
+  _optional_support.isSynchronizerUsageSupported = 1;
+#endif // SERVICES_KERNEL
+}
+
+// Post-init phase: starts the low memory detector thread and, when
+// -Dcom.sun.management.* requested it (ManagementServer flag), starts
+// the JMX management agent by calling sun.management.Agent.startAgent().
+void Management::initialize(TRAPS) {
+  // Start the low memory detector thread
+  LowMemoryDetector::initialize();
+
+  if (ManagementServer) {
+    ResourceMark rm(THREAD);
+    HandleMark hm(THREAD);
+
+    // Load and initialize the sun.management.Agent class
+    // invoke startAgent method to start the management server
+    Handle loader = Handle(THREAD, SystemDictionary::java_system_loader());
+    klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::sun_management_Agent(),
+                                                   loader,
+                                                   Handle(),
+                                                   true,
+                                                   CHECK);
+    instanceKlassHandle ik (THREAD, k);
+
+    JavaValue result(T_VOID);
+    JavaCalls::call_static(&result,
+                           ik,
+                           vmSymbolHandles::startAgent_name(),
+                           vmSymbolHandles::void_method_signature(),
+                           CHECK);
+  }
+}
+
+// Copies the VM's optional-capability flags into the caller's struct.
+void Management::get_optional_support(jmmOptionalSupport* support) {
+  *support = _optional_support;
+}
+
+// Resolves the class named by sh, runs its static initializer if it has
+// not run yet, and returns the klassOop (NULL with a pending exception
+// on failure, via CHECK_NULL).
+klassOop Management::load_and_initialize_klass(symbolHandle sh, TRAPS) {
+  klassOop k = SystemDictionary::resolve_or_fail(sh, true, CHECK_NULL);
+  instanceKlassHandle ik (THREAD, k);
+  if (ik->should_be_initialized()) {
+    ik->initialize(CHECK_NULL);
+  }
+  return ik();
+}
+
+// Records VM creation begin/end times into the perf counters and then
+// makes the PerfMemory region visible to external observers.
+void Management::record_vm_startup_time(jlong begin, jlong duration) {
+  // if the performance counter is not initialized,
+  // then vm initialization failed; simply return.
+  if (_begin_vm_creation_time == NULL) return;
+
+  _begin_vm_creation_time->set_value(begin);
+  _end_vm_creation_time->set_value(begin + duration);
+  PerfMemory::set_accessible(true);
+}
+
+// Elapsed ticks since the Management base timestamp (_stamp).
+jlong Management::timestamp() {
+  TimeStamp now;
+  now.update();
+  return now.ticks() - _stamp.ticks();
+}
+
+// GC root enumeration: visits every oop held by the management
+// subsystem, including the lazily-cached klassOops below, so they stay
+// alive and are updated if the collector moves them.
+void Management::oops_do(OopClosure* f) {
+  MemoryService::oops_do(f);
+  ThreadService::oops_do(f);
+
+  f->do_oop((oop*) &_sensor_klass);
+  f->do_oop((oop*) &_threadInfo_klass);
+  f->do_oop((oop*) &_memoryUsage_klass);
+  f->do_oop((oop*) &_memoryPoolMXBean_klass);
+  f->do_oop((oop*) &_memoryManagerMXBean_klass);
+  f->do_oop((oop*) &_garbageCollectorMXBean_klass);
+  f->do_oop((oop*) &_managementFactory_klass);
+}
+
+// Accessors for management-related classes. Each class is resolved and
+// initialized on first use, then cached in a static that is treated as
+// a GC root by Management::oops_do(). All return NULL with a pending
+// exception (CHECK_NULL) if resolution or initialization fails.
+klassOop Management::java_lang_management_ThreadInfo_klass(TRAPS) {
+  if (_threadInfo_klass == NULL) {
+    _threadInfo_klass = load_and_initialize_klass(vmSymbolHandles::java_lang_management_ThreadInfo(), CHECK_NULL);
+  }
+  return _threadInfo_klass;
+}
+
+klassOop Management::java_lang_management_MemoryUsage_klass(TRAPS) {
+  if (_memoryUsage_klass == NULL) {
+    _memoryUsage_klass = load_and_initialize_klass(vmSymbolHandles::java_lang_management_MemoryUsage(), CHECK_NULL);
+  }
+  return _memoryUsage_klass;
+}
+
+klassOop Management::java_lang_management_MemoryPoolMXBean_klass(TRAPS) {
+  if (_memoryPoolMXBean_klass == NULL) {
+    _memoryPoolMXBean_klass = load_and_initialize_klass(vmSymbolHandles::java_lang_management_MemoryPoolMXBean(), CHECK_NULL);
+  }
+  return _memoryPoolMXBean_klass;
+}
+
+klassOop Management::java_lang_management_MemoryManagerMXBean_klass(TRAPS) {
+  if (_memoryManagerMXBean_klass == NULL) {
+    _memoryManagerMXBean_klass = load_and_initialize_klass(vmSymbolHandles::java_lang_management_MemoryManagerMXBean(), CHECK_NULL);
+  }
+  return _memoryManagerMXBean_klass;
+}
+
+klassOop Management::java_lang_management_GarbageCollectorMXBean_klass(TRAPS) {
+  if (_garbageCollectorMXBean_klass == NULL) {
+    _garbageCollectorMXBean_klass = load_and_initialize_klass(vmSymbolHandles::java_lang_management_GarbageCollectorMXBean(), CHECK_NULL);
+  }
+  return _garbageCollectorMXBean_klass;
+}
+
+klassOop Management::sun_management_Sensor_klass(TRAPS) {
+  if (_sensor_klass == NULL) {
+    _sensor_klass = load_and_initialize_klass(vmSymbolHandles::sun_management_Sensor(), CHECK_NULL);
+  }
+  return _sensor_klass;
+}
+
+klassOop Management::sun_management_ManagementFactory_klass(TRAPS) {
+  if (_managementFactory_klass == NULL) {
+    _managementFactory_klass = load_and_initialize_klass(vmSymbolHandles::sun_management_ManagementFactory(), CHECK_NULL);
+  }
+  return _managementFactory_klass;
+}
+
+// Pushes the common ThreadInfo constructor arguments (thread, status,
+// blocker info, contention counts/times, stack trace) onto args from a
+// ThreadSnapshot. The receiver must already have been pushed by the caller.
+static void initialize_ThreadInfo_constructor_arguments(JavaCallArguments* args, ThreadSnapshot* snapshot, TRAPS) {
+  Handle snapshot_thread(THREAD, snapshot->threadObj());
+
+  jlong contended_time;
+  jlong waited_time;
+  if (ThreadService::is_thread_monitoring_contention()) {
+    contended_time = Management::ticks_to_ms(snapshot->contended_enter_ticks());
+    waited_time = Management::ticks_to_ms(snapshot->monitor_wait_ticks() + snapshot->sleep_ticks());
+  } else {
+    // set them to -1 (max_julong reinterpreted as a signed jlong) if
+    // thread contention monitoring is disabled.
+    contended_time = max_julong;
+    waited_time = max_julong;
+  }
+
+  int thread_status = snapshot->thread_status();
+  assert((thread_status & JMM_THREAD_STATE_FLAG_MASK) == 0, "Flags already set in thread_status in Thread object");
+  // Fold the suspended / in-native flag bits into the status value.
+  if (snapshot->is_ext_suspended()) {
+    thread_status |= JMM_THREAD_STATE_FLAG_SUSPENDED;
+  }
+  if (snapshot->is_in_native()) {
+    thread_status |= JMM_THREAD_STATE_FLAG_NATIVE;
+  }
+
+  ThreadStackTrace* st = snapshot->get_stack_trace();
+  Handle stacktrace_h;
+  if (st != NULL) {
+    stacktrace_h = st->allocate_fill_stack_trace_element_array(CHECK);
+  } else {
+    stacktrace_h = Handle();
+  }
+
+  // Argument order must match the ThreadInfo constructor signatures.
+  args->push_oop(snapshot_thread);
+  args->push_int(thread_status);
+  args->push_oop(Handle(THREAD, snapshot->blocker_object()));
+  args->push_oop(Handle(THREAD, snapshot->blocker_object_owner()));
+  args->push_long(snapshot->contended_enter_count());
+  args->push_long(contended_time);
+  args->push_long(snapshot->monitor_wait_count() + snapshot->sleep_count());
+  args->push_long(waited_time);
+  args->push_oop(stacktrace_h);
+}
+
+// Helper function to construct a ThreadInfo object (variant without
+// locked monitors/synchronizers); returns the new instance or NULL with
+// a pending exception.
+instanceOop Management::create_thread_info_instance(ThreadSnapshot* snapshot, TRAPS) {
+  klassOop k = Management::java_lang_management_ThreadInfo_klass(CHECK_NULL);
+  instanceKlassHandle ik (THREAD, k);
+
+  JavaValue result(T_VOID);
+  JavaCallArguments args(14);
+
+  // First allocate a ThreadObj object and
+  // push the receiver as the first argument
+  Handle element = ik->allocate_instance_handle(CHECK_NULL);
+  args.push_oop(element);
+
+  // initialize the arguments for the ThreadInfo constructor
+  initialize_ThreadInfo_constructor_arguments(&args, snapshot, CHECK_NULL);
+
+  // Call ThreadInfo constructor with no locked monitors and synchronizers
+  JavaCalls::call_special(&result,
+                          ik,
+                          vmSymbolHandles::object_initializer_name(),
+                          vmSymbolHandles::java_lang_management_ThreadInfo_constructor_signature(),
+                          &args,
+                          CHECK_NULL);
+
+  return (instanceOop) element();
+}
+
+// Overload that additionally passes locked-monitor, monitor-depth and
+// locked-synchronizer arrays to the ThreadInfo "with locks" constructor.
+instanceOop Management::create_thread_info_instance(ThreadSnapshot* snapshot,
+                                                    objArrayHandle monitors_array,
+                                                    typeArrayHandle depths_array,
+                                                    objArrayHandle synchronizers_array,
+                                                    TRAPS) {
+  klassOop k = Management::java_lang_management_ThreadInfo_klass(CHECK_NULL);
+  instanceKlassHandle ik (THREAD, k);
+
+  JavaValue result(T_VOID);
+  JavaCallArguments args(17);
+
+  // First allocate a ThreadObj object and
+  // push the receiver as the first argument
+  Handle element = ik->allocate_instance_handle(CHECK_NULL);
+  args.push_oop(element);
+
+  // initialize the arguments for the ThreadInfo constructor
+  initialize_ThreadInfo_constructor_arguments(&args, snapshot, CHECK_NULL);
+
+  // push the locked monitors and synchronizers in the arguments
+  args.push_oop(monitors_array);
+  args.push_oop(depths_array);
+  args.push_oop(synchronizers_array);
+
+  // Call ThreadInfo constructor with locked monitors and synchronizers
+  JavaCalls::call_special(&result,
+                          ik,
+                          vmSymbolHandles::object_initializer_name(),
+                          vmSymbolHandles::java_lang_management_ThreadInfo_with_locks_constructor_signature(),
+                          &args,
+                          CHECK_NULL);
+
+  return (instanceOop) element();
+}
+
+// Helper functions
+// Linear scan of the live thread list for a non-exiting JavaThread
+// whose java.lang.Thread id equals thread_id; NULL if none matches.
+// Caller must hold the Threads_lock so the list cannot change under us.
+static JavaThread* find_java_thread_from_id(jlong thread_id) {
+  assert(Threads_lock->owned_by_self(), "Must hold Threads_lock");
+
+  // Sequential search for now. Need to do better optimization later.
+  for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) {
+    oop tobj = t->threadObj();
+    if (t->is_exiting() || tobj == NULL) {
+      continue;   // skip threads without a live Java-level mirror
+    }
+    if (java_lang_Thread::thread_id(tobj) == thread_id) {
+      return t;
+    }
+  }
+  return NULL;
+}
+
+// Resolves mgr to its VM-side GCMemoryManager. Throws NPE for NULL,
+// IllegalArgumentException if mgr is not a GarbageCollectorMXBean or
+// does not map to a GC memory manager; NULL is returned with the
+// pending exception in those cases.
+static GCMemoryManager* get_gc_memory_manager_from_jobject(jobject mgr, TRAPS) {
+  if (mgr == NULL) {
+    THROW_(vmSymbols::java_lang_NullPointerException(), NULL);
+  }
+  oop mgr_obj = JNIHandles::resolve(mgr);
+  instanceHandle h(THREAD, (instanceOop) mgr_obj);
+
+  klassOop k = Management::java_lang_management_GarbageCollectorMXBean_klass(CHECK_NULL);
+  if (!h->is_a(k)) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+               "the object is not an instance of java.lang.management.GarbageCollectorMXBean class",
+               NULL);
+  }
+
+  MemoryManager* gc = MemoryService::get_memory_manager(h);
+  if (gc == NULL || !gc->is_gc_memory_manager()) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+               "Invalid GC memory manager",
+               NULL);
+  }
+  return (GCMemoryManager*) gc;
+}
+
+// Resolves obj (a MemoryPoolMXBean) to its VM-side MemoryPool.
+// Throws NPE for NULL; otherwise delegates lookup to MemoryService.
+static MemoryPool* get_memory_pool_from_jobject(jobject obj, TRAPS) {
+  if (obj == NULL) {
+    THROW_(vmSymbols::java_lang_NullPointerException(), NULL);
+  }
+
+  oop pool_obj = JNIHandles::resolve(obj);
+  assert(pool_obj->is_instance(), "Should be an instanceOop");
+  instanceHandle ph(THREAD, (instanceOop) pool_obj);
+
+  return MemoryService::get_memory_pool(ph);
+}
+
+// Validates that ids_ah is a non-empty long[] of positive thread IDs;
+// throws IllegalArgumentException otherwise (THROW_MSG returns to the
+// caller with the exception pending).
+static void validate_thread_id_array(typeArrayHandle ids_ah, TRAPS) {
+  int num_threads = ids_ah->length();
+  // should be non-empty array
+  if (num_threads == 0) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Empty array of thread IDs");
+  }
+
+  // Validate input thread IDs: every entry must be a positive id.
+  // (Index is loop-scoped; it was previously declared at function scope
+  // without any use after the loop.)
+  for (int i = 0; i < num_threads; i++) {
+    jlong tid = ids_ah->long_at(i);
+    if (tid <= 0) {
+      // throw exception if invalid thread id.
+      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+                "Invalid thread ID entry");
+    }
+  }
+}
+
+// Validates that infoArray_h is declared as exactly ThreadInfo[];
+// throws IllegalArgumentException for any other element type.
+static void validate_thread_info_array(objArrayHandle infoArray_h, TRAPS) {
+
+  // check if the element of infoArray is of type ThreadInfo class
+  klassOop threadinfo_klass = Management::java_lang_management_ThreadInfo_klass(CHECK);
+  klassOop element_klass = objArrayKlass::cast(infoArray_h->klass())->element_klass();
+  if (element_klass != threadinfo_klass) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "infoArray element type is not ThreadInfo class");
+  }
+
+}
+
+
+// Resolves obj (a MemoryManagerMXBean) to its VM-side MemoryManager.
+// Throws NPE for NULL; otherwise delegates lookup to MemoryService.
+static MemoryManager* get_memory_manager_from_jobject(jobject obj, TRAPS) {
+  if (obj == NULL) {
+    THROW_(vmSymbols::java_lang_NullPointerException(), NULL);
+  }
+
+  oop mgr_obj = JNIHandles::resolve(obj);
+  assert(mgr_obj->is_instance(), "Should be an instanceOop");
+  instanceHandle mh(THREAD, (instanceOop) mgr_obj);
+
+  return MemoryService::get_memory_manager(mh);
+}
+
+// Returns the version of the JMM interface implemented by this VM.
+// (The function takes no out-parameters; a previous comment describing
+// major/minor out-parameters was stale.)
+JVM_LEAF(jint, jmm_GetVersion(JNIEnv *env))
+  return JMM_VERSION;
+JVM_END
+
+// Gets the list of VM monitoring and management optional supports
+// Returns 0 if succeeded; otherwise returns non-zero (-1 for a NULL
+// output pointer).
+JVM_LEAF(jint, jmm_GetOptionalSupport(JNIEnv *env, jmmOptionalSupport* support))
+  if (support == NULL) {
+    return -1;
+  }
+  Management::get_optional_support(support);
+  return 0;
+JVM_END
+
+// Returns a java.lang.String object containing the input arguments to the VM,
+// i.e. all -XX flags followed by all other JVM arguments, space-separated
+// in processing order. NULL if the VM received no arguments.
+JVM_ENTRY(jobject, jmm_GetInputArguments(JNIEnv *env))
+  ResourceMark rm(THREAD);
+
+  if (Arguments::num_jvm_args() == 0 && Arguments::num_jvm_flags() == 0) {
+    return NULL;
+  }
+
+  char** vm_flags = Arguments::jvm_flags_array();
+  char** vm_args = Arguments::jvm_args_array();
+  int num_flags = Arguments::num_jvm_flags();
+  int num_args = Arguments::num_jvm_args();
+
+  // Buffer size: every argument's strlen + (N-1) separating spaces +
+  // the trailing '\0' accounted for by the initial 1.
+  size_t length = 1; // null terminator
+  int i;
+  for (i = 0; i < num_flags; i++) {
+    length += strlen(vm_flags[i]);
+  }
+  for (i = 0; i < num_args; i++) {
+    length += strlen(vm_args[i]);
+  }
+  // add a space between each argument
+  length += num_flags + num_args - 1;
+
+  // Return the list of input arguments passed to the VM
+  // and preserve the order that the VM processes.
+  char* args = NEW_RESOURCE_ARRAY(char, length);
+  args[0] = '\0';
+  // concatenate all jvm_flags
+  if (num_flags > 0) {
+    strcat(args, vm_flags[0]);
+    for (i = 1; i < num_flags; i++) {
+      strcat(args, " ");
+      strcat(args, vm_flags[i]);
+    }
+  }
+
+  if (num_args > 0 && num_flags > 0) {
+    // append a space if args already contains one or more jvm_flags
+    strcat(args, " ");
+  }
+
+  // concatenate all jvm_args
+  if (num_args > 0) {
+    strcat(args, vm_args[0]);
+    for (i = 1; i < num_args; i++) {
+      strcat(args, " ");
+      strcat(args, vm_args[i]);
+    }
+  }
+
+  Handle hargs = java_lang_String::create_from_platform_dependent_str(args, CHECK_NULL);
+  return JNIHandles::make_local(env, hargs());
+JVM_END
+
+// Returns an array of java.lang.String object containing the input arguments
+// to the VM: all -XX flags first, then all other JVM args, one String each,
+// in processing order. NULL if the VM received no arguments.
+JVM_ENTRY(jobjectArray, jmm_GetInputArgumentArray(JNIEnv *env))
+  ResourceMark rm(THREAD);
+
+  if (Arguments::num_jvm_args() == 0 && Arguments::num_jvm_flags() == 0) {
+    return NULL;
+  }
+
+  char** vm_flags = Arguments::jvm_flags_array();
+  char** vm_args = Arguments::jvm_args_array();
+  int num_flags = Arguments::num_jvm_flags();
+  int num_args = Arguments::num_jvm_args();
+
+  instanceKlassHandle ik (THREAD, SystemDictionary::string_klass());
+  objArrayOop r = oopFactory::new_objArray(ik(), num_args + num_flags, CHECK_NULL);
+  objArrayHandle result_h(THREAD, r);
+
+  // Flags occupy the leading slots, remaining args follow.
+  int index = 0;
+  for (int j = 0; j < num_flags; j++, index++) {
+    Handle h = java_lang_String::create_from_platform_dependent_str(vm_flags[j], CHECK_NULL);
+    result_h->obj_at_put(index, h());
+  }
+  for (int i = 0; i < num_args; i++, index++) {
+    Handle h = java_lang_String::create_from_platform_dependent_str(vm_args[i], CHECK_NULL);
+    result_h->obj_at_put(index, h());
+  }
+  return (jobjectArray) JNIHandles::make_local(env, result_h());
+JVM_END
+
+// Returns an array of java/lang/management/MemoryPoolMXBean object
+// one for each memory pool if obj == null; otherwise returns
+// an array of memory pools for a given memory manager if
+// it is a valid memory manager.
+JVM_ENTRY(jobjectArray, jmm_GetMemoryPools(JNIEnv* env, jobject obj))
+  ResourceMark rm(THREAD);
+
+  int num_memory_pools;
+  MemoryManager* mgr = NULL;
+  if (obj == NULL) {
+    num_memory_pools = MemoryService::num_memory_pools();
+  } else {
+    mgr = get_memory_manager_from_jobject(obj, CHECK_NULL);
+    if (mgr == NULL) {
+      return NULL;
+    }
+    num_memory_pools = mgr->num_memory_pools();
+  }
+
+  // Allocate the resulting MemoryPoolMXBean[] object
+  klassOop k = Management::java_lang_management_MemoryPoolMXBean_klass(CHECK_NULL);
+  instanceKlassHandle ik (THREAD, k);
+  objArrayOop r = oopFactory::new_objArray(ik(), num_memory_pools, CHECK_NULL);
+  objArrayHandle poolArray(THREAD, r);
+
+  if (mgr == NULL) {
+    // Get all memory pools
+    for (int i = 0; i < num_memory_pools; i++) {
+      MemoryPool* pool = MemoryService::get_memory_pool(i);
+      instanceOop p = pool->get_memory_pool_instance(CHECK_NULL);
+      instanceHandle ph(THREAD, p);
+      poolArray->obj_at_put(i, ph());
+    }
+  } else {
+    // Get memory pools managed by a given memory manager
+    for (int i = 0; i < num_memory_pools; i++) {
+      MemoryPool* pool = mgr->get_memory_pool(i);
+      instanceOop p = pool->get_memory_pool_instance(CHECK_NULL);
+      instanceHandle ph(THREAD, p);
+      poolArray->obj_at_put(i, ph());
+    }
+  }
+  return (jobjectArray) JNIHandles::make_local(env, poolArray());
+JVM_END
+
+// Returns an array of java/lang/management/MemoryManagerMXBean object
+// one for each memory manager if obj == null; otherwise returns
+// an array of memory managers for a given memory pool if
+// it is a valid memory pool.
+JVM_ENTRY(jobjectArray, jmm_GetMemoryManagers(JNIEnv* env, jobject obj))
+  ResourceMark rm(THREAD);
+
+  int num_mgrs;
+  MemoryPool* pool = NULL;
+  if (obj == NULL) {
+    num_mgrs = MemoryService::num_memory_managers();
+  } else {
+    pool = get_memory_pool_from_jobject(obj, CHECK_NULL);
+    if (pool == NULL) {
+      return NULL;
+    }
+    num_mgrs = pool->num_memory_managers();
+  }
+
+  // Allocate the resulting MemoryManagerMXBean[] object
+  klassOop k = Management::java_lang_management_MemoryManagerMXBean_klass(CHECK_NULL);
+  instanceKlassHandle ik (THREAD, k);
+  objArrayOop r = oopFactory::new_objArray(ik(), num_mgrs, CHECK_NULL);
+  objArrayHandle mgrArray(THREAD, r);
+
+  if (pool == NULL) {
+    // Get all memory managers
+    for (int i = 0; i < num_mgrs; i++) {
+      MemoryManager* mgr = MemoryService::get_memory_manager(i);
+      instanceOop p = mgr->get_memory_manager_instance(CHECK_NULL);
+      instanceHandle ph(THREAD, p);
+      mgrArray->obj_at_put(i, ph());
+    }
+  } else {
+    // Get memory managers for a given memory pool
+    for (int i = 0; i < num_mgrs; i++) {
+      MemoryManager* mgr = pool->get_memory_manager(i);
+      instanceOop p = mgr->get_memory_manager_instance(CHECK_NULL);
+      instanceHandle ph(THREAD, p);
+      mgrArray->obj_at_put(i, ph());
+    }
+  }
+  return (jobjectArray) JNIHandles::make_local(env, mgrArray());
+JVM_END
+
+
+// Returns a java/lang/management/MemoryUsage object containing the current
+// memory usage of a given memory pool; NULL if the pool is unknown.
+JVM_ENTRY(jobject, jmm_GetMemoryPoolUsage(JNIEnv* env, jobject obj))
+  ResourceMark rm(THREAD);
+
+  MemoryPool* pool = get_memory_pool_from_jobject(obj, CHECK_NULL);
+  if (pool != NULL) {
+    MemoryUsage usage = pool->get_memory_usage();
+    Handle h = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
+    return JNIHandles::make_local(env, h());
+  } else {
+    return NULL;
+  }
+JVM_END
+
+// Returns a java/lang/management/MemoryUsage object containing the PEAK
+// memory usage of a given memory pool; NULL if the pool is unknown.
+// (Previous comment failed to mention "peak".)
+JVM_ENTRY(jobject, jmm_GetPeakMemoryPoolUsage(JNIEnv* env, jobject obj))
+  ResourceMark rm(THREAD);
+
+  MemoryPool* pool = get_memory_pool_from_jobject(obj, CHECK_NULL);
+  if (pool != NULL) {
+    MemoryUsage usage = pool->get_peak_memory_usage();
+    Handle h = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
+    return JNIHandles::make_local(env, h());
+  } else {
+    return NULL;
+  }
+JVM_END
+
+// Returns a java/lang/management/MemoryUsage object containing the memory usage
+// of a given memory pool after most recent GC; NULL for non-collected pools.
+JVM_ENTRY(jobject, jmm_GetPoolCollectionUsage(JNIEnv* env, jobject obj))
+  ResourceMark rm(THREAD);
+
+  MemoryPool* pool = get_memory_pool_from_jobject(obj, CHECK_NULL);
+  if (pool != NULL && pool->is_collected_pool()) {
+    MemoryUsage usage = pool->get_last_collection_usage();
+    Handle h = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
+    return JNIHandles::make_local(env, h());
+  } else {
+    return NULL;
+  }
+JVM_END
+
+// Sets the memory pool sensor for a threshold type. The sensorObj must
+// be a sun.management.Sensor instance; usage and collection-usage
+// thresholds each share a single sensor for their high and low sides.
+JVM_ENTRY(void, jmm_SetPoolSensor(JNIEnv* env, jobject obj, jmmThresholdType type, jobject sensorObj))
+  if (obj == NULL || sensorObj == NULL) {
+    THROW(vmSymbols::java_lang_NullPointerException());
+  }
+
+  klassOop sensor_klass = Management::sun_management_Sensor_klass(CHECK);
+  oop s = JNIHandles::resolve(sensorObj);
+  assert(s->is_instance(), "Sensor should be an instanceOop");
+  instanceHandle sensor_h(THREAD, (instanceOop) s);
+  if (!sensor_h->is_a(sensor_klass)) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Sensor is not an instance of sun.management.Sensor class");
+  }
+
+  MemoryPool* mpool = get_memory_pool_from_jobject(obj, CHECK);
+  assert(mpool != NULL, "MemoryPool should exist");
+
+  switch (type) {
+    case JMM_USAGE_THRESHOLD_HIGH:
+    case JMM_USAGE_THRESHOLD_LOW:
+      // have only one sensor for threshold high and low
+      mpool->set_usage_sensor_obj(sensor_h);
+      break;
+    case JMM_COLLECTION_USAGE_THRESHOLD_HIGH:
+    case JMM_COLLECTION_USAGE_THRESHOLD_LOW:
+      // have only one sensor for threshold high and low
+      mpool->set_gc_usage_sensor_obj(sensor_h);
+      break;
+    default:
+      assert(false, "Unrecognized type");
+  }
+
+JVM_END
+
+
+// Sets the threshold of a given memory pool.
+// Returns the previous threshold.
+//
+// Input parameters:
+// pool - the MemoryPoolMXBean object
+// type - threshold type
+// threshold - the new threshold (must not be negative)
+//
+JVM_ENTRY(jlong, jmm_SetPoolThreshold(JNIEnv* env, jobject obj, jmmThresholdType type, jlong threshold))
+ if (threshold < 0) {
+ THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+ "Invalid threshold value",
+ -1);
+ }
+
+ if (threshold > max_intx) {
+ THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+ "Invalid threshold value > max value of size_t",
+ -1);
+ }
+
+ MemoryPool* pool = get_memory_pool_from_jobject(obj, CHECK_(0L));
+ assert(pool != NULL, "MemoryPool should exist");
+
+ jlong prev = 0;
+ switch (type) {
+ case JMM_USAGE_THRESHOLD_HIGH:
+ if (!pool->usage_threshold()->is_high_threshold_supported()) {
+ return -1;
+ }
+ prev = pool->usage_threshold()->set_high_threshold((size_t) threshold);
+ break;
+
+ case JMM_USAGE_THRESHOLD_LOW:
+ if (!pool->usage_threshold()->is_low_threshold_supported()) {
+ return -1;
+ }
+ prev = pool->usage_threshold()->set_low_threshold((size_t) threshold);
+ break;
+
+ case JMM_COLLECTION_USAGE_THRESHOLD_HIGH:
+ if (!pool->gc_usage_threshold()->is_high_threshold_supported()) {
+ return -1;
+ }
+ // return and the new threshold is effective for the next GC
+ return pool->gc_usage_threshold()->set_high_threshold((size_t) threshold);
+
+ case JMM_COLLECTION_USAGE_THRESHOLD_LOW:
+ if (!pool->gc_usage_threshold()->is_low_threshold_supported()) {
+ return -1;
+ }
+ // return and the new threshold is effective for the next GC
+ return pool->gc_usage_threshold()->set_low_threshold((size_t) threshold);
+
+ default:
+ assert(false, "Unrecognized type");
+ return -1;
+ }
+
+ // When the threshold is changed, reevaluate if the low memory
+ // detection is enabled.
+ if (prev != threshold) {
+ LowMemoryDetector::recompute_enabled_for_collected_pools();
+ LowMemoryDetector::detect_low_memory(pool);
+ }
+ return prev;
+JVM_END
+
+// Returns a java/lang/management/MemoryUsage object representing
+// the memory usage for the heap or non-heap memory, by summing the
+// usage of every matching pool. For the heap, the init and max values
+// come from the heap itself rather than the per-pool sums.
+JVM_ENTRY(jobject, jmm_GetMemoryUsage(JNIEnv* env, jboolean heap))
+  ResourceMark rm(THREAD);
+
+  // Calculate the memory usage
+  size_t total_init = 0;
+  size_t total_used = 0;
+  size_t total_committed = 0;
+  size_t total_max = 0;
+  bool   has_undefined_init_size = false;
+  bool   has_undefined_max_size = false;
+
+  for (int i = 0; i < MemoryService::num_memory_pools(); i++) {
+    MemoryPool* pool = MemoryService::get_memory_pool(i);
+    if ((heap && pool->is_heap()) || (!heap && pool->is_non_heap())) {
+      MemoryUsage u = pool->get_memory_usage();
+      total_used += u.used();
+      total_committed += u.committed();
+
+      // if any one of the memory pool has undefined init_size or max_size,
+      // set it to -1
+      if (u.init_size() == (size_t)-1) {
+        has_undefined_init_size = true;
+      }
+      if (!has_undefined_init_size) {
+        total_init += u.init_size();
+      }
+
+      if (u.max_size() == (size_t)-1) {
+        has_undefined_max_size = true;
+      }
+      if (!has_undefined_max_size) {
+        total_max += u.max_size();
+      }
+    }
+  }
+
+  // In our current implementation, all pools should have
+  // defined init and max size
+  assert(!has_undefined_init_size, "Undefined init size");
+  assert(!has_undefined_max_size, "Undefined max size");
+
+  MemoryUsage usage((heap ? Arguments::initial_heap_size() : total_init),
+                    total_used,
+                    total_committed,
+                    (heap ? Universe::heap()->max_capacity() : total_max));
+
+  Handle obj = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
+  return JNIHandles::make_local(env, obj());
+JVM_END
+
+// Returns the boolean value of a given attribute, dispatching to the
+// owning service; false (with a debug assert) for unknown attributes.
+JVM_LEAF(jboolean, jmm_GetBoolAttribute(JNIEnv *env, jmmBoolAttribute att))
+  switch (att) {
+  case JMM_VERBOSE_GC:
+    return MemoryService::get_verbose();
+  case JMM_VERBOSE_CLASS:
+    return ClassLoadingService::get_verbose();
+  case JMM_THREAD_CONTENTION_MONITORING:
+    return ThreadService::is_thread_monitoring_contention();
+  case JMM_THREAD_CPU_TIME:
+    return ThreadService::is_thread_cpu_time_enabled();
+  default:
+    assert(0, "Unrecognized attribute");
+    return false;
+  }
+JVM_END
+
+// Sets the given boolean attribute and returns the previous value.
+JVM_ENTRY(jboolean, jmm_SetBoolAttribute(JNIEnv *env, jmmBoolAttribute att, jboolean flag))
+  switch (att) {
+  case JMM_VERBOSE_GC:
+    return MemoryService::set_verbose(flag != 0);
+  case JMM_VERBOSE_CLASS:
+    return ClassLoadingService::set_verbose(flag != 0);
+  case JMM_THREAD_CONTENTION_MONITORING:
+    return ThreadService::set_thread_monitoring_contention(flag != 0);
+  case JMM_THREAD_CPU_TIME:
+    return ThreadService::set_thread_cpu_time_enabled(flag != 0);
+  default:
+    assert(0, "Unrecognized attribute");
+    return false;
+  }
+JVM_END
+
+
+// Maps a per-GC-manager long attribute to its value; -1 (with a debug
+// assert) for unrecognized attributes.
+static jlong get_gc_attribute(GCMemoryManager* mgr, jmmLongAttribute att) {
+  switch (att) {
+  case JMM_GC_TIME_MS: return mgr->gc_time_ms();
+  case JMM_GC_COUNT:   return mgr->gc_count();
+  case JMM_GC_EXT_ATTRIBUTE_INFO_SIZE:
+    // current implementation only has 1 ext attribute
+    return 1;
+  default:
+    assert(0, "Unrecognized GC attribute");
+    return -1;
+  }
+}
+
+// Thread closure that counts VM-internal threads, i.e. every thread
+// that is NOT an externally visible JavaThread.
+class VmThreadCountClosure: public ThreadClosure {
+ private:
+  int _count;
+ public:
+  VmThreadCountClosure() : _count(0) {};
+  void do_thread(Thread* thread);
+  int count() { return _count; }
+};
+
+void VmThreadCountClosure::do_thread(Thread* thread) {
+  // exclude externally visible JavaThreads
+  if (thread->is_Java_thread() && !thread->is_hidden_from_external_view()) {
+    return;
+  }
+
+  _count++;
+}
+
+// Snapshot count of VM-internal threads, taken under the Threads_lock.
+static jint get_vm_thread_count() {
+  VmThreadCountClosure vmtcc;
+  {
+    MutexLockerEx ml(Threads_lock);
+    Threads::threads_do(&vmtcc);
+  }
+
+  return vmtcc.count();
+}
+
+// Counts the VM flags that are visible to management clients.
+static jint get_num_flags() {
+  // last flag entry is always NULL, so subtract 1
+  int nFlags = (int) Flag::numFlags - 1;
+  int count = 0;
+  for (int i = 0; i < nFlags; i++) {
+    Flag* flag = &Flag::flags[i];
+    // Count only unlocked flags and unlocker flags (original comment
+    // said "Exclude the diagnostic flags" — presumably locked diagnostic
+    // flags fail is_unlocked(); confirm against Flag's definition).
+    if (flag->is_unlocked() || flag->is_unlocker()) {
+      count++;
+    }
+  }
+  return count;
+}
+
+// Maps a VM-global long attribute to its value by dispatching to the
+// owning service; -1 for unrecognized attributes (note that some valid
+// attributes may also legitimately return -1).
+static jlong get_long_attribute(jmmLongAttribute att) {
+  switch (att) {
+  case JMM_CLASS_LOADED_COUNT:
+    return ClassLoadingService::loaded_class_count();
+
+  case JMM_CLASS_UNLOADED_COUNT:
+    return ClassLoadingService::unloaded_class_count();
+
+  case JMM_THREAD_TOTAL_COUNT:
+    return ThreadService::get_total_thread_count();
+
+  case JMM_THREAD_LIVE_COUNT:
+    return ThreadService::get_live_thread_count();
+
+  case JMM_THREAD_PEAK_COUNT:
+    return ThreadService::get_peak_thread_count();
+
+  case JMM_THREAD_DAEMON_COUNT:
+    return ThreadService::get_daemon_thread_count();
+
+  case JMM_JVM_INIT_DONE_TIME_MS:
+    return Management::vm_init_done_time();
+
+  case JMM_COMPILE_TOTAL_TIME_MS:
+    return Management::ticks_to_ms(CompileBroker::total_compilation_ticks());
+
+  case JMM_OS_PROCESS_ID:
+    return os::current_process_id();
+
+  // Hotspot-specific counters
+  case JMM_CLASS_LOADED_BYTES:
+    return ClassLoadingService::loaded_class_bytes();
+
+  case JMM_CLASS_UNLOADED_BYTES:
+    return ClassLoadingService::unloaded_class_bytes();
+
+  case JMM_SHARED_CLASS_LOADED_COUNT:
+    return ClassLoadingService::loaded_shared_class_count();
+
+  case JMM_SHARED_CLASS_UNLOADED_COUNT:
+    return ClassLoadingService::unloaded_shared_class_count();
+
+  case JMM_SHARED_CLASS_LOADED_BYTES:
+    return ClassLoadingService::loaded_shared_class_bytes();
+
+  case JMM_SHARED_CLASS_UNLOADED_BYTES:
+    return ClassLoadingService::unloaded_shared_class_bytes();
+
+  case JMM_TOTAL_CLASSLOAD_TIME_MS:
+    return ClassLoader::classloader_time_ms();
+
+  case JMM_VM_GLOBAL_COUNT:
+    return get_num_flags();
+
+  case JMM_SAFEPOINT_COUNT:
+    return RuntimeService::safepoint_count();
+
+  case JMM_TOTAL_SAFEPOINTSYNC_TIME_MS:
+    return RuntimeService::safepoint_sync_time_ms();
+
+  case JMM_TOTAL_STOPPED_TIME_MS:
+    return RuntimeService::safepoint_time_ms();
+
+  case JMM_TOTAL_APP_TIME_MS:
+    return RuntimeService::application_time_ms();
+
+  case JMM_VM_THREAD_COUNT:
+    return get_vm_thread_count();
+
+  case JMM_CLASS_INIT_TOTAL_COUNT:
+    return ClassLoader::class_init_count();
+
+  case JMM_CLASS_INIT_TOTAL_TIME_MS:
+    return ClassLoader::class_init_time_ms();
+
+  case JMM_CLASS_VERIFY_TOTAL_TIME_MS:
+    return ClassLoader::class_verify_time_ms();
+
+  case JMM_METHOD_DATA_SIZE_BYTES:
+    return ClassLoadingService::class_method_data_size();
+
+  case JMM_OS_MEM_TOTAL_PHYSICAL_BYTES:
+    return os::physical_memory();
+
+  default:
+    return -1;
+  }
+}
+
+
+// Returns the long value of a given attribute: a VM-global attribute
+// when obj is NULL, otherwise a per-GC-manager attribute for obj
+// (which must be a GarbageCollectorMXBean). -1 on unknown attribute.
+JVM_ENTRY(jlong, jmm_GetLongAttribute(JNIEnv *env, jobject obj, jmmLongAttribute att))
+  if (obj == NULL) {
+    return get_long_attribute(att);
+  } else {
+    GCMemoryManager* mgr = get_gc_memory_manager_from_jobject(obj, CHECK_(0L));
+    if (mgr != NULL) {
+      return get_gc_attribute(mgr, att);
+    }
+  }
+  return -1;
+JVM_END
+
+// Gets the value of all attributes specified in the given array
+// and sets the value in the result array.
+// Returns the number of attributes found.
+// Entries that cannot be resolved are reported as -1 in result[] and are
+// not counted; note this also means a genuine attribute value of -1 is
+// indistinguishable from "not found" here.
+JVM_ENTRY(jint, jmm_GetLongAttributes(JNIEnv *env,
+                                      jobject obj,
+                                      jmmLongAttribute* atts,
+                                      jint count,
+                                      jlong* result))
+
+  int num_atts = 0;
+  if (obj == NULL) {
+    // VM-wide attributes
+    for (int i = 0; i < count; i++) {
+      result[i] = get_long_attribute(atts[i]);
+      if (result[i] != -1) {
+        num_atts++;
+      }
+    }
+  } else {
+    // GC-manager-specific attributes; obj must be a GarbageCollectorMXBean
+    GCMemoryManager* mgr = get_gc_memory_manager_from_jobject(obj, CHECK_0);
+    for (int i = 0; i < count; i++) {
+      result[i] = get_gc_attribute(mgr, atts[i]);
+      if (result[i] != -1) {
+        num_atts++;
+      }
+    }
+  }
+  return num_atts;
+JVM_END
+
+// Helper function to do thread dump for a specific list of threads.
+// Resolves each thread ID to a threadObj handle under the Threads_lock
+// (a JavaThread may terminate before the dump is taken; such entries get
+// a NULL handle), then executes a VM_ThreadDump operation at a safepoint.
+// Results are accumulated into dump_result.
+static void do_thread_dump(ThreadDumpResult* dump_result,
+                           typeArrayHandle ids_ah,  // array of thread ID (long[])
+                           int num_threads,
+                           int max_depth,
+                           bool with_locked_monitors,
+                           bool with_locked_synchronizers,
+                           TRAPS) {
+
+  // First get an array of threadObj handles.
+  // A JavaThread may terminate before we get the stack trace.
+  GrowableArray<instanceHandle>* thread_handle_array = new GrowableArray<instanceHandle>(num_threads);
+  {
+    // Hold Threads_lock so the thread list cannot change while we map
+    // IDs to JavaThreads.
+    MutexLockerEx ml(Threads_lock);
+    for (int i = 0; i < num_threads; i++) {
+      jlong tid = ids_ah->long_at(i);
+      JavaThread* jt = find_java_thread_from_id(tid);
+      oop thread_obj = (jt != NULL ? jt->threadObj() : (oop)NULL);
+      instanceHandle threadObj_h(THREAD, (instanceOop) thread_obj);
+      thread_handle_array->append(threadObj_h);
+    }
+  }
+
+  // Obtain thread dumps and thread snapshot information
+  VM_ThreadDump op(dump_result,
+                   thread_handle_array,
+                   num_threads,
+                   max_depth, /* stack depth */
+                   with_locked_monitors,
+                   with_locked_synchronizers);
+  VMThread::execute(&op);
+}
+
+// Gets an array of ThreadInfo objects. Each element is the ThreadInfo
+// for the thread ID specified in the corresponding entry in
+// the given array of thread IDs; or NULL if the thread does not exist
+// or has terminated.
+//
+// Input parameters:
+//   ids - array of thread IDs
+//   maxDepth - the maximum depth of stack traces to be dumped:
+//              maxDepth == -1 requests to dump entire stack trace.
+//              maxDepth == 0  requests no stack trace.
+//   infoArray - array of ThreadInfo objects
+//
+// Throws NullPointerException / IllegalArgumentException on bad arguments;
+// returns 0 on success (note: CHECK_0 paths also return 0 with a pending
+// exception).
+JVM_ENTRY(jint, jmm_GetThreadInfo(JNIEnv *env, jlongArray ids, jint maxDepth, jobjectArray infoArray))
+  // Check if threads is null
+  if (ids == NULL || infoArray == NULL) {
+    THROW_(vmSymbols::java_lang_NullPointerException(), -1);
+  }
+
+  if (maxDepth < -1) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+               "Invalid maxDepth", -1);
+  }
+
+  ResourceMark rm(THREAD);
+  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
+  typeArrayHandle ids_ah(THREAD, ta);
+
+  oop infoArray_obj = JNIHandles::resolve_non_null(infoArray);
+  objArrayOop oa = objArrayOop(infoArray_obj);
+  objArrayHandle infoArray_h(THREAD, oa);
+
+  // validate the thread id array
+  validate_thread_id_array(ids_ah, CHECK_0);
+
+  // validate the ThreadInfo[] parameters
+  validate_thread_info_array(infoArray_h, CHECK_0);
+
+  // infoArray must be of the same length as the given array of thread IDs
+  int num_threads = ids_ah->length();
+  if (num_threads != infoArray_h->length()) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+               "The length of the given ThreadInfo array does not match the length of the given array of thread IDs", -1);
+  }
+
+  if (JDK_Version::is_gte_jdk16x_version()) {
+    // make sure the AbstractOwnableSynchronizer klass is loaded before taking thread snapshots
+    java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(CHECK_0);
+  }
+
+  // Must use ThreadDumpResult to store the ThreadSnapshot.
+  // GC may occur after the thread snapshots are taken but before
+  // this function returns. The threadObj and other oops kept
+  // in the ThreadSnapshot are marked and adjusted during GC.
+  ThreadDumpResult dump_result(num_threads);
+
+  if (maxDepth == 0) {
+    // no stack trace dumped - do not need to stop the world
+    {
+      MutexLockerEx ml(Threads_lock);
+      for (int i = 0; i < num_threads; i++) {
+        jlong tid = ids_ah->long_at(i);
+        JavaThread* jt = find_java_thread_from_id(tid);
+        ThreadSnapshot* ts;
+        if (jt == NULL) {
+          // if the thread does not exist or now it is terminated,
+          // create dummy snapshot
+          ts = new ThreadSnapshot();
+        } else {
+          ts = new ThreadSnapshot(jt);
+        }
+        dump_result.add_thread_snapshot(ts);
+      }
+    }
+  } else {
+    // obtain thread dump with the specific list of threads with stack trace
+    do_thread_dump(&dump_result,
+                   ids_ah,
+                   num_threads,
+                   maxDepth,
+                   false, /* no locked monitor */
+                   false, /* no locked synchronizers */
+                   CHECK_0);
+  }
+
+  int num_snapshots = dump_result.num_snapshots();
+  assert(num_snapshots == num_threads, "Must match the number of thread snapshots");
+  int index = 0;
+  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; index++, ts = ts->next()) {
+    // For each thread, create an java/lang/management/ThreadInfo object
+    // and fill with the thread information
+
+    if (ts->threadObj() == NULL) {
+      // if the thread does not exist or now it is terminated, set threadinfo to NULL
+      infoArray_h->obj_at_put(index, NULL);
+      continue;
+    }
+
+    // Create java.lang.management.ThreadInfo object
+    instanceOop info_obj = Management::create_thread_info_instance(ts, CHECK_0);
+    infoArray_h->obj_at_put(index, info_obj);
+  }
+  return 0;
+JVM_END
+
+// Dump thread info for the specified threads.
+// It returns an array of ThreadInfo objects. Each element is the ThreadInfo
+// for the thread ID specified in the corresponding entry in
+// the given array of thread IDs; or NULL if the thread does not exist
+// or has terminated.
+//
+// Input parameter:
+//    ids - array of thread IDs; NULL indicates all live threads
+//    locked_monitors - if true, dump locked object monitors
+//    locked_synchronizers - if true, dump locked JSR-166 synchronizers
+//
+// Always dumps the entire stack (depth -1). Each ThreadInfo is built from
+// a ThreadSnapshot kept alive (and GC-adjusted) by dump_result.
+JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboolean locked_monitors, jboolean locked_synchronizers))
+  ResourceMark rm(THREAD);
+
+  if (JDK_Version::is_gte_jdk16x_version()) {
+    // make sure the AbstractOwnableSynchronizer klass is loaded before taking thread snapshots
+    java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(CHECK_NULL);
+  }
+
+  typeArrayOop ta = typeArrayOop(JNIHandles::resolve(thread_ids));
+  int num_threads = (ta != NULL ? ta->length() : 0);
+  typeArrayHandle ids_ah(THREAD, ta);
+
+  ThreadDumpResult dump_result(num_threads);  // can safepoint
+
+  if (ids_ah() != NULL) {
+
+    // validate the thread id array
+    validate_thread_id_array(ids_ah, CHECK_NULL);
+
+    // obtain thread dump of a specific list of threads
+    do_thread_dump(&dump_result,
+                   ids_ah,
+                   num_threads,
+                   -1, /* entire stack */
+                   (locked_monitors ? true : false), /* with locked monitors */
+                   (locked_synchronizers ? true : false), /* with locked synchronizers */
+                   CHECK_NULL);
+  } else {
+    // obtain thread dump of all threads
+    VM_ThreadDump op(&dump_result,
+                     -1, /* entire stack */
+                     (locked_monitors ? true : false), /* with locked monitors */
+                     (locked_synchronizers ? true : false) /* with locked synchronizers */);
+    VMThread::execute(&op);
+  }
+
+  int num_snapshots = dump_result.num_snapshots();
+
+  // create the result ThreadInfo[] object
+  klassOop k = Management::java_lang_management_ThreadInfo_klass(CHECK_NULL);
+  instanceKlassHandle ik (THREAD, k);
+  objArrayOop r = oopFactory::new_objArray(ik(), num_snapshots, CHECK_NULL);
+  objArrayHandle result_h(THREAD, r);
+
+  int index = 0;
+  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; ts = ts->next(), index++) {
+    if (ts->threadObj() == NULL) {
+      // if the thread does not exist or now it is terminated, set threadinfo to NULL
+      result_h->obj_at_put(index, NULL);
+      continue;
+    }
+
+    ThreadStackTrace* stacktrace = ts->get_stack_trace();
+    assert(stacktrace != NULL, "Must have a stack trace dumped");
+
+    // Create Object[] filled with locked monitors
+    // Create int[] filled with the stack depth where a monitor was locked
+    int num_frames = stacktrace->get_stack_depth();
+    // Start with the JNI-locked monitors (which have no frame), then add
+    // the per-frame counts below.
+    int num_locked_monitors = stacktrace->num_jni_locked_monitors();
+
+    // Count the total number of locked monitors
+    for (int i = 0; i < num_frames; i++) {
+      StackFrameInfo* frame = stacktrace->stack_frame_at(i);
+      num_locked_monitors += frame->num_locked_monitors();
+    }
+
+    objArrayHandle monitors_array;
+    typeArrayHandle depths_array;
+    objArrayHandle synchronizers_array;
+
+    if (locked_monitors) {
+      // Constructs Object[] and int[] to contain the object monitor and the stack depth
+      // where the thread locked it
+      objArrayOop array = oopFactory::new_system_objArray(num_locked_monitors, CHECK_NULL);
+      objArrayHandle mh(THREAD, array);
+      monitors_array = mh;
+
+      typeArrayOop tarray = oopFactory::new_typeArray(T_INT, num_locked_monitors, CHECK_NULL);
+      typeArrayHandle dh(THREAD, tarray);
+      depths_array = dh;
+
+      int count = 0;
+      int j = 0;
+      for (int depth = 0; depth < num_frames; depth++) {
+        StackFrameInfo* frame = stacktrace->stack_frame_at(depth);
+        int len = frame->num_locked_monitors();
+        GrowableArray<oop>* locked_monitors = frame->locked_monitors();
+        for (j = 0; j < len; j++) {
+          oop monitor = locked_monitors->at(j);
+          assert(monitor != NULL && monitor->is_instance(), "must be a Java object");
+          monitors_array->obj_at_put(count, monitor);
+          depths_array->int_at_put(count, depth);
+          count++;
+        }
+      }
+
+      GrowableArray<oop>* jni_locked_monitors = stacktrace->jni_locked_monitors();
+      for (j = 0; j < jni_locked_monitors->length(); j++) {
+        oop object = jni_locked_monitors->at(j);
+        assert(object != NULL && object->is_instance(), "must be a Java object");
+        monitors_array->obj_at_put(count, object);
+        // Monitor locked via JNI MonitorEnter call doesn't have stack depth info
+        depths_array->int_at_put(count, -1);
+        count++;
+      }
+      assert(count == num_locked_monitors, "number of locked monitors doesn't match");
+    }
+
+    if (locked_synchronizers) {
+      // Create Object[] filled with locked JSR-166 synchronizers
+      assert(ts->threadObj() != NULL, "Must be a valid JavaThread");
+      ThreadConcurrentLocks* tcl = ts->get_concurrent_locks();
+      GrowableArray<instanceOop>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
+      int num_locked_synchronizers = (locks != NULL ? locks->length() : 0);
+
+      objArrayOop array = oopFactory::new_system_objArray(num_locked_synchronizers, CHECK_NULL);
+      objArrayHandle sh(THREAD, array);
+      synchronizers_array = sh;
+
+      // NOTE: loop variable 'k' shadows the klassOop 'k' declared above.
+      for (int k = 0; k < num_locked_synchronizers; k++) {
+        synchronizers_array->obj_at_put(k, locks->at(k));
+      }
+    }
+
+    // Create java.lang.management.ThreadInfo object
+    instanceOop info_obj = Management::create_thread_info_instance(ts,
+                                                                   monitors_array,
+                                                                   depths_array,
+                                                                   synchronizers_array,
+                                                                   CHECK_NULL);
+    result_h->obj_at_put(index, info_obj);
+  }
+
+  return (jobjectArray) JNIHandles::make_local(env, result_h());
+JVM_END
+
+// Returns an array of Class objects, one java.lang.Class mirror per
+// loaded class enumerated by LoadedClassesEnumerator.
+JVM_ENTRY(jobjectArray, jmm_GetLoadedClasses(JNIEnv *env))
+  ResourceMark rm(THREAD);
+
+  LoadedClassesEnumerator lce(THREAD);  // Pass current Thread as parameter
+
+  int num_classes = lce.num_loaded_classes();
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), num_classes, CHECK_0);
+  objArrayHandle classes_ah(THREAD, r);
+
+  for (int i = 0; i < num_classes; i++) {
+    KlassHandle kh = lce.get_klass(i);
+    // Store the java.lang.Class mirror, not the klassOop itself.
+    oop mirror = Klass::cast(kh())->java_mirror();
+    classes_ah->obj_at_put(i, mirror);
+  }
+
+  return (jobjectArray) JNIHandles::make_local(env, classes_ah());
+JVM_END
+
+// Reset statistic.  Return true if the requested statistic is reset.
+// Otherwise, return false.
+//
+// Input parameters:
+//  obj  - specify which instance the statistic associated with to be reset
+//         For PEAK_POOL_USAGE stat, obj is required to be a memory pool object.
+//         For THREAD_CONTENTION_COUNT and TIME stat, obj is required to be a thread ID.
+//  type - the type of statistic to be reset
+//
+// A thread ID of 0 resets contention statistics for ALL live threads.
+JVM_ENTRY(jboolean, jmm_ResetStatistic(JNIEnv *env, jvalue obj, jmmStatisticType type))
+  ResourceMark rm(THREAD);
+
+  switch (type) {
+    case JMM_STAT_PEAK_THREAD_COUNT:
+      ThreadService::reset_peak_thread_count();
+      return true;
+
+    case JMM_STAT_THREAD_CONTENTION_COUNT:
+    case JMM_STAT_THREAD_CONTENTION_TIME: {
+      jlong tid = obj.j;
+      if (tid < 0) {
+        THROW_(vmSymbols::java_lang_IllegalArgumentException(), JNI_FALSE);
+      }
+
+      // Look for the JavaThread of this given tid
+      MutexLockerEx ml(Threads_lock);
+      if (tid == 0) {
+        // reset contention statistics for all threads if tid == 0
+        for (JavaThread* java_thread = Threads::first(); java_thread != NULL; java_thread = java_thread->next()) {
+          if (type == JMM_STAT_THREAD_CONTENTION_COUNT) {
+            ThreadService::reset_contention_count_stat(java_thread);
+          } else {
+            ThreadService::reset_contention_time_stat(java_thread);
+          }
+        }
+      } else {
+        // reset contention statistics for a given thread
+        JavaThread* java_thread = find_java_thread_from_id(tid);
+        if (java_thread == NULL) {
+          return false;
+        }
+
+        if (type == JMM_STAT_THREAD_CONTENTION_COUNT) {
+          ThreadService::reset_contention_count_stat(java_thread);
+        } else {
+          ThreadService::reset_contention_time_stat(java_thread);
+        }
+      }
+      return true;
+      break;  // unreachable: follows an unconditional return
+    }
+    case JMM_STAT_PEAK_POOL_USAGE: {
+      jobject o = obj.l;
+      if (o == NULL) {
+        THROW_(vmSymbols::java_lang_NullPointerException(), JNI_FALSE);
+      }
+
+      oop pool_obj = JNIHandles::resolve(o);
+      assert(pool_obj->is_instance(), "Should be an instanceOop");
+      instanceHandle ph(THREAD, (instanceOop) pool_obj);
+
+      MemoryPool* pool = MemoryService::get_memory_pool(ph);
+      if (pool != NULL) {
+        pool->reset_peak_memory_usage();
+        return true;
+      }
+      break;
+    }
+    case JMM_STAT_GC_STAT: {
+      jobject o = obj.l;
+      if (o == NULL) {
+        THROW_(vmSymbols::java_lang_NullPointerException(), JNI_FALSE);
+      }
+
+      GCMemoryManager* mgr = get_gc_memory_manager_from_jobject(o, CHECK_0);
+      if (mgr != NULL) {
+        mgr->reset_gc_stat();
+        return true;
+      }
+      break;
+    }
+    default:
+      assert(0, "Unknown Statistic Type");
+  }
+  return false;
+JVM_END
+
+// Returns the fast estimate of CPU time consumed by
+// a given thread (in nanoseconds).
+// If thread_id == 0, return CPU time for the current thread.
+// Returns -1 when thread CPU time is unsupported by the OS or the
+// thread no longer exists; throws IllegalArgumentException for a
+// negative thread_id.
+JVM_ENTRY(jlong, jmm_GetThreadCpuTime(JNIEnv *env, jlong thread_id))
+  if (!os::is_thread_cpu_time_supported()) {
+    return -1;
+  }
+
+  if (thread_id < 0) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+               "Invalid thread ID", -1);
+  }
+
+  JavaThread* java_thread = NULL;
+  if (thread_id == 0) {
+    // current thread
+    return os::current_thread_cpu_time();
+  } else {
+    // Threads_lock keeps the target thread alive during the lookup/query.
+    MutexLockerEx ml(Threads_lock);
+    java_thread = find_java_thread_from_id(thread_id);
+    if (java_thread != NULL) {
+      return os::thread_cpu_time((Thread*) java_thread);
+    }
+  }
+  return -1;
+JVM_END
+
+// Returns the CPU time consumed by a given thread (in nanoseconds).
+// If thread_id == 0, CPU time for the current thread is returned.
+// If user_sys_cpu_time = true, user level and system CPU time of
+// a given thread is returned; otherwise, only user level CPU time
+// is returned.
+// Mirrors jmm_GetThreadCpuTime but forwards the user/sys selector.
+JVM_ENTRY(jlong, jmm_GetThreadCpuTimeWithKind(JNIEnv *env, jlong thread_id, jboolean user_sys_cpu_time))
+  if (!os::is_thread_cpu_time_supported()) {
+    return -1;
+  }
+
+  if (thread_id < 0) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+               "Invalid thread ID", -1);
+  }
+
+  JavaThread* java_thread = NULL;
+  if (thread_id == 0) {
+    // current thread
+    return os::current_thread_cpu_time(user_sys_cpu_time != 0);
+  } else {
+    MutexLockerEx ml(Threads_lock);
+    java_thread = find_java_thread_from_id(thread_id);
+    if (java_thread != NULL) {
+      return os::thread_cpu_time((Thread*) java_thread, user_sys_cpu_time != 0);
+    }
+  }
+  return -1;
+JVM_END
+
+// Returns a String array of all VM global flag names.
+// Diagnostic (locked) flags are excluded; when any were skipped the
+// result is re-packed into an exactly-sized array.
+JVM_ENTRY(jobjectArray, jmm_GetVMGlobalNames(JNIEnv *env))
+  // last flag entry is always NULL, so subtract 1
+  int nFlags = (int) Flag::numFlags - 1;
+  // allocate a temp array
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+                                           nFlags, CHECK_0);
+  objArrayHandle flags_ah(THREAD, r);
+  int num_entries = 0;
+  for (int i = 0; i < nFlags; i++) {
+    Flag* flag = &Flag::flags[i];
+    // Exclude the diagnostic flags
+    if (flag->is_unlocked() || flag->is_unlocker()) {
+      Handle s = java_lang_String::create_from_str(flag->name, CHECK_0);
+      flags_ah->obj_at_put(num_entries, s());
+      num_entries++;
+    }
+  }
+
+  if (num_entries < nFlags) {
+    // Return array of right length
+    objArrayOop res = oopFactory::new_objArray(SystemDictionary::string_klass(), num_entries, CHECK_0);
+    for(int i = 0; i < num_entries; i++) {
+      res->obj_at_put(i, flags_ah->obj_at(i));
+    }
+    return (jobjectArray)JNIHandles::make_local(env, res);
+  }
+
+  return (jobjectArray)JNIHandles::make_local(env, flags_ah());
+JVM_END
+
+// utility function used by jmm_GetVMGlobals
+// Fills one jmmVMGlobal entry from a Flag: name (reusing 'name' when
+// supplied, else creating a String from flag->name), typed value,
+// writeable/external bits, and origin.  Flags of a type other than
+// bool/intx/uintx/ccstr keep type JMM_VMGLOBAL_TYPE_UNKNOWN and an
+// unset value.
+void add_global_entry(JNIEnv* env, Handle name, jmmVMGlobal *global, Flag *flag, TRAPS) {
+  Handle flag_name;
+  if (name() == NULL) {
+    // Caller did not supply a name handle - create one from the Flag.
+    flag_name = java_lang_String::create_from_str(flag->name, CHECK);
+  } else {
+    flag_name = name;
+  }
+  global->name = (jstring)JNIHandles::make_local(env, flag_name());
+  global->type = JMM_VMGLOBAL_TYPE_UNKNOWN;
+
+  if (flag->is_bool()) {
+    global->value.z = flag->get_bool() ? JNI_TRUE : JNI_FALSE;
+    global->type = JMM_VMGLOBAL_TYPE_JBOOLEAN;
+  } else if (flag->is_intx()) {
+    global->value.j = (jlong)flag->get_intx();
+    global->type = JMM_VMGLOBAL_TYPE_JLONG;
+  } else if (flag->is_uintx()) {
+    // NOTE: uintx is reported as a signed jlong; very large values may
+    // appear negative to the Java side.
+    global->value.j = (jlong)flag->get_uintx();
+    global->type = JMM_VMGLOBAL_TYPE_JLONG;
+  } else if (flag->is_ccstr()) {
+    Handle str = java_lang_String::create_from_str(flag->get_ccstr(), CHECK);
+    global->value.l = (jobject)JNIHandles::make_local(env, str());
+    global->type = JMM_VMGLOBAL_TYPE_JSTRING;
+  }
+
+  global->writeable = flag->is_writeable();
+  global->external = flag->is_external();
+  // Map the VM-internal FlagValueOrigin onto the JMM origin constants.
+  switch (flag->origin) {
+    case DEFAULT:
+      global->origin = JMM_VMGLOBAL_ORIGIN_DEFAULT;
+      break;
+    case COMMAND_LINE:
+      global->origin = JMM_VMGLOBAL_ORIGIN_COMMAND_LINE;
+      break;
+    case ENVIRON_VAR:
+      global->origin = JMM_VMGLOBAL_ORIGIN_ENVIRON_VAR;
+      break;
+    case CONFIG_FILE:
+      global->origin = JMM_VMGLOBAL_ORIGIN_CONFIG_FILE;
+      break;
+    case MANAGEMENT:
+      global->origin = JMM_VMGLOBAL_ORIGIN_MANAGEMENT;
+      break;
+    case ERGONOMIC:
+      global->origin = JMM_VMGLOBAL_ORIGIN_ERGONOMIC;
+      break;
+    default:
+      global->origin = JMM_VMGLOBAL_ORIGIN_OTHER;
+  }
+}
+
+// Fill globals array of count length with jmmVMGlobal entries
+// specified by names. If names == NULL, fill globals array
+// with all Flags. Return value is number of entries
+// created in globals.
+// If a Flag with a given name in an array element does not
+// exist, globals[i].name will be set to NULL.
+// Note the two modes index differently: with names, entry i in globals
+// corresponds to names[i] (possibly with NULL-name gaps); without names,
+// entries are packed densely.
+JVM_ENTRY(jint, jmm_GetVMGlobals(JNIEnv *env,
+                                 jobjectArray names,
+                                 jmmVMGlobal *globals,
+                                 jint count))
+
+
+  if (globals == NULL) {
+    THROW_(vmSymbols::java_lang_NullPointerException(), 0);
+  }
+
+  ResourceMark rm(THREAD);
+
+  if (names != NULL) {
+    // return the requested globals
+    objArrayOop ta = objArrayOop(JNIHandles::resolve_non_null(names));
+    objArrayHandle names_ah(THREAD, ta);
+    // Make sure we have a String array
+    klassOop element_klass = objArrayKlass::cast(names_ah->klass())->element_klass();
+    if (element_klass != SystemDictionary::string_klass()) {
+      THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+                 "Array element type is not String class", 0);
+    }
+
+    int names_length = names_ah->length();
+    int num_entries = 0;
+    for (int i = 0; i < names_length && i < count; i++) {
+      oop s = names_ah->obj_at(i);
+      if (s == NULL) {
+        THROW_(vmSymbols::java_lang_NullPointerException(), 0);
+      }
+
+      Handle sh(THREAD, s);
+      char* str = java_lang_String::as_utf8_string(s);
+      Flag* flag = Flag::find_flag(str, strlen(str));
+      if (flag != NULL) {
+        add_global_entry(env, sh, &globals[i], flag, THREAD);
+        num_entries++;
+      } else {
+        globals[i].name = NULL;
+      }
+    }
+    return num_entries;
+  } else {
+    // return all globals if names == NULL
+
+    // last flag entry is always NULL, so subtract 1
+    int nFlags = (int) Flag::numFlags - 1;
+    Handle null_h;
+    int num_entries = 0;
+    for (int i = 0; i < nFlags && num_entries < count; i++) {
+      Flag* flag = &Flag::flags[i];
+      // Exclude the diagnostic flags
+      if (flag->is_unlocked() || flag->is_unlocker()) {
+        add_global_entry(env, null_h, &globals[num_entries], flag, THREAD);
+        num_entries++;
+      }
+    }
+    return num_entries;
+  }
+JVM_END
+
+// Sets a writeable VM global flag to a new value.
+// Throws NullPointerException when the flag name (or a ccstr value) is
+// null, IllegalArgumentException when the flag does not exist or is not
+// writeable.  The value is applied with origin MANAGEMENT so the flag's
+// provenance is recorded.
+JVM_ENTRY(void, jmm_SetVMGlobal(JNIEnv *env, jstring flag_name, jvalue new_value))
+  ResourceMark rm(THREAD);
+
+  oop fn = JNIHandles::resolve_external_guard(flag_name);
+  if (fn == NULL) {
+    THROW_MSG(vmSymbols::java_lang_NullPointerException(),
+              "The flag name cannot be null.");
+  }
+  char* name = java_lang_String::as_utf8_string(fn);
+  Flag* flag = Flag::find_flag(name, strlen(name));
+  if (flag == NULL) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Flag does not exist.");
+  }
+  if (!flag->is_writeable()) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "This flag is not writeable.");
+  }
+
+  // Initialize to false: a flag whose type matches none of the branches
+  // below would otherwise leave 'succeed' uninitialized when the assert
+  // reads it.
+  bool succeed = false;
+  if (flag->is_bool()) {
+    bool bvalue = (new_value.z == JNI_TRUE ? true : false);
+    succeed = CommandLineFlags::boolAtPut(name, &bvalue, MANAGEMENT);
+  } else if (flag->is_intx()) {
+    intx ivalue = new_value.j;
+    succeed = CommandLineFlags::intxAtPut(name, &ivalue, MANAGEMENT);
+  } else if (flag->is_uintx()) {
+    uintx uvalue = new_value.j;
+    succeed = CommandLineFlags::uintxAtPut(name, &uvalue, MANAGEMENT);
+  } else if (flag->is_ccstr()) {
+    oop str = JNIHandles::resolve_external_guard(new_value.l);
+    if (str == NULL) {
+      THROW(vmSymbols::java_lang_NullPointerException());
+    }
+    ccstr svalue = java_lang_String::as_utf8_string(str);
+    succeed = CommandLineFlags::ccstrAtPut(name, &svalue, MANAGEMENT);
+  }
+  assert(succeed, "Setting flag should succeed");
+JVM_END
+
+// Closure applied to every Thread (under Threads_lock) to collect the
+// names and CPU times of VM-internal threads into caller-supplied arrays.
+// NOTE(review): _names/_times are raw oops held across do_thread calls,
+// which allocate (String creation) - presumably safe only because the
+// caller holds Threads_lock for the whole traversal; confirm no safepoint
+// can move these arrays while the closure is live.
+class ThreadTimesClosure: public ThreadClosure {
+ private:
+  objArrayOop _names;       // String[] receiving thread names
+  typeArrayOop _times;      // long[] receiving CPU times (ns)
+  int _names_len;
+  int _times_len;
+  int _count;               // number of entries filled so far
+
+ public:
+  ThreadTimesClosure(objArrayOop names, typeArrayOop times);
+  virtual void do_thread(Thread* thread);
+  int count() { return _count; }
+};
+
+// Caches both array lengths up front so do_thread can bound-check
+// without re-reading the arrays.
+ThreadTimesClosure::ThreadTimesClosure(objArrayOop names,
+                                       typeArrayOop times) {
+  assert(names != NULL, "names was NULL");
+  assert(times != NULL, "times was NULL");
+  _names = names;
+  _names_len = names->length();
+  _times = times;
+  _times_len = times->length();
+  _count = 0;
+}
+
+// Records one thread's name and CPU time.  Externally-visible
+// JavaThreads are skipped (only VM-internal threads are reported);
+// entries beyond the result arrays' capacity are silently dropped.
+void ThreadTimesClosure::do_thread(Thread* thread) {
+  Handle s;
+  assert(thread != NULL, "thread was NULL");
+
+  // exclude externally visible JavaThreads
+  if (thread->is_Java_thread() && !thread->is_hidden_from_external_view()) {
+    return;
+  }
+
+  if (_count >= _names_len || _count >= _times_len) {
+    // skip if the result array is not big enough
+    return;
+  }
+
+  // EXCEPTION_MARK: any exception from String creation below is fatal
+  // rather than propagated to the caller.
+  EXCEPTION_MARK;
+
+  assert(thread->name() != NULL, "All threads should have a name");
+  s = java_lang_String::create_from_str(thread->name(), CHECK);
+  _names->obj_at_put(_count, s());
+
+  // -1 signals "CPU time not supported on this platform".
+  _times->long_at_put(_count, os::is_thread_cpu_time_supported() ?
+                        os::thread_cpu_time(thread) : -1);
+  _count++;
+}
+
+// Fills names with VM internal thread names and times with the corresponding
+// CPU times.  If names or times is NULL, a NullPointerException is thrown.
+// If the element type of names is not String, an IllegalArgumentException is
+// thrown.
+// If an array is not large enough to hold all the entries, only the entries
+// that fit will be returned.  Return value is the number of VM internal
+// threads entries.
+JVM_ENTRY(jint, jmm_GetInternalThreadTimes(JNIEnv *env,
+                                           jobjectArray names,
+                                           jlongArray times))
+  if (names == NULL || times == NULL) {
+    THROW_(vmSymbols::java_lang_NullPointerException(), 0);
+  }
+  objArrayOop na = objArrayOop(JNIHandles::resolve_non_null(names));
+  objArrayHandle names_ah(THREAD, na);
+
+  // Make sure we have a String array
+  klassOop element_klass = objArrayKlass::cast(names_ah->klass())->element_klass();
+  if (element_klass != SystemDictionary::string_klass()) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+               "Array element type is not String class", 0);
+  }
+
+  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(times));
+  typeArrayHandle times_ah(THREAD, ta);
+
+  ThreadTimesClosure ttc(names_ah(), times_ah());
+  {
+    // Walk the thread list with the lock held so it cannot change mid-scan.
+    MutexLockerEx ml(Threads_lock);
+    Threads::threads_do(&ttc);
+  }
+
+  return ttc.count();
+JVM_END
+
+// Runs the VM_FindDeadlocks safepoint operation and flattens all
+// detected deadlock cycles into a single Thread[] result.
+// object_monitors_only == false additionally checks JSR-166
+// concurrent locks.  Returns a NULL Handle when no deadlock exists.
+static Handle find_deadlocks(bool object_monitors_only, TRAPS) {
+  ResourceMark rm(THREAD);
+
+  VM_FindDeadlocks op(!object_monitors_only /* also check concurrent locks? */);
+  VMThread::execute(&op);
+
+  DeadlockCycle* deadlocks = op.result();
+  if (deadlocks == NULL) {
+    // no deadlock found and return
+    return Handle();
+  }
+
+  // First pass: size the result array across all cycles.
+  int num_threads = 0;
+  DeadlockCycle* cycle;
+  for (cycle = deadlocks; cycle != NULL; cycle = cycle->next()) {
+    num_threads += cycle->num_threads();
+  }
+
+  objArrayOop r = oopFactory::new_objArray(SystemDictionary::thread_klass(), num_threads, CHECK_NH);
+  objArrayHandle threads_ah(THREAD, r);
+
+  // Second pass: copy each cycle's threadObj oops into the flat array.
+  int index = 0;
+  for (cycle = deadlocks; cycle != NULL; cycle = cycle->next()) {
+    GrowableArray<JavaThread*>* deadlock_threads = cycle->threads();
+    int len = deadlock_threads->length();
+    for (int i = 0; i < len; i++) {
+      threads_ah->obj_at_put(index, deadlock_threads->at(i)->threadObj());
+      index++;
+    }
+  }
+  return threads_ah;
+}
+
+// Finds cycles of threads that are deadlocked involved in object monitors
+// and JSR-166 synchronizers.
+// Returns an array of Thread objects which are in deadlock, if any.
+// Otherwise, returns NULL.
+//
+// Input parameter:
+//    object_monitors_only - if true, only check object monitors
+//
+// Thin wrapper over find_deadlocks().
+JVM_ENTRY(jobjectArray, jmm_FindDeadlockedThreads(JNIEnv *env, jboolean object_monitors_only))
+  Handle result = find_deadlocks(object_monitors_only != 0, CHECK_0);
+  return (jobjectArray) JNIHandles::make_local(env, result());
+JVM_END
+
+// Finds cycles of threads that are deadlocked on monitor locks.
+// Returns an array of Thread objects which are in deadlock, if any.
+// Otherwise, returns NULL.
+// Legacy entry point: equivalent to jmm_FindDeadlockedThreads with
+// object_monitors_only == true.
+JVM_ENTRY(jobjectArray, jmm_FindMonitorDeadlockedThreads(JNIEnv *env))
+  Handle result = find_deadlocks(true, CHECK_0);
+  return (jobjectArray) JNIHandles::make_local(env, result());
+JVM_END
+
+// Gets the information about GC extension attributes including
+// the name of the attribute, its type, and a short description.
+//
+// Input parameters:
+//   mgr   - GC memory manager
+//   info  - caller allocated array of jmmExtAttributeInfo
+//   count - number of elements of the info array
+//
+// Returns the number of GC extension attributes filled in the info array; or
+// -1 if info is not big enough
+//
+// Current implementation exposes exactly one attribute ("GcThreadCount")
+// for every GC memory manager; 'mgr' is therefore unused here.
+JVM_ENTRY(jint, jmm_GetGCExtAttributeInfo(JNIEnv *env, jobject mgr, jmmExtAttributeInfo* info, jint count))
+  // All GC memory managers have 1 attribute (number of GC threads)
+  if (count == 0) {
+    return 0;
+  }
+
+  if (info == NULL) {
+    THROW_(vmSymbols::java_lang_NullPointerException(), 0);
+  }
+
+  info[0].name = "GcThreadCount";
+  info[0].type = 'I';
+  info[0].description = "Number of GC threads";
+  return 1;
+JVM_END
+
+// verify the given array is an array of java/lang/management/MemoryUsage objects
+// of a given length and return the objArrayOop
+// Throws NullPointerException for a NULL array and
+// IllegalArgumentException for a length or element-type mismatch.
+static objArrayOop get_memory_usage_objArray(jobjectArray array, int length, TRAPS) {
+  if (array == NULL) {
+    THROW_(vmSymbols::java_lang_NullPointerException(), 0);
+  }
+
+  objArrayOop oa = objArrayOop(JNIHandles::resolve_non_null(array));
+  objArrayHandle array_h(THREAD, oa);
+
+  // array must be of the given length
+  if (length != array_h->length()) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+               "The length of the given MemoryUsage array does not match the number of memory pools.", 0);
+  }
+
+  // check if the element of array is of type MemoryUsage class
+  klassOop usage_klass = Management::java_lang_management_MemoryUsage_klass(CHECK_0);
+  klassOop element_klass = objArrayKlass::cast(array_h->klass())->element_klass();
+  if (element_klass != usage_klass) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+               "The element type is not MemoryUsage class", 0);
+  }
+
+  return array_h();
+}
+
+// Gets the statistics of the last GC of a given GC memory manager.
+// Input parameters:
+//   obj     - GarbageCollectorMXBean object
+//   gc_stat - caller allocated jmmGCStat where:
+//     a. before_gc_usage - array of MemoryUsage objects
+//     b. after_gc_usage  - array of MemoryUsage objects
+//     c. gc_ext_attributes_values_size is set to the
+//        gc_ext_attribute_values array allocated
+//     d. gc_ext_attribute_values is a caller allocated array of jvalue.
+//
+// On return,
+//   gc_index == 0 indicates no GC statistics available
+//
+//   before_gc_usage and after_gc_usage - filled with per memory pool
+//      before and after GC usage in the same order as the memory pools
+//      returned by GetMemoryPools for a given GC memory manager.
+//   num_gc_ext_attributes indicates the number of elements in
+//      the gc_ext_attribute_values array is filled; or
+//      -1 if the gc_ext_attributes_values array is not big enough
+//
+JVM_ENTRY(void, jmm_GetLastGCStat(JNIEnv *env, jobject obj, jmmGCStat *gc_stat))
+  ResourceMark rm(THREAD);
+
+  if (gc_stat->gc_ext_attribute_values_size > 0 && gc_stat->gc_ext_attribute_values == NULL) {
+    THROW(vmSymbols::java_lang_NullPointerException());
+  }
+
+  // Get the GCMemoryManager
+  GCMemoryManager* mgr = get_gc_memory_manager_from_jobject(obj, CHECK);
+  if (mgr->last_gc_stat() == NULL) {
+    gc_stat->gc_index = 0;
+    return;
+  }
+
+  // Make a copy of the last GC statistics
+  // GC may occur while constructing the last GC information
+  // (the copy is resource-allocated; freed with the ResourceMark above)
+  int num_pools = MemoryService::num_memory_pools();
+  GCStatInfo* stat = new GCStatInfo(num_pools);
+  stat->copy_stat(mgr->last_gc_stat());
+
+  gc_stat->gc_index = stat->gc_index();
+  gc_stat->start_time = Management::ticks_to_ms(stat->start_time());
+  gc_stat->end_time = Management::ticks_to_ms(stat->end_time());
+
+  // Current implementation does not have GC extension attributes
+  gc_stat->num_gc_ext_attributes = 0;
+
+  // Fill the arrays of MemoryUsage objects with before and after GC
+  // per pool memory usage
+  objArrayOop bu = get_memory_usage_objArray(gc_stat->usage_before_gc,
+                                             num_pools,
+                                             CHECK);
+  objArrayHandle usage_before_gc_ah(THREAD, bu);
+
+  objArrayOop au = get_memory_usage_objArray(gc_stat->usage_after_gc,
+                                             num_pools,
+                                             CHECK);
+  objArrayHandle usage_after_gc_ah(THREAD, au);
+
+  for (int i = 0; i < num_pools; i++) {
+    Handle before_usage = MemoryService::create_MemoryUsage_obj(stat->before_gc_usage_for_pool(i), CHECK);
+    Handle after_usage;
+
+    MemoryUsage u = stat->after_gc_usage_for_pool(i);
+    if (u.max_size() == 0 && u.used() > 0) {
+      // If max size == 0, this pool is a survivor space.
+      // Set max size = -1 since the pools will be swapped after GC.
+      MemoryUsage usage(u.init_size(), u.used(), u.committed(), (size_t)-1);
+      after_usage = MemoryService::create_MemoryUsage_obj(usage, CHECK);
+    } else {
+      after_usage = MemoryService::create_MemoryUsage_obj(stat->after_gc_usage_for_pool(i), CHECK);
+    }
+    usage_before_gc_ah->obj_at_put(i, before_usage());
+    usage_after_gc_ah->obj_at_put(i, after_usage());
+  }
+
+  if (gc_stat->gc_ext_attribute_values_size > 0) {
+    // Current implementation only has 1 attribute (number of GC threads)
+    // The type is 'I'
+    gc_stat->gc_ext_attribute_values[0].i = mgr->num_gc_threads();
+  }
+JVM_END
+
+// Dump heap - Returns 0 if succeeds.
+// Writes an HPROF heap dump to 'outputfile'; 'live' restricts the dump
+// to reachable objects.  Throws NullPointerException for a null file
+// name and IOException when the dump fails.  On SERVICES_KERNEL builds
+// heap dumping is compiled out and -1 is returned unconditionally.
+JVM_ENTRY(jint, jmm_DumpHeap0(JNIEnv *env, jstring outputfile, jboolean live))
+#ifndef SERVICES_KERNEL
+  ResourceMark rm(THREAD);
+  oop on = JNIHandles::resolve_external_guard(outputfile);
+  if (on == NULL) {
+    THROW_MSG_(vmSymbols::java_lang_NullPointerException(),
+               "Output file name cannot be null.", -1);
+  }
+  char* name = java_lang_String::as_utf8_string(on);
+  if (name == NULL) {
+    THROW_MSG_(vmSymbols::java_lang_NullPointerException(),
+               "Output file name cannot be null.", -1);
+  }
+  HeapDumper dumper(live ? true : false);
+  if (dumper.dump(name) != 0) {
+    const char* errmsg = dumper.error_as_C_string();
+    THROW_MSG_(vmSymbols::java_io_IOException(), errmsg, -1);
+  }
+  return 0;
+#else  // SERVICES_KERNEL
+  return -1;
+#endif // SERVICES_KERNEL
+JVM_END
+
+// Converts elapsed-counter ticks to milliseconds using the OS tick
+// frequency (ticks per second).
+jlong Management::ticks_to_ms(jlong ticks) {
+  assert(os::elapsed_frequency() > 0, "Must be non-zero");
+  const double seconds = (double)ticks / (double)os::elapsed_frequency();
+  return (jlong)(seconds * (double)1000.0);
+}
+
+// Function table exported to the JDK's management library.
+// Entry order must match struct jmmInterface_1_ in jmm.h exactly;
+// NULL entries are slots this VM does not implement (reserved in jmm.h).
+const struct jmmInterface_1_ jmm_interface = {
+  NULL,                          // reserved slot (see jmm.h)
+  NULL,                          // reserved slot (see jmm.h)
+  jmm_GetVersion,
+  jmm_GetOptionalSupport,
+  jmm_GetInputArguments,
+  jmm_GetThreadInfo,
+  jmm_GetInputArgumentArray,
+  jmm_GetMemoryPools,
+  jmm_GetMemoryManagers,
+  jmm_GetMemoryPoolUsage,
+  jmm_GetPeakMemoryPoolUsage,
+  NULL,                          // unimplemented slot (see jmm.h)
+  jmm_GetMemoryUsage,
+  jmm_GetLongAttribute,
+  jmm_GetBoolAttribute,
+  jmm_SetBoolAttribute,
+  jmm_GetLongAttributes,
+  jmm_FindMonitorDeadlockedThreads,
+  jmm_GetThreadCpuTime,
+  jmm_GetVMGlobalNames,
+  jmm_GetVMGlobals,
+  jmm_GetInternalThreadTimes,
+  jmm_ResetStatistic,
+  jmm_SetPoolSensor,
+  jmm_SetPoolThreshold,
+  jmm_GetPoolCollectionUsage,
+  jmm_GetGCExtAttributeInfo,
+  jmm_GetLastGCStat,
+  jmm_GetThreadCpuTimeWithKind,
+  NULL,                          // unimplemented slot (see jmm.h)
+  jmm_DumpHeap0,
+  jmm_FindDeadlockedThreads,
+  jmm_SetVMGlobal,
+  NULL,                          // unimplemented slot (see jmm.h)
+  jmm_DumpThreads
+};
+
+// Returns the jmmInterface_1_ function table for a supported interface
+// version, or NULL when the requested version is unknown.
+void* Management::get_jmm_interface(int version) {
+  return (version == JMM_VERSION_1_0) ? (void*) &jmm_interface : NULL;
+}
diff --git a/src/share/vm/services/management.hpp b/src/share/vm/services/management.hpp
new file mode 100644
index 000000000..c37a25fab
--- /dev/null
+++ b/src/share/vm/services/management.hpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class OopClosure;
+class ThreadSnapshot;
+
// VM-side entry point for java.lang.management support.  Holds the
// lazily resolved management klasses, the VM startup/init timestamps,
// and the bridge to the JMM function table.  All state is static.
class Management : public AllStatic {
private:
  // Performance counters recording VM creation/initialization times.
  static PerfVariable* _begin_vm_creation_time;
  static PerfVariable* _end_vm_creation_time;
  static PerfVariable* _vm_init_done_time;
  static jmmOptionalSupport _optional_support;
  static TimeStamp _stamp; // Timestamp since vm init done time

  // Management klasses
  static klassOop _sensor_klass;
  static klassOop _threadInfo_klass;
  static klassOop _memoryUsage_klass;
  static klassOop _memoryPoolMXBean_klass;
  static klassOop _memoryManagerMXBean_klass;
  static klassOop _garbageCollectorMXBean_klass;
  static klassOop _managementFactory_klass;

  // Loads and initializes the named klass; may throw (TRAPS).
  static klassOop load_and_initialize_klass(symbolHandle sh, TRAPS);

public:
  static void init();
  static void initialize(TRAPS);

  // Converts elapsed-counter ticks to milliseconds.
  static jlong ticks_to_ms(jlong ticks);
  static jlong timestamp();

  // GC support: walks the oops held by the management subsystem.
  static void oops_do(OopClosure* f);
  static void* get_jmm_interface(int version);
  static void get_optional_support(jmmOptionalSupport* support);

  static void get_loaded_classes(JavaThread* cur_thread, GrowableArray<KlassHandle>* klass_handle_array);

  static void record_vm_startup_time(jlong begin, jlong duration);
  static void record_vm_init_completed() {
    // Initialize the timestamp to get the current time
    _vm_init_done_time->set_value(os::javaTimeMillis());

    // Update the timestamp to the vm init done time
    _stamp.update();
  }

  static jlong vm_init_done_time() {
    return _vm_init_done_time->get_value();
  }

  // methods to return a klassOop.
  static klassOop java_lang_management_ThreadInfo_klass(TRAPS);
  static klassOop java_lang_management_MemoryUsage_klass(TRAPS);
  static klassOop java_lang_management_MemoryPoolMXBean_klass(TRAPS);
  static klassOop java_lang_management_MemoryManagerMXBean_klass(TRAPS);
  static klassOop java_lang_management_GarbageCollectorMXBean_klass(TRAPS);
  static klassOop sun_management_Sensor_klass(TRAPS);
  static klassOop sun_management_ManagementFactory_klass(TRAPS);

  // Create a java.lang.management.ThreadInfo instance from a snapshot,
  // optionally with locked-monitor/synchronizer detail arrays.
  static instanceOop create_thread_info_instance(ThreadSnapshot* snapshot, TRAPS);
  static instanceOop create_thread_info_instance(ThreadSnapshot* snapshot, objArrayHandle monitors_array, typeArrayHandle depths_array, objArrayHandle synchronizers_array, TRAPS);
};
+
// Stack-allocated helper that measures VM creation time and reports it
// to Management::record_vm_startup_time().  Usage: start() at the
// beginning of VM creation, end() after successful initialization.
class TraceVmCreationTime : public StackObj {
private:
  TimeStamp _timer;        // elapsed-time measurement since start()
  jlong _begin_time;       // wall-clock time (ms) at start()

public:
  TraceVmCreationTime() {}
  ~TraceVmCreationTime() {}

  void start()
  { _timer.update_to(0); _begin_time = os::javaTimeMillis(); }

  /**
   * Only call this if initialization completes successfully; it will
   * crash if PerfMemory_exit() has already been called (usually by
   * os::shutdown() when there was an initialization failure).
   */
  void end()
  { Management::record_vm_startup_time(_begin_time, _timer.milliseconds()); }

};
diff --git a/src/share/vm/services/memoryManager.cpp b/src/share/vm/services/memoryManager.cpp
new file mode 100644
index 000000000..7b7905c5a
--- /dev/null
+++ b/src/share/vm/services/memoryManager.cpp
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_memoryManager.cpp.incl"
+
+HS_DTRACE_PROBE_DECL8(hotspot, mem__pool__gc__begin, char*, int, char*, int,
+ size_t, size_t, size_t, size_t);
+HS_DTRACE_PROBE_DECL8(hotspot, mem__pool__gc__end, char*, int, char*, int,
+ size_t, size_t, size_t, size_t);
+
+MemoryManager::MemoryManager() {
+ _num_pools = 0;
+ _memory_mgr_obj = NULL;
+}
+
+void MemoryManager::add_pool(MemoryPool* pool) {
+ assert(_num_pools < MemoryManager::max_num_pools, "_num_pools exceeds the max");
+ if (_num_pools < MemoryManager::max_num_pools) {
+ _pools[_num_pools] = pool;
+ _num_pools++;
+ }
+ pool->add_manager(this);
+}
+
+MemoryManager* MemoryManager::get_code_cache_memory_manager() {
+ return (MemoryManager*) new CodeCacheMemoryManager();
+}
+
+GCMemoryManager* MemoryManager::get_copy_memory_manager() {
+ return (GCMemoryManager*) new CopyMemoryManager();
+}
+
+GCMemoryManager* MemoryManager::get_msc_memory_manager() {
+ return (GCMemoryManager*) new MSCMemoryManager();
+}
+
+GCMemoryManager* MemoryManager::get_parnew_memory_manager() {
+ return (GCMemoryManager*) new ParNewMemoryManager();
+}
+
+GCMemoryManager* MemoryManager::get_cms_memory_manager() {
+ return (GCMemoryManager*) new CMSMemoryManager();
+}
+
+GCMemoryManager* MemoryManager::get_psScavenge_memory_manager() {
+ return (GCMemoryManager*) new PSScavengeMemoryManager();
+}
+
+GCMemoryManager* MemoryManager::get_psMarkSweep_memory_manager() {
+ return (GCMemoryManager*) new PSMarkSweepMemoryManager();
+}
+
// Lazily creates (via a Java upcall to sun.management.ManagementFactory)
// and caches the Java-level MemoryManagerMXBean object for this manager.
// Uses the double-checked locking pattern: unsynchronized acquire-load,
// then recheck under Management_lock before publishing with a release
// store.  Losing racers simply let their extra instance be GC'ed.
instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
  // Must do an acquire so as to force ordering of subsequent
  // loads from anything _memory_mgr_obj points to or implies.
  instanceOop mgr_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_mgr_obj);
  if (mgr_obj == NULL) {
    // It's ok for more than one thread to execute the code up to the locked region.
    // Extra manager instances will just be gc'ed.
    klassOop k = Management::sun_management_ManagementFactory_klass(CHECK_0);
    instanceKlassHandle ik(THREAD, k);

    Handle mgr_name = java_lang_String::create_from_str(name(), CHECK_0);

    JavaValue result(T_OBJECT);
    JavaCallArguments args;
    args.push_oop(mgr_name); // Argument 1

    // GC managers are created through a different factory method than
    // plain memory managers.
    symbolHandle method_name;
    symbolHandle signature;
    if (is_gc_memory_manager()) {
      method_name = vmSymbolHandles::createGarbageCollector_name();
      signature = vmSymbolHandles::createGarbageCollector_signature();
      args.push_oop(Handle()); // Argument 2 (for future extension)
    } else {
      method_name = vmSymbolHandles::createMemoryManager_name();
      signature = vmSymbolHandles::createMemoryManager_signature();
    }

    JavaCalls::call_static(&result,
                           ik,
                           method_name,
                           signature,
                           &args,
                           CHECK_0);

    instanceOop m = (instanceOop) result.get_jobject();
    instanceHandle mgr(THREAD, m);

    {
      // Get lock before setting _memory_mgr_obj
      // since another thread may have created the instance
      MutexLocker ml(Management_lock);

      // Check if another thread has created the management object.  We reload
      // _memory_mgr_obj here because some other thread may have initialized
      // it while we were executing the code before the lock.
      //
      // The lock has done an acquire, so the load can't float above it, but
      // we need to do a load_acquire as above.
      mgr_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_mgr_obj);
      if (mgr_obj != NULL) {
        return mgr_obj;
      }

      // Get the address of the object we created via call_special.
      mgr_obj = mgr();

      // Use store barrier to make sure the memory accesses associated
      // with creating the management object are visible before publishing
      // its address.  The unlock will publish the store to _memory_mgr_obj
      // because it does a release first.
      OrderAccess::release_store_ptr(&_memory_mgr_obj, mgr_obj);
    }
  }

  return mgr_obj;
}
+
+void MemoryManager::oops_do(OopClosure* f) {
+ f->do_oop((oop*) &_memory_mgr_obj);
+}
+
+GCStatInfo::GCStatInfo(int num_pools) {
+ // initialize the arrays for memory usage
+ _before_gc_usage_array = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools);
+ _after_gc_usage_array = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools);
+ size_t len = num_pools * sizeof(MemoryUsage);
+ memset(_before_gc_usage_array, 0, len);
+ memset(_after_gc_usage_array, 0, len);
+ _usage_array_size = num_pools;
+}
+
+GCStatInfo::~GCStatInfo() {
+ FREE_C_HEAP_ARRAY(MemoryUsage*, _before_gc_usage_array);
+ FREE_C_HEAP_ARRAY(MemoryUsage*, _after_gc_usage_array);
+}
+
+void GCStatInfo::copy_stat(GCStatInfo* stat) {
+ set_index(stat->gc_index());
+ set_start_time(stat->start_time());
+ set_end_time(stat->end_time());
+ assert(_usage_array_size == stat->usage_array_size(), "Must have same array size");
+ for (int i = 0; i < _usage_array_size; i++) {
+ set_before_gc_usage(i, stat->before_gc_usage_for_pool(i));
+ set_after_gc_usage(i, stat->after_gc_usage_for_pool(i));
+ }
+}
+
+void GCStatInfo::set_gc_usage(int pool_index, MemoryUsage usage, bool before_gc) {
+ MemoryUsage* gc_usage_array;
+ if (before_gc) {
+ gc_usage_array = _before_gc_usage_array;
+ } else {
+ gc_usage_array = _after_gc_usage_array;
+ }
+ gc_usage_array[pool_index] = usage;
+}
+
+GCMemoryManager::GCMemoryManager() : MemoryManager() {
+ _num_collections = 0;
+ _last_gc_stat = NULL;
+ _num_gc_threads = 1;
+}
+
// Release the last-GC statistics record (NULL until
// initialize_gc_stat_info() has been called; deleting NULL is a no-op).
GCMemoryManager::~GCMemoryManager() {
  delete _last_gc_stat;
}
+
+void GCMemoryManager::initialize_gc_stat_info() {
+ assert(MemoryService::num_memory_pools() > 0, "should have one or more memory pools");
+ _last_gc_stat = new GCStatInfo(MemoryService::num_memory_pools());
+}
+
// Called at the start of a collection: starts the accumulated GC timer,
// bumps the collection count, stamps the statistics record, and snapshots
// every pool's usage (also firing the mem-pool-gc-begin DTrace probe).
void GCMemoryManager::gc_begin() {
  assert(_last_gc_stat != NULL, "Just checking");
  _accumulated_timer.start();
  _num_collections++;
  _last_gc_stat->set_index(_num_collections);
  _last_gc_stat->set_start_time(Management::timestamp());

  // Keep memory usage of all memory pools
  for (int i = 0; i < MemoryService::num_memory_pools(); i++) {
    MemoryPool* pool = MemoryService::get_memory_pool(i);
    MemoryUsage usage = pool->get_memory_usage();
    _last_gc_stat->set_before_gc_usage(i, usage);
    HS_DTRACE_PROBE8(hotspot, mem__pool__gc__begin,
      name(), strlen(name()),
      pool->name(), strlen(pool->name()),
      usage.init_size(), usage.used(),
      usage.committed(), usage.max_size());
  }
}
+
// Called at the end of a collection: stops the accumulated timer, stamps
// the end time, records every pool's post-GC usage (firing the
// mem-pool-gc-end DTrace probe), and then — only for the pools this
// collector manages — updates the last-collection usage and runs
// low-memory threshold detection.
void GCMemoryManager::gc_end() {
  _accumulated_timer.stop();
  _last_gc_stat->set_end_time(Management::timestamp());

  int i;
  // keep the last gc statistics for all memory pools
  for (i = 0; i < MemoryService::num_memory_pools(); i++) {
    MemoryPool* pool = MemoryService::get_memory_pool(i);
    MemoryUsage usage = pool->get_memory_usage();

    HS_DTRACE_PROBE8(hotspot, mem__pool__gc__end,
      name(), strlen(name()),
      pool->name(), strlen(pool->name()),
      usage.init_size(), usage.used(),
      usage.committed(), usage.max_size());

    _last_gc_stat->set_after_gc_usage(i, usage);
  }

  // Set last collection usage of the memory pools managed by this collector
  for (i = 0; i < num_memory_pools(); i++) {
    MemoryPool* pool = get_memory_pool(i);
    MemoryUsage usage = pool->get_memory_usage();

    // Compare with GC usage threshold
    pool->set_last_collection_usage(usage);
    LowMemoryDetector::detect_after_gc_memory(pool);
  }
}
diff --git a/src/share/vm/services/memoryManager.hpp b/src/share/vm/services/memoryManager.hpp
new file mode 100644
index 000000000..4efc955eb
--- /dev/null
+++ b/src/share/vm/services/memoryManager.hpp
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
// A memory manager is responsible for managing one or more memory pools.
// The garbage collector is one type of memory manager, responsible
// for reclaiming memory occupied by unreachable objects.  A Java virtual
// machine may have one or more memory managers.  It may
// add or remove memory managers during execution.
// A memory pool can be managed by more than one memory manager.
+
+class MemoryPool;
+class GCMemoryManager;
+class OopClosure;
+
// Abstract base for all memory managers.  Tracks the (fixed-capacity)
// set of pools a manager is responsible for and lazily caches the
// Java-level manager object created by get_memory_manager_instance().
class MemoryManager : public CHeapObj {
private:
  enum {
    max_num_pools = 10   // capacity of the _pools table
  };

  MemoryPool* _pools[max_num_pools];
  int _num_pools;

protected:
  // Lazily created Java-level manager object; read with acquire and
  // published with release (see get_memory_manager_instance).
  volatile instanceOop _memory_mgr_obj;

public:
  enum Name {
    Abstract,
    CodeCache,
    Copy,
    MarkSweepCompact,
    ParNew,
    ConcurrentMarkSweep,
    PSScavenge,
    PSMarkSweep
  };

  MemoryManager();

  int num_memory_pools() const { return _num_pools; }
  MemoryPool* get_memory_pool(int index) {
    assert(index >= 0 && index < _num_pools, "Invalid index");
    return _pools[index];
  }

  // Registers a pool and links this manager back into the pool.
  void add_pool(MemoryPool* pool);

  // True if mh wraps this manager's cached Java-level object.
  bool is_manager(instanceHandle mh) { return mh() == _memory_mgr_obj; }

  virtual instanceOop get_memory_manager_instance(TRAPS);
  virtual MemoryManager::Name kind() { return MemoryManager::Abstract; }
  virtual bool is_gc_memory_manager() { return false; }
  virtual const char* name() = 0;

  // GC support
  void oops_do(OopClosure* f);

  // Static factory methods to get a memory manager of a specific type
  static MemoryManager* get_code_cache_memory_manager();
  static GCMemoryManager* get_copy_memory_manager();
  static GCMemoryManager* get_msc_memory_manager();
  static GCMemoryManager* get_parnew_memory_manager();
  static GCMemoryManager* get_cms_memory_manager();
  static GCMemoryManager* get_psScavenge_memory_manager();
  static GCMemoryManager* get_psMarkSweep_memory_manager();

};
+
+class CodeCacheMemoryManager : public MemoryManager {
+private:
+public:
+ CodeCacheMemoryManager() : MemoryManager() {}
+
+ MemoryManager::Name kind() { return MemoryManager::CodeCache; }
+ const char* name() { return "CodeCacheManager"; }
+};
+
// Statistics for a single garbage collection: the collection's index,
// its start/end timestamps, and per-pool memory usage snapshots taken
// before and after the collection.
class GCStatInfo : public CHeapObj {
private:
  size_t _index;        // collection count at the time of this GC
  jlong  _start_time;   // timestamps from Management::timestamp()
  jlong  _end_time;

  // We keep memory usage of all memory pools
  MemoryUsage* _before_gc_usage_array;
  MemoryUsage* _after_gc_usage_array;
  int          _usage_array_size;

  void set_gc_usage(int pool_index, MemoryUsage, bool before_gc);

public:
  GCStatInfo(int num_pools);
  ~GCStatInfo();

  size_t gc_index()         { return _index; }
  jlong  start_time()       { return _start_time; }
  jlong  end_time()         { return _end_time; }
  int    usage_array_size() { return _usage_array_size; }
  MemoryUsage before_gc_usage_for_pool(int pool_index) {
    assert(pool_index >= 0 && pool_index < _usage_array_size, "Range checking");
    return _before_gc_usage_array[pool_index];
  }
  MemoryUsage after_gc_usage_for_pool(int pool_index) {
    assert(pool_index >= 0 && pool_index < _usage_array_size, "Range checking");
    return _after_gc_usage_array[pool_index];
  }

  void set_index(size_t index)    { _index = index; }
  void set_start_time(jlong time) { _start_time = time; }
  void set_end_time(jlong time)   { _end_time = time; }
  void set_before_gc_usage(int pool_index, MemoryUsage usage) {
    assert(pool_index >= 0 && pool_index < _usage_array_size, "Range checking");
    set_gc_usage(pool_index, usage, true /* before gc */);
  }
  void set_after_gc_usage(int pool_index, MemoryUsage usage) {
    assert(pool_index >= 0 && pool_index < _usage_array_size, "Range checking");
    set_gc_usage(pool_index, usage, false /* after gc */);
  }

  // Copies all fields and usage arrays from stat into this record.
  void copy_stat(GCStatInfo* stat);
};
+
// Base class for garbage-collecting memory managers.  Adds collection
// counting, accumulated GC timing, and per-collection statistics.
class GCMemoryManager : public MemoryManager {
private:
  // TODO: We should unify the GCCounter and GCMemoryManager statistic
  size_t       _num_collections;
  elapsedTimer _accumulated_timer;
  elapsedTimer _gc_timer;         // for measuring every GC duration
  GCStatInfo*  _last_gc_stat;     // stats of the most recent collection
  int          _num_gc_threads;
public:
  GCMemoryManager();
  ~GCMemoryManager();

  // Allocates _last_gc_stat; call after memory pools are registered.
  void initialize_gc_stat_info();

  bool is_gc_memory_manager()    { return true; }
  jlong gc_time_ms()             { return _accumulated_timer.milliseconds(); }
  size_t gc_count()              { return _num_collections; }
  int num_gc_threads()           { return _num_gc_threads; }
  void set_num_gc_threads(int count) { _num_gc_threads = count; }

  // Bracket a collection: snapshot usage before and after the GC.
  void gc_begin();
  void gc_end();

  void reset_gc_stat()       { _num_collections = 0; _accumulated_timer.reset(); }
  GCStatInfo* last_gc_stat() { return _last_gc_stat; }

  virtual MemoryManager::Name kind() = 0;
};
+
+// These subclasses of GCMemoryManager are defined to include
+// GC-specific information.
+// TODO: Add GC-specific information
// Manager for the serial copying (scavenge) collector.
class CopyMemoryManager : public GCMemoryManager {
private:
public:
  CopyMemoryManager() : GCMemoryManager() {}

  MemoryManager::Name kind() { return MemoryManager::Copy; }
  const char* name() { return "Copy"; }
};

// Manager for the serial mark-sweep-compact full collector.
class MSCMemoryManager : public GCMemoryManager {
private:
public:
  MSCMemoryManager() : GCMemoryManager() {}

  MemoryManager::Name kind() { return MemoryManager::MarkSweepCompact; }
  const char* name() { return "MarkSweepCompact"; }

};

// Manager for the parallel young-generation (ParNew) collector.
class ParNewMemoryManager : public GCMemoryManager {
private:
public:
  ParNewMemoryManager() : GCMemoryManager() {}

  MemoryManager::Name kind() { return MemoryManager::ParNew; }
  const char* name() { return "ParNew"; }

};

// Manager for the concurrent mark-sweep (CMS) collector.
class CMSMemoryManager : public GCMemoryManager {
private:
public:
  CMSMemoryManager() : GCMemoryManager() {}

  MemoryManager::Name kind() { return MemoryManager::ConcurrentMarkSweep; }
  const char* name() { return "ConcurrentMarkSweep";}

};

// Manager for the Parallel Scavenge young-generation collector.
class PSScavengeMemoryManager : public GCMemoryManager {
private:
public:
  PSScavengeMemoryManager() : GCMemoryManager() {}

  MemoryManager::Name kind() { return MemoryManager::PSScavenge; }
  const char* name() { return "PS Scavenge"; }

};

// Manager for the Parallel Scavenge full (mark-sweep) collector.
class PSMarkSweepMemoryManager : public GCMemoryManager {
private:
public:
  PSMarkSweepMemoryManager() : GCMemoryManager() {}

  MemoryManager::Name kind() { return MemoryManager::PSMarkSweep; }
  const char* name() { return "PS MarkSweep"; }
};
diff --git a/src/share/vm/services/memoryPool.cpp b/src/share/vm/services/memoryPool.cpp
new file mode 100644
index 000000000..745577476
--- /dev/null
+++ b/src/share/vm/services/memoryPool.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_memoryPool.cpp.incl"
+
// Construct a memory pool with its name, heap/non-heap type, sizes, and
// which usage thresholds it supports.  The Java-level pool object and
// the threshold sensors are created lazily later.
MemoryPool::MemoryPool(const char* name,
                       PoolType type,
                       size_t init_size,
                       size_t max_size,
                       bool support_usage_threshold,
                       bool support_gc_threshold) {
  _name = name;
  _initial_size = init_size;
  _max_size = max_size;
  _memory_pool_obj = NULL;
  _available_for_allocation = true;
  _num_managers = 0;
  _type = type;

  // initialize the max and init size of collection usage
  _after_gc_usage = MemoryUsage(_initial_size, 0, 0, _max_size);

  _usage_sensor = NULL;
  _gc_usage_sensor = NULL;
  // usage threshold supports both high and low threshold
  _usage_threshold = new ThresholdSupport(support_usage_threshold, support_usage_threshold);
  // gc usage threshold supports only high threshold
  // NOTE(review): the comment above says high-only, but support_gc_threshold
  // is passed for both the high and low flag here — confirm which is intended.
  _gc_usage_threshold = new ThresholdSupport(support_gc_threshold, support_gc_threshold);
}
+
+void MemoryPool::add_manager(MemoryManager* mgr) {
+ assert(_num_managers < MemoryPool::max_num_managers, "_num_managers exceeds the max");
+ if (_num_managers < MemoryPool::max_num_managers) {
+ _managers[_num_managers] = mgr;
+ _num_managers++;
+ }
+}
+
+
+// Returns an instanceHandle of a MemoryPool object.
+// It creates a MemoryPool instance when the first time
+// this function is called.
// Lazily creates (via a Java upcall to sun.management.ManagementFactory)
// and caches the Java-level MemoryPoolMXBean object for this pool, using
// the same double-checked locking pattern as
// MemoryManager::get_memory_manager_instance.
instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
  // Must do an acquire so as to force ordering of subsequent
  // loads from anything _memory_pool_obj points to or implies.
  instanceOop pool_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_pool_obj);
  if (pool_obj == NULL) {
    // It's ok for more than one thread to execute the code up to the locked region.
    // Extra pool instances will just be gc'ed.
    klassOop k = Management::sun_management_ManagementFactory_klass(CHECK_NULL);
    instanceKlassHandle ik(THREAD, k);

    Handle pool_name = java_lang_String::create_from_str(_name, CHECK_NULL);
    // Per the ternaries below: 0 when the threshold is supported,
    // -1 otherwise (presumably the JDK side treats -1 as unsupported —
    // confirm against sun.management.ManagementFactory).
    jlong usage_threshold_value = (_usage_threshold->is_high_threshold_supported() ? 0 : -1L);
    jlong gc_usage_threshold_value = (_gc_usage_threshold->is_high_threshold_supported() ? 0 : -1L);

    JavaValue result(T_OBJECT);
    JavaCallArguments args;
    args.push_oop(pool_name);           // Argument 1
    args.push_int((int) is_heap());     // Argument 2

    symbolHandle method_name = vmSymbolHandles::createMemoryPool_name();
    symbolHandle signature = vmSymbolHandles::createMemoryPool_signature();

    args.push_long(usage_threshold_value);    // Argument 3
    args.push_long(gc_usage_threshold_value); // Argument 4

    JavaCalls::call_static(&result,
                           ik,
                           method_name,
                           signature,
                           &args,
                           CHECK_NULL);

    instanceOop p = (instanceOop) result.get_jobject();
    instanceHandle pool(THREAD, p);

    {
      // Get lock since another thread may have create the instance
      MutexLocker ml(Management_lock);

      // Check if another thread has created the pool.  We reload
      // _memory_pool_obj here because some other thread may have
      // initialized it while we were executing the code before the lock.
      //
      // The lock has done an acquire, so the load can't float above it,
      // but we need to do a load_acquire as above.
      pool_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_pool_obj);
      if (pool_obj != NULL) {
        return pool_obj;
      }

      // Get the address of the object we created via call_special.
      pool_obj = pool();

      // Use store barrier to make sure the memory accesses associated
      // with creating the pool are visible before publishing its address.
      // The unlock will publish the store to _memory_pool_obj because
      // it does a release first.
      OrderAccess::release_store_ptr(&_memory_pool_obj, pool_obj);
    }
  }

  return pool_obj;
}
+
// Return the larger of the two values.
inline static size_t get_max_value(size_t val1, size_t val2) {
  if (val1 > val2) {
    return val1;
  }
  return val2;
}
+
+void MemoryPool::record_peak_memory_usage() {
+ // Caller in JDK is responsible for synchronization -
+ // acquire the lock for this memory pool before calling VM
+ MemoryUsage usage = get_memory_usage();
+ size_t peak_used = get_max_value(usage.used(), _peak_usage.used());
+ size_t peak_committed = get_max_value(usage.committed(), _peak_usage.committed());
+ size_t peak_max_size = get_max_value(usage.max_size(), _peak_usage.max_size());
+
+ _peak_usage = MemoryUsage(initial_size(), peak_used, peak_committed, peak_max_size);
+}
+
+static void set_sensor_obj_at(SensorInfo** sensor_ptr, instanceHandle sh) {
+ assert(*sensor_ptr == NULL, "Should be called only once");
+ SensorInfo* sensor = new SensorInfo();
+ sensor->set_sensor(sh());
+ *sensor_ptr = sensor;
+}
+
// Install the Java-level sensor for the usage threshold (once only).
void MemoryPool::set_usage_sensor_obj(instanceHandle sh) {
  set_sensor_obj_at(&_usage_sensor, sh);
}
+
// Install the Java-level sensor for the collection-usage threshold (once only).
void MemoryPool::set_gc_usage_sensor_obj(instanceHandle sh) {
  set_sensor_obj_at(&_gc_usage_sensor, sh);
}
+
+void MemoryPool::oops_do(OopClosure* f) {
+ f->do_oop((oop*) &_memory_pool_obj);
+ if (_usage_sensor != NULL) {
+ _usage_sensor->oops_do(f);
+ }
+ if (_gc_usage_sensor != NULL) {
+ _gc_usage_sensor->oops_do(f);
+ }
+}
+
// Pool backed by a single ContiguousSpace; initial size is the space's
// current capacity.
ContiguousSpacePool::ContiguousSpacePool(ContiguousSpace* space,
                                         const char* name,
                                         PoolType type,
                                         size_t max_size,
                                         bool support_usage_threshold) :
  CollectedMemoryPool(name, type, space->capacity(), max_size,
                      support_usage_threshold), _space(space) {
}
+
+MemoryUsage ContiguousSpacePool::get_memory_usage() {
+ size_t maxSize = (available_for_allocation() ? max_size() : 0);
+ size_t used = used_in_bytes();
+ size_t committed = _space->capacity();
+
+ return MemoryUsage(initial_size(), used, committed, maxSize);
+}
+
// Pool covering a DefNewGeneration's survivor space; initial size is
// the from-space's current capacity.
SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* gen,
                                                         const char* name,
                                                         PoolType type,
                                                         size_t max_size,
                                                         bool support_usage_threshold) :
  CollectedMemoryPool(name, type, gen->from()->capacity(), max_size,
                      support_usage_threshold), _gen(gen) {
}
+
+MemoryUsage SurvivorContiguousSpacePool::get_memory_usage() {
+ size_t maxSize = (available_for_allocation() ? max_size() : 0);
+ size_t used = used_in_bytes();
+ size_t committed = committed_in_bytes();
+
+ return MemoryUsage(initial_size(), used, committed, maxSize);
+}
+
+#ifndef SERIALGC
// Pool backed by a CMS CompactibleFreeListSpace; initial size is the
// space's current capacity.  (Compiled only when SERIALGC is not defined.)
CompactibleFreeListSpacePool::CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
                                                           const char* name,
                                                           PoolType type,
                                                           size_t max_size,
                                                           bool support_usage_threshold) :
  CollectedMemoryPool(name, type, space->capacity(), max_size,
                      support_usage_threshold), _space(space) {
}
+
+MemoryUsage CompactibleFreeListSpacePool::get_memory_usage() {
+ size_t maxSize = (available_for_allocation() ? max_size() : 0);
+ size_t used = used_in_bytes();
+ size_t committed = _space->capacity();
+
+ return MemoryUsage(initial_size(), used, committed, maxSize);
+}
+#endif // SERIALGC
+
// Pool covering an entire Generation; initial and max sizes come from
// the generation's current and maximum capacity.
GenerationPool::GenerationPool(Generation* gen,
                               const char* name,
                               PoolType type,
                               bool support_usage_threshold) :
  CollectedMemoryPool(name, type, gen->capacity(), gen->max_capacity(),
                      support_usage_threshold), _gen(gen) {
}
+
+MemoryUsage GenerationPool::get_memory_usage() {
+ size_t used = used_in_bytes();
+ size_t committed = _gen->capacity();
+ size_t maxSize = (available_for_allocation() ? max_size() : 0);
+
+ return MemoryUsage(initial_size(), used, committed, maxSize);
+}
+
// Non-heap pool backed by the code heap; no collection-usage threshold
// support (last constructor argument is false).
CodeHeapPool::CodeHeapPool(CodeHeap* codeHeap, const char* name, bool support_usage_threshold) :
  MemoryPool(name, NonHeap, codeHeap->capacity(), codeHeap->max_capacity(),
             support_usage_threshold, false), _codeHeap(codeHeap) {
}
+
+MemoryUsage CodeHeapPool::get_memory_usage() {
+ size_t used = used_in_bytes();
+ size_t committed = _codeHeap->capacity();
+ size_t maxSize = (available_for_allocation() ? max_size() : 0);
+
+ return MemoryUsage(initial_size(), used, committed, maxSize);
+}
diff --git a/src/share/vm/services/memoryPool.hpp b/src/share/vm/services/memoryPool.hpp
new file mode 100644
index 000000000..953890c05
--- /dev/null
+++ b/src/share/vm/services/memoryPool.hpp
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2003-2004 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// A memory pool represents the memory area that the VM manages.
+// The Java virtual machine has at least one memory pool
+// and it may create or remove memory pools during execution.
+// A memory pool can belong to the heap or the non-heap memory.
+// A Java virtual machine may also have memory pools belonging to
+// both heap and non-heap memory.
+
+// Forward declaration
+class MemoryManager;
+class SensorInfo;
+class Generation;
+class DefNewGeneration;
+class PSPermGen;
+class PermGen;
+class ThresholdSupport;
+
+// Abstract base for all VM memory pools exposed through the management
+// interface. Tracks identity (name/type/sizes), the managers that manage
+// this pool, peak and after-GC usage snapshots, usage thresholds/sensors,
+// and the Java-level java.lang.management.MemoryPool object for this pool.
+class MemoryPool : public CHeapObj {
+  friend class MemoryManager;
+ public:
+  enum PoolType {
+    Heap    = 1,
+    NonHeap = 2
+  };
+
+ private:
+  // Upper bound on the number of managers a single pool may belong to.
+  enum {
+    max_num_managers = 5
+  };
+
+  // We could make some of the following as performance counters
+  // for external monitoring.
+  const char* _name;
+  PoolType    _type;
+  size_t      _initial_size;
+  size_t      _max_size;
+  bool        _available_for_allocation; // Default is true
+  MemoryManager* _managers[max_num_managers];
+  int         _num_managers;
+  MemoryUsage _peak_usage;               // Peak memory usage
+  MemoryUsage _after_gc_usage;           // After GC memory usage
+
+  ThresholdSupport* _usage_threshold;
+  ThresholdSupport* _gc_usage_threshold;
+
+  SensorInfo*       _usage_sensor;
+  SensorInfo*       _gc_usage_sensor;
+
+  // Lazily-created Java-level memory pool object; volatile since it may be
+  // set and read by different threads.
+  volatile instanceOop _memory_pool_obj;
+
+  void add_manager(MemoryManager* mgr);
+
+ public:
+  MemoryPool(const char* name,
+             PoolType type,
+             size_t init_size,
+             size_t max_size,
+             bool support_usage_threshold,
+             bool support_gc_threshold);
+
+  const char* name()                       { return _name; }
+  bool is_heap()                           { return _type == Heap; }
+  bool is_non_heap()                       { return _type == NonHeap; }
+  size_t initial_size()   const            { return _initial_size; }
+  int num_memory_managers() const          { return _num_managers; }
+  // max size could be changed
+  virtual size_t max_size()    const       { return _max_size; }
+
+  // True if 'pool' is the Java-level object representing this pool.
+  bool is_pool(instanceHandle pool)        { return (pool() == _memory_pool_obj); }
+
+  bool available_for_allocation()          { return _available_for_allocation; }
+  // Sets availability and returns the previous value.
+  bool set_available_for_allocation(bool value) {
+    bool prev = _available_for_allocation;
+    _available_for_allocation = value;
+    return prev;
+  }
+
+  MemoryManager* get_memory_manager(int index) {
+    assert(index >= 0 && index < _num_managers, "Invalid index");
+    return _managers[index];
+  }
+
+  // Records current memory usage if it's a peak usage
+  void record_peak_memory_usage();
+
+  MemoryUsage get_peak_memory_usage() {
+    // check current memory usage first and then return peak usage
+    record_peak_memory_usage();
+    return _peak_usage;
+  }
+  void        reset_peak_memory_usage() {
+    _peak_usage = get_memory_usage();
+  }
+
+  ThresholdSupport* usage_threshold()      { return _usage_threshold; }
+  ThresholdSupport* gc_usage_threshold()   { return _gc_usage_threshold; }
+
+  SensorInfo*       usage_sensor()         { return _usage_sensor; }
+  SensorInfo*       gc_usage_sensor()      { return _gc_usage_sensor; }
+
+  void        set_usage_sensor_obj(instanceHandle s);
+  void        set_gc_usage_sensor_obj(instanceHandle s);
+  void        set_last_collection_usage(MemoryUsage u)  { _after_gc_usage = u; }
+
+  virtual instanceOop get_memory_pool_instance(TRAPS);
+  virtual MemoryUsage get_memory_usage() = 0;
+  virtual size_t used_in_bytes() = 0;
+  virtual bool is_collected_pool()         { return false; }
+  virtual MemoryUsage get_last_collection_usage() { return _after_gc_usage; }
+
+  // GC support
+  void oops_do(OopClosure* f);
+};
+
+// A pool that is managed by a garbage collector; always supports a GC
+// usage threshold (last ctor argument to MemoryPool is true).
+class CollectedMemoryPool : public MemoryPool {
+public:
+  CollectedMemoryPool(const char* name, PoolType type, size_t init_size, size_t max_size, bool support_usage_threshold) :
+        MemoryPool(name, type, init_size, max_size, support_usage_threshold, true) {};
+  bool is_collected_pool()            { return true; }
+};
+
+// Pool backed by a single ContiguousSpace (e.g. eden, perm-gen spaces).
+class ContiguousSpacePool : public CollectedMemoryPool {
+private:
+  ContiguousSpace* _space;
+
+public:
+  ContiguousSpacePool(ContiguousSpace* space, const char* name, PoolType type, size_t max_size, bool support_usage_threshold);
+
+  ContiguousSpace* space()              { return _space; }
+  MemoryUsage get_memory_usage();
+  size_t used_in_bytes()                { return space()->used(); }
+};
+
+// Pool reporting the survivor spaces of a DefNewGeneration; usage and
+// committed size are taken from the current "from" space only.
+class SurvivorContiguousSpacePool : public CollectedMemoryPool {
+private:
+  DefNewGeneration* _gen;
+
+public:
+  SurvivorContiguousSpacePool(DefNewGeneration* gen,
+                              const char* name,
+                              PoolType type,
+                              size_t max_size,
+                              bool support_usage_threshold);
+
+  MemoryUsage get_memory_usage();
+
+  size_t used_in_bytes() {
+    return _gen->from()->used();
+  }
+  size_t committed_in_bytes() {
+    return _gen->from()->capacity();
+  }
+};
+
+#ifndef SERIALGC
+// Pool backed by the CMS free-list space (CMS old gen / CMS perm gen).
+class CompactibleFreeListSpacePool : public CollectedMemoryPool {
+private:
+  CompactibleFreeListSpace* _space;
+public:
+  CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
+                               const char* name,
+                               PoolType type,
+                               size_t max_size,
+                               bool support_usage_threshold);
+
+  MemoryUsage get_memory_usage();
+  size_t used_in_bytes()            { return _space->used(); }
+};
+#endif // SERIALGC
+
+
+// Pool covering an entire Generation (e.g. tenured gen).
+class GenerationPool : public CollectedMemoryPool {
+private:
+  Generation* _gen;
+public:
+  GenerationPool(Generation* gen, const char* name, PoolType type, bool support_usage_threshold);
+
+  MemoryUsage get_memory_usage();
+  size_t used_in_bytes()        { return _gen->used(); }
+};
+
+// Non-heap pool for the code cache; "used" counts allocated capacity.
+class CodeHeapPool: public MemoryPool {
+private:
+  CodeHeap* _codeHeap;
+public:
+  CodeHeapPool(CodeHeap* codeHeap, const char* name, bool support_usage_threshold);
+  MemoryUsage get_memory_usage();
+  size_t used_in_bytes()            { return _codeHeap->allocated_capacity(); }
+};
diff --git a/src/share/vm/services/memoryService.cpp b/src/share/vm/services/memoryService.cpp
new file mode 100644
index 000000000..176f11e89
--- /dev/null
+++ b/src/share/vm/services/memoryService.cpp
@@ -0,0 +1,548 @@
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_memoryService.cpp.incl"
+
+// Global registries of all pools and managers, allocated on the C heap so
+// they outlive any Java heap activity.
+GrowableArray<MemoryPool*>* MemoryService::_pools_list =
+  new (ResourceObj::C_HEAP) GrowableArray<MemoryPool*>(init_pools_list_size, true);
+GrowableArray<MemoryManager*>* MemoryService::_managers_list =
+  new (ResourceObj::C_HEAP) GrowableArray<MemoryManager*>(init_managers_list_size, true);
+
+GCMemoryManager* MemoryService::_minor_gc_manager      = NULL;
+GCMemoryManager* MemoryService::_major_gc_manager      = NULL;
+MemoryPool*      MemoryService::_code_heap_pool        = NULL;
+
+// Thread closure that simply counts the threads it is applied to; used to
+// determine the number of GC threads for the GC memory managers.
+class GcThreadCountClosure: public ThreadClosure {
+ private:
+  int _count;
+ public:
+  GcThreadCountClosure() : _count(0) {};
+  void do_thread(Thread* thread);
+  int count() { return _count; }
+};
+
+void GcThreadCountClosure::do_thread(Thread* thread) {
+  _count++;
+}
+
+// Registers the pools and managers for the given heap, then records the
+// GC thread count and initializes GC statistics on both GC managers.
+// Called once at VM startup after the heap has been created.
+void MemoryService::set_universe_heap(CollectedHeap* heap) {
+  CollectedHeap::Name kind = heap->kind();
+  switch (kind) {
+    case CollectedHeap::GenCollectedHeap : {
+      add_gen_collected_heap_info(GenCollectedHeap::heap());
+      break;
+    }
+#ifndef SERIALGC
+    case CollectedHeap::ParallelScavengeHeap : {
+      add_parallel_scavenge_heap_info(ParallelScavengeHeap::heap());
+      break;
+    }
+#endif // SERIALGC
+    default: {
+      guarantee(false, "Not recognized kind of heap");
+    }
+  }
+
+  // set the GC thread count
+  GcThreadCountClosure gctcc;
+  heap->gc_threads_do(&gctcc);
+  int count = gctcc.count();
+  // A zero count (no dedicated GC threads) leaves the managers' defaults.
+  if (count > 0) {
+    _minor_gc_manager->set_num_gc_threads(count);
+    _major_gc_manager->set_num_gc_threads(count);
+  }
+
+  // All memory pools and memory managers are initialized.
+  //
+  _minor_gc_manager->initialize_gc_stat_info();
+  _major_gc_manager->initialize_gc_stat_info();
+}
+
+// Add memory pools for GenCollectedHeap
+// This function currently only supports two generations collected heap.
+// The collector for GenCollectedHeap will have two memory managers.
+// Selects the minor manager from the young-gen spec, the major manager from
+// the collector policy, then registers pools for both generations and the
+// permanent generation.
+void MemoryService::add_gen_collected_heap_info(GenCollectedHeap* heap) {
+  CollectorPolicy* policy = heap->collector_policy();
+
+  assert(policy->is_two_generation_policy(), "Only support two generations");
+  guarantee(heap->n_gens() == 2, "Only support two-generation heap");
+
+  TwoGenerationCollectorPolicy* two_gen_policy = policy->as_two_generation_policy();
+  if (two_gen_policy != NULL) {
+    GenerationSpec** specs = two_gen_policy->generations();
+    Generation::Name kind = specs[0]->name();
+    switch (kind) {
+      case Generation::DefNew:
+        _minor_gc_manager = MemoryManager::get_copy_memory_manager();
+        break;
+#ifndef SERIALGC
+      case Generation::ParNew:
+      case Generation::ASParNew:
+        _minor_gc_manager = MemoryManager::get_parnew_memory_manager();
+        break;
+#endif // SERIALGC
+      default:
+        guarantee(false, "Unrecognized generation spec");
+        break;
+    }
+    if (policy->is_mark_sweep_policy()) {
+      _major_gc_manager = MemoryManager::get_msc_memory_manager();
+#ifndef SERIALGC
+    } else if (policy->is_concurrent_mark_sweep_policy()) {
+      _major_gc_manager = MemoryManager::get_cms_memory_manager();
+#endif // SERIALGC
+    } else {
+      guarantee(false, "Unknown two-gen policy");
+    }
+  } else {
+    guarantee(false, "Non two-gen policy");
+  }
+  _managers_list->append(_minor_gc_manager);
+  _managers_list->append(_major_gc_manager);
+
+  // Young gen pools are managed by both managers; old gen only by the major.
+  add_generation_memory_pool(heap->get_gen(minor), _major_gc_manager, _minor_gc_manager);
+  add_generation_memory_pool(heap->get_gen(major), _major_gc_manager);
+
+  PermGen::Name name = policy->permanent_generation()->name();
+  switch (name) {
+    case PermGen::MarkSweepCompact: {
+      CompactingPermGenGen* perm_gen = (CompactingPermGenGen*) heap->perm_gen();
+      add_compact_perm_gen_memory_pool(perm_gen, _major_gc_manager);
+      break;
+    }
+#ifndef SERIALGC
+    case PermGen::ConcurrentMarkSweep: {
+      CMSPermGenGen* cms_gen = (CMSPermGenGen*) heap->perm_gen();
+      add_cms_perm_gen_memory_pool(cms_gen, _major_gc_manager);
+      break;
+    }
+#endif // SERIALGC
+    default:
+      guarantee(false, "Unrecognized perm generation");
+        break;
+  }
+}
+
+#ifndef SERIALGC
+// Add memory pools for ParallelScavengeHeap
+// This function currently only supports two generations collected heap.
+// The collector for ParallelScavengeHeap will have two memory managers.
+void MemoryService::add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap) {
+  // Two managers to keep statistics about _minor_gc_manager and _major_gc_manager GC.
+  _minor_gc_manager = MemoryManager::get_psScavenge_memory_manager();
+  _major_gc_manager = MemoryManager::get_psMarkSweep_memory_manager();
+  _managers_list->append(_minor_gc_manager);
+  _managers_list->append(_major_gc_manager);
+
+  // Young gen pools are shared by both managers; old/perm only by the major.
+  add_psYoung_memory_pool(heap->young_gen(), _major_gc_manager, _minor_gc_manager);
+  add_psOld_memory_pool(heap->old_gen(), _major_gc_manager);
+  add_psPerm_memory_pool(heap->perm_gen(), _major_gc_manager);
+}
+#endif // SERIALGC
+
+// Factory helper: creates a GenerationPool for 'gen', appends it to the
+// global pools list, and returns it.
+MemoryPool* MemoryService::add_gen(Generation* gen,
+                                   const char* name,
+                                   bool is_heap,
+                                   bool support_usage_threshold) {
+
+  MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
+  GenerationPool* pool = new GenerationPool(gen, name, type, support_usage_threshold);
+  _pools_list->append(pool);
+  return (MemoryPool*) pool;
+}
+
+// Factory helper: creates a ContiguousSpacePool for 'space' and registers it.
+MemoryPool* MemoryService::add_space(ContiguousSpace* space,
+                                     const char* name,
+                                     bool is_heap,
+                                     size_t max_size,
+                                     bool support_usage_threshold) {
+  MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
+  ContiguousSpacePool* pool = new ContiguousSpacePool(space, name, type, max_size, support_usage_threshold);
+
+  _pools_list->append(pool);
+  return (MemoryPool*) pool;
+}
+
+// Factory helper: creates a SurvivorContiguousSpacePool for the survivor
+// spaces of 'gen' and registers it.
+MemoryPool* MemoryService::add_survivor_spaces(DefNewGeneration* gen,
+                                               const char* name,
+                                               bool is_heap,
+                                               size_t max_size,
+                                               bool support_usage_threshold) {
+  MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
+  SurvivorContiguousSpacePool* pool = new SurvivorContiguousSpacePool(gen, name, type, max_size, support_usage_threshold);
+
+  _pools_list->append(pool);
+  return (MemoryPool*) pool;
+}
+
+#ifndef SERIALGC
+// Factory helper: creates a CompactibleFreeListSpacePool for a CMS space
+// and registers it.
+MemoryPool* MemoryService::add_cms_space(CompactibleFreeListSpace* space,
+                                         const char* name,
+                                         bool is_heap,
+                                         size_t max_size,
+                                         bool support_usage_threshold) {
+  MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
+  CompactibleFreeListSpacePool* pool = new CompactibleFreeListSpacePool(space, name, type, max_size, support_usage_threshold);
+  _pools_list->append(pool);
+  return (MemoryPool*) pool;
+}
+#endif // SERIALGC
+
+// Add memory pool(s) for one generation
+// Dispatches on the generation kind: young generations get separate eden
+// and survivor pools; old generations get a single pool. All pools created
+// here are then linked to the given manager(s).
+void MemoryService::add_generation_memory_pool(Generation* gen,
+                                               MemoryManager* major_mgr,
+                                               MemoryManager* minor_mgr) {
+  Generation::Name kind = gen->kind();
+  // Remember where this generation's pools start in the global list so we
+  // can link exactly those pools to the managers below.
+  int index = _pools_list->length();
+
+  switch (kind) {
+    case Generation::DefNew: {
+      assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
+      DefNewGeneration* young_gen = (DefNewGeneration*) gen;
+      // Add a memory pool for each space and young gen doesn't
+      // support low memory detection as it is expected to get filled up.
+      MemoryPool* eden = add_space(young_gen->eden(),
+                                   "Eden Space",
+                                   true, /* is_heap */
+                                   young_gen->max_eden_size(),
+                                   false /* support_usage_threshold */);
+      MemoryPool* survivor = add_survivor_spaces(young_gen,
+                                                 "Survivor Space",
+                                                 true, /* is_heap */
+                                                 young_gen->max_survivor_size(),
+                                                 false /* support_usage_threshold */);
+      break;
+    }
+
+#ifndef SERIALGC
+    case Generation::ParNew:
+    case Generation::ASParNew:
+    {
+      assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
+      // Add a memory pool for each space and young gen doesn't
+      // support low memory detection as it is expected to get filled up.
+      ParNewGeneration* parnew_gen = (ParNewGeneration*) gen;
+      MemoryPool* eden = add_space(parnew_gen->eden(),
+                                   "Par Eden Space",
+                                   true /* is_heap */,
+                                   parnew_gen->max_eden_size(),
+                                   false /* support_usage_threshold */);
+      MemoryPool* survivor = add_survivor_spaces(parnew_gen,
+                                                 "Par Survivor Space",
+                                                 true, /* is_heap */
+                                                 parnew_gen->max_survivor_size(),
+                                                 false /* support_usage_threshold */);
+
+      break;
+    }
+#endif // SERIALGC
+
+    case Generation::MarkSweepCompact: {
+      assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
+      add_gen(gen,
+              "Tenured Gen",
+              true, /* is_heap */
+              true  /* support_usage_threshold */);
+      break;
+    }
+
+#ifndef SERIALGC
+    case Generation::ConcurrentMarkSweep:
+    case Generation::ASConcurrentMarkSweep:
+    {
+      assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
+      ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*) gen;
+      MemoryPool* pool = add_cms_space(cms->cmsSpace(),
+                                       "CMS Old Gen",
+                                       true, /* is_heap */
+                                       cms->reserved().byte_size(),
+                                       true  /* support_usage_threshold */);
+      break;
+    }
+#endif // SERIALGC
+
+    default:
+      assert(false, "should not reach here");
+      // no memory pool added for others
+      break;
+  }
+
+  assert(major_mgr != NULL, "Should have at least one manager");
+  // Link managers and the memory pools together
+  for (int i = index; i < _pools_list->length(); i++) {
+    MemoryPool* pool = _pools_list->at(i);
+    major_mgr->add_pool(pool);
+    if (minor_mgr != NULL) {
+      minor_mgr->add_pool(pool);
+    }
+  }
+}
+
+// Registers pool(s) for the mark-sweep-compact permanent generation. The
+// unshared space's max excludes the shared read-only/read-write regions,
+// which get their own pools when class-data sharing is enabled.
+void MemoryService::add_compact_perm_gen_memory_pool(CompactingPermGenGen* perm_gen,
+                                                     MemoryManager* mgr) {
+  PermanentGenerationSpec* spec = perm_gen->spec();
+  size_t max_size = spec->max_size() - spec->read_only_size() - spec->read_write_size();
+  MemoryPool* pool = add_space(perm_gen->unshared_space(),
+                               "Perm Gen",
+                               false, /* is_heap */
+                               max_size,
+                               true   /* support_usage_threshold */);
+  mgr->add_pool(pool);
+  if (UseSharedSpaces) {
+    pool = add_space(perm_gen->ro_space(),
+                     "Perm Gen [shared-ro]",
+                     false, /* is_heap */
+                     spec->read_only_size(),
+                     true   /* support_usage_threshold */);
+    mgr->add_pool(pool);
+
+    pool = add_space(perm_gen->rw_space(),
+                     "Perm Gen [shared-rw]",
+                     false, /* is_heap */
+                     spec->read_write_size(),
+                     true   /* support_usage_threshold */);
+    mgr->add_pool(pool);
+  }
+}
+
+#ifndef SERIALGC
+// Registers the single pool for the CMS permanent generation.
+void MemoryService::add_cms_perm_gen_memory_pool(CMSPermGenGen* cms_gen,
+                                                 MemoryManager* mgr) {
+
+  MemoryPool* pool = add_cms_space(cms_gen->cmsSpace(),
+                                   "CMS Perm Gen",
+                                   false, /* is_heap */
+                                   cms_gen->reserved().byte_size(),
+                                   true   /* support_usage_threshold */);
+  mgr->add_pool(pool);
+}
+
+// Registers eden and survivor pools for the ParallelScavenge young gen and
+// links them to both GC managers.
+void MemoryService::add_psYoung_memory_pool(PSYoungGen* gen, MemoryManager* major_mgr, MemoryManager* minor_mgr) {
+  assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
+
+  // Add a memory pool for each space and young gen doesn't
+  // support low memory detection as it is expected to get filled up.
+  EdenMutableSpacePool* eden = new EdenMutableSpacePool(gen,
+                                                        gen->eden_space(),
+                                                        "PS Eden Space",
+                                                        MemoryPool::Heap,
+                                                        false /* support_usage_threshold */);
+
+  SurvivorMutableSpacePool* survivor = new SurvivorMutableSpacePool(gen,
+                                                                    "PS Survivor Space",
+                                                                    MemoryPool::Heap,
+                                                                    false /* support_usage_threshold */);
+
+  major_mgr->add_pool(eden);
+  major_mgr->add_pool(survivor);
+  minor_mgr->add_pool(eden);
+  minor_mgr->add_pool(survivor);
+  _pools_list->append(eden);
+  _pools_list->append(survivor);
+}
+
+// Registers the PS old-gen pool with the major (full GC) manager only.
+void MemoryService::add_psOld_memory_pool(PSOldGen* gen, MemoryManager* mgr) {
+  PSGenerationPool* old_gen = new PSGenerationPool(gen,
+                                                   "PS Old Gen",
+                                                   MemoryPool::Heap,
+                                                   true /* support_usage_threshold */);
+  mgr->add_pool(old_gen);
+  _pools_list->append(old_gen);
+}
+
+// Registers the PS perm-gen pool (non-heap) with the major manager only.
+void MemoryService::add_psPerm_memory_pool(PSPermGen* gen, MemoryManager* mgr) {
+  PSGenerationPool* perm_gen = new PSGenerationPool(gen,
+                                                    "PS Perm Gen",
+                                                    MemoryPool::NonHeap,
+                                                    true /* support_usage_threshold */);
+  mgr->add_pool(perm_gen);
+  _pools_list->append(perm_gen);
+}
+#endif // SERIALGC
+
+// Registers the code cache pool and its (non-GC) memory manager.
+void MemoryService::add_code_heap_memory_pool(CodeHeap* heap) {
+  _code_heap_pool = new CodeHeapPool(heap,
+                                     "Code Cache",
+                                     true /* support_usage_threshold */);
+  MemoryManager* mgr = MemoryManager::get_code_cache_memory_manager();
+  mgr->add_pool(_code_heap_pool);
+
+  _pools_list->append(_code_heap_pool);
+  _managers_list->append(mgr);
+}
+
+// Linear search for the manager whose Java-level object is 'mh';
+// returns NULL if none matches.
+MemoryManager* MemoryService::get_memory_manager(instanceHandle mh) {
+  for (int i = 0; i < _managers_list->length(); i++) {
+    MemoryManager* mgr = _managers_list->at(i);
+    if (mgr->is_manager(mh)) {
+      return mgr;
+    }
+  }
+  return NULL;
+}
+
+// Linear search for the pool whose Java-level object is 'ph';
+// returns NULL if none matches.
+MemoryPool* MemoryService::get_memory_pool(instanceHandle ph) {
+  for (int i = 0; i < _pools_list->length(); i++) {
+    MemoryPool* pool = _pools_list->at(i);
+    if (pool->is_pool(ph)) {
+      return pool;
+    }
+  }
+  return NULL;
+}
+
+// Samples all pools for peak usage, then runs low-memory detection
+// across all pools.
+void MemoryService::track_memory_usage() {
+  // Track the peak memory usage
+  for (int i = 0; i < _pools_list->length(); i++) {
+    MemoryPool* pool = _pools_list->at(i);
+    pool->record_peak_memory_usage();
+  }
+
+  // Detect low memory
+  LowMemoryDetector::detect_low_memory();
+}
+
+// Same as above but for a single pool; low-memory detection is skipped
+// when no threshold is enabled for that pool.
+void MemoryService::track_memory_pool_usage(MemoryPool* pool) {
+  // Track the peak memory usage
+  pool->record_peak_memory_usage();
+
+  // Detect low memory
+  if (LowMemoryDetector::is_enabled(pool)) {
+    LowMemoryDetector::detect_low_memory(pool);
+  }
+}
+
+// Notifies the appropriate GC manager that a collection is starting and
+// snapshots peak usage of every pool before the GC changes them.
+void MemoryService::gc_begin(bool fullGC) {
+  GCMemoryManager* mgr;
+  if (fullGC) {
+    mgr = _major_gc_manager;
+  } else {
+    mgr = _minor_gc_manager;
+  }
+  assert(mgr->is_gc_memory_manager(), "Sanity check");
+  mgr->gc_begin();
+
+  // Track the peak memory usage when GC begins
+  for (int i = 0; i < _pools_list->length(); i++) {
+    MemoryPool* pool = _pools_list->at(i);
+    pool->record_peak_memory_usage();
+  }
+}
+
+// Notifies the appropriate GC manager that the collection has finished.
+void MemoryService::gc_end(bool fullGC) {
+  GCMemoryManager* mgr;
+  if (fullGC) {
+    // NOTE(review): these casts are redundant -- the fields are already
+    // declared GCMemoryManager* (cf. gc_begin above).
+    mgr = (GCMemoryManager*) _major_gc_manager;
+  } else {
+    mgr = (GCMemoryManager*) _minor_gc_manager;
+  }
+  assert(mgr->is_gc_memory_manager(), "Sanity check");
+
+  // register the GC end statistics and memory usage
+  mgr->gc_end();
+}
+
+// GC support: applies the closure to the oops held by every pool and
+// manager (their Java-level objects and sensors).
+void MemoryService::oops_do(OopClosure* f) {
+  int i;
+
+  for (i = 0; i < _pools_list->length(); i++) {
+    MemoryPool* pool = _pools_list->at(i);
+    pool->oops_do(f);
+  }
+  for (i = 0; i < _managers_list->length(); i++) {
+    MemoryManager* mgr = _managers_list->at(i);
+    mgr->oops_do(f);
+  }
+}
+
+// Sets the PrintGC flag from the management interface (under
+// Management_lock) and returns the flag's previous value.
+bool MemoryService::set_verbose(bool verbose) {
+  MutexLocker m(Management_lock);
+  // verbose will be set to the previous value
+  bool succeed = CommandLineFlags::boolAtPut((char*)"PrintGC", &verbose, MANAGEMENT);
+  assert(succeed, "Setting PrintGC flag fails");
+  // Keep class-unloading tracing consistent with the new PrintGC setting.
+  ClassLoadingService::reset_trace_class_unloading();
+
+  return verbose;
+}
+
+// Allocates a java.lang.management.MemoryUsage object and invokes its
+// (long, long, long, long) constructor with the values from 'usage'.
+// Returns the new instance; propagates any pending exception via CHECK_NH.
+Handle MemoryService::create_MemoryUsage_obj(MemoryUsage usage, TRAPS) {
+  klassOop k = Management::java_lang_management_MemoryUsage_klass(CHECK_NH);
+  instanceKlassHandle ik(THREAD, k);
+
+  instanceHandle obj = ik->allocate_instance_handle(CHECK_NH);
+
+  JavaValue result(T_VOID);
+  JavaCallArguments args(10);
+  args.push_oop(obj);                         // receiver
+  args.push_long(usage.init_size_as_jlong()); // Argument 1
+  args.push_long(usage.used_as_jlong());      // Argument 2
+  args.push_long(usage.committed_as_jlong()); // Argument 3
+  args.push_long(usage.max_size_as_jlong());  // Argument 4
+
+  JavaCalls::call_special(&result,
+                          ik,
+                          vmSymbolHandles::object_initializer_name(),
+                          vmSymbolHandles::long_long_long_long_void_signature(),
+                          &args,
+                          CHECK_NH);
+  return obj;
+}
+//
+// GC manager type depends on the type of Generation. Depending the space
+// availablity and vm option the gc uses major gc manager or minor gc
+// manager or both. The type of gc manager depends on the generation kind.
+// For DefNew, ParNew and ASParNew generation doing scavange gc uses minor
+// gc manager (so _fullGC is set to false ) and for other generation kind
+// DOing mark-sweep-compact uses major gc manager (so _fullGC is set
+// to true).
+TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind) {
+  switch (kind) {
+    case Generation::DefNew:
+#ifndef SERIALGC
+    case Generation::ParNew:
+    case Generation::ASParNew:
+#endif // SERIALGC
+      _fullGC=false;
+      break;
+    case Generation::MarkSweepCompact:
+#ifndef SERIALGC
+    case Generation::ConcurrentMarkSweep:
+    case Generation::ASConcurrentMarkSweep:
+#endif // SERIALGC
+      _fullGC=true;
+      break;
+    default:
+      // NOTE(review): in a product build this assert compiles away and
+      // _fullGC is left uninitialized on an unrecognized kind -- confirm
+      // all generation kinds are covered, or default-initialize _fullGC.
+      assert(false, "Unrecognized gc generation kind.");
+  }
+  MemoryService::gc_begin(_fullGC);
+}
+// Explicit variant: the caller states whether this is a full GC.
+TraceMemoryManagerStats::TraceMemoryManagerStats(bool fullGC) {
+  _fullGC = fullGC;
+  MemoryService::gc_begin(_fullGC);
+}
+
+// Scope exit marks the end of the collection for the chosen manager.
+TraceMemoryManagerStats::~TraceMemoryManagerStats() {
+  MemoryService::gc_end(_fullGC);
+}
diff --git a/src/share/vm/services/memoryService.hpp b/src/share/vm/services/memoryService.hpp
new file mode 100644
index 000000000..52b76a729
--- /dev/null
+++ b/src/share/vm/services/memoryService.hpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Forward declaration
+class MemoryPool;
+class MemoryManager;
+class GCMemoryManager;
+class CollectedHeap;
+class Generation;
+class DefNewGeneration;
+class PSYoungGen;
+class PSOldGen;
+class PSPermGen;
+class CodeHeap;
+class ContiguousSpace;
+class CompactibleFreeListSpace;
+class PermanentGenerationSpec;
+class GenCollectedHeap;
+class ParallelScavengeHeap;
+class CompactingPermGenGen;
+class CMSPermGenGen;
+
+// VM Monitoring and Management Support
+
+// All-static facade over the VM's memory pools and memory managers; backs
+// the java.lang.management memory MXBeans. Pools/managers are registered
+// at heap creation (set_universe_heap) and code cache setup.
+class MemoryService : public AllStatic {
+private:
+  enum {
+    init_pools_list_size = 10,
+    init_managers_list_size = 5
+  };
+
+  // index for minor and major generations
+  enum {
+    minor = 0,
+    major = 1,
+    n_gens = 2
+  };
+
+  static GrowableArray<MemoryPool*>* _pools_list;
+  static GrowableArray<MemoryManager*>* _managers_list;
+
+  // memory managers for minor and major GC statistics
+  static GCMemoryManager* _major_gc_manager;
+  static GCMemoryManager* _minor_gc_manager;
+
+  // Code heap memory pool
+  static MemoryPool* _code_heap_pool;
+
+  static void add_generation_memory_pool(Generation* gen,
+                                         MemoryManager* major_mgr,
+                                         MemoryManager* minor_mgr);
+  // Convenience overload for generations managed by the major manager only.
+  static void add_generation_memory_pool(Generation* gen,
+                                         MemoryManager* major_mgr) {
+    add_generation_memory_pool(gen, major_mgr, NULL);
+  }
+
+  static void add_compact_perm_gen_memory_pool(CompactingPermGenGen* perm_gen,
+                                               MemoryManager* mgr);
+  static void add_cms_perm_gen_memory_pool(CMSPermGenGen* perm_gen,
+                                           MemoryManager* mgr);
+
+  static void add_psYoung_memory_pool(PSYoungGen* gen,
+                                      MemoryManager* major_mgr,
+                                      MemoryManager* minor_mgr);
+  static void add_psOld_memory_pool(PSOldGen* gen,
+                                    MemoryManager* mgr);
+  static void add_psPerm_memory_pool(PSPermGen* perm,
+                                     MemoryManager* mgr);
+
+
+  static MemoryPool* add_space(ContiguousSpace* space,
+                               const char* name,
+                               bool is_heap,
+                               size_t max_size,
+                               bool support_usage_threshold);
+  static MemoryPool* add_survivor_spaces(DefNewGeneration* gen,
+                                         const char* name,
+                                         bool is_heap,
+                                         size_t max_size,
+                                         bool support_usage_threshold);
+  static MemoryPool* add_gen(Generation* gen,
+                             const char* name,
+                             bool is_heap,
+                             bool support_usage_threshold);
+  static MemoryPool* add_cms_space(CompactibleFreeListSpace* space,
+                                   const char* name,
+                                   bool is_heap,
+                                   size_t max_size,
+                                   bool support_usage_threshold);
+
+  static void add_gen_collected_heap_info(GenCollectedHeap* heap);
+  static void add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap);
+
+public:
+  static void set_universe_heap(CollectedHeap* heap);
+  static void add_code_heap_memory_pool(CodeHeap* heap);
+
+  // Lookup by the Java-level management object (linear scan).
+  static MemoryPool* get_memory_pool(instanceHandle pool);
+  static MemoryManager* get_memory_manager(instanceHandle mgr);
+
+  // NOTE(review): 'const' on a by-value int return type has no effect here;
+  // compilers may warn. Harmless, but plain 'static int' would do.
+  static const int num_memory_pools() {
+    return _pools_list->length();
+  }
+  static const int num_memory_managers() {
+    return _managers_list->length();
+  }
+
+  static MemoryPool* get_memory_pool(int index) {
+    return _pools_list->at(index);
+  }
+
+  static MemoryManager* get_memory_manager(int index) {
+    return _managers_list->at(index);
+  }
+
+  static void track_memory_usage();
+  static void track_code_cache_memory_usage() {
+    track_memory_pool_usage(_code_heap_pool);
+  }
+  static void track_memory_pool_usage(MemoryPool* pool);
+
+  static void gc_begin(bool fullGC);
+  static void gc_end(bool fullGC);
+
+  static void oops_do(OopClosure* f);
+
+  static bool get_verbose() { return PrintGC; }
+  static bool set_verbose(bool verbose);
+
+  // Create an instance of java/lang/management/MemoryUsage
+  static Handle create_MemoryUsage_obj(MemoryUsage usage, TRAPS);
+};
+
+// RAII helper bracketing a collection: the constructor calls
+// MemoryService::gc_begin and the destructor MemoryService::gc_end,
+// with _fullGC selecting the major vs. minor GC manager.
+class TraceMemoryManagerStats : public StackObj {
+private:
+  bool         _fullGC;
+public:
+  TraceMemoryManagerStats(bool fullGC);
+  TraceMemoryManagerStats(Generation::Name kind);
+  ~TraceMemoryManagerStats();
+};
diff --git a/src/share/vm/services/memoryUsage.hpp b/src/share/vm/services/memoryUsage.hpp
new file mode 100644
index 000000000..7276d9ca5
--- /dev/null
+++ b/src/share/vm/services/memoryUsage.hpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// A memory usage contains the following attributes about memory usage:
+// initSize - represents the initial amount of memory (in bytes) that
+// the Java virtual machine requests from the operating system
+// for memory management. The Java virtual machine may request
+// additional memory from the operating system later when appropriate.
+// Its value may be undefined.
+// used - represents the amount of memory currently used (in bytes).
+// committed - represents the amount of memory (in bytes) that is
+// guaranteed to be available for use by the Java virtual machine.
+// The amount of committed memory may change over time (increase
+// or decrease). It is guaranteed to be greater than or equal
+// to initSize.
+// maxSize - represents the maximum amount of memory (in bytes)
+// that can be used for memory management. The maximum amount of
+// memory for memory management could be less than the amount of
+// committed memory. Its value may be undefined.
+
+// Value class holding one init/used/committed/max snapshot; mirrors
+// java.lang.management.MemoryUsage.
+class MemoryUsage VALUE_OBJ_CLASS_SPEC {
+private:
+  size_t _initSize;
+  size_t _used;
+  size_t _committed;
+  size_t _maxSize;
+
+public:
+  // Constructors
+  MemoryUsage(size_t i, size_t u, size_t c, size_t m) :
+    _initSize(i), _used(u), _committed(c), _maxSize(m) {};
+  // Default: all-zero snapshot.
+  MemoryUsage() :
+    _initSize(0), _used(0), _committed(0), _maxSize(0) {};
+
+  size_t init_size() const { return _initSize; }
+  size_t used()      const { return _used; }
+  size_t committed() const { return _committed; }
+  size_t max_size()  const { return _maxSize; }
+
+  // Converts a size_t to jlong for the Java-level MemoryUsage, mapping the
+  // "undefined" sentinel (size_t)-1 to -1 and clamping to max_jlong on LP64.
+  inline static jlong convert_to_jlong(size_t val) {
+    // In the 64-bit vm, a size_t can overflow a jlong (which is signed).
+    jlong ret;
+    if (val == (size_t)-1) {
+      ret = -1L;
+    } else {
+      NOT_LP64(ret = val;)
+      LP64_ONLY(ret = MIN2(val, (size_t)max_jlong);)
+    }
+    return ret;
+  }
+
+  jlong init_size_as_jlong() const { return convert_to_jlong(_initSize); }
+  jlong used_as_jlong()      const { return convert_to_jlong(_used); }
+  jlong committed_as_jlong() const { return convert_to_jlong(_committed); }
+  jlong max_size_as_jlong()  const { return convert_to_jlong(_maxSize); }
+};
diff --git a/src/share/vm/services/psMemoryPool.cpp b/src/share/vm/services/psMemoryPool.cpp
new file mode 100644
index 000000000..3541d00a1
--- /dev/null
+++ b/src/share/vm/services/psMemoryPool.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_psMemoryPool.cpp.incl"
+
+PSGenerationPool::PSGenerationPool(PSOldGen* gen,  // pool wrapping a ParallelScavenge old generation
+                                   const char* name,
+                                   PoolType type,
+                                   bool support_usage_threshold) :
+  CollectedMemoryPool(name, type, gen->capacity_in_bytes(),  // initial size = current committed capacity
+                      gen->reserved().byte_size(), support_usage_threshold), _gen(gen) {
+}
+
+PSGenerationPool::PSGenerationPool(PSPermGen* gen,  // same pool type also covers the perm generation
+                                   const char* name,
+                                   PoolType type,
+                                   bool support_usage_threshold) :
+  CollectedMemoryPool(name, type, gen->capacity_in_bytes(),
+                      gen->reserved().byte_size(), support_usage_threshold), _gen(gen) {
+}
+
+MemoryUsage PSGenerationPool::get_memory_usage() {  // snapshot current usage of the generation
+  size_t maxSize = (available_for_allocation() ? max_size() : 0);  // report max 0 when pool is not available for allocation
+  size_t used = used_in_bytes();
+  size_t committed = _gen->capacity_in_bytes();  // committed tracks the generation's current capacity
+
+  return MemoryUsage(initial_size(), used, committed, maxSize);
+}
+
+// The max size of EdenMutableSpacePool =
+// max size of the PSYoungGen - capacity of two survivor spaces
+//
+// Max size of PS eden space is changing due to ergonomic.
+// PSYoungGen, PSOldGen, Eden, Survivor spaces are all resizable.
+//
+EdenMutableSpacePool::EdenMutableSpacePool(PSYoungGen* gen,  // pool over the PS eden MutableSpace; max derived per comment above
+                                           MutableSpace* space,
+                                           const char* name,
+                                           PoolType type,
+                                           bool support_usage_threshold) :
+  CollectedMemoryPool(name, type, space->capacity_in_bytes(),
+                      (gen->max_size() - gen->from_space()->capacity_in_bytes() - gen->to_space()->capacity_in_bytes()),
+                      support_usage_threshold),
+  _gen(gen), _space(space) {
+}
+
+MemoryUsage EdenMutableSpacePool::get_memory_usage() {  // snapshot current eden usage
+  size_t maxSize = (available_for_allocation() ? max_size() : 0);  // 0 when eden can't be allocated into
+  size_t used = used_in_bytes();
+  size_t committed = _space->capacity_in_bytes();  // committed = eden space's current capacity
+
+  return MemoryUsage(initial_size(), used, committed, maxSize);
+}
+
+// The max size of SurvivorMutableSpacePool =
+// current capacity of the from-space
+//
+// PS from and to survivor spaces could have different sizes.
+//
+SurvivorMutableSpacePool::SurvivorMutableSpacePool(PSYoungGen* gen,  // pool over the from-survivor space; see size note above
+                                                   const char* name,
+                                                   PoolType type,
+                                                   bool support_usage_threshold) :
+  CollectedMemoryPool(name, type, gen->from_space()->capacity_in_bytes(),  // initial and max both track from-space capacity
+                      gen->from_space()->capacity_in_bytes(),
+                      support_usage_threshold), _gen(gen) {
+}
+
+MemoryUsage SurvivorMutableSpacePool::get_memory_usage() {  // snapshot current from-space usage
+  size_t maxSize = (available_for_allocation() ? max_size() : 0);  // 0 when not available for allocation
+  size_t used = used_in_bytes();
+  size_t committed = committed_in_bytes();
+  return MemoryUsage(initial_size(), used, committed, maxSize);
+}
diff --git a/src/share/vm/services/psMemoryPool.hpp b/src/share/vm/services/psMemoryPool.hpp
new file mode 100644
index 000000000..5188a8ff5
--- /dev/null
+++ b/src/share/vm/services/psMemoryPool.hpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class PSGenerationPool : public CollectedMemoryPool {  // MemoryPool view of a PS old or perm generation
+private:
+  PSOldGen* _gen;  // underlying generation; PSPermGen ctor also stores here -- presumably PSPermGen is-a PSOldGen, TODO confirm
+
+public:
+  PSGenerationPool(PSOldGen* pool, const char* name, PoolType type, bool support_usage_threshold);
+  PSGenerationPool(PSPermGen* pool, const char* name, PoolType type, bool support_usage_threshold);
+
+  MemoryUsage get_memory_usage();
+  size_t used_in_bytes() { return _gen->used_in_bytes(); }
+  size_t max_size() const { return _gen->reserved().byte_size(); }  // max = full reserved range of the generation
+};
+
+class EdenMutableSpacePool : public CollectedMemoryPool {  // MemoryPool view of the PS eden space
+private:
+  PSYoungGen* _gen;      // owning young generation (needed for max_size computation)
+  MutableSpace* _space;  // the eden space itself
+
+public:
+  EdenMutableSpacePool(PSYoungGen* gen,
+                       MutableSpace* space,
+                       const char* name,
+                       PoolType type,
+                       bool support_usage_threshold);
+
+  MutableSpace* space() { return _space; }
+  MemoryUsage get_memory_usage();
+  size_t used_in_bytes() { return space()->used_in_bytes(); }
+  size_t max_size() const {
+    // Eden's max_size = max_size of Young Gen - the current committed size of survivor spaces
+    return _gen->max_size() - _gen->from_space()->capacity_in_bytes() - _gen->to_space()->capacity_in_bytes();
+  }
+};
+
+class SurvivorMutableSpacePool : public CollectedMemoryPool {  // MemoryPool view of the PS from-survivor space
+private:
+  PSYoungGen* _gen;  // owning young generation; all sizes read from its from-space
+
+public:
+  SurvivorMutableSpacePool(PSYoungGen* gen,
+                           const char* name,
+                           PoolType type,
+                           bool support_usage_threshold);
+
+  MemoryUsage get_memory_usage();
+
+  size_t used_in_bytes() {
+    return _gen->from_space()->used_in_bytes();
+  }
+  size_t committed_in_bytes() {
+    return _gen->from_space()->capacity_in_bytes();
+  }
+  size_t max_size() const {
+    // Return current committed size of the from-space
+    return _gen->from_space()->capacity_in_bytes();  // NOTE: max changes as survivor spaces resize
+  }
+};
diff --git a/src/share/vm/services/runtimeService.cpp b/src/share/vm/services/runtimeService.cpp
new file mode 100644
index 000000000..16811a2aa
--- /dev/null
+++ b/src/share/vm/services/runtimeService.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_runtimeService.cpp.incl"
+
+HS_DTRACE_PROBE_DECL(hs_private, safepoint__begin);
+HS_DTRACE_PROBE_DECL(hs_private, safepoint__end);
+
+TimeStamp RuntimeService::_app_timer;
+TimeStamp RuntimeService::_safepoint_timer;
+PerfCounter* RuntimeService::_sync_time_ticks = NULL;
+PerfCounter* RuntimeService::_total_safepoints = NULL;
+PerfCounter* RuntimeService::_safepoint_time_ticks = NULL;
+PerfCounter* RuntimeService::_application_time_ticks = NULL;
+PerfCounter* RuntimeService::_thread_interrupt_signaled_count = NULL;
+PerfCounter* RuntimeService::_interrupted_before_count = NULL;
+PerfCounter* RuntimeService::_interrupted_during_count = NULL;
+
+void RuntimeService::init() {  // one-time setup: create sun.rt perf counters (only when UsePerfData)
+  // Make sure the VM version is initialized
+  Abstract_VM_Version::initialize();
+
+  if (UsePerfData) {
+    EXCEPTION_MARK;  // counter creation can throw; propagate via CHECK below
+
+    _sync_time_ticks =
+      PerfDataManager::create_counter(SUN_RT, "safepointSyncTime",
+                                      PerfData::U_Ticks, CHECK);
+
+    _total_safepoints =
+      PerfDataManager::create_counter(SUN_RT, "safepoints",
+                                      PerfData::U_Events, CHECK);
+
+    _safepoint_time_ticks =
+      PerfDataManager::create_counter(SUN_RT, "safepointTime",
+                                      PerfData::U_Ticks, CHECK);
+
+    _application_time_ticks =
+      PerfDataManager::create_counter(SUN_RT, "applicationTime",
+                                      PerfData::U_Ticks, CHECK);
+
+
+    // create performance counters for jvm_version and its capabilities
+    PerfDataManager::create_constant(SUN_RT, "jvmVersion", PerfData::U_None,
+                                     (jlong) Abstract_VM_Version::jvm_version(), CHECK);
+
+    // I/O interruption related counters
+
+    // thread signaling via os::interrupt()
+
+    _thread_interrupt_signaled_count =
+        PerfDataManager::create_counter(SUN_RT,
+         "threadInterruptSignaled", PerfData::U_Events, CHECK);
+
+    // OS_INTRPT via "check before" in _INTERRUPTIBLE
+
+    _interrupted_before_count =
+        PerfDataManager::create_counter(SUN_RT, "interruptedBeforeIO",
+                                        PerfData::U_Events, CHECK);
+
+    // OS_INTRPT via "check during" in _INTERRUPTIBLE
+
+    _interrupted_during_count =
+        PerfDataManager::create_counter(SUN_RT, "interruptedDuringIO",
+                                        PerfData::U_Events, CHECK);
+
+    // The capabilities counter is a binary representation of the VM capabilities in string.
+    // This string respresentation simplifies the implementation of the client side
+    // to parse the value.
+    char capabilities[65];  // 64 capability flag chars + NUL
+    size_t len = sizeof(capabilities);
+    memset((void*) capabilities, '0', len);  // default every capability bit to '0'
+    capabilities[len-1] = '\0';
+    capabilities[0] = AttachListener::is_attach_supported() ? '1' : '0';  // bit 0: dynamic attach
+#ifdef KERNEL
+    capabilities[1] = '1';  // bit 1: kernel VM build
+#endif // KERNEL
+    PerfDataManager::create_string_constant(SUN_RT, "jvmCapabilities",
+                                            capabilities, CHECK);
+  }
+}
+
+void RuntimeService::record_safepoint_begin() {  // called when a safepoint starts: fires dtrace probe, accrues app time
+  HS_DTRACE_PROBE(hs_private, safepoint__begin);
+  // update the time stamp to begin recording safepoint time
+  _safepoint_timer.update();
+  if (UsePerfData) {
+    _total_safepoints->inc();
+    if (_app_timer.is_updated()) {  // skip before the first record_application_start()
+      _application_time_ticks->inc(_app_timer.ticks_since_update());
+    }
+  }
+}
+
+void RuntimeService::record_safepoint_synchronized() {  // all threads stopped: elapsed so far is sync (time-to-safepoint) cost
+  if (UsePerfData) {
+    _sync_time_ticks->inc(_safepoint_timer.ticks_since_update());
+  }
+}
+
+void RuntimeService::record_safepoint_end() {  // safepoint over: accrue total safepoint time, restart app timer
+  HS_DTRACE_PROBE(hs_private, safepoint__end);
+  // update the time stamp to begin recording app time
+  _app_timer.update();
+  if (UsePerfData) {
+    _safepoint_time_ticks->inc(_safepoint_timer.ticks_since_update());
+  }
+}
+
+void RuntimeService::record_application_start() {  // mark the start of application (non-safepoint) execution
+  // update the time stamp to begin recording app time
+  _app_timer.update();
+}
+
+// Don't need to record application end because we currently
+// exit at a safepoint and record_safepoint_begin() handles updating
+// the application time counter at VM exit.
+
+jlong RuntimeService::safepoint_sync_time_ms() {  // cumulative time-to-safepoint in ms; -1 when perf data disabled
+  return UsePerfData ?
+    Management::ticks_to_ms(_sync_time_ticks->get_value()) : -1;
+}
+
+jlong RuntimeService::safepoint_count() {  // total safepoints so far; -1 when perf data disabled
+  return UsePerfData ?
+    _total_safepoints->get_value() : -1;
+}
+jlong RuntimeService::safepoint_time_ms() {  // cumulative time spent at safepoints in ms; -1 when perf data disabled
+  return UsePerfData ?
+    Management::ticks_to_ms(_safepoint_time_ticks->get_value()) : -1;
+}
+
+jlong RuntimeService::application_time_ms() {  // cumulative application (non-safepoint) time in ms; -1 when perf data disabled
+  return UsePerfData ?
+    Management::ticks_to_ms(_application_time_ticks->get_value()) : -1;
+}
+
+void RuntimeService::record_interrupted_before_count() {  // OS_INTRPT detected before an interruptible I/O op
+  if (UsePerfData) {
+    _interrupted_before_count->inc();
+  }
+}
+
+void RuntimeService::record_interrupted_during_count() {  // OS_INTRPT detected during an interruptible I/O op
+  if (UsePerfData) {
+    _interrupted_during_count->inc();
+  }
+}
+
+void RuntimeService::record_thread_interrupt_signaled_count() {  // a thread was signaled via os::interrupt()
+  if (UsePerfData) {
+    _thread_interrupt_signaled_count->inc();
+  }
+}
diff --git a/src/share/vm/services/runtimeService.hpp b/src/share/vm/services/runtimeService.hpp
new file mode 100644
index 000000000..47de1c056
--- /dev/null
+++ b/src/share/vm/services/runtimeService.hpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class RuntimeService : public AllStatic {  // static-only service tracking safepoint/application time and I/O interruption perf counters
+private:
+  static PerfCounter* _sync_time_ticks;        // Accumulated time spent getting to safepoints
+  static PerfCounter* _total_safepoints;
+  static PerfCounter* _safepoint_time_ticks;   // Accumulated time at safepoints
+  static PerfCounter* _application_time_ticks; // Accumulated time not at safepoints
+  static PerfCounter* _thread_interrupt_signaled_count;// os:interrupt thr_kill
+  static PerfCounter* _interrupted_before_count;  // _INTERRUPTIBLE OS_INTRPT
+  static PerfCounter* _interrupted_during_count;  // _INTERRUPTIBLE OS_INTRPT
+
+  static TimeStamp _safepoint_timer;  // restarted at each safepoint begin
+  static TimeStamp _app_timer;        // restarted at each safepoint end / app start
+
+public:
+  static void init();  // creates the counters above when UsePerfData is set
+
+  static jlong safepoint_sync_time_ms();
+  static jlong safepoint_count();
+  static jlong safepoint_time_ms();
+  static jlong application_time_ms();
+
+  static double last_safepoint_time_sec()   { return _safepoint_timer.seconds(); }
+  static double last_application_time_sec() { return _app_timer.seconds(); }
+
+  // callbacks
+  static void record_safepoint_begin();
+  static void record_safepoint_synchronized();
+  static void record_safepoint_end();
+  static void record_application_start();
+
+  // interruption events
+  static void record_interrupted_before_count();
+  static void record_interrupted_during_count();
+  static void record_thread_interrupt_signaled_count();
+};
diff --git a/src/share/vm/services/serviceUtil.hpp b/src/share/vm/services/serviceUtil.hpp
new file mode 100644
index 000000000..bf907fe00
--- /dev/null
+++ b/src/share/vm/services/serviceUtil.hpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2003 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+//
+// Serviceability utility functions.
+// (Shared by MM and JVMTI).
+//
+class ServiceUtil : public AllStatic {  // shared serviceability helpers for memory management and JVMTI
+ public:
+
+  // Return true if oop represents an object that is "visible"
+  // to the java world.
+  static inline bool visible_oop(oop o) {
+    // the sentinel for deleted handles isn't visible
+    if (o == JNIHandles::deleted_handle()) {
+      return false;
+    }
+
+    // ignore KlassKlass
+    if (o->is_klass()) {
+      return false;
+    }
+
+    // instance
+    if (o->is_instance()) {
+      // instance objects are visible
+      if (o->klass() != SystemDictionary::class_klass()) {
+        return true;
+      }
+      if (java_lang_Class::is_primitive(o)) {
+        return true;
+      }
+      // java.lang.Classes are visible
+      o = java_lang_Class::as_klassOop(o);  // from here on, o is the mirrored klassOop, not the original instance
+      if (o->is_klass()) {
+        // if it's a class for an object, an object array, or
+        // primitive (type) array then it's visible.
+        klassOop klass = (klassOop)o;
+        if (Klass::cast(klass)->oop_is_instance()) {
+          return true;
+        }
+        if (Klass::cast(klass)->oop_is_objArray()) {
+          return true;
+        }
+        if (Klass::cast(klass)->oop_is_typeArray()) {
+          return true;
+        }
+      }
+      return false;  // mirrors of internal klasses are not visible
+    }
+    // object arrays are visible if they aren't system object arrays
+    if (o->is_objArray()) {
+      objArrayOop array = (objArrayOop)o;
+      if (array->klass() != Universe::systemObjArrayKlassObj()) {
+        return true;
+      } else {
+        return false;
+      }
+    }
+    // type arrays are visible
+    if (o->is_typeArray()) {
+      return true;
+    }
+    // everything else (methodOops, ...) aren't visible
+    return false;
+  };  // end of visible_oop()
+
+};
diff --git a/src/share/vm/services/threadService.cpp b/src/share/vm/services/threadService.cpp
new file mode 100644
index 000000000..80dcb486c
--- /dev/null
+++ b/src/share/vm/services/threadService.cpp
@@ -0,0 +1,885 @@
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_threadService.cpp.incl"
+
+// TODO: we need to define a naming convention for perf counters
+// to distinguish counters for:
+// - standard JSR174 use
+// - Hotspot extension (public and committed)
+// - Hotspot extension (private/internal and uncommitted)
+
+// Default is disabled.
+bool ThreadService::_thread_monitoring_contention_enabled = false;
+bool ThreadService::_thread_cpu_time_enabled = false;
+
+PerfCounter* ThreadService::_total_threads_count = NULL;
+PerfVariable* ThreadService::_live_threads_count = NULL;
+PerfVariable* ThreadService::_peak_threads_count = NULL;
+PerfVariable* ThreadService::_daemon_threads_count = NULL;
+volatile int ThreadService::_exiting_threads_count = 0;
+volatile int ThreadService::_exiting_daemon_threads_count = 0;
+
+ThreadDumpResult* ThreadService::_threaddump_list = NULL;
+
+static const int INITIAL_ARRAY_SIZE = 10;
+
+void ThreadService::init() {  // create thread-count counters and probe CPU-time support
+  EXCEPTION_MARK;
+
+  // These counters are for java.lang.management API support.
+  // They are created even if -XX:-UsePerfData is set and in
+  // that case, they will be allocated on C heap.
+
+  _total_threads_count =
+                PerfDataManager::create_counter(JAVA_THREADS, "started",
+                                                PerfData::U_Events, CHECK);
+
+  _live_threads_count =
+                PerfDataManager::create_variable(JAVA_THREADS, "live",
+                                                 PerfData::U_None, CHECK);
+
+  _peak_threads_count =
+                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
+                                                 PerfData::U_None, CHECK);
+
+  _daemon_threads_count =
+                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
+                                                 PerfData::U_None, CHECK);
+
+  if (os::is_thread_cpu_time_supported()) {  // CPU-time queries enabled only where the OS supports them
+    _thread_cpu_time_enabled = true;
+  }
+}
+
+void ThreadService::reset_peak_thread_count() {  // reset peak to the current live count
+  // Acquire the lock to update the peak thread count
+  // to synchronize with thread addition and removal.
+  MutexLockerEx mu(Threads_lock);
+  _peak_threads_count->set_value(get_live_thread_count());
+}
+
+void ThreadService::add_thread(JavaThread* thread, bool daemon) {  // bump started/live (and daemon) counters for a new Java thread
+  // Do not count VM internal or JVMTI agent threads
+  if (thread->is_hidden_from_external_view() ||
+      thread->is_jvmti_agent_thread()) {
+    return;
+  }
+
+  _total_threads_count->inc();
+  _live_threads_count->inc();
+
+  if (_live_threads_count->get_value() > _peak_threads_count->get_value()) {  // track high-water mark
+    _peak_threads_count->set_value(_live_threads_count->get_value());
+  }
+
+  if (daemon) {
+    _daemon_threads_count->inc();
+  }
+}
+
+void ThreadService::remove_thread(JavaThread* thread, bool daemon) {  // undo add_thread accounting when a Java thread exits
+  Atomic::dec((jint*) &_exiting_threads_count);  // balances inc in current_thread_exiting(); done even for hidden threads
+
+  if (thread->is_hidden_from_external_view() ||
+      thread->is_jvmti_agent_thread()) {
+    return;  // hidden/agent threads were never counted in live/daemon
+  }
+
+  _live_threads_count->set_value(_live_threads_count->get_value() - 1);
+
+  if (daemon) {
+    _daemon_threads_count->set_value(_daemon_threads_count->get_value() - 1);
+    Atomic::dec((jint*) &_exiting_daemon_threads_count);
+  }
+}
+
+void ThreadService::current_thread_exiting(JavaThread* jt) {  // note that the calling thread has begun exiting
+  assert(jt == JavaThread::current(), "Called by current thread");
+  Atomic::inc((jint*) &_exiting_threads_count);
+
+  oop threadObj = jt->threadObj();
+  if (threadObj != NULL && java_lang_Thread::is_daemon(threadObj)) {
+    Atomic::inc((jint*) &_exiting_daemon_threads_count);
+  }
+}
+
+// FIXME: JVMTI should call this function
+Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {  // object the thread is waiting on or blocked entering; NULL handle if none
+  assert(thread != NULL, "should be non-NULL");
+  assert(Threads_lock->owned_by_self(), "must grab Threads_lock or be at safepoint");
+
+  ObjectMonitor *wait_obj = thread->current_waiting_monitor();
+
+  oop obj = NULL;
+  if (wait_obj != NULL) {
+    // thread is doing an Object.wait() call
+    obj = (oop) wait_obj->object();
+    assert(obj != NULL, "Object.wait() should have an object");
+  } else {
+    ObjectMonitor *enter_obj = thread->current_pending_monitor();
+    if (enter_obj != NULL) {
+      // thread is trying to enter() or raw_enter() an ObjectMonitor.
+      obj = (oop) enter_obj->object();
+    }
+    // If obj == NULL, then ObjectMonitor is raw which doesn't count.
+  }
+
+  Handle h(obj);
+  return h;
+}
+
+bool ThreadService::set_thread_monitoring_contention(bool flag) {  // toggle contention monitoring; returns previous setting
+  MutexLocker m(Management_lock);  // serialize with other management operations
+
+  bool prev = _thread_monitoring_contention_enabled;
+  _thread_monitoring_contention_enabled = flag;
+
+  return prev;
+}
+
+bool ThreadService::set_thread_cpu_time_enabled(bool flag) {  // toggle per-thread CPU time measurement; returns previous setting
+  MutexLocker m(Management_lock);
+
+  bool prev = _thread_cpu_time_enabled;
+  _thread_cpu_time_enabled = flag;
+
+  return prev;
+}
+
+// GC support
+void ThreadService::oops_do(OopClosure* f) {  // visit oops held by every in-flight thread dump so GC doesn't move them unseen
+  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
+    dump->oops_do(f);
+  }
+}
+
+void ThreadService::add_thread_dump(ThreadDumpResult* dump) {  // push dump onto the global list (head insertion) under Management_lock
+  MutexLocker ml(Management_lock);
+  if (_threaddump_list == NULL) {
+    _threaddump_list = dump;
+  } else {
+    dump->set_next(_threaddump_list);
+    _threaddump_list = dump;
+  }
+}
+
+void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {  // unlink dump from the global singly-linked list
+  MutexLocker ml(Management_lock);
+
+  ThreadDumpResult* prev = NULL;
+  bool found = false;
+  for (ThreadDumpResult* d = _threaddump_list; d != NULL; prev = d, d = d->next()) {
+    if (d == dump) {
+      if (prev == NULL) {  // removing the head
+        _threaddump_list = dump->next();
+      } else {
+        prev->set_next(dump->next());
+      }
+      found = true;
+      break;
+    }
+  }
+  assert(found, "The threaddump result to be removed must exist.");
+}
+
+// Dump stack trace of threads specified in the given threads array.
+// Returns StackTraceElement[][] each element is the stack trace of a thread in
+// the corresponding entry in the given threads array
+Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,  // see comment above: returns StackTraceElement[][]
+                                        int num_threads,
+                                        TRAPS) {
+  assert(num_threads > 0, "just checking");
+
+  ThreadDumpResult dump_result;  // ctor registers with ThreadService so GC sees its oops
+  VM_ThreadDump op(&dump_result,
+                   threads,
+                   num_threads,
+                   -1,    /* entire stack */
+                   false, /* with locked monitors */
+                   false  /* with locked synchronizers */);
+  VMThread::execute(&op);  // collect snapshots at a safepoint
+
+  // Allocate the resulting StackTraceElement[][] object
+
+  ResourceMark rm(THREAD);
+  klassOop k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_StackTraceElement_array(), true, CHECK_NH);
+  objArrayKlassHandle ik (THREAD, k);
+  objArrayOop r = oopFactory::new_objArray(ik(), num_threads, CHECK_NH);
+  objArrayHandle result_obj(THREAD, r);
+
+  int num_snapshots = dump_result.num_snapshots();
+  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
+  int i = 0;
+  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
+    ThreadStackTrace* stacktrace = ts->get_stack_trace();
+    if (stacktrace == NULL) {
+      // No stack trace
+      result_obj->obj_at_put(i, NULL);
+    } else {
+      // Construct an array of java/lang/StackTraceElement object
+      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
+      result_obj->obj_at_put(i, backtrace_h());
+    }
+  }
+
+  return result_obj;
+}
+
+void ThreadService::reset_contention_count_stat(JavaThread* thread) {  // zero the thread's contention counts (no-op if no stats yet)
+  ThreadStatistics* stat = thread->get_thread_stat();
+  if (stat != NULL) {
+    stat->reset_count_stat();
+  }
+}
+
+void ThreadService::reset_contention_time_stat(JavaThread* thread) {  // zero the thread's contention times (no-op if no stats yet)
+  ThreadStatistics* stat = thread->get_thread_stat();
+  if (stat != NULL) {
+    stat->reset_time_stat();
+  }
+}
+
+// Find deadlocks involving object monitors and concurrent locks if concurrent_locks is true
+DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(bool concurrent_locks) {  // DFS over the waits-for graph; returns linked list of cycles (NULL if none)
+  // This code was modified from the original Threads::find_deadlocks code.
+  int globalDfn = 0, thisDfn;
+  ObjectMonitor* waitingToLockMonitor = NULL;
+  oop waitingToLockBlocker = NULL;
+  bool blocked_on_monitor = false;
+  JavaThread *currentThread, *previousThread;
+  int num_deadlocks = 0;
+
+  for (JavaThread* p = Threads::first(); p != NULL; p = p->next()) {
+    // Initialize the depth-first-number
+    p->set_depth_first_number(-1);  // -1 = not yet visited
+  }
+
+  DeadlockCycle* deadlocks = NULL;  // head of result list
+  DeadlockCycle* last = NULL;       // tail of result list
+  DeadlockCycle* cycle = new DeadlockCycle();  // scratch cycle, reused until a deadlock is found
+  for (JavaThread* jt = Threads::first(); jt != NULL; jt = jt->next()) {
+    if (jt->depth_first_number() >= 0) {
+      // this thread was already visited
+      continue;
+    }
+
+    thisDfn = globalDfn;  // DFNs >= thisDfn belong to the current DFS walk
+    jt->set_depth_first_number(globalDfn++);
+    previousThread = jt;
+    currentThread = jt;
+
+    cycle->reset();
+
+    // When there is a deadlock, all the monitors involved in the dependency
+    // cycle must be contended and heavyweight. So we only care about the
+    // heavyweight monitor a thread is waiting to lock.
+    waitingToLockMonitor = (ObjectMonitor*)jt->current_pending_monitor();
+    if (concurrent_locks) {
+      waitingToLockBlocker = jt->current_park_blocker();  // also follow java.util.concurrent ownable synchronizers
+    }
+    while (waitingToLockMonitor != NULL || waitingToLockBlocker != NULL) {
+      cycle->add_thread(currentThread);
+      if (waitingToLockMonitor != NULL) {
+        currentThread = Threads::owning_thread_from_monitor_owner((address)waitingToLockMonitor->owner(),
+                                                                  false /* no locking needed */);
+      } else {
+        if (concurrent_locks) {
+          if (waitingToLockBlocker->is_a(SystemDictionary::abstract_ownable_synchronizer_klass())) {
+            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
+            currentThread = threadObj != NULL ? java_lang_Thread::thread(threadObj) : NULL;
+          } else {
+            currentThread = NULL;  // not an ownable synchronizer: cannot determine an owner
+          }
+        }
+      }
+
+      if (currentThread == NULL) {
+        // No dependency on another thread
+        break;
+      }
+      if (currentThread->depth_first_number() < 0) {
+        // First visit to this thread
+        currentThread->set_depth_first_number(globalDfn++);
+      } else if (currentThread->depth_first_number() < thisDfn) {
+        // Thread already visited, and not on a (new) cycle
+        break;
+      } else if (currentThread == previousThread) {
+        // Self-loop, ignore
+        break;
+      } else {
+        // We have a (new) cycle
+        num_deadlocks++;
+
+        cycle->set_deadlock(true);
+
+        // add this cycle to the deadlocks list
+        if (deadlocks == NULL) {
+          deadlocks = cycle;
+        } else {
+          last->set_next(cycle);
+        }
+        last = cycle;
+        cycle = new DeadlockCycle();  // fresh scratch cycle for the next walk
+        break;
+      }
+      previousThread = currentThread;
+      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
+      if (concurrent_locks) {
+        waitingToLockBlocker = currentThread->current_park_blocker();
+      }
+    }
+
+  }
+
+  return deadlocks;  // NOTE(review): the final scratch `cycle` is never freed here -- confirm who reclaims it
+}
+
+ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _next(NULL), _last(NULL) {  // dump of unknown thread count
+
+  // Create a new ThreadDumpResult object and append to the list.
+  // If GC happens before this function returns, methodOop
+  // in the stack trace will be visited.
+  ThreadService::add_thread_dump(this);
+}
+
+ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _next(NULL), _last(NULL) {  // dump of a known thread count
+  // Create a new ThreadDumpResult object and append to the list.
+  // If GC happens before this function returns, oops
+  // will be visited.
+  ThreadService::add_thread_dump(this);
+}
+
+ThreadDumpResult::~ThreadDumpResult() {  // unregister from ThreadService, then free all owned snapshots
+  ThreadService::remove_thread_dump(this);
+
+  // free all the ThreadSnapshot objects created during
+  // the VM_ThreadDump operation
+  ThreadSnapshot* ts = _snapshots;
+  while (ts != NULL) {
+    ThreadSnapshot* p = ts;
+    ts = ts->next();
+    delete p;
+  }
+}
+
+
+void ThreadDumpResult::add_thread_snapshot(ThreadSnapshot* ts) {  // append snapshot to the tail; takes ownership (freed in dtor)
+  assert(_num_threads == 0 || _num_snapshots < _num_threads,
+         "_num_snapshots must be less than _num_threads");
+  _num_snapshots++;
+  if (_snapshots == NULL) {
+    _snapshots = ts;
+  } else {
+    _last->set_next(ts);
+  }
+  _last = ts;
+}
+
+void ThreadDumpResult::oops_do(OopClosure* f) {  // let GC visit the oops held by every snapshot in this dump
+  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
+    ts->oops_do(f);
+  }
+}
+
+StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {  // capture method/bci (and optionally locked monitors) from a frame
+  _method = jvf->method();
+  _bci = jvf->bci();
+  _locked_monitors = NULL;  // stays NULL when lock info not requested or frame holds no locks
+  if (with_lock_info) {
+    ResourceMark rm;
+    GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
+    int length = list->length();
+    if (length > 0) {
+      _locked_monitors = new (ResourceObj::C_HEAP) GrowableArray<oop>(length, true);  // C-heap: outlives the ResourceMark
+      for (int i = 0; i < length; i++) {
+        MonitorInfo* monitor = list->at(i);
+        assert(monitor->owner(), "This monitor must have an owning object");
+        _locked_monitors->append(monitor->owner());
+      }
+    }
+  }
+}
+
+void StackFrameInfo::oops_do(OopClosure* f) {  // let GC visit the method oop and any locked-monitor oops
+  f->do_oop((oop*) &_method);
+  if (_locked_monitors != NULL) {
+    int length = _locked_monitors->length();
+    for (int i = 0; i < length; i++) {
+      f->do_oop((oop*) _locked_monitors->adr_at(i));
+    }
+  }
+}
+
+void StackFrameInfo::print_on(outputStream* st) const {  // print the frame as a stack-trace element plus its "- locked" lines
+  ResourceMark rm;
+  java_lang_Throwable::print_stack_element(st, method(), bci());
+  int len = (_locked_monitors != NULL ? _locked_monitors->length() : 0);
+  for (int i = 0; i < len; i++) {
+    oop o = _locked_monitors->at(i);
+    instanceKlass* ik = instanceKlass::cast(o->klass());
+    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", (address)o, ik->external_name());
+  }
+
+}
+
+// Iterate through monitor cache to find JNI locked monitors
+// (monitors owned by the thread but not associated with any stack frame).
+class InflatedMonitorsClosure: public MonitorClosure {
+private:
+  ThreadStackTrace* _stack_trace;   // trace to report JNI-locked monitors into
+  Thread* _thread;                  // thread whose owned monitors we collect
+public:
+  InflatedMonitorsClosure(Thread* t, ThreadStackTrace* st) {
+    _thread = t;
+    _stack_trace = st;
+  }
+  // Called for each inflated ObjectMonitor; records those owned by _thread
+  // that do not appear in any frame of the collected stack trace.
+  void do_monitor(ObjectMonitor* mid) {
+    if (mid->owner() == _thread) {
+      oop object = (oop) mid->object();
+      if (!_stack_trace->is_owned_monitor_on_stack(object)) {
+        _stack_trace->add_jni_locked_monitor(object);
+      }
+    }
+  }
+};
+
+// Build an empty stack trace for thread t. The JNI-locked-monitor list is
+// only allocated when monitor collection is requested.
+ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
+  _thread = t;
+  _frames = new (ResourceObj::C_HEAP) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, true);
+  _depth = 0;
+  _with_locked_monitors = with_locked_monitors;
+  if (_with_locked_monitors) {
+    _jni_locked_monitors = new (ResourceObj::C_HEAP) GrowableArray<oop>(INITIAL_ARRAY_SIZE, true);
+  } else {
+    _jni_locked_monitors = NULL;
+  }
+}
+
+// Delete the owned StackFrameInfo objects, then the arrays themselves.
+ThreadStackTrace::~ThreadStackTrace() {
+  for (int i = 0; i < _frames->length(); i++) {
+    delete _frames->at(i);
+  }
+  delete _frames;
+  if (_jni_locked_monitors != NULL) {
+    delete _jni_locked_monitors;
+  }
+}
+
+// Walk the thread's vframes (up to maxDepth Java frames; maxDepth <= 0
+// means unlimited) and record them. When monitor collection is enabled,
+// afterwards scan the inflated-monitor cache for JNI-locked monitors.
+// Must run at a safepoint so stacks are stable.
+void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {
+  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
+
+  if (_thread->has_last_Java_frame()) {
+    RegisterMap reg_map(_thread);
+    vframe* start_vf = _thread->last_java_vframe(&reg_map);
+    int count = 0;
+    for (vframe* f = start_vf; f; f = f->sender() ) {
+      if (f->is_java_frame()) {
+        javaVFrame* jvf = javaVFrame::cast(f);
+        add_stack_frame(jvf);
+        count++;
+      } else {
+        // Ignore non-Java frames
+      }
+      if (maxDepth > 0 && count == maxDepth) {
+        // Skip frames if more than maxDepth
+        break;
+      }
+    }
+  }
+
+  if (_with_locked_monitors) {
+    // Iterate inflated monitors and find monitors locked by this thread
+    // not found in the stack
+    InflatedMonitorsClosure imc(_thread, this);
+    ObjectSynchronizer::monitors_iterate(&imc);
+  }
+}
+
+
+// Returns true if 'object' is one of the monitors recorded as locked by a
+// frame of this collected stack trace (used to distinguish stack-locked
+// monitors from JNI-locked ones). Must run at a safepoint.
+//
+// Fix: the original set a 'found' flag and used 'break', which only exits
+// the inner loop, so every remaining frame was still scanned after a
+// match. Return directly on the first hit instead.
+bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
+  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
+
+  int num_frames = get_stack_depth();
+  for (int depth = 0; depth < num_frames; depth++) {
+    StackFrameInfo* frame = stack_frame_at(depth);
+    int len = frame->num_locked_monitors();
+    GrowableArray<oop>* locked_monitors = frame->locked_monitors();
+    for (int j = 0; j < len; j++) {
+      oop monitor = locked_monitors->at(j);
+      assert(monitor != NULL && monitor->is_instance(), "must be a Java object");
+      if (monitor == object) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+// Allocate a java.lang.StackTraceElement[] of length _depth and fill it
+// from the collected frames. Returns a null Handle on allocation/creation
+// failure via CHECK_NH.
+Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
+  klassOop k = SystemDictionary::stackTraceElement_klass();
+  instanceKlassHandle ik(THREAD, k);
+
+  // Allocate an array of java/lang/StackTraceElement object
+  objArrayOop ste = oopFactory::new_objArray(ik(), _depth, CHECK_NH);
+  objArrayHandle backtrace(THREAD, ste);
+  for (int j = 0; j < _depth; j++) {
+    StackFrameInfo* frame = _frames->at(j);
+    methodHandle mh(THREAD, frame->method());
+    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
+    backtrace->obj_at_put(j, element);
+  }
+  return backtrace;
+}
+
+// Record one Java frame; _depth mirrors _frames->length() and is used when
+// sizing the StackTraceElement[] later.
+void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
+  _frames->append(new StackFrameInfo(jvf, _with_locked_monitors));
+  _depth++;
+}
+
+// GC support: visit oops in every frame and every JNI-locked monitor.
+void ThreadStackTrace::oops_do(OopClosure* f) {
+  int length = _frames->length();
+  for (int i = 0; i < length; i++) {
+    _frames->at(i)->oops_do(f);
+  }
+
+  length = (_jni_locked_monitors != NULL ? _jni_locked_monitors->length() : 0);
+  for (int j = 0; j < length; j++) {
+    f->do_oop((oop*) _jni_locked_monitors->adr_at(j));
+  }
+}
+
+// Free the per-thread lock map unless the creator asked to retain it
+// (ownership then passes to whoever holds the ThreadConcurrentLocks list).
+ConcurrentLocksDump::~ConcurrentLocksDump() {
+  if (_retain_map_on_free) {
+    return;
+  }
+
+  for (ThreadConcurrentLocks* t = _map; t != NULL;) {
+    ThreadConcurrentLocks* tcl = t;
+    t = t->next();
+    delete tcl;
+  }
+}
+
+// Collect all locked java.util.concurrent AbstractOwnableSynchronizer
+// instances via heap inspection and map them to their owner threads.
+// Only meaningful on JDK 6+, where the AOS class exists.
+void ConcurrentLocksDump::dump_at_safepoint() {
+  // dump all locked concurrent locks
+  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
+
+  if (JDK_Version::is_gte_jdk16x_version()) {
+    ResourceMark rm;
+
+    GrowableArray<oop>* aos_objects = new GrowableArray<oop>(INITIAL_ARRAY_SIZE);
+
+    // Find all instances of AbstractOwnableSynchronizer
+    HeapInspection::find_instances_at_safepoint(SystemDictionary::abstract_ownable_synchronizer_klass(),
+                                                aos_objects);
+    // Build a map of thread to its owned AQS locks
+    build_map(aos_objects);
+  }
+}
+
+
+// build a map of JavaThread to all its owned AbstractOwnableSynchronizer
+// build a map of JavaThread to all its owned AbstractOwnableSynchronizer
+// Synchronizers whose exclusiveOwnerThread field is null are skipped.
+void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
+  int length = aos_objects->length();
+  for (int i = 0; i < length; i++) {
+    oop o = aos_objects->at(i);
+    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
+    if (owner_thread_obj != NULL) {
+      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
+      assert(o->is_instance(), "Must be an instanceOop");
+      add_lock(thread, (instanceOop) o);
+    }
+  }
+}
+
+// Record lock o as owned by thread, creating and appending a new
+// ThreadConcurrentLocks entry the first time the thread is seen.
+void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
+  ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread);
+  if (tcl != NULL) {
+    tcl->add_lock(o);
+    return;
+  }
+
+  // First owned lock found for this thread
+  tcl = new ThreadConcurrentLocks(thread);
+  tcl->add_lock(o);
+  if (_map == NULL) {
+    _map = tcl;
+  } else {
+    _last->set_next(tcl);
+  }
+  _last = tcl;
+}
+
+// Linear scan of the map; returns NULL when 'thread' owns no
+// AbstractOwnableSynchronizer in this dump.
+ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
+  ThreadConcurrentLocks* tcl = _map;
+  while (tcl != NULL) {
+    if (tcl->java_thread() == thread) {
+      return tcl;
+    }
+    tcl = tcl->next();
+  }
+  return NULL;
+}
+
+// Print the "Locked ownable synchronizers:" section for thread t, one
+// "- <addr> (a ClassName)" line per owned lock, or "- None".
+void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
+  st->print_cr("   Locked ownable synchronizers:");
+  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
+  GrowableArray<instanceOop>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
+  if (locks == NULL || locks->is_empty()) {
+    st->print_cr("\t- None");
+    st->cr();
+    return;
+  }
+
+  for (int i = 0; i < locks->length(); i++) {
+    instanceOop obj = locks->at(i);
+    instanceKlass* ik = instanceKlass::cast(obj->klass());
+    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", (address)obj, ik->external_name());
+  }
+  st->cr();
+}
+
+// Per-thread list node: C-heap array so the oops survive resource scopes.
+ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
+  _thread = thread;
+  _owned_locks = new (ResourceObj::C_HEAP) GrowableArray<instanceOop>(INITIAL_ARRAY_SIZE, true);
+  _next = NULL;
+}
+
+// Frees only the lock array; the oops themselves are heap objects.
+ThreadConcurrentLocks::~ThreadConcurrentLocks() {
+  delete _owned_locks;
+}
+
+// Append one owned AbstractOwnableSynchronizer instance.
+void ThreadConcurrentLocks::add_lock(instanceOop o) {
+  _owned_locks->append(o);
+}
+
+// GC support: visit every owned-lock oop.
+void ThreadConcurrentLocks::oops_do(OopClosure* f) {
+  int length = _owned_locks->length();
+  for (int i = 0; i < length; i++) {
+    f->do_oop((oop*) _owned_locks->adr_at(i));
+  }
+}
+
+// Zero all contention counters, pending-reset flags and class-operation
+// recursion depths. The elapsedTimer members start in their reset state.
+//
+// Fix: _class_link_recursion_count was never initialized even though the
+// header exposes class_link_recursion_count_addr() alongside the other two
+// recursion counters, leaving callers to read an indeterminate value.
+ThreadStatistics::ThreadStatistics() {
+  _contended_enter_count = 0;
+  _monitor_wait_count = 0;
+  _sleep_count = 0;
+  _class_init_recursion_count = 0;
+  _class_verify_recursion_count = 0;
+  _class_link_recursion_count = 0;
+  _count_pending_reset = false;
+  _timer_pending_reset = false;
+}
+
+// Snapshot the state and statistics of a live JavaThread. The statement
+// order matters: _thread_status is read first, then blocker/owner fields
+// are refined (and the status possibly downgraded to RUNNABLE) based on
+// what the monitor/park state actually shows.
+ThreadSnapshot::ThreadSnapshot(JavaThread* thread) {
+  _thread = thread;
+  _threadObj = thread->threadObj();
+  _stack_trace = NULL;
+  _concurrent_locks = NULL;
+  _next = NULL;
+
+  // Copy the contention statistics so the snapshot is stable even if the
+  // thread's live counters keep changing.
+  ThreadStatistics* stat = thread->get_thread_stat();
+  _contended_enter_ticks = stat->contended_enter_ticks();
+  _contended_enter_count = stat->contended_enter_count();
+  _monitor_wait_ticks = stat->monitor_wait_ticks();
+  _monitor_wait_count = stat->monitor_wait_count();
+  _sleep_ticks = stat->sleep_ticks();
+  _sleep_count = stat->sleep_count();
+
+  _blocker_object = NULL;
+  _blocker_object_owner = NULL;
+
+  _thread_status = java_lang_Thread::get_thread_status(_threadObj);
+  _is_ext_suspended = thread->is_being_ext_suspended();
+  _is_in_native = (thread->thread_state() == _thread_in_native);
+
+  if (_thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER ||
+      _thread_status == java_lang_Thread::IN_OBJECT_WAIT ||
+      _thread_status == java_lang_Thread::IN_OBJECT_WAIT_TIMED) {
+
+    Handle obj = ThreadService::get_current_contended_monitor(thread);
+    if (obj() == NULL) {
+      // monitor no longer exists; thread is not blocked
+      _thread_status = java_lang_Thread::RUNNABLE;
+    } else {
+      _blocker_object = obj();
+      JavaThread* owner = ObjectSynchronizer::get_lock_owner(obj, false);
+      if ((owner == NULL && _thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER)
+          || (owner != NULL && owner->is_attaching())) {
+        // ownership information of the monitor is not available
+        // (may no longer be owned or releasing to some other thread)
+        // make this thread in RUNNABLE state.
+        // And when the owner thread is in attaching state, the java thread
+        // is not completely initialized. For example thread name and id
+        // and may not be set, so hide the attaching thread.
+        _thread_status = java_lang_Thread::RUNNABLE;
+        _blocker_object = NULL;
+      } else if (owner != NULL) {
+        _blocker_object_owner = owner->threadObj();
+      }
+    }
+  }
+
+  // Support for JSR-166 locks
+  if (JDK_Version::supports_thread_park_blocker() &&
+        (_thread_status == java_lang_Thread::PARKED ||
+         _thread_status == java_lang_Thread::PARKED_TIMED)) {
+
+    _blocker_object = thread->current_park_blocker();
+    if (_blocker_object != NULL && _blocker_object->is_a(SystemDictionary::abstract_ownable_synchronizer_klass())) {
+      _blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(_blocker_object);
+    }
+  }
+}
+
+// Owns its stack trace and concurrent-locks entry; delete of NULL is a no-op.
+ThreadSnapshot::~ThreadSnapshot() {
+  delete _stack_trace;
+  delete _concurrent_locks;
+}
+
+// Collect this thread's stack (and optionally locked monitors); must be
+// invoked at a safepoint (enforced inside ThreadStackTrace).
+void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors) {
+  _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
+  _stack_trace->dump_stack_at_safepoint(max_depth);
+}
+
+
+// GC support: visit the thread oop, blocker oops, and everything held by
+// the optional stack trace and concurrent-locks entry.
+void ThreadSnapshot::oops_do(OopClosure* f) {
+  f->do_oop(&_threadObj);
+  f->do_oop(&_blocker_object);
+  f->do_oop(&_blocker_object_owner);
+  if (_stack_trace != NULL) {
+    _stack_trace->oops_do(f);
+  }
+  if (_concurrent_locks != NULL) {
+    _concurrent_locks->oops_do(f);
+  }
+}
+
+// Empty cycle; threads are appended as the deadlock detector walks the
+// wait-for graph.
+DeadlockCycle::DeadlockCycle() {
+  _is_deadlock = false;
+  _threads = new (ResourceObj::C_HEAP) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, true);
+  _next = NULL;
+}
+
+// Frees only the thread array; JavaThreads are not owned by the cycle.
+DeadlockCycle::~DeadlockCycle() {
+  delete _threads;
+}
+
+// Print one deadlock cycle: for each thread, what it is waiting on (an
+// object monitor, a JVMTI raw monitor, or a j.u.c ownable synchronizer)
+// and which thread holds it, followed by the stack traces of all threads
+// in the cycle (temporarily forcing JavaMonitorsInStackTrace on).
+void DeadlockCycle::print_on(outputStream* st) const {
+  st->cr();
+  st->print_cr("Found one Java-level deadlock:");
+  st->print("=============================");
+
+  JavaThread* currentThread;
+  ObjectMonitor* waitingToLockMonitor;
+  oop waitingToLockBlocker;
+  int len = _threads->length();
+  for (int i = 0; i < len; i++) {
+    currentThread = _threads->at(i);
+    waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
+    waitingToLockBlocker = currentThread->current_park_blocker();
+    st->cr();
+    st->print_cr("\"%s\":", currentThread->get_thread_name());
+    const char* owner_desc = ",\n  which is held by";
+    if (waitingToLockMonitor != NULL) {
+      st->print("  waiting to lock monitor " INTPTR_FORMAT, waitingToLockMonitor);
+      oop obj = (oop)waitingToLockMonitor->object();
+      if (obj != NULL) {
+        st->print(" (object "INTPTR_FORMAT ", a %s)", (address)obj,
+                   (instanceKlass::cast(obj->klass()))->external_name());
+
+        if (!currentThread->current_pending_monitor_is_from_java()) {
+          owner_desc = "\n  in JNI, which is held by";
+        }
+      } else {
+        // No Java object associated - a JVMTI raw monitor
+        owner_desc = " (JVMTI raw monitor),\n  which is held by";
+      }
+      // NOTE(review): owning_thread_from_monitor_owner can return NULL
+      // (e.g. owner just released); the print below would then pass NULL
+      // to get_thread_name() - confirm callers guarantee a live owner.
+      currentThread = Threads::owning_thread_from_monitor_owner(
+        (address)waitingToLockMonitor->owner(), false /* no locking needed */);
+    } else {
+      st->print("  waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
+                (address)waitingToLockBlocker,
+                (instanceKlass::cast(waitingToLockBlocker->klass()))->external_name());
+      assert(waitingToLockBlocker->is_a(SystemDictionary::abstract_ownable_synchronizer_klass()),
+             "Must be an AbstractOwnableSynchronizer");
+      oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
+      currentThread = java_lang_Thread::thread(ownerObj);
+    }
+    st->print("%s \"%s\"", owner_desc, currentThread->get_thread_name());
+  }
+
+  st->cr();
+  st->cr();
+
+  // Print stack traces
+  bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
+  JavaMonitorsInStackTrace = true;
+  st->print_cr("Java stack information for the threads listed above:");
+  st->print_cr("===================================================");
+  for (int j = 0; j < len; j++) {
+    currentThread = _threads->at(j);
+    st->print_cr("\"%s\":", currentThread->get_thread_name());
+    currentThread->print_stack_on(st);
+  }
+  JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
+}
+
+// Snapshot the list of visible live Java threads as instanceHandles,
+// optionally filtering out JVMTI agent threads and JNI-attaching threads.
+// Holds Threads_lock while walking the thread list.
+ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
+                                             bool include_jvmti_agent_threads,
+                                             bool include_jni_attaching_threads) {
+  assert(cur_thread == Thread::current(), "Check current thread");
+
+  int init_size = ThreadService::get_live_thread_count();
+  _threads_array = new GrowableArray<instanceHandle>(init_size);
+
+  MutexLockerEx ml(Threads_lock);
+
+  for (JavaThread* jt = Threads::first(); jt != NULL; jt = jt->next()) {
+    // skips JavaThreads in the process of exiting
+    // and also skips VM internal JavaThreads
+    // Threads in _thread_new or _thread_new_trans state are included.
+    // i.e. threads have been started but not yet running.
+    if (jt->threadObj() == NULL   ||
+        jt->is_exiting()          ||
+        !java_lang_Thread::is_alive(jt->threadObj())   ||
+        jt->is_hidden_from_external_view()) {
+      continue;
+    }
+
+    // skip agent threads
+    if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
+      continue;
+    }
+
+    // skip jni threads in the process of attaching
+    if (!include_jni_attaching_threads && jt->is_attaching()) {
+      continue;
+    }
+
+    instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
+    _threads_array->append(h);
+  }
+}
diff --git a/src/share/vm/services/threadService.hpp b/src/share/vm/services/threadService.hpp
new file mode 100644
index 000000000..291a8eebb
--- /dev/null
+++ b/src/share/vm/services/threadService.hpp
@@ -0,0 +1,566 @@
+/*
+ * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class OopClosure;
+class ThreadDumpResult;
+class ThreadStackTrace;
+class ThreadSnapshot;
+class StackFrameInfo;
+class ThreadConcurrentLocks;
+class DeadlockCycle;
+
+// VM monitoring and management support for the thread and
+// synchronization subsystem
+//
+// Thread contention monitoring is disabled by default.
+// When enabled, the VM will begin measuring the accumulated
+// elapsed time a thread blocked on synchronization.
+//
+class ThreadService : public AllStatic {
+private:
+  // These counters could be moved to Threads class
+  static PerfCounter*  _total_threads_count;
+  static PerfVariable* _live_threads_count;
+  static PerfVariable* _peak_threads_count;
+  static PerfVariable* _daemon_threads_count;
+
+  // These 2 counters are atomically incremented once the thread is exiting.
+  // They will be atomically decremented when ThreadService::remove_thread is called.
+  static volatile int  _exiting_threads_count;
+  static volatile int  _exiting_daemon_threads_count;
+
+  static bool          _thread_monitoring_contention_enabled;
+  static bool          _thread_cpu_time_enabled;
+
+  // Need to keep the list of thread dump result that
+  // keep references to methodOop since thread dump can be
+  // requested by multiple threads concurrently.
+  static ThreadDumpResult* _threaddump_list;
+
+public:
+  static void init();
+  static void add_thread(JavaThread* thread, bool daemon);
+  static void remove_thread(JavaThread* thread, bool daemon);
+  static void current_thread_exiting(JavaThread* jt);
+
+  // Enable/disable contention monitoring; returns the previous setting.
+  static bool set_thread_monitoring_contention(bool flag);
+  static bool is_thread_monitoring_contention() { return _thread_monitoring_contention_enabled; }
+
+  static bool set_thread_cpu_time_enabled(bool flag);
+  static bool is_thread_cpu_time_enabled()    { return _thread_cpu_time_enabled; }
+
+  // Live/daemon counts exclude threads already counted as exiting, so the
+  // reported values do not transiently include half-dead threads.
+  static jlong get_total_thread_count()       { return _total_threads_count->get_value(); }
+  static jlong get_peak_thread_count()        { return _peak_threads_count->get_value(); }
+  static jlong get_live_thread_count()        { return _live_threads_count->get_value() - _exiting_threads_count; }
+  static jlong get_daemon_thread_count()      { return _daemon_threads_count->get_value() - _exiting_daemon_threads_count; }
+
+  static int   exiting_threads_count()        { return _exiting_threads_count; }
+  static int   exiting_daemon_threads_count() { return _exiting_daemon_threads_count; }
+
+  // Support for thread dump
+  static void   add_thread_dump(ThreadDumpResult* dump);
+  static void   remove_thread_dump(ThreadDumpResult* dump);
+
+  static Handle get_current_contended_monitor(JavaThread* thread);
+
+  // This function is called by JVM_DumpThreads.
+  static Handle dump_stack_traces(GrowableArray<instanceHandle>* threads,
+                                  int num_threads, TRAPS);
+
+  static void   reset_peak_thread_count();
+  static void   reset_contention_count_stat(JavaThread* thread);
+  static void   reset_contention_time_stat(JavaThread* thread);
+
+  static DeadlockCycle*       find_deadlocks_at_safepoint(bool object_monitors_only);
+
+  // GC support
+  static void   oops_do(OopClosure* f);
+};
+
+// Per-thread Statistics for synchronization
+//
+// Fix: the reset helpers assigned the integer literal 0 to the bool
+// pending-reset flags; use 'false' for type-correct, idiomatic C++.
+class ThreadStatistics : public CHeapObj {
+private:
+  // The following contention statistics are only updated by
+  // the thread owning these statistics when contention occurs.
+
+  jlong        _contended_enter_count;
+  elapsedTimer _contended_enter_timer;
+  jlong        _monitor_wait_count;
+  elapsedTimer _monitor_wait_timer;
+  jlong        _sleep_count;
+  elapsedTimer _sleep_timer;
+
+
+  // These two reset flags are set to true when another thread
+  // requests to reset the statistics.  The actual statistics
+  // are reset when the thread contention occurs and attempts
+  // to update the statistics.
+  bool         _count_pending_reset;
+  bool         _timer_pending_reset;
+
+  // Keep accurate times for potentially recursive class operations
+  int          _class_init_recursion_count;
+  int          _class_verify_recursion_count;
+  int          _class_link_recursion_count;
+
+  // utility functions
+  // Perform a deferred counter reset requested by another thread.
+  void  check_and_reset_count()            {
+                                             if (!_count_pending_reset) return;
+                                             _contended_enter_count = 0;
+                                             _monitor_wait_count = 0;
+                                             _sleep_count = 0;
+                                             _count_pending_reset = false;
+                                           }
+  // Perform a deferred timer reset requested by another thread.
+  void  check_and_reset_timer()            {
+                                             if (!_timer_pending_reset) return;
+                                             _contended_enter_timer.reset();
+                                             _monitor_wait_timer.reset();
+                                             _sleep_timer.reset();
+                                             _timer_pending_reset = false;
+                                           }
+
+public:
+  ThreadStatistics();
+
+  // Accessors report 0 while a reset is pending so readers never see
+  // stale pre-reset values.
+  jlong contended_enter_count()            { return (_count_pending_reset ? 0 : _contended_enter_count); }
+  jlong contended_enter_ticks()            { return (_timer_pending_reset ? 0 : _contended_enter_timer.active_ticks()); }
+  jlong monitor_wait_count()               { return (_count_pending_reset ? 0 : _monitor_wait_count); }
+  jlong monitor_wait_ticks()               { return (_timer_pending_reset ? 0 : _monitor_wait_timer.active_ticks()); }
+  jlong sleep_count()                      { return (_count_pending_reset ? 0 : _sleep_count); }
+  jlong sleep_ticks()                      { return (_timer_pending_reset ? 0 : _sleep_timer.active_ticks()); }
+
+  void monitor_wait()                      { check_and_reset_count(); _monitor_wait_count++; }
+  void monitor_wait_begin()                { check_and_reset_timer(); _monitor_wait_timer.start(); }
+  void monitor_wait_end()                  { _monitor_wait_timer.stop(); check_and_reset_timer(); }
+
+  void thread_sleep()                      { check_and_reset_count(); _sleep_count++; }
+  void thread_sleep_begin()                { check_and_reset_timer(); _sleep_timer.start(); }
+  void thread_sleep_end()                  { _sleep_timer.stop(); check_and_reset_timer(); }
+
+  void contended_enter()                   { check_and_reset_count(); _contended_enter_count++; }
+  void contended_enter_begin()             { check_and_reset_timer(); _contended_enter_timer.start(); }
+  void contended_enter_end()               { _contended_enter_timer.stop(); check_and_reset_timer(); }
+
+  void reset_count_stat()                  { _count_pending_reset = true; }
+  void reset_time_stat()                   { _timer_pending_reset = true; }
+
+  int* class_init_recursion_count_addr()   { return &_class_init_recursion_count; }
+  int* class_verify_recursion_count_addr() { return &_class_verify_recursion_count; }
+  int* class_link_recursion_count_addr()   { return &_class_link_recursion_count; }
+};
+
+// Thread snapshot to represent the thread state and statistics
+class ThreadSnapshot : public CHeapObj {
+private:
+ JavaThread* _thread;
+ oop _threadObj;
+ java_lang_Thread::ThreadStatus _thread_status;
+
+ bool _is_ext_suspended;
+ bool _is_in_native;
+
+ jlong _contended_enter_ticks;
+ jlong _contended_enter_count;
+ jlong _monitor_wait_ticks;
+ jlong _monitor_wait_count;
+ jlong _sleep_ticks;
+ jlong _sleep_count;
+ oop _blocker_object;
+ oop _blocker_object_owner;
+
+ ThreadStackTrace* _stack_trace;
+ ThreadConcurrentLocks* _concurrent_locks;
+ ThreadSnapshot* _next;
+
+public:
+ // Dummy snapshot
+ ThreadSnapshot() : _thread(NULL), _threadObj(NULL), _stack_trace(NULL), _concurrent_locks(NULL), _next(NULL),
+ _blocker_object(NULL), _blocker_object_owner(NULL) {};
+ ThreadSnapshot(JavaThread* thread);
+ ~ThreadSnapshot();
+
+ java_lang_Thread::ThreadStatus thread_status() { return _thread_status; }
+
+ oop threadObj() const { return _threadObj; }
+
+ void set_next(ThreadSnapshot* n) { _next = n; }
+
+ bool is_ext_suspended() { return _is_ext_suspended; }
+ bool is_in_native() { return _is_in_native; }
+
+ jlong contended_enter_count() { return _contended_enter_count; }
+ jlong contended_enter_ticks() { return _contended_enter_ticks; }
+ jlong monitor_wait_count() { return _monitor_wait_count; }
+ jlong monitor_wait_ticks() { return _monitor_wait_ticks; }
+ jlong sleep_count() { return _sleep_count; }
+ jlong sleep_ticks() { return _sleep_ticks; }
+
+
+ oop blocker_object() { return _blocker_object; }
+ oop blocker_object_owner() { return _blocker_object_owner; }
+
+ ThreadSnapshot* next() const { return _next; }
+ ThreadStackTrace* get_stack_trace() { return _stack_trace; }
+ ThreadConcurrentLocks* get_concurrent_locks() { return _concurrent_locks; }
+
+ void dump_stack_at_safepoint(int max_depth, bool with_locked_monitors);
+ void set_concurrent_locks(ThreadConcurrentLocks* l) { _concurrent_locks = l; }
+ void oops_do(OopClosure* f);
+};
+
+// A thread's captured stack: StackFrameInfo per Java frame, plus the
+// monitors this thread locked via JNI (not attributable to any frame).
+class ThreadStackTrace : public CHeapObj {
+ private:
+  JavaThread*                     _thread;
+  int                             _depth;  // number of stack frames added
+  bool                            _with_locked_monitors;
+  GrowableArray<StackFrameInfo*>* _frames;
+  GrowableArray<oop>*             _jni_locked_monitors;
+
+ public:
+
+  ThreadStackTrace(JavaThread* thread, bool with_locked_monitors);
+  ~ThreadStackTrace();
+
+  StackFrameInfo* stack_frame_at(int i) { return _frames->at(i); }
+  int             get_stack_depth()     { return _depth; }
+
+  void            add_stack_frame(javaVFrame* jvf);
+  void            dump_stack_at_safepoint(int max_depth);
+  Handle          allocate_fill_stack_trace_element_array(TRAPS);
+  void            oops_do(OopClosure* f);
+  GrowableArray<oop>* jni_locked_monitors() { return _jni_locked_monitors; }
+  int             num_jni_locked_monitors() { return (_jni_locked_monitors != NULL ? _jni_locked_monitors->length() : 0); }
+
+  bool            is_owned_monitor_on_stack(oop object);
+  void            add_jni_locked_monitor(oop object) { _jni_locked_monitors->append(object); }
+};
+
+// StackFrameInfo for keeping methodOop and bci during
+// stack walking for later construction of StackTraceElement[]
+// Java instances
+class StackFrameInfo : public CHeapObj {
+ private:
+  methodOop           _method;
+  int                 _bci;
+  GrowableArray<oop>* _locked_monitors; // list of object monitors locked by this frame
+
+ public:
+
+  StackFrameInfo(javaVFrame* jvf, bool with_locked_monitors);
+  // _locked_monitors is NULL when the frame locks nothing or monitor
+  // collection was not requested.
+  ~StackFrameInfo() {
+    if (_locked_monitors != NULL) {
+      delete _locked_monitors;
+    }
+  };
+  methodOop method() const       { return _method; }
+  int       bci()    const       { return _bci; }
+  void      oops_do(OopClosure* f);
+
+  int       num_locked_monitors()       { return (_locked_monitors != NULL ? _locked_monitors->length() : 0); }
+  GrowableArray<oop>* locked_monitors() { return _locked_monitors; }
+
+  void      print_on(outputStream* st) const;
+};
+
+// One node in ConcurrentLocksDump's map: the j.u.c ownable synchronizers
+// owned by a single JavaThread, linked into a singly-linked list.
+class ThreadConcurrentLocks : public CHeapObj {
+private:
+  GrowableArray<instanceOop>* _owned_locks;
+  ThreadConcurrentLocks*      _next;
+  JavaThread*                 _thread;
+ public:
+  ThreadConcurrentLocks(JavaThread* thread);
+  ~ThreadConcurrentLocks();
+
+  void                        add_lock(instanceOop o);
+  void                        set_next(ThreadConcurrentLocks* n) { _next = n; }
+  ThreadConcurrentLocks*      next() { return _next; }
+  JavaThread*                 java_thread()                      { return _thread; }
+  GrowableArray<instanceOop>* owned_locks()                      { return _owned_locks; }
+  void                        oops_do(OopClosure* f);
+};
+
+// Safepoint-time dump mapping each JavaThread to the j.u.c ownable
+// synchronizers it holds. With retain_map_on_free, the dtor leaves the
+// map alive for a longer-lived consumer.
+class ConcurrentLocksDump : public StackObj {
+ private:
+  ThreadConcurrentLocks* _map;
+  ThreadConcurrentLocks* _last;   // Last ThreadConcurrentLocks in the map
+  bool                   _retain_map_on_free;
+
+  void build_map(GrowableArray<oop>* aos_objects);
+  void add_lock(JavaThread* thread, instanceOop o);
+
+ public:
+  ConcurrentLocksDump(bool retain_map_on_free) : _map(NULL), _last(NULL), _retain_map_on_free(retain_map_on_free) {};
+  ConcurrentLocksDump() : _map(NULL), _last(NULL), _retain_map_on_free(false) {};
+  ~ConcurrentLocksDump();
+
+  void                   dump_at_safepoint();
+  ThreadConcurrentLocks* thread_concurrent_locks(JavaThread* thread);
+  void                   print_locks_on(JavaThread* t, outputStream* st);
+};
+
+// Holder for the snapshots produced by one thread-dump request; linked
+// into ThreadService's global list so GC can visit the held oops.
+class ThreadDumpResult : public StackObj {
+ private:
+  int                  _num_threads;    // requested count; 0 = unbounded
+  int                  _num_snapshots;  // snapshots actually collected
+  ThreadSnapshot*      _snapshots;      // head of snapshot list
+  ThreadSnapshot*      _last;           // tail of snapshot list
+  ThreadDumpResult*    _next;           // link in ThreadService's dump list
+ public:
+  ThreadDumpResult();
+  ThreadDumpResult(int num_threads);
+  ~ThreadDumpResult();
+
+  void                 add_thread_snapshot(ThreadSnapshot* ts);
+  void                 set_next(ThreadDumpResult* next) { _next = next; }
+  ThreadDumpResult*    next()                           { return _next; }
+  int                  num_threads()                    { return _num_threads; }
+  int                  num_snapshots()                  { return _num_snapshots; }
+  ThreadSnapshot*      snapshots()                      { return _snapshots; }
+  void                 oops_do(OopClosure* f);
+};
+
+// One cycle found by the deadlock detector: the participating threads,
+// a confirmed-deadlock flag, and a link to the next detected cycle.
+class DeadlockCycle : public CHeapObj {
+ private:
+  bool _is_deadlock;
+  GrowableArray<JavaThread*>* _threads;
+  DeadlockCycle* _next;
+ public:
+  DeadlockCycle();
+  ~DeadlockCycle();
+
+  DeadlockCycle* next()                     { return _next; }
+  void           set_next(DeadlockCycle* d) { _next = d; }
+  void           add_thread(JavaThread* t)  { _threads->append(t); }
+  // Reuse this object for the next candidate cycle during detection.
+  void           reset()                    { _is_deadlock = false; _threads->clear(); }
+  void           set_deadlock(bool value)   { _is_deadlock = value; }
+  bool           is_deadlock()              { return _is_deadlock; }
+  int            num_threads()              { return _threads->length(); }
+  GrowableArray<JavaThread*>* threads()     { return _threads; }
+  void           print_on(outputStream* st) const;
+};
+
+// Utility class to get list of java threads.
+// Captures instanceHandles under Threads_lock at construction time.
+class ThreadsListEnumerator : public StackObj {
+private:
+  GrowableArray<instanceHandle>* _threads_array;
+public:
+  ThreadsListEnumerator(Thread* cur_thread,
+                        bool include_jvmti_agent_threads = false,
+                        bool include_jni_attaching_threads = true);
+  int            num_threads()            { return _threads_array->length(); }
+  instanceHandle get_threadObj(int index) { return _threads_array->at(index); }
+};
+
+
+// abstract utility class to set new thread states, and restore previous after the block exits
+// All status changes are no-ops when the thread is not "alive" (no
+// threadObj yet), so early-startup threads are handled safely.
+class JavaThreadStatusChanger : public StackObj {
+ private:
+  java_lang_Thread::ThreadStatus _old_state;
+  JavaThread*  _java_thread;
+  bool _is_alive;
+
+  // Remember the thread and its current status; _old_state is only
+  // written (and later restored) when the thread is alive.
+  void save_old_state(JavaThread* java_thread) {
+    _java_thread  = java_thread;
+    _is_alive = is_alive(java_thread);
+    if (is_alive()) {
+      _old_state = java_lang_Thread::get_thread_status(_java_thread->threadObj());
+    }
+  }
+
+ public:
+  static void set_thread_status(JavaThread* java_thread,
+                                java_lang_Thread::ThreadStatus state) {
+    java_lang_Thread::set_thread_status(java_thread->threadObj(), state);
+  }
+
+  void set_thread_status(java_lang_Thread::ThreadStatus state) {
+    if (is_alive()) {
+      set_thread_status(_java_thread, state);
+    }
+  }
+
+  JavaThreadStatusChanger(JavaThread* java_thread,
+                          java_lang_Thread::ThreadStatus state) {
+    save_old_state(java_thread);
+    set_thread_status(state);
+  }
+
+  JavaThreadStatusChanger(JavaThread* java_thread) {
+    save_old_state(java_thread);
+  }
+
+  // Restore the saved status; guarded by is_alive() inside
+  // set_thread_status, so a never-alive thread restores nothing.
+  ~JavaThreadStatusChanger() {
+    set_thread_status(_old_state);
+  }
+
+  static bool is_alive(JavaThread* java_thread) {
+    return java_thread != NULL && java_thread->threadObj() != NULL;
+  }
+
+  bool is_alive() {
+    return _is_alive;
+  }
+};
+
+// Change status to waiting on an object (timed or indefinite)
+// Also bumps the monitor-wait count, and the wait timer when contention
+// monitoring is enabled.
+class JavaThreadInObjectWaitState : public JavaThreadStatusChanger {
+ private:
+  ThreadStatistics* _stat;
+  bool _active;
+
+ public:
+  JavaThreadInObjectWaitState(JavaThread *java_thread, bool timed) :
+    JavaThreadStatusChanger(java_thread,
+                            timed ? java_lang_Thread::IN_OBJECT_WAIT_TIMED : java_lang_Thread::IN_OBJECT_WAIT) {
+    if (is_alive()) {
+      _stat = java_thread->get_thread_stat();
+      _active = ThreadService::is_thread_monitoring_contention();
+      _stat->monitor_wait();
+      if (_active) {
+        _stat->monitor_wait_begin();
+      }
+    } else {
+      _active = false;
+    }
+  }
+
+  ~JavaThreadInObjectWaitState() {
+    if (_active) {
+      _stat->monitor_wait_end();
+    }
+  }
+};
+
+// Change status to parked (timed or indefinite)
+// Note: park events are folded into the monitor-wait statistics
+// (monitor_wait / monitor_wait_begin/end) rather than separate counters.
+class JavaThreadParkedState : public JavaThreadStatusChanger {
+ private:
+  ThreadStatistics* _stat;
+  bool _active;
+
+ public:
+  JavaThreadParkedState(JavaThread *java_thread, bool timed) :
+    JavaThreadStatusChanger(java_thread,
+                            timed ? java_lang_Thread::PARKED_TIMED : java_lang_Thread::PARKED) {
+    if (is_alive()) {
+      _stat = java_thread->get_thread_stat();
+      _active = ThreadService::is_thread_monitoring_contention();
+      _stat->monitor_wait();
+      if (_active) {
+        _stat->monitor_wait_begin();
+      }
+    } else {
+      _active = false;
+    }
+  }
+
+  ~JavaThreadParkedState() {
+    if (_active) {
+      _stat->monitor_wait_end();
+    }
+  }
+};
+
+// Change status to blocked on (re-)entering a synchronization block
+// Contention statistics are collected only for visible Java-level
+// monitors that are actually contended.
+class JavaThreadBlockedOnMonitorEnterState : public JavaThreadStatusChanger {
+ private:
+  ThreadStatistics* _stat;
+  bool _active;
+
+  // Mark java_thread blocked-on-monitor-enter, bump the contended-enter
+  // count, and start the timer when contention monitoring is enabled.
+  // Returns whether the timer was started (caller must later call *_end).
+  static bool contended_enter_begin(JavaThread *java_thread) {
+    set_thread_status(java_thread, java_lang_Thread::BLOCKED_ON_MONITOR_ENTER);
+    ThreadStatistics* stat = java_thread->get_thread_stat();
+    stat->contended_enter();
+    bool active = ThreadService::is_thread_monitoring_contention();
+    if (active) {
+      stat->contended_enter_begin();
+    }
+    return active;
+  }
+
+ public:
+  // java_thread is waiting thread being blocked on monitor reenter.
+  // Current thread is the notifying thread which holds the monitor.
+  static bool wait_reenter_begin(JavaThread *java_thread, ObjectMonitor *obj_m) {
+    assert((java_thread != NULL), "Java thread should not be null here");
+    bool active  = false;
+    if (is_alive(java_thread) && ServiceUtil::visible_oop((oop)obj_m->object())) {
+      active = contended_enter_begin(java_thread);
+    }
+    return active;
+  }
+
+  static void wait_reenter_end(JavaThread *java_thread, bool active) {
+    if (active) {
+      java_thread->get_thread_stat()->contended_enter_end();
+    }
+    set_thread_status(java_thread, java_lang_Thread::RUNNABLE);
+  }
+
+  JavaThreadBlockedOnMonitorEnterState(JavaThread *java_thread, ObjectMonitor *obj_m) :
+    JavaThreadStatusChanger(java_thread) {
+    assert((java_thread != NULL), "Java thread should not be null here");
+    // Change thread status and collect contended enter stats for monitor contended
+    // enter done for external java world objects and it is contended. All other cases
+    // like for vm internal objects and for external objects which are not contended
+    // thread status is not changed and contended enter stat is not collected.
+    _active = false;
+    if (is_alive() && ServiceUtil::visible_oop((oop)obj_m->object()) && obj_m->contentions() > 0) {
+      _stat = java_thread->get_thread_stat();
+      _active = contended_enter_begin(java_thread);
+    }
+  }
+
+  ~JavaThreadBlockedOnMonitorEnterState() {
+    if (_active) {
+      _stat->contended_enter_end();
+    }
+  }
+};
+
+// Change status to sleeping
+// Bumps the sleep count, and times the sleep when contention monitoring
+// is enabled.
+class JavaThreadSleepState : public JavaThreadStatusChanger {
+ private:
+  ThreadStatistics* _stat;
+  bool _active;
+ public:
+  JavaThreadSleepState(JavaThread *java_thread) :
+    JavaThreadStatusChanger(java_thread, java_lang_Thread::SLEEPING) {
+    if (is_alive()) {
+      _stat = java_thread->get_thread_stat();
+      _active = ThreadService::is_thread_monitoring_contention();
+      _stat->thread_sleep();
+      if (_active) {
+        _stat->thread_sleep_begin();
+      }
+    } else {
+      _active = false;
+    }
+  }
+
+  ~JavaThreadSleepState() {
+    if (_active) {
+      _stat->thread_sleep_end();
+    }
+  }
+};