author     trims <none@none>  2010-05-27 12:42:44 -0700
committer  trims <none@none>  2010-05-27 12:42:44 -0700
commit     f9b340d691720c24a0778c5df18794c09b047561 (patch)
tree       726a4f785ad823af0a386dca804b4d2f7778438a /src/share
parent     61d06655ee8ff65282e5a863fee588c34d83deb3 (diff)
parent     122ff3744ee159ae177751480b89dfab1cfb19b4 (diff)
Merge
Diffstat (limited to 'src/share')
-rw-r--r--  src/share/vm/adlc/formssel.cpp                                         |   8
-rw-r--r--  src/share/vm/adlc/formssel.hpp                                         |   6
-rw-r--r--  src/share/vm/adlc/output_c.cpp                                         |  12
-rw-r--r--  src/share/vm/adlc/output_h.cpp                                         |  11
-rw-r--r--  src/share/vm/c1/c1_GraphBuilder.cpp                                    |   6
-rw-r--r--  src/share/vm/code/codeCache.cpp                                        |  24
-rw-r--r--  src/share/vm/code/codeCache.hpp                                        |   2
-rw-r--r--  src/share/vm/code/nmethod.cpp                                          |  42
-rw-r--r--  src/share/vm/code/nmethod.hpp                                          |   9
-rw-r--r--  src/share/vm/compiler/compileBroker.cpp                                |  15
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp  |   2
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp  |  61
-rw-r--r--  src/share/vm/gc_implementation/g1/concurrentMark.cpp                  |   2
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp                 |  33
-rw-r--r--  src/share/vm/gc_implementation/g1/heapRegion.cpp                      |  12
-rw-r--r--  src/share/vm/gc_implementation/shared/spaceDecorator.hpp              |   2
-rw-r--r--  src/share/vm/gc_implementation/shared/vmGCOperations.cpp              |   2
-rw-r--r--  src/share/vm/gc_interface/collectedHeap.cpp                            |   4
-rw-r--r--  src/share/vm/oops/markOop.hpp                                          |  29
-rw-r--r--  src/share/vm/opto/addnode.cpp                                          |  65
-rw-r--r--  src/share/vm/opto/addnode.hpp                                          |   1
-rw-r--r--  src/share/vm/opto/cfgnode.cpp                                          |  58
-rw-r--r--  src/share/vm/opto/escape.cpp                                           |  40
-rw-r--r--  src/share/vm/runtime/globals.hpp                                       |   3
-rw-r--r--  src/share/vm/runtime/java.cpp                                          |   1
-rw-r--r--  src/share/vm/runtime/jniHandles.hpp                                    |  18
-rw-r--r--  src/share/vm/runtime/safepoint.cpp                                     |   2
-rw-r--r--  src/share/vm/runtime/sweeper.cpp                                       | 157
-rw-r--r--  src/share/vm/runtime/sweeper.hpp                                       |   6
-rw-r--r--  src/share/vm/runtime/vframe.cpp                                        |   4
-rw-r--r--  src/share/vm/runtime/vm_version.cpp                                    |   2
-rw-r--r--  src/share/vm/utilities/dtrace.hpp                                      |   6
32 files changed, 413 insertions, 232 deletions
diff --git a/src/share/vm/adlc/formssel.cpp b/src/share/vm/adlc/formssel.cpp
index 5791248f3..dce836ad0 100644
--- a/src/share/vm/adlc/formssel.cpp
+++ b/src/share/vm/adlc/formssel.cpp
@@ -735,7 +735,7 @@ int InstructForm::memory_operand(FormDict &globals) const {
// This instruction captures the machine-independent bottom_type
// Expected use is for pointer vs oop determination for LoadP
-bool InstructForm::captures_bottom_type() const {
+bool InstructForm::captures_bottom_type(FormDict &globals) const {
if( _matrule && _matrule->_rChild &&
(!strcmp(_matrule->_rChild->_opType,"CastPP") || // new result type
!strcmp(_matrule->_rChild->_opType,"CastX2P") || // new result type
@@ -748,6 +748,8 @@ bool InstructForm::captures_bottom_type() const {
else if ( is_ideal_load() == Form::idealP ) return true;
else if ( is_ideal_store() != Form::none ) return true;
+ if (needs_base_oop_edge(globals)) return true;
+
return false;
}
@@ -1061,7 +1063,7 @@ const char *InstructForm::reduce_left(FormDict &globals) const {
// Base class for this instruction, MachNode except for calls
-const char *InstructForm::mach_base_class() const {
+const char *InstructForm::mach_base_class(FormDict &globals) const {
if( is_ideal_call() == Form::JAVA_STATIC ) {
return "MachCallStaticJavaNode";
}
@@ -1092,7 +1094,7 @@ const char *InstructForm::mach_base_class() const {
else if (is_ideal_nop()) {
return "MachNopNode";
}
- else if (captures_bottom_type()) {
+ else if (captures_bottom_type(globals)) {
return "MachTypeNode";
} else {
return "MachNode";
diff --git a/src/share/vm/adlc/formssel.hpp b/src/share/vm/adlc/formssel.hpp
index 66583ef1a..57b418bb1 100644
--- a/src/share/vm/adlc/formssel.hpp
+++ b/src/share/vm/adlc/formssel.hpp
@@ -188,7 +188,7 @@ public:
// This instruction captures the machine-independent bottom_type
// Expected use is for pointer vs oop determination for LoadP
- virtual bool captures_bottom_type() const;
+ virtual bool captures_bottom_type(FormDict& globals) const;
virtual const char *cost(); // Access ins_cost attribute
virtual uint num_opnds(); // Count of num_opnds for MachNode class
@@ -229,7 +229,7 @@ public:
const char *reduce_left(FormDict &globals) const;
// Base class for this instruction, MachNode except for calls
- virtual const char *mach_base_class() const;
+ virtual const char *mach_base_class(FormDict &globals) const;
// Check if this instruction can cisc-spill to 'alternate'
bool cisc_spills_to(ArchDesc &AD, InstructForm *alternate);
@@ -252,7 +252,7 @@ public:
bool has_short_branch_form() { return _short_branch_form != NULL; }
// Output short branch prototypes and method bodies
void declare_short_branch_methods(FILE *fp_cpp);
- bool define_short_branch_methods(FILE *fp_cpp);
+ bool define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp);
uint alignment() { return _alignment; }
void set_alignment(uint val) { _alignment = val; }
diff --git a/src/share/vm/adlc/output_c.cpp b/src/share/vm/adlc/output_c.cpp
index 580a022ad..14b1dc9e5 100644
--- a/src/share/vm/adlc/output_c.cpp
+++ b/src/share/vm/adlc/output_c.cpp
@@ -1382,7 +1382,7 @@ static void generate_peepreplace( FILE *fp, FormDict &globals, PeepMatch *pmatch
inst_num, unmatched_edge);
}
// If new instruction captures bottom type
- if( root_form->captures_bottom_type() ) {
+ if( root_form->captures_bottom_type(globals) ) {
// Get bottom type from instruction whose result we are replacing
fprintf(fp, " root->_bottom_type = inst%d->bottom_type();\n", inst_num);
}
@@ -2963,7 +2963,7 @@ void ArchDesc::defineClasses(FILE *fp) {
used |= instr->define_cisc_version(*this, fp);
// Output code to convert to the short branch version, if applicable
- used |= instr->define_short_branch_methods(fp);
+ used |= instr->define_short_branch_methods(*this, fp);
}
// Construct the method called by cisc_version() to copy inputs and operands.
@@ -3708,7 +3708,7 @@ void ArchDesc::buildMachNode(FILE *fp_cpp, InstructForm *inst, const char *inden
}
// Fill in the bottom_type where requested
- if ( inst->captures_bottom_type() ) {
+ if ( inst->captures_bottom_type(_globalNames) ) {
fprintf(fp_cpp, "%s node->_bottom_type = _leaf->bottom_type();\n", indent);
}
if( inst->is_ideal_if() ) {
@@ -3762,7 +3762,7 @@ bool InstructForm::define_cisc_version(ArchDesc &AD, FILE *fp_cpp) {
// Create the MachNode object
fprintf(fp_cpp, " %sNode *node = new (C) %sNode();\n", name, name);
// Fill in the bottom_type where requested
- if ( this->captures_bottom_type() ) {
+ if ( this->captures_bottom_type(AD.globalNames()) ) {
fprintf(fp_cpp, " node->_bottom_type = bottom_type();\n");
}
@@ -3798,7 +3798,7 @@ void InstructForm::declare_short_branch_methods(FILE *fp_hpp) {
//---------------------------define_short_branch_methods-----------------------
// Build definitions for short branch methods
-bool InstructForm::define_short_branch_methods(FILE *fp_cpp) {
+bool InstructForm::define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp) {
if (has_short_branch_form()) {
InstructForm *short_branch = short_branch_form();
const char *name = short_branch->_ident;
@@ -3813,7 +3813,7 @@ bool InstructForm::define_short_branch_methods(FILE *fp_cpp) {
fprintf(fp_cpp, " node->_fcnt = _fcnt;\n");
}
// Fill in the bottom_type where requested
- if ( this->captures_bottom_type() ) {
+ if ( this->captures_bottom_type(AD.globalNames()) ) {
fprintf(fp_cpp, " node->_bottom_type = bottom_type();\n");
}
diff --git a/src/share/vm/adlc/output_h.cpp b/src/share/vm/adlc/output_h.cpp
index 2e27f706c..bd357fd2e 100644
--- a/src/share/vm/adlc/output_h.cpp
+++ b/src/share/vm/adlc/output_h.cpp
@@ -1493,7 +1493,7 @@ void ArchDesc::declareClasses(FILE *fp) {
// Build class definition for this instruction
fprintf(fp,"\n");
fprintf(fp,"class %sNode : public %s { \n",
- instr->_ident, instr->mach_base_class() );
+ instr->_ident, instr->mach_base_class(_globalNames) );
fprintf(fp,"private:\n");
fprintf(fp," MachOper *_opnd_array[%d];\n", instr->num_opnds() );
if ( instr->is_ideal_jump() ) {
@@ -1566,7 +1566,7 @@ void ArchDesc::declareClasses(FILE *fp) {
// Use MachNode::ideal_Opcode() for nodes based on MachNode class
// if the ideal_Opcode == Op_Node.
if ( strcmp("Node", instr->ideal_Opcode(_globalNames)) != 0 ||
- strcmp("MachNode", instr->mach_base_class()) != 0 ) {
+ strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
fprintf(fp," virtual int ideal_Opcode() const { return Op_%s; }\n",
instr->ideal_Opcode(_globalNames) );
}
@@ -1631,7 +1631,7 @@ void ArchDesc::declareClasses(FILE *fp) {
// Use MachNode::oper_input_base() for nodes based on MachNode class
// if the base == 1.
if ( instr->oper_input_base(_globalNames) != 1 ||
- strcmp("MachNode", instr->mach_base_class()) != 0 ) {
+ strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
fprintf(fp," virtual uint oper_input_base() const { return %d; }\n",
instr->oper_input_base(_globalNames));
}
@@ -1906,11 +1906,6 @@ void ArchDesc::declareClasses(FILE *fp) {
fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n",
offset, offset+1, offset+1);
}
- else if( instr->needs_base_oop_edge(_globalNames) ) {
- // Special hack for ideal AddP. Bottom type is an oop IFF it has a
- // legal base-pointer input. Otherwise it is NOT an oop.
- fprintf(fp," const Type *bottom_type() const { return AddPNode::mach_bottom_type(this); } // AddP\n");
- }
else if (instr->is_tls_instruction()) {
// Special hack for tlsLoadP
fprintf(fp," const Type *bottom_type() const { return TypeRawPtr::BOTTOM; } // tlsLoadP\n");
diff --git a/src/share/vm/c1/c1_GraphBuilder.cpp b/src/share/vm/c1/c1_GraphBuilder.cpp
index 5e622e4a8..27ca666ff 100644
--- a/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -2978,7 +2978,11 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
if (!InlineNatives ) INLINE_BAILOUT("intrinsic method inlining disabled");
- if (callee->is_synchronized()) INLINE_BAILOUT("intrinsic method is synchronized");
+ if (callee->is_synchronized()) {
+ // We don't currently support any synchronized intrinsics
+ return false;
+ }
+
// callee seems like a good candidate
// determine id
bool preserves_state = false;
diff --git a/src/share/vm/code/codeCache.cpp b/src/share/vm/code/codeCache.cpp
index b7b1e285b..1d333d210 100644
--- a/src/share/vm/code/codeCache.cpp
+++ b/src/share/vm/code/codeCache.cpp
@@ -124,6 +124,23 @@ nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
return (nmethod*)cb;
}
+nmethod* CodeCache::first_nmethod() {
+ assert_locked_or_safepoint(CodeCache_lock);
+ CodeBlob* cb = first();
+ while (cb != NULL && !cb->is_nmethod()) {
+ cb = next(cb);
+ }
+ return (nmethod*)cb;
+}
+
+nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
+ assert_locked_or_safepoint(CodeCache_lock);
+ cb = next(cb);
+ while (cb != NULL && !cb->is_nmethod()) {
+ cb = next(cb);
+ }
+ return (nmethod*)cb;
+}
CodeBlob* CodeCache::allocate(int size) {
// Do not seize the CodeCache lock here--if the caller has not
@@ -414,7 +431,7 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
saved->set_speculatively_disconnected(false);
saved->set_saved_nmethod_link(NULL);
if (PrintMethodFlushing) {
- saved->print_on(tty, " ### nmethod is reconnected");
+ saved->print_on(tty, " ### nmethod is reconnected\n");
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
@@ -432,7 +449,8 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
}
void CodeCache::remove_saved_code(nmethod* nm) {
- MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ // For conc swpr this will be called with CodeCache_lock taken by caller
+ assert_locked_or_safepoint(CodeCache_lock);
assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
nmethod* saved = _saved_nmethods;
nmethod* prev = NULL;
@@ -463,7 +481,7 @@ void CodeCache::speculatively_disconnect(nmethod* nm) {
nm->set_saved_nmethod_link(_saved_nmethods);
_saved_nmethods = nm;
if (PrintMethodFlushing) {
- nm->print_on(tty, " ### nmethod is speculatively disconnected");
+ nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
diff --git a/src/share/vm/code/codeCache.hpp b/src/share/vm/code/codeCache.hpp
index 9eacd5d4c..3107e9778 100644
--- a/src/share/vm/code/codeCache.hpp
+++ b/src/share/vm/code/codeCache.hpp
@@ -102,6 +102,8 @@ class CodeCache : AllStatic {
static CodeBlob* next (CodeBlob* cb);
static CodeBlob* alive(CodeBlob *cb);
static nmethod* alive_nmethod(CodeBlob *cb);
+ static nmethod* first_nmethod();
+ static nmethod* next_nmethod (CodeBlob* cb);
static int nof_blobs() { return _number_of_blobs; }
// GC support
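
The two new helpers above let callers iterate over nmethods only, skipping the other kinds of CodeBlob (adapters, stubs) that live in the code cache. A minimal sketch of the intended pattern, assuming the HotSpot-internal nmethod, CodeCache, and MutexLockerEx types; this is illustrative, not code from the changeset:

// Walk every nmethod while holding the CodeCache_lock (or while at a
// safepoint); non-nmethod blobs are skipped inside the helpers.
void for_each_nmethod() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  for (nmethod* nm = CodeCache::first_nmethod();
       nm != NULL;
       nm = CodeCache::next_nmethod(nm)) {
    if (nm->is_alive()) {
      // visit nm, e.g. clean its inline caches
    }
  }
}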
diff --git a/src/share/vm/code/nmethod.cpp b/src/share/vm/code/nmethod.cpp
index f7845bb8b..f0b80fc4e 100644
--- a/src/share/vm/code/nmethod.cpp
+++ b/src/share/vm/code/nmethod.cpp
@@ -1014,9 +1014,7 @@ void nmethod::clear_inline_caches() {
void nmethod::cleanup_inline_caches() {
- assert(SafepointSynchronize::is_at_safepoint() &&
- !CompiledIC_lock->is_locked() &&
- !Patching_lock->is_locked(), "no threads must be updating the inline caches by them selfs");
+ assert_locked_or_safepoint(CompiledIC_lock);
// If the method is not entrant or zombie then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
@@ -1071,7 +1069,6 @@ void nmethod::mark_as_seen_on_stack() {
// Tell if a non-entrant method can be converted to a zombie (i.e., there are no activations on the stack)
bool nmethod::can_not_entrant_be_converted() {
assert(is_not_entrant(), "must be a non-entrant method");
- assert(SafepointSynchronize::is_at_safepoint(), "must be called during a safepoint");
// Since the nmethod sweeper only does partial sweep the sweeper's traversal
// count can be greater than the stack traversal count before it hits the
@@ -1127,7 +1124,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
_method = NULL; // Clear the method of this dead nmethod
}
// Make the class unloaded - i.e., change state and notify sweeper
- check_safepoint();
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
if (is_in_use()) {
// Transitioning directly from live to unloaded -- so
// we need to force a cache clean-up; remember this
@@ -1220,17 +1217,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
}
- // When the nmethod becomes zombie it is no longer alive so the
- // dependencies must be flushed. nmethods in the not_entrant
- // state will be flushed later when the transition to zombie
- // happens or they get unloaded.
- if (state == zombie) {
- assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
- flush_dependencies(NULL);
- } else {
- assert(state == not_entrant, "other cases may need to be handled differently");
- }
-
was_alive = is_in_use(); // Read state under lock
// Change state
@@ -1241,6 +1227,17 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
} // leave critical region under Patching_lock
+ // When the nmethod becomes zombie it is no longer alive so the
+ // dependencies must be flushed. nmethods in the not_entrant
+ // state will be flushed later when the transition to zombie
+ // happens or they get unloaded.
+ if (state == zombie) {
+ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ flush_dependencies(NULL);
+ } else {
+ assert(state == not_entrant, "other cases may need to be handled differently");
+ }
+
if (state == not_entrant) {
Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
} else {
@@ -1310,21 +1307,13 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
return true;
}
-
-#ifndef PRODUCT
-void nmethod::check_safepoint() {
- assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-}
-#endif
-
-
void nmethod::flush() {
// Note that there are no valid oops in the nmethod anymore.
assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
- check_safepoint();
+ assert_locked_or_safepoint(CodeCache_lock);
// completely deallocate this method
EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
@@ -1373,7 +1362,7 @@ void nmethod::flush() {
// notifies instanceKlasses that are reachable
void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
- assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
+ assert_locked_or_safepoint(CodeCache_lock);
assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
"is_alive is non-NULL if and only if we are called during GC");
if (!has_flushed_dependencies()) {
@@ -2266,7 +2255,6 @@ void nmethod::print() const {
tty->print(" for method " INTPTR_FORMAT , (address)method());
tty->print(" { ");
if (version()) tty->print("v%d ", version());
- if (level()) tty->print("l%d ", level());
if (is_in_use()) tty->print("in_use ");
if (is_not_entrant()) tty->print("not_entrant ");
if (is_zombie()) tty->print("zombie ");
diff --git a/src/share/vm/code/nmethod.hpp b/src/share/vm/code/nmethod.hpp
index 05664fd97..9dde054fb 100644
--- a/src/share/vm/code/nmethod.hpp
+++ b/src/share/vm/code/nmethod.hpp
@@ -82,7 +82,6 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC {
struct nmFlags {
friend class VMStructs;
unsigned int version:8; // version number (0 = first version)
- unsigned int level:4; // optimization level
unsigned int age:4; // age (in # of sweep steps)
unsigned int state:2; // {alive, zombie, unloaded}
@@ -410,14 +409,13 @@ class nmethod : public CodeBlob {
void flush_dependencies(BoolObjectClosure* is_alive);
bool has_flushed_dependencies() { return flags.hasFlushedDependencies; }
void set_has_flushed_dependencies() {
- check_safepoint();
assert(!has_flushed_dependencies(), "should only happen once");
flags.hasFlushedDependencies = 1;
}
bool is_marked_for_reclamation() const { return flags.markedForReclamation; }
- void mark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 1; }
- void unmark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 0; }
+ void mark_for_reclamation() { flags.markedForReclamation = 1; }
+ void unmark_for_reclamation() { flags.markedForReclamation = 0; }
bool has_unsafe_access() const { return flags.has_unsafe_access; }
void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }
@@ -428,9 +426,6 @@ class nmethod : public CodeBlob {
bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }
- int level() const { return flags.level; }
- void set_level(int newLevel) { check_safepoint(); flags.level = newLevel; }
-
int comp_level() const { return _comp_level; }
int version() const { return flags.version; }
diff --git a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp
index f3a0514d1..8bbbb6ffc 100644
--- a/src/share/vm/compiler/compileBroker.cpp
+++ b/src/share/vm/compiler/compileBroker.cpp
@@ -461,12 +461,25 @@ void CompileQueue::add(CompileTask* task) {
//
// Get the next CompileTask from a CompileQueue
CompileTask* CompileQueue::get() {
+ NMethodSweeper::possibly_sweep();
+
MutexLocker locker(lock());
// Wait for an available CompileTask.
while (_first == NULL) {
// There is no work to be done right now. Wait.
- lock()->wait();
+ if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
+ // During the emergency sweeping periods, wake up and sweep occasionally
+ bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
+ if (timedout) {
+ MutexUnlocker ul(lock());
+ // When otherwise not busy, run nmethod sweeping
+ NMethodSweeper::possibly_sweep();
+ }
+ } else {
+ // During normal operation no need to wake up on timer
+ lock()->wait();
+ }
}
CompileTask* task = _first;
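
The change above gives CompileQueue::get() a timed wait during code-cache emergencies, so an otherwise idle compiler thread periodically drops the queue lock and helps sweep. A self-contained analogue of that wait/unlock/sweep pattern using the standard library (HotSpot's Mutex, MutexLocker, and MutexUnlocker are VM-internal); the names and the 5-second interval mirror NmethodSweepCheckInterval but are illustrative:

#include <chrono>
#include <condition_variable>
#include <deque>
#include <mutex>

std::mutex              queue_lock;
std::condition_variable queue_cv;
std::deque<int>         queue;             // stands in for the CompileTask list
bool                    emergency = false; // stands in for CodeCache::needs_flushing()

void possibly_sweep();                     // side work, defined elsewhere

int get_task() {
  possibly_sweep();                        // sweep before blocking, as in CompileQueue::get()
  std::unique_lock<std::mutex> lk(queue_lock);
  while (queue.empty()) {
    if (emergency) {
      // Timed wait: wake up periodically to help sweeping along.
      bool timed_out = queue_cv.wait_for(lk, std::chrono::seconds(5))
                       == std::cv_status::timeout;
      if (timed_out) {
        lk.unlock();                       // drop the lock while sweeping
        possibly_sweep();
        lk.lock();
      }
    } else {
      queue_cv.wait(lk);                   // normal operation: no timer needed
    }
  }
  int task = queue.front();
  queue.pop_front();
  return task;
}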
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp
index aed28a229..61c6a6251 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp
index d49e307ea..238999ed8 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,30 +32,75 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
displaced_mark = nth_bit(2), // i.e. 0x4
next_mask = ~(right_n_bits(3)) // i.e. ~(0x7)
};
- intptr_t _next;
+
+ // Below, we want _narrow_next in the "higher" 32 bit slot,
+ // whose position will depend on endian-ness of the platform.
+ // This is so that there is no interference with the
+ // cms_free_bit occupying bit position 7 (lsb == 0)
+ // when we are using compressed oops; see FreeChunk::isFree().
+ // We cannot move the cms_free_bit down because currently
+ // biased locking code assumes that age bits are contiguous
+ // with the lock bits. Even if that assumption were relaxed,
+ // the least position we could move this bit to would be
+ // to bit position 3, which would require 16 byte alignment.
+ typedef struct {
+#ifdef VM_LITTLE_ENDIAN
+ LP64_ONLY(narrowOop _pad;)
+ narrowOop _narrow_next;
+#else
+ narrowOop _narrow_next;
+ LP64_ONLY(narrowOop _pad;)
+#endif
+ } Data;
+
+ union {
+ intptr_t _next;
+ Data _data;
+ };
public:
inline PromotedObject* next() const {
- return (PromotedObject*)(_next & next_mask);
+ assert(!((FreeChunk*)this)->isFree(), "Error");
+ PromotedObject* res;
+ if (UseCompressedOops) {
+ // The next pointer is a compressed oop stored in the top 32 bits
+ res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
+ } else {
+ res = (PromotedObject*)(_next & next_mask);
+ }
+ assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Not an oop?");
+ return res;
}
inline void setNext(PromotedObject* x) {
- assert(((intptr_t)x & ~next_mask) == 0,
- "Conflict in bit usage, "
- " or insufficient alignment of objects");
- _next |= (intptr_t)x;
+ assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
+ "or insufficient alignment of objects");
+ if (UseCompressedOops) {
+ assert(_data._narrow_next == 0, "Overwrite?");
+ _data._narrow_next = oopDesc::encode_heap_oop(oop(x));
+ } else {
+ _next |= (intptr_t)x;
+ }
+ assert(!((FreeChunk*)this)->isFree(), "Error");
}
inline void setPromotedMark() {
_next |= promoted_mask;
+ assert(!((FreeChunk*)this)->isFree(), "Error");
}
inline bool hasPromotedMark() const {
+ assert(!((FreeChunk*)this)->isFree(), "Error");
return (_next & promoted_mask) == promoted_mask;
}
inline void setDisplacedMark() {
_next |= displaced_mark;
+ assert(!((FreeChunk*)this)->isFree(), "Error");
}
inline bool hasDisplacedMark() const {
+ assert(!((FreeChunk*)this)->isFree(), "Error");
return (_next & displaced_mark) != 0;
}
- inline void clearNext() { _next = 0; }
+ inline void clearNext() {
+ _next = 0;
+ assert(!((FreeChunk*)this)->isFree(), "Error");
+ }
debug_only(void *next_addr() { return (void *) &_next; })
};
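
The union above overlays a 32-bit _narrow_next on the full-width _next so that, with compressed oops, the narrow "next" value occupies the high half of the 64-bit word and never writes the low byte where the cms_free bit (bit 7) lives. A self-contained sketch of the endianness-dependent placement; VM_LITTLE_ENDIAN is defined by hand here for the demo, and int64_t stands in for intptr_t on an LP64 platform:

#include <cstdint>
#include <cstdio>

#define VM_LITTLE_ENDIAN 1     // assumed x86-style host for this demo

union Word {
  int64_t _next;               // stands in for intptr_t under LP64
  struct {
#ifdef VM_LITTLE_ENDIAN
    uint32_t _pad;             // low 32 bits (low addresses hold low-order bytes)
    uint32_t _narrow_next;     // high 32 bits
#else
    uint32_t _narrow_next;     // big endian: high-order bytes come first
    uint32_t _pad;
#endif
  } _data;
};

int main() {
  Word w;
  w._next = 0;
  w._data._narrow_next = 0xdeadbeefu;
  // The low byte of _next stays untouched, so a flag bit there (like
  // cms_free_bit, bit 7, tested by FreeChunk::isFree()) stays clear.
  printf("low byte of _next = 0x%02x\n", (unsigned)(w._next & 0xff));
  return 0;
}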
diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index d26f47c24..feee1bbad 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -766,10 +766,12 @@ void ConcurrentMark::checkpointRootsInitialPre() {
_has_aborted = false;
+#ifndef PRODUCT
if (G1PrintReachableAtInitialMark) {
print_reachable("at-cycle-start",
true /* use_prev_marking */, true /* all */);
}
+#endif
// Initialise marking structures. This has to be done in a STW phase.
reset();
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 017102285..963cca836 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -471,21 +471,23 @@ HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
res->zero_fill_state() == HeapRegion::Allocated)),
"Alloc Regions must be zero filled (and non-H)");
}
- if (res != NULL && res->is_empty()) _free_regions--;
- assert(res == NULL ||
- (!res->isHumongous() &&
- (!zero_filled ||
- res->zero_fill_state() == HeapRegion::Allocated)),
- "Non-young alloc Regions must be zero filled (and non-H)");
-
- if (G1PrintHeapRegions) {
- if (res != NULL) {
+ if (res != NULL) {
+ if (res->is_empty()) {
+ _free_regions--;
+ }
+ assert(!res->isHumongous() &&
+ (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated),
+ err_msg("Non-young alloc Regions must be zero filled (and non-H):"
+ " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d",
+ res->isHumongous(), zero_filled, res->zero_fill_state()));
+ assert(!res->is_on_unclean_list(),
+ "Alloc Regions must not be on the unclean list");
+ if (G1PrintHeapRegions) {
gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
"top "PTR_FORMAT,
res->hrs_index(), res->bottom(), res->end(), res->top());
}
}
-
return res;
}
@@ -2338,10 +2340,12 @@ void G1CollectedHeap::verify(bool allow_dirty,
gclog_or_tty->print_cr("Heap:");
print_on(gclog_or_tty, true /* extended */);
gclog_or_tty->print_cr("");
+#ifndef PRODUCT
if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
concurrent_mark()->print_reachable("at-verification-failure",
use_prev_marking, false /* all */);
}
+#endif
gclog_or_tty->flush();
}
guarantee(!failures, "there should not have been any failures");
@@ -4600,6 +4604,15 @@ void G1CollectedHeap::wait_for_cleanup_complete_locked() {
void
G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
assert(ZF_mon->owned_by_self(), "precondition.");
+#ifdef ASSERT
+ if (r->is_gc_alloc_region()) {
+ ResourceMark rm;
+ stringStream region_str;
+ print_on(&region_str);
+ assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s",
+ region_str.as_string()));
+ }
+#endif
_unclean_region_list.insert_before_head(r);
}
diff --git a/src/share/vm/gc_implementation/g1/heapRegion.cpp b/src/share/vm/gc_implementation/g1/heapRegion.cpp
index 6a5a9d8d0..326660cf6 100644
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp
@@ -554,11 +554,19 @@ HeapWord* HeapRegion::allocate(size_t size) {
#endif
void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
- assert(top() == bottom() || zfs == Allocated,
- "Region must be empty, or we must be setting it to allocated.");
assert(ZF_mon->owned_by_self() ||
Universe::heap()->is_gc_active(),
"Must hold the lock or be a full GC to modify.");
+#ifdef ASSERT
+ if (top() != bottom() && zfs != Allocated) {
+ ResourceMark rm;
+ stringStream region_str;
+ print_on(&region_str);
+ assert(top() == bottom() || zfs == Allocated,
+ err_msg("Region must be empty, or we must be setting it to allocated. "
+ "_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
+ }
+#endif
_zfs = zfs;
}
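
Both G1 asserts above follow the same idiom: test the condition first, and only on the failing path pay for a ResourceMark, a stringStream, and a full print_on() dump before asserting with err_msg. A portable sketch of that "expensive diagnostics only on failure" idiom, with snprintf standing in for the HotSpot-internal stringStream/err_msg machinery:

#include <cassert>
#include <cstddef>
#include <cstdio>

struct Region {
  int _zfs;                                    // zero-fill state
  void print_on(char* buf, size_t n) const {   // cheap stand-in for print_on()
    snprintf(buf, n, "Region{_zfs=%d}", _zfs);
  }
};

void set_zero_fill_state(Region* r, int zfs, bool allowed) {
#ifndef NDEBUG
  if (!allowed) {                  // condition already known to fail:
    char msg[128];                 // now it is fine to do expensive work
    r->print_on(msg, sizeof(msg));
    fprintf(stderr, "Region must be empty or being allocated: %s, zfs=%d\n",
            msg, zfs);
    assert(allowed);
  }
#endif
  r->_zfs = zfs;
}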
diff --git a/src/share/vm/gc_implementation/shared/spaceDecorator.hpp b/src/share/vm/gc_implementation/shared/spaceDecorator.hpp
index 9566512cb..4edb7a0f2 100644
--- a/src/share/vm/gc_implementation/shared/spaceDecorator.hpp
+++ b/src/share/vm/gc_implementation/shared/spaceDecorator.hpp
@@ -109,7 +109,7 @@ class SpaceMangler: public CHeapObj {
// is fully constructed. Also is used when a generation is expanded
// and possibly before the spaces have been reshaped to the new
// size of the generation.
- static void mangle_region(MemRegion mr);
+ static void mangle_region(MemRegion mr) PRODUCT_RETURN;
};
class ContiguousSpace;
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
index 17d18db32..08a32b9a8 100644
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
@@ -32,10 +32,12 @@ HS_DTRACE_PROBE_DECL(hotspot, gc__end);
// for the other file anymore. The dtrace probes have to remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
HS_DTRACE_PROBE1(hotspot, gc__begin, full);
+ HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}
void VM_GC_Operation::notify_gc_end() {
HS_DTRACE_PROBE(hotspot, gc__end);
+ HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}
void VM_GC_Operation::acquire_pending_list_lock() {
diff --git a/src/share/vm/gc_interface/collectedHeap.cpp b/src/share/vm/gc_interface/collectedHeap.cpp
index c5bf893c9..50fad27d8 100644
--- a/src/share/vm/gc_interface/collectedHeap.cpp
+++ b/src/share/vm/gc_interface/collectedHeap.cpp
@@ -65,7 +65,7 @@ CollectedHeap::CollectedHeap()
void CollectedHeap::pre_initialize() {
// Used for ReduceInitialCardMarks (when COMPILER2 is used);
// otherwise remains unused.
-#ifdef COMPLER2
+#ifdef COMPILER2
_defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
&& (DeferInitialCardMark || card_mark_must_follow_store());
#else
@@ -309,7 +309,7 @@ void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
DEBUG_ONLY(fill_args_check(start, words);)
HandleMark hm; // Free handles before leaving.
-#ifdef LP64
+#ifdef _LP64
// A single array can fill ~8G, so multiple objects are needed only in 64-bit.
// First fill with arrays, ensuring that any remaining space is big enough to
// fill. The remainder is filled with a single object.
diff --git a/src/share/vm/oops/markOop.hpp b/src/share/vm/oops/markOop.hpp
index 8dd73688f..f2af03ae2 100644
--- a/src/share/vm/oops/markOop.hpp
+++ b/src/share/vm/oops/markOop.hpp
@@ -27,12 +27,26 @@
// Note that the mark is not a real oop but just a word.
// It is placed in the oop hierarchy for historical reasons.
//
-// Bit-format of an object header (most significant first):
+// Bit-format of an object header (most significant first, big endian layout below):
//
-// 32 bits: unused:0 hash:25 age:4 biased_lock:1 lock:2
-// 64 bits: unused:24 hash:31 cms:2 age:4 biased_lock:1 lock:2
-// unused:20 size:35 cms:2 age:4 biased_lock:1 lock:2 (if cms
-// free chunk)
+// 32 bits:
+// --------
+// hash:25 ------------>| age:4 biased_lock:1 lock:2 (normal object)
+// JavaThread*:23 epoch:2 age:4 biased_lock:1 lock:2 (biased object)
+// size:32 ------------------------------------------>| (CMS free block)
+// PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
+//
+// 64 bits:
+// --------
+// unused:25 hash:31 -->| unused:1 age:4 biased_lock:1 lock:2 (normal object)
+// JavaThread*:54 epoch:2 unused:1 age:4 biased_lock:1 lock:2 (biased object)
+// PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
+// size:64 ----------------------------------------------------->| (CMS free block)
+//
+// unused:25 hash:31 -->| cms_free:1 age:4 biased_lock:1 lock:2 (COOPs && normal object)
+// JavaThread*:54 epoch:2 cms_free:1 age:4 biased_lock:1 lock:2 (COOPs && biased object)
+// narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
+// unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
//
// - hash contains the identity hash value: largest value is
// 31 bits, see os::random(). Also, 64-bit vm's require
@@ -61,8 +75,9 @@
// significant fraction of the eden semispaces and were not
// promoted promptly, causing an increase in the amount of copying
// performed. The runtime system aligns all JavaThread* pointers to
-// a very large value (currently 128 bytes) to make room for the
-// age bits when biased locking is enabled.
+// a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
+// to make room for the age bits & the epoch bits (used in support of
+// biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
//
// [JavaThread* | epoch | age | 1 | 01] lock is biased toward given thread
// [0 | epoch | age | 1 | 01] lock is anonymously biased
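
As a worked example of the layout table above, the 64-bit "normal object" format places lock in bits 0-1, biased_lock in bit 2, age in bits 3-6, one unused/cms_free bit at bit 7, and the 31-bit hash starting at bit 8. A self-contained sketch that decodes those fields; the widths come from the comment above, not from the markOop API:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t mark = 0x00000000abcdef01ULL;       // example mark word
  unsigned lock        =  mark        & 0x3;   // bits 0-1
  unsigned biased_lock = (mark >> 2)  & 0x1;   // bit 2
  unsigned age         = (mark >> 3)  & 0xf;   // bits 3-6
  uint64_t hash        = (mark >> 8) & ((1ULL << 31) - 1); // 31-bit hash
  printf("lock=%u biased=%u age=%u hash=%llx\n",
         lock, biased_lock, age, (unsigned long long)hash);
  return 0;
}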
diff --git a/src/share/vm/opto/addnode.cpp b/src/share/vm/opto/addnode.cpp
index 0af6bafd7..0e7859fc7 100644
--- a/src/share/vm/opto/addnode.cpp
+++ b/src/share/vm/opto/addnode.cpp
@@ -714,71 +714,6 @@ uint AddPNode::match_edge(uint idx) const {
return idx > Base;
}
-//---------------------------mach_bottom_type----------------------------------
-// Utility function for use by ADLC. Implements bottom_type for matched AddP.
-const Type *AddPNode::mach_bottom_type( const MachNode* n) {
- Node* base = n->in(Base);
- const Type *t = base->bottom_type();
- if ( t == Type::TOP ) {
- // an untyped pointer
- return TypeRawPtr::BOTTOM;
- }
- const TypePtr* tp = t->isa_oopptr();
- if ( tp == NULL ) return t;
- if ( tp->_offset == TypePtr::OffsetBot ) return tp;
-
- // We must carefully add up the various offsets...
- intptr_t offset = 0;
- const TypePtr* tptr = NULL;
-
- uint numopnds = n->num_opnds();
- uint index = n->oper_input_base();
- for ( uint i = 1; i < numopnds; i++ ) {
- MachOper *opnd = n->_opnds[i];
- // Check for any interesting operand info.
- // In particular, check for both memory and non-memory operands.
- // %%%%% Clean this up: use xadd_offset
- intptr_t con = opnd->constant();
- if ( con == TypePtr::OffsetBot ) goto bottom_out;
- offset += con;
- con = opnd->constant_disp();
- if ( con == TypePtr::OffsetBot ) goto bottom_out;
- offset += con;
- if( opnd->scale() != 0 ) goto bottom_out;
-
- // Check each operand input edge. Find the 1 allowed pointer
- // edge. Other edges must be index edges; track exact constant
- // inputs and otherwise assume the worst.
- for ( uint j = opnd->num_edges(); j > 0; j-- ) {
- Node* edge = n->in(index++);
- const Type* et = edge->bottom_type();
- const TypeX* eti = et->isa_intptr_t();
- if ( eti == NULL ) {
- // there must be one pointer among the operands
- guarantee(tptr == NULL, "must be only one pointer operand");
- if (UseCompressedOops && Universe::narrow_oop_shift() == 0) {
- // 32-bits narrow oop can be the base of address expressions
- tptr = et->make_ptr()->isa_oopptr();
- } else {
- // only regular oops are expected here
- tptr = et->isa_oopptr();
- }
- guarantee(tptr != NULL, "non-int operand must be pointer");
- if (tptr->higher_equal(tp->add_offset(tptr->offset())))
- tp = tptr; // Set more precise type for bailout
- continue;
- }
- if ( eti->_hi != eti->_lo ) goto bottom_out;
- offset += eti->_lo;
- }
- }
- guarantee(tptr != NULL, "must be exactly one pointer operand");
- return tptr->add_offset(offset);
-
- bottom_out:
- return tp->add_offset(TypePtr::OffsetBot);
-}
-
//=============================================================================
//------------------------------Identity---------------------------------------
Node *OrINode::Identity( PhaseTransform *phase ) {
diff --git a/src/share/vm/opto/addnode.hpp b/src/share/vm/opto/addnode.hpp
index acd210c93..9512b1b26 100644
--- a/src/share/vm/opto/addnode.hpp
+++ b/src/share/vm/opto/addnode.hpp
@@ -151,7 +151,6 @@ public:
// Do not match base-ptr edge
virtual uint match_edge(uint idx) const;
- static const Type *mach_bottom_type(const MachNode* n); // used by ad_<arch>.hpp
};
//------------------------------OrINode----------------------------------------
diff --git a/src/share/vm/opto/cfgnode.cpp b/src/share/vm/opto/cfgnode.cpp
index 383f4f8c9..8daff2519 100644
--- a/src/share/vm/opto/cfgnode.cpp
+++ b/src/share/vm/opto/cfgnode.cpp
@@ -1654,6 +1654,64 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (opt != NULL) return opt;
}
+ if (in(1) != NULL && in(1)->Opcode() == Op_AddP && can_reshape) {
+ // Try to undo Phi of AddP:
+ // (Phi (AddP base base y) (AddP base2 base2 y))
+ // becomes:
+ // newbase := (Phi base base2)
+ // (AddP newbase newbase y)
+ //
+ // This occurs as a result of unsuccessful split_thru_phi and
+ // interferes with taking advantage of addressing modes. See the
+ // clone_shift_expressions code in matcher.cpp
+ Node* addp = in(1);
+ const Type* type = addp->in(AddPNode::Base)->bottom_type();
+ Node* y = addp->in(AddPNode::Offset);
+ if (y != NULL && addp->in(AddPNode::Base) == addp->in(AddPNode::Address)) {
+ // make sure that all the inputs are similar to the first one,
+ // i.e. AddP with base == address and same offset as first AddP
+ bool doit = true;
+ for (uint i = 2; i < req(); i++) {
+ if (in(i) == NULL ||
+ in(i)->Opcode() != Op_AddP ||
+ in(i)->in(AddPNode::Base) != in(i)->in(AddPNode::Address) ||
+ in(i)->in(AddPNode::Offset) != y) {
+ doit = false;
+ break;
+ }
+ // Accumulate type for resulting Phi
+ type = type->meet(in(i)->in(AddPNode::Base)->bottom_type());
+ }
+ Node* base = NULL;
+ if (doit) {
+ // Check for neighboring AddP nodes in a tree.
+ // If they have a base, use it.
+ for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) {
+ Node* u = this->fast_out(k);
+ if (u->is_AddP()) {
+ Node* base2 = u->in(AddPNode::Base);
+ if (base2 != NULL && !base2->is_top()) {
+ if (base == NULL)
+ base = base2;
+ else if (base != base2)
+ { doit = false; break; }
+ }
+ }
+ }
+ }
+ if (doit) {
+ if (base == NULL) {
+ base = new (phase->C, in(0)->req()) PhiNode(in(0), type, NULL);
+ for (uint i = 1; i < req(); i++) {
+ base->init_req(i, in(i)->in(AddPNode::Base));
+ }
+ phase->is_IterGVN()->register_new_node_with_optimizer(base);
+ }
+ return new (phase->C, 4) AddPNode(base, base, y);
+ }
+ }
+ }
+
// Split phis through memory merges, so that the memory merges will go away.
// Piggy-back this transformation on the search for a unique input....
// It will be as if the merged memory is the unique value of the phi.
diff --git a/src/share/vm/opto/escape.cpp b/src/share/vm/opto/escape.cpp
index 111443cd1..342b80a13 100644
--- a/src/share/vm/opto/escape.cpp
+++ b/src/share/vm/opto/escape.cpp
@@ -1989,20 +1989,15 @@ void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *pha
case Op_Allocate:
{
Node *k = call->in(AllocateNode::KlassNode);
- const TypeKlassPtr *kt;
- if (k->Opcode() == Op_LoadKlass) {
- kt = k->as_Load()->type()->isa_klassptr();
- } else {
- // Also works for DecodeN(LoadNKlass).
- kt = k->as_Type()->type()->isa_klassptr();
- }
+ const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
assert(kt != NULL, "TypeKlassPtr required.");
ciKlass* cik = kt->klass();
- ciInstanceKlass* ciik = cik->as_instance_klass();
PointsToNode::EscapeState es;
uint edge_to;
- if (cik->is_subclass_of(_compile->env()->Thread_klass()) || ciik->has_finalizer()) {
+ if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
+ !cik->is_instance_klass() || // StressReflectiveCode
+ cik->as_instance_klass()->has_finalizer()) {
es = PointsToNode::GlobalEscape;
edge_to = _phantom_object; // Could not be worse
} else {
@@ -2017,13 +2012,28 @@ void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *pha
case Op_AllocateArray:
{
- int length = call->in(AllocateNode::ALength)->find_int_con(-1);
- if (length < 0 || length > EliminateAllocationArraySizeLimit) {
- // Not scalar replaceable if the length is not constant or too big.
- ptnode_adr(call_idx)->_scalar_replaceable = false;
+
+ Node *k = call->in(AllocateNode::KlassNode);
+ const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
+ assert(kt != NULL, "TypeKlassPtr required.");
+ ciKlass* cik = kt->klass();
+
+ PointsToNode::EscapeState es;
+ uint edge_to;
+ if (!cik->is_array_klass()) { // StressReflectiveCode
+ es = PointsToNode::GlobalEscape;
+ edge_to = _phantom_object;
+ } else {
+ es = PointsToNode::NoEscape;
+ edge_to = call_idx;
+ int length = call->in(AllocateNode::ALength)->find_int_con(-1);
+ if (length < 0 || length > EliminateAllocationArraySizeLimit) {
+ // Not scalar replaceable if the length is not constant or too big.
+ ptnode_adr(call_idx)->_scalar_replaceable = false;
+ }
}
- set_escape_state(call_idx, PointsToNode::NoEscape);
- add_pointsto_edge(resproj_idx, call_idx);
+ set_escape_state(call_idx, es);
+ add_pointsto_edge(resproj_idx, edge_to);
_processed.set(resproj_idx);
break;
}
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index 4b459859e..0a2f44d15 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -2764,6 +2764,9 @@ class CommandLineFlags {
product(intx, NmethodSweepFraction, 4, \
"Number of invocations of sweeper to cover all nmethods") \
\
+ product(intx, NmethodSweepCheckInterval, 5, \
+ "Compilers wake up every n seconds to possibly sweep nmethods") \
+ \
notproduct(intx, MemProfilingInterval, 500, \
"Time between each invocation of the MemProfiler") \
\
diff --git a/src/share/vm/runtime/java.cpp b/src/share/vm/runtime/java.cpp
index ad992664a..8efea9a66 100644
--- a/src/share/vm/runtime/java.cpp
+++ b/src/share/vm/runtime/java.cpp
@@ -470,6 +470,7 @@ void vm_exit(int code) {
void notify_vm_shutdown() {
// For now, just a dtrace probe.
HS_DTRACE_PROBE(hotspot, vm__shutdown);
+ HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}
void vm_direct_exit(int code) {
diff --git a/src/share/vm/runtime/jniHandles.hpp b/src/share/vm/runtime/jniHandles.hpp
index 0cd24950c..73bf84b3c 100644
--- a/src/share/vm/runtime/jniHandles.hpp
+++ b/src/share/vm/runtime/jniHandles.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,8 +63,14 @@ class JNIHandles : AllStatic {
// refers to NULL (as is the case for any weak reference).
static jmethodID make_jmethod_id(methodHandle mh);
static void destroy_jmethod_id(jmethodID mid);
+ // Use resolve_jmethod_id() in situations where the caller is expected
+ // to provide a valid jmethodID; the only sanity checks are in asserts;
+ // result guaranteed not to be NULL.
inline static methodOop resolve_jmethod_id(jmethodID mid);
- inline static methodOop checked_resolve_jmethod_id(jmethodID mid); // NULL on invalid jmethodID
+ // Use checked_resolve_jmethod_id() in situations where the caller
+ // should provide a valid jmethodID, but might not. NULL is returned
+ // when the jmethodID does not refer to a valid method.
+ inline static methodOop checked_resolve_jmethod_id(jmethodID mid);
static void change_method_associated_with_jmethod_id(jmethodID jmid, methodHandle mh);
// Sentinel marking deleted handles in block. Note that we cannot store NULL as
@@ -200,12 +206,8 @@ inline methodOop JNIHandles::resolve_jmethod_id(jmethodID mid) {
};
inline methodOop JNIHandles::checked_resolve_jmethod_id(jmethodID mid) {
- if (mid == NULL) {
- return (methodOop) NULL;
- }
-
- oop o = resolve_non_null((jobject) mid);
- if (!o->is_method()) {
+ oop o = resolve_external_guard((jobject) mid);
+ if (o == NULL || !o->is_method()) {
return (methodOop) NULL;
}
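
The split spelled out above suggests a usage rule: code that minted the jmethodID itself may use resolve_jmethod_id(), while code taking ids from outside (e.g. a JVM TI agent) should use the checked variant and handle NULL. A hedged sketch of the defensive path, assuming HotSpot-internal types; the surrounding function and error constant are the usual JVM TI ones, shown for illustration:

// Defensive resolution of an externally supplied jmethodID.
methodOop m = JNIHandles::checked_resolve_jmethod_id(mid);
if (m == NULL) {
  // stale, deallocated, or garbage id from the caller
  return JVMTI_ERROR_INVALID_METHODID;
}
// m is a valid methodOop from here on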
diff --git a/src/share/vm/runtime/safepoint.cpp b/src/share/vm/runtime/safepoint.cpp
index 283896292..af68055bd 100644
--- a/src/share/vm/runtime/safepoint.cpp
+++ b/src/share/vm/runtime/safepoint.cpp
@@ -472,7 +472,7 @@ void SafepointSynchronize::do_cleanup_tasks() {
}
TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
- NMethodSweeper::sweep();
+ NMethodSweeper::scan_stacks();
}
diff --git a/src/share/vm/runtime/sweeper.cpp b/src/share/vm/runtime/sweeper.cpp
index 9b319ef38..d348817e8 100644
--- a/src/share/vm/runtime/sweeper.cpp
+++ b/src/share/vm/runtime/sweeper.cpp
@@ -33,6 +33,8 @@ int NMethodSweeper::_invocations = 0; // No. of invocations left until we
jint NMethodSweeper::_locked_seen = 0;
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool NMethodSweeper::_rescan = false;
+bool NMethodSweeper::_do_sweep = false;
+jint NMethodSweeper::_sweep_started = 0;
bool NMethodSweeper::_was_full = false;
jint NMethodSweeper::_advise_to_sweep = 0;
jlong NMethodSweeper::_last_was_full = 0;
@@ -50,14 +52,20 @@ public:
};
static MarkActivationClosure mark_activation_closure;
-void NMethodSweeper::sweep() {
+void NMethodSweeper::scan_stacks() {
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
if (!MethodFlushing) return;
+ _do_sweep = true;
// No need to synchronize access, since this is always executed at a
// safepoint. If we aren't in the middle of scan and a rescan
- // hasn't been requested then just return.
- if (_current == NULL && !_rescan) return;
+ // hasn't been requested then just return. If UseCodeCacheFlushing is on and
+ // code cache flushing is in progress, don't skip sweeping to help make progress
+ // clearing space in the code cache.
+ if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
+ _do_sweep = false;
+ return;
+ }
// Make sure CompiledIC_lock in unlocked, since we might update some
// inline caches. If it is, we just bail-out and try later.
@@ -68,7 +76,7 @@ void NMethodSweeper::sweep() {
if (_current == NULL) {
_seen = 0;
_invocations = NmethodSweepFraction;
- _current = CodeCache::first();
+ _current = CodeCache::first_nmethod();
_traversals += 1;
if (PrintMethodFlushing) {
tty->print_cr("### Sweep: stack traversal %d", _traversals);
@@ -81,48 +89,9 @@ void NMethodSweeper::sweep() {
_not_entrant_seen_on_stack = 0;
}
- if (PrintMethodFlushing && Verbose) {
- tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
- }
-
- // We want to visit all nmethods after NmethodSweepFraction invocations.
- // If invocation is 1 we do the rest
- int todo = CodeCache::nof_blobs();
- if (_invocations != 1) {
- todo = (CodeCache::nof_blobs() - _seen) / _invocations;
- _invocations--;
- }
-
- for(int i = 0; i < todo && _current != NULL; i++) {
- CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
- if (_current->is_nmethod()) {
- process_nmethod((nmethod *)_current);
- }
- _seen++;
- _current = next;
- }
- // Because we could stop on a codeBlob other than an nmethod we skip forward
- // to the next nmethod (if any). codeBlobs other than nmethods can be freed
- // async to us and make _current invalid while we sleep.
- while (_current != NULL && !_current->is_nmethod()) {
- _current = CodeCache::next(_current);
- }
-
- if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
- // we've completed a scan without making progress but there were
- // nmethods we were unable to process either because they were
- // locked or were still on stack. We don't have to aggresively
- // clean them up so just stop scanning. We could scan once more
- // but that complicates the control logic and it's unlikely to
- // matter much.
- if (PrintMethodFlushing) {
- tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
- }
- }
-
if (UseCodeCacheFlushing) {
if (!CodeCache::needs_flushing()) {
- // In a safepoint, no race with setters
+ // scan_stacks() runs during a safepoint, no race with setters
_advise_to_sweep = 0;
}
@@ -155,13 +124,99 @@ void NMethodSweeper::sweep() {
}
}
+void NMethodSweeper::possibly_sweep() {
+ if ((!MethodFlushing) || (!_do_sweep)) return;
+
+ if (_invocations > 0) {
+ // Only one thread at a time will sweep
+ jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
+ if (old != 0) {
+ return;
+ }
+ sweep_code_cache();
+ }
+ _sweep_started = 0;
+}
+
+void NMethodSweeper::sweep_code_cache() {
+#ifdef ASSERT
+ jlong sweep_start;
+ if(PrintMethodFlushing) {
+ sweep_start = os::javaTimeMillis();
+ }
+#endif
+ if (PrintMethodFlushing && Verbose) {
+ tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
+ }
+
+ // We want to visit all nmethods after NmethodSweepFraction invocations.
+ // If invocation is 1 we do the rest
+ int todo = CodeCache::nof_blobs();
+ if (_invocations > 1) {
+ todo = (CodeCache::nof_blobs() - _seen) / _invocations;
+ }
+
+ // Compilers may check to sweep more often than stack scans happen,
+ // don't keep trying once it is all scanned
+ _invocations--;
+
+ assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
+ assert(!CodeCache_lock->owned_by_self(), "just checking");
+
+ {
+ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
+ for(int i = 0; i < todo && _current != NULL; i++) {
+
+ // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
+ // Other blobs can be deleted by other threads
+ // Read next before we potentially delete current
+ CodeBlob* next = CodeCache::next_nmethod(_current);
+
+ // Now ready to process nmethod and give up CodeCache_lock
+ {
+ MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ process_nmethod((nmethod *)_current);
+ }
+ _seen++;
+ _current = next;
+ }
+
+ // Skip forward to the next nmethod (if any). Code blobs other than nmethods
+ // can be freed async to us and make _current invalid while we sleep.
+ _current = CodeCache::next_nmethod(_current);
+ }
+
+ if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+ // we've completed a scan without making progress but there were
+ // nmethods we were unable to process either because they were
+ // locked or were still on stack. We don't have to aggressively
+ // clean them up so just stop scanning. We could scan once more
+ // but that complicates the control logic and it's unlikely to
+ // matter much.
+ if (PrintMethodFlushing) {
+ tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
+ }
+ }
+
+#ifdef ASSERT
+ if(PrintMethodFlushing) {
+ jlong sweep_end = os::javaTimeMillis();
+ tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
+ }
+#endif
+}
+
void NMethodSweeper::process_nmethod(nmethod *nm) {
+ assert(!CodeCache_lock->owned_by_self(), "just checking");
+
// Skip methods that are currently referenced by the VM
if (nm->is_locked_by_vm()) {
// But still remember to clean-up inline caches for alive nmethods
if (nm->is_alive()) {
// Clean-up all inline caches that points to zombie/non-reentrant methods
+ MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
} else {
_locked_seen++;
@@ -178,6 +233,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
}
+ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nm->flush();
} else {
if (PrintMethodFlushing && Verbose) {
@@ -197,10 +253,11 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
_rescan = true;
} else {
// Still alive, clean up its inline caches
+ MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
// we couldn't transition this nmethod so don't immediately
// request a rescan. If this method stays on the stack for a
- // long time we don't want to keep rescanning at every safepoint.
+ // long time we don't want to keep rescanning the code cache.
_not_entrant_seen_on_stack++;
}
} else if (nm->is_unloaded()) {
@@ -209,6 +266,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
if (nm->is_osr_method()) {
// No inline caches will ever point to osr methods, so we can just remove it
+ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nm->flush();
} else {
nm->make_zombie();
@@ -227,6 +285,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
}
// Clean-up all inline caches that points to zombie/non-reentrant methods
+ MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
}
}
@@ -235,8 +294,8 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
// they will call a vm op that comes here. This code attempts to speculatively
// unload the oldest half of the nmethods (based on the compile job id) by
// saving the old code in a list in the CodeCache. Then
-// execution resumes. If a method so marked is not called by the second
-// safepoint from the current one, the nmethod will be marked non-entrant and
+// execution resumes. If a method so marked is not called by the second sweeper
+// stack traversal after the current one, the nmethod will be marked non-entrant and
// got rid of by normal sweeping. If the method is called, the methodOop's
// _code field is restored and the methodOop/nmethod
// go back to their normal state.
@@ -364,8 +423,8 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
xtty->end_elem();
}
- // Shut off compiler. Sweeper will run exiting from this safepoint
- // and turn it back on if it clears enough space
+ // Shut off compiler. Sweeper will start over with a new stack scan and
+ // traversal cycle and turn it back on if it clears enough space.
if (was_full()) {
_last_was_full = os::javaTimeMillis();
CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
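
The gate in possibly_sweep() above relies on an atomic compare-and-exchange so that, of all the compiler threads calling in, only one performs the sweep at a time. A self-contained analogue using std::atomic in place of HotSpot's Atomic::cmpxchg; sweep_code_cache() mirrors the method above, but the rest is illustrative:

#include <atomic>

std::atomic<int> sweep_started{0};

void sweep_code_cache();       // the actual work, defined elsewhere

void possibly_sweep() {
  int expected = 0;
  // Equivalent of Atomic::cmpxchg(1, &_sweep_started, 0): only the
  // thread that flips 0 -> 1 proceeds; everyone else returns at once.
  if (!sweep_started.compare_exchange_strong(expected, 1)) {
    return;
  }
  sweep_code_cache();
  sweep_started.store(0);      // reopen the gate for the next cycle
}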
diff --git a/src/share/vm/runtime/sweeper.hpp b/src/share/vm/runtime/sweeper.hpp
index 69b2e2056..8afdb0668 100644
--- a/src/share/vm/runtime/sweeper.hpp
+++ b/src/share/vm/runtime/sweeper.hpp
@@ -35,6 +35,8 @@ class NMethodSweeper : public AllStatic {
static bool _rescan; // Indicates that we should do a full rescan of the
// of the code cache looking for work to do.
+ static bool _do_sweep; // Flag to skip the conc sweep if no stack scan happened
+ static jint _sweep_started; // Flag to control conc sweeper
static int _locked_seen; // Number of locked nmethods encountered during the scan
static int _not_entrant_seen_on_stack; // Number of not-entrant nmethods that are still on stack
@@ -48,7 +50,9 @@ class NMethodSweeper : public AllStatic {
public:
static long traversal_count() { return _traversals; }
- static void sweep(); // Invoked at the end of each safepoint
+ static void scan_stacks(); // Invoked at the end of each safepoint
+ static void sweep_code_cache(); // Concurrent part of sweep job
+ static void possibly_sweep(); // Compiler threads call this to sweep
static void notify(nmethod* nm) {
// Perform a full scan of the code cache from the beginning. No
diff --git a/src/share/vm/runtime/vframe.cpp b/src/share/vm/runtime/vframe.cpp
index a4d25b20f..ba3ac85da 100644
--- a/src/share/vm/runtime/vframe.cpp
+++ b/src/share/vm/runtime/vframe.cpp
@@ -101,8 +101,8 @@ GrowableArray<MonitorInfo*>* javaVFrame::locked_monitors() {
bool found_first_monitor = false;
ObjectMonitor *pending_monitor = thread()->current_pending_monitor();
ObjectMonitor *waiting_monitor = thread()->current_waiting_monitor();
- oop pending_obj = (pending_monitor != NULL ? (oop) pending_monitor->object() : NULL);
- oop waiting_obj = (waiting_monitor != NULL ? (oop) waiting_monitor->object() : NULL);
+ oop pending_obj = (pending_monitor != NULL ? (oop) pending_monitor->object() : (oop) NULL);
+ oop waiting_obj = (waiting_monitor != NULL ? (oop) waiting_monitor->object() : (oop) NULL);
for (int index = (mons->length()-1); index >= 0; index--) {
MonitorInfo* monitor = mons->at(index);
diff --git a/src/share/vm/runtime/vm_version.cpp b/src/share/vm/runtime/vm_version.cpp
index 39c08fc7f..a05366835 100644
--- a/src/share/vm/runtime/vm_version.cpp
+++ b/src/share/vm/runtime/vm_version.cpp
@@ -190,6 +190,8 @@ const char* Abstract_VM_Version::internal_vm_info_string() {
#define HOTSPOT_BUILD_COMPILER "Workshop 5.8"
#elif __SUNPRO_CC == 0x590
#define HOTSPOT_BUILD_COMPILER "Workshop 5.9"
+ #elif __SUNPRO_CC == 0x5100
+ #define HOTSPOT_BUILD_COMPILER "Sun Studio 12u1"
#else
#define HOTSPOT_BUILD_COMPILER "unknown Workshop:" XSTR(__SUNPRO_CC)
#endif
diff --git a/src/share/vm/utilities/dtrace.hpp b/src/share/vm/utilities/dtrace.hpp
index e4e9f03a4..f06b2fcac 100644
--- a/src/share/vm/utilities/dtrace.hpp
+++ b/src/share/vm/utilities/dtrace.hpp
@@ -29,6 +29,10 @@
#define DTRACE_ONLY(x) x
#define NOT_DTRACE(x)
+// Work around dtrace tail call bug 6672627 until it is fixed in solaris 10.
+#define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG() \
+ do { volatile size_t dtrace_workaround_tail_call_bug = 1; } while (0)
+
#else // ndef SOLARIS || ndef DTRACE_ENABLED
#define DTRACE_ONLY(x)
@@ -41,6 +45,8 @@
#define DTRACE_PROBE4(a,b,c,d,e,f) {;}
#define DTRACE_PROBE5(a,b,c,d,e,f,g) {;}
+#define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG()
+
#endif
#define HS_DTRACE_PROBE_FN(provider,name)\