aboutsummaryrefslogtreecommitdiff
path: root/src/share/vm
diff options
context:
space:
mode:
authorjohnc <none@none>2011-04-14 13:45:41 -0700
committerjohnc <none@none>2011-04-14 13:45:41 -0700
commit499aa53b4cd03c796ba8de6b8f0fb2c4227d59b2 (patch)
tree664b8a5052247018585f50cec5e04c1f99b3b54f /src/share/vm
parent3dba0c694c7215ccc865fe0f65352ba4ec5456e8 (diff)
parent7554f3eed7f2c3b8a4916dd0516ef8f97f14ede9 (diff)
Merge
Diffstat (limited to 'src/share/vm')
-rw-r--r--src/share/vm/c1/c1_CodeStubs.hpp102
-rw-r--r--src/share/vm/c1/c1_GraphBuilder.cpp49
-rw-r--r--src/share/vm/c1/c1_LIRGenerator.cpp221
-rw-r--r--src/share/vm/c1/c1_LIRGenerator.hpp6
-rw-r--r--src/share/vm/classfile/vmSymbols.hpp4
-rw-r--r--src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp20
-rw-r--r--src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp7
-rw-r--r--src/share/vm/gc_implementation/g1/g1_globals.hpp14
-rw-r--r--src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp6
-rw-r--r--src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp5
-rw-r--r--src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp50
-rw-r--r--src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp3
-rw-r--r--src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp8
-rw-r--r--src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp5
-rw-r--r--src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp8
-rw-r--r--src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp5
-rw-r--r--src/share/vm/interpreter/abstractInterpreter.hpp3
-rw-r--r--src/share/vm/interpreter/cppInterpreter.cpp3
-rw-r--r--src/share/vm/interpreter/interpreter.cpp15
-rw-r--r--src/share/vm/interpreter/templateInterpreter.cpp1
-rw-r--r--src/share/vm/oops/instanceKlass.hpp2
-rw-r--r--src/share/vm/opto/compile.cpp81
-rw-r--r--src/share/vm/opto/graphKit.cpp63
-rw-r--r--src/share/vm/opto/graphKit.hpp10
-rw-r--r--src/share/vm/opto/lcm.cpp22
-rw-r--r--src/share/vm/opto/library_call.cpp204
-rw-r--r--src/share/vm/opto/memnode.cpp9
-rw-r--r--src/share/vm/opto/output.cpp21
-rw-r--r--src/share/vm/prims/jni.cpp23
-rw-r--r--src/share/vm/prims/unsafe.cpp57
-rw-r--r--src/share/vm/runtime/arguments.cpp40
-rw-r--r--src/share/vm/runtime/globals.hpp10
32 files changed, 849 insertions, 228 deletions
diff --git a/src/share/vm/c1/c1_CodeStubs.hpp b/src/share/vm/c1/c1_CodeStubs.hpp
index 1367f080d..128a6d48c 100644
--- a/src/share/vm/c1/c1_CodeStubs.hpp
+++ b/src/share/vm/c1/c1_CodeStubs.hpp
@@ -519,42 +519,126 @@ class ArrayCopyStub: public CodeStub {
// Code stubs for Garbage-First barriers.
class G1PreBarrierStub: public CodeStub {
private:
+ bool _do_load;
LIR_Opr _addr;
LIR_Opr _pre_val;
LIR_PatchCode _patch_code;
CodeEmitInfo* _info;
public:
- // pre_val (a temporary register) must be a register;
+ // Version that _does_ generate a load of the previous value from addr.
// addr (the address of the field to be read) must be a LIR_Address
+ // pre_val (a temporary register) must be a register;
G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) :
- _addr(addr), _pre_val(pre_val), _patch_code(patch_code), _info(info)
+ _addr(addr), _pre_val(pre_val), _do_load(true),
+ _patch_code(patch_code), _info(info)
{
assert(_pre_val->is_register(), "should be temporary register");
assert(_addr->is_address(), "should be the address of the field");
}
+ // Version that _does not_ generate load of the previous value; the
+ // previous value is assumed to have already been loaded into pre_val.
+ G1PreBarrierStub(LIR_Opr pre_val) :
+ _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val), _do_load(false),
+ _patch_code(lir_patch_none), _info(NULL)
+ {
+ assert(_pre_val->is_register(), "should be a register");
+ }
+
LIR_Opr addr() const { return _addr; }
LIR_Opr pre_val() const { return _pre_val; }
LIR_PatchCode patch_code() const { return _patch_code; }
CodeEmitInfo* info() const { return _info; }
+ bool do_load() const { return _do_load; }
virtual void emit_code(LIR_Assembler* e);
virtual void visit(LIR_OpVisitState* visitor) {
- // don't pass in the code emit info since it's processed in the fast
- // path
- if (_info != NULL)
- visitor->do_slow_case(_info);
- else
+ if (_do_load) {
+ // don't pass in the code emit info since it's processed in the fast
+ // path
+ if (_info != NULL)
+ visitor->do_slow_case(_info);
+ else
+ visitor->do_slow_case();
+
+ visitor->do_input(_addr);
+ visitor->do_temp(_pre_val);
+ } else {
visitor->do_slow_case();
- visitor->do_input(_addr);
- visitor->do_temp(_pre_val);
+ visitor->do_input(_pre_val);
+ }
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("G1PreBarrierStub"); }
#endif // PRODUCT
};
+// This G1 barrier code stub is used in Unsafe.getObject.
+// It generates a sequence of guards around the SATB
+// barrier code that are used to detect when we have
+// the referent field of a Reference object.
+// The first check is assumed to have been generated
+// in the code generated for Unsafe.getObject().
+
+class G1UnsafeGetObjSATBBarrierStub: public CodeStub {
+ private:
+ LIR_Opr _val;
+ LIR_Opr _src;
+
+ LIR_Opr _tmp;
+ LIR_Opr _thread;
+
+ bool _gen_src_check;
+
+ public:
+ // A G1 barrier that is guarded by generated guards that determine whether
+ // val (which is the result of Unsafe.getObject() should be recorded in an
+ // SATB log buffer. We could be reading the referent field of a Reference object
+ // using Unsafe.getObject() and we need to record the referent.
+ //
+ // * val is the operand returned by the unsafe.getObject routine.
+ // * src is the base object
+ // * tmp is a temp used to load the klass of src, and then reference type
+ // * thread is the thread object.
+
+ G1UnsafeGetObjSATBBarrierStub(LIR_Opr val, LIR_Opr src,
+ LIR_Opr tmp, LIR_Opr thread,
+ bool gen_src_check) :
+ _val(val), _src(src),
+ _tmp(tmp), _thread(thread),
+ _gen_src_check(gen_src_check)
+ {
+ assert(_val->is_register(), "should have already been loaded");
+ assert(_src->is_register(), "should have already been loaded");
+
+ assert(_tmp->is_register(), "should be a temporary register");
+ }
+
+ LIR_Opr val() const { return _val; }
+ LIR_Opr src() const { return _src; }
+
+ LIR_Opr tmp() const { return _tmp; }
+ LIR_Opr thread() const { return _thread; }
+
+ bool gen_src_check() const { return _gen_src_check; }
+
+ virtual void emit_code(LIR_Assembler* e);
+
+ virtual void visit(LIR_OpVisitState* visitor) {
+ visitor->do_slow_case();
+ visitor->do_input(_val);
+ visitor->do_input(_src);
+ visitor->do_input(_thread);
+
+ visitor->do_temp(_tmp);
+ }
+
+#ifndef PRODUCT
+ virtual void print_name(outputStream* out) const { out->print("G1UnsafeGetObjSATBBarrierStub"); }
+#endif // PRODUCT
+};
+
class G1PostBarrierStub: public CodeStub {
private:
LIR_Opr _addr;
diff --git a/src/share/vm/c1/c1_GraphBuilder.cpp b/src/share/vm/c1/c1_GraphBuilder.cpp
index 9bac474f5..ffe9e1248 100644
--- a/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -2913,6 +2913,46 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
block()->set_end(end);
break;
}
+
+ case vmIntrinsics::_Reference_get:
+ {
+ if (UseG1GC) {
+        // With java.lang.ref.Reference.get() we must go through the
+ // intrinsic - when G1 is enabled - even when get() is the root
+ // method of the compile so that, if necessary, the value in
+ // the referent field of the reference object gets recorded by
+ // the pre-barrier code.
+ // Specifically, if G1 is enabled, the value in the referent
+ // field is recorded by the G1 SATB pre barrier. This will
+ // result in the referent being marked live and the reference
+ // object removed from the list of discovered references during
+ // reference processing.
+
+ // Set up a stream so that appending instructions works properly.
+ ciBytecodeStream s(scope->method());
+ s.reset_to_bci(0);
+ scope_data()->set_stream(&s);
+ s.next();
+
+ // setup the initial block state
+ _block = start_block;
+ _state = start_block->state()->copy_for_parsing();
+ _last = start_block;
+ load_local(objectType, 0);
+
+ // Emit the intrinsic node.
+ bool result = try_inline_intrinsics(scope->method());
+ if (!result) BAILOUT("failed to inline intrinsic");
+ method_return(apop());
+
+ // connect the begin and end blocks and we're all done.
+ BlockEnd* end = last()->as_BlockEnd();
+ block()->set_end(end);
+ break;
+ }
+ // Otherwise, fall thru
+ }
+
default:
scope_data()->add_to_work_list(start_block);
iterate_all_blocks();
@@ -3150,6 +3190,15 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
append_unsafe_CAS(callee);
return true;
+ case vmIntrinsics::_Reference_get:
+ // It is only when G1 is enabled that we absolutely
+ // need to use the intrinsic version of Reference.get()
+ // so that the value in the referent field, if necessary,
+ // can be registered by the pre-barrier code.
+ if (!UseG1GC) return false;
+ preserves_state = true;
+ break;
+
default : return false; // do not inline
}
// create intrinsic node
diff --git a/src/share/vm/c1/c1_LIRGenerator.cpp b/src/share/vm/c1/c1_LIRGenerator.cpp
index 33278ef50..8226935f0 100644
--- a/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -1209,6 +1209,38 @@ void LIRGenerator::do_Return(Return* x) {
set_no_result(x);
}
+// Example: ref.get()
+// Combination of LoadField and g1 pre-write barrier
+void LIRGenerator::do_Reference_get(Intrinsic* x) {
+
+ const int referent_offset = java_lang_ref_Reference::referent_offset;
+ guarantee(referent_offset > 0, "referent offset not initialized");
+
+ assert(x->number_of_arguments() == 1, "wrong type");
+
+ LIRItem reference(x->argument_at(0), this);
+ reference.load_item();
+
+  // need to perform the null check on the reference object
+ CodeEmitInfo* info = NULL;
+ if (x->needs_null_check()) {
+ info = state_for(x);
+ }
+
+ LIR_Address* referent_field_adr =
+ new LIR_Address(reference.result(), referent_offset, T_OBJECT);
+
+ LIR_Opr result = rlock_result(x);
+
+ __ load(referent_field_adr, result, info);
+
+ // Register the value in the referent field with the pre-barrier
+ pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
+ result /* pre_val */,
+ false /* do_load */,
+ false /* patch */,
+ NULL /* info */);
+}
// Example: object.getClass ()
void LIRGenerator::do_getClass(Intrinsic* x) {
@@ -1351,13 +1383,14 @@ LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
// Various barriers
-void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
+void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
+ bool do_load, bool patch, CodeEmitInfo* info) {
// Do the pre-write barrier, if any.
switch (_bs->kind()) {
#ifndef SERIALGC
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
- G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info);
+ G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
break;
#endif // SERIALGC
case BarrierSet::CardTableModRef:
@@ -1398,9 +1431,8 @@ void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
-void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
- if (G1DisablePreBarrier) return;
-
+void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
+ bool do_load, bool patch, CodeEmitInfo* info) {
// First we test whether marking is in progress.
BasicType flag_type;
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
@@ -1419,26 +1451,40 @@ void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patc
// Read the marking-in-progress flag.
LIR_Opr flag_val = new_register(T_INT);
__ load(mark_active_flag_addr, flag_val);
+ __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
- LIR_PatchCode pre_val_patch_code =
- patch ? lir_patch_normal : lir_patch_none;
+ LIR_PatchCode pre_val_patch_code = lir_patch_none;
- LIR_Opr pre_val = new_register(T_OBJECT);
+ CodeStub* slow;
- __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
- if (!addr_opr->is_address()) {
- assert(addr_opr->is_register(), "must be");
- addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
+ if (do_load) {
+ assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
+ assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
+
+ if (patch)
+ pre_val_patch_code = lir_patch_normal;
+
+ pre_val = new_register(T_OBJECT);
+
+ if (!addr_opr->is_address()) {
+ assert(addr_opr->is_register(), "must be");
+ addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
+ }
+ slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
+ } else {
+ assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
+ assert(pre_val->is_register(), "must be");
+ assert(pre_val->type() == T_OBJECT, "must be an object");
+ assert(info == NULL, "sanity");
+
+ slow = new G1PreBarrierStub(pre_val);
}
- CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
- info);
+
__ branch(lir_cond_notEqual, T_INT, slow);
__ branch_destination(slow->continuation());
}
void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
- if (G1DisablePostBarrier) return;
-
// If the "new_val" is a constant NULL, no barrier is necessary.
if (new_val->is_constant() &&
new_val->as_constant_ptr()->as_jobject() == NULL) return;
@@ -1662,6 +1708,8 @@ void LIRGenerator::do_StoreField(StoreField* x) {
if (is_oop) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(address),
+ LIR_OprFact::illegalOpr /* pre_val */,
+                  true /* do_load */,
needs_patching,
(info ? new CodeEmitInfo(info) : NULL));
}
@@ -2091,9 +2139,144 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
off.load_item();
src.load_item();
- LIR_Opr reg = reg = rlock_result(x, x->basic_type());
+ LIR_Opr reg = rlock_result(x, x->basic_type());
get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
+
+#ifndef SERIALGC
+ // We might be reading the value of the referent field of a
+ // Reference object in order to attach it back to the live
+ // object graph. If G1 is enabled then we need to record
+ // the value that is being returned in an SATB log buffer.
+ //
+ // We need to generate code similar to the following...
+ //
+ // if (offset == java_lang_ref_Reference::referent_offset) {
+ // if (src != NULL) {
+ // if (klass(src)->reference_type() != REF_NONE) {
+ // pre_barrier(..., reg, ...);
+ // }
+ // }
+ // }
+ //
+ // The first non-constant check of either the offset or
+ // the src operand will be done here; the remainder
+ // will take place in the generated code stub.
+
+ if (UseG1GC && type == T_OBJECT) {
+ bool gen_code_stub = true; // Assume we need to generate the slow code stub.
+ bool gen_offset_check = true; // Assume the code stub has to generate the offset guard.
+ bool gen_source_check = true; // Assume the code stub has to check the src object for null.
+
+ if (off.is_constant()) {
+ jlong off_con = (off.type()->is_int() ?
+ (jlong) off.get_jint_constant() :
+ off.get_jlong_constant());
+
+
+ if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
+ // The constant offset is something other than referent_offset.
+ // We can skip generating/checking the remaining guards and
+ // skip generation of the code stub.
+ gen_code_stub = false;
+ } else {
+ // The constant offset is the same as referent_offset -
+ // we do not need to generate a runtime offset check.
+ gen_offset_check = false;
+ }
+ }
+
+ // We don't need to generate stub if the source object is an array
+ if (gen_code_stub && src.type()->is_array()) {
+ gen_code_stub = false;
+ }
+
+ if (gen_code_stub) {
+ // We still need to continue with the checks.
+ if (src.is_constant()) {
+ ciObject* src_con = src.get_jobject_constant();
+
+ if (src_con->is_null_object()) {
+ // The constant src object is null - We can skip
+ // generating the code stub.
+ gen_code_stub = false;
+ } else {
+ // Non-null constant source object. We still have to generate
+ // the slow stub - but we don't need to generate the runtime
+ // null object check.
+ gen_source_check = false;
+ }
+ }
+ }
+
+ if (gen_code_stub) {
+      // Temporaries.
+ LIR_Opr src_klass = new_register(T_OBJECT);
+
+ // Get the thread pointer for the pre-barrier
+ LIR_Opr thread = getThreadPointer();
+
+ CodeStub* stub;
+
+      // We can generate one runtime check here. Let's start with
+ // the offset check.
+ if (gen_offset_check) {
+ // if (offset == referent_offset) -> slow code stub
+ // If offset is an int then we can do the comparison with the
+ // referent_offset constant; otherwise we need to move
+ // referent_offset into a temporary register and generate
+ // a reg-reg compare.
+
+ LIR_Opr referent_off;
+
+ if (off.type()->is_int()) {
+ referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
+ } else {
+ assert(off.type()->is_long(), "what else?");
+ referent_off = new_register(T_LONG);
+ __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
+ }
+
+ __ cmp(lir_cond_equal, off.result(), referent_off);
+
+ // Optionally generate "src == null" check.
+ stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
+ src_klass, thread,
+ gen_source_check);
+
+ __ branch(lir_cond_equal, as_BasicType(off.type()), stub);
+ } else {
+ if (gen_source_check) {
+ // offset is a const and equals referent offset
+ // if (source != null) -> slow code stub
+ __ cmp(lir_cond_notEqual, src.result(), LIR_OprFact::oopConst(NULL));
+
+ // Since we are generating the "if src == null" guard here,
+ // there is no need to generate the "src == null" check again.
+ stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
+ src_klass, thread,
+ false);
+
+ __ branch(lir_cond_notEqual, T_OBJECT, stub);
+ } else {
+ // We have statically determined that offset == referent_offset
+ // && src != null so we unconditionally branch to code stub
+ // to perform the guards and record reg in the SATB log buffer.
+
+ stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
+ src_klass, thread,
+ false);
+
+ __ branch(lir_cond_always, T_ILLEGAL, stub);
+ }
+ }
+
+ // Continuation point
+ __ branch_destination(stub->continuation());
+ }
+ }
+#endif // SERIALGC
+
if (x->is_volatile() && os::is_MP()) __ membar_acquire();
}
@@ -2759,6 +2942,10 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
do_AttemptUpdate(x);
break;
+ case vmIntrinsics::_Reference_get:
+ do_Reference_get(x);
+ break;
+
default: ShouldNotReachHere(); break;
}
}
diff --git a/src/share/vm/c1/c1_LIRGenerator.hpp b/src/share/vm/c1/c1_LIRGenerator.hpp
index 4b9c4aeca..2b8fc23e6 100644
--- a/src/share/vm/c1/c1_LIRGenerator.hpp
+++ b/src/share/vm/c1/c1_LIRGenerator.hpp
@@ -246,6 +246,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_AttemptUpdate(Intrinsic* x);
void do_NIOCheckIndex(Intrinsic* x);
void do_FPIntrinsics(Intrinsic* x);
+ void do_Reference_get(Intrinsic* x);
void do_UnsafePrefetch(UnsafePrefetch* x, bool is_store);
@@ -260,13 +261,14 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
// generic interface
- void pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info);
+ void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info);
void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
// specific implementations
// pre barriers
- void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info);
+ void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
+ bool do_load, bool patch, CodeEmitInfo* info);
// post barriers
diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp
index 00bd191c8..6b72a3ff3 100644
--- a/src/share/vm/classfile/vmSymbols.hpp
+++ b/src/share/vm/classfile/vmSymbols.hpp
@@ -677,6 +677,10 @@
do_intrinsic(_checkIndex, java_nio_Buffer, checkIndex_name, int_int_signature, F_R) \
do_name( checkIndex_name, "checkIndex") \
\
+ /* java/lang/ref/Reference */ \
+ do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R) \
+ \
+ \
do_class(sun_misc_AtomicLongCSImpl, "sun/misc/AtomicLongCSImpl") \
do_intrinsic(_get_AtomicLong, sun_misc_AtomicLongCSImpl, get_name, void_long_signature, F_R) \
/* (symbols get_name and void_long_signature defined above) */ \
diff --git a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
index 8145aa9fc..59d259065 100644
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,9 @@ G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
- assert(pre_val->is_oop_or_null(true), "Error");
+ // Nulls should have been already filtered.
+ assert(pre_val->is_oop(true), "Error");
+
if (!JavaThread::satb_mark_queue_set().is_active()) return;
Thread* thr = Thread::current();
if (thr->is_Java_thread()) {
@@ -59,20 +61,6 @@ void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
}
}
-// When we know the current java thread:
-template <class T> void
-G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,
- oop new_val,
- JavaThread* jt) {
- if (!JavaThread::satb_mark_queue_set().is_active()) return;
- T heap_oop = oopDesc::load_heap_oop(field);
- if (!oopDesc::is_null(heap_oop)) {
- oop pre_val = oopDesc::decode_heap_oop_not_null(heap_oop);
- assert(pre_val->is_oop(true /* ignore mark word */), "Error");
- jt->satb_mark_queue().enqueue(pre_val);
- }
-}
-
template <class T> void
G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
if (!JavaThread::satb_mark_queue_set().is_active()) return;
diff --git a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp
index 50cc99d09..dde3ba4be 100644
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp
@@ -37,12 +37,11 @@ class DirtyCardQueueSet;
// snapshot-at-the-beginning marking.
class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
-private:
+public:
// Add "pre_val" to a set of objects that may have been disconnected from the
// pre-marking object graph.
static void enqueue(oop pre_val);
-public:
G1SATBCardTableModRefBS(MemRegion whole_heap,
int max_covered_regions);
@@ -61,10 +60,6 @@ public:
}
}
- // When we know the current java thread:
- template <class T> static void write_ref_field_pre_static(T* field, oop newVal,
- JavaThread* jt);
-
// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
diff --git a/src/share/vm/gc_implementation/g1/g1_globals.hpp b/src/share/vm/gc_implementation/g1/g1_globals.hpp
index b42949f22..5fd535533 100644
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -89,13 +89,9 @@
"The number of discovered reference objects to process before " \
"draining concurrent marking work queues.") \
\
- experimental(bool, G1UseConcMarkReferenceProcessing, false, \
+ experimental(bool, G1UseConcMarkReferenceProcessing, true, \
"If true, enable reference discovery during concurrent " \
- "marking and reference processing at the end of remark " \
- "(unsafe).") \
- \
- develop(bool, G1SATBBarrierPrintNullPreVals, false, \
- "If true, count frac of ptr writes with null pre-vals.") \
+ "marking and reference processing at the end of remark.") \
\
product(intx, G1SATBBufferSize, 1*K, \
"Number of entries in an SATB log buffer.") \
@@ -150,12 +146,6 @@
develop(bool, G1PrintParCleanupStats, false, \
"When true, print extra stats about parallel cleanup.") \
\
- develop(bool, G1DisablePreBarrier, false, \
- "Disable generation of pre-barrier (i.e., marking barrier) ") \
- \
- develop(bool, G1DisablePostBarrier, false, \
- "Disable generation of post-barrier (i.e., RS barrier) ") \
- \
product(intx, G1UpdateBufferSize, 256, \
"Size of an update buffer") \
\
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
index df0865ee3..acbe76f94 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -176,10 +176,6 @@ void PSOldGen::compact() {
object_mark_sweep()->compact(ZapUnusedHeapArea);
}
-void PSOldGen::move_and_update(ParCompactionManager* cm) {
- PSParallelCompact::move_and_update(cm, PSParallelCompact::old_space_id);
-}
-
size_t PSOldGen::contiguous_available() const {
return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
index 0d9cdd75d..76f43f51a 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -143,9 +143,6 @@ class PSOldGen : public CHeapObj {
void adjust_pointers();
void compact();
- // Parallel old
- virtual void move_and_update(ParCompactionManager* cm);
-
// Size info
size_t capacity_in_bytes() const { return object_space()->capacity_in_bytes(); }
size_t used_in_bytes() const { return object_space()->used_in_bytes(); }
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index 5f7504ded..7ff75bd8d 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -2104,11 +2104,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
// klasses are used in the update of an object?
compact_perm(vmthread_cm);
- if (UseParallelOldGCCompacting) {
- compact();
- } else {
- compact_serial(vmthread_cm);
- }
+ compact();
// Reset the mark bitmap, summary data, and do other bookkeeping. Must be
// done before resizing.
@@ -2582,18 +2578,16 @@ void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
// each thread?
if (total_dense_prefix_regions > 0) {
uint tasks_for_dense_prefix = 1;
- if (UseParallelDensePrefixUpdate) {
- if (total_dense_prefix_regions <=
- (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
- // Don't over partition. This assumes that
- // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
- // so there are not many regions to process.
- tasks_for_dense_prefix = parallel_gc_threads;
- } else {
- // Over partition
- tasks_for_dense_prefix = parallel_gc_threads *
- PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
- }
+ if (total_dense_prefix_regions <=
+ (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
+ // Don't over partition. This assumes that
+ // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
+ // so there are not many regions to process.
+ tasks_for_dense_prefix = parallel_gc_threads;
+ } else {
+ // Over partition
+ tasks_for_dense_prefix = parallel_gc_threads *
+ PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
}
size_t regions_per_thread = total_dense_prefix_regions /
tasks_for_dense_prefix;
@@ -2733,21 +2727,6 @@ void PSParallelCompact::verify_complete(SpaceId space_id) {
}
#endif // #ifdef ASSERT
-void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
- EventMark m("5 compact serial");
- TraceTime tm("compact serial", print_phases(), true, gclog_or_tty);
-
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-
- PSYoungGen* young_gen = heap->young_gen();
- PSOldGen* old_gen = heap->old_gen();
-
- old_gen->start_array()->reset();
- old_gen->move_and_update(cm);
- young_gen->move_and_update(cm);
-}
-
void
PSParallelCompact::follow_weak_klass_links() {
// All klasses on the revisit stack are marked at this point.
@@ -3530,11 +3509,8 @@ PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) {
"Object liveness is wrong.");
return ParMarkBitMap::incomplete;
}
- assert(UseParallelOldGCDensePrefix ||
- (HeapMaximumCompactionInterval > 1) ||
- (MarkSweepAlwaysCompactCount > 1) ||
- (forwarding_ptr == new_pointer),
- "Calculation of new location is incorrect");
+ assert(HeapMaximumCompactionInterval > 1 || MarkSweepAlwaysCompactCount > 1 ||
+ forwarding_ptr == new_pointer, "new location is incorrect");
return ParMarkBitMap::incomplete;
}
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
index 0e2c6477a..f47bff5c2 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -1027,9 +1027,6 @@ class PSParallelCompact : AllStatic {
ParallelTaskTerminator* terminator_ptr,
uint parallel_gc_threads);
- // For debugging only - compacts the old gen serially
- static void compact_serial(ParCompactionManager* cm);
-
// If objects are left in eden after a collection, try to move the boundary
// and absorb them into the old gen. Returns true if eden was emptied.
static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp b/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp
index 60fea3165..eacb47fb2 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -121,12 +121,6 @@ void PSPermGen::compute_new_size(size_t used_before_collection) {
}
}
-
-
-void PSPermGen::move_and_update(ParCompactionManager* cm) {
- PSParallelCompact::move_and_update(cm, PSParallelCompact::perm_space_id);
-}
-
void PSPermGen::precompact() {
// Reset start array first.
_start_array.reset();
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp
index 7a890fc8d..ba384d429 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,9 +51,6 @@ class PSPermGen : public PSOldGen {
// MarkSweep code
virtual void precompact();
- // Parallel old
- virtual void move_and_update(ParCompactionManager* cm);
-
virtual const char* name() const { return "PSPermGen"; }
};
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp
index b5e6462e7..5355abe0a 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -792,12 +792,6 @@ void PSYoungGen::compact() {
to_mark_sweep()->compact(false);
}
-void PSYoungGen::move_and_update(ParCompactionManager* cm) {
- PSParallelCompact::move_and_update(cm, PSParallelCompact::eden_space_id);
- PSParallelCompact::move_and_update(cm, PSParallelCompact::from_space_id);
- PSParallelCompact::move_and_update(cm, PSParallelCompact::to_space_id);
-}
-
void PSYoungGen::print() const { print_on(tty); }
void PSYoungGen::print_on(outputStream* st) const {
st->print(" %-15s", "PSYoungGen");
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp
index 3d4c56637..898b56f57 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -127,9 +127,6 @@ class PSYoungGen : public CHeapObj {
void adjust_pointers();
void compact();
- // Parallel Old
- void move_and_update(ParCompactionManager* cm);
-
// Called during/after gc
void swap_spaces();
diff --git a/src/share/vm/interpreter/abstractInterpreter.hpp b/src/share/vm/interpreter/abstractInterpreter.hpp
index a1908a2c9..c407fbaba 100644
--- a/src/share/vm/interpreter/abstractInterpreter.hpp
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp
@@ -104,6 +104,7 @@ class AbstractInterpreter: AllStatic {
java_lang_math_sqrt, // implementation of java.lang.Math.sqrt (x)
java_lang_math_log, // implementation of java.lang.Math.log (x)
java_lang_math_log10, // implementation of java.lang.Math.log10 (x)
+ java_lang_ref_reference_get, // implementation of java.lang.ref.Reference.get()
number_of_method_entries,
invalid = -1
};
@@ -140,7 +141,7 @@ class AbstractInterpreter: AllStatic {
// Method activation
static MethodKind method_kind(methodHandle m);
static address entry_for_kind(MethodKind k) { assert(0 <= k && k < number_of_method_entries, "illegal kind"); return _entry_table[k]; }
- static address entry_for_method(methodHandle m) { return _entry_table[method_kind(m)]; }
+ static address entry_for_method(methodHandle m) { return entry_for_kind(method_kind(m)); }
static void print_method_kind(MethodKind kind) PRODUCT_RETURN;
diff --git a/src/share/vm/interpreter/cppInterpreter.cpp b/src/share/vm/interpreter/cppInterpreter.cpp
index 6ce16e271..9a6669519 100644
--- a/src/share/vm/interpreter/cppInterpreter.cpp
+++ b/src/share/vm/interpreter/cppInterpreter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -125,6 +125,7 @@ void CppInterpreterGenerator::generate_all() {
method_entry(java_lang_math_sqrt );
method_entry(java_lang_math_log );
method_entry(java_lang_math_log10 );
+ method_entry(java_lang_ref_reference_get);
Interpreter::_native_entry_begin = Interpreter::code()->code_end();
method_entry(native);
method_entry(native_synchronized);
diff --git a/src/share/vm/interpreter/interpreter.cpp b/src/share/vm/interpreter/interpreter.cpp
index 04a9d68f4..137f50ee3 100644
--- a/src/share/vm/interpreter/interpreter.cpp
+++ b/src/share/vm/interpreter/interpreter.cpp
@@ -208,12 +208,6 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
return empty;
}
- // Accessor method?
- if (m->is_accessor()) {
- assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
- return accessor;
- }
-
// Special intrinsic method?
// Note: This test must come _after_ the test for native methods,
// otherwise we will run into problems with JDK 1.2, see also
@@ -227,6 +221,15 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
case vmIntrinsics::_dlog : return java_lang_math_log ;
case vmIntrinsics::_dlog10: return java_lang_math_log10;
+
+ case vmIntrinsics::_Reference_get:
+ return java_lang_ref_reference_get;
+ }
+
+ // Accessor method?
+ if (m->is_accessor()) {
+ assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
+ return accessor;
}
// Note: for now: zero locals for all non-empty methods
diff --git a/src/share/vm/interpreter/templateInterpreter.cpp b/src/share/vm/interpreter/templateInterpreter.cpp
index 0d1125ecb..6b22dc1eb 100644
--- a/src/share/vm/interpreter/templateInterpreter.cpp
+++ b/src/share/vm/interpreter/templateInterpreter.cpp
@@ -372,6 +372,7 @@ void TemplateInterpreterGenerator::generate_all() {
method_entry(java_lang_math_sqrt )
method_entry(java_lang_math_log )
method_entry(java_lang_math_log10)
+ method_entry(java_lang_ref_reference_get)
// all native method kinds (must be one contiguous block)
Interpreter::_native_entry_begin = Interpreter::code()->code_end();
diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp
index e0d70ae68..ab9ddf326 100644
--- a/src/share/vm/oops/instanceKlass.hpp
+++ b/src/share/vm/oops/instanceKlass.hpp
@@ -401,6 +401,8 @@ class instanceKlass: public Klass {
ReferenceType reference_type() const { return _reference_type; }
void set_reference_type(ReferenceType t) { _reference_type = t; }
+ static int reference_type_offset_in_bytes() { return offset_of(instanceKlass, _reference_type); }
+
// find local field, returns true if found
bool find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const;
// find field in direct superinterfaces, returns the interface in which the field is defined
diff --git a/src/share/vm/opto/compile.cpp b/src/share/vm/opto/compile.cpp
index e6084be0e..7117f7ccc 100644
--- a/src/share/vm/opto/compile.cpp
+++ b/src/share/vm/opto/compile.cpp
@@ -629,7 +629,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
initial_gvn()->transform_no_reclaim(top());
// Set up tf(), start(), and find a CallGenerator.
- CallGenerator* cg;
+ CallGenerator* cg = NULL;
if (is_osr_compilation()) {
const TypeTuple *domain = StartOSRNode::osr_domain();
const TypeTuple *range = TypeTuple::make_range(method()->signature());
@@ -644,9 +644,24 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
StartNode* s = new (this, 2) StartNode(root(), tf()->domain());
initial_gvn()->set_type_bottom(s);
init_start(s);
- float past_uses = method()->interpreter_invocation_count();
- float expected_uses = past_uses;
- cg = CallGenerator::for_inline(method(), expected_uses);
+ if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) {
+ // With java.lang.ref.Reference.get() we must go through the
+ // intrinsic when G1 is enabled - even when get() is the root
+ // method of the compile - so that, if necessary, the value in
+ // the referent field of the reference object gets recorded by
+ // the pre-barrier code.
+ // Specifically, if G1 is enabled, the value in the referent
+ // field is recorded by the G1 SATB pre barrier. This will
+ // result in the referent being marked live and the reference
+ // object removed from the list of discovered references during
+ // reference processing.
+ cg = find_intrinsic(method(), false);
+ }
+ if (cg == NULL) {
+ float past_uses = method()->interpreter_invocation_count();
+ float expected_uses = past_uses;
+ cg = CallGenerator::for_inline(method(), expected_uses);
+ }
}
if (failing()) return;
if (cg == NULL) {
@@ -2041,6 +2056,52 @@ static bool oop_offset_is_sane(const TypeInstPtr* tp) {
// Note that OffsetBot and OffsetTop are very negative.
}
+// Eliminate trivially redundant StoreCMs and accumulate their
+// precedence edges.
+static void eliminate_redundant_card_marks(Node* n) {
+ assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
+ if (n->in(MemNode::Address)->outcnt() > 1) {
+ // There are multiple users of the same address so it might be
+ // possible to eliminate some of the StoreCMs
+ Node* mem = n->in(MemNode::Memory);
+ Node* adr = n->in(MemNode::Address);
+ Node* val = n->in(MemNode::ValueIn);
+ Node* prev = n;
+ bool done = false;
+ // Walk the chain of StoreCMs eliminating ones that match. As
+ // long as it's a chain of single users then the optimization is
+ // safe. Eliminating partially redundant StoreCMs would require
+ // cloning copies down the other paths.
+ while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) {
+ if (adr == mem->in(MemNode::Address) &&
+ val == mem->in(MemNode::ValueIn)) {
+ // redundant StoreCM
+ if (mem->req() > MemNode::OopStore) {
+ // Hasn't been processed by this code yet.
+ n->add_prec(mem->in(MemNode::OopStore));
+ } else {
+ // Already converted to precedence edge
+ for (uint i = mem->req(); i < mem->len(); i++) {
+ // Accumulate any precedence edges
+ if (mem->in(i) != NULL) {
+ n->add_prec(mem->in(i));
+ }
+ }
+ // Everything above this point has been processed.
+ done = true;
+ }
+ // Eliminate the previous StoreCM
+ prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
+ assert(mem->outcnt() == 0, "should be dead");
+ mem->disconnect_inputs(NULL);
+ } else {
+ prev = mem;
+ }
+ mem = prev->in(MemNode::Memory);
+ }
+ }
+}
+
//------------------------------final_graph_reshaping_impl----------------------
// Implement items 1-5 from final_graph_reshaping below.
static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
@@ -2167,9 +2228,19 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
frc.inc_float_count();
goto handle_mem;
+ case Op_StoreCM:
+ {
+ // Convert OopStore dependence into precedence edge
+ Node* prec = n->in(MemNode::OopStore);
+ n->del_req(MemNode::OopStore);
+ n->add_prec(prec);
+ eliminate_redundant_card_marks(n);
+ }
+
+ // fall through
+
case Op_StoreB:
case Op_StoreC:
- case Op_StoreCM:
case Op_StorePConditional:
case Op_StoreI:
case Op_StoreL:
diff --git a/src/share/vm/opto/graphKit.cpp b/src/share/vm/opto/graphKit.cpp
index afa2efff9..f2405cabe 100644
--- a/src/share/vm/opto/graphKit.cpp
+++ b/src/share/vm/opto/graphKit.cpp
@@ -1457,19 +1457,22 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
}
-void GraphKit::pre_barrier(Node* ctl,
+void GraphKit::pre_barrier(bool do_load,
+ Node* ctl,
Node* obj,
Node* adr,
uint adr_idx,
Node* val,
const TypeOopPtr* val_type,
+ Node* pre_val,
BasicType bt) {
+
BarrierSet* bs = Universe::heap()->barrier_set();
set_control(ctl);
switch (bs->kind()) {
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
- g1_write_barrier_pre(obj, adr, adr_idx, val, val_type, bt);
+ g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
break;
case BarrierSet::CardTableModRef:
@@ -1532,7 +1535,11 @@ Node* GraphKit::store_oop(Node* ctl,
uint adr_idx = C->get_alias_index(adr_type);
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
- pre_barrier(control(), obj, adr, adr_idx, val, val_type, bt);
+ pre_barrier(true /* do_load */,
+ control(), obj, adr, adr_idx, val, val_type,
+ NULL /* pre_val */,
+ bt);
+
Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
return store;
@@ -3470,12 +3477,31 @@ void GraphKit::write_barrier_post(Node* oop_store,
}
// G1 pre/post barriers
-void GraphKit::g1_write_barrier_pre(Node* obj,
+void GraphKit::g1_write_barrier_pre(bool do_load,
+ Node* obj,
Node* adr,
uint alias_idx,
Node* val,
const TypeOopPtr* val_type,
+ Node* pre_val,
BasicType bt) {
+
+ // Some sanity checks
+ // Note: val is unused in this routine.
+
+ if (do_load) {
+ // We need to generate the load of the previous value
+ assert(obj != NULL, "must have a base");
+ assert(adr != NULL, "where are loading from?");
+ assert(pre_val == NULL, "loaded already?");
+ assert(val_type != NULL, "need a type");
+ } else {
+ // In this case both val_type and alias_idx are unused.
+ assert(pre_val != NULL, "must be loaded already");
+ assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
+ }
+ assert(bt == T_OBJECT, "or we shouldn't be here");
+
IdealKit ideal(this, true);
Node* tls = __ thread(); // ThreadLocalStorage
@@ -3497,32 +3523,28 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
PtrQueue::byte_offset_of_index());
const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 652
PtrQueue::byte_offset_of_buf());
- // Now the actual pointers into the thread
-
- // set_control( ctl);
+ // Now the actual pointers into the thread
Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
// Now some of the values
-
Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
// if (!marking)
__ if_then(marking, BoolTest::ne, zero); {
Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
- const Type* t1 = adr->bottom_type();
- const Type* t2 = val->bottom_type();
-
- Node* orig = __ load(no_ctrl, adr, val_type, bt, alias_idx);
- // if (orig != NULL)
- __ if_then(orig, BoolTest::ne, null()); {
- Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
-
+ if (do_load) {
// load original value
// alias_idx correct??
+ pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
+ }
+
+ // if (pre_val != NULL)
+ __ if_then(pre_val, BoolTest::ne, null()); {
+ Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
// is the queue for this thread full?
__ if_then(index, BoolTest::ne, zero, likely); {
@@ -3536,10 +3558,9 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
#endif
- // Now get the buffer location we will log the original value into and store it
+ // Now get the buffer location we will log the previous value into and store it
Node *log_addr = __ AddP(no_base, buffer, next_indexX);
- __ store(__ ctrl(), log_addr, orig, T_OBJECT, Compile::AliasIdxRaw);
-
+ __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
// update the index
__ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
@@ -3547,9 +3568,9 @@ void GraphKit::g1_write_barrier_pre(Node* obj,
// logging buffer is full, call the runtime
const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
- __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, tls);
+ __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
} __ end_if(); // (!index)
- } __ end_if(); // (orig != NULL)
+ } __ end_if(); // (pre_val != NULL)
} __ end_if(); // (!marking)
// Final sync IdealKit and GraphKit.
diff --git a/src/share/vm/opto/graphKit.hpp b/src/share/vm/opto/graphKit.hpp
index 4523326db..fdddd66e7 100644
--- a/src/share/vm/opto/graphKit.hpp
+++ b/src/share/vm/opto/graphKit.hpp
@@ -544,8 +544,10 @@ class GraphKit : public Phase {
BasicType bt);
// For the few case where the barriers need special help
- void pre_barrier(Node* ctl, Node* obj, Node* adr, uint adr_idx,
- Node* val, const TypeOopPtr* val_type, BasicType bt);
+ void pre_barrier(bool do_load, Node* ctl,
+ Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
+ Node* pre_val,
+ BasicType bt);
void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
Node* val, BasicType bt, bool use_precise);
@@ -671,11 +673,13 @@ class GraphKit : public Phase {
Node* adr, uint adr_idx, Node* val, bool use_precise);
// G1 pre/post barriers
- void g1_write_barrier_pre(Node* obj,
+ void g1_write_barrier_pre(bool do_load,
+ Node* obj,
Node* adr,
uint alias_idx,
Node* val,
const TypeOopPtr* val_type,
+ Node* pre_val,
BasicType bt);
void g1_write_barrier_post(Node* store,
diff --git a/src/share/vm/opto/lcm.cpp b/src/share/vm/opto/lcm.cpp
index 1767bce2d..5729c7eea 100644
--- a/src/share/vm/opto/lcm.cpp
+++ b/src/share/vm/opto/lcm.cpp
@@ -688,20 +688,22 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
}
ready_cnt[n->_idx] = local; // Count em up
- // A few node types require changing a required edge to a precedence edge
- // before allocation.
+#ifdef ASSERT
if( UseConcMarkSweepGC || UseG1GC ) {
if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
- // Note: Required edges with an index greater than oper_input_base
- // are not supported by the allocator.
- // Note2: Can only depend on unmatched edge being last,
- // can not depend on its absolute position.
- Node *oop_store = n->in(n->req() - 1);
- n->del_req(n->req() - 1);
- n->add_prec(oop_store);
- assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
+ // Check the precedence edges
+ for (uint prec = n->req(); prec < n->len(); prec++) {
+ Node* oop_store = n->in(prec);
+ if (oop_store != NULL) {
+ assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
+ }
+ }
}
}
+#endif
+
+ // A few node types require changing a required edge to a precedence edge
+ // before allocation.
if( n->is_Mach() && n->req() > TypeFunc::Parms &&
(n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
diff --git a/src/share/vm/opto/library_call.cpp b/src/share/vm/opto/library_call.cpp
index 0f73e144d..8f86f5fdc 100644
--- a/src/share/vm/opto/library_call.cpp
+++ b/src/share/vm/opto/library_call.cpp
@@ -166,6 +166,10 @@ class LibraryCallKit : public GraphKit {
// This returns Type::AnyPtr, RawPtr, or OopPtr.
int classify_unsafe_addr(Node* &base, Node* &offset);
Node* make_unsafe_address(Node* base, Node* offset);
+ // Helper for inline_unsafe_access.
+ // Generates the guards that check whether the result of
+ // Unsafe.getObject should be recorded in an SATB log buffer.
+ void insert_g1_pre_barrier(Node* base_oop, Node* offset, Node* pre_val);
bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
bool inline_unsafe_allocate();
@@ -240,6 +244,8 @@ class LibraryCallKit : public GraphKit {
bool inline_numberOfTrailingZeros(vmIntrinsics::ID id);
bool inline_bitCount(vmIntrinsics::ID id);
bool inline_reverseBytes(vmIntrinsics::ID id);
+
+ bool inline_reference_get();
};
@@ -336,6 +342,14 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
if (!UsePopCountInstruction) return NULL;
break;
+ case vmIntrinsics::_Reference_get:
+ // It is only when G1 is enabled that we absolutely
+ // need to use the intrinsic version of Reference.get()
+ // so that the value in the referent field, if necessary,
+ // can be registered by the pre-barrier code.
+ if (!UseG1GC) return NULL;
+ break;
+
default:
assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
@@ -387,6 +401,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
tty->print_cr("Intrinsic %s", str);
}
#endif
+
if (kit.try_to_inline()) {
if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
@@ -402,11 +417,19 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
}
if (PrintIntrinsics) {
- tty->print("Did not inline intrinsic %s%s at bci:%d in",
+ if (jvms->has_method()) {
+ // Not a root compile.
+ tty->print("Did not inline intrinsic %s%s at bci:%d in",
+ vmIntrinsics::name_at(intrinsic_id()),
+ (is_virtual() ? " (virtual)" : ""), kit.bci());
+ kit.caller()->print_short_name(tty);
+ tty->print_cr(" (%d bytes)", kit.caller()->code_size());
+ } else {
+ // Root compile
+ tty->print("Did not generate intrinsic %s%s at bci:%d in",
vmIntrinsics::name_at(intrinsic_id()),
(is_virtual() ? " (virtual)" : ""), kit.bci());
- kit.caller()->print_short_name(tty);
- tty->print_cr(" (%d bytes)", kit.caller()->code_size());
+ }
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
return NULL;
@@ -418,6 +441,14 @@ bool LibraryCallKit::try_to_inline() {
const bool is_native_ptr = true;
const bool is_static = true;
+ if (!jvms()->has_method()) {
+ // Root JVMState has a null method.
+ assert(map()->memory()->Opcode() == Op_Parm, "");
+ // Insert the memory aliasing node
+ set_all_memory(reset_memory());
+ }
+ assert(merged_memory(), "");
+
switch (intrinsic_id()) {
case vmIntrinsics::_hashCode:
return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
@@ -658,6 +689,9 @@ bool LibraryCallKit::try_to_inline() {
case vmIntrinsics::_getCallerClass:
return inline_native_Reflection_getCallerClass();
+ case vmIntrinsics::_Reference_get:
+ return inline_reference_get();
+
default:
// If you get here, it may be that someone has added a new intrinsic
// to the list in vmSymbols.hpp without implementing it here.
@@ -2076,6 +2110,106 @@ bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) {
const static BasicType T_ADDRESS_HOLDER = T_LONG;
+// Helper that guards and inserts a G1 pre-barrier.
+void LibraryCallKit::insert_g1_pre_barrier(Node* base_oop, Node* offset, Node* pre_val) {
+ assert(UseG1GC, "should not call this otherwise");
+
+ // We could be accessing the referent field of a reference object. If so, when G1
+ // is enabled, we need to log the value in the referent field in an SATB buffer.
+ // This routine performs some compile time filters and generates suitable
+ // runtime filters that guard the pre-barrier code.
+
+ // Some compile time checks.
+
+ // If offset is a constant, is it java_lang_ref_Reference::referent_offset?
+ const TypeX* otype = offset->find_intptr_t_type();
+ if (otype != NULL && otype->is_con() &&
+ otype->get_con() != java_lang_ref_Reference::referent_offset) {
+ // Constant offset but not the referent_offset so just return
+ return;
+ }
+
+ // We only need to generate the runtime guards for instances.
+ const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
+ if (btype != NULL) {
+ if (btype->isa_aryptr()) {
+ // Array type so nothing to do
+ return;
+ }
+
+ const TypeInstPtr* itype = btype->isa_instptr();
+ if (itype != NULL) {
+ // Can the klass of base_oop be statically determined
+ // to be _not_ a sub-class of Reference?
+ ciKlass* klass = itype->klass();
+ if (klass->is_subtype_of(env()->Reference_klass()) &&
+ !env()->Reference_klass()->is_subtype_of(klass)) {
+ return;
+ }
+ }
+ }
+
+ // The compile time filters did not reject base_oop/offset so
+ // we need to generate the following runtime filters
+ //
+ // if (offset == java_lang_ref_Reference::referent_offset) {
+ // if (base != null) {
+ // if (klass(base)->reference_type() != REF_NONE) {
+ // pre_barrier(_, pre_val, ...);
+ // }
+ // }
+ // }
+
+ float likely = PROB_LIKELY(0.999);
+ float unlikely = PROB_UNLIKELY(0.999);
+
+ IdealKit ideal(this);
+#define __ ideal.
+
+ const int reference_type_offset = instanceKlass::reference_type_offset_in_bytes() +
+ sizeof(oopDesc);
+
+ Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
+
+ __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
+ __ if_then(base_oop, BoolTest::ne, null(), likely); {
+
+ // Update graphKit memory and control from IdealKit.
+ sync_kit(ideal);
+
+ Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass()));
+ Node* is_instof = gen_instanceof(base_oop, ref_klass_con);
+
+ // Update IdealKit memory and control from graphKit.
+ __ sync_kit(this);
+
+ Node* one = __ ConI(1);
+
+ __ if_then(is_instof, BoolTest::eq, one, unlikely); {
+
+ // Update graphKit from IdealKit.
+ sync_kit(ideal);
+
+ // Use the pre-barrier to record the value in the referent field
+ pre_barrier(false /* do_load */,
+ __ ctrl(),
+ NULL /* obj */, NULL /* adr */, -1 /* alias_idx */, NULL /* val */, NULL /* val_type */,
+ pre_val /* pre_val */,
+ T_OBJECT);
+
+ // Update IdealKit from graphKit.
+ __ sync_kit(this);
+
+ } __ end_if(); // _ref_type != ref_none
+ } __ end_if(); // base != NULL
+ } __ end_if(); // offset == referent_offset
+
+ // Final sync IdealKit and GraphKit.
+ final_sync(ideal);
+#undef __
+}
+
+
// Interpret Unsafe.fieldOffset cookies correctly:
extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
@@ -2152,9 +2286,11 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// Build address expression. See the code in inline_unsafe_prefetch.
Node *adr;
Node *heap_base_oop = top();
+ Node* offset = top();
+
if (!is_native_ptr) {
// The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
- Node* offset = pop_pair();
+ offset = pop_pair();
// The base is either a Java object or a value produced by Unsafe.staticFieldBase
Node* base = pop();
// We currently rely on the cookies produced by Unsafe.xxxFieldOffset
@@ -2195,6 +2331,13 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// or Compile::must_alias will throw a diagnostic assert.)
bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
+ // If we are reading the value of the referent field of a Reference
+ // object (either by using Unsafe directly or through reflection)
+ // then, if G1 is enabled, we need to record the referent in an
+ // SATB log buffer using the pre-barrier mechanism.
+ bool need_read_barrier = UseG1GC && !is_native_ptr && !is_store &&
+ offset != top() && heap_base_oop != top();
+
if (!is_store && type == T_OBJECT) {
// Attempt to infer a sharper value type from the offset and base type.
ciKlass* sharpened_klass = NULL;
@@ -2278,8 +2421,13 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
case T_SHORT:
case T_INT:
case T_FLOAT:
+ push(p);
+ break;
case T_OBJECT:
- push( p );
+ if (need_read_barrier) {
+ insert_g1_pre_barrier(heap_base_oop, offset, p);
+ }
+ push(p);
break;
case T_ADDRESS:
// Cast to an int type.
@@ -2534,7 +2682,10 @@ bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
case T_OBJECT:
// reference stores need a store barrier.
// (They don't if CAS fails, but it isn't worth checking.)
- pre_barrier(control(), base, adr, alias_idx, newval, value_type->make_oopptr(), T_OBJECT);
+ pre_barrier(true /* do_load*/,
+ control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
+ NULL /* pre_val*/,
+ T_OBJECT);
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
@@ -5235,3 +5386,44 @@ LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,
copyfunc_addr, copyfunc_name, adr_type,
src_start, dest_start, copy_length XTOP);
}
+
+//----------------------------inline_reference_get----------------------------
+
+bool LibraryCallKit::inline_reference_get() {
+ const int nargs = 1; // self
+
+ guarantee(java_lang_ref_Reference::referent_offset > 0,
+ "should have already been set");
+
+ int referent_offset = java_lang_ref_Reference::referent_offset;
+
+ // Restore the stack and pop off the argument
+ _sp += nargs;
+ Node *reference_obj = pop();
+
+ // Null check on self without removing any arguments.
+ _sp += nargs;
+ reference_obj = do_null_check(reference_obj, T_OBJECT);
+ _sp -= nargs;;
+
+ if (stopped()) return true;
+
+ Node *adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
+
+ ciInstanceKlass* klass = env()->Object_klass();
+ const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
+
+ Node* no_ctrl = NULL;
+ Node *result = make_load(no_ctrl, adr, object_type, T_OBJECT);
+
+ // Use the pre-barrier to record the value in the referent field
+ pre_barrier(false /* do_load */,
+ control(),
+ NULL /* obj */, NULL /* adr */, -1 /* alias_idx */, NULL /* val */, NULL /* val_type */,
+ result /* pre_val */,
+ T_OBJECT);
+
+ push(result);
+ return true;
+}
+
diff --git a/src/share/vm/opto/memnode.cpp b/src/share/vm/opto/memnode.cpp
index d5d53132a..7ba8df53e 100644
--- a/src/share/vm/opto/memnode.cpp
+++ b/src/share/vm/opto/memnode.cpp
@@ -2159,9 +2159,12 @@ Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* mem = in(MemNode::Memory);
Node* address = in(MemNode::Address);
- // Back-to-back stores to same address? Fold em up.
- // Generally unsafe if I have intervening uses...
- if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address)) {
+ // Back-to-back stores to same address? Fold em up. Generally
+ // unsafe if I have intervening uses... Also disallowed for StoreCM
+ // since they must follow each StoreP operation. Redundant StoreCMs
+ // are eliminated just before matching in final_graph_reshape.
+ if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address) &&
+ mem->Opcode() != Op_StoreCM) {
// Looking at a dead closed cycle of memory?
assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
diff --git a/src/share/vm/opto/output.cpp b/src/share/vm/opto/output.cpp
index 1205cc263..2c185d5b3 100644
--- a/src/share/vm/opto/output.cpp
+++ b/src/share/vm/opto/output.cpp
@@ -1354,15 +1354,20 @@ void Compile::Fill_buffer() {
// Check that oop-store precedes the card-mark
else if( mach->ideal_Opcode() == Op_StoreCM ) {
uint storeCM_idx = j;
- Node *oop_store = mach->in(mach->_cnt); // First precedence edge
- assert( oop_store != NULL, "storeCM expects a precedence edge");
- uint i4;
- for( i4 = 0; i4 < last_inst; ++i4 ) {
- if( b->_nodes[i4] == oop_store ) break;
+ int count = 0;
+ for (uint prec = mach->req(); prec < mach->len(); prec++) {
+ Node *oop_store = mach->in(prec); // Precedence edge
+ if (oop_store == NULL) continue;
+ count++;
+ uint i4;
+ for( i4 = 0; i4 < last_inst; ++i4 ) {
+ if( b->_nodes[i4] == oop_store ) break;
+ }
+ // Note: This test can provide a false failure if other precedence
+ // edges have been added to the storeCMNode.
+ assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
}
- // Note: This test can provide a false failure if other precedence
- // edges have been added to the storeCMNode.
- assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
+ assert(count > 0, "storeCM expects at least one precedence edge");
}
#endif
diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp
index d77c7ce9e..56fa2e167 100644
--- a/src/share/vm/prims/jni.cpp
+++ b/src/share/vm/prims/jni.cpp
@@ -29,6 +29,9 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "interpreter/linkResolver.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif // SERIALGC
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/oopFactory.hpp"
@@ -1724,6 +1727,26 @@ JNI_ENTRY(jobject, jni_GetObjectField(JNIEnv *env, jobject obj, jfieldID fieldID
o = JvmtiExport::jni_GetField_probe(thread, obj, o, k, fieldID, false);
}
jobject ret = JNIHandles::make_local(env, o->obj_field(offset));
+#ifndef SERIALGC
+ // If G1 is enabled and we are accessing the value of the referent
+ // field in a reference object then we need to register a non-null
+ // referent with the SATB barrier.
+ if (UseG1GC) {
+ bool needs_barrier = false;
+
+ if (ret != NULL &&
+ offset == java_lang_ref_Reference::referent_offset &&
+ instanceKlass::cast(k)->reference_type() != REF_NONE) {
+ assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
+ needs_barrier = true;
+ }
+
+ if (needs_barrier) {
+ oop referent = JNIHandles::resolve(ret);
+ G1SATBCardTableModRefBS::enqueue(referent);
+ }
+ }
+#endif // SERIALGC
DTRACE_PROBE1(hotspot_jni, GetObjectField__return, ret);
return ret;
JNI_END
diff --git a/src/share/vm/prims/unsafe.cpp b/src/share/vm/prims/unsafe.cpp
index 5458bf70e..e1a21f030 100644
--- a/src/share/vm/prims/unsafe.cpp
+++ b/src/share/vm/prims/unsafe.cpp
@@ -24,6 +24,9 @@
#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif // SERIALGC
#include "memory/allocation.inline.hpp"
#include "prims/jni.h"
#include "prims/jvm.h"
@@ -193,7 +196,32 @@ UNSAFE_ENTRY(jobject, Unsafe_GetObject140(JNIEnv *env, jobject unsafe, jobject o
UnsafeWrapper("Unsafe_GetObject");
if (obj == NULL) THROW_0(vmSymbols::java_lang_NullPointerException());
GET_OOP_FIELD(obj, offset, v)
- return JNIHandles::make_local(env, v);
+ jobject ret = JNIHandles::make_local(env, v);
+#ifndef SERIALGC
+ // We could be accessing the referent field in a reference
+ // object. If G1 is enabled then we need to register a non-null
+ // referent with the SATB barrier.
+ if (UseG1GC) {
+ bool needs_barrier = false;
+
+ if (ret != NULL) {
+ if (offset == java_lang_ref_Reference::referent_offset) {
+ oop o = JNIHandles::resolve_non_null(obj);
+ klassOop k = o->klass();
+ if (instanceKlass::cast(k)->reference_type() != REF_NONE) {
+ assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
+ needs_barrier = true;
+ }
+ }
+ }
+
+ if (needs_barrier) {
+ oop referent = JNIHandles::resolve(ret);
+ G1SATBCardTableModRefBS::enqueue(referent);
+ }
+ }
+#endif // SERIALGC
+ return ret;
UNSAFE_END
UNSAFE_ENTRY(void, Unsafe_SetObject140(JNIEnv *env, jobject unsafe, jobject obj, jint offset, jobject x_h))
@@ -226,7 +254,32 @@ UNSAFE_END
UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
UnsafeWrapper("Unsafe_GetObject");
GET_OOP_FIELD(obj, offset, v)
- return JNIHandles::make_local(env, v);
+ jobject ret = JNIHandles::make_local(env, v);
+#ifndef SERIALGC
+ // We could be accessing the referent field in a reference
+ // object. If G1 is enabled then we need to register non-null
+ // referent with the SATB barrier.
+ if (UseG1GC) {
+ bool needs_barrier = false;
+
+ if (ret != NULL) {
+ if (offset == java_lang_ref_Reference::referent_offset && obj != NULL) {
+ oop o = JNIHandles::resolve(obj);
+ klassOop k = o->klass();
+ if (instanceKlass::cast(k)->reference_type() != REF_NONE) {
+ assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity");
+ needs_barrier = true;
+ }
+ }
+ }
+
+ if (needs_barrier) {
+ oop referent = JNIHandles::resolve(ret);
+ G1SATBCardTableModRefBS::enqueue(referent);
+ }
+ }
+#endif // SERIALGC
+ return ret;
UNSAFE_END
UNSAFE_ENTRY(void, Unsafe_SetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index fdf271c80..6cb615c4b 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -243,6 +243,12 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "MaxLiveObjectEvacuationRatio",
JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
{ "ForceSharedSpaces", JDK_Version::jdk_update(6,25), JDK_Version::jdk(8) },
+ { "UseParallelOldGCCompacting",
+ JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) },
+ { "UseParallelDensePrefixUpdate",
+ JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) },
+ { "UseParallelOldGCDensePrefix",
+ JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) },
{ "AllowTransitionalJSR292", JDK_Version::jdk(7), JDK_Version::jdk(8) },
{ NULL, JDK_Version(0), JDK_Version(0) }
};
@@ -800,26 +806,22 @@ bool Arguments::process_argument(const char* arg,
JDK_Version since = JDK_Version();
- if (parse_argument(arg, origin)) {
- // do nothing
- } else if (is_newly_obsolete(arg, &since)) {
- enum { bufsize = 256 };
- char buffer[bufsize];
- since.to_string(buffer, bufsize);
- jio_fprintf(defaultStream::error_stream(),
- "Warning: The flag %s has been EOL'd as of %s and will"
- " be ignored\n", arg, buffer);
- } else {
- if (!ignore_unrecognized) {
- jio_fprintf(defaultStream::error_stream(),
- "Unrecognized VM option '%s'\n", arg);
- // allow for commandline "commenting out" options like -XX:#+Verbose
- if (strlen(arg) == 0 || arg[0] != '#') {
- return false;
- }
- }
+ if (parse_argument(arg, origin) || ignore_unrecognized) {
+ return true;
}
- return true;
+
+ const char * const argname = *arg == '+' || *arg == '-' ? arg + 1 : arg;
+ if (is_newly_obsolete(arg, &since)) {
+ char version[256];
+ since.to_string(version, sizeof(version));
+ warning("ignoring option %s; support was removed in %s", argname, version);
+ return true;
+ }
+
+ jio_fprintf(defaultStream::error_stream(),
+ "Unrecognized VM option '%s'\n", argname);
+ // allow for commandline "commenting out" options like -XX:#+Verbose
+ return arg[0] == '#';
}
bool Arguments::process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized) {
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index 96f0abbb0..c16f88660 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -1355,13 +1355,6 @@ class CommandLineFlags {
product(bool, UseParallelOldGC, false, \
"Use the Parallel Old garbage collector") \
\
- product(bool, UseParallelOldGCCompacting, true, \
- "In the Parallel Old garbage collector use parallel compaction") \
- \
- product(bool, UseParallelDensePrefixUpdate, true, \
- "In the Parallel Old garbage collector use parallel dense" \
- " prefix update") \
- \
product(uintx, HeapMaximumCompactionInterval, 20, \
"How often should we maximally compact the heap (not allowing " \
"any dead space)") \
@@ -1381,9 +1374,6 @@ class CommandLineFlags {
"The standard deviation used by the par compact dead wood" \
"limiter (a number between 0-100).") \
\
- product(bool, UseParallelOldGCDensePrefix, true, \
- "Use a dense prefix with the Parallel Old garbage collector") \
- \
product(uintx, ParallelGCThreads, 0, \
"Number of parallel threads parallel gc will use") \
\