author    jrose <none@none>  2009-09-15 21:53:47 -0700
committer jrose <none@none>  2009-09-15 21:53:47 -0700
commit    1f74851d32708a8af114e72ad7f5a3f9732e1b38 (patch)
tree      061d1a4710569eacae84ef937b80acaabe25f844
parent    2c2ffa4ec1a4e1e82fe5569d333d8c1a80ad16b5 (diff)
6863023: need non-perm oops in code cache for JSR 292
Summary: Make a special root-list for those few nmethods which might contain non-perm oops.
Reviewed-by: twisti, kvn, never, jmasa, ysr
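The core mechanism of the change: instead of requiring every oop embedded in compiled code to be permanent, the code cache keeps a short side list of just those nmethods whose embedded oops are scavengable, and young collections scan only that list instead of every blob. A minimal standalone sketch of that bookkeeping follows; the types (FakeOop, FakeNMethod, FakeCodeCache) are illustrative stand-ins, not HotSpot's, and the push-front insertion mirrors add_scavenge_root_nmethod() in the patch below.

#include <cstddef>
#include <vector>

struct FakeOop { bool scavengable; };           // stand-in for a heap oop

struct FakeNMethod {
  std::vector<FakeOop*> embedded_oops;          // oops baked into the code
  FakeNMethod* scavenge_root_link = nullptr;    // intrusive side-list link
  bool on_scavenge_root_list = false;

  // Does this method embed any oop that a scavenge could move?
  bool detect_scavenge_root_oops() const {
    for (FakeOop* p : embedded_oops)
      if (p != nullptr && p->scavengable) return true;
    return false;
  }
};

struct FakeCodeCache {
  FakeNMethod* scavenge_root_nmethods = nullptr;

  void add_scavenge_root(FakeNMethod* nm) {     // push-front registration
    nm->on_scavenge_root_list = true;
    nm->scavenge_root_link = scavenge_root_nmethods;
    scavenge_root_nmethods = nm;
  }

  template <typename F>
  void scavenge_roots_do(F f) {                 // young GC walks only this list
    for (FakeNMethod* cur = scavenge_root_nmethods; cur != nullptr;
         cur = cur->scavenge_root_link)
      f(cur);
  }
};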
 agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java | 6
 agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java | 23
 src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp | 12
 src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp | 2
 src/cpu/x86/vm/c1_LIRAssembler_x86.cpp | 28
 src/cpu/x86/vm/c1_LIRGenerator_x86.cpp | 2
 src/cpu/x86/vm/x86_32.ad | 2
 src/cpu/x86/vm/x86_64.ad | 6
 src/share/vm/c1/c1_GraphBuilder.cpp | 2
 src/share/vm/c1/c1_InstructionPrinter.cpp | 6
 src/share/vm/c1/c1_LIRGenerator.cpp | 14
 src/share/vm/c1/c1_ValueType.cpp | 2
 src/share/vm/ci/ciEnv.cpp | 2
 src/share/vm/ci/ciEnv.hpp | 4
 src/share/vm/ci/ciObject.cpp | 25
 src/share/vm/ci/ciObject.hpp | 34
 src/share/vm/ci/ciObjectFactory.cpp | 3
 src/share/vm/classfile/systemDictionary.cpp | 4
 src/share/vm/code/codeBlob.hpp | 2
 src/share/vm/code/codeCache.cpp | 159
 src/share/vm/code/codeCache.hpp | 20
 src/share/vm/code/debugInfoRec.cpp | 2
 src/share/vm/code/dependencies.cpp | 2
 src/share/vm/code/nmethod.cpp | 218
 src/share/vm/code/nmethod.hpp | 41
 src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp | 9
 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp | 35
 src/share/vm/gc_implementation/g1/concurrentMark.cpp | 11
 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp | 19
 src/share/vm/gc_implementation/g1/g1MarkSweep.cpp | 8
 src/share/vm/gc_implementation/includeDB_gc_parallelScavenge | 2
 src/share/vm/gc_implementation/parNew/parNewGeneration.cpp | 15
 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp | 8
 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp | 7
 src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp | 13
 src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp | 3
 src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp | 29
 src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp | 4
 src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp | 6
 src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp | 2
 src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp | 15
 src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp | 3
 src/share/vm/gc_implementation/shared/markSweep.cpp | 1
 src/share/vm/gc_implementation/shared/markSweep.hpp | 3
 src/share/vm/gc_interface/collectedHeap.hpp | 8
 src/share/vm/memory/defNewGeneration.cpp | 12
 src/share/vm/memory/genCollectedHeap.cpp | 21
 src/share/vm/memory/genCollectedHeap.hpp | 12
 src/share/vm/memory/genMarkSweep.cpp | 10
 src/share/vm/memory/iterator.cpp | 39
 src/share/vm/memory/iterator.hpp | 49
 src/share/vm/memory/sharedHeap.cpp | 52
 src/share/vm/memory/sharedHeap.hpp | 17
 src/share/vm/oops/instanceKlass.cpp | 12
 src/share/vm/oops/oop.hpp | 1
 src/share/vm/oops/oop.inline2.hpp | 4
 src/share/vm/opto/output.cpp | 10
 src/share/vm/opto/parse.hpp | 2
 src/share/vm/opto/parse2.cpp | 3
 src/share/vm/opto/parse3.cpp | 11
 src/share/vm/opto/type.cpp | 29
 src/share/vm/opto/type.hpp | 5
 src/share/vm/prims/jvmtiTagMap.cpp | 6
 src/share/vm/runtime/arguments.cpp | 10
 src/share/vm/runtime/frame.cpp | 30
 src/share/vm/runtime/frame.hpp | 10
 src/share/vm/runtime/globals.hpp | 5
 src/share/vm/runtime/sweeper.cpp | 13
 src/share/vm/runtime/thread.cpp | 38
 src/share/vm/runtime/thread.hpp | 17
 src/share/vm/runtime/vmStructs.cpp | 5
 src/share/vm/runtime/vmThread.cpp | 6
 src/share/vm/runtime/vmThread.hpp | 2
 src/share/vm/utilities/debug.cpp | 5
 74 files changed, 979 insertions(+), 279 deletions(-)
diff --git a/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java b/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java
index 570c5814e..9fc04f5df 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java
@@ -33,6 +33,7 @@ import sun.jvm.hotspot.utilities.*;
public class CodeCache {
private static AddressField heapField;
+ private static AddressField scavengeRootNMethodsField;
private static VirtualConstructor virtualConstructor;
private CodeHeap heap;
@@ -49,6 +50,7 @@ public class CodeCache {
Type type = db.lookupType("CodeCache");
heapField = type.getAddressField("_heap");
+ scavengeRootNMethodsField = type.getAddressField("_scavenge_root_nmethods");
virtualConstructor = new VirtualConstructor(db);
// Add mappings for all possible CodeBlob subclasses
@@ -67,6 +69,10 @@ public class CodeCache {
heap = (CodeHeap) VMObjectFactory.newObject(CodeHeap.class, heapField.getValue());
}
+ public NMethod scavengeRootMethods() {
+ return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootNMethodsField.getValue());
+ }
+
public boolean contains(Address p) {
return getHeap().contains(p);
}
diff --git a/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java b/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java
index 36bcd0260..7f48d5807 100644
--- a/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java
@@ -40,7 +40,10 @@ public class NMethod extends CodeBlob {
/** != InvocationEntryBci if this nmethod is an on-stack replacement method */
private static CIntegerField entryBCIField;
/** To support simple linked-list chaining of nmethods */
- private static AddressField linkField;
+ private static AddressField osrLinkField;
+ private static AddressField scavengeRootLinkField;
+ private static CIntegerField scavengeRootStateField;
+
/** Offsets for different nmethod parts */
private static CIntegerField exceptionOffsetField;
private static CIntegerField deoptOffsetField;
@@ -87,7 +90,10 @@ public class NMethod extends CodeBlob {
zombieInstructionSizeField = type.getCIntegerField("_zombie_instruction_size");
methodField = type.getOopField("_method");
entryBCIField = type.getCIntegerField("_entry_bci");
- linkField = type.getAddressField("_link");
+ osrLinkField = type.getAddressField("_osr_link");
+ scavengeRootLinkField = type.getAddressField("_scavenge_root_link");
+ scavengeRootStateField = type.getCIntegerField("_scavenge_root_state");
+
exceptionOffsetField = type.getCIntegerField("_exception_offset");
deoptOffsetField = type.getCIntegerField("_deoptimize_offset");
origPCOffsetField = type.getCIntegerField("_orig_pc_offset");
@@ -219,10 +225,19 @@ public class NMethod extends CodeBlob {
return getEntryBCI();
}
- public NMethod getLink() {
- return (NMethod) VMObjectFactory.newObject(NMethod.class, linkField.getValue(addr));
+ public NMethod getOSRLink() {
+ return (NMethod) VMObjectFactory.newObject(NMethod.class, osrLinkField.getValue(addr));
+ }
+
+ public NMethod getScavengeRootLink() {
+ return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootLinkField.getValue(addr));
}
+ public int getScavengeRootState() {
+ return (int) scavengeRootStateField.getValue(addr);
+ }
+
+
/** Tells whether frames described by this nmethod can be
deoptimized. Note: native wrappers cannot be deoptimized. */
public boolean canBeDeoptimized() { return isJavaMethod(); }
diff --git a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
index c542a8bef..2583f7cee 100644
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
@@ -2171,7 +2171,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// subtype which we can't check or src is the same array as dst
// but not necessarily exactly of type default_type.
Label known_ok, halt;
- jobject2reg(op->expected_type()->encoding(), tmp);
+ jobject2reg(op->expected_type()->constant_encoding(), tmp);
__ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
if (basic_type != T_OBJECT) {
__ cmp(tmp, tmp2);
@@ -2429,7 +2429,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
assert(data->is_BitData(), "need BitData for checkcast");
Register mdo = k_RInfo;
Register data_val = Rtmp1;
- jobject2reg(md->encoding(), mdo);
+ jobject2reg(md->constant_encoding(), mdo);
int mdo_offset_bias = 0;
if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
@@ -2452,7 +2452,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
// patching may screw with our temporaries on sparc,
// so let's do it before loading the class
if (k->is_loaded()) {
- jobject2reg(k->encoding(), k_RInfo);
+ jobject2reg(k->constant_encoding(), k_RInfo);
} else {
jobject2reg_with_patching(k_RInfo, op->info_for_patch());
}
@@ -2513,7 +2513,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
// patching may screw with our temporaries on sparc,
// so let's do it before loading the class
if (k->is_loaded()) {
- jobject2reg(k->encoding(), k_RInfo);
+ jobject2reg(k->constant_encoding(), k_RInfo);
} else {
jobject2reg_with_patching(k_RInfo, op->info_for_patch());
}
@@ -2717,7 +2717,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
Register mdo = op->mdo()->as_register();
Register tmp1 = op->tmp1()->as_register();
- jobject2reg(md->encoding(), mdo);
+ jobject2reg(md->constant_encoding(), mdo);
int mdo_offset_bias = 0;
if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
data->size_in_bytes())) {
@@ -2774,7 +2774,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
if (receiver == NULL) {
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
mdo_offset_bias);
- jobject2reg(known_klass->encoding(), tmp1);
+ jobject2reg(known_klass->constant_encoding(), tmp1);
__ st_ptr(tmp1, recv_addr);
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias);
diff --git a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
index f9e123bbb..7325d0d74 100644
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
+++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
@@ -896,7 +896,7 @@ void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
LIR_Opr len = length.result();
BasicType elem_type = x->elt_type();
- __ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg);
+ __ oop2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
__ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
diff --git a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
index 982f85f5e..f8cdb23ee 100644
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
@@ -1638,7 +1638,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
jobject2reg_with_patching(k_RInfo, op->info_for_patch());
} else {
#ifdef _LP64
- __ movoop(k_RInfo, k->encoding());
+ __ movoop(k_RInfo, k->constant_encoding());
#else
k_RInfo = noreg;
#endif // _LP64
@@ -1661,7 +1661,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
assert(data != NULL, "need data for checkcast");
assert(data->is_BitData(), "need BitData for checkcast");
Register mdo = klass_RInfo;
- __ movoop(mdo, md->encoding());
+ __ movoop(mdo, md->constant_encoding());
Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
__ orl(data_addr, header_bits);
@@ -1679,7 +1679,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
#ifdef _LP64
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
#else
- __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding());
+ __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
#endif // _LP64
} else {
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
@@ -1696,7 +1696,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
#ifdef _LP64
__ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
#else
- __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->encoding());
+ __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
#endif // _LP64
if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
__ jcc(Assembler::notEqual, *stub->entry());
@@ -1707,7 +1707,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
#ifdef _LP64
__ cmpptr(klass_RInfo, k_RInfo);
#else
- __ cmpoop(klass_RInfo, k->encoding());
+ __ cmpoop(klass_RInfo, k->constant_encoding());
#endif // _LP64
__ jcc(Assembler::equal, done);
@@ -1715,7 +1715,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
#ifdef _LP64
__ push(k_RInfo);
#else
- __ pushoop(k->encoding());
+ __ pushoop(k->constant_encoding());
#endif // _LP64
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
@@ -1763,7 +1763,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
if (!k->is_loaded()) {
jobject2reg_with_patching(k_RInfo, op->info_for_patch());
} else {
- LP64_ONLY(__ movoop(k_RInfo, k->encoding()));
+ LP64_ONLY(__ movoop(k_RInfo, k->constant_encoding()));
}
assert(obj != k_RInfo, "must be different");
@@ -1774,7 +1774,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
// get object class
// not a safepoint as obj null check happens earlier
if (LP64_ONLY(false &&) k->is_loaded()) {
- NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding()));
+ NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()));
k_RInfo = noreg;
} else {
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
@@ -1791,14 +1791,14 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
#ifndef _LP64
if (k->is_loaded()) {
// See if we get an immediate positive hit
- __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->encoding());
+ __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
__ jcc(Assembler::equal, one);
if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() == k->super_check_offset()) {
// check for self
- __ cmpoop(klass_RInfo, k->encoding());
+ __ cmpoop(klass_RInfo, k->constant_encoding());
__ jcc(Assembler::equal, one);
__ push(klass_RInfo);
- __ pushoop(k->encoding());
+ __ pushoop(k->constant_encoding());
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
__ pop(dst);
@@ -3112,7 +3112,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// subtype which we can't check or src is the same array as dst
// but not necessarily exactly of type default_type.
Label known_ok, halt;
- __ movoop(tmp, default_type->encoding());
+ __ movoop(tmp, default_type->constant_encoding());
if (basic_type != T_OBJECT) {
__ cmpptr(tmp, dst_klass_addr);
__ jcc(Assembler::notEqual, halt);
@@ -3200,7 +3200,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
assert(data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register();
- __ movoop(mdo, md->encoding());
+ __ movoop(mdo, md->constant_encoding());
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ addl(counter_addr, DataLayout::counter_increment);
Bytecodes::Code bc = method->java_code_at_bci(bci);
@@ -3240,7 +3240,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciKlass* receiver = vc_data->receiver(i);
if (receiver == NULL) {
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
- __ movoop(recv_addr, known_klass->encoding());
+ __ movoop(recv_addr, known_klass->constant_encoding());
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ addl(data_addr, DataLayout::counter_increment);
return;
diff --git a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
index d49793aba..ea6cfb2e7 100644
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
@@ -994,7 +994,7 @@ void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
LIR_Opr len = length.result();
BasicType elem_type = x->elt_type();
- __ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg);
+ __ oop2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
__ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
diff --git a/src/cpu/x86/vm/x86_32.ad b/src/cpu/x86/vm/x86_32.ad
index de55d40d5..71657a809 100644
--- a/src/cpu/x86/vm/x86_32.ad
+++ b/src/cpu/x86/vm/x86_32.ad
@@ -379,7 +379,7 @@ void emit_d32_reloc(CodeBuffer &cbuf, int d32, RelocationHolder const& rspec,
int format) {
#ifdef ASSERT
if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) {
- assert(oop(d32)->is_oop() && oop(d32)->is_perm(), "cannot embed non-perm oops in code");
+ assert(oop(d32)->is_oop() && (ScavengeRootsInCode || !oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
}
#endif
cbuf.relocate(cbuf.inst_mark(), rspec, format);
diff --git a/src/cpu/x86/vm/x86_64.ad b/src/cpu/x86/vm/x86_64.ad
index fa03fe5a7..5927b5081 100644
--- a/src/cpu/x86/vm/x86_64.ad
+++ b/src/cpu/x86/vm/x86_64.ad
@@ -683,7 +683,7 @@ void emit_d32_reloc(CodeBuffer& cbuf,
#ifdef ASSERT
if (rspec.reloc()->type() == relocInfo::oop_type &&
d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
- assert(oop((intptr_t)d32)->is_oop() && oop((intptr_t)d32)->is_perm(), "cannot embed non-perm oops in code");
+ assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
}
#endif
cbuf.relocate(cbuf.inst_mark(), rspec, format);
@@ -721,8 +721,8 @@ void emit_d64_reloc(CodeBuffer& cbuf,
#ifdef ASSERT
if (rspec.reloc()->type() == relocInfo::oop_type &&
d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
- assert(oop(d64)->is_oop() && oop(d64)->is_perm(),
- "cannot embed non-perm oops in code");
+ assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
+ "cannot embed scavengable oops in code");
}
#endif
cbuf.relocate(cbuf.inst_mark(), rspec, format);
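Both .ad changes widen the same debug check: an oop immediate in emitted code is acceptable if it is not scavengable, or if ScavengeRootsInCode is on (the new side list will then keep it current). A one-line standalone restatement of the predicate, with illustrative parameter names:

// true when embedding the constant is legal under the new rule
bool embed_oop_ok(bool is_oop, bool is_scavengable, int scavenge_roots_in_code) {
  return is_oop && (scavenge_roots_in_code != 0 || !is_scavengable);
}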
diff --git a/src/share/vm/c1/c1_GraphBuilder.cpp b/src/share/vm/c1/c1_GraphBuilder.cpp
index 30a1ada82..caa99ded6 100644
--- a/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -1442,7 +1442,7 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
switch (field_type) {
case T_ARRAY:
case T_OBJECT:
- if (field_val.as_object()->has_encoding()) {
+ if (field_val.as_object()->should_be_constant()) {
constant = new Constant(as_ValueType(field_val));
}
break;
diff --git a/src/share/vm/c1/c1_InstructionPrinter.cpp b/src/share/vm/c1/c1_InstructionPrinter.cpp
index 0388385a8..60298587a 100644
--- a/src/share/vm/c1/c1_InstructionPrinter.cpp
+++ b/src/share/vm/c1/c1_InstructionPrinter.cpp
@@ -133,12 +133,12 @@ void InstructionPrinter::print_object(Value obj) {
ciMethod* m = (ciMethod*)value;
output()->print("<method %s.%s>", m->holder()->name()->as_utf8(), m->name()->as_utf8());
} else {
- output()->print("<object 0x%x>", value->encoding());
+ output()->print("<object 0x%x>", value->constant_encoding());
}
} else if (type->as_InstanceConstant() != NULL) {
- output()->print("<instance 0x%x>", type->as_InstanceConstant()->value()->encoding());
+ output()->print("<instance 0x%x>", type->as_InstanceConstant()->value()->constant_encoding());
} else if (type->as_ArrayConstant() != NULL) {
- output()->print("<array 0x%x>", type->as_ArrayConstant()->value()->encoding());
+ output()->print("<array 0x%x>", type->as_ArrayConstant()->value()->constant_encoding());
} else if (type->as_ClassConstant() != NULL) {
ciInstanceKlass* klass = type->as_ClassConstant()->value();
if (!klass->is_loaded()) {
diff --git a/src/share/vm/c1/c1_LIRGenerator.cpp b/src/share/vm/c1/c1_LIRGenerator.cpp
index 475877929..8eb667dda 100644
--- a/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -440,7 +440,7 @@ void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitI
__ oop2reg_patch(NULL, r, info);
} else {
// no patching needed
- __ oop2reg(obj->encoding(), r);
+ __ oop2reg(obj->constant_encoding(), r);
}
}
@@ -831,7 +831,7 @@ void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
LIR_Opr md_reg = new_register(T_OBJECT);
- __ move(LIR_OprFact::oopConst(md->encoding()), md_reg);
+ __ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg);
LIR_Opr data_offset_reg = new_register(T_INT);
__ cmove(lir_cond(cond),
LIR_OprFact::intConst(taken_count_offset),
@@ -1071,7 +1071,7 @@ void LIRGenerator::do_Return(Return* x) {
LIR_OprList* args = new LIR_OprList();
args->append(getThreadPointer());
LIR_Opr meth = new_register(T_OBJECT);
- __ oop2reg(method()->encoding(), meth);
+ __ oop2reg(method()->constant_encoding(), meth);
args->append(meth);
call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
}
@@ -1784,7 +1784,7 @@ void LIRGenerator::do_Throw(Throw* x) {
LIR_OprList* args = new LIR_OprList();
args->append(getThreadPointer());
LIR_Opr meth = new_register(T_OBJECT);
- __ oop2reg(method()->encoding(), meth);
+ __ oop2reg(method()->constant_encoding(), meth);
args->append(meth);
call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
}
@@ -2207,7 +2207,7 @@ void LIRGenerator::do_Base(Base* x) {
LIR_OprList* args = new LIR_OprList();
args->append(getThreadPointer());
LIR_Opr meth = new_register(T_OBJECT);
- __ oop2reg(method()->encoding(), meth);
+ __ oop2reg(method()->constant_encoding(), meth);
args->append(meth);
call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
}
@@ -2216,7 +2216,7 @@ void LIRGenerator::do_Base(Base* x) {
LIR_Opr obj;
if (method()->is_static()) {
obj = new_register(T_OBJECT);
- __ oop2reg(method()->holder()->java_mirror()->encoding(), obj);
+ __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
} else {
Local* receiver = x->state()->local_at(0)->as_Local();
assert(receiver != NULL, "must already exist");
@@ -2660,7 +2660,7 @@ void LIRGenerator::increment_invocation_counter(CodeEmitInfo* info, bool backedg
}
LIR_Opr meth = new_register(T_OBJECT);
- __ oop2reg(method()->encoding(), meth);
+ __ oop2reg(method()->constant_encoding(), meth);
LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
__ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
CodeStub* overflow = new CounterOverflowStub(info, info->bci());
diff --git a/src/share/vm/c1/c1_ValueType.cpp b/src/share/vm/c1/c1_ValueType.cpp
index 79896e43f..9349538e5 100644
--- a/src/share/vm/c1/c1_ValueType.cpp
+++ b/src/share/vm/c1/c1_ValueType.cpp
@@ -86,7 +86,7 @@ ValueType* ValueType::join(ValueType* y) const {
jobject ObjectType::encoding() const {
assert(is_constant(), "must be");
- return constant_value()->encoding();
+ return constant_value()->constant_encoding();
}
bool ObjectType::is_loaded() const {
diff --git a/src/share/vm/ci/ciEnv.cpp b/src/share/vm/ci/ciEnv.cpp
index ccce4e539..b0a17b35c 100644
--- a/src/share/vm/ci/ciEnv.cpp
+++ b/src/share/vm/ci/ciEnv.cpp
@@ -257,7 +257,7 @@ ciMethod* ciEnv::get_method_from_handle(jobject method) {
// ------------------------------------------------------------------
// ciEnv::make_array
-ciArray* ciEnv::make_array(GrowableArray<ciObject*>* objects) {
+ciArray* ciEnv::make_system_array(GrowableArray<ciObject*>* objects) {
VM_ENTRY_MARK;
int length = objects->length();
objArrayOop a = oopFactory::new_system_objArray(length, THREAD);
diff --git a/src/share/vm/ci/ciEnv.hpp b/src/share/vm/ci/ciEnv.hpp
index e58e5a67d..e855dbf9e 100644
--- a/src/share/vm/ci/ciEnv.hpp
+++ b/src/share/vm/ci/ciEnv.hpp
@@ -339,8 +339,8 @@ public:
// but consider adding to vmSymbols.hpp instead.
// Use this to make a holder for non-perm compile time constants.
- // The resulting array is guaranteed to satisfy "has_encoding".
- ciArray* make_array(GrowableArray<ciObject*>* objects);
+ // The resulting array is guaranteed to satisfy "can_be_constant".
+ ciArray* make_system_array(GrowableArray<ciObject*>* objects);
// converts the ciKlass* representing the holder of a method into a
// ciInstanceKlass*. This is needed since the holder of a method in
diff --git a/src/share/vm/ci/ciObject.cpp b/src/share/vm/ci/ciObject.cpp
index 4be69e785..9d27204d9 100644
--- a/src/share/vm/ci/ciObject.cpp
+++ b/src/share/vm/ci/ciObject.cpp
@@ -55,6 +55,7 @@ ciObject::ciObject(oop o) {
}
_klass = NULL;
_ident = 0;
+ init_flags_from(o);
}
// ------------------------------------------------------------------
@@ -69,6 +70,7 @@ ciObject::ciObject(Handle h) {
}
_klass = NULL;
_ident = 0;
+ init_flags_from(h());
}
// ------------------------------------------------------------------
@@ -158,7 +160,7 @@ int ciObject::hash() {
}
// ------------------------------------------------------------------
-// ciObject::encoding
+// ciObject::constant_encoding
//
// The address which the compiler should embed into the
// generated code to represent this oop. This address
@@ -172,16 +174,24 @@ int ciObject::hash() {
//
// This method should be changed to return a generified address
// to discourage use of the JNI handle.
-jobject ciObject::encoding() {
+jobject ciObject::constant_encoding() {
assert(is_null_object() || handle() != NULL, "cannot embed null pointer");
- assert(has_encoding(), "oop must be NULL or perm");
+ assert(can_be_constant(), "oop must be NULL or perm");
return handle();
}
// ------------------------------------------------------------------
-// ciObject::has_encoding
-bool ciObject::has_encoding() {
- return handle() == NULL || is_perm();
+// ciObject::can_be_constant
+bool ciObject::can_be_constant() {
+ if (ScavengeRootsInCode >= 1) return true; // now everybody can encode as a constant
+ return handle() == NULL || !is_scavengable();
+}
+
+// ------------------------------------------------------------------
+// ciObject::should_be_constant()
+bool ciObject::should_be_constant() {
+ if (ScavengeRootsInCode >= 2) return true; // force everybody to be a constant
+ return handle() == NULL || !is_scavengable();
}
@@ -195,8 +205,9 @@ bool ciObject::has_encoding() {
void ciObject::print(outputStream* st) {
st->print("<%s", type_string());
GUARDED_VM_ENTRY(print_impl(st);)
- st->print(" ident=%d %s address=0x%x>", ident(),
+ st->print(" ident=%d %s%s address=0x%x>", ident(),
is_perm() ? "PERM" : "",
+ is_scavengable() ? "SCAVENGABLE" : "",
(address)this);
}
diff --git a/src/share/vm/ci/ciObject.hpp b/src/share/vm/ci/ciObject.hpp
index 30c07adf1..8d5e6b7f4 100644
--- a/src/share/vm/ci/ciObject.hpp
+++ b/src/share/vm/ci/ciObject.hpp
@@ -51,9 +51,10 @@ private:
ciKlass* _klass;
uint _ident;
- enum { FLAG_BITS = 1};
+ enum { FLAG_BITS = 2 };
enum {
- PERM_FLAG = 1
+ PERM_FLAG = 1,
+ SCAVENGABLE_FLAG = 2
};
protected:
ciObject();
@@ -68,8 +69,15 @@ protected:
return JNIHandles::resolve_non_null(_handle);
}
- void set_perm() {
- _ident |= PERM_FLAG;
+ void init_flags_from(oop x) {
+ int flags = 0;
+ if (x != NULL) {
+ if (x->is_perm())
+ flags |= PERM_FLAG;
+ if (x->is_scavengable())
+ flags |= SCAVENGABLE_FLAG;
+ }
+ _ident |= flags;
}
// Virtual behavior of the print() method.
@@ -91,17 +99,27 @@ public:
// A hash value for the convenience of compilers.
int hash();
- // Tells if this oop has an encoding. (I.e., is it null or perm?)
+ // Tells if this oop has an encoding as a constant.
+ // True if is_scavengable is false.
+ // Also true if ScavengeRootsInCode is non-zero.
// If it does not have an encoding, the compiler is responsible for
// making other arrangements for dealing with the oop.
- // See ciEnv::make_perm_array
- bool has_encoding();
+ // See ciEnv::make_system_array
+ bool can_be_constant();
+
+ // Tells if this oop should be made a constant.
+ // True if is_scavengable is false or ScavengeRootsInCode > 1.
+ bool should_be_constant();
// Is this object guaranteed to be in the permanent part of the heap?
// If so, CollectedHeap::can_elide_permanent_oop_store_barriers is relevant.
// If the answer is false, no guarantees are made.
bool is_perm() { return (_ident & PERM_FLAG) != 0; }
+ // Might this object possibly move during a scavenge operation?
+ // If the answer is true and ScavengeRootsInCode==0, the oop cannot be embedded in code.
+ bool is_scavengable() { return (_ident & SCAVENGABLE_FLAG) != 0; }
+
// The address which the compiler should embed into the
// generated code to represent this oop. This address
// is not the true address of the oop -- it will get patched
@@ -109,7 +127,7 @@ public:
//
// Usage note: no address arithmetic allowed. Oop must
// be registered with the oopRecorder.
- jobject encoding();
+ jobject constant_encoding();
// What kind of ciObject is this?
virtual bool is_null_object() const { return false; }
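The header widens the flag field packed into the low bits of _ident from one bit (PERM) to two (PERM plus SCAVENGABLE). A standalone sketch of that packing scheme; the class name and uint32_t width are illustrative, while the bit layout mirrors the enums above:

#include <cstdint>
#include <cassert>

class IdentFlags {
  enum { FLAG_BITS = 2 };                        // low bits reserved for flags
  enum { PERM_FLAG = 1, SCAVENGABLE_FLAG = 2 };
  uint32_t _ident = 0;
public:
  void set_ident(uint32_t id) {                  // id lives above the flag bits
    assert((_ident >> FLAG_BITS) == 0 && id > 0);
    _ident |= (id << FLAG_BITS);
  }
  void init_flags(bool perm, bool scavengable) { // like init_flags_from(oop)
    if (perm)        _ident |= PERM_FLAG;
    if (scavengable) _ident |= SCAVENGABLE_FLAG;
  }
  bool is_perm()        const { return (_ident & PERM_FLAG) != 0; }
  bool is_scavengable() const { return (_ident & SCAVENGABLE_FLAG) != 0; }
  uint32_t ident()      const { return _ident >> FLAG_BITS; }
};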
diff --git a/src/share/vm/ci/ciObjectFactory.cpp b/src/share/vm/ci/ciObjectFactory.cpp
index f0cfd32a9..5567a7a02 100644
--- a/src/share/vm/ci/ciObjectFactory.cpp
+++ b/src/share/vm/ci/ciObjectFactory.cpp
@@ -261,12 +261,11 @@ ciObject* ciObjectFactory::get(oop key) {
ciObject* new_object = create_new_object(keyHandle());
assert(keyHandle() == new_object->get_oop(), "must be properly recorded");
init_ident_of(new_object);
- if (!keyHandle->is_perm()) {
+ if (!new_object->is_perm()) {
// Not a perm-space object.
insert_non_perm(bucket, keyHandle(), new_object);
return new_object;
}
- new_object->set_perm();
if (len != _ci_objects->length()) {
// creating the new object has recursively entered new objects
// into the table. We need to recompute our index.
diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp
index a1efcc595..ef8bb1393 100644
--- a/src/share/vm/classfile/systemDictionary.cpp
+++ b/src/share/vm/classfile/systemDictionary.cpp
@@ -2414,6 +2414,8 @@ Handle SystemDictionary::make_dynamic_call_site(KlassHandle caller,
vmSymbols::makeSite_name(), vmSymbols::makeSite_signature(),
&args, CHECK_(empty));
oop call_site_oop = (oop) result.get_jobject();
+ assert(call_site_oop->is_oop()
+ /*&& sun_dyn_CallSiteImpl::is_instance(call_site_oop)*/, "must be sane");
sun_dyn_CallSiteImpl::set_vmmethod(call_site_oop, mh_invdyn());
if (TraceMethodHandles) {
tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop);
@@ -2450,6 +2452,8 @@ Handle SystemDictionary::find_bootstrap_method(KlassHandle caller,
oop boot_method_oop = (oop) result.get_jobject();
if (boot_method_oop != NULL) {
+ assert(boot_method_oop->is_oop()
+ && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
// probably no race conditions, but let's be careful:
if (Atomic::cmpxchg_ptr(boot_method_oop, ik->adr_bootstrap_method(), NULL) == NULL)
ik->set_bootstrap_method(boot_method_oop);
diff --git a/src/share/vm/code/codeBlob.hpp b/src/share/vm/code/codeBlob.hpp
index 748d7de03..81acc81fc 100644
--- a/src/share/vm/code/codeBlob.hpp
+++ b/src/share/vm/code/codeBlob.hpp
@@ -175,6 +175,8 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
OopClosure* keep_alive,
bool unloading_occurred);
virtual void oops_do(OopClosure* f) = 0;
+ // (All CodeBlob subtypes other than NMethod currently have
+ // an empty oops_do() method.)
// OopMap for frame
OopMapSet* oop_maps() const { return _oop_maps; }
diff --git a/src/share/vm/code/codeCache.cpp b/src/share/vm/code/codeCache.cpp
index ca4f8b7d0..0300957cc 100644
--- a/src/share/vm/code/codeCache.cpp
+++ b/src/share/vm/code/codeCache.cpp
@@ -95,6 +95,7 @@ CodeHeap * CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
+nmethod* CodeCache::_scavenge_root_nmethods = NULL;
CodeBlob* CodeCache::first() {
@@ -148,10 +149,7 @@ CodeBlob* CodeCache::allocate(int size) {
}
}
verify_if_often();
- if (PrintCodeCache2) { // Need to add a new flag
- ResourceMark rm;
- tty->print_cr("CodeCache allocation: addr: " INTPTR_FORMAT ", size: 0x%x\n", cb, size);
- }
+ print_trace("allocation", cb, size);
return cb;
}
@@ -159,10 +157,7 @@ void CodeCache::free(CodeBlob* cb) {
assert_locked_or_safepoint(CodeCache_lock);
verify_if_often();
- if (PrintCodeCache2) { // Need to add a new flag
- ResourceMark rm;
- tty->print_cr("CodeCache free: addr: " INTPTR_FORMAT ", size: 0x%x\n", cb, cb->size());
- }
+ print_trace("free", cb);
if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies--;
}
@@ -260,14 +255,148 @@ void CodeCache::do_unloading(BoolObjectClosure* is_alive,
}
}
-void CodeCache::oops_do(OopClosure* f) {
+void CodeCache::blobs_do(CodeBlobClosure* f) {
+ assert_locked_or_safepoint(CodeCache_lock);
+ FOR_ALL_ALIVE_BLOBS(cb) {
+ f->do_code_blob(cb);
+
+#ifdef ASSERT
+ if (cb->is_nmethod())
+ ((nmethod*)cb)->verify_scavenge_root_oops();
+#endif //ASSERT
+ }
+}
+
+// Walk the list of methods which might contain non-perm oops.
+void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
+ assert_locked_or_safepoint(CodeCache_lock);
+ debug_only(mark_scavenge_root_nmethods());
+
+ for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
+ debug_only(cur->clear_scavenge_root_marked());
+ assert(cur->scavenge_root_not_marked(), "");
+ assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
+
+ bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
+#ifndef PRODUCT
+ if (TraceScavenge) {
+ cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
+ }
+#endif //PRODUCT
+ if (is_live)
+ // Perform cur->oops_do(f), maybe just once per nmethod.
+ f->do_code_blob(cur);
+ }
+
+ // Check for stray marks.
+ debug_only(verify_perm_nmethods(NULL));
+}
+
+void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
+ assert_locked_or_safepoint(CodeCache_lock);
+ nm->set_on_scavenge_root_list();
+ nm->set_scavenge_root_link(_scavenge_root_nmethods);
+ set_scavenge_root_nmethods(nm);
+ print_trace("add_scavenge_root", nm);
+}
+
+void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
+ assert_locked_or_safepoint(CodeCache_lock);
+ print_trace("drop_scavenge_root", nm);
+ nmethod* last = NULL;
+ nmethod* cur = scavenge_root_nmethods();
+ while (cur != NULL) {
+ nmethod* next = cur->scavenge_root_link();
+ if (cur == nm) {
+ if (last != NULL)
+ last->set_scavenge_root_link(next);
+ else set_scavenge_root_nmethods(next);
+ nm->set_scavenge_root_link(NULL);
+ nm->clear_on_scavenge_root_list();
+ return;
+ }
+ last = cur;
+ cur = next;
+ }
+ assert(false, "should have been on list");
+}
+
+void CodeCache::prune_scavenge_root_nmethods() {
assert_locked_or_safepoint(CodeCache_lock);
+ debug_only(mark_scavenge_root_nmethods());
+
+ nmethod* last = NULL;
+ nmethod* cur = scavenge_root_nmethods();
+ while (cur != NULL) {
+ nmethod* next = cur->scavenge_root_link();
+ debug_only(cur->clear_scavenge_root_marked());
+ assert(cur->scavenge_root_not_marked(), "");
+ assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
+
+ if (!cur->is_zombie() && !cur->is_unloaded()
+ && cur->detect_scavenge_root_oops()) {
+ // Keep it. Advance 'last' to prevent deletion.
+ last = cur;
+ } else {
+ // Prune it from the list, so we don't have to look at it any more.
+ print_trace("prune_scavenge_root", cur);
+ cur->set_scavenge_root_link(NULL);
+ cur->clear_on_scavenge_root_list();
+ if (last != NULL)
+ last->set_scavenge_root_link(next);
+ else set_scavenge_root_nmethods(next);
+ }
+ cur = next;
+ }
+
+ // Check for stray marks.
+ debug_only(verify_perm_nmethods(NULL));
+}
+
+#ifndef PRODUCT
+void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
+ // While we are here, verify the integrity of the list.
+ mark_scavenge_root_nmethods();
+ for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
+ assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
+ cur->clear_scavenge_root_marked();
+ }
+ verify_perm_nmethods(f);
+}
+
+// Temporarily mark nmethods that are claimed to be on the non-perm list.
+void CodeCache::mark_scavenge_root_nmethods() {
+ FOR_ALL_ALIVE_BLOBS(cb) {
+ if (cb->is_nmethod()) {
+ nmethod *nm = (nmethod*)cb;
+ assert(nm->scavenge_root_not_marked(), "clean state");
+ if (nm->on_scavenge_root_list())
+ nm->set_scavenge_root_marked();
+ }
+ }
+}
+
+// If the closure is given, run it on the unlisted nmethods.
+// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
+void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
FOR_ALL_ALIVE_BLOBS(cb) {
- cb->oops_do(f);
+ bool call_f = (f_or_null != NULL);
+ if (cb->is_nmethod()) {
+ nmethod *nm = (nmethod*)cb;
+ assert(nm->scavenge_root_not_marked(), "must be already processed");
+ if (nm->on_scavenge_root_list())
+ call_f = false; // don't show this one to the client
+ nm->verify_scavenge_root_oops();
+ } else {
+ call_f = false; // not an nmethod
+ }
+ if (call_f) f_or_null->do_code_blob(cb);
}
}
+#endif //PRODUCT
void CodeCache::gc_prologue() {
+ assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}
@@ -285,6 +414,8 @@ void CodeCache::gc_epilogue() {
cb->fix_oop_relocations();
}
set_needs_cache_clean(false);
+ prune_scavenge_root_nmethods();
+ assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
}
@@ -508,6 +639,14 @@ void CodeCache::verify_if_often() {
}
}
+void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
+ if (PrintCodeCache2) { // Need to add a new flag
+ ResourceMark rm;
+ if (size == 0) size = cb->size();
+ tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
+ }
+}
+
void CodeCache::print_internals() {
int nmethodCount = 0;
int runtimeStubCount = 0;
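prune_scavenge_root_nmethods() above is the classic unlink-while-iterating pattern for a singly linked list: a trailing 'last' pointer lets dead entries be spliced out without restarting the walk. A standalone sketch with a toy Node type, where the still_needed test stands in for the zombie/unloaded/detect_scavenge_root_oops checks:

#include <cstddef>

struct Node {
  bool still_needed;
  Node* link;
};

Node* prune(Node* head) {
  Node* last = nullptr;
  Node* cur  = head;
  while (cur != nullptr) {
    Node* next = cur->link;
    if (cur->still_needed) {
      last = cur;                         // keep it; advance trailing pointer
    } else {
      cur->link = nullptr;                // detach the pruned node
      if (last != nullptr) last->link = next;
      else                 head = next;   // pruned the head itself
    }
    cur = next;
  }
  return head;
}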
diff --git a/src/share/vm/code/codeCache.hpp b/src/share/vm/code/codeCache.hpp
index 6327bd110..da5149c6e 100644
--- a/src/share/vm/code/codeCache.hpp
+++ b/src/share/vm/code/codeCache.hpp
@@ -45,8 +45,13 @@ class CodeCache : AllStatic {
static int _number_of_blobs;
static int _number_of_nmethods_with_dependencies;
static bool _needs_cache_clean;
+ static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
static void verify_if_often() PRODUCT_RETURN;
+
+ static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
+ static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
+
public:
// Initialization
@@ -61,6 +66,7 @@ class CodeCache : AllStatic {
static void flush(); // flushes all CodeBlobs
static bool contains(void *p); // returns whether p is included
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
+ static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
// Lookup
@@ -106,12 +112,24 @@ class CodeCache : AllStatic {
static void do_unloading(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
bool unloading_occurred);
- static void oops_do(OopClosure* f);
+ static void oops_do(OopClosure* f) {
+ CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
+ blobs_do(&oopc);
+ }
+ static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
+ static void scavenge_root_nmethods_do(CodeBlobClosure* f);
+
+ static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; }
+ static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
+ static void add_scavenge_root_nmethod(nmethod* nm);
+ static void drop_scavenge_root_nmethod(nmethod* nm);
+ static void prune_scavenge_root_nmethods();
// Printing/debugging
static void print() PRODUCT_RETURN; // prints summary
static void print_internals();
static void verify(); // verifies the code cache
+ static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
// The full limits of the codeCache
static address low_bound() { return (address) _heap->low_boundary(); }
diff --git a/src/share/vm/code/debugInfoRec.cpp b/src/share/vm/code/debugInfoRec.cpp
index 53b9dde47..c1bbe751b 100644
--- a/src/share/vm/code/debugInfoRec.cpp
+++ b/src/share/vm/code/debugInfoRec.cpp
@@ -299,7 +299,7 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
stream()->write_int(sender_stream_offset);
// serialize scope
- jobject method_enc = (method == NULL)? NULL: method->encoding();
+ jobject method_enc = (method == NULL)? NULL: method->constant_encoding();
stream()->write_int(oop_recorder()->find_index(method_enc));
stream()->write_bci(bci);
assert(method == NULL ||
diff --git a/src/share/vm/code/dependencies.cpp b/src/share/vm/code/dependencies.cpp
index 8af296fd4..0d38dc7c2 100644
--- a/src/share/vm/code/dependencies.cpp
+++ b/src/share/vm/code/dependencies.cpp
@@ -302,7 +302,7 @@ void Dependencies::encode_content_bytes() {
bytes.write_byte(code_byte);
for (int j = 0; j < stride; j++) {
if (j == skipj) continue;
- bytes.write_int(_oop_recorder->find_index(deps->at(i+j)->encoding()));
+ bytes.write_int(_oop_recorder->find_index(deps->at(i+j)->constant_encoding()));
}
}
}
diff --git a/src/share/vm/code/nmethod.cpp b/src/share/vm/code/nmethod.cpp
index 9b8a9dac0..db5ebc4f5 100644
--- a/src/share/vm/code/nmethod.cpp
+++ b/src/share/vm/code/nmethod.cpp
@@ -581,10 +581,13 @@ nmethod::nmethod(
debug_only(No_Safepoint_Verifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
- NOT_PRODUCT(_has_debug_info = false; )
+ NOT_PRODUCT(_has_debug_info = false);
+ _oops_do_mark_link = NULL;
_method = method;
_entry_bci = InvocationEntryBci;
- _link = NULL;
+ _osr_link = NULL;
+ _scavenge_root_link = NULL;
+ _scavenge_root_state = 0;
_compiler = NULL;
// We have no exception handler or deopt handler; make the
// values something that will never match a pc like the nmethod vtable entry
@@ -618,7 +621,7 @@ nmethod::nmethod(
_stack_traversal_mark = 0;
code_buffer->copy_oops_to(this);
- debug_only(check_store();)
+ debug_only(verify_scavenge_root_oops());
CodeCache::commit(this);
VTune::create_nmethod(this);
}
@@ -668,10 +671,13 @@ nmethod::nmethod(
debug_only(No_Safepoint_Verifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
- NOT_PRODUCT(_has_debug_info = false; )
+ NOT_PRODUCT(_has_debug_info = false);
+ _oops_do_mark_link = NULL;
_method = method;
_entry_bci = InvocationEntryBci;
- _link = NULL;
+ _osr_link = NULL;
+ _scavenge_root_link = NULL;
+ _scavenge_root_state = 0;
_compiler = NULL;
// We have no exception handler or deopt handler; make the
// values something that will never match a pc like the nmethod vtable entry
@@ -703,7 +709,7 @@ nmethod::nmethod(
_stack_traversal_mark = 0;
code_buffer->copy_oops_to(this);
- debug_only(check_store();)
+ debug_only(verify_scavenge_root_oops());
CodeCache::commit(this);
VTune::create_nmethod(this);
}
@@ -770,12 +776,15 @@ nmethod::nmethod(
debug_only(No_Safepoint_Verifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
- NOT_PRODUCT(_has_debug_info = false; )
+ NOT_PRODUCT(_has_debug_info = false);
+ _oops_do_mark_link = NULL;
_method = method;
_compile_id = compile_id;
_comp_level = comp_level;
_entry_bci = entry_bci;
- _link = NULL;
+ _osr_link = NULL;
+ _scavenge_root_link = NULL;
+ _scavenge_root_state = 0;
_compiler = compiler;
_orig_pc_offset = orig_pc_offset;
#ifdef HAVE_DTRACE_H
@@ -813,7 +822,10 @@ nmethod::nmethod(
code_buffer->copy_oops_to(this);
debug_info->copy_to(this);
dependencies->copy_to(this);
- debug_only(check_store();)
+ if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
+ CodeCache::add_scavenge_root_nmethod(this);
+ }
+ debug_only(verify_scavenge_root_oops());
CodeCache::commit(this);
@@ -902,23 +914,30 @@ void nmethod::print_on(outputStream* st, const char* title) const {
if (st != NULL) {
ttyLocker ttyl;
// Print a little tag line that looks like +PrintCompilation output:
- st->print("%3d%c %s",
+ int tlen = (int) strlen(title);
+ bool do_nl = false;
+ if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
+ st->print("%3d%c %.*s",
compile_id(),
is_osr_method() ? '%' :
method() != NULL &&
is_native_method() ? 'n' : ' ',
- title);
+ tlen, title);
#ifdef TIERED
st->print(" (%d) ", comp_level());
#endif // TIERED
if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this);
- if (method() != NULL) {
- method()->print_short_name(st);
+ if (Universe::heap()->is_gc_active() && method() != NULL) {
+ st->print("(method)");
+ } else if (method() != NULL) {
+ method()->print_short_name(st);
if (is_osr_method())
st->print(" @ %d", osr_entry_bci());
if (method()->code_size() > 0)
st->print(" (%d bytes)", method()->code_size());
}
+
+ if (do_nl) st->cr();
}
}
@@ -1033,6 +1052,7 @@ void nmethod::cleanup_inline_caches() {
}
}
+// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
assert(is_not_entrant(), "must be a non-entrant method");
set_stack_traversal_mark(NMethodSweeper::traversal_count());
@@ -1077,7 +1097,8 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
" unloadable], methodOop(" INTPTR_FORMAT
"), cause(" INTPTR_FORMAT ")",
this, (address)_method, (address)cause);
- cause->klass()->print();
+ if (!Universe::heap()->is_gc_active())
+ cause->klass()->print();
}
// If _method is already NULL the methodOop is about to be unloaded,
// so we don't have to break the cycle. Note that it is possible to
@@ -1105,7 +1126,8 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
// The methodOop is gone at this point
assert(_method == NULL, "Tautology");
- set_link(NULL);
+ set_osr_link(NULL);
+ //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
NMethodSweeper::notify(this);
}
@@ -1291,6 +1313,10 @@ void nmethod::flush() {
ec = next;
}
+ if (on_scavenge_root_list()) {
+ CodeCache::drop_scavenge_root_nmethod(this);
+ }
+
((CodeBlob*)(this))->flush();
CodeCache::free(this);
@@ -1350,7 +1376,10 @@ bool nmethod::can_unload(BoolObjectClosure* is_alive,
return false;
}
}
- assert(unloading_occurred, "Inconsistency in unloading");
+ // If ScavengeRootsInCode is true, an nmethod might be unloaded
+ // simply because one of its constant oops has gone dead.
+ // No actual classes need to be unloaded in order for this to occur.
+ assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
make_unloaded(is_alive, obj);
return true;
}
@@ -1558,12 +1587,108 @@ void nmethod::oops_do(OopClosure* f) {
}
// Scopes
+ // This includes oop constants not inlined in the code stream.
for (oop* p = oops_begin(); p < oops_end(); p++) {
if (*p == Universe::non_oop_word()) continue; // skip non-oops
f->do_oop(p);
}
}
+#define NMETHOD_SENTINEL ((nmethod*)badAddress)
+
+nmethod* volatile nmethod::_oops_do_mark_nmethods;
+
+// An nmethod is "marked" if its _mark_link is set non-null.
+// Even if it is the end of the linked list, it will have a non-null link value,
+// as long as it is on the list.
+// This code must be MP safe, because it is used from parallel GC passes.
+bool nmethod::test_set_oops_do_mark() {
+ assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
+ nmethod* observed_mark_link = _oops_do_mark_link;
+ if (observed_mark_link == NULL) {
+ // Claim this nmethod for this thread to mark.
+ observed_mark_link = (nmethod*)
+ Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
+ if (observed_mark_link == NULL) {
+
+ // Atomically append this nmethod (now claimed) to the head of the list:
+ nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
+ for (;;) {
+ nmethod* required_mark_nmethods = observed_mark_nmethods;
+ _oops_do_mark_link = required_mark_nmethods;
+ observed_mark_nmethods = (nmethod*)
+ Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
+ if (observed_mark_nmethods == required_mark_nmethods)
+ break;
+ }
+ // Mark was clear when we first saw this guy.
+ NOT_PRODUCT(if (TraceScavenge) print_on(tty, "oops_do, mark\n"));
+ return false;
+ }
+ }
+ // On fall through, another racing thread marked this nmethod before we did.
+ return true;
+}
+
+void nmethod::oops_do_marking_prologue() {
+ NOT_PRODUCT(if (TraceScavenge) tty->print_cr("[oops_do_marking_prologue"));
+ assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
+ // We use cmpxchg_ptr instead of regular assignment here because the user
+ // may fork a bunch of threads, and we need them all to see the same state.
+ void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
+ guarantee(observed == NULL, "no races in this sequential code");
+}
+
+void nmethod::oops_do_marking_epilogue() {
+ assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
+ nmethod* cur = _oops_do_mark_nmethods;
+ while (cur != NMETHOD_SENTINEL) {
+ assert(cur != NULL, "not NULL-terminated");
+ nmethod* next = cur->_oops_do_mark_link;
+ cur->_oops_do_mark_link = NULL;
+ NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark\n"));
+ cur = next;
+ }
+ void* required = _oops_do_mark_nmethods;
+ void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
+ guarantee(observed == required, "no races in this sequential code");
+ NOT_PRODUCT(if (TraceScavenge) tty->print_cr("oops_do_marking_epilogue]"));
+}
+
+class DetectScavengeRoot: public OopClosure {
+ bool _detected_scavenge_root;
+public:
+ DetectScavengeRoot() : _detected_scavenge_root(false)
+ { NOT_PRODUCT(_print_nm = NULL); }
+ bool detected_scavenge_root() { return _detected_scavenge_root; }
+ virtual void do_oop(oop* p) {
+ if ((*p) != NULL && (*p)->is_scavengable()) {
+ NOT_PRODUCT(maybe_print(p));
+ _detected_scavenge_root = true;
+ }
+ }
+ virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
+#ifndef PRODUCT
+ nmethod* _print_nm;
+ void maybe_print(oop* p) {
+ if (_print_nm == NULL) return;
+ if (!_detected_scavenge_root) _print_nm->print_on(tty, "new scavenge root");
+ tty->print_cr(""PTR_FORMAT"[offset=%d] detected non-perm oop "PTR_FORMAT" (found at "PTR_FORMAT")",
+ _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
+ (intptr_t)(*p), (intptr_t)p);
+ (*p)->print();
+ }
+#endif //PRODUCT
+};
+
+bool nmethod::detect_scavenge_root_oops() {
+ DetectScavengeRoot detect_scavenge_root;
+ NOT_PRODUCT(if (TraceScavenge) detect_scavenge_root._print_nm = this);
+ oops_do(&detect_scavenge_root);
+ return detect_scavenge_root.detected_scavenge_root();
+}
+
// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
@@ -1878,6 +2003,24 @@ bool nmethod::is_deopt_pc(address pc) {
// -----------------------------------------------------------------------------
// Verification
+class VerifyOopsClosure: public OopClosure {
+ nmethod* _nm;
+ bool _ok;
+public:
+ VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
+ bool ok() { return _ok; }
+ virtual void do_oop(oop* p) {
+ if ((*p) == NULL || (*p)->is_oop()) return;
+ if (_ok) {
+ _nm->print_nmethod(true);
+ _ok = false;
+ }
+ tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+ (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+ }
+ virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+
void nmethod::verify() {
// Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
@@ -1911,6 +2054,11 @@ void nmethod::verify() {
}
}
+ VerifyOopsClosure voc(this);
+ oops_do(&voc);
+ assert(voc.ok(), "embedded oops must be OK");
+ verify_scavenge_root_oops();
+
verify_scopes();
}
@@ -1974,19 +2122,34 @@ void nmethod::verify_scopes() {
// Non-product code
#ifndef PRODUCT
-void nmethod::check_store() {
- // Make sure all oops in the compiled code are tenured
-
- RelocIterator iter(this);
- while (iter.next()) {
- if (iter.type() == relocInfo::oop_type) {
- oop_Relocation* reloc = iter.oop_reloc();
- oop obj = reloc->oop_value();
- if (obj != NULL && !obj->is_perm()) {
- fatal("must be permanent oop in compiled code");
- }
+class DebugScavengeRoot: public OopClosure {
+ nmethod* _nm;
+ bool _ok;
+public:
+ DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
+ bool ok() { return _ok; }
+ virtual void do_oop(oop* p) {
+ if ((*p) == NULL || !(*p)->is_scavengable()) return;
+ if (_ok) {
+ _nm->print_nmethod(true);
+ _ok = false;
}
+ tty->print_cr("*** non-perm oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+ (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+ (*p)->print();
+ }
+ virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+
+void nmethod::verify_scavenge_root_oops() {
+ if (!on_scavenge_root_list()) {
+ // Actually look inside, to verify the claim that it's clean.
+ DebugScavengeRoot debug_scavenge_root(this);
+ oops_do(&debug_scavenge_root);
+ if (!debug_scavenge_root.ok())
+ fatal("found an unadvertised bad non-perm oop in the code cache");
}
+ assert(scavenge_root_not_marked(), "");
}
#endif // PRODUCT
@@ -2019,6 +2182,7 @@ void nmethod::print() const {
if (is_not_entrant()) tty->print("not_entrant ");
if (is_zombie()) tty->print("zombie ");
if (is_unloaded()) tty->print("unloaded ");
+ if (on_scavenge_root_list()) tty->print("scavenge_root ");
tty->print_cr("}:");
}
if (size () > 0) tty->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
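test_set_oops_do_mark() above combines a per-nmethod claim with a lock-free push onto a global list, so parallel GC threads visit each nmethod's oops exactly once and the epilogue can unmark everything afterwards. A sketch of the same two-step protocol written with std::atomic instead of HotSpot's Atomic::cmpxchg_ptr; MarkNode and the function names are illustrative:

#include <atomic>

struct MarkNode {
  std::atomic<MarkNode*> mark_link{nullptr};    // non-null once marked
};

static MarkNode g_sentinel;                     // plays the NMETHOD_SENTINEL role
// Seeding the head with the sentinel (as oops_do_marking_prologue does)
// keeps every marked node's link non-null, so "link != NULL" means marked.
static std::atomic<MarkNode*> g_marked_list{&g_sentinel};

// Returns true if some other thread had already claimed 'n'.
bool test_set_mark(MarkNode* n) {
  MarkNode* expected = nullptr;
  // Step 1: claim the node exactly once by CASing the sentinel into its link.
  if (!n->mark_link.compare_exchange_strong(expected, &g_sentinel))
    return true;                                // lost the race: already marked
  // Step 2: publish the claimed node at the head of the global list.
  MarkNode* head = g_marked_list.load();
  do {
    n->mark_link.store(head);                   // link first, then CAS the head
  } while (!g_marked_list.compare_exchange_weak(head, n));
  return false;                                 // mark was clear when we arrived
}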
diff --git a/src/share/vm/code/nmethod.hpp b/src/share/vm/code/nmethod.hpp
index 8305f7949..2618d9dcd 100644
--- a/src/share/vm/code/nmethod.hpp
+++ b/src/share/vm/code/nmethod.hpp
@@ -125,6 +125,7 @@ class xmlStream;
class nmethod : public CodeBlob {
friend class VMStructs;
friend class NMethodSweeper;
+ friend class CodeCache; // non-perm oops
private:
// Shared fields for all nmethod's
static int _zombie_instruction_size;
@@ -132,7 +133,12 @@ class nmethod : public CodeBlob {
methodOop _method;
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
- nmethod* _link; // To support simple linked-list chaining of nmethods
+ // To support simple linked-list chaining of nmethods:
+ nmethod* _osr_link; // from instanceKlass::osr_nmethods_head
+ nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
+
+ static nmethod* volatile _oops_do_mark_nmethods;
+ nmethod* volatile _oops_do_mark_link;
AbstractCompiler* _compiler; // The compiler which compiled this nmethod
@@ -174,6 +180,8 @@ class nmethod : public CodeBlob {
// used by jvmti to track if an unload event has been posted for this nmethod.
bool _unload_reported;
+ jbyte _scavenge_root_state;
+
NOT_PRODUCT(bool _has_debug_info; )
// Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
@@ -242,7 +250,6 @@ class nmethod : public CodeBlob {
// helper methods
void* operator new(size_t size, int nmethod_size);
- void check_store();
const char* reloc_string_for(u_char* begin, u_char* end);
void make_not_entrant_or_zombie(int state);
@@ -407,6 +414,24 @@ class nmethod : public CodeBlob {
int version() const { return flags.version; }
void set_version(int v);
+ // Non-perm oop support
+ bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
+ protected:
+ enum { npl_on_list = 0x01, npl_marked = 0x10 };
+ void set_on_scavenge_root_list() { _scavenge_root_state = npl_on_list; }
+ void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
+ // assertion-checking and pruning logic uses the bits of _scavenge_root_state
+#ifndef PRODUCT
+ void set_scavenge_root_marked() { _scavenge_root_state |= npl_marked; }
+ void clear_scavenge_root_marked() { _scavenge_root_state &= ~npl_marked; }
+ bool scavenge_root_not_marked() { return (_scavenge_root_state &~ npl_on_list) == 0; }
+ // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
+#endif //PRODUCT
+ nmethod* scavenge_root_link() const { return _scavenge_root_link; }
+ void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
+
+ public:
+
// Sweeper support
long stack_traversal_mark() { return _stack_traversal_mark; }
void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }
@@ -425,8 +450,8 @@ class nmethod : public CodeBlob {
int osr_entry_bci() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
address osr_entry() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
void invalidate_osr_method();
- nmethod* link() const { return _link; }
- void set_link(nmethod *n) { _link = n; }
+ nmethod* osr_link() const { return _osr_link; }
+ void set_osr_link(nmethod *n) { _osr_link = n; }
// tells whether frames described by this nmethod can be deoptimized
// note: native wrappers cannot be deoptimized.
@@ -467,6 +492,14 @@ class nmethod : public CodeBlob {
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
OopClosure* f);
void oops_do(OopClosure* f);
+ bool detect_scavenge_root_oops();
+ void verify_scavenge_root_oops() PRODUCT_RETURN;
+
+ bool test_set_oops_do_mark();
+ static void oops_do_marking_prologue();
+ static void oops_do_marking_epilogue();
+ static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
+ DEBUG_ONLY(bool test_oops_do_mark() { return _oops_do_mark_link != NULL; })
// ScopeDesc for an instruction
ScopeDesc* scope_desc_at(address pc);
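
The _scavenge_root_state byte above packs two independent facts: whether the
nmethod is currently chained on the scavenge-root list (bit 0x01), and, in
debug builds only, whether it was marked during the current pass (bit 0x10).
A self-contained demo of that bit layout, with semantics read off the enum and
accessors above (not the real nmethod type):

    #include <cassert>
    #include <cstdint>

    struct ScavengeRootState {
      enum { npl_on_list = 0x01, npl_marked = 0x10 };
      uint8_t _state = 0;   // plays the role of the jbyte field

      bool on_list() const { return (_state & npl_on_list) != 0; }
      void set_on_list()   { _state = npl_on_list; }
      void clear_list()    { _state = 0; }
      void set_marked()    { _state |= npl_marked; }
      void clear_marked()  { _state = uint8_t(_state & ~npl_marked); }
      // Like scavenge_root_not_marked(): ignore the on-list bit entirely.
      bool not_marked() const { return (_state & ~npl_on_list) == 0; }
    };

    int main() {
      ScavengeRootState s;
      assert(!s.on_list() && s.not_marked());
      s.set_on_list();  assert(s.on_list() && s.not_marked());
      s.set_marked();   assert(!s.not_marked());   // marked, regardless of list bit
      s.clear_marked(); assert(s.not_marked());
      return 0;
    }
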
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
index de5955d50..92829787b 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
@@ -53,14 +53,12 @@ class MarkRefsIntoClosure: public OopsInGenClosure {
public:
MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
bool should_do_nmethods);
+ bool should_do_nmethods() { return _should_do_nmethods; }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
bool do_header() { return true; }
- virtual const bool do_nmethods() const {
- return _should_do_nmethods;
- }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
@@ -79,14 +77,12 @@ class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
public:
MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
CMSBitMap* cms_bm, bool should_do_nmethods);
+ bool should_do_nmethods() { return _should_do_nmethods; }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
bool do_header() { return true; }
- virtual const bool do_nmethods() const {
- return _should_do_nmethods;
- }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
@@ -194,7 +190,6 @@ class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
bool do_header() { return true; }
- virtual const bool do_nmethods() const { return true; }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index 6c13acba2..fafb1e368 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -2857,9 +2857,12 @@ void CMSCollector::verify_after_remark_work_1() {
gch->gen_process_strong_roots(_cmsGen->level(),
true, // younger gens are roots
+ true, // activate StrongRootsScope
true, // collecting perm gen
SharedHeap::ScanningOption(roots_scanning_options()),
- NULL, &notOlder);
+ &notOlder,
+ true, // walk code active on stacks
+ NULL);
// Now mark from the roots
assert(_revisitStack.isEmpty(), "Should be empty");
@@ -2905,9 +2908,12 @@ void CMSCollector::verify_after_remark_work_2() {
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
gch->gen_process_strong_roots(_cmsGen->level(),
true, // younger gens are roots
+ true, // activate StrongRootsScope
true, // collecting perm gen
SharedHeap::ScanningOption(roots_scanning_options()),
- NULL, &notOlder);
+ &notOlder,
+ true, // walk code active on stacks
+ NULL);
// Now mark from the roots
assert(_revisitStack.isEmpty(), "Should be empty");
@@ -3503,9 +3509,12 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
gch->gen_process_strong_roots(_cmsGen->level(),
true, // younger gens are roots
+ true, // activate StrongRootsScope
true, // collecting perm gen
SharedHeap::ScanningOption(roots_scanning_options()),
- NULL, &notOlder);
+ &notOlder,
+ true, // walk all of code cache if (so & SO_CodeCache)
+ NULL);
}
// Clear mod-union table; it will be dirtied in the prologue of
@@ -5015,9 +5024,15 @@ void CMSParRemarkTask::work(int i) {
_timer.start();
gch->gen_process_strong_roots(_collector->_cmsGen->level(),
false, // yg was scanned above
+ false, // this is parallel code
true, // collecting perm gen
SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
- NULL, &par_mrias_cl);
+ &par_mrias_cl,
+ true, // walk all of code cache if (so & SO_CodeCache)
+ NULL);
+ assert(_collector->should_unload_classes()
+ || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
+ "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
_timer.stop();
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr(
@@ -5398,7 +5413,6 @@ void CMSCollector::do_remark_parallel() {
// Set up for parallel process_strong_roots work.
gch->set_par_threads(n_workers);
- gch->change_strong_roots_parity();
// We won't be iterating over the cards in the card table updating
// the younger_gen cards, so we shouldn't call the following else
// the verification code as well as subsequent younger_refs_iterate
@@ -5429,8 +5443,10 @@ void CMSCollector::do_remark_parallel() {
if (n_workers > 1) {
// Make refs discovery MT-safe
ReferenceProcessorMTMutator mt(ref_processor(), true);
+ GenCollectedHeap::StrongRootsScope srs(gch);
workers->run_task(&tsk);
} else {
+ GenCollectedHeap::StrongRootsScope srs(gch);
tsk.work(0);
}
gch->set_par_threads(0); // 0 ==> non-parallel.
@@ -5514,11 +5530,18 @@ void CMSCollector::do_remark_non_parallel() {
verify_work_stacks_empty();
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+ GenCollectedHeap::StrongRootsScope srs(gch);
gch->gen_process_strong_roots(_cmsGen->level(),
true, // younger gens as roots
+ false, // use the local StrongRootsScope
true, // collecting perm gen
SharedHeap::ScanningOption(roots_scanning_options()),
- NULL, &mrias_cl);
+ &mrias_cl,
+ true, // walk code active on stacks
+ NULL);
+ assert(should_unload_classes()
+ || (roots_scanning_options() & SharedHeap::SO_CodeCache),
+ "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
}
verify_work_stacks_empty();
// Restore evacuated mark words, if any, used for overflow list links
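
Throughout the CMS hunks above, explicit change_strong_roots_parity() calls
give way to a StrongRootsScope constructed in both the parallel and the serial
branch. A standalone RAII model of that bracketing (illustrative only; the
real prologue flips the claim parity and may start nmethod mark tracking):

    #include <cstdio>

    static int g_parity = 0;

    struct StrongRootsScope {
      bool _active;
      explicit StrongRootsScope(bool activate = true) : _active(activate) {
        if (_active) std::printf("prologue: parity now %d\n", ++g_parity);
      }
      ~StrongRootsScope() {
        if (_active) std::printf("epilogue\n");
      }
    };

    static void work() { std::printf("  root processing at parity %d\n", g_parity); }

    int main(int argc, char**) {
      if (argc > 1) {            // "parallel" branch
        StrongRootsScope srs;
        work();                  // stand-in for workers->run_task(&tsk)
      } else {                   // "serial" branch
        StrongRootsScope srs;
        work();                  // stand-in for tsk.work(0)
      }
      return 0;                  // either way the scope closes exactly once
    }
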
diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index cf8eb866f..de27a6a65 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -747,10 +747,11 @@ void ConcurrentMark::checkpointRootsInitialPre() {
// clear the mark bitmap (no grey objects to start with)
_nextMarkBitMap->clearAll();
PrintReachableClosure prcl(_nextMarkBitMap);
- g1h->process_strong_roots(
+ g1h->process_strong_roots(true, // activate StrongRootsScope
false, // fake perm gen collection
SharedHeap::SO_AllClasses,
&prcl, // Regular roots
+ NULL, // do not visit active blobs
&prcl // Perm Gen Roots
);
// The root iteration above "consumed" dirty cards in the perm gen.
@@ -866,9 +867,11 @@ void ConcurrentMark::checkpointRootsInitial() {
g1h->set_marking_started();
g1h->rem_set()->prepare_for_younger_refs_iterate(false);
- g1h->process_strong_roots(false, // fake perm gen collection
+ g1h->process_strong_roots(true, // activate StrongRootsScope
+ false, // fake perm gen collection
SharedHeap::SO_AllClasses,
&notOlder, // Regular roots
+ NULL, // do not visit active blobs
&older // Perm Gen Roots
);
checkpointRootsInitialPost();
@@ -1963,7 +1966,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
g1h->ensure_parsability(false);
if (ParallelGCThreads > 0) {
- g1h->change_strong_roots_parity();
+ G1CollectedHeap::StrongRootsScope srs(g1h);
// this is remark, so we'll use up all available threads
int active_workers = ParallelGCThreads;
set_phase(active_workers, false);
@@ -1980,7 +1983,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" );
} else {
- g1h->change_strong_roots_parity();
+ G1CollectedHeap::StrongRootsScope srs(g1h);
// this is remark, so we'll use up all available threads
int active_workers = 1;
set_phase(active_workers, false);
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 8b75a1913..917e6a59a 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -2299,9 +2299,12 @@ void G1CollectedHeap::verify(bool allow_dirty,
if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
if (!silent) { gclog_or_tty->print("roots "); }
VerifyRootsClosure rootsCl(use_prev_marking);
- process_strong_roots(false,
+ CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
+ process_strong_roots(true, // activate StrongRootsScope
+ false,
SharedHeap::SO_AllClasses,
&rootsCl,
+ &blobsCl,
&rootsCl);
rem_set()->invalidate(perm_gen()->used_region(), false);
if (!silent) { gclog_or_tty->print("heapRegions "); }
@@ -3992,8 +3995,14 @@ g1_process_strong_roots(bool collecting_perm_gen,
BufferingOopsInGenClosure buf_scan_perm(scan_perm);
buf_scan_perm.set_generation(perm_gen());
- process_strong_roots(collecting_perm_gen, so,
+ // Walk the code cache w/o buffering, because StarTask cannot handle
+ // unaligned oop locations.
+ CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true);
+
+ process_strong_roots(false, // no scoping; this is parallel code
+ collecting_perm_gen, so,
&buf_scan_non_heap_roots,
+ &eager_scan_code_roots,
&buf_scan_perm);
// Finish up any enqueued closure apps.
buf_scan_non_heap_roots.done();
@@ -4083,7 +4092,8 @@ G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
void
G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
OopClosure* non_root_closure) {
- SharedHeap::process_weak_roots(root_closure, non_root_closure);
+ CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
+ SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
}
@@ -4117,15 +4127,16 @@ void G1CollectedHeap::evacuate_collection_set() {
init_for_evac_failure(NULL);
- change_strong_roots_parity(); // In preparation for parallel strong roots.
rem_set()->prepare_for_younger_refs_iterate(true);
assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
double start_par = os::elapsedTime();
if (ParallelGCThreads > 0) {
// The individual threads will set their evac-failure closures.
+ StrongRootsScope srs(this);
workers()->run_task(&g1_par_task);
} else {
+ StrongRootsScope srs(this);
g1_par_task.work(0);
}
diff --git a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
index 492d9f54a..b872de102 100644
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
@@ -116,9 +116,11 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
SharedHeap* sh = SharedHeap::heap();
- sh->process_strong_roots(true, // Collecting permanent generation.
+ sh->process_strong_roots(true, // activate StrongRootsScope
+ true, // Collecting permanent generation.
SharedHeap::SO_SystemClasses,
&GenMarkSweep::follow_root_closure,
+ &GenMarkSweep::follow_code_root_closure,
&GenMarkSweep::follow_root_closure);
// Process reference objects found during marking
@@ -276,9 +278,11 @@ void G1MarkSweep::mark_sweep_phase3() {
SharedHeap* sh = SharedHeap::heap();
- sh->process_strong_roots(true, // Collecting permanent generation.
+ sh->process_strong_roots(true, // activate StrongRootsScope
+ true, // Collecting permanent generation.
SharedHeap::SO_AllClasses,
&GenMarkSweep::adjust_root_pointer_closure,
+ NULL, // do not touch code cache here
&GenMarkSweep::adjust_pointer_closure);
g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
diff --git a/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge b/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
index 90161a4c7..e121246dc 100644
--- a/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
+++ b/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
@@ -372,6 +372,7 @@ psScavenge.inline.hpp parallelScavengeHeap.hpp
psScavenge.inline.hpp psPromotionManager.hpp
psScavenge.inline.hpp psScavenge.hpp
+pcTasks.cpp codeCache.hpp
pcTasks.cpp collectedHeap.hpp
pcTasks.cpp fprofiler.hpp
pcTasks.cpp jniHandles.hpp
@@ -391,6 +392,7 @@ pcTasks.hpp gcTaskManager.hpp
pcTasks.hpp psTasks.hpp
psTasks.cpp cardTableExtension.hpp
+psTasks.cpp codeCache.hpp
psTasks.cpp fprofiler.hpp
psTasks.cpp gcTaskManager.hpp
psTasks.cpp iterator.hpp
diff --git a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
index 3628875eb..5acb923a0 100644
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
@@ -480,12 +480,14 @@ void ParNewGenTask::work(int i) {
par_scan_state.start_strong_roots();
gch->gen_process_strong_roots(_gen->level(),
- true, // Process younger gens, if any,
- // as strong roots.
- false,// not collecting perm generation.
+ true, // Process younger gens, if any,
+ // as strong roots.
+ false, // no scope; this is parallel code
+ false, // not collecting perm generation.
SharedHeap::SO_AllClasses,
- &par_scan_state.older_gen_closure(),
- &par_scan_state.to_space_root_closure());
+ &par_scan_state.to_space_root_closure(),
+ true, // walk *all* scavengable nmethods
+ &par_scan_state.older_gen_closure());
par_scan_state.end_strong_roots();
// "evacuate followers".
@@ -799,15 +801,16 @@ void ParNewGeneration::collect(bool full,
ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
int n_workers = workers->total_workers();
gch->set_par_threads(n_workers);
- gch->change_strong_roots_parity();
gch->rem_set()->prepare_for_younger_refs_iterate(true);
// It turns out that even when we're using 1 thread, doing the work in a
// separate thread causes wide variance in run times. We can't help this
// in the multi-threaded case, but we special-case n=1 here to get
// repeatable measurements of the 1-thread overhead of the parallel code.
if (n_workers > 1) {
+ GenCollectedHeap::StrongRootsScope srs(gch);
workers->run_task(&tsk);
} else {
+ GenCollectedHeap::StrongRootsScope srs(gch);
tsk.work(0);
}
thread_state_set.reset();
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
index b48344fd2..0835503d2 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -962,6 +962,14 @@ void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
_old_gen->resize(desired_free_space);
}
+ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
+ // nothing particular
+}
+
+ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
+ // nothing particular
+}
+
#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
if (ZapUnusedHeapArea) {
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
index 350c04317..5b19b7a5c 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
@@ -234,6 +234,13 @@ class ParallelScavengeHeap : public CollectedHeap {
// Mangle the unused parts of all spaces in the heap
void gen_mangle_unused_area() PRODUCT_RETURN;
+
+ // Call these in sequential code around the processing of strong roots.
+ class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
+ public:
+ ParStrongRootsScope();
+ ~ParStrongRootsScope();
+ };
};
inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
diff --git a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
index 7a3ebaf9f..62b6f3cb8 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
@@ -39,12 +39,13 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+ CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true);
if (_java_thread != NULL)
- _java_thread->oops_do(&mark_and_push_closure);
+ _java_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
if (_vm_thread != NULL)
- _vm_thread->oops_do(&mark_and_push_closure);
+ _vm_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
// Do the real work
cm->drain_marking_stacks(&mark_and_push_closure);
@@ -79,7 +80,8 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
case threads:
{
ResourceMark rm;
- Threads::oops_do(&mark_and_push_closure);
+ CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true);
+ Threads::oops_do(&mark_and_push_closure, &each_active_code_blob);
}
break;
@@ -107,6 +109,11 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
vmSymbols::oops_do(&mark_and_push_closure);
break;
+ case code_cache:
+ // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
+ //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
+ break;
+
default:
fatal("Unknown root type");
}
diff --git a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
index b536dd3f5..ece6384b2 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
@@ -92,7 +92,8 @@ class MarkFromRootsTask : public GCTask {
jvmti = 7,
system_dictionary = 8,
vm_symbols = 9,
- reference_processing = 10
+ reference_processing = 10,
+ code_cache = 11
};
private:
RootType _root_type;
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
index 568b5cf83..d71a9592c 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
@@ -507,16 +507,22 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
// General strong roots.
- Universe::oops_do(mark_and_push_closure());
- ReferenceProcessor::oops_do(mark_and_push_closure());
- JNIHandles::oops_do(mark_and_push_closure()); // Global (strong) JNI handles
- Threads::oops_do(mark_and_push_closure());
- ObjectSynchronizer::oops_do(mark_and_push_closure());
- FlatProfiler::oops_do(mark_and_push_closure());
- Management::oops_do(mark_and_push_closure());
- JvmtiExport::oops_do(mark_and_push_closure());
- SystemDictionary::always_strong_oops_do(mark_and_push_closure());
- vmSymbols::oops_do(mark_and_push_closure());
+ {
+ ParallelScavengeHeap::ParStrongRootsScope psrs;
+ Universe::oops_do(mark_and_push_closure());
+ ReferenceProcessor::oops_do(mark_and_push_closure());
+ JNIHandles::oops_do(mark_and_push_closure()); // Global (strong) JNI handles
+ CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
+ Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
+ ObjectSynchronizer::oops_do(mark_and_push_closure());
+ FlatProfiler::oops_do(mark_and_push_closure());
+ Management::oops_do(mark_and_push_closure());
+ JvmtiExport::oops_do(mark_and_push_closure());
+ SystemDictionary::always_strong_oops_do(mark_and_push_closure());
+ vmSymbols::oops_do(mark_and_push_closure());
+ // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
+ //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
+ }
// Flush marking stack.
follow_stack();
@@ -609,7 +615,7 @@ void PSMarkSweep::mark_sweep_phase3() {
Universe::oops_do(adjust_root_pointer_closure());
ReferenceProcessor::oops_do(adjust_root_pointer_closure());
JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
- Threads::oops_do(adjust_root_pointer_closure());
+ Threads::oops_do(adjust_root_pointer_closure(), NULL);
ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
FlatProfiler::oops_do(adjust_root_pointer_closure());
Management::oops_do(adjust_root_pointer_closure());
@@ -617,6 +623,7 @@ void PSMarkSweep::mark_sweep_phase3() {
// SO_AllClasses
SystemDictionary::oops_do(adjust_root_pointer_closure());
vmSymbols::oops_do(adjust_root_pointer_closure());
+ //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index 76577c3c5..0b8ee68ba 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -2322,6 +2322,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
{
TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
+ ParallelScavengeHeap::ParStrongRootsScope psrs;
GCTaskQueue* q = GCTaskQueue::create();
@@ -2335,6 +2336,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::vm_symbols));
+ q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
if (parallel_gc_threads > 1) {
for (uint j = 0; j < parallel_gc_threads; j++) {
@@ -2405,7 +2407,7 @@ void PSParallelCompact::adjust_roots() {
Universe::oops_do(adjust_root_pointer_closure());
ReferenceProcessor::oops_do(adjust_root_pointer_closure());
JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
- Threads::oops_do(adjust_root_pointer_closure());
+ Threads::oops_do(adjust_root_pointer_closure(), NULL);
ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
FlatProfiler::oops_do(adjust_root_pointer_closure());
Management::oops_do(adjust_root_pointer_closure());
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
index 0a4ba7758..443034aa4 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -799,8 +799,7 @@ class PSParallelCompact : AllStatic {
FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
- virtual const bool do_nmethods() const { return true; }
- };
+ };
class FollowStackClosure: public VoidClosure {
private:
@@ -817,6 +816,8 @@ class PSParallelCompact : AllStatic {
AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
+ // do not walk from thread stacks to the code cache on this phase
+ virtual void do_code_blob(CodeBlob* cb) const { }
};
// Closure for verifying update of pointers. Does not
@@ -1062,7 +1063,6 @@ class PSParallelCompact : AllStatic {
MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
- virtual const bool do_nmethods() const { return true; }
};
PSParallelCompact();
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
index adfa59e68..e25ef6573 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
@@ -358,6 +358,7 @@ bool PSScavenge::invoke_no_policy() {
PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
{
// TraceTime("Roots");
+ ParallelScavengeHeap::ParStrongRootsScope psrs;
GCTaskQueue* q = GCTaskQueue::create();
@@ -376,6 +377,7 @@ bool PSScavenge::invoke_no_policy() {
q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
+ q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
ParallelTaskTerminator terminator(
gc_task_manager()->workers(),
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
index 52913e075..c74bd749b 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
@@ -66,7 +66,7 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
case threads:
{
ResourceMark rm;
- Threads::oops_do(&roots_closure);
+ Threads::oops_do(&roots_closure, NULL);
}
break;
@@ -90,6 +90,14 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
JvmtiExport::oops_do(&roots_closure);
break;
+
+ case code_cache:
+ {
+ CodeBlobToOopClosure each_scavengable_code_blob(&roots_closure, /*do_marking=*/ true);
+ CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
+ }
+ break;
+
default:
fatal("Unknown root type");
}
@@ -107,12 +115,13 @@ void ThreadRootsTask::do_it(GCTaskManager* manager, uint which) {
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
PSScavengeRootsClosure roots_closure(pm);
+ CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true);
if (_java_thread != NULL)
- _java_thread->oops_do(&roots_closure);
+ _java_thread->oops_do(&roots_closure, &roots_in_blobs);
if (_vm_thread != NULL)
- _vm_thread->oops_do(&roots_closure);
+ _vm_thread->oops_do(&roots_closure, &roots_in_blobs);
// Do the real work
pm->drain_stacks(false);
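
The pattern in ThreadRootsTask above, an oop closure paired with an optional
CodeBlobToOopClosure, is the new shape of every stack walk in this change: a
NULL code closure (as in the adjust phases) simply skips the code blobs. A toy
model of that two-closure walk, with hypothetical Frame/Oop types standing in
for the real ones:

    #include <cstdio>
    #include <vector>

    struct Oop      { int id; };
    struct CodeBlob { int id; };

    struct OopVisitor  { void do_oop(Oop* o) { std::printf("oop %d\n", o->id); } };
    struct CodeVisitor { void do_code_blob(CodeBlob* cb) { std::printf("blob %d\n", cb->id); } };

    struct Frame {
      std::vector<Oop> oops;
      CodeBlob* blob;            // nullptr for interpreted frames
    };

    static void thread_oops_do(std::vector<Frame>& stack, OopVisitor* f, CodeVisitor* cf) {
      for (Frame& fr : stack) {
        for (Oop& o : fr.oops) f->do_oop(&o);
        if (cf != nullptr && fr.blob != nullptr)
          cf->do_code_blob(fr.blob);   // a NULL cf skips code, like Threads::oops_do(cl, NULL)
      }
    }

    int main() {
      CodeBlob nm{7};
      std::vector<Frame> stack = { { { {1}, {2} }, &nm }, { { {3} }, nullptr } };
      OopVisitor f; CodeVisitor cf;
      thread_oops_do(stack, &f, &cf);      // scavenge-style: visit code blobs too
      thread_oops_do(stack, &f, nullptr);  // adjust-style: oops only
      return 0;
    }
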
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp b/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp
index e4a3dedc2..fcb8381a9 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp
@@ -54,7 +54,8 @@ class ScavengeRootsTask : public GCTask {
flat_profiler = 5,
system_dictionary = 6,
management = 7,
- jvmti = 8
+ jvmti = 8,
+ code_cache = 9
};
private:
RootType _root_type;
diff --git a/src/share/vm/gc_implementation/shared/markSweep.cpp b/src/share/vm/gc_implementation/shared/markSweep.cpp
index c18d6e877..6e2515e55 100644
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp
@@ -69,6 +69,7 @@ void MarkSweep::follow_weak_klass_links() {
}
MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
+CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true);
void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/gc_implementation/shared/markSweep.hpp
index b2e8d22d1..4141adade 100644
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp
@@ -57,14 +57,12 @@ class MarkSweep : AllStatic {
public:
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
- virtual const bool do_nmethods() const { return true; }
};
class MarkAndPushClosure: public OopClosure {
public:
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
- virtual const bool do_nmethods() const { return true; }
};
class FollowStackClosure: public VoidClosure {
@@ -163,6 +161,7 @@ class MarkSweep : AllStatic {
public:
// Public closures
static FollowRootClosure follow_root_closure;
+ static CodeBlobToOopClosure follow_code_root_closure; // => follow_root_closure
static MarkAndPushClosure mark_and_push_closure;
static FollowStackClosure follow_stack_closure;
static AdjustPointerClosure adjust_root_pointer_closure;
diff --git a/src/share/vm/gc_interface/collectedHeap.hpp b/src/share/vm/gc_interface/collectedHeap.hpp
index 11da8de2e..8841fa448 100644
--- a/src/share/vm/gc_interface/collectedHeap.hpp
+++ b/src/share/vm/gc_interface/collectedHeap.hpp
@@ -256,6 +256,14 @@ class CollectedHeap : public CHeapObj {
return p == NULL || is_in_permanent(p);
}
+ // An object is scavengable if its location may move during a scavenge.
+ // (A scavenge is a GC which is not a full GC.)
+ // Currently, this just means it is not perm (and not null).
+ // This could change if we rethink what's in perm-gen.
+ bool is_scavengable(const void *p) const {
+ return !is_in_permanent_or_null(p);
+ }
+
// Returns "TRUE" if "p" is a method oop in the
// current heap, with high probability. This predicate
// is not stable, in general.
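
A minimal sketch of the predicate just added, under the comment's own current
definition (scavengable means not perm and not null); the Heap type here is
hypothetical, not the real CollectedHeap:

    #include <cassert>

    struct Heap {
      const char* perm_lo; const char* perm_hi;
      bool is_in_permanent(const void* p) const {
        return p >= perm_lo && p < perm_hi;
      }
      bool is_in_permanent_or_null(const void* p) const {
        return p == nullptr || is_in_permanent(p);
      }
      // An object is scavengable if its location may move during a scavenge.
      bool is_scavengable(const void* p) const {
        return !is_in_permanent_or_null(p);
      }
    };

    int main() {
      static char perm_space[16], young_space[16];
      Heap h{perm_space, perm_space + sizeof perm_space};
      assert(!h.is_scavengable(nullptr));      // null never moves
      assert(!h.is_scavengable(perm_space));   // perm objects stay put in a scavenge
      assert(h.is_scavengable(young_space));   // anything else may move
      return 0;
    }
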
diff --git a/src/share/vm/memory/defNewGeneration.cpp b/src/share/vm/memory/defNewGeneration.cpp
index 93a2e57e3..7db8b9dea 100644
--- a/src/share/vm/memory/defNewGeneration.cpp
+++ b/src/share/vm/memory/defNewGeneration.cpp
@@ -555,12 +555,14 @@ void DefNewGeneration::collect(bool full,
"save marks have not been newly set.");
gch->gen_process_strong_roots(_level,
- true, // Process younger gens, if any, as
- // strong roots.
- false,// not collecting permanent generation.
+ true, // Process younger gens, if any,
+ // as strong roots.
+ true, // activate StrongRootsScope
+ false, // not collecting perm generation.
SharedHeap::SO_AllClasses,
- &fsc_with_gc_barrier,
- &fsc_with_no_gc_barrier);
+ &fsc_with_no_gc_barrier,
+ true, // walk *all* scavengable nmethods
+ &fsc_with_gc_barrier);
// "evacuate followers".
evacuate_followers.do_void();
diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp
index 47a0602ec..5bff15970 100644
--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp
@@ -677,13 +677,23 @@ static AssertIsPermClosure assert_is_perm_closure;
void GenCollectedHeap::
gen_process_strong_roots(int level,
bool younger_gens_as_roots,
+ bool activate_scope,
bool collecting_perm_gen,
SharedHeap::ScanningOption so,
- OopsInGenClosure* older_gens,
- OopsInGenClosure* not_older_gens) {
+ OopsInGenClosure* not_older_gens,
+ bool do_code_roots,
+ OopsInGenClosure* older_gens) {
// General strong roots.
- SharedHeap::process_strong_roots(collecting_perm_gen, so,
- not_older_gens, older_gens);
+
+ if (!do_code_roots) {
+ SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
+ not_older_gens, NULL, older_gens);
+ } else {
+ bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
+ CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
+ SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
+ not_older_gens, &code_roots, older_gens);
+ }
if (younger_gens_as_roots) {
if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
@@ -706,8 +716,9 @@ gen_process_strong_roots(int level,
}
void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
+ CodeBlobClosure* code_roots,
OopClosure* non_root_closure) {
- SharedHeap::process_weak_roots(root_closure, non_root_closure);
+ SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
// "Local" "weak" refs
for (int i = 0; i < _n_gens; i++) {
_gens[i]->ref_processor()->weak_oops_do(root_closure);
diff --git a/src/share/vm/memory/genCollectedHeap.hpp b/src/share/vm/memory/genCollectedHeap.hpp
index 0c14c7f3d..1a04fb0a3 100644
--- a/src/share/vm/memory/genCollectedHeap.hpp
+++ b/src/share/vm/memory/genCollectedHeap.hpp
@@ -408,16 +408,22 @@ public:
// "SO_SystemClasses" to all the "system" classes and loaders;
// "SO_Symbols_and_Strings" applies the closure to all entries in
// SymbolsTable and StringTable.
- void gen_process_strong_roots(int level, bool younger_gens_as_roots,
+ void gen_process_strong_roots(int level,
+ bool younger_gens_as_roots,
+ // The remaining arguments are in an order
+ // consistent with SharedHeap::process_strong_roots:
+ bool activate_scope,
bool collecting_perm_gen,
SharedHeap::ScanningOption so,
- OopsInGenClosure* older_gens,
- OopsInGenClosure* not_older_gens);
+ OopsInGenClosure* not_older_gens,
+ bool do_code_roots,
+ OopsInGenClosure* older_gens);
// Apply "blk" to all the weak roots of the system. These include
// JNI weak roots, the code cache, system dictionary, symbol table,
// string table, and referents of reachable weak refs.
void gen_process_weak_roots(OopClosure* root_closure,
+ CodeBlobClosure* code_roots,
OopClosure* non_root_closure);
// Set the saved marks of generations, if that makes sense.
diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp
index ba42d5d21..a44057748 100644
--- a/src/share/vm/memory/genMarkSweep.cpp
+++ b/src/share/vm/memory/genMarkSweep.cpp
@@ -240,9 +240,12 @@ void GenMarkSweep::mark_sweep_phase1(int level,
gch->gen_process_strong_roots(level,
false, // Younger gens are not roots.
+ true, // activate StrongRootsScope
true, // Collecting permanent generation.
SharedHeap::SO_SystemClasses,
- &follow_root_closure, &follow_root_closure);
+ &follow_root_closure,
+ true, // walk code active on stacks
+ &follow_root_closure);
// Process reference objects found during marking
{
@@ -330,14 +333,19 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
gch->gen_process_strong_roots(level,
false, // Younger gens are not roots.
+ true, // activate StrongRootsScope
true, // Collecting permanent generation.
SharedHeap::SO_AllClasses,
&adjust_root_pointer_closure,
+ false, // do not walk code
&adjust_root_pointer_closure);
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
+ CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
+ /*do_marking=*/ false);
gch->gen_process_weak_roots(&adjust_root_pointer_closure,
+ &adjust_code_pointer_closure,
&adjust_pointer_closure);
adjust_marks();
diff --git a/src/share/vm/memory/iterator.cpp b/src/share/vm/memory/iterator.cpp
index d52eeb904..b619e2098 100644
--- a/src/share/vm/memory/iterator.cpp
+++ b/src/share/vm/memory/iterator.cpp
@@ -32,3 +32,42 @@ void ObjectToOopClosure::do_object(oop obj) {
void VoidClosure::do_void() {
ShouldNotCallThis();
}
+
+MarkingCodeBlobClosure::MarkScope::MarkScope(bool activate)
+ : _active(activate)
+{
+ if (_active) nmethod::oops_do_marking_prologue();
+}
+
+MarkingCodeBlobClosure::MarkScope::~MarkScope() {
+ if (_active) nmethod::oops_do_marking_epilogue();
+}
+
+void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
+ if (!cb->is_nmethod()) return;
+ nmethod* nm = (nmethod*) cb;
+ if (!nm->test_set_oops_do_mark()) {
+ NOT_PRODUCT(if (TraceScavenge) nm->print_on(tty, "oops_do, 1st visit\n"));
+ do_newly_marked_nmethod(nm);
+ } else {
+ NOT_PRODUCT(if (TraceScavenge) nm->print_on(tty, "oops_do, skipped on 2nd visit\n"));
+ }
+}
+
+void CodeBlobToOopClosure::do_newly_marked_nmethod(CodeBlob* cb) {
+ cb->oops_do(_cl);
+}
+
+void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
+ if (!_do_marking) {
+ NOT_PRODUCT(if (TraceScavenge && Verbose && cb->is_nmethod()) ((nmethod*)cb)->print_on(tty, "oops_do, unmarked visit\n"));
+ // This assert won't work, since there are lots of mini-passes
+ // (mostly in debug mode) that co-exist with marking phases.
+ //assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
+ cb->oops_do(_cl);
+ } else {
+ MarkingCodeBlobClosure::do_code_blob(cb);
+ }
+}
+
+
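
The marking path above leans on test_set_oops_do_mark() so that when several
GC workers reach the same nmethod through different thread stacks, only the
first visit does real work. A standalone model of that visit-once protocol;
a std::atomic flag stands in for the real CAS-linked _oops_do_mark_link list:

    #include <atomic>
    #include <cstdio>

    struct NMethod {
      std::atomic<bool> oops_do_marked{false};
      // Returns the previous value: false means we are the first visitor.
      bool test_set_oops_do_mark() { return oops_do_marked.exchange(true); }
    };

    static void do_code_blob(NMethod* nm) {
      if (!nm->test_set_oops_do_mark()) {
        std::printf("oops_do, 1st visit: process oops\n");   // do_newly_marked_nmethod(nm)
      } else {
        std::printf("oops_do, skipped on repeat visit\n");
      }
    }

    int main() {
      NMethod nm;
      do_code_blob(&nm);            // processes
      do_code_blob(&nm);            // deduplicated
      nm.oops_do_marked = false;    // stand-in for oops_do_marking_epilogue()
      do_code_blob(&nm);            // next GC cycle processes again
      return 0;
    }
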
diff --git a/src/share/vm/memory/iterator.hpp b/src/share/vm/memory/iterator.hpp
index 07915f11f..1c001df93 100644
--- a/src/share/vm/memory/iterator.hpp
+++ b/src/share/vm/memory/iterator.hpp
@@ -24,6 +24,7 @@
// The following classes are C++ `closures` for iterating over objects, roots and spaces
+class CodeBlob;
class ReferenceProcessor;
// Closure provides abortability.
@@ -57,9 +58,6 @@ class OopClosure : public Closure {
virtual const bool should_remember_klasses() const { return false; }
virtual void remember_klass(Klass* k) { /* do nothing */ }
- // If "true", invoke on nmethods (when scanning compiled frames).
- virtual const bool do_nmethods() const { return false; }
-
// The methods below control how object iterations invoking this closure
// should be performed:
@@ -158,6 +156,51 @@ class CompactibleSpaceClosure : public StackObj {
};
+// CodeBlobClosure is used for iterating through code blobs
+// in the code cache or on thread stacks
+
+class CodeBlobClosure : public Closure {
+ public:
+ // Called for each code blob.
+ virtual void do_code_blob(CodeBlob* cb) = 0;
+};
+
+
+class MarkingCodeBlobClosure : public CodeBlobClosure {
+ public:
+ // Called for each code blob, but at most once per unique blob.
+ virtual void do_newly_marked_nmethod(CodeBlob* cb) = 0;
+
+ virtual void do_code_blob(CodeBlob* cb);
+ // = { if (!nmethod(cb)->test_set_oops_do_mark()) do_newly_marked_nmethod(cb); }
+
+ class MarkScope : public StackObj {
+ protected:
+ bool _active;
+ public:
+ MarkScope(bool activate = true);
+ // = { if (active) nmethod::oops_do_marking_prologue(); }
+ ~MarkScope();
+ // = { if (active) nmethod::oops_do_marking_epilogue(); }
+ };
+};
+
+
+// Applies an oop closure to all ref fields in code blobs
+// iterated over in an object iteration.
+class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
+ OopClosure* _cl;
+ bool _do_marking;
+public:
+ virtual void do_newly_marked_nmethod(CodeBlob* cb);
+ // = { cb->oops_do(_cl); }
+ virtual void do_code_blob(CodeBlob* cb);
+ // = { if (_do_marking) super::do_code_blob(cb); else cb->oops_do(_cl); }
+ CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
+ : _cl(cl), _do_marking(do_marking) {}
+};
+
+
// MonitorClosure is used for iterating over monitors in the monitors cache
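
To see the three layers declared above working together, here is a
self-contained sketch that mirrors (but does not reuse) these declarations:
an abstract per-blob hook, a marking layer that deduplicates visits, and an
adapter forwarding a blob's oops to an ordinary oop closure. For simplicity
the mark lives directly on the blob rather than on a global mark list:

    #include <cstdio>
    #include <vector>

    struct Oop { int v; };
    struct OopClosure { virtual void do_oop(Oop* p) = 0; virtual ~OopClosure() = default; };

    struct CodeBlob {
      std::vector<Oop> oops;
      bool marked = false;
      void oops_do(OopClosure* cl) { for (Oop& o : oops) cl->do_oop(&o); }
    };

    struct CodeBlobClosure {
      virtual void do_code_blob(CodeBlob* cb) = 0;
      virtual ~CodeBlobClosure() = default;
    };

    struct MarkingCodeBlobClosure : CodeBlobClosure {
      virtual void do_newly_marked_nmethod(CodeBlob* cb) = 0;
      void do_code_blob(CodeBlob* cb) override {
        if (!cb->marked) { cb->marked = true; do_newly_marked_nmethod(cb); }
      }
    };

    struct CodeBlobToOopClosure : MarkingCodeBlobClosure {
      OopClosure* _cl; bool _do_marking;
      CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
        : _cl(cl), _do_marking(do_marking) {}
      void do_newly_marked_nmethod(CodeBlob* cb) override { cb->oops_do(_cl); }
      void do_code_blob(CodeBlob* cb) override {
        if (_do_marking) MarkingCodeBlobClosure::do_code_blob(cb);  // at most once per cycle
        else             cb->oops_do(_cl);                          // every visit
      }
    };

    struct Print : OopClosure { void do_oop(Oop* p) override { std::printf("oop %d\n", p->v); } };

    int main() {
      CodeBlob cb; cb.oops = { {1}, {2} };
      Print p;
      CodeBlobToOopClosure marking(&p, /*do_marking=*/ true);
      marking.do_code_blob(&cb);   // visits both oops
      marking.do_code_blob(&cb);   // deduplicated by the mark
      CodeBlobToOopClosure plain(&p, /*do_marking=*/ false);
      plain.do_code_blob(&cb);     // visits again regardless of the mark
      return 0;
    }
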
diff --git a/src/share/vm/memory/sharedHeap.cpp b/src/share/vm/memory/sharedHeap.cpp
index d1ec7e0fd..31e517c8b 100644
--- a/src/share/vm/memory/sharedHeap.cpp
+++ b/src/share/vm/memory/sharedHeap.cpp
@@ -100,12 +100,27 @@ void SharedHeap::change_strong_roots_parity() {
"Not in range.");
}
-void SharedHeap::process_strong_roots(bool collecting_perm_gen,
+SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
+ : MarkScope(activate)
+{
+ if (_active) {
+ outer->change_strong_roots_parity();
+ }
+}
+
+SharedHeap::StrongRootsScope::~StrongRootsScope() {
+ // nothing particular
+}
+
+void SharedHeap::process_strong_roots(bool activate_scope,
+ bool collecting_perm_gen,
ScanningOption so,
OopClosure* roots,
+ CodeBlobClosure* code_roots,
OopsInGenClosure* perm_blk) {
+ StrongRootsScope srs(this, activate_scope);
// General strong roots.
- if (n_par_threads() == 0) change_strong_roots_parity();
+ assert(_strong_roots_parity != 0, "must have called prologue code");
if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
Universe::oops_do(roots);
ReferenceProcessor::oops_do(roots);
@@ -117,9 +132,9 @@ void SharedHeap::process_strong_roots(bool collecting_perm_gen,
JNIHandles::oops_do(roots);
// All threads execute this; the individual threads are task groups.
if (ParallelGCThreads > 0) {
- Threads::possibly_parallel_oops_do(roots);
+ Threads::possibly_parallel_oops_do(roots, code_roots);
} else {
- Threads::oops_do(roots);
+ Threads::oops_do(roots, code_roots);
}
if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
ObjectSynchronizer::oops_do(roots);
@@ -156,11 +171,29 @@ void SharedHeap::process_strong_roots(bool collecting_perm_gen,
}
if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
- if (so & SO_CodeCache) {
- CodeCache::oops_do(roots);
- }
+ if (so & SO_CodeCache) {
+ // (Currently, CMSCollector uses this to do intermediate-strength collections.)
+ assert(collecting_perm_gen, "scanning all of code cache");
+ assert(code_roots != NULL, "must supply closure for code cache");
+ if (code_roots != NULL) {
+ CodeCache::blobs_do(code_roots);
+ }
+ } else if (so & (SO_SystemClasses|SO_AllClasses)) {
+ if (!collecting_perm_gen) {
+ // If we are collecting from class statics, but we are not going to
+ // visit all of the CodeCache, collect from the non-perm roots if any.
+ // This makes the code cache function temporarily as a source of strong
+ // roots for oops, until the next major collection.
+ //
+ // If collecting_perm_gen is true, we require that this phase will call
+ // CodeCache::do_unloading. This will kill off nmethods with expired
+ // weak references, such as stale invokedynamic targets.
+ CodeCache::scavenge_root_nmethods_do(code_roots);
+ }
+ }
// Verify if the code cache contents are in the perm gen
- NOT_PRODUCT(CodeCache::oops_do(&assert_is_perm_closure));
+ NOT_PRODUCT(CodeBlobToOopClosure assert_code_is_perm(&assert_is_perm_closure, /*do_marking=*/ false));
+ NOT_PRODUCT(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_perm));
}
// Roots that should point only into permanent generation.
@@ -220,11 +253,12 @@ public:
// just skip adjusting any shared entries in the string table.
void SharedHeap::process_weak_roots(OopClosure* root_closure,
+ CodeBlobClosure* code_roots,
OopClosure* non_root_closure) {
// Global (weak) JNI handles
JNIHandles::weak_oops_do(&always_true, root_closure);
- CodeCache::oops_do(non_root_closure);
+ CodeCache::blobs_do(code_roots);
SymbolTable::oops_do(root_closure);
if (UseSharedSpaces && !DumpSharedSpaces) {
SkipAdjustingSharedStrings skip_closure(root_closure);
diff --git a/src/share/vm/memory/sharedHeap.hpp b/src/share/vm/memory/sharedHeap.hpp
index c44800a9b..0bf863fb1 100644
--- a/src/share/vm/memory/sharedHeap.hpp
+++ b/src/share/vm/memory/sharedHeap.hpp
@@ -165,9 +165,21 @@ public:
// c) to never return a distinguished value (zero) with which such
// task-claiming variables may be initialized, to indicate "never
// claimed".
+ private:
void change_strong_roots_parity();
+ public:
int strong_roots_parity() { return _strong_roots_parity; }
+ // Call these in sequential code around process_strong_roots.
+ // strong_roots_prologue calls change_strong_roots_parity, if
+ // parallel tasks are enabled.
+ class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
+ public:
+ StrongRootsScope(SharedHeap* outer, bool activate = true);
+ ~StrongRootsScope();
+ };
+ friend class StrongRootsScope;
+
enum ScanningOption {
SO_None = 0x0,
SO_AllClasses = 0x1,
@@ -198,15 +210,18 @@ public:
// "SO_Symbols" applies the closure to all entries in SymbolsTable;
// "SO_Strings" applies the closure to all entries in StringTable;
// "SO_CodeCache" applies the closure to all elements of the CodeCache.
- void process_strong_roots(bool collecting_perm_gen,
+ void process_strong_roots(bool activate_scope,
+ bool collecting_perm_gen,
ScanningOption so,
OopClosure* roots,
+ CodeBlobClosure* code_roots,
OopsInGenClosure* perm_blk);
// Apply "blk" to all the weak roots of the system. These include
// JNI weak roots, the code cache, system dictionary, symbol table,
// string table.
void process_weak_roots(OopClosure* root_closure,
+ CodeBlobClosure* code_roots,
OopClosure* non_root_closure);
diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp
index c2b554829..d75fcca0f 100644
--- a/src/share/vm/oops/instanceKlass.cpp
+++ b/src/share/vm/oops/instanceKlass.cpp
@@ -2025,7 +2025,7 @@ void instanceKlass::add_osr_nmethod(nmethod* n) {
// This is a short non-blocking critical region, so the no safepoint check is ok.
OsrList_lock->lock_without_safepoint_check();
assert(n->is_osr_method(), "wrong kind of nmethod");
- n->set_link(osr_nmethods_head());
+ n->set_osr_link(osr_nmethods_head());
set_osr_nmethods_head(n);
// Remember to unlock again
OsrList_lock->unlock();
@@ -2041,17 +2041,17 @@ void instanceKlass::remove_osr_nmethod(nmethod* n) {
// Search for match
while(cur != NULL && cur != n) {
last = cur;
- cur = cur->link();
+ cur = cur->osr_link();
}
if (cur == n) {
if (last == NULL) {
// Remove first element
- set_osr_nmethods_head(osr_nmethods_head()->link());
+ set_osr_nmethods_head(osr_nmethods_head()->osr_link());
} else {
- last->set_link(cur->link());
+ last->set_osr_link(cur->osr_link());
}
}
- n->set_link(NULL);
+ n->set_osr_link(NULL);
// Remember to unlock again
OsrList_lock->unlock();
}
@@ -2068,7 +2068,7 @@ nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci) const {
OsrList_lock->unlock();
return osr;
}
- osr = osr->link();
+ osr = osr->osr_link();
}
OsrList_lock->unlock();
return NULL;
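
The rename from link() to osr_link() leaves the list manipulation itself
untouched; remove_osr_nmethod() above is the usual singly-linked-list unlink.
A plain standalone sketch of that logic (not HotSpot code):

    #include <cassert>

    struct NMethod { NMethod* osr_link = nullptr; };

    static void remove_from_list(NMethod*& head, NMethod* n) {
      NMethod* last = nullptr;
      NMethod* cur = head;
      while (cur != nullptr && cur != n) { last = cur; cur = cur->osr_link; }
      if (cur == n) {
        if (last == nullptr) head = head->osr_link;       // remove first element
        else                 last->osr_link = cur->osr_link;
      }
      n->osr_link = nullptr;    // detach unconditionally, as in the original
    }

    int main() {
      NMethod a, b, c;
      NMethod* head = &a; a.osr_link = &b; b.osr_link = &c;
      remove_from_list(head, &b);
      assert(head == &a && a.osr_link == &c && b.osr_link == nullptr);
      remove_from_list(head, &a);
      assert(head == &c);
      return 0;
    }
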
diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp
index 6b994d64b..9b7e9baf5 100644
--- a/src/share/vm/oops/oop.hpp
+++ b/src/share/vm/oops/oop.hpp
@@ -330,6 +330,7 @@ class oopDesc {
bool is_perm() const;
bool is_perm_or_null() const;
+ bool is_scavengable() const;
bool is_shared() const;
bool is_shared_readonly() const;
bool is_shared_readwrite() const;
diff --git a/src/share/vm/oops/oop.inline2.hpp b/src/share/vm/oops/oop.inline2.hpp
index f93847478..3f28ea9ce 100644
--- a/src/share/vm/oops/oop.inline2.hpp
+++ b/src/share/vm/oops/oop.inline2.hpp
@@ -34,3 +34,7 @@ inline bool oopDesc::is_perm() const {
inline bool oopDesc::is_perm_or_null() const {
return this == NULL || is_perm();
}
+
+inline bool oopDesc::is_scavengable() const {
+ return Universe::heap()->is_scavengable(this);
+}
diff --git a/src/share/vm/opto/output.cpp b/src/share/vm/opto/output.cpp
index 2a4919ada..3274fd4d9 100644
--- a/src/share/vm/opto/output.cpp
+++ b/src/share/vm/opto/output.cpp
@@ -611,7 +611,7 @@ void Compile::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
assert(cik->is_instance_klass() ||
cik->is_array_klass(), "Not supported allocation.");
sv = new ObjectValue(spobj->_idx,
- new ConstantOopWriteValue(cik->encoding()));
+ new ConstantOopWriteValue(cik->constant_encoding()));
Compile::set_sv_for_object_node(objs, sv);
uint first_ind = spobj->first_index();
@@ -702,13 +702,13 @@ void Compile::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
case Type::AryPtr:
case Type::InstPtr:
case Type::KlassPtr: // fall through
- array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->encoding()));
+ array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->constant_encoding()));
break;
case Type::NarrowOop:
if (t == TypeNarrowOop::NULL_PTR) {
array->append(new ConstantOopWriteValue(NULL));
} else {
- array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->encoding()));
+ array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding()));
}
break;
case Type::Int:
@@ -871,7 +871,7 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
assert(cik->is_instance_klass() ||
cik->is_array_klass(), "Not supported allocation.");
ObjectValue* sv = new ObjectValue(spobj->_idx,
- new ConstantOopWriteValue(cik->encoding()));
+ new ConstantOopWriteValue(cik->constant_encoding()));
Compile::set_sv_for_object_node(objs, sv);
uint first_ind = spobj->first_index();
@@ -890,7 +890,7 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
}
} else {
const TypePtr *tp = obj_node->bottom_type()->make_ptr();
- scval = new ConstantOopWriteValue(tp->is_instptr()->const_oop()->encoding());
+ scval = new ConstantOopWriteValue(tp->is_instptr()->const_oop()->constant_encoding());
}
OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
diff --git a/src/share/vm/opto/parse.hpp b/src/share/vm/opto/parse.hpp
index 4b3ca5912..37f7b629f 100644
--- a/src/share/vm/opto/parse.hpp
+++ b/src/share/vm/opto/parse.hpp
@@ -469,7 +469,7 @@ class Parse : public GraphKit {
// loading from a constant field or the constant pool
// returns false if push failed (non-perm field constants only, not ldcs)
- bool push_constant(ciConstant con);
+ bool push_constant(ciConstant con, bool require_constant = false);
// implementation of object creation bytecodes
void do_new();
diff --git a/src/share/vm/opto/parse2.cpp b/src/share/vm/opto/parse2.cpp
index 496b4f230..5457d966b 100644
--- a/src/share/vm/opto/parse2.cpp
+++ b/src/share/vm/opto/parse2.cpp
@@ -1325,7 +1325,8 @@ void Parse::do_one_bytecode() {
}
}
}
- push_constant(constant);
+ bool pushed = push_constant(constant, true);
+ guarantee(pushed, "must be possible to push this constant");
}
break;
diff --git a/src/share/vm/opto/parse3.cpp b/src/share/vm/opto/parse3.cpp
index 04b63939a..7125cb5d6 100644
--- a/src/share/vm/opto/parse3.cpp
+++ b/src/share/vm/opto/parse3.cpp
@@ -267,7 +267,7 @@ void Parse::do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool
}
-bool Parse::push_constant(ciConstant constant) {
+bool Parse::push_constant(ciConstant constant, bool require_constant) {
switch (constant.basic_type()) {
case T_BOOLEAN: push( intcon(constant.as_boolean()) ); break;
case T_INT: push( intcon(constant.as_int()) ); break;
@@ -279,13 +279,16 @@ bool Parse::push_constant(ciConstant constant) {
case T_LONG: push_pair( longcon(constant.as_long()) ); break;
case T_ARRAY:
case T_OBJECT: {
- // the oop is in perm space if the ciObject "has_encoding"
+ // cases:
+ // can_be_constant = (oop not scavengable || ScavengeRootsInCode != 0)
+ // should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
+ // An oop is not scavengable if it is in the perm gen.
ciObject* oop_constant = constant.as_object();
if (oop_constant->is_null_object()) {
push( zerocon(T_OBJECT) );
break;
- } else if (oop_constant->has_encoding()) {
- push( makecon(TypeOopPtr::make_from_constant(oop_constant)) );
+ } else if (require_constant || oop_constant->should_be_constant()) {
+ push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) );
break;
} else {
// we cannot inline the oop, but we can use it later to narrow a type
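
The comment block in push_constant above states the policy in terms of the new
ScavengeRootsInCode flag. A compact sketch of that reading (assumed semantics;
these free functions are not the real ciObject API):

    #include <cassert>

    static int ScavengeRootsInCode = 0;   // 0: never, 1: allowed, 2: preferred

    static bool can_be_constant(bool scavengable) {
      return !scavengable || ScavengeRootsInCode != 0;
    }
    static bool should_be_constant(bool scavengable) {
      return !scavengable || ScavengeRootsInCode >= 2;
    }

    int main() {
      // Perm-gen (non-scavengable) oops are always embeddable in code.
      assert(can_be_constant(false) && should_be_constant(false));
      // Scavengable oops need flag support, and are only preferred at level 2.
      ScavengeRootsInCode = 0; assert(!can_be_constant(true));
      ScavengeRootsInCode = 1; assert(can_be_constant(true) && !should_be_constant(true));
      ScavengeRootsInCode = 2; assert(should_be_constant(true));
      return 0;
    }
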
diff --git a/src/share/vm/opto/type.cpp b/src/share/vm/opto/type.cpp
index f06726a3c..84779eb3a 100644
--- a/src/share/vm/opto/type.cpp
+++ b/src/share/vm/opto/type.cpp
@@ -2411,14 +2411,13 @@ const TypeOopPtr* TypeOopPtr::make_from_klass_common(ciKlass *klass, bool klass_
//------------------------------make_from_constant-----------------------------
// Make a java pointer from an oop constant
-const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o) {
+const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_constant) {
if (o->is_method_data() || o->is_method()) {
// Treat much like a typeArray of bytes, like below, but fake the type...
- assert(o->has_encoding(), "must be a perm space object");
const Type* etype = (Type*)get_const_basic_type(T_BYTE);
const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
ciKlass *klass = ciTypeArrayKlass::make((BasicType) T_BYTE);
- assert(o->has_encoding(), "method data oops should be tenured");
+ assert(o->can_be_constant(), "method data oops should be tenured");
const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
return arr;
} else {
@@ -2427,8 +2426,9 @@ const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o) {
ciKlass *klass = o->klass();
if (klass->is_instance_klass()) {
// Element is an instance
- if (!o->has_encoding()) { // not a perm-space constant
- // %%% remove this restriction by rewriting non-perm ConPNodes in a later phase
+ if (require_constant) {
+ if (!o->can_be_constant()) return NULL;
+ } else if (!o->should_be_constant()) {
return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, 0);
}
return TypeInstPtr::make(o);
@@ -2440,8 +2440,9 @@ const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o) {
// We used to pass NotNull in here, asserting that the sub-arrays
// are all not-null. This is not true in general, as code can
// slam NULLs down in the subarrays.
- if (!o->has_encoding()) { // not a perm-space constant
- // %%% remove this restriction by rewriting non-perm ConPNodes in a later phase
+ if (require_constant) {
+ if (!o->can_be_constant()) return NULL;
+ } else if (!o->should_be_constant()) {
return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
}
const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
@@ -2453,8 +2454,9 @@ const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o) {
const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length()));
// We used to pass NotNull in here, asserting that the array pointer
// is not-null. That was not true in general.
- if (!o->has_encoding()) { // not a perm-space constant
- // %%% remove this restriction by rewriting non-perm ConPNodes in a later phase
+ if (require_constant) {
+ if (!o->can_be_constant()) return NULL;
+ } else if (!o->should_be_constant()) {
return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
}
const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
@@ -2483,7 +2485,7 @@ intptr_t TypeOopPtr::get_con() const {
ShouldNotReachHere();
}
- return (intptr_t)const_oop()->encoding();
+ return (intptr_t)const_oop()->constant_encoding();
}
@@ -3338,14 +3340,19 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const {
ciObject* o = const_oop();
if( _ptr == Constant ) {
if( tap->const_oop() != NULL && !o->equals(tap->const_oop()) ) {
+ xk = (klass() == tap->klass());
ptr = NotNull;
o = NULL;
instance_id = InstanceBot;
+ } else {
+ xk = true;
}
} else if( above_centerline(_ptr) ) {
o = tap->const_oop();
+ xk = true;
+ } else {
+ xk = this->_klass_is_exact;
}
- xk = true;
return TypeAryPtr::make( ptr, o, tary, tap->_klass, xk, off, instance_id );
}
case NotNull:
diff --git a/src/share/vm/opto/type.hpp b/src/share/vm/opto/type.hpp
index 4ad11402b..34fc16c5b 100644
--- a/src/share/vm/opto/type.hpp
+++ b/src/share/vm/opto/type.hpp
@@ -711,7 +711,10 @@ public:
return make_from_klass_common(klass, false, false);
}
// Creates a singleton type given an object.
- static const TypeOopPtr* make_from_constant(ciObject* o);
+ // If the object cannot be rendered as a constant,
+ // may return a non-singleton type.
+ // If require_constant, produce a NULL if a singleton is not possible.
+ static const TypeOopPtr* make_from_constant(ciObject* o, bool require_constant = false);
// Make a generic (unclassed) pointer to an oop.
static const TypeOopPtr* make(PTR ptr, int offset, int instance_id = InstanceBot);
diff --git a/src/share/vm/prims/jvmtiTagMap.cpp b/src/share/vm/prims/jvmtiTagMap.cpp
index 41c0693a4..6637ff9e3 100644
--- a/src/share/vm/prims/jvmtiTagMap.cpp
+++ b/src/share/vm/prims/jvmtiTagMap.cpp
@@ -3126,6 +3126,12 @@ inline bool VM_HeapWalkOperation::collect_simple_roots() {
// exceptions) will be visible.
blk.set_kind(JVMTI_HEAP_REFERENCE_OTHER);
Universe::oops_do(&blk);
+
+ // If there are any non-perm roots in the code cache, visit them.
+ blk.set_kind(JVMTI_HEAP_REFERENCE_OTHER);
+ CodeBlobToOopClosure look_in_blobs(&blk, false);
+ CodeCache::scavenge_root_nmethods_do(&look_in_blobs);
+
return true;
}
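
CodeBlobToOopClosure is the adapter this patch uses to let a plain OopClosure
walk code-cache roots (it is defined in memory/iterator.hpp/cpp, per the file
list at the top of this commit). A sketch of its assumed shape; the real
class also carries marking/claiming logic controlled by the boolean
constructor argument:

    class CodeBlobToOopClosure : public CodeBlobClosure {
      OopClosure* _cl;
      bool        _do_marking;
     public:
      CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
        : _cl(cl), _do_marking(do_marking) {}
      virtual void do_code_blob(CodeBlob* cb) {
        // Forward every oop embedded in the blob to the wrapped closure.
        cb->oops_do(_cl);
        // (claim/mark bookkeeping elided in this sketch)
      }
    };
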
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index 2cce45220..0801eec60 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -2639,16 +2639,22 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
if (EnableInvokeDynamic && !EnableMethodHandles) {
if (!FLAG_IS_DEFAULT(EnableMethodHandles)) {
- warning("forcing EnableMethodHandles true to allow EnableInvokeDynamic");
+ warning("forcing EnableMethodHandles true because EnableInvokeDynamic is true");
}
EnableMethodHandles = true;
}
if (EnableMethodHandles && !AnonymousClasses) {
if (!FLAG_IS_DEFAULT(AnonymousClasses)) {
- warning("forcing AnonymousClasses true to enable EnableMethodHandles");
+ warning("forcing AnonymousClasses true because EnableMethodHandles is true");
}
AnonymousClasses = true;
}
+ if ((EnableMethodHandles || AnonymousClasses) && ScavengeRootsInCode == 0) {
+ if (!FLAG_IS_DEFAULT(ScavengeRootsInCode)) {
+ warning("forcing ScavengeRootsInCode non-zero because EnableMethodHandles or AnonymousClasses is true");
+ }
+ ScavengeRootsInCode = 1;
+ }
if (PrintGCDetails) {
// Turn on -verbose:gc options as well
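
Taken together these checks form a one-way implication chain, so a user only
has to ask for invokedynamic support and the prerequisites follow (the
warnings fire only when the weaker flag was explicitly set the other way):

    // Effective settings after Arguments::parse(), sketched:
    //   -XX:+EnableInvokeDynamic
    //     => EnableMethodHandles = true
    //     => AnonymousClasses    = true
    //     => ScavengeRootsInCode = 1
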
diff --git a/src/share/vm/runtime/frame.cpp b/src/share/vm/runtime/frame.cpp
index 8d53406e4..4ce96408c 100644
--- a/src/share/vm/runtime/frame.cpp
+++ b/src/share/vm/runtime/frame.cpp
@@ -1043,7 +1043,7 @@ void frame::oops_interpreted_arguments_do(symbolHandle signature, bool is_static
finder.oops_do();
}
-void frame::oops_code_blob_do(OopClosure* f, const RegisterMap* reg_map) {
+void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) {
assert(_cb != NULL, "sanity check");
if (_cb->oop_maps() != NULL) {
OopMapSet::oops_do(this, reg_map, f);
@@ -1058,21 +1058,9 @@ void frame::oops_code_blob_do(OopClosure* f, const RegisterMap* reg_map) {
// oops referenced from nmethods active on thread stacks so as to
// prevent them from being collected. However, this visit should be
// restricted to certain phases of the collection only. The
- // closure answers whether it wants nmethods to be traced.
- // (All CodeBlob subtypes other than NMethod currently have
- // an empty oops_do() method.
- if (f->do_nmethods()) {
- _cb->oops_do(f);
- }
-}
-
-void frame::nmethods_code_blob_do() {
- assert(_cb != NULL, "sanity check");
-
- // If we see an activation belonging to a non_entrant nmethod, we mark it.
- if (_cb->is_nmethod() && ((nmethod *)_cb)->is_not_entrant()) {
- ((nmethod*)_cb)->mark_as_seen_on_stack();
- }
+ // closure decides how it wants nmethods to be traced.
+ if (cf != NULL)
+ cf->do_code_blob(_cb);
}
class CompiledArgumentOopFinder: public SignatureInfo {
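
The deleted code asked the OopClosure itself (via do_nmethods()) whether code
blobs should be traced; now the caller passes a CodeBlobClosure, or NULL to
skip blobs entirely, which is exactly what frame::verify() below does. A
minimal hedged example of the new protocol (closure invented for
illustration):

    // Illustrative only: count nmethods active in walked frames.
    class CountNMethodsClosure : public CodeBlobClosure {
     public:
      int _count;
      CountNMethodsClosure() : _count(0) {}
      virtual void do_code_blob(CodeBlob* cb) {
        if (cb->is_nmethod())  _count++;
      }
    };
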
@@ -1201,18 +1189,18 @@ void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) {
}
-void frame::oops_do_internal(OopClosure* f, RegisterMap* map, bool use_interpreter_oop_map_cache) {
+void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
if (is_interpreted_frame()) { oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
} else if (is_entry_frame()) { oops_entry_do (f, map);
- } else if (CodeCache::contains(pc())) { oops_code_blob_do (f, map);
+ } else if (CodeCache::contains(pc())) { oops_code_blob_do (f, cf, map);
} else {
ShouldNotReachHere();
}
}
-void frame::nmethods_do() {
+void frame::nmethods_do(CodeBlobClosure* cf) {
if (_cb != NULL && _cb->is_nmethod()) {
- nmethods_code_blob_do();
+ cf->do_code_blob(_cb);
}
}
@@ -1358,7 +1346,7 @@ void frame::verify(const RegisterMap* map) {
}
}
COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");)
- oops_do_internal(&VerifyOopClosure::verify_oop, (RegisterMap*)map, false);
+ oops_do_internal(&VerifyOopClosure::verify_oop, NULL, (RegisterMap*)map, false);
}
diff --git a/src/share/vm/runtime/frame.hpp b/src/share/vm/runtime/frame.hpp
index 9817a3641..54da49536 100644
--- a/src/share/vm/runtime/frame.hpp
+++ b/src/share/vm/runtime/frame.hpp
@@ -384,16 +384,14 @@ class frame VALUE_OBJ_CLASS_SPEC {
void oops_interpreted_arguments_do(symbolHandle signature, bool is_static, OopClosure* f);
// Iteration of oops
- void oops_do_internal(OopClosure* f, RegisterMap* map, bool use_interpreter_oop_map_cache);
+ void oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
void oops_entry_do(OopClosure* f, const RegisterMap* map);
- void oops_code_blob_do(OopClosure* f, const RegisterMap* map);
+ void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map);
int adjust_offset(methodOop method, int index); // helper for above fn
- // Iteration of nmethods
- void nmethods_code_blob_do();
public:
// Memory management
- void oops_do(OopClosure* f, RegisterMap* map) { oops_do_internal(f, map, true); }
- void nmethods_do();
+ void oops_do(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cf, map, true); }
+ void nmethods_do(CodeBlobClosure* cf);
void gc_prologue();
void gc_epilogue();
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index da22ad578..ba37313b7 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -714,6 +714,11 @@ class CommandLineFlags {
diagnostic(bool, TraceNMethodInstalls, false, \
"Trace nmethod intallation") \
\
+ diagnostic(intx, ScavengeRootsInCode, 0, \
+ "0: do not allow scavengable oops in the code cache; " \
+ "1: allow scavenging from the code cache; " \
+ "2: emit as many constants as the compiler can see") \
+ \
diagnostic(bool, TraceOSRBreakpoint, false, \
"Trace OSR Breakpoint ") \
\
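
ScavengeRootsInCode is declared diagnostic, so on a product VM it presumably
has to be unlocked first; a usage sketch (MyApp is a placeholder):

    java -XX:+UnlockDiagnosticVMOptions -XX:ScavengeRootsInCode=2 MyApp

Setting 2 asks the compilers to embed every constant they can prove, which
exercises the new scavenge-root list hardest; setting 1 merely permits
scavengable oops in code when a feature (JSR 292, per the arguments.cpp hunk
above) requires them.
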
diff --git a/src/share/vm/runtime/sweeper.cpp b/src/share/vm/runtime/sweeper.cpp
index bfa4761d4..fe1cb641c 100644
--- a/src/share/vm/runtime/sweeper.cpp
+++ b/src/share/vm/runtime/sweeper.cpp
@@ -34,6 +34,17 @@ jint NMethodSweeper::_locked_seen = 0;
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool NMethodSweeper::_rescan = false;
+class MarkActivationClosure: public CodeBlobClosure {
+public:
+ virtual void do_code_blob(CodeBlob* cb) {
+ // If we see an activation belonging to a non_entrant nmethod, we mark it.
+ if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
+ ((nmethod*)cb)->mark_as_seen_on_stack();
+ }
+ }
+};
+static MarkActivationClosure mark_activation_closure;
+
void NMethodSweeper::sweep() {
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
if (!MethodFlushing) return;
@@ -57,7 +68,7 @@ void NMethodSweeper::sweep() {
if (PrintMethodFlushing) {
tty->print_cr("### Sweep: stack traversal %d", _traversals);
}
- Threads::nmethods_do();
+ Threads::nmethods_do(&mark_activation_closure);
// reset the flags since we started a scan from the beginning.
_rescan = false;
diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp
index 59fb8dbc7..998088fbd 100644
--- a/src/share/vm/runtime/thread.cpp
+++ b/src/share/vm/runtime/thread.cpp
@@ -683,14 +683,15 @@ bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
return false;
}
-void Thread::oops_do(OopClosure* f) {
+void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
active_handles()->oops_do(f);
// Do oop for ThreadShadow
f->do_oop((oop*)&_pending_exception);
handle_area()->oops_do(f);
}
-void Thread::nmethods_do() {
+void Thread::nmethods_do(CodeBlobClosure* cf) {
+ // no nmethods in a generic thread...
}
void Thread::print_on(outputStream* st) const {
@@ -2316,12 +2317,12 @@ void JavaThread::gc_prologue() {
}
-void JavaThread::oops_do(OopClosure* f) {
+void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
// The ThreadProfiler oops_do is done from FlatProfiler::oops_do
// since there may be more than one thread using each ThreadProfiler.
// Traverse the GCHandles
- Thread::oops_do(f);
+ Thread::oops_do(f, cf);
assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
(has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
@@ -2347,7 +2348,7 @@ void JavaThread::oops_do(OopClosure* f) {
// Traverse the execution stack
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
- fst.current()->oops_do(f, fst.register_map());
+ fst.current()->oops_do(f, cf, fst.register_map());
}
}
@@ -2379,9 +2380,8 @@ void JavaThread::oops_do(OopClosure* f) {
}
}
-void JavaThread::nmethods_do() {
- // Traverse the GCHandles
- Thread::nmethods_do();
+void JavaThread::nmethods_do(CodeBlobClosure* cf) {
+ Thread::nmethods_do(cf); // (super method is a no-op)
assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
(has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
@@ -2389,7 +2389,7 @@ void JavaThread::nmethods_do() {
if (has_last_Java_frame()) {
// Traverse the execution stack
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
- fst.current()->nmethods_do();
+ fst.current()->nmethods_do(cf);
}
}
}
@@ -2463,7 +2463,7 @@ static void frame_verify(frame* f, const RegisterMap *map) { f->verify(map); }
void JavaThread::verify() {
// Verify oops in the thread.
- oops_do(&VerifyOopClosure::verify_oop);
+ oops_do(&VerifyOopClosure::verify_oop, NULL);
// Verify the stack frames.
frames_do(frame_verify);
@@ -3602,14 +3602,14 @@ bool Threads::includes(JavaThread* p) {
// uses the Threads_lock to guarantee this property. It also makes sure that
// all threads get blocked when exiting or starting).
-void Threads::oops_do(OopClosure* f) {
+void Threads::oops_do(OopClosure* f, CodeBlobClosure* cf) {
ALL_JAVA_THREADS(p) {
- p->oops_do(f);
+ p->oops_do(f, cf);
}
- VMThread::vm_thread()->oops_do(f);
+ VMThread::vm_thread()->oops_do(f, cf);
}
-void Threads::possibly_parallel_oops_do(OopClosure* f) {
+void Threads::possibly_parallel_oops_do(OopClosure* f, CodeBlobClosure* cf) {
// Introduce a mechanism allowing parallel threads to claim threads as
// root groups. Overhead should be small enough to use all the time,
// even in sequential code.
@@ -3618,12 +3618,12 @@ void Threads::possibly_parallel_oops_do(OopClosure* f) {
int cp = SharedHeap::heap()->strong_roots_parity();
ALL_JAVA_THREADS(p) {
if (p->claim_oops_do(is_par, cp)) {
- p->oops_do(f);
+ p->oops_do(f, cf);
}
}
VMThread* vmt = VMThread::vm_thread();
if (vmt->claim_oops_do(is_par, cp))
- vmt->oops_do(f);
+ vmt->oops_do(f, cf);
}
#ifndef SERIALGC
@@ -3644,11 +3644,11 @@ void Threads::create_thread_roots_marking_tasks(GCTaskQueue* q) {
}
#endif // SERIALGC
-void Threads::nmethods_do() {
+void Threads::nmethods_do(CodeBlobClosure* cf) {
ALL_JAVA_THREADS(p) {
- p->nmethods_do();
+ p->nmethods_do(cf);
}
- VMThread::vm_thread()->nmethods_do();
+ VMThread::vm_thread()->nmethods_do(cf);
}
void Threads::gc_epilogue() {
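
Threads::nmethods_do is now a generic walk over every Java thread's active
code blobs plus the VM thread; the sweeper's mark_activation_closure above is
just its first client. A second, purely hypothetical client for flavor:

    // Hypothetical: report not-entrant nmethods still live on some stack.
    class LogNotEntrantClosure : public CodeBlobClosure {
     public:
      virtual void do_code_blob(CodeBlob* cb) {
        if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant())
          tty->print_cr("not-entrant nmethod still on stack: " INTPTR_FORMAT,
                        (intptr_t)cb);
      }
    };
    // LogNotEntrantClosure log_cl;
    // Threads::nmethods_do(&log_cl);   // at a safepoint, as in sweep()
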
diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp
index c1920bb22..574084b72 100644
--- a/src/share/vm/runtime/thread.hpp
+++ b/src/share/vm/runtime/thread.hpp
@@ -374,7 +374,8 @@ class Thread: public ThreadShadow {
// GC support
// Apply "f->do_oop" to all root oops in "this".
- void oops_do(OopClosure* f);
+ // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
+ void oops_do(OopClosure* f, CodeBlobClosure* cf);
// Handles the parallel case for the method below.
private:
@@ -398,7 +399,7 @@ public:
}
// Sweeper support
- void nmethods_do();
+ void nmethods_do(CodeBlobClosure* cf);
// Tells if adr belongs to this thread. This is used
// for checking if a lock is owned by the running thread.
@@ -1238,10 +1239,10 @@ class JavaThread: public Thread {
void frames_do(void f(frame*, const RegisterMap*));
// Memory operations
- void oops_do(OopClosure* f);
+ void oops_do(OopClosure* f, CodeBlobClosure* cf);
// Sweeper operations
- void nmethods_do();
+ void nmethods_do(CodeBlobClosure* cf);
// Memory management operations
void gc_epilogue();
@@ -1629,9 +1630,9 @@ class Threads: AllStatic {
// Apply "f->do_oop" to all root oops in all threads.
// This version may only be called by sequential code.
- static void oops_do(OopClosure* f);
+ static void oops_do(OopClosure* f, CodeBlobClosure* cf);
// This version may be called by sequential or parallel code.
- static void possibly_parallel_oops_do(OopClosure* f);
+ static void possibly_parallel_oops_do(OopClosure* f, CodeBlobClosure* cf);
// This creates a list of GCTasks, one per thread.
static void create_thread_roots_tasks(GCTaskQueue* q);
// This creates a list of GCTasks, one per thread, for marking objects.
@@ -1639,13 +1640,13 @@ class Threads: AllStatic {
// Apply "f->do_oop" to roots in all threads that
// are part of compiled frames
- static void compiled_frame_oops_do(OopClosure* f);
+ static void compiled_frame_oops_do(OopClosure* f, CodeBlobClosure* cf);
static void convert_hcode_pointers();
static void restore_hcode_pointers();
// Sweeper
- static void nmethods_do();
+ static void nmethods_do(CodeBlobClosure* cf);
static void gc_epilogue();
static void gc_prologue();
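
With these declarations every thread-root entry point threads the
CodeBlobClosure through, so each collector can decide per phase whether
stack-anchored nmethods act as strong roots. A hedged sketch of a root-scan
call site (names assumed; the real ones are in the sharedHeap and GC task
files this commit also changes):

    // root_cl: some OopClosure the collector already owns (assumed).
    CodeBlobToOopClosure blob_cl(&root_cl, /*do_marking=*/ true);
    Threads::possibly_parallel_oops_do(&root_cl, &blob_cl);
    // Callers that do not care about code blobs pass NULL, as
    // JavaThread::verify() does above.
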
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
index 00dac5885..f90c8a46b 100644
--- a/src/share/vm/runtime/vmStructs.cpp
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -549,6 +549,7 @@ static inline uint64_t cast_uint64_t(size_t x)
/********************************/ \
\
static_field(CodeCache, _heap, CodeHeap*) \
+ static_field(CodeCache, _scavenge_root_nmethods, nmethod*) \
\
/*******************************/ \
/* CodeHeap (NOTE: incomplete) */ \
@@ -618,7 +619,9 @@ static inline uint64_t cast_uint64_t(size_t x)
static_field(nmethod, _zombie_instruction_size, int) \
nonstatic_field(nmethod, _method, methodOop) \
nonstatic_field(nmethod, _entry_bci, int) \
- nonstatic_field(nmethod, _link, nmethod*) \
+ nonstatic_field(nmethod, _osr_link, nmethod*) \
+ nonstatic_field(nmethod, _scavenge_root_link, nmethod*) \
+ nonstatic_field(nmethod, _scavenge_root_state, jbyte) \
nonstatic_field(nmethod, _exception_offset, int) \
nonstatic_field(nmethod, _deoptimize_offset, int) \
nonstatic_field(nmethod, _orig_pc_offset, int) \
diff --git a/src/share/vm/runtime/vmThread.cpp b/src/share/vm/runtime/vmThread.cpp
index 7b2cc45be..b54f62515 100644
--- a/src/share/vm/runtime/vmThread.cpp
+++ b/src/share/vm/runtime/vmThread.cpp
@@ -619,8 +619,8 @@ void VMThread::execute(VM_Operation* op) {
}
-void VMThread::oops_do(OopClosure* f) {
- Thread::oops_do(f);
+void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
+ Thread::oops_do(f, cf);
_vm_queue->oops_do(f);
}
@@ -652,5 +652,5 @@ void VMOperationQueue::verify_queue(int prio) {
#endif
void VMThread::verify() {
- oops_do(&VerifyOopClosure::verify_oop);
+ oops_do(&VerifyOopClosure::verify_oop, NULL);
}
diff --git a/src/share/vm/runtime/vmThread.hpp b/src/share/vm/runtime/vmThread.hpp
index b196d0fc5..7acf84a70 100644
--- a/src/share/vm/runtime/vmThread.hpp
+++ b/src/share/vm/runtime/vmThread.hpp
@@ -121,7 +121,7 @@ class VMThread: public Thread {
static VMThread* vm_thread() { return _vm_thread; }
// GC support
- void oops_do(OopClosure* f);
+ void oops_do(OopClosure* f, CodeBlobClosure* cf);
// Debugging
void print_on(outputStream* st) const;
diff --git a/src/share/vm/utilities/debug.cpp b/src/share/vm/utilities/debug.cpp
index 20241e5bd..ef3e12355 100644
--- a/src/share/vm/utilities/debug.cpp
+++ b/src/share/vm/utilities/debug.cpp
@@ -702,11 +702,14 @@ static void findref(intptr_t x) {
tty->print_cr("Searching strong roots:");
Universe::oops_do(&lookFor, false);
JNIHandles::oops_do(&lookFor); // Global (strong) JNI handles
- Threads::oops_do(&lookFor);
+ Threads::oops_do(&lookFor, NULL);
ObjectSynchronizer::oops_do(&lookFor);
//FlatProfiler::oops_do(&lookFor);
SystemDictionary::oops_do(&lookFor);
+ tty->print_cr("Searching code cache:");
+ CodeCache::oops_do(&lookFor);
+
tty->print_cr("Done.");
}
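
findref is one of the debug-build helpers intended to be invoked from a
native debugger, and with this change its root sweep also covers the code
cache, so an oop kept alive only by an nmethod is now reported. Presumed
invocation (gdb syntax; whether the static symbol is directly callable
depends on the build, so treat this as an assumption):

    (gdb) call findref((intptr_t)0x12345678)   // address being hunted
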