author     coleenp <none@none>  2008-04-13 17:43:42 -0400
committer  coleenp <none@none>  2008-04-13 17:43:42 -0400
commit     8dc3eb94be5546c0d912a2973322aa4c75f4ca16 (patch)
tree       3ef8c64ec2efcfbeec6e46f6a3775a60c122e5df /src/share
parent     490af03e05e08d9e13e7135cce32cb5b31110834 (diff)
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
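
For orientation, a minimal standalone sketch (not the patch's own code) of the base-plus-shift scheme this changeset enables: with 8-byte object alignment, a 32-bit compressed reference shifted left by 3 on decode spans 2^32 * 8 bytes = 32 GB of heap. All names below are illustrative.

    // Minimal sketch, assuming a known heap base and 8-byte object alignment;
    // null handling and the zero-based (no-base) variant are omitted.
    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t narrow_oop;        // compressed, 32-bit reference
    static char* heap_base = 0;         // assumed start of the Java heap
    static const int oop_shift = 3;     // log2 of the 8-byte object alignment

    static narrow_oop encode(void* p) {
      uintptr_t offset = (uintptr_t)((char*)p - heap_base);
      assert((offset & 7) == 0);                    // objects are 8-byte aligned
      assert((offset >> oop_shift) <= UINT32_MAX);  // heap is at most 32 GB
      return (narrow_oop)(offset >> oop_shift);
    }

    static void* decode(narrow_oop n) {
      return heap_base + ((uintptr_t)n << oop_shift);
    }
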
Diffstat (limited to 'src/share')
-rw-r--r--  src/share/vm/adlc/archDesc.cpp  5
-rw-r--r--  src/share/vm/adlc/forms.cpp  3
-rw-r--r--  src/share/vm/adlc/forms.hpp  3
-rw-r--r--  src/share/vm/adlc/formssel.cpp  22
-rw-r--r--  src/share/vm/adlc/output_c.cpp  22
-rw-r--r--  src/share/vm/adlc/output_h.cpp  51
-rw-r--r--  src/share/vm/asm/codeBuffer.cpp  6
-rw-r--r--  src/share/vm/c1/c1_Runtime1.cpp  72
-rw-r--r--  src/share/vm/ci/ciInstanceKlass.cpp  38
-rw-r--r--  src/share/vm/ci/ciInstanceKlass.hpp  11
-rw-r--r--  src/share/vm/ci/ciObjectFactory.cpp  2
-rw-r--r--  src/share/vm/classfile/classFileParser.cpp  64
-rw-r--r--  src/share/vm/classfile/javaClasses.cpp  71
-rw-r--r--  src/share/vm/classfile/javaClasses.hpp  55
-rw-r--r--  src/share/vm/compiler/oopMap.cpp  50
-rw-r--r--  src/share/vm/compiler/oopMap.hpp  9
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp  260
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp  86
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp  2
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp  261
-rw-r--r--  src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp  14
-rw-r--r--  src/share/vm/gc_implementation/includeDB_gc_parNew  9
-rw-r--r--  src/share/vm/gc_implementation/includeDB_gc_parallelScavenge  3
-rw-r--r--  src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp  9
-rw-r--r--  src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp  4
-rw-r--r--  src/share/vm/gc_implementation/parNew/parNewGeneration.cpp  96
-rw-r--r--  src/share/vm/gc_implementation/parNew/parNewGeneration.hpp  61
-rw-r--r--  src/share/vm/gc_implementation/parNew/parOopClosures.hpp  64
-rw-r--r--  src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp  48
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp  49
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp  2
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp  2
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp  13
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp  24
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp  98
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp  216
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp  6
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp  2
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp  46
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp  49
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp  50
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp  14
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp  8
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp  42
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp  17
-rw-r--r--  src/share/vm/gc_implementation/shared/markSweep.cpp  94
-rw-r--r--  src/share/vm/gc_implementation/shared/markSweep.hpp  88
-rw-r--r--  src/share/vm/gc_implementation/shared/markSweep.inline.hpp  100
-rw-r--r--  src/share/vm/gc_interface/collectedHeap.cpp  1
-rw-r--r--  src/share/vm/gc_interface/collectedHeap.hpp  6
-rw-r--r--  src/share/vm/gc_interface/collectedHeap.inline.hpp  13
-rw-r--r--  src/share/vm/includeDB_core  41
-rw-r--r--  src/share/vm/interpreter/interpreterRuntime.hpp  5
-rw-r--r--  src/share/vm/memory/barrierSet.hpp  10
-rw-r--r--  src/share/vm/memory/barrierSet.inline.hpp  2
-rw-r--r--  src/share/vm/memory/cardTableModRefBS.cpp  2
-rw-r--r--  src/share/vm/memory/cardTableModRefBS.hpp  6
-rw-r--r--  src/share/vm/memory/cardTableRS.cpp  30
-rw-r--r--  src/share/vm/memory/cardTableRS.hpp  6
-rw-r--r--  src/share/vm/memory/compactingPermGenGen.cpp  21
-rw-r--r--  src/share/vm/memory/defNewGeneration.cpp  124
-rw-r--r--  src/share/vm/memory/defNewGeneration.hpp  23
-rw-r--r--  src/share/vm/memory/defNewGeneration.inline.hpp  103
-rw-r--r--  src/share/vm/memory/dump.cpp  16
-rw-r--r--  src/share/vm/memory/genCollectedHeap.cpp  4
-rw-r--r--  src/share/vm/memory/genCollectedHeap.hpp  3
-rw-r--r--  src/share/vm/memory/genMarkSweep.cpp  9
-rw-r--r--  src/share/vm/memory/genOopClosures.hpp  96
-rw-r--r--  src/share/vm/memory/genOopClosures.inline.hpp  69
-rw-r--r--  src/share/vm/memory/genRemSet.hpp  6
-rw-r--r--  src/share/vm/memory/genRemSet.inline.hpp  2
-rw-r--r--  src/share/vm/memory/generation.cpp  4
-rw-r--r--  src/share/vm/memory/generation.hpp  8
-rw-r--r--  src/share/vm/memory/iterator.hpp  2
-rw-r--r--  src/share/vm/memory/modRefBarrierSet.hpp  8
-rw-r--r--  src/share/vm/memory/referenceProcessor.cpp  356
-rw-r--r--  src/share/vm/memory/referenceProcessor.hpp  39
-rw-r--r--  src/share/vm/memory/restore.cpp  2
-rw-r--r--  src/share/vm/memory/serialize.cpp  7
-rw-r--r--  src/share/vm/memory/sharedHeap.cpp  6
-rw-r--r--  src/share/vm/memory/space.cpp  28
-rw-r--r--  src/share/vm/memory/space.hpp  229
-rw-r--r--  src/share/vm/memory/universe.cpp  14
-rw-r--r--  src/share/vm/memory/universe.hpp  9
-rw-r--r--  src/share/vm/oops/arrayOop.hpp  77
-rw-r--r--  src/share/vm/oops/constantPoolKlass.cpp  15
-rw-r--r--  src/share/vm/oops/constantPoolKlass.hpp  10
-rw-r--r--  src/share/vm/oops/constantPoolOop.hpp  11
-rw-r--r--  src/share/vm/oops/cpCacheKlass.cpp  17
-rw-r--r--  src/share/vm/oops/cpCacheKlass.hpp  11
-rw-r--r--  src/share/vm/oops/cpCacheOop.cpp  1
-rw-r--r--  src/share/vm/oops/cpCacheOop.hpp  7
-rw-r--r--  src/share/vm/oops/instanceKlass.cpp  752
-rw-r--r--  src/share/vm/oops/instanceKlass.hpp  31
-rw-r--r--  src/share/vm/oops/instanceKlassKlass.cpp  24
-rw-r--r--  src/share/vm/oops/instanceOop.hpp  21
-rw-r--r--  src/share/vm/oops/instanceRefKlass.cpp  340
-rw-r--r--  src/share/vm/oops/klass.cpp  3
-rw-r--r--  src/share/vm/oops/klass.hpp  1
-rw-r--r--  src/share/vm/oops/klassVtable.cpp  4
-rw-r--r--  src/share/vm/oops/markOop.hpp  2
-rw-r--r--  src/share/vm/oops/methodDataKlass.cpp  6
-rw-r--r--  src/share/vm/oops/methodOop.cpp  31
-rw-r--r--  src/share/vm/oops/objArrayKlass.cpp  300
-rw-r--r--  src/share/vm/oops/objArrayKlass.hpp  10
-rw-r--r--  src/share/vm/oops/objArrayOop.cpp  10
-rw-r--r--  src/share/vm/oops/objArrayOop.hpp  67
-rw-r--r--  src/share/vm/oops/oop.cpp  7
-rw-r--r--  src/share/vm/oops/oop.hpp  94
-rw-r--r--  src/share/vm/oops/oop.inline.hpp  292
-rw-r--r--  src/share/vm/oops/oop.pcgc.inline.hpp  23
-rw-r--r--  src/share/vm/oops/oopsHierarchy.hpp  34
-rw-r--r--  src/share/vm/opto/buildOopMap.cpp  20
-rw-r--r--  src/share/vm/opto/callnode.hpp  3
-rw-r--r--  src/share/vm/opto/cfgnode.cpp  13
-rw-r--r--  src/share/vm/opto/chaitin.cpp  1
-rw-r--r--  src/share/vm/opto/classes.hpp  7
-rw-r--r--  src/share/vm/opto/compile.cpp  17
-rw-r--r--  src/share/vm/opto/connode.cpp  39
-rw-r--r--  src/share/vm/opto/connode.hpp  49
-rw-r--r--  src/share/vm/opto/escape.cpp  25
-rw-r--r--  src/share/vm/opto/graphKit.cpp  4
-rw-r--r--  src/share/vm/opto/idealKit.cpp  4
-rw-r--r--  src/share/vm/opto/lcm.cpp  2
-rw-r--r--  src/share/vm/opto/library_call.cpp  57
-rw-r--r--  src/share/vm/opto/loopTransform.cpp  3
-rw-r--r--  src/share/vm/opto/machnode.cpp  7
-rw-r--r--  src/share/vm/opto/macro.cpp  16
-rw-r--r--  src/share/vm/opto/macro.hpp  4
-rw-r--r--  src/share/vm/opto/matcher.cpp  88
-rw-r--r--  src/share/vm/opto/memnode.cpp  79
-rw-r--r--  src/share/vm/opto/memnode.hpp  45
-rw-r--r--  src/share/vm/opto/node.cpp  6
-rw-r--r--  src/share/vm/opto/node.hpp  1
-rw-r--r--  src/share/vm/opto/opcodes.cpp  1
-rw-r--r--  src/share/vm/opto/opcodes.hpp  1
-rw-r--r--  src/share/vm/opto/parse2.cpp  8
-rw-r--r--  src/share/vm/opto/parse3.cpp  2
-rw-r--r--  src/share/vm/opto/phaseX.cpp  15
-rw-r--r--  src/share/vm/opto/phaseX.hpp  8
-rw-r--r--  src/share/vm/opto/subnode.cpp  69
-rw-r--r--  src/share/vm/opto/subnode.hpp  10
-rw-r--r--  src/share/vm/opto/superword.cpp  1
-rw-r--r--  src/share/vm/opto/type.cpp  224
-rw-r--r--  src/share/vm/opto/type.hpp  121
-rw-r--r--  src/share/vm/prims/jni.cpp  5
-rw-r--r--  src/share/vm/prims/jvmtiTagMap.cpp  20
-rw-r--r--  src/share/vm/prims/unsafe.cpp  77
-rw-r--r--  src/share/vm/runtime/arguments.cpp  25
-rw-r--r--  src/share/vm/runtime/atomic.cpp  12
-rw-r--r--  src/share/vm/runtime/atomic.hpp  10
-rw-r--r--  src/share/vm/runtime/frame.cpp  8
-rw-r--r--  src/share/vm/runtime/frame.hpp  14
-rw-r--r--  src/share/vm/runtime/globals.cpp  11
-rw-r--r--  src/share/vm/runtime/globals.hpp  20
-rw-r--r--  src/share/vm/runtime/globals_extension.hpp  17
-rw-r--r--  src/share/vm/runtime/hpi.cpp  3
-rw-r--r--  src/share/vm/runtime/init.cpp  3
-rw-r--r--  src/share/vm/runtime/jniHandles.cpp  6
-rw-r--r--  src/share/vm/runtime/vmStructs.cpp  31
-rw-r--r--  src/share/vm/services/heapDumper.cpp  13
-rw-r--r--  src/share/vm/utilities/copy.hpp  12
-rw-r--r--  src/share/vm/utilities/debug.cpp  1
-rw-r--r--  src/share/vm/utilities/globalDefinitions.cpp  49
-rw-r--r--  src/share/vm/utilities/globalDefinitions.hpp  37
-rw-r--r--  src/share/vm/utilities/taskqueue.hpp  26
-rw-r--r--  src/share/vm/utilities/vmError.cpp  5
167 files changed, 4593 insertions, 2594 deletions
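
A pattern that recurs throughout the closure changes below (compare the DO_OOP_WORK_DEFN macro added to cmsOopClosures.hpp) is a single template body serving both oop* and narrowOop* slots, decoding the slot before doing the closure's work. A minimal sketch of that shape, assuming a 64-bit build; the type and helper names are illustrative, not the VM's declarations.

    #include <stdint.h>

    typedef uintptr_t oop;        // stand-in for a full-width reference
    typedef uint32_t  narrowOop;  // compressed reference

    static char* heap_base = 0;   // assumed heap start used when decoding

    inline oop decode(oop o)       { return o; }   // already full width
    inline oop decode(narrowOop n) { return (oop)(heap_base + ((uintptr_t)n << 3)); }

    struct ExampleClosure {
      // One template body handles both slot widths: load the slot, skip
      // nulls, widen to a full oop, then do the closure-specific work.
      template <class T> void do_oop_work(T* p) {
        T heap_word = *p;
        if (heap_word != 0) {
          process(decode(heap_word));
        }
      }
      // The two overloads a collector would call forward to the template.
      void do_oop(oop* p)       { do_oop_work(p); }
      void do_oop(narrowOop* p) { do_oop_work(p); }

      void process(oop obj) { (void)obj; /* marking/copying work goes here */ }
    };
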
diff --git a/src/share/vm/adlc/archDesc.cpp b/src/share/vm/adlc/archDesc.cpp
index 0e9088e8f..534c62a33 100644
--- a/src/share/vm/adlc/archDesc.cpp
+++ b/src/share/vm/adlc/archDesc.cpp
@@ -867,6 +867,7 @@ const char *ArchDesc::reg_mask(InstructForm &inForm) {
Form *form = (Form*)_globalNames[result];
assert( form, "Result operand must be defined");
OperandForm *oper = form->is_operand();
+ if (oper == NULL) form->dump();
assert( oper, "Result must be an OperandForm");
return reg_mask( *oper );
}
@@ -908,6 +909,7 @@ const char *ArchDesc::getIdealType(const char *idealOp) {
switch( last_char ) {
case 'I': return "TypeInt::INT";
case 'P': return "TypePtr::BOTTOM";
+ case 'N': return "TypeNarrowOop::BOTTOM";
case 'F': return "Type::FLOAT";
case 'D': return "Type::DOUBLE";
case 'L': return "TypeLong::LONG";
@@ -944,7 +946,7 @@ void ArchDesc::initBaseOpTypes() {
// Create InstructForm and assign type for each ideal instruction.
for ( int j = _last_machine_leaf+1; j < _last_opcode; ++j) {
char *ident = (char *)NodeClassNames[j];
- if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") ||
+ if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") || !strcmp(ident, "ConN") ||
!strcmp(ident, "ConF") || !strcmp(ident, "ConD") ||
!strcmp(ident, "ConL") || !strcmp(ident, "Con" ) ||
!strcmp(ident, "Bool") ) {
@@ -1109,6 +1111,7 @@ void ArchDesc::buildMustCloneMap(FILE *fp_hpp, FILE *fp_cpp) {
if ( strcmp(idealName,"CmpI") == 0
|| strcmp(idealName,"CmpU") == 0
|| strcmp(idealName,"CmpP") == 0
+ || strcmp(idealName,"CmpN") == 0
|| strcmp(idealName,"CmpL") == 0
|| strcmp(idealName,"CmpD") == 0
|| strcmp(idealName,"CmpF") == 0
diff --git a/src/share/vm/adlc/forms.cpp b/src/share/vm/adlc/forms.cpp
index d51c7d1f9..7bd209385 100644
--- a/src/share/vm/adlc/forms.cpp
+++ b/src/share/vm/adlc/forms.cpp
@@ -211,6 +211,7 @@ Form::DataType Form::ideal_to_const_type(const char *name) const {
if (strcmp(name,"ConI")==0) return Form::idealI;
if (strcmp(name,"ConP")==0) return Form::idealP;
+ if (strcmp(name,"ConN")==0) return Form::idealN;
if (strcmp(name,"ConL")==0) return Form::idealL;
if (strcmp(name,"ConF")==0) return Form::idealF;
if (strcmp(name,"ConD")==0) return Form::idealD;
@@ -256,6 +257,7 @@ Form::DataType Form::is_load_from_memory(const char *opType) const {
if( strcmp(opType,"LoadPLocked")==0 ) return Form::idealP;
if( strcmp(opType,"LoadLLocked")==0 ) return Form::idealL;
if( strcmp(opType,"LoadP")==0 ) return Form::idealP;
+ if( strcmp(opType,"LoadN")==0 ) return Form::idealN;
if( strcmp(opType,"LoadRange")==0 ) return Form::idealI;
if( strcmp(opType,"LoadS")==0 ) return Form::idealS;
if( strcmp(opType,"Load16B")==0 ) return Form::idealB;
@@ -286,6 +288,7 @@ Form::DataType Form::is_store_to_memory(const char *opType) const {
if( strcmp(opType,"StoreI")==0) return Form::idealI;
if( strcmp(opType,"StoreL")==0) return Form::idealL;
if( strcmp(opType,"StoreP")==0) return Form::idealP;
+ if( strcmp(opType,"StoreN")==0) return Form::idealN;
if( strcmp(opType,"Store16B")==0) return Form::idealB;
if( strcmp(opType,"Store8B")==0) return Form::idealB;
if( strcmp(opType,"Store4B")==0) return Form::idealB;
diff --git a/src/share/vm/adlc/forms.hpp b/src/share/vm/adlc/forms.hpp
index e4f823392..dfa344547 100644
--- a/src/share/vm/adlc/forms.hpp
+++ b/src/share/vm/adlc/forms.hpp
@@ -168,7 +168,8 @@ public:
idealD = 5, // Double type
idealB = 6, // Byte type
idealC = 7, // Char type
- idealS = 8 // String type
+ idealS = 8, // String type
+ idealN = 9 // Narrow oop types
};
// Convert ideal name to a DataType, return DataType::none if not a 'ConX'
Form::DataType ideal_to_const_type(const char *ideal_type_name) const;
diff --git a/src/share/vm/adlc/formssel.cpp b/src/share/vm/adlc/formssel.cpp
index a65882497..eab02499c 100644
--- a/src/share/vm/adlc/formssel.cpp
+++ b/src/share/vm/adlc/formssel.cpp
@@ -726,6 +726,9 @@ bool InstructForm::captures_bottom_type() const {
if( _matrule && _matrule->_rChild &&
(!strcmp(_matrule->_rChild->_opType,"CastPP") || // new result type
!strcmp(_matrule->_rChild->_opType,"CastX2P") || // new result type
+ !strcmp(_matrule->_rChild->_opType,"DecodeN") ||
+ !strcmp(_matrule->_rChild->_opType,"EncodeP") ||
+ !strcmp(_matrule->_rChild->_opType,"LoadN") ||
!strcmp(_matrule->_rChild->_opType,"CreateEx") || // type of exception
!strcmp(_matrule->_rChild->_opType,"CheckCastPP")) ) return true;
else if ( is_ideal_load() == Form::idealP ) return true;
@@ -2101,6 +2104,7 @@ bool OperandForm::is_bound_register() const {
if (strcmp(name,"RegF")==0) size = 1;
if (strcmp(name,"RegD")==0) size = 2;
if (strcmp(name,"RegL")==0) size = 2;
+ if (strcmp(name,"RegN")==0) size = 1;
if (strcmp(name,"RegP")==0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
if (size == 0) return false;
return size == reg_class->size();
@@ -2365,11 +2369,12 @@ void OperandForm::ext_format(FILE *fp, FormDict &globals, uint index) {
void OperandForm::format_constant(FILE *fp, uint const_index, uint const_type) {
switch(const_type) {
- case Form::idealI: fprintf(fp,"st->print(\"#%%d\", _c%d);\n", const_index); break;
- case Form::idealP: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break;
- case Form::idealL: fprintf(fp,"st->print(\"#%%lld\", _c%d);\n", const_index); break;
- case Form::idealF: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
- case Form::idealD: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
+ case Form::idealI: fprintf(fp,"st->print(\"#%%d\", _c%d);\n", const_index); break;
+ case Form::idealP: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break;
+ case Form::idealN: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break;
+ case Form::idealL: fprintf(fp,"st->print(\"#%%lld\", _c%d);\n", const_index); break;
+ case Form::idealF: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
+ case Form::idealD: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
default:
assert( false, "ShouldNotReachHere()");
}
@@ -3300,9 +3305,9 @@ void MatchNode::output(FILE *fp) {
int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
static const char *needs_ideal_memory_list[] = {
- "StoreI","StoreL","StoreP","StoreD","StoreF" ,
+ "StoreI","StoreL","StoreP","StoreN","StoreD","StoreF" ,
"StoreB","StoreC","Store" ,"StoreFP",
- "LoadI" ,"LoadL", "LoadP" ,"LoadD" ,"LoadF" ,
+ "LoadI" ,"LoadL", "LoadP" ,"LoadN", "LoadD" ,"LoadF" ,
"LoadB" ,"LoadC" ,"LoadS" ,"Load" ,
"Store4I","Store2I","Store2L","Store2D","Store4F","Store2F","Store16B",
"Store8B","Store4B","Store8C","Store4C","Store2C",
@@ -3311,7 +3316,7 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
"LoadRange", "LoadKlass", "LoadL_unaligned", "LoadD_unaligned",
"LoadPLocked", "LoadLLocked",
"StorePConditional", "StoreLConditional",
- "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP",
+ "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
"StoreCM",
"ClearArray"
};
@@ -3712,6 +3717,7 @@ bool MatchRule::is_base_register(FormDict &globals) const {
if( base_operand(position, globals, result, name, opType) &&
(strcmp(opType,"RegI")==0 ||
strcmp(opType,"RegP")==0 ||
+ strcmp(opType,"RegN")==0 ||
strcmp(opType,"RegL")==0 ||
strcmp(opType,"RegF")==0 ||
strcmp(opType,"RegD")==0 ||
diff --git a/src/share/vm/adlc/output_c.cpp b/src/share/vm/adlc/output_c.cpp
index 037e129d1..168b93217 100644
--- a/src/share/vm/adlc/output_c.cpp
+++ b/src/share/vm/adlc/output_c.cpp
@@ -1546,6 +1546,18 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
// Build a mapping from operand index to input edges
fprintf(fp," unsigned idx0 = oper_input_base();\n");
+
+ // The order in which inputs are added to a node is very
+ // strange. Store nodes get a memory input before Expand is
+ // called and all other nodes get it afterwards so
+ // oper_input_base is wrong during expansion. This code adjusts
+ // it so that expansion will work correctly.
+ bool missing_memory_edge = node->_matrule->needs_ideal_memory_edge(_globalNames) &&
+ node->is_ideal_store() == Form::none;
+ if (missing_memory_edge) {
+ fprintf(fp," idx0--; // Adjust base because memory edge hasn't been inserted yet\n");
+ }
+
for( i = 0; i < node->num_opnds(); i++ ) {
fprintf(fp," unsigned idx%d = idx%d + num%d;\n",
i+1,i,i);
@@ -1600,8 +1612,10 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
int node_mem_op = node->memory_operand(_globalNames);
assert( node_mem_op != InstructForm::NO_MEMORY_OPERAND,
"expand rule member needs memory but top-level inst doesn't have any" );
- // Copy memory edge
- fprintf(fp," n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
+ if (!missing_memory_edge) {
+ // Copy memory edge
+ fprintf(fp," n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
+ }
}
// Iterate over the new instruction's operands
@@ -2363,6 +2377,8 @@ void ArchDesc::defineSize(FILE *fp, InstructForm &inst) {
fprintf(fp,"uint %sNode::size(PhaseRegAlloc *ra_) const {\n",
inst._ident);
+ fprintf(fp, " assert(VerifyOops || MachNode::size(ra_) <= %s, \"bad fixed size\");\n", inst._size);
+
//(2)
// Print the size
fprintf(fp, " return (VerifyOops ? MachNode::size(ra_) : %s);\n", inst._size);
@@ -3426,6 +3442,8 @@ static void path_to_constant(FILE *fp, FormDict &globals,
fprintf(fp, "_leaf->get_int()");
} else if ( (strcmp(optype,"ConP") == 0) ) {
fprintf(fp, "_leaf->bottom_type()->is_ptr()");
+ } else if ( (strcmp(optype,"ConN") == 0) ) {
+ fprintf(fp, "_leaf->bottom_type()->is_narrowoop()");
} else if ( (strcmp(optype,"ConF") == 0) ) {
fprintf(fp, "_leaf->getf()");
} else if ( (strcmp(optype,"ConD") == 0) ) {
diff --git a/src/share/vm/adlc/output_h.cpp b/src/share/vm/adlc/output_h.cpp
index 5429e57d9..0640f0a32 100644
--- a/src/share/vm/adlc/output_h.cpp
+++ b/src/share/vm/adlc/output_h.cpp
@@ -203,6 +203,10 @@ static void declareConstStorage(FILE *fp, FormDict &globals, OperandForm *oper)
if (i > 0) fprintf(fp,", ");
fprintf(fp," const TypePtr *_c%d;\n", i);
}
+ else if (!strcmp(type, "ConN")) {
+ if (i > 0) fprintf(fp,", ");
+ fprintf(fp," const TypeNarrowOop *_c%d;\n", i);
+ }
else if (!strcmp(type, "ConL")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp," jlong _c%d;\n", i);
@@ -235,6 +239,10 @@ static void declareConstStorage(FILE *fp, FormDict &globals, OperandForm *oper)
fprintf(fp," const TypePtr *_c%d;\n", i);
i++;
}
+ else if (!strcmp(comp->base_type(globals), "ConN")) {
+ fprintf(fp," const TypePtr *_c%d;\n", i);
+ i++;
+ }
else if (!strcmp(comp->base_type(globals), "ConL")) {
fprintf(fp," jlong _c%d;\n", i);
i++;
@@ -280,6 +288,7 @@ static void defineConstructor(FILE *fp, const char *name, uint num_consts,
fprintf(fp,is_ideal_bool ? "BoolTest::mask c%d" : "int32 c%d", i);
break;
}
+ case Form::idealN : { fprintf(fp,"const TypeNarrowOop *c%d", i); break; }
case Form::idealP : { fprintf(fp,"const TypePtr *c%d", i); break; }
case Form::idealL : { fprintf(fp,"jlong c%d", i); break; }
case Form::idealF : { fprintf(fp,"jfloat c%d", i); break; }
@@ -302,6 +311,11 @@ static void defineConstructor(FILE *fp, const char *name, uint num_consts,
fprintf(fp,"const TypePtr *c%d", i);
i++;
}
+ else if (!strcmp(comp->base_type(globals), "ConN")) {
+ if (i > 0) fprintf(fp,", ");
+ fprintf(fp,"const TypePtr *c%d", i);
+ i++;
+ }
else if (!strcmp(comp->base_type(globals), "ConL")) {
if (i > 0) fprintf(fp,", ");
fprintf(fp,"jlong c%d", i);
@@ -360,6 +374,10 @@ static uint dump_spec_constant(FILE *fp, const char *ideal_type, uint i) {
fprintf(fp," _c%d->dump_on(st);\n", i);
++i;
}
+ else if (!strcmp(ideal_type, "ConN")) {
+ fprintf(fp," _c%d->dump();\n", i);
+ ++i;
+ }
else if (!strcmp(ideal_type, "ConL")) {
fprintf(fp," st->print(\"#\" INT64_FORMAT, _c%d);\n", i);
++i;
@@ -417,8 +435,13 @@ void gen_oper_format(FILE *fp, FormDict &globals, OperandForm &oper, bool for_c_
// Replacement variable
const char *rep_var = oper._format->_rep_vars.iter();
// Check that it is a local name, and an operand
- OperandForm *op = oper._localNames[rep_var]->is_operand();
- assert( op, "replacement variable was not found in local names");
+ const Form* form = oper._localNames[rep_var];
+ if (form == NULL) {
+ globalAD->syntax_err(oper._linenum,
+ "\'%s\' not found in format for %s\n", rep_var, oper._ident);
+ assert(form, "replacement variable was not found in local names");
+ }
+ OperandForm *op = form->is_operand();
// Get index if register or constant
if ( op->_matrule && op->_matrule->is_base_register(globals) ) {
idx = oper.register_position( globals, rep_var);
@@ -483,9 +506,14 @@ void gen_oper_format(FILE *fp, FormDict &globals, OperandForm &oper, bool for_c_
} else {
// Replacement variable
const char *rep_var = oper._format->_rep_vars.iter();
- // Check that it is a local name, and an operand
- OperandForm *op = oper._localNames[rep_var]->is_operand();
- assert( op, "replacement variable was not found in local names");
+ // Check that it is a local name, and an operand
+ const Form* form = oper._localNames[rep_var];
+ if (form == NULL) {
+ globalAD->syntax_err(oper._linenum,
+ "\'%s\' not found in format for %s\n", rep_var, oper._ident);
+ assert(form, "replacement variable was not found in local names");
+ }
+ OperandForm *op = form->is_operand();
// Get index if register or constant
if ( op->_matrule && op->_matrule->is_base_register(globals) ) {
idx = oper.register_position( globals, rep_var);
@@ -1163,7 +1191,7 @@ void ArchDesc::declareClasses(FILE *fp) {
if( type != NULL ) {
Form::DataType data_type = oper->is_base_constant(_globalNames);
// Check if we are an ideal pointer type
- if( data_type == Form::idealP ) {
+ if( data_type == Form::idealP || data_type == Form::idealN ) {
// Return the ideal type we already have: <TypePtr *>
fprintf(fp," return _c0;");
} else {
@@ -1291,6 +1319,16 @@ void ArchDesc::declareClasses(FILE *fp) {
fprintf(fp, " return _c0->isa_oop_ptr();");
fprintf(fp, " }\n");
}
+ else if (!strcmp(oper->ideal_type(_globalNames), "ConN")) {
+ // Access the locally stored constant
+ fprintf(fp," virtual intptr_t constant() const {");
+ fprintf(fp, " return _c0->make_oopptr()->get_con();");
+ fprintf(fp, " }\n");
+ // Generate query to determine if this pointer is an oop
+ fprintf(fp," virtual bool constant_is_oop() const {");
+ fprintf(fp, " return _c0->make_oopptr()->isa_oop_ptr();");
+ fprintf(fp, " }\n");
+ }
else if (!strcmp(oper->ideal_type(_globalNames), "ConL")) {
fprintf(fp," virtual intptr_t constant() const {");
// We don't support addressing modes with > 4Gig offsets.
@@ -1748,6 +1786,7 @@ void ArchDesc::declareClasses(FILE *fp) {
fprintf(fp," return TypeInt::make(opnd_array(1)->constant());\n");
break;
case Form::idealP:
+ case Form::idealN:
fprintf(fp," return opnd_array(1)->type();\n",result);
break;
case Form::idealD:
diff --git a/src/share/vm/asm/codeBuffer.cpp b/src/share/vm/asm/codeBuffer.cpp
index 0fc53ed45..cd88a2ab1 100644
--- a/src/share/vm/asm/codeBuffer.cpp
+++ b/src/share/vm/asm/codeBuffer.cpp
@@ -281,8 +281,10 @@ address CodeSection::target(Label& L, address branch_pc) {
// Need to return a pc, doesn't matter what it is since it will be
// replaced during resolution later.
- // (Don't return NULL or badAddress, since branches shouldn't overflow.)
- return base;
+ // Don't return NULL or badAddress, since branches shouldn't overflow.
+ // Don't return base either because that could overflow displacements
+ // for shorter branches. It will get checked when bound.
+ return branch_pc;
}
}
diff --git a/src/share/vm/c1/c1_Runtime1.cpp b/src/share/vm/c1/c1_Runtime1.cpp
index 8fe439557..8f42fe143 100644
--- a/src/share/vm/c1/c1_Runtime1.cpp
+++ b/src/share/vm/c1/c1_Runtime1.cpp
@@ -1074,6 +1074,43 @@ JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
JRT_END
+// Array copy return codes.
+enum {
+ ac_failed = -1, // arraycopy failed
+ ac_ok = 0 // arraycopy succeeded
+};
+
+
+template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
+ oopDesc* dst, T* dst_addr,
+ int length) {
+
+ // For performance reasons, we assume we are using a card marking write
+ // barrier. The assert will fail if this is not the case.
+ // Note that we use the non-virtual inlineable variant of write_ref_array.
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ assert(bs->has_write_ref_array_opt(),
+ "Barrier set must have ref array opt");
+ if (src == dst) {
+ // same object, no check
+ Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
+ bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
+ (HeapWord*)(dst_addr + length)));
+ return ac_ok;
+ } else {
+ klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
+ klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
+ if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
+ // Elements are guaranteed to be subtypes, so no check necessary
+ Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
+ bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
+ (HeapWord*)(dst_addr + length)));
+ return ac_ok;
+ }
+ }
+ return ac_failed;
+}
+
// fast and direct copy of arrays; returning -1, means that an exception may be thrown
// and we did not copy anything
JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
@@ -1081,11 +1118,6 @@ JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int d
_generic_arraycopy_cnt++; // Slow-path oop array copy
#endif
- enum {
- ac_failed = -1, // arraycopy failed
- ac_ok = 0 // arraycopy succeeded
- };
-
if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
if (!dst->is_array() || !src->is_array()) return ac_failed;
if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
@@ -1105,30 +1137,14 @@ JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int d
memmove(dst_addr, src_addr, length << l2es);
return ac_ok;
} else if (src->is_objArray() && dst->is_objArray()) {
- oop* src_addr = objArrayOop(src)->obj_at_addr(src_pos);
- oop* dst_addr = objArrayOop(dst)->obj_at_addr(dst_pos);
- // For performance reasons, we assume we are using a card marking write
- // barrier. The assert will fail if this is not the case.
- // Note that we use the non-virtual inlineable variant of write_ref_array.
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->has_write_ref_array_opt(),
- "Barrier set must have ref array opt");
- if (src == dst) {
- // same object, no check
- Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
- bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
- (HeapWord*)(dst_addr + length)));
- return ac_ok;
+ if (UseCompressedOops) { // will need for tiered
+ narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
+ narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
+ return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
} else {
- klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
- klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
- if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
- // Elements are guaranteed to be subtypes, so no check necessary
- Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
- bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
- (HeapWord*)(dst_addr + length)));
- return ac_ok;
- }
+ oop *src_addr = objArrayOop(src)->obj_at_addr<oop>(src_pos);
+ oop *dst_addr = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
+ return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
}
}
return ac_failed;
diff --git a/src/share/vm/ci/ciInstanceKlass.cpp b/src/share/vm/ci/ciInstanceKlass.cpp
index 9710b8dd4..ab9b059f0 100644
--- a/src/share/vm/ci/ciInstanceKlass.cpp
+++ b/src/share/vm/ci/ciInstanceKlass.cpp
@@ -48,6 +48,7 @@ ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
// Next line must follow and use the result of the previous line:
_is_linked = _is_initialized || ik->is_linked();
_nonstatic_field_size = ik->nonstatic_field_size();
+ _has_nonstatic_fields = ik->has_nonstatic_fields();
_nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
_nof_implementors = ik->nof_implementors();
@@ -93,6 +94,7 @@ ciInstanceKlass::ciInstanceKlass(ciSymbol* name,
_is_initialized = false;
_is_linked = false;
_nonstatic_field_size = -1;
+ _has_nonstatic_fields = false;
_nonstatic_fields = NULL;
_nof_implementors = -1;
_loader = loader;
@@ -201,7 +203,7 @@ ciInstanceKlass* ciInstanceKlass::get_canonical_holder(int offset) {
assert(offset >= 0 && offset < layout_helper(), "offset must be tame");
#endif
- if (offset < (instanceOopDesc::header_size() * wordSize)) {
+ if (offset < instanceOopDesc::base_offset_in_bytes()) {
// All header offsets belong properly to java/lang/Object.
return CURRENT_ENV->Object_klass();
}
@@ -210,7 +212,8 @@ ciInstanceKlass* ciInstanceKlass::get_canonical_holder(int offset) {
for (;;) {
assert(self->is_loaded(), "must be loaded to have size");
ciInstanceKlass* super = self->super();
- if (super == NULL || !super->contains_field_offset(offset)) {
+ if (super == NULL || super->nof_nonstatic_fields() == 0 ||
+ !super->contains_field_offset(offset)) {
return self;
} else {
self = super; // return super->get_canonical_holder(offset)
@@ -381,31 +384,28 @@ int ciInstanceKlass::compute_nonstatic_fields() {
if (_nonstatic_fields != NULL)
return _nonstatic_fields->length();
- // Size in bytes of my fields, including inherited fields.
- // About equal to size_helper() - sizeof(oopDesc).
- int fsize = nonstatic_field_size() * wordSize;
- if (fsize == 0) { // easy shortcut
+ if (!has_nonstatic_fields()) {
Arena* arena = CURRENT_ENV->arena();
_nonstatic_fields = new (arena) GrowableArray<ciField*>(arena, 0, 0, NULL);
return 0;
}
assert(!is_java_lang_Object(), "bootstrap OK");
+ // Size in bytes of my fields, including inherited fields.
+ int fsize = nonstatic_field_size() * wordSize;
+
ciInstanceKlass* super = this->super();
- int super_fsize = 0;
- int super_flen = 0;
GrowableArray<ciField*>* super_fields = NULL;
- if (super != NULL) {
- super_fsize = super->nonstatic_field_size() * wordSize;
- super_flen = super->nof_nonstatic_fields();
+ if (super != NULL && super->has_nonstatic_fields()) {
+ int super_fsize = super->nonstatic_field_size() * wordSize;
+ int super_flen = super->nof_nonstatic_fields();
super_fields = super->_nonstatic_fields;
assert(super_flen == 0 || super_fields != NULL, "first get nof_fields");
- }
-
- // See if I am no larger than my super; if so, I can use his fields.
- if (fsize == super_fsize) {
- _nonstatic_fields = super_fields;
- return super_fields->length();
+ // See if I am no larger than my super; if so, I can use his fields.
+ if (fsize == super_fsize) {
+ _nonstatic_fields = super_fields;
+ return super_fields->length();
+ }
}
GrowableArray<ciField*>* fields = NULL;
@@ -425,11 +425,11 @@ int ciInstanceKlass::compute_nonstatic_fields() {
// (In principle, they could mix with superclass fields.)
fields->sort(sort_field_by_offset);
#ifdef ASSERT
- int last_offset = sizeof(oopDesc);
+ int last_offset = instanceOopDesc::base_offset_in_bytes();
for (int i = 0; i < fields->length(); i++) {
ciField* field = fields->at(i);
int offset = field->offset_in_bytes();
- int size = (field->_type == NULL) ? oopSize : field->size_in_bytes();
+ int size = (field->_type == NULL) ? heapOopSize : field->size_in_bytes();
assert(last_offset <= offset, "no field overlap");
if (last_offset > (int)sizeof(oopDesc))
assert((offset - last_offset) < BytesPerLong, "no big holes");
diff --git a/src/share/vm/ci/ciInstanceKlass.hpp b/src/share/vm/ci/ciInstanceKlass.hpp
index d52818fec..a843a9251 100644
--- a/src/share/vm/ci/ciInstanceKlass.hpp
+++ b/src/share/vm/ci/ciInstanceKlass.hpp
@@ -35,15 +35,16 @@ class ciInstanceKlass : public ciKlass {
friend class ciBytecodeStream;
private:
- bool _is_shared;
-
jobject _loader;
jobject _protection_domain;
+ bool _is_shared;
bool _is_initialized;
bool _is_linked;
bool _has_finalizer;
bool _has_subklass;
+ bool _has_nonstatic_fields;
+
ciFlags _flags;
jint _nonstatic_field_size;
jint _nonstatic_oop_map_size;
@@ -132,6 +133,9 @@ public:
jint nonstatic_field_size() {
assert(is_loaded(), "must be loaded");
return _nonstatic_field_size; }
+ jint has_nonstatic_fields() {
+ assert(is_loaded(), "must be loaded");
+ return _has_nonstatic_fields; }
jint nonstatic_oop_map_size() {
assert(is_loaded(), "must be loaded");
return _nonstatic_oop_map_size; }
@@ -164,8 +168,7 @@ public:
bool has_finalizable_subclass();
bool contains_field_offset(int offset) {
- return (offset/wordSize) >= instanceOopDesc::header_size()
- && (offset/wordSize)-instanceOopDesc::header_size() < nonstatic_field_size();
+ return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size());
}
// Get the instance of java.lang.Class corresponding to
diff --git a/src/share/vm/ci/ciObjectFactory.cpp b/src/share/vm/ci/ciObjectFactory.cpp
index 52612b55b..649b73543 100644
--- a/src/share/vm/ci/ciObjectFactory.cpp
+++ b/src/share/vm/ci/ciObjectFactory.cpp
@@ -121,7 +121,7 @@ void ciObjectFactory::init_shared_objects() {
for (int i = T_BOOLEAN; i <= T_CONFLICT; i++) {
BasicType t = (BasicType)i;
- if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY) {
+ if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY && t != T_NARROWOOP) {
ciType::_basic_types[t] = new (_arena) ciType(t);
init_ident_of(ciType::_basic_types[t]);
}
diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
index 73ab038a7..752ccdfec 100644
--- a/src/share/vm/classfile/classFileParser.cpp
+++ b/src/share/vm/classfile/classFileParser.cpp
@@ -2341,7 +2341,7 @@ void ClassFileParser::java_lang_Class_fix_post(int* next_nonstatic_oop_offset_pt
// Incrementing next_nonstatic_oop_offset here advances the
// location where the real java fields are placed.
const int extra = java_lang_Class::number_of_fake_oop_fields;
- (*next_nonstatic_oop_offset_ptr) += (extra * wordSize);
+ (*next_nonstatic_oop_offset_ptr) += (extra * heapOopSize);
}
@@ -2647,7 +2647,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
align_object_offset(vtable_size) +
align_object_offset(itable_size)) * wordSize;
next_static_double_offset = next_static_oop_offset +
- (fac.static_oop_count * oopSize);
+ (fac.static_oop_count * heapOopSize);
if ( fac.static_double_count &&
(Universe::field_type_should_be_aligned(T_DOUBLE) ||
Universe::field_type_should_be_aligned(T_LONG)) ) {
@@ -2687,6 +2687,14 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
int nonstatic_byte_count = fac.nonstatic_byte_count;
int nonstatic_oop_count = fac.nonstatic_oop_count;
+ bool super_has_nonstatic_fields =
+ (super_klass() != NULL && super_klass->has_nonstatic_fields());
+ bool has_nonstatic_fields = super_has_nonstatic_fields ||
+ ((nonstatic_double_count + nonstatic_word_count +
+ nonstatic_short_count + nonstatic_byte_count +
+ nonstatic_oop_count) != 0);
+
+
// Prepare list of oops for oop maps generation.
u2* nonstatic_oop_offsets;
u2* nonstatic_oop_length;
@@ -2703,7 +2711,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
java_lang_Class_fix_post(&next_nonstatic_field_offset);
nonstatic_oop_offsets[0] = (u2)first_nonstatic_field_offset;
int fake_oop_count = (( next_nonstatic_field_offset -
- first_nonstatic_field_offset ) / oopSize);
+ first_nonstatic_field_offset ) / heapOopSize);
nonstatic_oop_length [0] = (u2)fake_oop_count;
nonstatic_oop_map_count = 1;
nonstatic_oop_count -= fake_oop_count;
@@ -2715,7 +2723,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
#ifndef PRODUCT
if( PrintCompactFieldsSavings ) {
next_nonstatic_double_offset = next_nonstatic_field_offset +
- (nonstatic_oop_count * oopSize);
+ (nonstatic_oop_count * heapOopSize);
if ( nonstatic_double_count > 0 ) {
next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong);
}
@@ -2749,7 +2757,15 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
class_name() == vmSymbols::java_lang_ref_SoftReference() ||
class_name() == vmSymbols::java_lang_StackTraceElement() ||
class_name() == vmSymbols::java_lang_String() ||
- class_name() == vmSymbols::java_lang_Throwable()) ) {
+ class_name() == vmSymbols::java_lang_Throwable() ||
+ class_name() == vmSymbols::java_lang_Boolean() ||
+ class_name() == vmSymbols::java_lang_Character() ||
+ class_name() == vmSymbols::java_lang_Float() ||
+ class_name() == vmSymbols::java_lang_Double() ||
+ class_name() == vmSymbols::java_lang_Byte() ||
+ class_name() == vmSymbols::java_lang_Short() ||
+ class_name() == vmSymbols::java_lang_Integer() ||
+ class_name() == vmSymbols::java_lang_Long())) {
allocation_style = 0; // Allocate oops first
compact_fields = false; // Don't compact fields
}
@@ -2758,7 +2774,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
// Fields order: oops, longs/doubles, ints, shorts/chars, bytes
next_nonstatic_oop_offset = next_nonstatic_field_offset;
next_nonstatic_double_offset = next_nonstatic_oop_offset +
- (nonstatic_oop_count * oopSize);
+ (nonstatic_oop_count * heapOopSize);
} else if( allocation_style == 1 ) {
// Fields order: longs/doubles, ints, shorts/chars, bytes, oops
next_nonstatic_double_offset = next_nonstatic_field_offset;
@@ -2775,8 +2791,18 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
int nonstatic_short_space_offset;
int nonstatic_byte_space_offset;
- if( nonstatic_double_count > 0 ) {
- int offset = next_nonstatic_double_offset;
+ bool compact_into_header = (UseCompressedOops &&
+ allocation_style == 1 && compact_fields &&
+ !super_has_nonstatic_fields);
+
+ if( compact_into_header || nonstatic_double_count > 0 ) {
+ int offset;
+ // Pack something in with the header if no super klass has done so.
+ if (compact_into_header) {
+ offset = oopDesc::klass_gap_offset_in_bytes();
+ } else {
+ offset = next_nonstatic_double_offset;
+ }
next_nonstatic_double_offset = align_size_up(offset, BytesPerLong);
if( compact_fields && offset != next_nonstatic_double_offset ) {
// Allocate available fields into the gap before double field.
@@ -2804,12 +2830,13 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
}
// Allocate oop field in the gap if there are no other fields for that.
nonstatic_oop_space_offset = offset;
- if( length >= oopSize && nonstatic_oop_count > 0 &&
+ if(!compact_into_header && length >= heapOopSize &&
+ nonstatic_oop_count > 0 &&
allocation_style != 0 ) { // when oop fields not first
nonstatic_oop_count -= 1;
nonstatic_oop_space_count = 1; // Only one will fit
- length -= oopSize;
- offset += oopSize;
+ length -= heapOopSize;
+ offset += heapOopSize;
}
}
}
@@ -2828,9 +2855,9 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
next_nonstatic_oop_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
if( nonstatic_oop_count > 0 ) {
notaligned_offset = next_nonstatic_oop_offset;
- next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, oopSize);
+ next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, heapOopSize);
}
- notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * oopSize);
+ notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
}
next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize );
nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
@@ -2846,7 +2873,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
switch (atype) {
case STATIC_OOP:
real_offset = next_static_oop_offset;
- next_static_oop_offset += oopSize;
+ next_static_oop_offset += heapOopSize;
break;
case STATIC_BYTE:
real_offset = next_static_byte_offset;
@@ -2868,16 +2895,16 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
case NONSTATIC_OOP:
if( nonstatic_oop_space_count > 0 ) {
real_offset = nonstatic_oop_space_offset;
- nonstatic_oop_space_offset += oopSize;
+ nonstatic_oop_space_offset += heapOopSize;
nonstatic_oop_space_count -= 1;
} else {
real_offset = next_nonstatic_oop_offset;
- next_nonstatic_oop_offset += oopSize;
+ next_nonstatic_oop_offset += heapOopSize;
}
// Update oop maps
if( nonstatic_oop_map_count > 0 &&
nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
- (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * oopSize) ) {
+ (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * heapOopSize) ) {
// Extend current oop map
nonstatic_oop_length[nonstatic_oop_map_count - 1] += 1;
} else {
@@ -2970,6 +2997,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
//this_klass->set_super(super_klass());
this_klass->set_class_loader(class_loader());
this_klass->set_nonstatic_field_size(nonstatic_field_size);
+ this_klass->set_has_nonstatic_fields(has_nonstatic_fields);
this_klass->set_static_oop_field_size(fac.static_oop_count);
cp->set_pool_holder(this_klass());
this_klass->set_constants(cp());
@@ -3128,7 +3156,7 @@ int ClassFileParser::compute_oop_map_size(instanceKlassHandle super, int nonstat
OopMapBlock* first_map = super->start_of_nonstatic_oop_maps();
OopMapBlock* last_map = first_map + map_size - 1;
- int next_offset = last_map->offset() + (last_map->length() * oopSize);
+ int next_offset = last_map->offset() + (last_map->length() * heapOopSize);
if (next_offset == first_nonstatic_oop_offset) {
// There is no gap bettwen superklass's last oop field and first
// local oop field, merge maps.
diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp
index 19ed2e419..e8190d501 100644
--- a/src/share/vm/classfile/javaClasses.cpp
+++ b/src/share/vm/classfile/javaClasses.cpp
@@ -520,16 +520,12 @@ void java_lang_Thread::compute_offsets() {
JavaThread* java_lang_Thread::thread(oop java_thread) {
- return (JavaThread*) java_thread->obj_field(_eetop_offset);
+ return (JavaThread*)java_thread->address_field(_eetop_offset);
}
void java_lang_Thread::set_thread(oop java_thread, JavaThread* thread) {
- // We are storing a JavaThread* (malloc'ed data) into a long field in the thread
- // object. The store has to be 64-bit wide so we use a pointer store, but we
- // cannot call oopDesc::obj_field_put since it includes a write barrier!
- oop* addr = java_thread->obj_field_addr(_eetop_offset);
- *addr = (oop) thread;
+ java_thread->address_field_put(_eetop_offset, (address)thread);
}
@@ -1038,8 +1034,8 @@ class BacktraceBuilder: public StackObj {
if (_dirty && _methods != NULL) {
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
- bs->write_ref_array(MemRegion((HeapWord*)_methods->obj_at_addr(0),
- _methods->length() * HeapWordsPerOop));
+ bs->write_ref_array(MemRegion((HeapWord*)_methods->base(),
+ _methods->array_size()));
_dirty = false;
}
}
@@ -1083,8 +1079,9 @@ class BacktraceBuilder: public StackObj {
method = mhandle();
}
- // _methods->obj_at_put(_index, method);
- *_methods->obj_at_addr(_index) = method;
+ _methods->obj_at_put(_index, method);
+ // bad for UseCompressedOops
+ // *_methods->obj_at_addr(_index) = method;
_bcis->ushort_at_put(_index, bci);
_index++;
_dirty = true;
@@ -1973,39 +1970,30 @@ BasicType java_lang_boxing_object::set_value(oop box, jvalue* value) {
// Support for java_lang_ref_Reference
-
-void java_lang_ref_Reference::set_referent(oop ref, oop value) {
- ref->obj_field_put(referent_offset, value);
-}
-
-oop* java_lang_ref_Reference::referent_addr(oop ref) {
- return ref->obj_field_addr(referent_offset);
-}
-
-void java_lang_ref_Reference::set_next(oop ref, oop value) {
- ref->obj_field_put(next_offset, value);
-}
-
-oop* java_lang_ref_Reference::next_addr(oop ref) {
- return ref->obj_field_addr(next_offset);
-}
-
-void java_lang_ref_Reference::set_discovered(oop ref, oop value) {
- ref->obj_field_put(discovered_offset, value);
-}
-
-oop* java_lang_ref_Reference::discovered_addr(oop ref) {
- return ref->obj_field_addr(discovered_offset);
+oop java_lang_ref_Reference::pending_list_lock() {
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+ char *addr = (((char *)ik->start_of_static_fields()) + static_lock_offset);
+ if (UseCompressedOops) {
+ return oopDesc::load_decode_heap_oop((narrowOop *)addr);
+ } else {
+ return oopDesc::load_decode_heap_oop((oop*)addr);
+ }
}
-oop* java_lang_ref_Reference::pending_list_lock_addr() {
+HeapWord *java_lang_ref_Reference::pending_list_addr() {
instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
- return (oop*)(((char *)ik->start_of_static_fields()) + static_lock_offset);
+ char *addr = (((char *)ik->start_of_static_fields()) + static_pending_offset);
+ // XXX This might not be HeapWord aligned, almost rather be char *.
+ return (HeapWord*)addr;
}
-oop* java_lang_ref_Reference::pending_list_addr() {
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
- return (oop *)(((char *)ik->start_of_static_fields()) + static_pending_offset);
+oop java_lang_ref_Reference::pending_list() {
+ char *addr = (char *)pending_list_addr();
+ if (UseCompressedOops) {
+ return oopDesc::load_decode_heap_oop((narrowOop *)addr);
+ } else {
+ return oopDesc::load_decode_heap_oop((oop*)addr);
+ }
}
@@ -2291,8 +2279,11 @@ oop java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(
// Invoked before SystemDictionary::initialize, so pre-loaded classes
// are not available to determine the offset_of_static_fields.
void JavaClasses::compute_hard_coded_offsets() {
- const int x = wordSize;
- const int header = instanceOopDesc::header_size_in_bytes();
+ const int x = heapOopSize;
+ // Objects don't get allocated in the gap in the header with compressed oops
+ // for these special classes because hard coded offsets can't be conditional
+ // so base_offset_in_bytes() is wrong here, allocate after the header.
+ const int header = sizeof(instanceOopDesc);
// Do the String Class
java_lang_String::value_offset = java_lang_String::hc_value_offset * x + header;
diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp
index 8ac7cf883..32e278b06 100644
--- a/src/share/vm/classfile/javaClasses.hpp
+++ b/src/share/vm/classfile/javaClasses.hpp
@@ -691,24 +691,47 @@ class java_lang_ref_Reference: AllStatic {
static int number_of_fake_oop_fields;
// Accessors
- static oop referent(oop ref) { return *referent_addr(ref); }
- static void set_referent(oop ref, oop value);
- static oop* referent_addr(oop ref);
-
- static oop next(oop ref) { return *next_addr(ref); }
- static void set_next(oop ref, oop value);
- static oop* next_addr(oop ref);
-
- static oop discovered(oop ref) { return *discovered_addr(ref); }
- static void set_discovered(oop ref, oop value);
- static oop* discovered_addr(oop ref);
-
+ static oop referent(oop ref) {
+ return ref->obj_field(referent_offset);
+ }
+ static void set_referent(oop ref, oop value) {
+ ref->obj_field_put(referent_offset, value);
+ }
+ static void set_referent_raw(oop ref, oop value) {
+ ref->obj_field_raw_put(referent_offset, value);
+ }
+ static HeapWord* referent_addr(oop ref) {
+ return ref->obj_field_addr<HeapWord>(referent_offset);
+ }
+ static oop next(oop ref) {
+ return ref->obj_field(next_offset);
+ }
+ static void set_next(oop ref, oop value) {
+ ref->obj_field_put(next_offset, value);
+ }
+ static void set_next_raw(oop ref, oop value) {
+ ref->obj_field_raw_put(next_offset, value);
+ }
+ static HeapWord* next_addr(oop ref) {
+ return ref->obj_field_addr<HeapWord>(next_offset);
+ }
+ static oop discovered(oop ref) {
+ return ref->obj_field(discovered_offset);
+ }
+ static void set_discovered(oop ref, oop value) {
+ ref->obj_field_put(discovered_offset, value);
+ }
+ static void set_discovered_raw(oop ref, oop value) {
+ ref->obj_field_raw_put(discovered_offset, value);
+ }
+ static HeapWord* discovered_addr(oop ref) {
+ return ref->obj_field_addr<HeapWord>(discovered_offset);
+ }
// Accessors for statics
- static oop pending_list_lock() { return *pending_list_lock_addr(); }
- static oop pending_list() { return *pending_list_addr(); }
+ static oop pending_list_lock();
+ static oop pending_list();
- static oop* pending_list_lock_addr();
- static oop* pending_list_addr();
+ static HeapWord* pending_list_addr();
};
diff --git a/src/share/vm/compiler/oopMap.cpp b/src/share/vm/compiler/oopMap.cpp
index 9f5a1ada4..2984d647e 100644
--- a/src/share/vm/compiler/oopMap.cpp
+++ b/src/share/vm/compiler/oopMap.cpp
@@ -169,11 +169,8 @@ void OopMap::set_value(VMReg reg) {
}
-void OopMap::set_dead(VMReg reg) {
- // At this time, we only need dead entries in our OopMap when ZapDeadCompiledLocals is active.
- if (ZapDeadCompiledLocals) {
- set_xxx(reg, OopMapValue::dead_value, VMRegImpl::Bad());
- }
+void OopMap::set_narrowoop(VMReg reg) {
+ set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}
@@ -305,7 +302,9 @@ OopMap* OopMapSet::find_map_at_offset(int pc_offset) const {
}
class DoNothingClosure: public OopClosure {
-public: void do_oop(oop* p) {}
+ public:
+ void do_oop(oop* p) {}
+ void do_oop(narrowOop* p) {}
};
static DoNothingClosure do_nothing;
@@ -349,23 +348,21 @@ static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f) {
// add derived oops to a table
- all_do(fr, reg_map, f, add_derived_oop, &do_nothing, &do_nothing);
+ all_do(fr, reg_map, f, add_derived_oop, &do_nothing);
}
void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
OopClosure* oop_fn, void derived_oop_fn(oop*, oop*),
- OopClosure* value_fn, OopClosure* dead_fn) {
+ OopClosure* value_fn) {
CodeBlob* cb = fr->cb();
- {
- assert(cb != NULL, "no codeblob");
- }
+ assert(cb != NULL, "no codeblob");
NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)
OopMapSet* maps = cb->oop_maps();
- OopMap* map = cb->oop_map_for_return_address(fr->pc());
- assert(map != NULL, " no ptr map found");
+ OopMap* map = cb->oop_map_for_return_address(fr->pc());
+ assert(map != NULL, "no ptr map found");
// handle derived pointers first (otherwise base pointer may be
// changed before derived pointer offset has been collected)
@@ -393,8 +390,8 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
}
}
- // We want dead, value and oop oop_types
- int mask = OopMapValue::oop_value | OopMapValue::value_value | OopMapValue::dead_value;
+ // We want coop, value and oop oop_types
+ int mask = OopMapValue::oop_value | OopMapValue::value_value | OopMapValue::narrowoop_value;
{
for (OopMapStream oms(map,mask); !oms.is_done(); oms.next()) {
omv = oms.current();
@@ -402,11 +399,15 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
if ( loc != NULL ) {
if ( omv.type() == OopMapValue::oop_value ) {
#ifdef ASSERT
- if (COMPILER2_PRESENT(!DoEscapeAnalysis &&) !Universe::heap()->is_in_or_null(*loc)) {
+ if (COMPILER2_PRESENT(!DoEscapeAnalysis &&)
+ (((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
+ !Universe::heap()->is_in_or_null(*loc)) {
tty->print_cr("# Found non oop pointer. Dumping state at failure");
// try to dump out some helpful debugging information
trace_codeblob_maps(fr, reg_map);
omv.print();
+ tty->print_cr("register r");
+ omv.reg()->print();
tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc);
// do the real assert.
assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer");
@@ -415,8 +416,17 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
oop_fn->do_oop(loc);
} else if ( omv.type() == OopMapValue::value_value ) {
value_fn->do_oop(loc);
- } else if ( omv.type() == OopMapValue::dead_value ) {
- dead_fn->do_oop(loc);
+ } else if ( omv.type() == OopMapValue::narrowoop_value ) {
+ narrowOop *nl = (narrowOop*)loc;
+#ifndef VM_LITTLE_ENDIAN
+ if (!omv.reg()->is_stack()) {
+ // compressed oops in registers only take up 4 bytes of an
+ // 8 byte register but they are in the wrong part of the
+ // word so adjust loc to point at the right place.
+ nl = (narrowOop*)((address)nl + 4);
+ }
+#endif
+ oop_fn->do_oop(nl);
}
}
}
@@ -519,8 +529,8 @@ void print_register_type(OopMapValue::oop_types x, VMReg optional,
case OopMapValue::value_value:
st->print("Value" );
break;
- case OopMapValue::dead_value:
- st->print("Dead" );
+ case OopMapValue::narrowoop_value:
+ tty->print("NarrowOop" );
break;
case OopMapValue::callee_saved_value:
st->print("Callers_" );
diff --git a/src/share/vm/compiler/oopMap.hpp b/src/share/vm/compiler/oopMap.hpp
index 5c9c8c42f..ac05d570c 100644
--- a/src/share/vm/compiler/oopMap.hpp
+++ b/src/share/vm/compiler/oopMap.hpp
@@ -61,7 +61,7 @@ public:
unused_value =0, // powers of 2, for masking OopMapStream
oop_value = 1,
value_value = 2,
- dead_value = 4,
+ narrowoop_value = 4,
callee_saved_value = 8,
derived_oop_value= 16,
stack_obj = 32 };
@@ -90,14 +90,14 @@ public:
// Querying
bool is_oop() { return mask_bits(value(), type_mask_in_place) == oop_value; }
bool is_value() { return mask_bits(value(), type_mask_in_place) == value_value; }
- bool is_dead() { return mask_bits(value(), type_mask_in_place) == dead_value; }
+ bool is_narrowoop() { return mask_bits(value(), type_mask_in_place) == narrowoop_value; }
bool is_callee_saved() { return mask_bits(value(), type_mask_in_place) == callee_saved_value; }
bool is_derived_oop() { return mask_bits(value(), type_mask_in_place) == derived_oop_value; }
bool is_stack_obj() { return mask_bits(value(), type_mask_in_place) == stack_obj; }
void set_oop() { set_value((value() & register_mask_in_place) | oop_value); }
void set_value() { set_value((value() & register_mask_in_place) | value_value); }
- void set_dead() { set_value((value() & register_mask_in_place) | dead_value); }
+ void set_narrowoop() { set_value((value() & register_mask_in_place) | narrowoop_value); }
void set_callee_saved() { set_value((value() & register_mask_in_place) | callee_saved_value); }
void set_derived_oop() { set_value((value() & register_mask_in_place) | derived_oop_value); }
void set_stack_obj() { set_value((value() & register_mask_in_place) | stack_obj); }
@@ -176,6 +176,7 @@ class OopMap: public ResourceObj {
// slots to hold 4-byte values like ints and floats in the LP64 build.
void set_oop ( VMReg local);
void set_value( VMReg local);
+ void set_narrowoop(VMReg local);
void set_dead ( VMReg local);
void set_callee_saved( VMReg local, VMReg caller_machine_register );
void set_derived_oop ( VMReg local, VMReg derived_from_local_register );
@@ -245,7 +246,7 @@ class OopMapSet : public ResourceObj {
static void all_do(const frame* fr, const RegisterMap* reg_map,
OopClosure* oop_fn,
void derived_oop_fn(oop* base, oop* derived),
- OopClosure* value_fn, OopClosure* dead_fn);
+ OopClosure* value_fn);
// Printing
void print_on(outputStream* st) const;
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
index 1c6d7a548..87ca3afc4 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
@@ -29,22 +29,34 @@ class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
-template<class E> class GenericTaskQueue;
-typedef GenericTaskQueue<oop> OopTaskQueue;
-template<class E> class GenericTaskQueueSet;
-typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;
+// Decode the oop and call do_oop on it.
+#define DO_OOP_WORK_DEFN \
+ void do_oop(oop obj); \
+ template <class T> inline void do_oop_work(T* p) { \
+ T heap_oop = oopDesc::load_heap_oop(p); \
+ if (!oopDesc::is_null(heap_oop)) { \
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
+ do_oop(obj); \
+ } \
+ }
+
class MarkRefsIntoClosure: public OopsInGenClosure {
- const MemRegion _span;
- CMSBitMap* _bitMap;
- const bool _should_do_nmethods;
+ private:
+ const MemRegion _span;
+ CMSBitMap* _bitMap;
+ const bool _should_do_nmethods;
+ protected:
+ DO_OOP_WORK_DEFN
public:
MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
bool should_do_nmethods);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
bool do_header() { return true; }
virtual const bool do_nmethods() const {
return _should_do_nmethods;
@@ -57,15 +69,20 @@ class MarkRefsIntoClosure: public OopsInGenClosure {
// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
- const MemRegion _span;
- CMSBitMap* _verification_bm;
- CMSBitMap* _cms_bm;
- const bool _should_do_nmethods;
+ private:
+ const MemRegion _span;
+ CMSBitMap* _verification_bm;
+ CMSBitMap* _cms_bm;
+ const bool _should_do_nmethods;
+ protected:
+ DO_OOP_WORK_DEFN
public:
MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
CMSBitMap* cms_bm, bool should_do_nmethods);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
bool do_header() { return true; }
virtual const bool do_nmethods() const {
return _should_do_nmethods;
@@ -75,37 +92,40 @@ class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
}
};
-
// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public OopClosure {
- CMSCollector* _collector;
- MemRegion _span;
- CMSBitMap* _bit_map;
- CMSBitMap* _mod_union_table;
- CMSMarkStack* _mark_stack;
- CMSMarkStack* _revisit_stack;
- bool _concurrent_precleaning;
- bool const _should_remember_klasses;
+ private:
+ CMSCollector* _collector;
+ MemRegion _span;
+ CMSBitMap* _bit_map;
+ CMSBitMap* _mod_union_table;
+ CMSMarkStack* _mark_stack;
+ CMSMarkStack* _revisit_stack;
+ bool _concurrent_precleaning;
+ bool const _should_remember_klasses;
+ protected:
+ DO_OOP_WORK_DEFN
public:
PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
CMSBitMap* mod_union_table,
- CMSMarkStack* mark_stack,
- CMSMarkStack* revisit_stack,
- bool concurrent_precleaning);
-
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop(p); }
+ CMSMarkStack* mark_stack,
+ CMSMarkStack* revisit_stack,
+ bool concurrent_precleaning);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
bool do_header() { return true; }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
- const bool should_remember_klasses() const {
+ virtual const bool should_remember_klasses() const {
return _should_remember_klasses;
}
- void remember_klass(Klass* k);
+ virtual void remember_klass(Klass* k);
};
// In the parallel case, the revisit stack, the bit map and the
@@ -115,12 +135,15 @@ class PushAndMarkClosure: public OopClosure {
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public OopClosure {
- CMSCollector* _collector;
- MemRegion _span;
- CMSBitMap* _bit_map;
- OopTaskQueue* _work_queue;
- CMSMarkStack* _revisit_stack;
- bool const _should_remember_klasses;
+ private:
+ CMSCollector* _collector;
+ MemRegion _span;
+ CMSBitMap* _bit_map;
+ OopTaskQueue* _work_queue;
+ CMSMarkStack* _revisit_stack;
+ bool const _should_remember_klasses;
+ protected:
+ DO_OOP_WORK_DEFN
public:
Par_PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
@@ -128,43 +151,48 @@ class Par_PushAndMarkClosure: public OopClosure {
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* revisit_stack);
-
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
bool do_header() { return true; }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
- const bool should_remember_klasses() const {
+ virtual const bool should_remember_klasses() const {
return _should_remember_klasses;
}
- void remember_klass(Klass* k);
+ virtual void remember_klass(Klass* k);
};
-
// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
- MemRegion _span;
- CMSBitMap* _bit_map;
- CMSMarkStack* _mark_stack;
- PushAndMarkClosure _pushAndMarkClosure;
- CMSCollector* _collector;
- bool _yield;
+ private:
+ MemRegion _span;
+ CMSBitMap* _bit_map;
+ CMSMarkStack* _mark_stack;
+ PushAndMarkClosure _pushAndMarkClosure;
+ CMSCollector* _collector;
+ Mutex* _freelistLock;
+ bool _yield;
// Whether closure is being used for concurrent precleaning
- bool _concurrent_precleaning;
- Mutex* _freelistLock;
+ bool _concurrent_precleaning;
+ protected:
+ DO_OOP_WORK_DEFN
public:
MarkRefsIntoAndScanClosure(MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
CMSBitMap* mod_union_table,
- CMSMarkStack* mark_stack,
- CMSMarkStack* revisit_stack,
+ CMSMarkStack* mark_stack,
+ CMSMarkStack* revisit_stack,
CMSCollector* collector,
bool should_yield,
bool concurrent_precleaning);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
bool do_header() { return true; }
virtual const bool do_nmethods() const { return true; }
Prefetch::style prefetch_style() {
@@ -185,11 +213,14 @@ class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
// synchronized. An OopTaskQueue structure, supporting efficient
// workstealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
- MemRegion _span;
- CMSBitMap* _bit_map;
- OopTaskQueue* _work_queue;
- const uint _low_water_mark;
- Par_PushAndMarkClosure _par_pushAndMarkClosure;
+ private:
+ MemRegion _span;
+ CMSBitMap* _bit_map;
+ OopTaskQueue* _work_queue;
+ const uint _low_water_mark;
+ Par_PushAndMarkClosure _par_pushAndMarkClosure;
+ protected:
+ DO_OOP_WORK_DEFN
public:
Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
MemRegion span,
@@ -197,8 +228,10 @@ class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* revisit_stack);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
bool do_header() { return true; }
virtual const bool do_nmethods() const { return true; }
Prefetch::style prefetch_style() {
@@ -211,28 +244,34 @@ class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public OopClosure {
- CMSCollector* _collector;
- MemRegion _span;
- CMSBitMap* _bitMap;
- CMSMarkStack* _markStack;
- CMSMarkStack* _revisitStack;
- HeapWord* const _finger;
- MarkFromRootsClosure* const _parent;
- bool const _should_remember_klasses;
+ private:
+ CMSCollector* _collector;
+ MemRegion _span;
+ CMSBitMap* _bitMap;
+ CMSMarkStack* _markStack;
+ CMSMarkStack* _revisitStack;
+ HeapWord* const _finger;
+ MarkFromRootsClosure* const
+ _parent;
+ bool const _should_remember_klasses;
+ protected:
+ DO_OOP_WORK_DEFN
public:
PushOrMarkClosure(CMSCollector* cms_collector,
MemRegion span,
CMSBitMap* bitMap,
- CMSMarkStack* markStack,
- CMSMarkStack* revisitStack,
- HeapWord* finger,
+ CMSMarkStack* markStack,
+ CMSMarkStack* revisitStack,
+ HeapWord* finger,
MarkFromRootsClosure* parent);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop(p); }
- const bool should_remember_klasses() const {
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
+ virtual const bool should_remember_klasses() const {
return _should_remember_klasses;
}
- void remember_klass(Klass* k);
+ virtual void remember_klass(Klass* k);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
private:
@@ -244,6 +283,7 @@ class PushOrMarkClosure: public OopClosure {
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public OopClosure {
+ private:
CMSCollector* _collector;
MemRegion _whole_span;
MemRegion _span; // local chunk
@@ -253,24 +293,29 @@ class Par_PushOrMarkClosure: public OopClosure {
CMSMarkStack* _revisit_stack;
HeapWord* const _finger;
HeapWord** const _global_finger_addr;
- Par_MarkFromRootsClosure* const _parent;
- bool const _should_remember_klasses;
+ Par_MarkFromRootsClosure* const
+ _parent;
+ bool const _should_remember_klasses;
+ protected:
+ DO_OOP_WORK_DEFN
public:
Par_PushOrMarkClosure(CMSCollector* cms_collector,
- MemRegion span,
- CMSBitMap* bit_map,
- OopTaskQueue* work_queue,
- CMSMarkStack* mark_stack,
- CMSMarkStack* revisit_stack,
- HeapWord* finger,
- HeapWord** global_finger_addr,
- Par_MarkFromRootsClosure* parent);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop(p); }
- const bool should_remember_klasses() const {
+ MemRegion span,
+ CMSBitMap* bit_map,
+ OopTaskQueue* work_queue,
+ CMSMarkStack* mark_stack,
+ CMSMarkStack* revisit_stack,
+ HeapWord* finger,
+ HeapWord** global_finger_addr,
+ Par_MarkFromRootsClosure* parent);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+ virtual const bool should_remember_klasses() const {
return _should_remember_klasses;
}
- void remember_klass(Klass* k);
+ virtual void remember_klass(Klass* k);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
private:
@@ -282,10 +327,13 @@ class Par_PushOrMarkClosure: public OopClosure {
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step.
class CMSKeepAliveClosure: public OopClosure {
+ private:
CMSCollector* _collector;
MemRegion _span;
CMSMarkStack* _mark_stack;
CMSBitMap* _bit_map;
+ protected:
+ DO_OOP_WORK_DEFN
public:
CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
CMSBitMap* bit_map, CMSMarkStack* mark_stack):
@@ -293,16 +341,20 @@ class CMSKeepAliveClosure: public OopClosure {
_span(span),
_bit_map(bit_map),
_mark_stack(mark_stack) { }
-
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};
class CMSInnerParMarkAndPushClosure: public OopClosure {
+ private:
CMSCollector* _collector;
MemRegion _span;
OopTaskQueue* _work_queue;
CMSBitMap* _bit_map;
+ protected:
+ DO_OOP_WORK_DEFN
public:
CMSInnerParMarkAndPushClosure(CMSCollector* collector,
MemRegion span, CMSBitMap* bit_map,
@@ -311,24 +363,32 @@ class CMSInnerParMarkAndPushClosure: public OopClosure {
_span(span),
_bit_map(bit_map),
_work_queue(work_queue) { }
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};
// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public OopClosure {
+ private:
CMSCollector* _collector;
MemRegion _span;
OopTaskQueue* _work_queue;
CMSBitMap* _bit_map;
- CMSInnerParMarkAndPushClosure _mark_and_push;
+ CMSInnerParMarkAndPushClosure
+ _mark_and_push;
const uint _low_water_mark;
void trim_queue(uint max);
+ protected:
+ DO_OOP_WORK_DEFN
public:
CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
CMSBitMap* bit_map, OopTaskQueue* work_queue);
- void do_oop(oop* p);
- void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
};
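The DO_OOP_WORK_DEFN macro introduced above is the recurring pattern in this patch: load a slot that may hold either a full or a compressed reference, decode it if non-null, and funnel both virtual overloads into one do_oop(oop). A self-contained sketch of that shape, with toy stand-ins for the oopDesc load/decode helpers, might look like:

    // Sketch only -- not HotSpot code.  One templated helper serves both
    // oop* and narrowOop* slots: load, decode if non-null, then dispatch
    // to a single do_oop(oop).  load/decode are toy stand-ins for the
    // oopDesc helpers used by DO_OOP_WORK_DEFN.
    #include <cstdint>
    #include <cstdio>

    struct Obj { int id; };
    typedef Obj*     oop;
    typedef uint32_t narrowOop;            // index into a toy heap

    static Obj g_heap[16];
    static oop decode(narrowOop n) { return n == 0 ? nullptr : &g_heap[n]; }
    static oop load(oop* p)        { return *p; }
    static oop load(narrowOop* p)  { return decode(*p); }

    class MarkClosure {
     public:
      virtual ~MarkClosure() {}
      virtual void do_oop(oop* p)       { do_oop_work(p); }
      virtual void do_oop(narrowOop* p) { do_oop_work(p); }
     protected:
      void do_oop(oop obj) { std::printf("mark object %d\n", obj->id); }
      template <class T> void do_oop_work(T* p) {
        oop obj = load(p);                 // handles wide and narrow slots
        if (obj != nullptr) do_oop(obj);
      }
    };

    int main() {
      MarkClosure cl;
      g_heap[1].id = 1;  oop wide = &g_heap[1];
      g_heap[2].id = 2;  narrowOop narrow = 2;
      cl.do_oop(&wide);
      cl.do_oop(&narrow);
      return 0;
    }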
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
index de5611ddb..09d0db5e0 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
@@ -177,7 +177,7 @@ HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
assert(q->forwardee() == NULL, "should be forwarded to NULL");
}
- debug_only(MarkSweep::register_live_oop(q, adjusted_size));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
compact_top += adjusted_size;
// we need to update the offset table so that the beginnings of objects can be
@@ -1211,7 +1211,7 @@ FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
return fc;
}
-oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size, oop* ref) {
+oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
assert_locked();
@@ -2116,7 +2116,6 @@ void CompactibleFreeListSpace::split(size_t from, size_t to1) {
splitBirth(to2);
}
-
void CompactibleFreeListSpace::print() const {
tty->print(" CompactibleFreeListSpace");
Space::print();
@@ -2130,6 +2129,7 @@ void CompactibleFreeListSpace::prepare_for_verify() {
}
class VerifyAllBlksClosure: public BlkClosure {
+ private:
const CompactibleFreeListSpace* _sp;
const MemRegion _span;
@@ -2137,7 +2137,7 @@ class VerifyAllBlksClosure: public BlkClosure {
VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
MemRegion span) : _sp(sp), _span(span) { }
- size_t do_blk(HeapWord* addr) {
+ virtual size_t do_blk(HeapWord* addr) {
size_t res;
if (_sp->block_is_obj(addr)) {
oop p = oop(addr);
@@ -2160,12 +2160,54 @@ class VerifyAllBlksClosure: public BlkClosure {
};
class VerifyAllOopsClosure: public OopClosure {
+ private:
const CMSCollector* _collector;
const CompactibleFreeListSpace* _sp;
const MemRegion _span;
const bool _past_remark;
const CMSBitMap* _bit_map;
+ protected:
+ void do_oop(void* p, oop obj) {
+ if (_span.contains(obj)) { // the interior oop points into CMS heap
+ if (!_span.contains(p)) { // reference from outside CMS heap
+ // Should be a valid object; the first disjunct below allows
+ // us to sidestep an assertion in block_is_obj() that insists
+ // that p be in _sp. Note that several generations (and spaces)
+ // are spanned by _span (CMS heap) above.
+ guarantee(!_sp->is_in_reserved(obj) ||
+ _sp->block_is_obj((HeapWord*)obj),
+ "Should be an object");
+ guarantee(obj->is_oop(), "Should be an oop");
+ obj->verify();
+ if (_past_remark) {
+ // Remark has been completed, the object should be marked
+ _bit_map->isMarked((HeapWord*)obj);
+ }
+ } else { // reference within CMS heap
+ if (_past_remark) {
+ // Remark has been completed -- so the referent should have
+ // been marked, if referring object is.
+ if (_bit_map->isMarked(_collector->block_start(p))) {
+ guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
+ }
+ }
+ }
+ } else if (_sp->is_in_reserved(p)) {
+ // the reference is from FLS, and points out of FLS
+ guarantee(obj->is_oop(), "Should be an oop");
+ obj->verify();
+ }
+ }
+
+ template <class T> void do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ do_oop(p, obj);
+ }
+ }
+
public:
VerifyAllOopsClosure(const CMSCollector* collector,
const CompactibleFreeListSpace* sp, MemRegion span,
@@ -2173,40 +2215,8 @@ class VerifyAllOopsClosure: public OopClosure {
OopClosure(), _collector(collector), _sp(sp), _span(span),
_past_remark(past_remark), _bit_map(bit_map) { }
- void do_oop(oop* ptr) {
- oop p = *ptr;
- if (p != NULL) {
- if (_span.contains(p)) { // the interior oop points into CMS heap
- if (!_span.contains(ptr)) { // reference from outside CMS heap
- // Should be a valid object; the first disjunct below allows
- // us to sidestep an assertion in block_is_obj() that insists
- // that p be in _sp. Note that several generations (and spaces)
- // are spanned by _span (CMS heap) above.
- guarantee(!_sp->is_in_reserved(p) || _sp->block_is_obj((HeapWord*)p),
- "Should be an object");
- guarantee(p->is_oop(), "Should be an oop");
- p->verify();
- if (_past_remark) {
- // Remark has been completed, the object should be marked
- _bit_map->isMarked((HeapWord*)p);
- }
- }
- else { // reference within CMS heap
- if (_past_remark) {
- // Remark has been completed -- so the referent should have
- // been marked, if referring object is.
- if (_bit_map->isMarked(_collector->block_start(ptr))) {
- guarantee(_bit_map->isMarked((HeapWord*)p), "Marking error?");
- }
- }
- }
- } else if (_sp->is_in_reserved(ptr)) {
- // the reference is from FLS, and points out of FLS
- guarantee(p->is_oop(), "Should be an oop");
- p->verify();
- }
- }
- }
+ virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
};
void CompactibleFreeListSpace::verify(bool ignored) const {
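The rewritten VerifyAllOopsClosure above keys its checks on whether the slot and the referent fall inside the collector's span. A tiny standalone model of that contains() decision, with invented regions, is:

    // Sketch only -- not HotSpot code.  The verification logic branches on
    // whether the slot and the referent lie inside the span; a MemRegion
    // with contains() is enough to model the decision.
    #include <cstdint>
    #include <cstdio>

    struct MemRegion {
      uintptr_t _start, _end;
      bool contains(const void* p) const {
        uintptr_t a = reinterpret_cast<uintptr_t>(p);
        return a >= _start && a < _end;
      }
    };

    int main() {
      static char cms_heap[1024];
      static char other_gen[256];
      MemRegion span = { reinterpret_cast<uintptr_t>(cms_heap),
                         reinterpret_cast<uintptr_t>(cms_heap) + sizeof(cms_heap) };
      void* slot     = other_gen;          // slot outside the span
      void* referent = cms_heap + 64;      // referent inside it
      if (span.contains(referent)) {
        std::printf(span.contains(slot) ? "reference within CMS heap\n"
                                        : "reference from outside CMS heap\n");
      }
      return 0;
    }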
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
index 5eb0f41b6..729556bae 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
@@ -540,7 +540,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
HeapWord* allocate(size_t size);
HeapWord* par_allocate(size_t size);
- oop promote(oop obj, size_t obj_size, oop* ref);
+ oop promote(oop obj, size_t obj_size);
void gc_prologue();
void gc_epilogue();
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index 42593073c..689ede0ea 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -1226,7 +1226,7 @@ CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
return NULL;
}
-oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
+oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
// allocate, copy and if necessary update promoinfo --
// delegate to underlying space.
@@ -1238,7 +1238,7 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
}
#endif // #ifndef PRODUCT
- oop res = _cmsSpace->promote(obj, obj_size, ref);
+ oop res = _cmsSpace->promote(obj, obj_size);
if (res == NULL) {
// expand and retry
size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
@@ -1249,7 +1249,7 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
assert(next_gen() == NULL, "assumption, based upon which no attempt "
"is made to pass on a possibly failing "
"promotion to next generation");
- res = _cmsSpace->promote(obj, obj_size, ref);
+ res = _cmsSpace->promote(obj, obj_size);
}
if (res != NULL) {
// See comment in allocate() about when objects should
@@ -3922,13 +3922,15 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
}
class Par_ConcMarkingClosure: public OopClosure {
+ private:
CMSCollector* _collector;
MemRegion _span;
CMSBitMap* _bit_map;
CMSMarkStack* _overflow_stack;
CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
OopTaskQueue* _work_queue;
-
+ protected:
+ DO_OOP_WORK_DEFN
public:
Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
@@ -3937,8 +3939,8 @@ class Par_ConcMarkingClosure: public OopClosure {
_work_queue(work_queue),
_bit_map(bit_map),
_overflow_stack(overflow_stack) { } // need to initialize revisit stack etc.
-
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
void trim_queue(size_t max);
void handle_stack_overflow(HeapWord* lost);
};
@@ -3947,11 +3949,9 @@ class Par_ConcMarkingClosure: public OopClosure {
// the salient assumption here is that stolen oops must
// always be initialized, so we do not need to check for
// uninitialized objects before scanning here.
-void Par_ConcMarkingClosure::do_oop(oop* p) {
- oop this_oop = *p;
- assert(this_oop->is_oop_or_null(),
- "expected an oop or NULL");
- HeapWord* addr = (HeapWord*)this_oop;
+void Par_ConcMarkingClosure::do_oop(oop obj) {
+ assert(obj->is_oop_or_null(), "expected an oop or NULL");
+ HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
@@ -3970,7 +3970,7 @@ void Par_ConcMarkingClosure::do_oop(oop* p) {
}
)
if (simulate_overflow ||
- !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
+ !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
// stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
@@ -3987,6 +3987,9 @@ void Par_ConcMarkingClosure::do_oop(oop* p) {
}
}
+void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
+void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
+
void Par_ConcMarkingClosure::trim_queue(size_t max) {
while (_work_queue->size() > max) {
oop new_oop;
@@ -4086,8 +4089,8 @@ void CMSConcMarkingTask::coordinator_yield() {
//
// Tony 2006.06.29
for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
+ ConcurrentMarkSweepThread::should_yield() &&
+ !CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
@@ -6048,8 +6051,8 @@ void CMSCollector::reset(bool asynch) {
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
+ ConcurrentMarkSweepThread::should_yield() &&
+ !CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
@@ -6362,19 +6365,19 @@ MarkRefsIntoClosure::MarkRefsIntoClosure(
assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}
-void MarkRefsIntoClosure::do_oop(oop* p) {
+void MarkRefsIntoClosure::do_oop(oop obj) {
// if obj points into _span, then mark the corresponding bit in _markBitMap
- oop thisOop = *p;
- if (thisOop != NULL) {
- assert(thisOop->is_oop(), "expected an oop");
- HeapWord* addr = (HeapWord*)thisOop;
- if (_span.contains(addr)) {
- // this should be made more efficient
- _bitMap->mark(addr);
- }
+ assert(obj->is_oop(), "expected an oop");
+ HeapWord* addr = (HeapWord*)obj;
+ if (_span.contains(addr)) {
+ // this should be made more efficient
+ _bitMap->mark(addr);
}
}
+void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
+void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
+
// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
@@ -6387,23 +6390,23 @@ MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
}
-void MarkRefsIntoVerifyClosure::do_oop(oop* p) {
+void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
// if obj points into _span, then mark the corresponding bit in _markBitMap
- oop this_oop = *p;
- if (this_oop != NULL) {
- assert(this_oop->is_oop(), "expected an oop");
- HeapWord* addr = (HeapWord*)this_oop;
- if (_span.contains(addr)) {
- _verification_bm->mark(addr);
- if (!_cms_bm->isMarked(addr)) {
- oop(addr)->print();
- gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
- fatal("... aborting");
- }
+ assert(obj->is_oop(), "expected an oop");
+ HeapWord* addr = (HeapWord*)obj;
+ if (_span.contains(addr)) {
+ _verification_bm->mark(addr);
+ if (!_cms_bm->isMarked(addr)) {
+ oop(addr)->print();
+ gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
+ fatal("... aborting");
}
}
}
+void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
+void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
+
//////////////////////////////////////////////////
// MarkRefsIntoAndScanClosure
//////////////////////////////////////////////////
@@ -6438,13 +6441,13 @@ MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
// The marks are made in the marking bit map and the marking stack is
// used for keeping the (newly) grey objects during the scan.
// The parallel version (Par_...) appears further below.
-void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
- oop this_oop = *p;
- if (this_oop != NULL) {
- assert(this_oop->is_oop(), "expected an oop");
- HeapWord* addr = (HeapWord*)this_oop;
- assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
- assert(_collector->overflow_list_is_empty(), "should be empty");
+void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
+ if (obj != NULL) {
+ assert(obj->is_oop(), "expected an oop");
+ HeapWord* addr = (HeapWord*)obj;
+ assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
+ assert(_collector->overflow_list_is_empty(),
+ "overflow list should be empty");
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
// mark bit map (object is now grey)
@@ -6452,7 +6455,7 @@ void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
// push on marking stack (stack should be empty), and drain the
// stack by applying this closure to the oops in the oops popped
// from the stack (i.e. blacken the grey objects)
- bool res = _mark_stack->push(this_oop);
+ bool res = _mark_stack->push(obj);
assert(res, "Should have space to push on empty stack");
do {
oop new_oop = _mark_stack->pop();
@@ -6488,6 +6491,9 @@ void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
}
}
+void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+
void MarkRefsIntoAndScanClosure::do_yield_work() {
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
"CMS thread should hold CMS token");
@@ -6506,9 +6512,11 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
_collector->icms_wait();
// See the comment in coordinator_yield()
- for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
+ for (unsigned i = 0;
+ i < CMSYieldSleepCount &&
+ ConcurrentMarkSweepThread::should_yield() &&
+ !CMSCollector::foregroundGCIsActive();
+ ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
@@ -6545,13 +6553,12 @@ Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
// the scan phase whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
-void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
- oop this_oop = *p;
- if (this_oop != NULL) {
+void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
+ if (obj != NULL) {
// Ignore mark word because this could be an already marked oop
// that may be chained at the end of the overflow list.
- assert(this_oop->is_oop(true /* ignore mark word */), "expected an oop");
- HeapWord* addr = (HeapWord*)this_oop;
+ assert(obj->is_oop(true /* ignore mark word */), "expected an oop");
+ HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
// mark bit map (object will become grey):
@@ -6565,7 +6572,7 @@ void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
// queue to an appropriate length by applying this closure to
// the oops in the oops popped from the stack (i.e. blacken the
// grey objects)
- bool res = _work_queue->push(this_oop);
+ bool res = _work_queue->push(obj);
assert(res, "Low water mark should be less than capacity?");
trim_queue(_low_water_mark);
} // Else, another thread claimed the object
@@ -6573,6 +6580,9 @@ void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
}
}
+void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+
// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
@@ -6675,8 +6685,8 @@ void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
+ ConcurrentMarkSweepThread::should_yield() &&
+ !CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
@@ -6928,13 +6938,13 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
assert(_markStack->isEmpty(),
"should drain stack to limit stack usage");
// convert ptr to an oop preparatory to scanning
- oop this_oop = oop(ptr);
+ oop obj = oop(ptr);
// Ignore mark word in verification below, since we
// may be running concurrent with mutators.
- assert(this_oop->is_oop(true), "should be an oop");
+ assert(obj->is_oop(true), "should be an oop");
assert(_finger <= ptr, "_finger runneth ahead");
// advance the finger to right end of this object
- _finger = ptr + this_oop->size();
+ _finger = ptr + obj->size();
assert(_finger > ptr, "we just incremented it above");
// On large heaps, it may take us some time to get through
// the marking phase (especially if running iCMS). During
@@ -6980,7 +6990,7 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
_span, _bitMap, _markStack,
_revisitStack,
_finger, this);
- bool res = _markStack->push(this_oop);
+ bool res = _markStack->push(obj);
assert(res, "Empty non-zero size stack should have space for single push");
while (!_markStack->isEmpty()) {
oop new_oop = _markStack->pop();
@@ -7052,13 +7062,13 @@ void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
assert(_work_queue->size() == 0,
"should drain stack to limit stack usage");
// convert ptr to an oop preparatory to scanning
- oop this_oop = oop(ptr);
+ oop obj = oop(ptr);
// Ignore mark word in verification below, since we
// may be running concurrent with mutators.
- assert(this_oop->is_oop(true), "should be an oop");
+ assert(obj->is_oop(true), "should be an oop");
assert(_finger <= ptr, "_finger runneth ahead");
// advance the finger to right end of this object
- _finger = ptr + this_oop->size();
+ _finger = ptr + obj->size();
assert(_finger > ptr, "we just incremented it above");
// On large heaps, it may take us some time to get through
// the marking phase (especially if running iCMS). During
@@ -7106,7 +7116,7 @@ void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
_revisit_stack,
_finger,
gfa, this);
- bool res = _work_queue->push(this_oop); // overflow could occur here
+ bool res = _work_queue->push(obj); // overflow could occur here
assert(res, "Will hold once we use workqueues");
while (true) {
oop new_oop;
@@ -7176,15 +7186,15 @@ void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
assert(_mark_stack->isEmpty(),
"should drain stack to limit stack usage");
// convert addr to an oop preparatory to scanning
- oop this_oop = oop(addr);
- assert(this_oop->is_oop(), "should be an oop");
+ oop obj = oop(addr);
+ assert(obj->is_oop(), "should be an oop");
assert(_finger <= addr, "_finger runneth ahead");
// advance the finger to right end of this object
- _finger = addr + this_oop->size();
+ _finger = addr + obj->size();
assert(_finger > addr, "we just incremented it above");
// Note: the finger doesn't advance while we drain
// the stack below.
- bool res = _mark_stack->push(this_oop);
+ bool res = _mark_stack->push(obj);
assert(res, "Empty non-zero size stack should have space for single push");
while (!_mark_stack->isEmpty()) {
oop new_oop = _mark_stack->pop();
@@ -7207,6 +7217,8 @@ PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
_mark_stack(mark_stack)
{ }
+void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
+void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
@@ -7219,20 +7231,20 @@ void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
_mark_stack->expand(); // expand the stack if possible
}
-void PushAndMarkVerifyClosure::do_oop(oop* p) {
- oop this_oop = *p;
- assert(this_oop->is_oop_or_null(), "expected an oop or NULL");
- HeapWord* addr = (HeapWord*)this_oop;
+void PushAndMarkVerifyClosure::do_oop(oop obj) {
+ assert(obj->is_oop_or_null(), "expected an oop or NULL");
+ HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
_verification_bm->mark(addr); // now grey
if (!_cms_bm->isMarked(addr)) {
oop(addr)->print();
- gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
+ gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
+ addr);
fatal("... aborting");
}
- if (!_mark_stack->push(this_oop)) { // stack overflow
+ if (!_mark_stack->push(obj)) { // stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
SIZE_FORMAT, _mark_stack->capacity());
@@ -7285,7 +7297,6 @@ Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
_should_remember_klasses(collector->should_unload_classes())
{ }
-
void CMSCollector::lower_restart_addr(HeapWord* low) {
assert(_span.contains(low), "Out of bounds addr");
if (_restart_addr == NULL) {
@@ -7321,12 +7332,10 @@ void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
_overflow_stack->expand(); // expand the stack if possible
}
-
-void PushOrMarkClosure::do_oop(oop* p) {
- oop thisOop = *p;
+void PushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators.
- assert(thisOop->is_oop_or_null(true), "expected an oop or NULL");
- HeapWord* addr = (HeapWord*)thisOop;
+ assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+ HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
_bitMap->mark(addr); // now grey
@@ -7342,7 +7351,7 @@ void PushOrMarkClosure::do_oop(oop* p) {
simulate_overflow = true;
}
)
- if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow
+ if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
SIZE_FORMAT, _markStack->capacity());
@@ -7358,11 +7367,13 @@ void PushOrMarkClosure::do_oop(oop* p) {
}
}
-void Par_PushOrMarkClosure::do_oop(oop* p) {
- oop this_oop = *p;
+void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
+void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
+
+void Par_PushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators.
- assert(this_oop->is_oop_or_null(true), "expected an oop or NULL");
- HeapWord* addr = (HeapWord*)this_oop;
+ assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+ HeapWord* addr = (HeapWord*)obj;
if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
// We read the global_finger (volatile read) strictly after marking oop
@@ -7391,7 +7402,7 @@ void Par_PushOrMarkClosure::do_oop(oop* p) {
}
)
if (simulate_overflow ||
- !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
+ !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
// stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
@@ -7408,6 +7419,8 @@ void Par_PushOrMarkClosure::do_oop(oop* p) {
}
}
+void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
@@ -7432,16 +7445,11 @@ PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
// Grey object rescan during pre-cleaning and second checkpoint phases --
// the non-parallel version (the parallel version appears further below.)
-void PushAndMarkClosure::do_oop(oop* p) {
- oop this_oop = *p;
- // Ignore mark word verification. If during concurrent precleaning
- // the object monitor may be locked. If during the checkpoint
- // phases, the object may already have been reached by a different
- // path and may be at the end of the global overflow list (so
- // the mark word may be NULL).
- assert(this_oop->is_oop_or_null(true/* ignore mark word */),
+void PushAndMarkClosure::do_oop(oop obj) {
+ // If _concurrent_precleaning, ignore mark word verification
+ assert(obj->is_oop_or_null(_concurrent_precleaning),
"expected an oop or NULL");
- HeapWord* addr = (HeapWord*)this_oop;
+ HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
@@ -7456,7 +7464,7 @@ void PushAndMarkClosure::do_oop(oop* p) {
simulate_overflow = true;
}
)
- if (simulate_overflow || !_mark_stack->push(this_oop)) {
+ if (simulate_overflow || !_mark_stack->push(obj)) {
if (_concurrent_precleaning) {
// During precleaning we can just dirty the appropriate card
// in the mod union table, thus ensuring that the object remains
@@ -7468,7 +7476,7 @@ void PushAndMarkClosure::do_oop(oop* p) {
} else {
// During the remark phase, we need to remember this oop
// in the overflow list.
- _collector->push_on_overflow_list(this_oop);
+ _collector->push_on_overflow_list(obj);
_collector->_ser_pmc_remark_ovflw++;
}
}
@@ -7492,10 +7500,12 @@ Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}
+void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
+void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
+
// Grey object rescan during second checkpoint phase --
// the parallel version.
-void Par_PushAndMarkClosure::do_oop(oop* p) {
- oop this_oop = *p;
+void Par_PushAndMarkClosure::do_oop(oop obj) {
// In the assert below, we ignore the mark word because
// this oop may point to an already visited object that is
// on the overflow stack (in which case the mark word has
@@ -7507,9 +7517,9 @@ void Par_PushAndMarkClosure::do_oop(oop* p) {
// value, by the time we get to examine this failing assert in
// the debugger, is_oop_or_null(false) may subsequently start
// to hold.
- assert(this_oop->is_oop_or_null(true),
+ assert(obj->is_oop_or_null(true),
"expected an oop or NULL");
- HeapWord* addr = (HeapWord*)this_oop;
+ HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
@@ -7527,14 +7537,17 @@ void Par_PushAndMarkClosure::do_oop(oop* p) {
simulate_overflow = true;
}
)
- if (simulate_overflow || !_work_queue->push(this_oop)) {
- _collector->par_push_on_overflow_list(this_oop);
+ if (simulate_overflow || !_work_queue->push(obj)) {
+ _collector->par_push_on_overflow_list(obj);
_collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
}
} // Else, some other thread got there first
}
}
+void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+
void PushAndMarkClosure::remember_klass(Klass* k) {
if (!_revisit_stack->push(oop(k))) {
fatal("Revisit stack overflowed in PushAndMarkClosure");
@@ -8228,9 +8241,8 @@ bool CMSIsAliveClosure::do_object_b(oop obj) {
}
// CMSKeepAliveClosure: the serial version
-void CMSKeepAliveClosure::do_oop(oop* p) {
- oop this_oop = *p;
- HeapWord* addr = (HeapWord*)this_oop;
+void CMSKeepAliveClosure::do_oop(oop obj) {
+ HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
_bit_map->mark(addr);
@@ -8242,26 +8254,28 @@ void CMSKeepAliveClosure::do_oop(oop* p) {
simulate_overflow = true;
}
)
- if (simulate_overflow || !_mark_stack->push(this_oop)) {
- _collector->push_on_overflow_list(this_oop);
+ if (simulate_overflow || !_mark_stack->push(obj)) {
+ _collector->push_on_overflow_list(obj);
_collector->_ser_kac_ovflw++;
}
}
}
+void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
+void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
+
// CMSParKeepAliveClosure: a parallel version of the above.
// The work queues are private to each closure (thread),
// but (may be) available for stealing by other threads.
-void CMSParKeepAliveClosure::do_oop(oop* p) {
- oop this_oop = *p;
- HeapWord* addr = (HeapWord*)this_oop;
+void CMSParKeepAliveClosure::do_oop(oop obj) {
+ HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
// In general, during recursive tracing, several threads
// may be concurrently getting here; the first one to
// "tag" it, claims it.
if (_bit_map->par_mark(addr)) {
- bool res = _work_queue->push(this_oop);
+ bool res = _work_queue->push(obj);
assert(res, "Low water mark should be much less than capacity");
// Do a recursive trim in the hope that this will keep
// stack usage lower, but leave some oops for potential stealers
@@ -8270,6 +8284,9 @@ void CMSParKeepAliveClosure::do_oop(oop* p) {
}
}
+void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
+void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
+
void CMSParKeepAliveClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop new_oop;
@@ -8285,9 +8302,8 @@ void CMSParKeepAliveClosure::trim_queue(uint max) {
}
}
-void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
- oop this_oop = *p;
- HeapWord* addr = (HeapWord*)this_oop;
+void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
+ HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
if (_bit_map->par_mark(addr)) {
@@ -8299,14 +8315,17 @@ void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
simulate_overflow = true;
}
)
- if (simulate_overflow || !_work_queue->push(this_oop)) {
- _collector->par_push_on_overflow_list(this_oop);
+ if (simulate_overflow || !_work_queue->push(obj)) {
+ _collector->par_push_on_overflow_list(obj);
_collector->_par_kac_ovflw++;
}
} // Else another thread got there already
}
}
+void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+
//////////////////////////////////////////////////////////////////
// CMSExpansionCause /////////////////////////////
//////////////////////////////////////////////////////////////////
@@ -8337,12 +8356,12 @@ void CMSDrainMarkingStackClosure::do_void() {
while (!_mark_stack->isEmpty() ||
// if stack is empty, check the overflow list
_collector->take_from_overflow_list(num, _mark_stack)) {
- oop this_oop = _mark_stack->pop();
- HeapWord* addr = (HeapWord*)this_oop;
+ oop obj = _mark_stack->pop();
+ HeapWord* addr = (HeapWord*)obj;
assert(_span.contains(addr), "Should be within span");
assert(_bit_map->isMarked(addr), "Should be marked");
- assert(this_oop->is_oop(), "Should be an oop");
- this_oop->oop_iterate(_keep_alive);
+ assert(obj->is_oop(), "Should be an oop");
+ obj->oop_iterate(_keep_alive);
}
}
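Several of the parallel closures above share one overflow strategy: try to push a newly greyed object onto a bounded per-thread work queue and fall back to an overflow list when the push fails. A simplified standalone sketch, using STL containers as stand-ins for OopTaskQueue and CMSMarkStack, is:

    // Sketch only -- not HotSpot code.  Push a newly greyed object onto a
    // bounded per-thread queue; on failure, spill to an overflow list.
    #include <cstddef>
    #include <cstdio>
    #include <deque>
    #include <vector>

    typedef int oop;                       // toy object handle

    class Marker {
      std::deque<oop>  _work_queue;        // bounded, per-thread
      std::vector<oop> _overflow_list;     // shared, unbounded
      static const size_t queue_capacity = 4;
     public:
      void push_grey(oop obj) {
        if (_work_queue.size() < queue_capacity) {
          _work_queue.push_back(obj);      // fast path
        } else {
          _overflow_list.push_back(obj);   // benign overflow
          std::printf("marking stack overflow at object %d\n", obj);
        }
      }
      size_t queued() const     { return _work_queue.size(); }
      size_t overflowed() const { return _overflow_list.size(); }
    };

    int main() {
      Marker m;
      for (oop o = 0; o < 6; ++o) m.push_grey(o);
      std::printf("queued=%zu overflowed=%zu\n", m.queued(), m.overflowed());
      return 0;
    }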
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
index 9f05caf72..ea44e417b 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
@@ -1138,7 +1138,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Allocation support
HeapWord* allocate(size_t size, bool tlab);
HeapWord* have_lock_and_allocate(size_t size, bool tlab);
- oop promote(oop obj, size_t obj_size, oop* ref);
+ oop promote(oop obj, size_t obj_size);
HeapWord* par_allocate(size_t size, bool tlab) {
return allocate(size, tlab);
}
@@ -1301,9 +1301,8 @@ class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
public:
- void do_oop(oop* p) {
- guarantee(false, "Should be an empty set");
- }
+ void do_oop(oop* p) { guarantee(false, "Should be an empty set"); }
+ void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};
// This closure is used to do concurrent marking from the roots
@@ -1380,6 +1379,12 @@ class PushAndMarkVerifyClosure: public OopClosure {
CMSBitMap* _verification_bm;
CMSBitMap* _cms_bm;
CMSMarkStack* _mark_stack;
+ protected:
+ void do_oop(oop p);
+ template <class T> inline void do_oop_work(T *p) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ do_oop(obj);
+ }
public:
PushAndMarkVerifyClosure(CMSCollector* cms_collector,
MemRegion span,
@@ -1387,6 +1392,7 @@ class PushAndMarkVerifyClosure: public OopClosure {
CMSBitMap* cms_bm,
CMSMarkStack* mark_stack);
void do_oop(oop* p);
+ void do_oop(narrowOop* p);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
};
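The load/decode helpers used by these closures rest on compressing a 64-bit heap address into 32 bits relative to a heap base, scaled by the object alignment; 2^32 eight-byte-aligned slots is what yields the 32 GB limit in the summary. A standalone sketch with an assumed base and shift (not the VM's actual values) is:

    // Sketch only -- not HotSpot code, and not the VM's real base/shift.
    // Assumes a 64-bit build: a heap address is compressed to 32 bits
    // relative to a base, scaled by the 8-byte object alignment.
    #include <cstdint>
    #include <cassert>

    typedef uint32_t narrowOop;

    static const uintptr_t heap_base = 0x0000000800000000ull;  // assumed
    static const int       oop_shift = 3;                      // 8-byte alignment

    static narrowOop encode(const void* p) {
      uintptr_t addr = reinterpret_cast<uintptr_t>(p);
      return static_cast<narrowOop>((addr - heap_base) >> oop_shift);
    }

    static void* decode(narrowOop n) {
      return reinterpret_cast<void*>(heap_base + (uintptr_t(n) << oop_shift));
    }

    int main() {
      void* obj = reinterpret_cast<void*>(heap_base + 0x10000);
      narrowOop n = encode(obj);
      assert(decode(n) == obj);            // round-trips exactly
      return 0;
    }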
diff --git a/src/share/vm/gc_implementation/includeDB_gc_parNew b/src/share/vm/gc_implementation/includeDB_gc_parNew
index d0014f358..7c9267922 100644
--- a/src/share/vm/gc_implementation/includeDB_gc_parNew
+++ b/src/share/vm/gc_implementation/includeDB_gc_parNew
@@ -19,7 +19,7 @@
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
// CA 95054 USA or visit www.sun.com if you need additional information or
// have any questions.
-//
+//
//
asParNewGeneration.hpp adaptiveSizePolicy.hpp
@@ -66,8 +66,8 @@ parNewGeneration.cpp handles.hpp
parNewGeneration.cpp handles.inline.hpp
parNewGeneration.cpp java.hpp
parNewGeneration.cpp objArrayOop.hpp
-parNewGeneration.cpp oop.pcgc.inline.hpp
parNewGeneration.cpp oop.inline.hpp
+parNewGeneration.cpp oop.pcgc.inline.hpp
parNewGeneration.cpp parGCAllocBuffer.hpp
parNewGeneration.cpp parNewGeneration.hpp
parNewGeneration.cpp parOopClosures.inline.hpp
@@ -80,3 +80,8 @@ parNewGeneration.cpp workgroup.hpp
parNewGeneration.hpp defNewGeneration.hpp
parNewGeneration.hpp parGCAllocBuffer.hpp
parNewGeneration.hpp taskqueue.hpp
+
+parOopClosures.hpp genOopClosures.hpp
+
+parOopClosures.inline.hpp parNewGeneration.hpp
+parOopClosures.inline.hpp parOopClosures.hpp
diff --git a/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge b/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
index d4cf2da6d..8a2a7a612 100644
--- a/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
+++ b/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
@@ -19,7 +19,7 @@
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
// CA 95054 USA or visit www.sun.com if you need additional information or
// have any questions.
-//
+//
//
// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
@@ -279,6 +279,7 @@ psParallelCompact.hpp mutableSpace.hpp
psParallelCompact.hpp objectStartArray.hpp
psParallelCompact.hpp oop.hpp
psParallelCompact.hpp parMarkBitMap.hpp
+psParallelCompact.hpp psCompactionManager.hpp
psParallelCompact.hpp sharedHeap.hpp
psOldGen.cpp psAdaptiveSizePolicy.hpp
diff --git a/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp b/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp
index faa3ce7ac..4a3bf2492 100644
--- a/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp
+++ b/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp
@@ -32,18 +32,19 @@ ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
_allocated(0), _wasted(0)
{
assert (min_size() > AlignmentReserve, "Inconsistency!");
+ // arrayOopDesc::header_size depends on command line initialization.
+ FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
+ AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
}
-const size_t ParGCAllocBuffer::FillerHeaderSize =
- align_object_size(arrayOopDesc::header_size(T_INT));
+size_t ParGCAllocBuffer::FillerHeaderSize;
// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
-const size_t ParGCAllocBuffer::AlignmentReserve =
- oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
+size_t ParGCAllocBuffer::AlignmentReserve;
void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
assert(!retain || end_of_gc, "Can only retain at GC end.");
diff --git a/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp b/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp
index 73901f2ba..d8caac661 100644
--- a/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp
+++ b/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp
@@ -41,8 +41,8 @@ protected:
size_t _allocated; // in HeapWord units
size_t _wasted; // in HeapWord units
char tail[32];
- static const size_t FillerHeaderSize;
- static const size_t AlignmentReserve;
+ static size_t FillerHeaderSize;
+ static size_t AlignmentReserve;
public:
// Initializes the buffer to be empty, but with the given "word_sz".
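The ParGCAllocBuffer change above turns two load-time constants into statics assigned in the constructor, because the header size they depend on is only known once command-line flags have been parsed. A standalone sketch of that initialize-after-flag-parsing pattern, with an invented flag and word counts, is:

    // Sketch only -- not HotSpot code.  Sizes that depend on a startup flag
    // cannot be namespace-scope constants initialized at load time; they
    // become plain statics filled in once the flags are known.
    #include <cstddef>
    #include <cstdio>

    static bool UseCompressedOopsFlag = true;   // pretend startup flag

    class AllocBuffer {
      static size_t FillerHeaderSize;           // was: static const
      static size_t AlignmentReserve;
     public:
      static void initialize() {                // call after argument parsing
        size_t header_words = UseCompressedOopsFlag ? 2 : 3;   // assumed
        FillerHeaderSize = header_words;
        AlignmentReserve = header_words > 1 ? FillerHeaderSize : 0;
      }
      static size_t reserve() { return AlignmentReserve; }
    };

    size_t AllocBuffer::FillerHeaderSize;
    size_t AllocBuffer::AlignmentReserve;

    int main() {
      AllocBuffer::initialize();
      std::printf("alignment reserve = %zu words\n", AllocBuffer::reserve());
      return 0;
    }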
diff --git a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
index 2bcd31138..36b8bb247 100644
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
@@ -104,16 +104,15 @@ void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
// must be removed.
arrayOop(old)->set_length(end);
}
+
// process our set of indices (include header in first chunk)
- oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr(start);
- oop* end_addr = obj->base() + end; // obj_at_addr(end) asserts end < length
- MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
+ // should make sure end is even (aligned to HeapWord in case of compressed oops)
if ((HeapWord *)obj < young_old_boundary()) {
// object is in to_space
- obj->oop_iterate(&_to_space_closure, mr);
+ obj->oop_iterate_range(&_to_space_closure, start, end);
} else {
// object is in old generation
- obj->oop_iterate(&_old_gen_closure, mr);
+ obj->oop_iterate_range(&_old_gen_closure, start, end);
}
}
@@ -319,7 +318,6 @@ void ParScanThreadStateSet::flush()
}
}
-
ParScanClosure::ParScanClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
@@ -328,11 +326,25 @@ ParScanClosure::ParScanClosure(ParNewGeneration* g,
_boundary = _g->reserved().end();
}
+void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
+void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
+
+void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
+void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
+
+void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); }
+void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
+
+void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); }
+void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
+
ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state)
: ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
-{
-}
+{}
+
+void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); }
+void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
@@ -475,51 +487,66 @@ ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
-void
-// ParNewGeneration::
-ParKeepAliveClosure::do_oop(oop* p) {
- // We never expect to see a null reference being processed
- // as a weak reference.
- assert (*p != NULL, "expected non-null ref");
- assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
+template <class T>
+void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
+#ifdef ASSERT
+ {
+ assert(!oopDesc::is_null(*p), "expected non-null ref");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ // We never expect to see a null reference being processed
+ // as a weak reference.
+ assert(obj->is_oop(), "expected an oop while scanning weak refs");
+ }
+#endif // ASSERT
_par_cl->do_oop_nv(p);
if (Universe::heap()->is_in_reserved(p)) {
- _rs->write_ref_field_gc_par(p, *p);
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ _rs->write_ref_field_gc_par(p, obj);
}
}
+void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p) { ParKeepAliveClosure::do_oop_work(p); }
+void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }
+
// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
DefNewGeneration::KeepAliveClosure(cl) {}
-void
-// ParNewGeneration::
-KeepAliveClosure::do_oop(oop* p) {
- // We never expect to see a null reference being processed
- // as a weak reference.
- assert (*p != NULL, "expected non-null ref");
- assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
+template <class T>
+void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
+#ifdef ASSERT
+ {
+ assert(!oopDesc::is_null(*p), "expected non-null ref");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ // We never expect to see a null reference being processed
+ // as a weak reference.
+ assert(obj->is_oop(), "expected an oop while scanning weak refs");
+ }
+#endif // ASSERT
_cl->do_oop_nv(p);
if (Universe::heap()->is_in_reserved(p)) {
- _rs->write_ref_field_gc_par(p, *p);
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ _rs->write_ref_field_gc_par(p, obj);
}
}
-void ScanClosureWithParBarrier::do_oop(oop* p) {
- oop obj = *p;
- // Should we copy the obj?
- if (obj != NULL) {
+void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p) { KeepAliveClosure::do_oop_work(p); }
+void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }
+
+template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
- if (obj->is_forwarded()) {
- *p = obj->forwardee();
- } else {
- *p = _g->DefNewGeneration::copy_to_survivor_space(obj, p);
- }
+ oop new_obj = obj->is_forwarded()
+ ? obj->forwardee()
+ : _g->DefNewGeneration::copy_to_survivor_space(obj);
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
if (_gc_barrier) {
// If p points to a younger generation, mark the card.
@@ -530,6 +557,9 @@ void ScanClosureWithParBarrier::do_oop(oop* p) {
}
}
+void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
+void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
+
class ParNewRefProcTaskProxy: public AbstractGangTask {
typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
diff --git a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
index a41548b1a..19564e7b6 100644
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
@@ -33,7 +33,6 @@ class ParEvacuateFollowersClosure;
// but they must be here to allow ParScanClosure::do_oop_work to be defined
// in genOopClosures.inline.hpp.
-
typedef OopTaskQueue ObjToScanQueue;
typedef OopTaskQueueSet ObjToScanQueueSet;
@@ -41,15 +40,20 @@ typedef OopTaskQueueSet ObjToScanQueueSet;
const int PAR_STATS_ENABLED = 0;
class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
+ private:
ParScanWeakRefClosure* _par_cl;
+ protected:
+ template <class T> void do_oop_work(T* p);
public:
ParKeepAliveClosure(ParScanWeakRefClosure* cl);
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
// The state needed by thread performing parallel young-gen collection.
class ParScanThreadState {
friend class ParScanThreadStateSet;
+ private:
ObjToScanQueue *_work_queue;
ParGCAllocBuffer _to_space_alloc_buffer;
@@ -111,7 +115,7 @@ class ParScanThreadState {
ObjToScanQueueSet* work_queue_set_, size_t desired_plab_sz_,
ParallelTaskTerminator& term_);
-public:
+ public:
ageTable* age_table() {return &_ageTable;}
ObjToScanQueue* work_queue() { return _work_queue; }
@@ -195,13 +199,13 @@ public:
double elapsed() {
return os::elapsedTime() - _start;
}
-
};
class ParNewGenTask: public AbstractGangTask {
- ParNewGeneration* _gen;
- Generation* _next_gen;
- HeapWord* _young_old_boundary;
+ private:
+ ParNewGeneration* _gen;
+ Generation* _next_gen;
+ HeapWord* _young_old_boundary;
class ParScanThreadStateSet* _state_set;
public:
@@ -216,35 +220,44 @@ public:
};
class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
+ protected:
+ template <class T> void do_oop_work(T* p);
public:
KeepAliveClosure(ScanWeakRefClosure* cl);
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
class EvacuateFollowersClosureGeneral: public VoidClosure {
- GenCollectedHeap* _gch;
- int _level;
- OopsInGenClosure* _scan_cur_or_nonheap;
- OopsInGenClosure* _scan_older;
- public:
- EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
- OopsInGenClosure* cur,
- OopsInGenClosure* older);
- void do_void();
+ private:
+ GenCollectedHeap* _gch;
+ int _level;
+ OopsInGenClosure* _scan_cur_or_nonheap;
+ OopsInGenClosure* _scan_older;
+ public:
+ EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
+ OopsInGenClosure* cur,
+ OopsInGenClosure* older);
+ virtual void do_void();
};
// Closure for scanning ParNewGeneration.
// Same as ScanClosure, except does parallel GC barrier.
class ScanClosureWithParBarrier: public ScanClosure {
-public:
+ protected:
+ template <class T> void do_oop_work(T* p);
+ public:
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
// Implements AbstractRefProcTaskExecutor for ParNew.
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
-public:
-
+ private:
+ ParNewGeneration& _generation;
+ ParScanThreadStateSet& _state_set;
+ public:
ParNewRefProcTaskExecutor(ParNewGeneration& generation,
ParScanThreadStateSet& state_set)
: _generation(generation), _state_set(state_set)
@@ -255,9 +268,6 @@ public:
virtual void execute(EnqueueTask& task);
// Switch to single threaded mode.
virtual void set_single_threaded_mode();
-private:
- ParNewGeneration& _generation;
- ParScanThreadStateSet& _state_set;
};
@@ -269,6 +279,7 @@ class ParNewGeneration: public DefNewGeneration {
friend class ParNewRefProcTaskExecutor;
friend class ParScanThreadStateSet;
+ private:
// XXX use a global constant instead of 64!
struct ObjToScanQueuePadded {
ObjToScanQueue work_queue;
@@ -314,7 +325,7 @@ class ParNewGeneration: public DefNewGeneration {
// the details of the policy.
virtual void adjust_desired_tenuring_threshold();
-public:
+ public:
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
~ParNewGeneration() {
diff --git a/src/share/vm/gc_implementation/parNew/parOopClosures.hpp b/src/share/vm/gc_implementation/parNew/parOopClosures.hpp
index 463127f1a..eac7668d9 100644
--- a/src/share/vm/gc_implementation/parNew/parOopClosures.hpp
+++ b/src/share/vm/gc_implementation/parNew/parOopClosures.hpp
@@ -26,70 +26,77 @@
class ParScanThreadState;
class ParNewGeneration;
-template<class E> class GenericTaskQueueSet;
-typedef GenericTaskQueueSet<oop> ObjToScanQueueSet;
+typedef OopTaskQueueSet ObjToScanQueueSet;
class ParallelTaskTerminator;
class ParScanClosure: public OopsInGenClosure {
-protected:
+ protected:
ParScanThreadState* _par_scan_state;
- ParNewGeneration* _g;
- HeapWord* _boundary;
- void do_oop_work(oop* p,
- bool gc_barrier,
- bool root_scan);
-
- void par_do_barrier(oop* p);
-
-public:
+ ParNewGeneration* _g;
+ HeapWord* _boundary;
+ template <class T> inline void par_do_barrier(T* p);
+ template <class T> inline void do_oop_work(T* p,
+ bool gc_barrier,
+ bool root_scan);
+ public:
ParScanClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state);
};
class ParScanWithBarrierClosure: public ParScanClosure {
-public:
- void do_oop(oop* p) { do_oop_work(p, true, false); }
- void do_oop_nv(oop* p) { do_oop_work(p, true, false); }
+ public:
ParScanWithBarrierClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
ParScanClosure(g, par_scan_state) {}
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p);
+ inline void do_oop_nv(narrowOop* p);
};
class ParScanWithoutBarrierClosure: public ParScanClosure {
-public:
+ public:
ParScanWithoutBarrierClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
ParScanClosure(g, par_scan_state) {}
- void do_oop(oop* p) { do_oop_work(p, false, false); }
- void do_oop_nv(oop* p) { do_oop_work(p, false, false); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p);
+ inline void do_oop_nv(narrowOop* p);
};
class ParRootScanWithBarrierTwoGensClosure: public ParScanClosure {
-public:
+ public:
ParRootScanWithBarrierTwoGensClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
ParScanClosure(g, par_scan_state) {}
- void do_oop(oop* p) { do_oop_work(p, true, true); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
class ParRootScanWithoutBarrierClosure: public ParScanClosure {
-public:
+ public:
ParRootScanWithoutBarrierClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
ParScanClosure(g, par_scan_state) {}
- void do_oop(oop* p) { do_oop_work(p, false, true); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
class ParScanWeakRefClosure: public ScanWeakRefClosure {
-protected:
+ protected:
ParScanThreadState* _par_scan_state;
-public:
+ template <class T> inline void do_oop_work(T* p);
+ public:
ParScanWeakRefClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state);
- void do_oop(oop* p);
- void do_oop_nv(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p);
+ inline void do_oop_nv(narrowOop* p);
};
class ParEvacuateFollowersClosure: public VoidClosure {
+ private:
ParScanThreadState* _par_scan_state;
ParScanThreadState* par_scan_state() { return _par_scan_state; }
@@ -121,8 +128,7 @@ class ParEvacuateFollowersClosure: public VoidClosure {
ParallelTaskTerminator* _terminator;
ParallelTaskTerminator* terminator() { return _terminator; }
-
-public:
+ public:
ParEvacuateFollowersClosure(
ParScanThreadState* par_scan_state_,
ParScanWithoutBarrierClosure* to_space_closure_,
@@ -132,5 +138,5 @@ public:
ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
ObjToScanQueueSet* task_queues_,
ParallelTaskTerminator* terminator_);
- void do_void();
+ virtual void do_void();
};
diff --git a/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp b/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp
index 3b38b3a2d..d84f12899 100644
--- a/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp
+++ b/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp
@@ -22,10 +22,9 @@
*
*/
-inline void ParScanWeakRefClosure::do_oop(oop* p)
-{
- oop obj = *p;
- assert (obj != NULL, "null weak reference?");
+template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
+ assert (!oopDesc::is_null(*p), "null weak reference?");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// weak references are sometimes scanned twice; must check
// that to-space doesn't already contain this object
if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
@@ -33,41 +32,43 @@ inline void ParScanWeakRefClosure::do_oop(oop* p)
// ParScanClosure::do_oop_work).
klassOop objK = obj->klass();
markOop m = obj->mark();
+ oop new_obj;
if (m->is_marked()) { // Contains forwarding pointer.
- *p = ParNewGeneration::real_forwardee(obj);
+ new_obj = ParNewGeneration::real_forwardee(obj);
} else {
size_t obj_sz = obj->size_given_klass(objK->klass_part());
- *p = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
- obj, obj_sz, m);
+ new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
+ obj, obj_sz, m);
}
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
}
-inline void ParScanWeakRefClosure::do_oop_nv(oop* p)
-{
- ParScanWeakRefClosure::do_oop(p);
-}
+inline void ParScanWeakRefClosure::do_oop_nv(oop* p) { ParScanWeakRefClosure::do_oop_work(p); }
+inline void ParScanWeakRefClosure::do_oop_nv(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
-inline void ParScanClosure::par_do_barrier(oop* p) {
+template <class T> inline void ParScanClosure::par_do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
- oop obj = *p;
- assert(obj != NULL, "expected non-null object");
+ assert(!oopDesc::is_null(*p), "expected non-null object");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < gen_boundary()) {
rs()->write_ref_field_gc_par(p, obj);
}
}
-inline void ParScanClosure::do_oop_work(oop* p,
+template <class T>
+inline void ParScanClosure::do_oop_work(T* p,
bool gc_barrier,
bool root_scan) {
- oop obj = *p;
assert((!Universe::heap()->is_in_reserved(p) ||
generation()->is_in_reserved(p))
&& (generation()->level() == 0 || gc_barrier),
"The gen must be right, and we must be doing the barrier "
"in older generations.");
- if (obj != NULL) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
// OK, we need to ensure that it is copied.
@@ -78,11 +79,14 @@ inline void ParScanClosure::do_oop_work(oop* p,
// forwarded.
klassOop objK = obj->klass();
markOop m = obj->mark();
+ oop new_obj;
if (m->is_marked()) { // Contains forwarding pointer.
- *p = ParNewGeneration::real_forwardee(obj);
+ new_obj = ParNewGeneration::real_forwardee(obj);
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
} else {
size_t obj_sz = obj->size_given_klass(objK->klass_part());
- *p = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
+ new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
if (root_scan) {
// This may have pushed an object. If we have a root
// category with a lot of roots, can't let the queue get too
@@ -97,3 +101,9 @@ inline void ParScanClosure::do_oop_work(oop* p,
}
}
}
+
+inline void ParScanWithBarrierClosure::do_oop_nv(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
+inline void ParScanWithBarrierClosure::do_oop_nv(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
+
+inline void ParScanWithoutBarrierClosure::do_oop_nv(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
+inline void ParScanWithoutBarrierClosure::do_oop_nv(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
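A rough model of the load_heap_oop / decode_heap_oop_not_null / encode_store_heap_oop_not_null calls used in the closures above, under the assumption (the point of this change) that a compressed reference is a 32-bit scaled offset from a fixed heap base. The base, shift, and names below are illustrative, not the exact HotSpot encoding:

#include <cassert>
#include <cstdint>
#include <cstdio>

static char  g_heap[1 << 16];         // pretend heap
static char* g_heap_base = g_heap;    // narrow refs are offsets from here
static const int kShift = 3;          // assume 8-byte object alignment

typedef char*    WideRef;             // stands in for oop
typedef uint32_t NarrowRef;           // stands in for narrowOop

static NarrowRef encode(WideRef p) {
  assert(p != nullptr);
  return (NarrowRef)((p - g_heap_base) >> kShift);
}
static WideRef decode(NarrowRef n) {
  return g_heap_base + ((uintptr_t)n << kShift);
}

// Overloads model oopDesc::load_decode_heap_oop_not_null for each slot width.
static WideRef load_decode(WideRef* slot)   { return *slot; }
static WideRef load_decode(NarrowRef* slot) { return decode(*slot); }

// Overloads model oopDesc::encode_store_heap_oop_not_null.
static void encode_store(WideRef* slot, WideRef v)   { *slot = v; }
static void encode_store(NarrowRef* slot, WideRef v) { *slot = encode(v); }

// One template body updates a slot of either width, as the GC closures do.
template <class T> static void update_slot(T* slot, WideRef new_location) {
  WideRef old_location = load_decode(slot);
  std::printf("moving %p -> %p\n", (void*)old_location, (void*)new_location);
  encode_store(slot, new_location);
}

int main() {
  WideRef   wide_slot   = g_heap + 64;
  NarrowRef narrow_slot = encode(g_heap + 64);
  update_slot(&wide_slot,   g_heap + 128);
  update_slot(&narrow_slot, g_heap + 128);
  assert(load_decode(&wide_slot) == load_decode(&narrow_slot));
  return 0;
}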
diff --git a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
index 9857b4e6c..2b2c6f87c 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
@@ -28,17 +28,16 @@
// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
- PSYoungGen* _young_gen;
+ private:
+ PSYoungGen* _young_gen;
CardTableExtension* _card_table;
- HeapWord* _unmarked_addr;
- jbyte* _unmarked_card;
+ HeapWord* _unmarked_addr;
+ jbyte* _unmarked_card;
- public:
- CheckForUnmarkedOops( PSYoungGen* young_gen, CardTableExtension* card_table ) :
- _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
-
- virtual void do_oop(oop* p) {
- if (_young_gen->is_in_reserved(*p) &&
+ protected:
+ template <class T> void do_oop_work(T* p) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ if (_young_gen->is_in_reserved(obj) &&
!_card_table->addr_is_marked_imprecise(p)) {
// Don't overwrite the first missing card mark
if (_unmarked_addr == NULL) {
@@ -48,6 +47,13 @@ class CheckForUnmarkedOops : public OopClosure {
}
}
+ public:
+ CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
+ _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
+
+ virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }
+
bool has_unmarked_oop() {
return _unmarked_addr != NULL;
}
@@ -56,7 +62,8 @@ class CheckForUnmarkedOops : public OopClosure {
// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
- PSYoungGen* _young_gen;
+ private:
+ PSYoungGen* _young_gen;
CardTableExtension* _card_table;
public:
@@ -75,7 +82,7 @@ class CheckForUnmarkedObjects : public ObjectClosure {
// we test for missing precise marks first. If any are found, we don't
// fail unless the object head is also unmarked.
virtual void do_object(oop obj) {
- CheckForUnmarkedOops object_check( _young_gen, _card_table );
+ CheckForUnmarkedOops object_check(_young_gen, _card_table);
obj->oop_iterate(&object_check);
if (object_check.has_unmarked_oop()) {
assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
@@ -85,19 +92,25 @@ class CheckForUnmarkedObjects : public ObjectClosure {
// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
- PSYoungGen* _young_gen;
+ private:
+ PSYoungGen* _young_gen;
CardTableExtension* _card_table;
- public:
- CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) :
- _young_gen(young_gen), _card_table(card_table) { }
-
- virtual void do_oop(oop* p) {
- if (_young_gen->is_in_reserved(*p)) {
+ protected:
+ template <class T> void do_oop_work(T* p) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ if (_young_gen->is_in_reserved(obj)) {
assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
_card_table->set_card_newgen(p);
}
}
+
+ public:
+ CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
+ _young_gen(young_gen), _card_table(card_table) { }
+
+ virtual void do_oop(oop* p) { CheckForPreciseMarks::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};
// We get passed the space_top value to prevent us from traversing into
diff --git a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp
index 39c03a941..8722e0f3b 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp
@@ -80,7 +80,7 @@ class CardTableExtension : public CardTableModRefBS {
static bool card_is_verify(int value) { return value == verify_card; }
// Card marking
- void inline_write_ref_field_gc(oop* field, oop new_val) {
+ void inline_write_ref_field_gc(void* field, oop new_val) {
jbyte* byte = byte_for(field);
*byte = youngergen_card;
}
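The only signature change in this hunk is that the field argument becomes a void*: with compressed oops the caller may hold either an oop* or a narrowOop*, and the card to dirty depends only on the slot's address. A toy card-marking sketch with made-up constants (a 512-byte card is conventional, but treat the numbers here as assumptions):

#include <cstdint>
#include <cstdio>

static const int    kCardShift = 9;                      // 512-byte cards (assumed)
static const size_t kHeapSize  = 1 << 16;

static char          g_heap[kHeapSize];
static unsigned char g_cards[kHeapSize >> kCardShift];   // one mark byte per card

// Works for any slot width: only the slot's address matters, so take void*.
static unsigned char* byte_for(void* field) {
  uintptr_t offset = (uintptr_t)((char*)field - g_heap);
  return &g_cards[offset >> kCardShift];
}

static void write_ref_field_gc(void* field) {
  *byte_for(field) = 1;   // the real code stores youngergen_card here
}

int main() {
  uint32_t* narrow_slot = (uint32_t*)(g_heap + 1000);
  void**    wide_slot   = (void**)(g_heap + 5000);
  write_ref_field_gc(narrow_slot);
  write_ref_field_gc(wide_slot);
  std::printf("card 1 marked: %d, card 9 marked: %d\n", g_cards[1], g_cards[9]);
  return 0;
}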
diff --git a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
index a4f8878b2..994e627ed 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
@@ -146,7 +146,7 @@ void RefProcTaskExecutor::execute(ProcessTask& task)
{
ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
- TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
+ ChunkTaskQueueSet* qset = ParCompactionManager::chunk_array();
ParallelTaskTerminator terminator(parallel_gc_threads, qset);
GCTaskQueue* q = GCTaskQueue::create();
for(uint i=0; i<parallel_gc_threads; i++) {
diff --git a/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp b/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp
index 5bf0b3980..53775a79d 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp
@@ -33,8 +33,8 @@ const int PREFETCH_QUEUE_SIZE = 8;
class PrefetchQueue : public CHeapObj {
private:
- oop* _prefetch_queue[PREFETCH_QUEUE_SIZE];
- unsigned int _prefetch_index;
+ void* _prefetch_queue[PREFETCH_QUEUE_SIZE];
+ uint _prefetch_index;
public:
int length() { return PREFETCH_QUEUE_SIZE; }
@@ -46,20 +46,21 @@ class PrefetchQueue : public CHeapObj {
_prefetch_index = 0;
}
- inline oop* push_and_pop(oop* p) {
- Prefetch::write((*p)->mark_addr(), 0);
+ template <class T> inline void* push_and_pop(T* p) {
+ oop o = oopDesc::load_decode_heap_oop_not_null(p);
+ Prefetch::write(o->mark_addr(), 0);
// This prefetch is intended to make sure the size field of array
// oops is in cache. It assumes that the object layout is
// mark -> klass -> size, and that mark and klass are heapword
// sized. If this should change, this prefetch will need updating!
- Prefetch::write((*p)->mark_addr() + (HeapWordSize*2), 0);
+ Prefetch::write(o->mark_addr() + (HeapWordSize*2), 0);
_prefetch_queue[_prefetch_index++] = p;
_prefetch_index &= (PREFETCH_QUEUE_SIZE-1);
return _prefetch_queue[_prefetch_index];
}
// Stores a NULL pointer in the pop'd location.
- inline oop* pop() {
+ inline void* pop() {
_prefetch_queue[_prefetch_index++] = NULL;
_prefetch_index &= (PREFETCH_QUEUE_SIZE-1);
return _prefetch_queue[_prefetch_index];
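Independent of the oop/narrowOop change, a standalone model of the queue's behaviour may help: push_and_pop stores the new slot address, issues prefetches for its object, and hands back the entry pushed PREFETCH_QUEUE_SIZE calls earlier, so that by the time a slot is processed its object has likely arrived in cache. The prefetch itself is elided below; the size and names are illustrative.

#include <cstdio>

static const unsigned kQueueSize = 8;   // must be a power of two for the mask trick

class PrefetchRing {
 public:
  PrefetchRing() : _index(0) {
    for (unsigned i = 0; i < kQueueSize; ++i) _queue[i] = nullptr;
  }
  // Store p, advance, and return the entry pushed kQueueSize calls ago
  // (nullptr while the ring is still filling up).
  void* push_and_pop(void* p) {
    // A real implementation would prefetch the referenced object's header here.
    _queue[_index++] = p;
    _index &= (kQueueSize - 1);          // wrap around
    return _queue[_index];
  }

 private:
  void*    _queue[kQueueSize];
  unsigned _index;
};

int main() {
  PrefetchRing ring;
  int data[12];
  for (int i = 0; i < 12; ++i) {
    void* old = ring.push_and_pop(&data[i]);
    std::printf("push %2d -> pop %s\n", i,
                old == nullptr ? "(still filling)" : "entry from 8 pushes ago");
  }
  return 0;
}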
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
index 3afd47d05..cec3a48db 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
@@ -168,7 +168,7 @@ void PSMarkSweepDecorator::precompact() {
start_array->allocate_block(compact_top);
}
- debug_only(MarkSweep::register_live_oop(oop(q), size));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
compact_top += size;
assert(compact_top <= dest->space()->end(),
"Exceeding space in destination");
@@ -234,7 +234,7 @@ void PSMarkSweepDecorator::precompact() {
start_array->allocate_block(compact_top);
}
- debug_only(MarkSweep::register_live_oop(oop(q), sz));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
compact_top += sz;
assert(compact_top <= dest->space()->end(),
"Exceeding space in destination");
@@ -326,15 +326,11 @@ void PSMarkSweepDecorator::adjust_pointers() {
HeapWord* end = _first_dead;
while (q < end) {
- debug_only(MarkSweep::track_interior_pointers(oop(q)));
-
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
// point all the oops to the new location
size_t size = oop(q)->adjust_pointers();
-
- debug_only(MarkSweep::check_interior_pointers());
-
- debug_only(MarkSweep::validate_live_oop(oop(q), size));
-
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
q += size;
}
@@ -354,11 +350,11 @@ void PSMarkSweepDecorator::adjust_pointers() {
Prefetch::write(q, interval);
if (oop(q)->is_gc_marked()) {
// q is alive
- debug_only(MarkSweep::track_interior_pointers(oop(q)));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
// point all the oops to the new location
size_t size = oop(q)->adjust_pointers();
- debug_only(MarkSweep::check_interior_pointers());
- debug_only(MarkSweep::validate_live_oop(oop(q), size));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
debug_only(prev_q = q);
q += size;
} else {
@@ -392,7 +388,7 @@ void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
while (q < end) {
size_t size = oop(q)->size();
assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
- debug_only(MarkSweep::live_oop_moved_to(q, size, q));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));
debug_only(prev_q = q);
q += size;
}
@@ -427,7 +423,7 @@ void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
Prefetch::write(compaction_top, copy_interval);
// copy object and reinit its mark
- debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, compaction_top));
assert(q != compaction_top, "everything in this pass should be moving");
Copy::aligned_conjoint_words(q, compaction_top, size);
oop(compaction_top)->init_mark();
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index d7277b145..c0a2afdfa 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -81,14 +81,14 @@ bool PSParallelCompact::_dwl_initialized = false;
#endif // #ifdef ASSERT
#ifdef VALIDATE_MARK_SWEEP
-GrowableArray<oop*>* PSParallelCompact::_root_refs_stack = NULL;
+GrowableArray<void*>* PSParallelCompact::_root_refs_stack = NULL;
GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
size_t PSParallelCompact::_live_oops_index = 0;
size_t PSParallelCompact::_live_oops_index_at_perm = 0;
-GrowableArray<oop*>* PSParallelCompact::_other_refs_stack = NULL;
-GrowableArray<oop*>* PSParallelCompact::_adjusted_pointers = NULL;
+GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL;
+GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL;
bool PSParallelCompact::_pointer_tracking = false;
bool PSParallelCompact::_root_tracking = true;
@@ -811,46 +811,23 @@ ParMarkBitMap PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;
PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
+
+void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
+bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
+
+void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
+void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
+
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
-void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) {
-#ifdef VALIDATE_MARK_SWEEP
- if (ValidateMarkSweep) {
- if (!Universe::heap()->is_in_reserved(p)) {
- _root_refs_stack->push(p);
- } else {
- _other_refs_stack->push(p);
- }
- }
-#endif
- mark_and_push(_compaction_manager, p);
-}
+void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
+void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
-void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
- oop* p) {
- assert(Universe::heap()->is_in_reserved(p),
- "we should only be traversing objects here");
- oop m = *p;
- if (m != NULL && mark_bitmap()->is_unmarked(m)) {
- if (mark_obj(m)) {
- m->follow_contents(cm); // Follow contents of the marked object
- }
- }
-}
+void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
-// Anything associated with this variable is temporary.
-
-void PSParallelCompact::mark_and_push_internal(ParCompactionManager* cm,
- oop* p) {
- // Push marked object, contents will be followed later
- oop m = *p;
- if (mark_obj(m)) {
- // This thread marked the object and
- // owns the subsequent processing of it.
- cm->save_for_scanning(m);
- }
-}
+void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
+void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
void PSParallelCompact::post_initialize() {
ParallelScavengeHeap* heap = gc_heap();
@@ -2751,23 +2728,6 @@ void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
young_gen->move_and_update(cm);
}
-void PSParallelCompact::follow_root(ParCompactionManager* cm, oop* p) {
- assert(!Universe::heap()->is_in_reserved(p),
- "roots shouldn't be things within the heap");
-#ifdef VALIDATE_MARK_SWEEP
- if (ValidateMarkSweep) {
- guarantee(!_root_refs_stack->contains(p), "should only be in here once");
- _root_refs_stack->push(p);
- }
-#endif
- oop m = *p;
- if (m != NULL && mark_bitmap()->is_unmarked(m)) {
- if (mark_obj(m)) {
- m->follow_contents(cm); // Follow contents of the marked object
- }
- }
- follow_stack(cm);
-}
void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
while(!cm->overflow_stack()->is_empty()) {
@@ -2807,7 +2767,7 @@ PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
#ifdef VALIDATE_MARK_SWEEP
-void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
+void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
if (!ValidateMarkSweep)
return;
@@ -2821,7 +2781,7 @@ void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot)
if (index != -1) {
int l = _root_refs_stack->length();
if (l > 0 && l - 1 != index) {
- oop* last = _root_refs_stack->pop();
+ void* last = _root_refs_stack->pop();
assert(last != p, "should be different");
_root_refs_stack->at_put(index, last);
} else {
@@ -2832,7 +2792,7 @@ void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot)
}
-void PSParallelCompact::check_adjust_pointer(oop* p) {
+void PSParallelCompact::check_adjust_pointer(void* p) {
_adjusted_pointers->push(p);
}
@@ -2840,7 +2800,8 @@ void PSParallelCompact::check_adjust_pointer(oop* p) {
class AdjusterTracker: public OopClosure {
public:
AdjusterTracker() {};
- void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
+ void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
+ void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); }
};
@@ -2948,25 +2909,6 @@ void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
}
#endif //VALIDATE_MARK_SWEEP
-void PSParallelCompact::adjust_pointer(oop* p, bool isroot) {
- oop obj = *p;
- VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL);
- if (obj != NULL) {
- oop new_pointer = (oop) summary_data().calc_new_pointer(obj);
- assert(new_pointer != NULL || // is forwarding ptr?
- obj->is_shared(), // never forwarded?
- "should have a new location");
- // Just always do the update unconditionally?
- if (new_pointer != NULL) {
- *p = new_pointer;
- assert(Universe::heap()->is_in_reserved(new_pointer),
- "should be in object space");
- VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer);
- }
- }
- VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot));
-}
-
// Update interior oops in the ranges of chunks [beg_chunk, end_chunk).
void
PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
index f38ff2b98..956682116 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -80,11 +80,11 @@ public:
static const size_t ChunkSize;
static const size_t ChunkSizeBytes;
- // Mask for the bits in a size_t to get an offset within a chunk.
+ // Mask for the bits in a size_t to get an offset within a chunk.
static const size_t ChunkSizeOffsetMask;
- // Mask for the bits in a pointer to get an offset within a chunk.
+ // Mask for the bits in a pointer to get an offset within a chunk.
static const size_t ChunkAddrOffsetMask;
- // Mask for the bits in a pointer to get the address of the start of a chunk.
+ // Mask for the bits in a pointer to get the address of the start of a chunk.
static const size_t ChunkAddrMask;
static const size_t Log2BlockSize;
@@ -229,7 +229,7 @@ public:
// 1 bit marks the end of an object.
class BlockData
{
- public:
+ public:
typedef short int blk_ofs_t;
blk_ofs_t offset() const { return _offset >= 0 ? _offset : -_offset; }
@@ -269,7 +269,7 @@ public:
return !_first_is_start_bit;
}
- private:
+ private:
blk_ofs_t _offset;
// This is temporary until the mark_bitmap is separated into
// a start bit array and an end bit array.
@@ -277,7 +277,7 @@ public:
#ifdef ASSERT
short _set_phase;
static short _cur_phase;
- public:
+ public:
static void set_cur_phase(short v) { _cur_phase = v; }
#endif
};
@@ -729,48 +729,51 @@ class PSParallelCompact : AllStatic {
} SpaceId;
public:
- // In line closure decls
+ // Inline closure decls
//
-
class IsAliveClosure: public BoolObjectClosure {
public:
- void do_object(oop p) { assert(false, "don't call"); }
- bool do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
+ virtual void do_object(oop p);
+ virtual bool do_object_b(oop p);
};
class KeepAliveClosure: public OopClosure {
+ private:
ParCompactionManager* _compaction_manager;
+ protected:
+ template <class T> inline void do_oop_work(T* p);
public:
- KeepAliveClosure(ParCompactionManager* cm) {
- _compaction_manager = cm;
- }
- void do_oop(oop* p);
+ KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
- class FollowRootClosure: public OopsInGenClosure{
+ // Currently unused
+ class FollowRootClosure: public OopsInGenClosure {
+ private:
ParCompactionManager* _compaction_manager;
public:
- FollowRootClosure(ParCompactionManager* cm) {
- _compaction_manager = cm;
- }
- void do_oop(oop* p) { follow_root(_compaction_manager, p); }
+ FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
virtual const bool do_nmethods() const { return true; }
};
class FollowStackClosure: public VoidClosure {
+ private:
ParCompactionManager* _compaction_manager;
public:
- FollowStackClosure(ParCompactionManager* cm) {
- _compaction_manager = cm;
- }
- void do_void() { follow_stack(_compaction_manager); }
+ FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+ virtual void do_void();
};
class AdjustPointerClosure: public OopsInGenClosure {
+ private:
bool _is_root;
public:
- AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
- void do_oop(oop* p) { adjust_pointer(p, _is_root); }
+ AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
// Closure for verifying update of pointers. Does not
@@ -805,8 +808,6 @@ class PSParallelCompact : AllStatic {
friend class instanceKlassKlass;
friend class RefProcTaskProxy;
- static void mark_and_push_internal(ParCompactionManager* cm, oop* p);
-
private:
static elapsedTimer _accumulated_time;
static unsigned int _total_invocations;
@@ -838,9 +839,9 @@ class PSParallelCompact : AllStatic {
private:
// Closure accessors
- static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
+ static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
- static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
+ static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
static void initialize_space_info();
@@ -859,10 +860,11 @@ class PSParallelCompact : AllStatic {
static void follow_stack(ParCompactionManager* cm);
static void follow_weak_klass_links(ParCompactionManager* cm);
- static void adjust_pointer(oop* p, bool is_root);
+ template <class T> static inline void adjust_pointer(T* p, bool is_root);
static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
- static void follow_root(ParCompactionManager* cm, oop* p);
+ template <class T>
+ static inline void follow_root(ParCompactionManager* cm, T* p);
// Compute the dense prefix for the designated space. This is an experimental
// implementation currently not used in production.
@@ -971,14 +973,14 @@ class PSParallelCompact : AllStatic {
protected:
#ifdef VALIDATE_MARK_SWEEP
- static GrowableArray<oop*>* _root_refs_stack;
+ static GrowableArray<void*>* _root_refs_stack;
static GrowableArray<oop> * _live_oops;
static GrowableArray<oop> * _live_oops_moved_to;
static GrowableArray<size_t>* _live_oops_size;
static size_t _live_oops_index;
static size_t _live_oops_index_at_perm;
- static GrowableArray<oop*>* _other_refs_stack;
- static GrowableArray<oop*>* _adjusted_pointers;
+ static GrowableArray<void*>* _other_refs_stack;
+ static GrowableArray<void*>* _adjusted_pointers;
static bool _pointer_tracking;
static bool _root_tracking;
@@ -999,12 +1001,12 @@ class PSParallelCompact : AllStatic {
public:
class MarkAndPushClosure: public OopClosure {
+ private:
ParCompactionManager* _compaction_manager;
public:
- MarkAndPushClosure(ParCompactionManager* cm) {
- _compaction_manager = cm;
- }
- void do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
+ MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
virtual const bool do_nmethods() const { return true; }
};
@@ -1038,21 +1040,9 @@ class PSParallelCompact : AllStatic {
// Marking support
static inline bool mark_obj(oop obj);
- static bool mark_obj(oop* p) {
- if (*p != NULL) {
- return mark_obj(*p);
- } else {
- return false;
- }
- }
- static void mark_and_push(ParCompactionManager* cm, oop* p) {
- // Check mark and maybe push on
- // marking stack
- oop m = *p;
- if (m != NULL && mark_bitmap()->is_unmarked(m)) {
- mark_and_push_internal(cm, p);
- }
- }
+ // Check mark and maybe push on marking stack
+ template <class T> static inline void mark_and_push(ParCompactionManager* cm,
+ T* p);
// Compaction support.
// Return true if p is in the range [beg_addr, end_addr).
@@ -1127,13 +1117,17 @@ class PSParallelCompact : AllStatic {
static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
// Mark pointer and follow contents.
- static void mark_and_follow(ParCompactionManager* cm, oop* p);
+ template <class T>
+ static inline void mark_and_follow(ParCompactionManager* cm, T* p);
static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
static ParallelCompactData& summary_data() { return _summary_data; }
- static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
- static inline void adjust_pointer(oop* p,
+ static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
+ static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
+
+ template <class T>
+ static inline void adjust_pointer(T* p,
HeapWord* beg_addr,
HeapWord* end_addr);
@@ -1147,8 +1141,8 @@ class PSParallelCompact : AllStatic {
static jlong millis_since_last_gc();
#ifdef VALIDATE_MARK_SWEEP
- static void track_adjusted_pointer(oop* p, oop newobj, bool isroot);
- static void check_adjust_pointer(oop* p); // Adjust this pointer
+ static void track_adjusted_pointer(void* p, bool isroot);
+ static void check_adjust_pointer(void* p);
static void track_interior_pointers(oop obj);
static void check_interior_pointers();
@@ -1185,7 +1179,7 @@ class PSParallelCompact : AllStatic {
#endif // #ifdef ASSERT
};
-bool PSParallelCompact::mark_obj(oop obj) {
+inline bool PSParallelCompact::mark_obj(oop obj) {
const int obj_size = obj->size();
if (mark_bitmap()->mark_obj(obj, obj_size)) {
_summary_data.add_obj(obj, obj_size);
@@ -1195,13 +1189,94 @@ bool PSParallelCompact::mark_obj(oop obj) {
}
}
-inline bool PSParallelCompact::print_phases()
-{
+template <class T>
+inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
+ assert(!Universe::heap()->is_in_reserved(p),
+ "roots shouldn't be things within the heap");
+#ifdef VALIDATE_MARK_SWEEP
+ if (ValidateMarkSweep) {
+ guarantee(!_root_refs_stack->contains(p), "should only be in here once");
+ _root_refs_stack->push(p);
+ }
+#endif
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (mark_bitmap()->is_unmarked(obj)) {
+ if (mark_obj(obj)) {
+ obj->follow_contents(cm);
+ }
+ }
+ }
+ follow_stack(cm);
+}
+
+template <class T>
+inline void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
+ T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (mark_bitmap()->is_unmarked(obj)) {
+ if (mark_obj(obj)) {
+ obj->follow_contents(cm);
+ }
+ }
+ }
+}
+
+template <class T>
+inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (mark_bitmap()->is_unmarked(obj)) {
+ if (mark_obj(obj)) {
+ // This thread marked the object and owns the subsequent processing of it.
+ cm->save_for_scanning(obj);
+ }
+ }
+ }
+}
+
+template <class T>
+inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ oop new_obj = (oop)summary_data().calc_new_pointer(obj);
+ assert(new_obj != NULL || // is forwarding ptr?
+ obj->is_shared(), // never forwarded?
+ "should be forwarded");
+ // Just always do the update unconditionally?
+ if (new_obj != NULL) {
+ assert(Universe::heap()->is_in_reserved(new_obj),
+ "should be in object space");
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+ }
+ }
+ VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
+}
+
+template <class T>
+inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
+#ifdef VALIDATE_MARK_SWEEP
+ if (ValidateMarkSweep) {
+ if (!Universe::heap()->is_in_reserved(p)) {
+ _root_refs_stack->push(p);
+ } else {
+ _other_refs_stack->push(p);
+ }
+ }
+#endif
+ mark_and_push(_compaction_manager, p);
+}
+
+inline bool PSParallelCompact::print_phases() {
return _print_phases;
}
-inline double PSParallelCompact::normal_distribution(double density)
-{
+inline double PSParallelCompact::normal_distribution(double density) {
assert(_dwl_initialized, "uninitialized");
const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
@@ -1257,10 +1332,11 @@ inline bool PSParallelCompact::should_update_klass(klassOop k) {
return ((HeapWord*) k) >= dense_prefix(perm_space_id);
}
-inline void PSParallelCompact::adjust_pointer(oop* p,
+template <class T>
+inline void PSParallelCompact::adjust_pointer(T* p,
HeapWord* beg_addr,
HeapWord* end_addr) {
- if (is_in(p, beg_addr, end_addr)) {
+ if (is_in((HeapWord*)p, beg_addr, end_addr)) {
adjust_pointer(p);
}
}
@@ -1332,18 +1408,18 @@ class UpdateOnlyClosure: public ParMarkBitMapClosure {
inline void do_addr(HeapWord* addr);
};
-inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
+inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
+{
_start_array->allocate_block(addr);
oop(addr)->update_contents(compaction_manager());
}
class FillClosure: public ParMarkBitMapClosure {
-public:
- FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id):
+ public:
+ FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
_space_id(space_id),
- _start_array(PSParallelCompact::start_array(space_id))
- {
+ _start_array(PSParallelCompact::start_array(space_id)) {
assert(_space_id == PSParallelCompact::perm_space_id ||
_space_id == PSParallelCompact::old_space_id,
"cannot use FillClosure in the young gen");
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
index 46b39edeb..8a1893f21 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
@@ -25,7 +25,7 @@
#include "incls/_precompiled.incl"
#include "incls/_psPromotionLAB.cpp.incl"
-const size_t PSPromotionLAB::filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));
+size_t PSPromotionLAB::filler_header_size;
// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
@@ -41,6 +41,10 @@ void PSPromotionLAB::initialize(MemRegion lab) {
set_end(end);
set_top(bottom);
+ // Initialize after VM starts up because header_size depends on compressed
+ // oops.
+ filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));
+
// We can be initialized to a zero size!
if (free() > 0) {
if (ZapUnusedHeapArea) {
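The point of this hunk is initialization order: the filler header size now depends on whether compressed oops are enabled, which is only known once the VM has parsed its flags, so the value can no longer be a const computed at static-initialization time. A minimal sketch of the same idiom, with invented names and sizes:

#include <cstddef>
#include <cstdio>

// Runtime configuration, only known after "VM startup" (flag parsing).
static bool g_use_compressed_refs = false;

struct Lab {
  static std::size_t filler_header_size;   // was: static const, set at static init

  static void initialize() {
    // Safe to compute now: the flag has already been parsed.
    filler_header_size = g_use_compressed_refs ? 12 : 16;  // made-up sizes
  }
};

std::size_t Lab::filler_header_size = 0;

int main(int argc, char**) {
  g_use_compressed_refs = (argc > 1);  // pretend flag parsing
  Lab::initialize();
  std::printf("filler header size: %zu\n", Lab::filler_header_size);
  return 0;
}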
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp
index fea560553..ee8c2d783 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp
@@ -32,7 +32,7 @@ class ObjectStartArray;
class PSPromotionLAB : public CHeapObj {
protected:
- static const size_t filler_header_size;
+ static size_t filler_header_size;
enum LabState {
needs_flush,
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
index ea31f391a..92a5002d8 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
@@ -182,7 +182,7 @@ PSPromotionManager::PSPromotionManager() {
claimed_stack_depth()->initialize();
queue_size = claimed_stack_depth()->max_elems();
// We want the overflow stack to be permanent
- _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<oop*>(10, true);
+ _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
_overflow_stack_breadth = NULL;
} else {
claimed_stack_breadth()->initialize();
@@ -240,6 +240,7 @@ void PSPromotionManager::reset() {
#endif // PS_PM_STATS
}
+
void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
assert(depth_first(), "invariant");
assert(overflow_stack_depth() != NULL, "invariant");
@@ -254,13 +255,15 @@ void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
#endif /* ASSERT */
do {
- oop* p;
+ StarTask p;
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
while(!overflow_stack_depth()->is_empty()) {
- p = overflow_stack_depth()->pop();
- process_popped_location_depth(p);
+ // The Linux compiler wants a different overloaded operator= in taskqueue
+ // for assigning to p than the other compilers do.
+ StarTask ptr = overflow_stack_depth()->pop();
+ process_popped_location_depth(ptr);
}
if (totally_drain) {
@@ -365,7 +368,7 @@ void PSPromotionManager::flush_labs() {
//
oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
- assert(PSScavenge::should_scavenge(o), "Sanity");
+ assert(PSScavenge::should_scavenge(&o), "Sanity");
oop new_obj = NULL;
@@ -530,16 +533,30 @@ oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
// This code must come after the CAS test, or it will print incorrect
// information.
if (TraceScavenge) {
- gclog_or_tty->print_cr("{%s %s 0x%x -> 0x%x (%d)}",
- PSScavenge::should_scavenge(new_obj) ? "copying" : "tenuring",
+ gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
+ PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
-
}
#endif
return new_obj;
}
+template <class T> void PSPromotionManager::process_array_chunk_work(
+ oop obj,
+ int start, int end) {
+ assert(start < end, "invariant");
+ T* const base = (T*)objArrayOop(obj)->base();
+ T* p = base + start;
+ T* const chunk_end = base + end;
+ while (p < chunk_end) {
+ if (PSScavenge::should_scavenge(p)) {
+ claim_or_forward_depth(p);
+ }
+ ++p;
+ }
+}
+
void PSPromotionManager::process_array_chunk(oop old) {
assert(PSChunkLargeArrays, "invariant");
assert(old->is_objArray(), "invariant");
@@ -569,15 +586,10 @@ void PSPromotionManager::process_array_chunk(oop old) {
arrayOop(old)->set_length(actual_length);
}
- assert(start < end, "invariant");
- oop* const base = objArrayOop(obj)->base();
- oop* p = base + start;
- oop* const chunk_end = base + end;
- while (p < chunk_end) {
- if (PSScavenge::should_scavenge(*p)) {
- claim_or_forward_depth(p);
- }
- ++p;
+ if (UseCompressedOops) {
+ process_array_chunk_work<narrowOop>(obj, start, end);
+ } else {
+ process_array_chunk_work<oop>(obj, start, end);
}
}
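The reworked process_array_chunk keeps a single templated worker and picks the instantiation at run time from the UseCompressedOops flag, since an object array holds either narrow or wide slots but never a mix. A toy version of that dispatch, with invented types and sizes:

#include <cstdint>
#include <cstdio>

static bool g_use_compressed = true;   // stands in for UseCompressedOops

// One worker, written once, walks slots of whichever width T happens to be.
template <class T>
static void process_chunk_work(void* array_base, int start, int end) {
  T* const base = (T*)array_base;
  for (T* p = base + start; p < base + end; ++p) {
    std::printf("processing %zu-byte slot at %p\n", sizeof(T), (void*)p);
  }
}

static void process_chunk(void* array_base, int start, int end) {
  if (g_use_compressed) {
    process_chunk_work<uint32_t>(array_base, start, end);  // narrow slots
  } else {
    process_chunk_work<void*>(array_base, start, end);     // wide slots
  }
}

int main() {
  uint32_t narrow_array[8] = {0};
  process_chunk(narrow_array, 2, 5);   // visits slots 2, 3 and 4
  return 0;
}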
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
index c40b01666..b18674ea8 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
@@ -42,8 +42,6 @@ class MutableSpace;
class PSOldGen;
class ParCompactionManager;
-#define PS_CHUNKED_ARRAY_OOP_MASK 1
-
#define PS_PM_STATS 0
class PSPromotionManager : public CHeapObj {
@@ -80,7 +78,7 @@ class PSPromotionManager : public CHeapObj {
PrefetchQueue _prefetch_queue;
OopStarTaskQueue _claimed_stack_depth;
- GrowableArray<oop*>* _overflow_stack_depth;
+ GrowableArray<StarTask>* _overflow_stack_depth;
OopTaskQueue _claimed_stack_breadth;
GrowableArray<oop>* _overflow_stack_breadth;
@@ -92,13 +90,15 @@ class PSPromotionManager : public CHeapObj {
uint _min_array_size_for_chunking;
// Accessors
- static PSOldGen* old_gen() { return _old_gen; }
- static MutableSpace* young_space() { return _young_space; }
+ static PSOldGen* old_gen() { return _old_gen; }
+ static MutableSpace* young_space() { return _young_space; }
inline static PSPromotionManager* manager_array(int index);
+ template <class T> inline void claim_or_forward_internal_depth(T* p);
+ template <class T> inline void claim_or_forward_internal_breadth(T* p);
- GrowableArray<oop*>* overflow_stack_depth() { return _overflow_stack_depth; }
- GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
+ GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
+ GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
// On the task queues we push reference locations as well as
// partially-scanned arrays (in the latter case, we push an oop to
@@ -116,27 +116,37 @@ class PSPromotionManager : public CHeapObj {
// (oop). We do all the necessary casting in the mask / unmask
// methods to avoid sprinkling the rest of the code with more casts.
- bool is_oop_masked(oop* p) {
- return ((intptr_t) p & PS_CHUNKED_ARRAY_OOP_MASK) == PS_CHUNKED_ARRAY_OOP_MASK;
+ // These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
+ // future masks) can't conflict with COMPRESSED_OOP_MASK
+#define PS_CHUNKED_ARRAY_OOP_MASK 0x2
+
+ bool is_oop_masked(StarTask p) {
+ // If something is marked chunked, it's always treated like a wide oop*
+ return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
+ PS_CHUNKED_ARRAY_OOP_MASK;
}
oop* mask_chunked_array_oop(oop obj) {
assert(!is_oop_masked((oop*) obj), "invariant");
- oop* ret = (oop*) ((intptr_t) obj | PS_CHUNKED_ARRAY_OOP_MASK);
+ oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
assert(is_oop_masked(ret), "invariant");
return ret;
}
- oop unmask_chunked_array_oop(oop* p) {
+ oop unmask_chunked_array_oop(StarTask p) {
assert(is_oop_masked(p), "invariant");
- oop ret = oop((intptr_t) p & ~PS_CHUNKED_ARRAY_OOP_MASK);
+ assert(!p.is_narrow(), "chunked array oops cannot be narrow");
+ oop *chunk = (oop*)p; // cast p to oop (uses conversion operator)
+ oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
assert(!is_oop_masked((oop*) ret), "invariant");
return ret;
}
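The masking relies on object addresses being at least 4-byte aligned, so bit 0x2 of a real reference is always clear and can be borrowed as a "partially scanned array" tag on task-queue entries, distinct from the low bit the task queue itself uses to flag narrow slots. A standalone illustration of the trick (constants and names invented):

#include <cassert>
#include <cstdint>
#include <cstdio>

static const uintptr_t kChunkedMask = 0x2;  // free bit: objects are >= 4-byte aligned

static void* mask_chunked(void* obj) {
  assert(((uintptr_t)obj & kChunkedMask) == 0 && "already tagged?");
  return (void*)((uintptr_t)obj | kChunkedMask);
}
static bool is_chunked(void* entry) {
  return ((uintptr_t)entry & kChunkedMask) != 0;
}
static void* unmask_chunked(void* entry) {
  assert(is_chunked(entry));
  return (void*)((uintptr_t)entry & ~kChunkedMask);
}

int main() {
  alignas(8) static int obj = 42;          // aligned, so bit 0x2 is available
  void* task = mask_chunked(&obj);
  std::printf("tagged entry is chunked: %d\n", is_chunked(task));
  std::printf("untagged value matches:  %d\n", unmask_chunked(task) == &obj);
  return 0;
}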
+ template <class T> void process_array_chunk_work(oop obj,
+ int start, int end);
void process_array_chunk(oop old);
- void push_depth(oop* p) {
+ template <class T> void push_depth(T* p) {
assert(depth_first(), "pre-condition");
#if PS_PM_STATS
@@ -175,7 +185,7 @@ class PSPromotionManager : public CHeapObj {
}
protected:
- static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
+ static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
static OopTaskQueueSet* stack_array_breadth() { return _stack_array_breadth; }
public:
@@ -227,6 +237,7 @@ class PSPromotionManager : public CHeapObj {
drain_stacks_breadth(totally_drain);
}
}
+ public:
void drain_stacks_cond_depth() {
if (claimed_stack_depth()->size() > _target_stack_size) {
drain_stacks_depth(false);
@@ -256,15 +267,11 @@ class PSPromotionManager : public CHeapObj {
return _depth_first;
}
- inline void process_popped_location_depth(oop* p);
+ inline void process_popped_location_depth(StarTask p);
inline void flush_prefetch_queue();
-
- inline void claim_or_forward_depth(oop* p);
- inline void claim_or_forward_internal_depth(oop* p);
-
- inline void claim_or_forward_breadth(oop* p);
- inline void claim_or_forward_internal_breadth(oop* p);
+ template <class T> inline void claim_or_forward_depth(T* p);
+ template <class T> inline void claim_or_forward_breadth(T* p);
#if PS_PM_STATS
void increment_steals(oop* p = NULL) {
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
index e900e0601..73cc15f32 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
@@ -28,64 +28,68 @@ inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
return _manager_array[index];
}
-inline void PSPromotionManager::claim_or_forward_internal_depth(oop* p) {
- if (p != NULL) {
- oop o = *p;
+template <class T>
+inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
+ if (p != NULL) { // XXX: error if p != NULL here
+ oop o = oopDesc::load_decode_heap_oop_not_null(p);
if (o->is_forwarded()) {
o = o->forwardee();
-
// Card mark
if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
}
- *p = o;
+ oopDesc::encode_store_heap_oop_not_null(p, o);
} else {
push_depth(p);
}
}
}
-inline void PSPromotionManager::claim_or_forward_internal_breadth(oop* p) {
- if (p != NULL) {
- oop o = *p;
+template <class T>
+inline void PSPromotionManager::claim_or_forward_internal_breadth(T* p) {
+ if (p != NULL) { // XXX: error if p != NULL here
+ oop o = oopDesc::load_decode_heap_oop_not_null(p);
if (o->is_forwarded()) {
o = o->forwardee();
} else {
o = copy_to_survivor_space(o, false);
}
-
// Card mark
if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
}
- *p = o;
+ oopDesc::encode_store_heap_oop_not_null(p, o);
}
}
inline void PSPromotionManager::flush_prefetch_queue() {
assert(!depth_first(), "invariant");
- for (int i=0; i<_prefetch_queue.length(); i++) {
- claim_or_forward_internal_breadth(_prefetch_queue.pop());
+ for (int i = 0; i < _prefetch_queue.length(); i++) {
+ claim_or_forward_internal_breadth((oop*)_prefetch_queue.pop());
}
}
-inline void PSPromotionManager::claim_or_forward_depth(oop* p) {
+template <class T>
+inline void PSPromotionManager::claim_or_forward_depth(T* p) {
assert(depth_first(), "invariant");
- assert(PSScavenge::should_scavenge(*p, true), "revisiting object?");
- assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
+ assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
+ "Sanity");
assert(Universe::heap()->is_in(p), "pointer outside heap");
claim_or_forward_internal_depth(p);
}
-inline void PSPromotionManager::claim_or_forward_breadth(oop* p) {
+template <class T>
+inline void PSPromotionManager::claim_or_forward_breadth(T* p) {
assert(!depth_first(), "invariant");
- assert(PSScavenge::should_scavenge(*p, true), "revisiting object?");
- assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
+ assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
+ "Sanity");
assert(Universe::heap()->is_in(p), "pointer outside heap");
if (UsePrefetchQueue) {
- claim_or_forward_internal_breadth(_prefetch_queue.push_and_pop(p));
+ claim_or_forward_internal_breadth((T*)_prefetch_queue.push_and_pop(p));
} else {
// This option is used for testing. The use of the prefetch
// queue can delay the processing of the objects and thus
@@ -106,12 +110,16 @@ inline void PSPromotionManager::claim_or_forward_breadth(oop* p) {
}
}
-inline void PSPromotionManager::process_popped_location_depth(oop* p) {
+inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
if (is_oop_masked(p)) {
assert(PSChunkLargeArrays, "invariant");
oop const old = unmask_chunked_array_oop(p);
process_array_chunk(old);
} else {
- PSScavenge::copy_and_push_safe_barrier(this, p);
+ if (p.is_narrow()) {
+ PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
+ } else {
+ PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);
+ }
}
}
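Note on the pattern above: the promotion-manager helpers stop being oop*-only and become templates instantiated for both oop* and narrowOop* slots, with popped StarTask entries dispatched to the matching width. A minimal standalone sketch of that shape, using hypothetical stand-ins (Object, a uint32_t compressed slot, an assumed base-plus-shift encoding) rather than the real oopDesc helpers:

    #include <stdint.h>
    #include <stdio.h>

    // Stand-ins: 'Object*' plays the role of oop, 'uint32_t' the role of narrowOop.
    struct Object { Object* forwardee; };

    static Object*   g_base  = 0;   // assumed encoding base
    static const int g_shift = 3;   // assumed 8-byte object alignment

    static Object*  decode(uint32_t v) { return (Object*)((uintptr_t)g_base + ((uintptr_t)v << g_shift)); }
    static uint32_t encode(Object* o)  { return (uint32_t)(((uintptr_t)o - (uintptr_t)g_base) >> g_shift); }

    // Width-specific load/store, in the spirit of load_decode_heap_oop_not_null
    // and encode_store_heap_oop_not_null.
    static Object* load_ref (Object** p)             { return *p; }
    static Object* load_ref (uint32_t* p)            { return decode(*p); }
    static void    store_ref(Object** p, Object* o)  { *p = o; }
    static void    store_ref(uint32_t* p, Object* o) { *p = encode(o); }

    // One template body serves both slot widths, as in claim_or_forward_internal_depth.
    template <class T>
    static void update_if_forwarded(T* p) {
      Object* o = load_ref(p);
      if (o->forwardee != 0) {
        store_ref(p, o->forwardee);   // rewrite the slot with the object's new location
      }
    }

    int main() {
      Object heap[2] = { { 0 }, { 0 } };
      g_base = heap;
      heap[0].forwardee = &heap[1];          // object 0 was copied to slot 1

      Object*  wide   = &heap[0];            // full-width slot
      uint32_t narrow = encode(&heap[0]);    // compressed slot
      update_if_forwarded(&wide);            // instantiated for Object**
      update_if_forwarded(&narrow);          // instantiated for uint32_t*
      printf("%d %d\n", wide == &heap[1], decode(narrow) == &heap[1]);
    }

Writing the body once as a template keeps the depth-first and breadth-first paths identical for both slot widths; only the load/store helpers differ.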
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
index 426337ddc..5f960dc9e 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
@@ -65,16 +65,18 @@ public:
assert(_promotion_manager != NULL, "Sanity");
}
- void do_oop(oop* p) {
- assert (*p != NULL, "expected non-null ref");
- assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
+ template <class T> void do_oop_work(T* p) {
+ assert (!oopDesc::is_null(*p), "expected non-null ref");
+ assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
+ "expected an oop while scanning weak refs");
- oop obj = oop(*p);
// Weak refs may be visited more than once.
- if (PSScavenge::should_scavenge(obj, _to_space)) {
+ if (PSScavenge::should_scavenge(p, _to_space)) {
PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
}
}
+ virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};
class PSEvacuateFollowersClosure: public VoidClosure {
@@ -83,7 +85,7 @@ class PSEvacuateFollowersClosure: public VoidClosure {
public:
PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
- void do_void() {
+ virtual void do_void() {
assert(_promotion_manager != NULL, "Sanity");
_promotion_manager->drain_stacks(true);
guarantee(_promotion_manager->stacks_empty(),
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
index ff20b5bf6..1f8ad9407 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
@@ -116,16 +116,16 @@ class PSScavenge: AllStatic {
// If an attempt to promote fails, this method is invoked
static void oop_promotion_failed(oop obj, markOop obj_mark);
- static inline bool should_scavenge(oop p);
+ template <class T> static inline bool should_scavenge(T* p);
// These call should_scavenge() above and, if it returns true, also check that
// the object was not newly copied into to_space. The version with the bool
// argument is a convenience wrapper that fetches the to_space pointer from
// the heap and calls the other version (if the arg is true).
- static inline bool should_scavenge(oop p, MutableSpace* to_space);
- static inline bool should_scavenge(oop p, bool check_to_space);
+ template <class T> static inline bool should_scavenge(T* p, MutableSpace* to_space);
+ template <class T> static inline bool should_scavenge(T* p, bool check_to_space);
- inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, oop* p);
+ template <class T> inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
// Is an object in the young generation
// This assumes that the HeapWord argument is in the heap,
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
index ea61dc8f5..08b576c77 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
@@ -22,28 +22,33 @@
*
*/
-
inline void PSScavenge::save_to_space_top_before_gc() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
_to_space_top_before_gc = heap->young_gen()->to_space()->top();
}
-inline bool PSScavenge::should_scavenge(oop p) {
- return p == NULL ? false : PSScavenge::is_obj_in_young((HeapWord*) p);
+template <class T> inline bool PSScavenge::should_scavenge(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (oopDesc::is_null(heap_oop)) return false;
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ return PSScavenge::is_obj_in_young((HeapWord*)obj);
}
-inline bool PSScavenge::should_scavenge(oop p, MutableSpace* to_space) {
+template <class T>
+inline bool PSScavenge::should_scavenge(T* p, MutableSpace* to_space) {
if (should_scavenge(p)) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// Skip objects copied to to_space since the scavenge started.
- HeapWord* const addr = (HeapWord*) p;
+ HeapWord* const addr = (HeapWord*)obj;
return addr < to_space_top_before_gc() || addr >= to_space->end();
}
return false;
}
-inline bool PSScavenge::should_scavenge(oop p, bool check_to_space) {
+template <class T>
+inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
if (check_to_space) {
- ParallelScavengeHeap* heap = (ParallelScavengeHeap*) Universe::heap();
+ ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
return should_scavenge(p, heap->young_gen()->to_space());
}
return should_scavenge(p);
@@ -52,24 +57,23 @@ inline bool PSScavenge::should_scavenge(oop p, bool check_to_space) {
// Attempt to "claim" oop at p via CAS, push the new obj if successful
// This version tests the oop* to make sure it is within the heap before
// attempting marking.
+template <class T>
inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
- oop* p) {
- assert(should_scavenge(*p, true), "revisiting object?");
+ T* p) {
+ assert(should_scavenge(p, true), "revisiting object?");
- oop o = *p;
- if (o->is_forwarded()) {
- *p = o->forwardee();
- } else {
- *p = pm->copy_to_survivor_space(o, pm->depth_first());
- }
+ oop o = oopDesc::load_decode_heap_oop_not_null(p);
+ oop new_obj = o->is_forwarded()
+ ? o->forwardee()
+ : pm->copy_to_survivor_space(o, pm->depth_first());
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
// We cannot mark without test, as some code passes us pointers
// that are outside the heap.
- if ((!PSScavenge::is_obj_in_young((HeapWord*) p)) &&
+ if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
Universe::heap()->is_in_reserved(p)) {
- o = *p;
- if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
- card_table()->inline_write_ref_field_gc(p, o);
+ if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) {
+ card_table()->inline_write_ref_field_gc(p, new_obj);
}
}
}
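The comment above speaks of "claiming" the object via CAS; the claim itself happens inside copy_to_survivor_space, which is not shown in this excerpt. The usual technique is to copy the object and then compare-and-swap a forwarding pointer into its header, so exactly one GC thread publishes a copy and every other thread reuses it. A sketch of that idea under those assumptions, with std::atomic standing in for the VM's CAS primitive:

    #include <atomic>
    #include <cstdio>

    struct Obj {
      std::atomic<Obj*> forwardee{nullptr};   // stand-in for the mark-word forwarding state
      int payload = 0;
    };

    static Obj* claim_and_copy(Obj* old_obj, Obj* to_space_slot) {
      // Copy first, then try to publish; if another thread already installed
      // a forwardee, discard our copy and use theirs instead.
      to_space_slot->payload = old_obj->payload;
      Obj* expected = nullptr;
      if (old_obj->forwardee.compare_exchange_strong(expected, to_space_slot)) {
        return to_space_slot;   // we won the claim
      }
      return expected;          // somebody else won; 'expected' now holds their copy
    }

    int main() {
      Obj from, to;
      from.payload = 7;
      Obj* a = claim_and_copy(&from, &to);
      Obj* b = claim_and_copy(&from, &to);   // a second attempt sees the same copy
      std::printf("%d %d\n", a == &to && b == &to, a->payload);
    }

After the winning copy is installed, the barrier above only has to re-encode the slot and, when an old-generation slot now points into the young generation, dirty the corresponding card.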
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
index 2e4333586..dd5d1045f 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
@@ -34,15 +34,17 @@ class PSScavengeRootsClosure: public OopClosure {
private:
PSPromotionManager* _promotion_manager;
- public:
- PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
-
- virtual void do_oop(oop* p) {
- if (PSScavenge::should_scavenge(*p)) {
+ protected:
+ template <class T> void do_oop_work(T *p) {
+ if (PSScavenge::should_scavenge(p)) {
// We never card mark roots, maybe call a func without test?
PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
}
}
+ public:
+ PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
+ void do_oop(oop* p) { PSScavengeRootsClosure::do_oop_work(p); }
+ void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); }
};
void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
@@ -135,7 +137,7 @@ void StealTask::do_it(GCTaskManager* manager, uint which) {
int random_seed = 17;
if (pm->depth_first()) {
while(true) {
- oop* p;
+ StarTask p;
if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
#if PS_PM_STATS
pm->increment_steals(p);
@@ -164,8 +166,7 @@ void StealTask::do_it(GCTaskManager* manager, uint which) {
}
}
}
- guarantee(pm->stacks_empty(),
- "stacks should be empty at this point");
+ guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}
//
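StealTask now pops StarTask entries instead of raw oop*. The StarTask type itself is not defined in this excerpt; a common way to carry "either an oop* or a narrowOop*" in a single word is to tag the pointer's low bit, roughly as in the following sketch (the tag choice and all names here are assumptions, not the real StarTask):

    #include <stdint.h>
    #include <stdio.h>

    struct Object;
    typedef Object*  oop_slot;      // full-width reference slot (stand-in for oop*)
    typedef uint32_t narrow_slot;   // compressed reference slot (stand-in for narrowOop*)

    // One machine word that remembers both the slot address and its width.
    // Relies on slot addresses being at least 2-byte aligned, which 4- and
    // 8-byte reference fields always are.
    class SlotTask {
      uintptr_t _bits;              // low bit set => narrow slot
     public:
      SlotTask() : _bits(0) {}
      SlotTask(oop_slot* p)    : _bits(reinterpret_cast<uintptr_t>(p)) {}
      SlotTask(narrow_slot* p) : _bits(reinterpret_cast<uintptr_t>(p) | 1) {}
      bool         is_narrow() const { return (_bits & 1) != 0; }
      oop_slot*    as_wide()   const { return reinterpret_cast<oop_slot*>(_bits); }
      narrow_slot* as_narrow() const { return reinterpret_cast<narrow_slot*>(_bits & ~uintptr_t(1)); }
    };

    static void process(SlotTask t) {
      // Mirrors process_popped_location_depth: dispatch on the recorded width.
      if (t.is_narrow()) {
        printf("narrow slot at %p\n", (void*)t.as_narrow());
      } else {
        printf("wide slot at %p\n", (void*)t.as_wide());
      }
    }

    int main() {
      oop_slot    wide   = 0;
      narrow_slot narrow = 0;
      process(SlotTask(&wide));
      process(SlotTask(&narrow));
    }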
diff --git a/src/share/vm/gc_implementation/shared/markSweep.cpp b/src/share/vm/gc_implementation/shared/markSweep.cpp
index e7d59db0b..ee77a7f52 100644
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp
@@ -36,16 +36,16 @@ PreservedMark* MarkSweep::_preserved_marks = NULL;
ReferenceProcessor* MarkSweep::_ref_processor = NULL;
#ifdef VALIDATE_MARK_SWEEP
-GrowableArray<oop*>* MarkSweep::_root_refs_stack = NULL;
+GrowableArray<void*>* MarkSweep::_root_refs_stack = NULL;
GrowableArray<oop> * MarkSweep::_live_oops = NULL;
GrowableArray<oop> * MarkSweep::_live_oops_moved_to = NULL;
GrowableArray<size_t>* MarkSweep::_live_oops_size = NULL;
size_t MarkSweep::_live_oops_index = 0;
size_t MarkSweep::_live_oops_index_at_perm = 0;
-GrowableArray<oop*>* MarkSweep::_other_refs_stack = NULL;
-GrowableArray<oop*>* MarkSweep::_adjusted_pointers = NULL;
-bool MarkSweep::_pointer_tracking = false;
-bool MarkSweep::_root_tracking = true;
+GrowableArray<void*>* MarkSweep::_other_refs_stack = NULL;
+GrowableArray<void*>* MarkSweep::_adjusted_pointers = NULL;
+bool MarkSweep::_pointer_tracking = false;
+bool MarkSweep::_root_tracking = true;
GrowableArray<HeapWord*>* MarkSweep::_cur_gc_live_oops = NULL;
GrowableArray<HeapWord*>* MarkSweep::_cur_gc_live_oops_moved_to = NULL;
@@ -59,7 +59,6 @@ void MarkSweep::revisit_weak_klass_link(Klass* k) {
_revisit_klass_stack->push(k);
}
-
void MarkSweep::follow_weak_klass_links() {
// All klasses on the revisit stack are marked at this point.
// Update and follow all subklass, sibling and implementor links.
@@ -69,44 +68,15 @@ void MarkSweep::follow_weak_klass_links() {
follow_stack();
}
+MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
-void MarkSweep::mark_and_follow(oop* p) {
- assert(Universe::heap()->is_in_reserved(p),
- "we should only be traversing objects here");
- oop m = *p;
- if (m != NULL && !m->mark()->is_marked()) {
- mark_object(m);
- m->follow_contents(); // Follow contents of the marked object
- }
-}
-
-void MarkSweep::_mark_and_push(oop* p) {
- // Push marked object, contents will be followed later
- oop m = *p;
- mark_object(m);
- _marking_stack->push(m);
-}
+void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
+void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
-void MarkSweep::follow_root(oop* p) {
- assert(!Universe::heap()->is_in_reserved(p),
- "roots shouldn't be things within the heap");
-#ifdef VALIDATE_MARK_SWEEP
- if (ValidateMarkSweep) {
- guarantee(!_root_refs_stack->contains(p), "should only be in here once");
- _root_refs_stack->push(p);
- }
-#endif
- oop m = *p;
- if (m != NULL && !m->mark()->is_marked()) {
- mark_object(m);
- m->follow_contents(); // Follow contents of the marked object
- }
- follow_stack();
-}
-
-MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
+void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(p); }
+void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
void MarkSweep::follow_stack() {
while (!_marking_stack->is_empty()) {
@@ -118,6 +88,7 @@ void MarkSweep::follow_stack() {
MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;
+void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }
// We preserve the mark which should be replaced at the end and the location that it
// will go. Note that the object that this markOop belongs to isn't currently at that
@@ -142,6 +113,9 @@ void MarkSweep::preserve_mark(oop obj, markOop mark) {
MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true);
MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false);
+void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
+void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
+
void MarkSweep::adjust_marks() {
assert(_preserved_oop_stack == NULL ||
_preserved_oop_stack->length() == _preserved_mark_stack->length(),
@@ -187,7 +161,7 @@ void MarkSweep::restore_marks() {
#ifdef VALIDATE_MARK_SWEEP
-void MarkSweep::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
+void MarkSweep::track_adjusted_pointer(void* p, bool isroot) {
if (!ValidateMarkSweep)
return;
@@ -201,7 +175,7 @@ void MarkSweep::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
if (index != -1) {
int l = _root_refs_stack->length();
if (l > 0 && l - 1 != index) {
- oop* last = _root_refs_stack->pop();
+ void* last = _root_refs_stack->pop();
assert(last != p, "should be different");
_root_refs_stack->at_put(index, last);
} else {
@@ -211,19 +185,17 @@ void MarkSweep::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
}
}
-
-void MarkSweep::check_adjust_pointer(oop* p) {
+void MarkSweep::check_adjust_pointer(void* p) {
_adjusted_pointers->push(p);
}
-
class AdjusterTracker: public OopClosure {
public:
- AdjusterTracker() {};
- void do_oop(oop* o) { MarkSweep::check_adjust_pointer(o); }
+ AdjusterTracker() {}
+ void do_oop(oop* o) { MarkSweep::check_adjust_pointer(o); }
+ void do_oop(narrowOop* o) { MarkSweep::check_adjust_pointer(o); }
};
-
void MarkSweep::track_interior_pointers(oop obj) {
if (ValidateMarkSweep) {
_adjusted_pointers->clear();
@@ -234,7 +206,6 @@ void MarkSweep::track_interior_pointers(oop obj) {
}
}
-
void MarkSweep::check_interior_pointers() {
if (ValidateMarkSweep) {
_pointer_tracking = false;
@@ -242,7 +213,6 @@ void MarkSweep::check_interior_pointers() {
}
}
-
void MarkSweep::reset_live_oop_tracking(bool at_perm) {
if (ValidateMarkSweep) {
guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
@@ -250,7 +220,6 @@ void MarkSweep::reset_live_oop_tracking(bool at_perm) {
}
}
-
void MarkSweep::register_live_oop(oop p, size_t size) {
if (ValidateMarkSweep) {
_live_oops->push(p);
@@ -283,7 +252,6 @@ void MarkSweep::live_oop_moved_to(HeapWord* q, size_t size,
}
}
-
void MarkSweep::compaction_complete() {
if (RecordMarkSweepCompaction) {
GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops;
@@ -299,7 +267,6 @@ void MarkSweep::compaction_complete() {
}
}
-
void MarkSweep::print_new_location_of_heap_address(HeapWord* q) {
if (!RecordMarkSweepCompaction) {
tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
@@ -318,7 +285,7 @@ void MarkSweep::print_new_location_of_heap_address(HeapWord* q) {
HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
size_t offset = (q - old_oop);
tty->print_cr("Address " PTR_FORMAT, q);
- tty->print_cr(" Was in oop " PTR_FORMAT ", size %d, at offset %d", old_oop, sz, offset);
+ tty->print_cr(" Was in oop " PTR_FORMAT ", size " SIZE_FORMAT ", at offset " SIZE_FORMAT, old_oop, sz, offset);
tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
return;
}
@@ -328,23 +295,16 @@ void MarkSweep::print_new_location_of_heap_address(HeapWord* q) {
}
#endif //VALIDATE_MARK_SWEEP
-MarkSweep::IsAliveClosure MarkSweep::is_alive;
+MarkSweep::IsAliveClosure MarkSweep::is_alive;
-void MarkSweep::KeepAliveClosure::do_oop(oop* p) {
-#ifdef VALIDATE_MARK_SWEEP
- if (ValidateMarkSweep) {
- if (!Universe::heap()->is_in_reserved(p)) {
- _root_refs_stack->push(p);
- } else {
- _other_refs_stack->push(p);
- }
- }
-#endif
- mark_and_push(p);
-}
+void MarkSweep::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
+bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }
MarkSweep::KeepAliveClosure MarkSweep::keep_alive;
+void MarkSweep::KeepAliveClosure::do_oop(oop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); }
+void MarkSweep::KeepAliveClosure::do_oop(narrowOop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); }
+
void marksweep_init() { /* empty */ }
#ifndef PRODUCT
diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/gc_implementation/shared/markSweep.hpp
index 8f8b681d1..d0ede4efb 100644
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp
@@ -46,55 +46,59 @@ class ReferenceProcessor;
#define VALIDATE_MARK_SWEEP_ONLY(code)
#endif
-
// declared at end
class PreservedMark;
class MarkSweep : AllStatic {
//
- // In line closure decls
+ // Inline closure decls
//
-
- class FollowRootClosure: public OopsInGenClosure{
+ class FollowRootClosure: public OopsInGenClosure {
public:
- void do_oop(oop* p) { follow_root(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
virtual const bool do_nmethods() const { return true; }
};
class MarkAndPushClosure: public OopClosure {
public:
- void do_oop(oop* p) { mark_and_push(p); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
virtual const bool do_nmethods() const { return true; }
};
class FollowStackClosure: public VoidClosure {
public:
- void do_void() { follow_stack(); }
+ virtual void do_void();
};
class AdjustPointerClosure: public OopsInGenClosure {
+ private:
bool _is_root;
public:
AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
- void do_oop(oop* p) { _adjust_pointer(p, _is_root); }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
// Used for java/lang/ref handling
class IsAliveClosure: public BoolObjectClosure {
public:
- void do_object(oop p) { assert(false, "don't call"); }
- bool do_object_b(oop p) { return p->is_gc_marked(); }
+ virtual void do_object(oop p);
+ virtual bool do_object_b(oop p);
};
class KeepAliveClosure: public OopClosure {
+ protected:
+ template <class T> void do_oop_work(T* p);
public:
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
//
// Friend decls
//
-
friend class AdjustPointerClosure;
friend class KeepAliveClosure;
friend class VM_MarkSweep;
@@ -120,14 +124,14 @@ class MarkSweep : AllStatic {
static ReferenceProcessor* _ref_processor;
#ifdef VALIDATE_MARK_SWEEP
- static GrowableArray<oop*>* _root_refs_stack;
+ static GrowableArray<void*>* _root_refs_stack;
static GrowableArray<oop> * _live_oops;
static GrowableArray<oop> * _live_oops_moved_to;
static GrowableArray<size_t>* _live_oops_size;
static size_t _live_oops_index;
static size_t _live_oops_index_at_perm;
- static GrowableArray<oop*>* _other_refs_stack;
- static GrowableArray<oop*>* _adjusted_pointers;
+ static GrowableArray<void*>* _other_refs_stack;
+ static GrowableArray<void*>* _adjusted_pointers;
static bool _pointer_tracking;
static bool _root_tracking;
@@ -146,9 +150,8 @@ class MarkSweep : AllStatic {
static GrowableArray<size_t>* _last_gc_live_oops_size;
#endif
-
// Non public closures
- static IsAliveClosure is_alive;
+ static IsAliveClosure is_alive;
static KeepAliveClosure keep_alive;
// Class unloading. Update subklass/sibling/implementor links at end of marking phase.
@@ -159,9 +162,9 @@ class MarkSweep : AllStatic {
public:
// Public closures
- static FollowRootClosure follow_root_closure;
- static MarkAndPushClosure mark_and_push_closure;
- static FollowStackClosure follow_stack_closure;
+ static FollowRootClosure follow_root_closure;
+ static MarkAndPushClosure mark_and_push_closure;
+ static FollowStackClosure follow_stack_closure;
static AdjustPointerClosure adjust_root_pointer_closure;
static AdjustPointerClosure adjust_pointer_closure;
@@ -170,39 +173,29 @@ class MarkSweep : AllStatic {
// Call backs for marking
static void mark_object(oop obj);
- static void follow_root(oop* p); // Mark pointer and follow contents. Empty marking
-
- // stack afterwards.
+ // Mark pointer and follow contents. Empty marking stack afterwards.
+ template <class T> static inline void follow_root(T* p);
+ // Mark pointer and follow contents.
+ template <class T> static inline void mark_and_follow(T* p);
+ // Check mark and maybe push on marking stack
+ template <class T> static inline void mark_and_push(T* p);
- static void mark_and_follow(oop* p); // Mark pointer and follow contents.
- static void _mark_and_push(oop* p); // Mark pointer and push obj on
- // marking stack.
+ static void follow_stack(); // Empty marking stack.
+ static void preserve_mark(oop p, markOop mark);
+ // Save the mark word so it can be restored later
+ static void adjust_marks(); // Adjust the pointers in the preserved marks table
+ static void restore_marks(); // Restore the marks that we saved in preserve_mark
- static void mark_and_push(oop* p) { // Check mark and maybe push on
- // marking stack
- // assert(Universe::is_reserved_heap((oop)p), "we should only be traversing objects here");
- oop m = *p;
- if (m != NULL && !m->mark()->is_marked()) {
- _mark_and_push(p);
- }
- }
+ template <class T> static inline void adjust_pointer(T* p, bool isroot);
- static void follow_stack(); // Empty marking stack.
-
-
- static void preserve_mark(oop p, markOop mark); // Save the mark word so it can be restored later
- static void adjust_marks(); // Adjust the pointers in the preserved marks table
- static void restore_marks(); // Restore the marks that we saved in preserve_mark
-
- static void _adjust_pointer(oop* p, bool isroot);
-
- static void adjust_root_pointer(oop* p) { _adjust_pointer(p, true); }
- static void adjust_pointer(oop* p) { _adjust_pointer(p, false); }
+ static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
+ static void adjust_pointer(oop* p) { adjust_pointer(p, false); }
+ static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
#ifdef VALIDATE_MARK_SWEEP
- static void track_adjusted_pointer(oop* p, oop newobj, bool isroot);
- static void check_adjust_pointer(oop* p); // Adjust this pointer
+ static void track_adjusted_pointer(void* p, bool isroot);
+ static void check_adjust_pointer(void* p);
static void track_interior_pointers(oop obj);
static void check_interior_pointers();
@@ -223,7 +216,6 @@ class MarkSweep : AllStatic {
static void revisit_weak_klass_link(Klass* k); // Update subklass/sibling/implementor links at end of marking.
};
-
class PreservedMark VALUE_OBJ_CLASS_SPEC {
private:
oop _obj;
diff --git a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
index 1418df7f9..c4045ee98 100644
--- a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
+++ b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
@@ -22,32 +22,11 @@
*
*/
-inline void MarkSweep::_adjust_pointer(oop* p, bool isroot) {
- oop obj = *p;
- VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL);
- if (obj != NULL) {
- oop new_pointer = oop(obj->mark()->decode_pointer());
- assert(new_pointer != NULL || // is forwarding ptr?
- obj->mark() == markOopDesc::prototype() || // not gc marked?
- (UseBiasedLocking && obj->mark()->has_bias_pattern()) || // not gc marked?
- obj->is_shared(), // never forwarded?
- "should contain a forwarding pointer");
- if (new_pointer != NULL) {
- *p = new_pointer;
- assert(Universe::heap()->is_in_reserved(new_pointer),
- "should be in object space");
- VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer);
- }
- }
- VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot));
-}
-
inline void MarkSweep::mark_object(oop obj) {
-
#ifndef SERIALGC
if (UseParallelOldGC && VerifyParallelOldWithMarkSweep) {
assert(PSParallelCompact::mark_bitmap()->is_marked(obj),
- "Should be marked in the marking bitmap");
+ "Should be marked in the marking bitmap");
}
#endif // SERIALGC
@@ -60,3 +39,80 @@ inline void MarkSweep::mark_object(oop obj) {
preserve_mark(obj, mark);
}
}
+
+template <class T> inline void MarkSweep::follow_root(T* p) {
+ assert(!Universe::heap()->is_in_reserved(p),
+ "roots shouldn't be things within the heap");
+#ifdef VALIDATE_MARK_SWEEP
+ if (ValidateMarkSweep) {
+ guarantee(!_root_refs_stack->contains(p), "should only be in here once");
+ _root_refs_stack->push(p);
+ }
+#endif
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (!obj->mark()->is_marked()) {
+ mark_object(obj);
+ obj->follow_contents();
+ }
+ }
+ follow_stack();
+}
+
+template <class T> inline void MarkSweep::mark_and_follow(T* p) {
+// assert(Universe::heap()->is_in_reserved(p), "should be in object space");
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (!obj->mark()->is_marked()) {
+ mark_object(obj);
+ obj->follow_contents();
+ }
+ }
+}
+
+template <class T> inline void MarkSweep::mark_and_push(T* p) {
+// assert(Universe::heap()->is_in_reserved(p), "should be in object space");
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (!obj->mark()->is_marked()) {
+ mark_object(obj);
+ _marking_stack->push(obj);
+ }
+ }
+}
+
+template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ oop new_obj = oop(obj->mark()->decode_pointer());
+ assert(new_obj != NULL || // is forwarding ptr?
+ obj->mark() == markOopDesc::prototype() || // not gc marked?
+ (UseBiasedLocking && obj->mark()->has_bias_pattern()) ||
+ // not gc marked?
+ obj->is_shared(), // never forwarded?
+ "should be forwarded");
+ if (new_obj != NULL) {
+ assert(Universe::heap()->is_in_reserved(new_obj),
+ "should be in object space");
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+ }
+ }
+ VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
+}
+
+template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
+#ifdef VALIDATE_MARK_SWEEP
+ if (ValidateMarkSweep) {
+ if (!Universe::heap()->is_in_reserved(p)) {
+ _root_refs_stack->push(p);
+ } else {
+ _other_refs_stack->push(p);
+ }
+ }
+#endif
+ mark_and_push(p);
+}
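mark_and_push and follow_stack above implement the marking phase as an explicit work stack rather than recursion: a newly marked object is pushed, and follow_stack drains the stack while scanning each popped object's reference fields. A small self-contained sketch of that shape, with a plain std::vector and a toy object graph standing in for the VM's GrowableArray and oops:

    #include <vector>
    #include <cstdio>

    struct Node {
      bool marked = false;
      std::vector<Node*> refs;   // outgoing references
    };

    static std::vector<Node*> marking_stack;

    // Check the mark and maybe push on the marking stack (cf. mark_and_push).
    static void mark_and_push(Node* n) {
      if (n != nullptr && !n->marked) {
        n->marked = true;
        marking_stack.push_back(n);
      }
    }

    // Empty the marking stack, following the contents of each popped object
    // (cf. follow_stack / follow_contents).
    static void follow_stack() {
      while (!marking_stack.empty()) {
        Node* n = marking_stack.back();
        marking_stack.pop_back();
        for (Node* r : n->refs) mark_and_push(r);
      }
    }

    int main() {
      Node a, b, c;
      a.refs = { &b, &c };
      c.refs = { &a };          // a cycle is fine: the mark bit stops revisits
      mark_and_push(&a);        // treat 'a' as a root
      follow_stack();
      std::printf("%d %d %d\n", a.marked, b.marked, c.marked);
    }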
diff --git a/src/share/vm/gc_interface/collectedHeap.cpp b/src/share/vm/gc_interface/collectedHeap.cpp
index c4efcc4d9..ecfab9ed6 100644
--- a/src/share/vm/gc_interface/collectedHeap.cpp
+++ b/src/share/vm/gc_interface/collectedHeap.cpp
@@ -35,7 +35,6 @@ int CollectedHeap::_fire_out_of_memory_count = 0;
CollectedHeap::CollectedHeap() :
_reserved(), _barrier_set(NULL), _is_gc_active(false),
_total_collections(0), _total_full_collections(0),
- _max_heap_capacity(0),
_gc_cause(GCCause::_no_gc), _gc_lastcause(GCCause::_no_gc) {
NOT_PRODUCT(_promotion_failure_alot_count = 0;)
NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
diff --git a/src/share/vm/gc_interface/collectedHeap.hpp b/src/share/vm/gc_interface/collectedHeap.hpp
index cad60b36a..ef55f1467 100644
--- a/src/share/vm/gc_interface/collectedHeap.hpp
+++ b/src/share/vm/gc_interface/collectedHeap.hpp
@@ -53,7 +53,6 @@ class CollectedHeap : public CHeapObj {
bool _is_gc_active;
unsigned int _total_collections; // ... started
unsigned int _total_full_collections; // ... started
- size_t _max_heap_capacity;
NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)
@@ -149,10 +148,7 @@ class CollectedHeap : public CHeapObj {
virtual void post_initialize() = 0;
MemRegion reserved_region() const { return _reserved; }
-
- // Return the number of bytes currently reserved, committed, and used,
- // respectively, for holding objects.
- size_t reserved_obj_bytes() const { return _reserved.byte_size(); }
+ address base() const { return (address)reserved_region().start(); }
// Future cleanup here. The following functions should specify bytes or
// heapwords as part of their signature.
diff --git a/src/share/vm/gc_interface/collectedHeap.inline.hpp b/src/share/vm/gc_interface/collectedHeap.inline.hpp
index 5344802b7..556cd490f 100644
--- a/src/share/vm/gc_interface/collectedHeap.inline.hpp
+++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp
@@ -61,7 +61,10 @@ void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
obj->set_klass(klass());
assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL,
"missing blueprint");
+}
+// Support for jvmti and dtrace
+inline void post_allocation_notify(KlassHandle klass, oop obj) {
// support for JVMTI VMObjectAlloc event (no-op if not enabled)
JvmtiExport::vm_object_alloc_event_collector(obj);
@@ -79,18 +82,22 @@ void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
post_allocation_setup_common(klass, obj, size);
assert(Universe::is_bootstrapping() ||
!((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
+ // notify jvmti and dtrace
+ post_allocation_notify(klass, (oop)obj);
}
void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
HeapWord* obj,
size_t size,
int length) {
- // Set array length before posting jvmti object alloc event
- // in post_allocation_setup_common()
assert(length >= 0, "length should be non-negative");
- ((arrayOop)obj)->set_length(length);
post_allocation_setup_common(klass, obj, size);
+ // Must set length after installing klass as set_klass zeros the length
+ // field in UseCompressedOops
+ ((arrayOop)obj)->set_length(length);
assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
+ // notify jvmti and dtrace (must be after length is set for dtrace)
+ post_allocation_notify(klass, (oop)obj);
}
HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) {
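The reordering in post_allocation_setup_array follows from the comment in the hunk: with compressed oops, installing the klass rewrites the header word that also holds the array length, so the length must be stored afterwards. A sketch of that interaction with an illustrative header layout (not the real oopDesc layout):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ArrayHeader {
      uint64_t mark;
      uint64_t klass_and_length;   // low 32 bits: narrow klass, high 32 bits: length
    };

    static void set_klass(ArrayHeader* h, uint32_t narrow_klass) {
      // Mirrors the behaviour the comment describes: the store covers the whole
      // word, so it also zeroes the half that holds the array length.
      h->klass_and_length = narrow_klass;
    }

    static void set_length(ArrayHeader* h, uint32_t len) {
      h->klass_and_length = (h->klass_and_length & 0xffffffffULL) |
                            ((uint64_t)len << 32);
    }

    int main() {
      ArrayHeader h;
      memset(&h, 0, sizeof(h));

      set_length(&h, 10);      // wrong order: the length is lost...
      set_klass(&h, 42);       // ...because set_klass rewrites the shared word
      printf("length after wrong order: %u\n", (uint32_t)(h.klass_and_length >> 32));

      set_klass(&h, 42);       // correct order: klass first,
      set_length(&h, 10);      // then the length survives
      printf("length after right order: %u\n", (uint32_t)(h.klass_and_length >> 32));
    }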
diff --git a/src/share/vm/includeDB_core b/src/share/vm/includeDB_core
index 06eb247de..f7da6aa3f 100644
--- a/src/share/vm/includeDB_core
+++ b/src/share/vm/includeDB_core
@@ -191,7 +191,6 @@ array.hpp allocation.inline.hpp
arrayKlass.cpp arrayKlass.hpp
arrayKlass.cpp arrayKlassKlass.hpp
arrayKlass.cpp arrayOop.hpp
-arrayKlass.cpp collectedHeap.hpp
arrayKlass.cpp collectedHeap.inline.hpp
arrayKlass.cpp gcLocker.hpp
arrayKlass.cpp instanceKlass.hpp
@@ -211,6 +210,7 @@ arrayKlass.hpp universe.hpp
arrayKlassKlass.cpp arrayKlassKlass.hpp
arrayKlassKlass.cpp handles.inline.hpp
arrayKlassKlass.cpp javaClasses.hpp
+arrayKlassKlass.cpp markSweep.inline.hpp
arrayKlassKlass.cpp oop.inline.hpp
arrayKlassKlass.hpp arrayKlass.hpp
@@ -250,7 +250,7 @@ assembler.inline.hpp threadLocalStorage.hpp
assembler_<arch_model>.cpp assembler_<arch_model>.inline.hpp
assembler_<arch_model>.cpp biasedLocking.hpp
assembler_<arch_model>.cpp cardTableModRefBS.hpp
-assembler_<arch_model>.cpp collectedHeap.hpp
+assembler_<arch_model>.cpp collectedHeap.inline.hpp
assembler_<arch_model>.cpp interfaceSupport.hpp
assembler_<arch_model>.cpp interpreter.hpp
assembler_<arch_model>.cpp objectMonitor.hpp
@@ -331,9 +331,8 @@ bitMap.hpp top.hpp
bitMap.inline.hpp atomic.hpp
bitMap.inline.hpp bitMap.hpp
-blockOffsetTable.cpp blockOffsetTable.hpp
blockOffsetTable.cpp blockOffsetTable.inline.hpp
-blockOffsetTable.cpp collectedHeap.hpp
+blockOffsetTable.cpp collectedHeap.inline.hpp
blockOffsetTable.cpp iterator.hpp
blockOffsetTable.cpp java.hpp
blockOffsetTable.cpp oop.inline.hpp
@@ -990,6 +989,7 @@ codeCache.cpp methodOop.hpp
codeCache.cpp mutexLocker.hpp
codeCache.cpp nmethod.hpp
codeCache.cpp objArrayOop.hpp
+codeCache.cpp oop.inline.hpp
codeCache.cpp pcDesc.hpp
codeCache.cpp resourceArea.hpp
@@ -1124,7 +1124,7 @@ compiledICHolderKlass.cpp collectedHeap.inline.hpp
compiledICHolderKlass.cpp compiledICHolderKlass.hpp
compiledICHolderKlass.cpp handles.inline.hpp
compiledICHolderKlass.cpp javaClasses.hpp
-compiledICHolderKlass.cpp markSweep.hpp
+compiledICHolderKlass.cpp markSweep.inline.hpp
compiledICHolderKlass.cpp oop.inline.hpp
compiledICHolderKlass.cpp oop.inline2.hpp
compiledICHolderKlass.cpp permGen.hpp
@@ -1192,6 +1192,7 @@ constMethodKlass.cpp constMethodOop.hpp
constMethodKlass.cpp gcLocker.hpp
constMethodKlass.cpp handles.inline.hpp
constMethodKlass.cpp interpreter.hpp
+constMethodKlass.cpp markSweep.inline.hpp
constMethodKlass.cpp oop.inline.hpp
constMethodKlass.cpp oop.inline2.hpp
constMethodKlass.cpp resourceArea.hpp
@@ -1210,6 +1211,8 @@ constantPoolKlass.cpp collectedHeap.inline.hpp
constantPoolKlass.cpp constantPoolKlass.hpp
constantPoolKlass.cpp constantPoolOop.hpp
constantPoolKlass.cpp handles.inline.hpp
+constantPoolKlass.cpp javaClasses.hpp
+constantPoolKlass.cpp markSweep.inline.hpp
constantPoolKlass.cpp oop.inline.hpp
constantPoolKlass.cpp oop.inline2.hpp
constantPoolKlass.cpp oopFactory.hpp
@@ -1261,7 +1264,8 @@ cpCacheKlass.cpp collectedHeap.hpp
cpCacheKlass.cpp constantPoolOop.hpp
cpCacheKlass.cpp cpCacheKlass.hpp
cpCacheKlass.cpp handles.inline.hpp
-cpCacheKlass.cpp markSweep.hpp
+cpCacheKlass.cpp javaClasses.hpp
+cpCacheKlass.cpp markSweep.inline.hpp
cpCacheKlass.cpp oop.inline.hpp
cpCacheKlass.cpp permGen.hpp
@@ -1273,7 +1277,6 @@ cpCacheOop.cpp cpCacheOop.hpp
cpCacheOop.cpp handles.inline.hpp
cpCacheOop.cpp interpreter.hpp
cpCacheOop.cpp jvmtiRedefineClassesTrace.hpp
-cpCacheOop.cpp markSweep.hpp
cpCacheOop.cpp markSweep.inline.hpp
cpCacheOop.cpp objArrayOop.hpp
cpCacheOop.cpp oop.inline.hpp
@@ -1385,7 +1388,6 @@ debug_<arch>.cpp top.hpp
defNewGeneration.cpp collectorCounters.hpp
defNewGeneration.cpp copy.hpp
-defNewGeneration.cpp defNewGeneration.hpp
defNewGeneration.cpp defNewGeneration.inline.hpp
defNewGeneration.cpp gcLocker.inline.hpp
defNewGeneration.cpp gcPolicyCounters.hpp
@@ -1397,7 +1399,6 @@ defNewGeneration.cpp iterator.hpp
defNewGeneration.cpp java.hpp
defNewGeneration.cpp oop.inline.hpp
defNewGeneration.cpp referencePolicy.hpp
-defNewGeneration.cpp space.hpp
defNewGeneration.cpp space.inline.hpp
defNewGeneration.cpp thread_<os_family>.inline.hpp
@@ -1406,6 +1407,7 @@ defNewGeneration.hpp cSpaceCounters.hpp
defNewGeneration.hpp generation.inline.hpp
defNewGeneration.hpp generationCounters.hpp
+defNewGeneration.inline.hpp cardTableRS.hpp
defNewGeneration.inline.hpp defNewGeneration.hpp
defNewGeneration.inline.hpp space.hpp
@@ -1956,6 +1958,7 @@ instanceKlass.cpp javaClasses.hpp
instanceKlass.cpp jvmti.h
instanceKlass.cpp jvmtiExport.hpp
instanceKlass.cpp jvmtiRedefineClassesTrace.hpp
+instanceKlass.cpp markSweep.inline.hpp
instanceKlass.cpp methodOop.hpp
instanceKlass.cpp mutexLocker.hpp
instanceKlass.cpp objArrayKlassKlass.hpp
@@ -1991,6 +1994,7 @@ instanceKlassKlass.cpp instanceKlassKlass.hpp
instanceKlassKlass.cpp instanceRefKlass.hpp
instanceKlassKlass.cpp javaClasses.hpp
instanceKlassKlass.cpp jvmtiExport.hpp
+instanceKlassKlass.cpp markSweep.inline.hpp
instanceKlassKlass.cpp objArrayKlassKlass.hpp
instanceKlassKlass.cpp objArrayOop.hpp
instanceKlassKlass.cpp oop.inline.hpp
@@ -2012,7 +2016,7 @@ instanceRefKlass.cpp genCollectedHeap.hpp
instanceRefKlass.cpp genOopClosures.inline.hpp
instanceRefKlass.cpp instanceRefKlass.hpp
instanceRefKlass.cpp javaClasses.hpp
-instanceRefKlass.cpp markSweep.hpp
+instanceRefKlass.cpp markSweep.inline.hpp
instanceRefKlass.cpp oop.inline.hpp
instanceRefKlass.cpp preserveException.hpp
instanceRefKlass.cpp systemDictionary.hpp
@@ -2492,7 +2496,7 @@ klassKlass.cpp instanceKlass.hpp
klassKlass.cpp instanceOop.hpp
klassKlass.cpp klassKlass.hpp
klassKlass.cpp klassOop.hpp
-klassKlass.cpp markSweep.hpp
+klassKlass.cpp markSweep.inline.hpp
klassKlass.cpp methodKlass.hpp
klassKlass.cpp objArrayKlass.hpp
klassKlass.cpp oop.inline.hpp
@@ -2519,7 +2523,7 @@ klassVtable.cpp instanceKlass.hpp
klassVtable.cpp jvmtiRedefineClassesTrace.hpp
klassVtable.cpp klassOop.hpp
klassVtable.cpp klassVtable.hpp
-klassVtable.cpp markSweep.hpp
+klassVtable.cpp markSweep.inline.hpp
klassVtable.cpp methodOop.hpp
klassVtable.cpp objArrayOop.hpp
klassVtable.cpp oop.inline.hpp
@@ -2632,6 +2636,9 @@ markOop.inline.hpp klassOop.hpp
markOop.inline.hpp markOop.hpp
markSweep.cpp compileBroker.hpp
+
+markSweep.hpp collectedHeap.hpp
+
memRegion.cpp globals.hpp
memRegion.cpp memRegion.hpp
@@ -2731,7 +2738,7 @@ methodDataKlass.cpp collectedHeap.inline.hpp
methodDataKlass.cpp gcLocker.hpp
methodDataKlass.cpp handles.inline.hpp
methodDataKlass.cpp klassOop.hpp
-methodDataKlass.cpp markSweep.hpp
+methodDataKlass.cpp markSweep.inline.hpp
methodDataKlass.cpp methodDataKlass.hpp
methodDataKlass.cpp methodDataOop.hpp
methodDataKlass.cpp oop.inline.hpp
@@ -2746,7 +2753,6 @@ methodDataOop.cpp bytecodeStream.hpp
methodDataOop.cpp deoptimization.hpp
methodDataOop.cpp handles.inline.hpp
methodDataOop.cpp linkResolver.hpp
-methodDataOop.cpp markSweep.hpp
methodDataOop.cpp markSweep.inline.hpp
methodDataOop.cpp methodDataOop.hpp
methodDataOop.cpp oop.inline.hpp
@@ -2764,7 +2770,7 @@ methodKlass.cpp handles.inline.hpp
methodKlass.cpp interpreter.hpp
methodKlass.cpp javaClasses.hpp
methodKlass.cpp klassOop.hpp
-methodKlass.cpp markSweep.hpp
+methodKlass.cpp markSweep.inline.hpp
methodKlass.cpp methodDataOop.hpp
methodKlass.cpp methodKlass.hpp
methodKlass.cpp oop.inline.hpp
@@ -2941,6 +2947,7 @@ objArrayKlass.cpp systemDictionary.hpp
objArrayKlass.cpp universe.inline.hpp
objArrayKlass.cpp vmSymbols.hpp
+
objArrayKlass.hpp arrayKlass.hpp
objArrayKlass.hpp instanceKlass.hpp
objArrayKlass.hpp specialized_oop_closures.hpp
@@ -2948,6 +2955,7 @@ objArrayKlass.hpp specialized_oop_closures.hpp
objArrayKlassKlass.cpp collectedHeap.inline.hpp
objArrayKlassKlass.cpp instanceKlass.hpp
objArrayKlassKlass.cpp javaClasses.hpp
+objArrayKlassKlass.cpp markSweep.inline.hpp
objArrayKlassKlass.cpp objArrayKlassKlass.hpp
objArrayKlassKlass.cpp oop.inline.hpp
objArrayKlassKlass.cpp oop.inline2.hpp
@@ -2956,6 +2964,7 @@ objArrayKlassKlass.cpp systemDictionary.hpp
objArrayKlassKlass.hpp arrayKlassKlass.hpp
objArrayKlassKlass.hpp objArrayKlass.hpp
+objArrayOop.cpp objArrayKlass.hpp
objArrayOop.cpp objArrayOop.hpp
objArrayOop.cpp oop.inline.hpp
@@ -3005,7 +3014,6 @@ oop.inline.hpp generation.hpp
oop.inline.hpp klass.hpp
oop.inline.hpp klassOop.hpp
oop.inline.hpp markOop.inline.hpp
-oop.inline.hpp markSweep.hpp
oop.inline.hpp markSweep.inline.hpp
oop.inline.hpp oop.hpp
oop.inline.hpp os.hpp
@@ -4536,6 +4544,7 @@ vtableStubs.cpp handles.inline.hpp
vtableStubs.cpp instanceKlass.hpp
vtableStubs.cpp jvmtiExport.hpp
vtableStubs.cpp klassVtable.hpp
+vtableStubs.cpp oop.inline.hpp
vtableStubs.cpp mutexLocker.hpp
vtableStubs.cpp resourceArea.hpp
vtableStubs.cpp sharedRuntime.hpp
diff --git a/src/share/vm/interpreter/interpreterRuntime.hpp b/src/share/vm/interpreter/interpreterRuntime.hpp
index 0b071feb4..82f73d8ac 100644
--- a/src/share/vm/interpreter/interpreterRuntime.hpp
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp
@@ -35,7 +35,10 @@ class InterpreterRuntime: AllStatic {
static methodOop method(JavaThread *thread) { return last_frame(thread).interpreter_frame_method(); }
static address bcp(JavaThread *thread) { return last_frame(thread).interpreter_frame_bcp(); }
static void set_bcp_and_mdp(address bcp, JavaThread*thread);
- static Bytecodes::Code code(JavaThread *thread) { return Bytecodes::code_at(bcp(thread)); }
+ static Bytecodes::Code code(JavaThread *thread) {
+ // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
+ return Bytecodes::code_at(bcp(thread), method(thread));
+ }
static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
static int one_byte_index(JavaThread *thread) { return bcp(thread)[1]; }
static int two_byte_index(JavaThread *thread) { return Bytes::get_Java_u2(bcp(thread) + 1); }
diff --git a/src/share/vm/memory/barrierSet.hpp b/src/share/vm/memory/barrierSet.hpp
index bc3208be6..aa56fa9e8 100644
--- a/src/share/vm/memory/barrierSet.hpp
+++ b/src/share/vm/memory/barrierSet.hpp
@@ -54,9 +54,9 @@ public:
// These functions indicate whether a particular access of the given
// kinds requires a barrier.
- virtual bool read_ref_needs_barrier(oop* field) = 0;
+ virtual bool read_ref_needs_barrier(void* field) = 0;
virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
- virtual bool write_ref_needs_barrier(oop* field, oop new_val) = 0;
+ virtual bool write_ref_needs_barrier(void* field, oop new_val) = 0;
virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes, juint val1, juint val2) = 0;
// The first four operations provide a direct implementation of the
@@ -64,7 +64,7 @@ public:
// directly, as appropriate.
// Invoke the barrier, if any, necessary when reading the given ref field.
- virtual void read_ref_field(oop* field) = 0;
+ virtual void read_ref_field(void* field) = 0;
// Invoke the barrier, if any, necessary when reading the given primitive
// "field" of "bytes" bytes in "obj".
@@ -75,9 +75,9 @@ public:
// (For efficiency reasons, this operation is specialized for certain
// barrier types. Semantically, it should be thought of as a call to the
// virtual "_work" function below, which must implement the barrier.)
- inline void write_ref_field(oop* field, oop new_val);
+ inline void write_ref_field(void* field, oop new_val);
protected:
- virtual void write_ref_field_work(oop* field, oop new_val) = 0;
+ virtual void write_ref_field_work(void* field, oop new_val) = 0;
public:
// Invoke the barrier, if any, necessary when writing the "bytes"-byte
diff --git a/src/share/vm/memory/barrierSet.inline.hpp b/src/share/vm/memory/barrierSet.inline.hpp
index 082cb7609..ab89a4d46 100644
--- a/src/share/vm/memory/barrierSet.inline.hpp
+++ b/src/share/vm/memory/barrierSet.inline.hpp
@@ -26,7 +26,7 @@
// performance-critical calls when when the barrier is the most common
// card-table kind.
-void BarrierSet::write_ref_field(oop* field, oop new_val) {
+void BarrierSet::write_ref_field(void* field, oop new_val) {
if (kind() == CardTableModRef) {
((CardTableModRefBS*)this)->inline_write_ref_field(field, new_val);
} else {
diff --git a/src/share/vm/memory/cardTableModRefBS.cpp b/src/share/vm/memory/cardTableModRefBS.cpp
index 8149c2ce0..fab92e0f6 100644
--- a/src/share/vm/memory/cardTableModRefBS.cpp
+++ b/src/share/vm/memory/cardTableModRefBS.cpp
@@ -294,7 +294,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
-void CardTableModRefBS::write_ref_field_work(oop* field, oop newVal) {
+void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
inline_write_ref_field(field, newVal);
}
diff --git a/src/share/vm/memory/cardTableModRefBS.hpp b/src/share/vm/memory/cardTableModRefBS.hpp
index 393adcc95..dcc457111 100644
--- a/src/share/vm/memory/cardTableModRefBS.hpp
+++ b/src/share/vm/memory/cardTableModRefBS.hpp
@@ -273,7 +273,7 @@ public:
// *** Barrier set functions.
- inline bool write_ref_needs_barrier(oop* field, oop new_val) {
+ inline bool write_ref_needs_barrier(void* field, oop new_val) {
// Note that this assumes the perm gen is the highest generation
// in the address space
return new_val != NULL && !new_val->is_perm();
@@ -285,7 +285,7 @@ public:
// these functions here for performance.
protected:
void write_ref_field_work(oop obj, size_t offset, oop newVal);
- void write_ref_field_work(oop* field, oop newVal);
+ void write_ref_field_work(void* field, oop newVal);
public:
bool has_write_ref_array_opt() { return true; }
@@ -315,7 +315,7 @@ public:
// *** Card-table-barrier-specific things.
- inline void inline_write_ref_field(oop* field, oop newVal) {
+ inline void inline_write_ref_field(void* field, oop newVal) {
jbyte* byte = byte_for(field);
*byte = dirty_card;
}
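The barrier signatures above switch from oop* to void* because a card-marking barrier only needs the address of the updated slot: byte_for(field) maps that address to a card byte regardless of whether the slot is a full oop* or a compressed narrowOop*. A toy sketch of the addressing idea, assuming 512-byte cards (the card size and values here are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    // Toy card table: one byte per 512-byte "card" of a pretend heap.
    static const int     kCardShift = 9;               // 512-byte cards
    static const uint8_t kClean = 0xff, kDirty = 0;
    static char          heap[4096];
    static uint8_t       cards[sizeof(heap) >> kCardShift];

    // The barrier only needs the *address* of the updated slot, so it can take
    // void* and serve both full-width and compressed reference fields.
    static uint8_t* byte_for(void* field) {
      uintptr_t offset = (uintptr_t)((char*)field - heap);
      return &cards[offset >> kCardShift];
    }

    static void write_ref_field(void* field, void* new_val) {
      (void)new_val;               // a precise barrier just dirties the card
      *byte_for(field) = kDirty;
    }

    int main() {
      memset(cards, kClean, sizeof(cards));
      void**    wide_slot   = (void**)(heap + 100);      // pretend oop* field
      uint32_t* narrow_slot = (uint32_t*)(heap + 1000);  // pretend narrowOop* field
      write_ref_field(wide_slot, NULL);
      write_ref_field(narrow_slot, NULL);
      printf("card0=%u card1=%u\n", cards[0], cards[1]); // both cards dirtied
    }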
diff --git a/src/share/vm/memory/cardTableRS.cpp b/src/share/vm/memory/cardTableRS.cpp
index f389a70a4..e84cc57f0 100644
--- a/src/share/vm/memory/cardTableRS.cpp
+++ b/src/share/vm/memory/cardTableRS.cpp
@@ -191,7 +191,7 @@ public:
// prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card
// cur-younger-gen ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
-void CardTableRS::write_ref_field_gc_par(oop* field, oop new_val) {
+void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
jbyte* entry = ct_bs()->byte_for(field);
do {
jbyte entry_val = *entry;
@@ -290,28 +290,36 @@ void CardTableRS::invalidate_or_clear(Generation* gen, bool younger,
class VerifyCleanCardClosure: public OopClosure {
- HeapWord* boundary;
- HeapWord* begin; HeapWord* end;
-public:
- void do_oop(oop* p) {
+private:
+ HeapWord* _boundary;
+ HeapWord* _begin;
+ HeapWord* _end;
+protected:
+ template <class T> void do_oop_work(T* p) {
HeapWord* jp = (HeapWord*)p;
- if (jp >= begin && jp < end) {
- guarantee(*p == NULL || (HeapWord*)p < boundary
- || (HeapWord*)(*p) >= boundary,
+ if (jp >= _begin && jp < _end) {
+ oop obj = oopDesc::load_decode_heap_oop(p);
+ guarantee(obj == NULL ||
+ (HeapWord*)p < _boundary ||
+ (HeapWord*)obj >= _boundary,
"pointer on clean card crosses boundary");
}
}
- VerifyCleanCardClosure(HeapWord* b, HeapWord* _begin, HeapWord* _end) :
- boundary(b), begin(_begin), end(_end) {}
+public:
+ VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) :
+ _boundary(b), _begin(begin), _end(end) {}
+ virtual void do_oop(oop* p) { VerifyCleanCardClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
};
class VerifyCTSpaceClosure: public SpaceClosure {
+private:
CardTableRS* _ct;
HeapWord* _boundary;
public:
VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
_ct(ct), _boundary(boundary) {}
- void do_space(Space* s) { _ct->verify_space(s, _boundary); }
+ virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
};
class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
diff --git a/src/share/vm/memory/cardTableRS.hpp b/src/share/vm/memory/cardTableRS.hpp
index 5d92067aa..c2180de67 100644
--- a/src/share/vm/memory/cardTableRS.hpp
+++ b/src/share/vm/memory/cardTableRS.hpp
@@ -106,18 +106,18 @@ public:
// closure application.
void younger_refs_iterate(Generation* g, OopsInGenClosure* blk);
- void inline_write_ref_field_gc(oop* field, oop new_val) {
+ void inline_write_ref_field_gc(void* field, oop new_val) {
jbyte* byte = _ct_bs.byte_for(field);
*byte = youngergen_card;
}
- void write_ref_field_gc_work(oop* field, oop new_val) {
+ void write_ref_field_gc_work(void* field, oop new_val) {
inline_write_ref_field_gc(field, new_val);
}
// Override. Might want to devirtualize this in the same fashion as
// above. Ensures that the value of the card for field says that it's
// a younger card in the current collection.
- virtual void write_ref_field_gc_par(oop* field, oop new_val);
+ virtual void write_ref_field_gc_par(void* field, oop new_val);
void resize_covered_region(MemRegion new_region);
diff --git a/src/share/vm/memory/compactingPermGenGen.cpp b/src/share/vm/memory/compactingPermGenGen.cpp
index 317908b18..78ada7014 100644
--- a/src/share/vm/memory/compactingPermGenGen.cpp
+++ b/src/share/vm/memory/compactingPermGenGen.cpp
@@ -49,9 +49,9 @@ public:
// to prevent visiting any object twice.
class RecursiveAdjustSharedObjectClosure : public OopClosure {
-public:
- void do_oop(oop* o) {
- oop obj = *o;
+ protected:
+ template <class T> inline void do_oop_work(T* p) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
if (obj->is_shared_readwrite()) {
if (obj->mark()->is_marked()) {
obj->init_mark(); // Don't revisit this object.
@@ -71,7 +71,10 @@ public:
}
}
}
- };
+ }
+ public:
+ virtual void do_oop(oop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
};
@@ -86,9 +89,9 @@ public:
// as doing so can cause hash codes to be computed, destroying
// forwarding pointers.
class TraversePlaceholdersClosure : public OopClosure {
- public:
- void do_oop(oop* o) {
- oop obj = *o;
+ protected:
+ template <class T> inline void do_oop_work(T* p) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
if (obj->klass() == Universe::symbolKlassObj() &&
obj->is_shared_readonly()) {
symbolHandle sym((symbolOop) obj);
@@ -99,6 +102,10 @@ class TraversePlaceholdersClosure : public OopClosure {
}
}
}
+ public:
+ virtual void do_oop(oop* p) { TraversePlaceholdersClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { TraversePlaceholdersClosure::do_oop_work(p); }
+
};
diff --git a/src/share/vm/memory/defNewGeneration.cpp b/src/share/vm/memory/defNewGeneration.cpp
index f892ffdb5..d13c9e9ad 100644
--- a/src/share/vm/memory/defNewGeneration.cpp
+++ b/src/share/vm/memory/defNewGeneration.cpp
@@ -47,31 +47,9 @@ KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
_rs = (CardTableRS*)rs;
}
-void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) {
- // We never expect to see a null reference being processed
- // as a weak reference.
- assert (*p != NULL, "expected non-null ref");
- assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
-
- _cl->do_oop_nv(p);
-
- // Card marking is trickier for weak refs.
- // This oop is a 'next' field which was filled in while we
- // were discovering weak references. While we might not need
- // to take a special action to keep this reference alive, we
- // will need to dirty a card as the field was modified.
- //
- // Alternatively, we could create a method which iterates through
- // each generation, allowing them in turn to examine the modified
- // field.
- //
- // We could check that p is also in an older generation, but
- // dirty cards in the youngest gen are never scanned, so the
- // extra check probably isn't worthwhile.
- if (Universe::heap()->is_in_reserved(p)) {
- _rs->inline_write_ref_field_gc(p, *p);
- }
-}
+void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
+void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
+
DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
@@ -79,19 +57,8 @@ FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
_boundary = g->reserved().end();
}
-void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) {
- assert (*p != NULL, "expected non-null ref");
- assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
-
- _cl->do_oop_nv(p);
-
- // Optimized for Defnew generation if it's the youngest generation:
- // we set a younger_gen card if we have an older->youngest
- // generation pointer.
- if (((HeapWord*)(*p) < _boundary) && Universe::heap()->is_in_reserved(p)) {
- _rs->inline_write_ref_field_gc(p, *p);
- }
-}
+void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
+void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
@@ -132,6 +99,9 @@ ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
_boundary = _g->reserved().end();
}
+void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
+void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
+
FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
@@ -139,6 +109,9 @@ FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
_boundary = _g->reserved().end();
}
+void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
+void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
+
ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
OopClosure(g->ref_processor()), _g(g)
{
@@ -146,6 +119,11 @@ ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
_boundary = _g->reserved().end();
}
+void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
+void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
+
+void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
+void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
size_t initial_size,
@@ -656,7 +634,7 @@ void DefNewGeneration::handle_promotion_failure(oop old) {
}
}
-oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
+oop DefNewGeneration::copy_to_survivor_space(oop old) {
assert(is_in_reserved(old) && !old->is_forwarded(),
"shouldn't be scavenging this oop");
size_t s = old->size();
@@ -669,7 +647,7 @@ oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
// Otherwise try allocating obj tenured
if (obj == NULL) {
- obj = _next_gen->promote(old, s, from);
+ obj = _next_gen->promote(old, s);
if (obj == NULL) {
if (!HandlePromotionFailure) {
// A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
@@ -862,3 +840,69 @@ void DefNewGeneration::print_on(outputStream* st) const {
const char* DefNewGeneration::name() const {
return "def new generation";
}
+
+// Moved from inline file as they are not called inline
+CompactibleSpace* DefNewGeneration::first_compaction_space() const {
+ return eden();
+}
+
+HeapWord* DefNewGeneration::allocate(size_t word_size,
+ bool is_tlab) {
+ // This is the slow-path allocation for the DefNewGeneration.
+ // Most allocations are fast-path in compiled code.
+ // We try to allocate from the eden. If that works, we are happy.
+ // Note that since DefNewGeneration supports lock-free allocation, we
+ // have to use it here, as well.
+ HeapWord* result = eden()->par_allocate(word_size);
+ if (result != NULL) {
+ return result;
+ }
+ do {
+ HeapWord* old_limit = eden()->soft_end();
+ if (old_limit < eden()->end()) {
+ // Tell the next generation we reached a limit.
+ HeapWord* new_limit =
+ next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
+ if (new_limit != NULL) {
+ Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
+ } else {
+ assert(eden()->soft_end() == eden()->end(),
+ "invalid state after allocation_limit_reached returned null");
+ }
+ } else {
+ // The allocation failed and the soft limit is equal to the hard limit,
+ // there are no reasons to do an attempt to allocate
+ assert(old_limit == eden()->end(), "sanity check");
+ break;
+ }
+ // Try to allocate until succeeded or the soft limit can't be adjusted
+ result = eden()->par_allocate(word_size);
+ } while (result == NULL);
+
+ // If the eden is full and the last collection bailed out, we are running
+ // out of heap space, and we try to allocate the from-space, too.
+ // allocate_from_space can't be inlined because that would introduce a
+ // circular dependency at compile time.
+ if (result == NULL) {
+ result = allocate_from_space(word_size);
+ }
+ return result;
+}
+
+HeapWord* DefNewGeneration::par_allocate(size_t word_size,
+ bool is_tlab) {
+ return eden()->par_allocate(word_size);
+}
+
+void DefNewGeneration::gc_prologue(bool full) {
+ // Ensure that _end and _soft_end are the same in eden space.
+ eden()->set_soft_end(eden()->end());
+}
+
+size_t DefNewGeneration::tlab_capacity() const {
+ return eden()->capacity();
+}
+
+size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
+ return unsafe_max_alloc_nogc();
+}
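The allocate() body moved above leans on eden()->par_allocate(), which the comment describes as lock-free; par_allocate itself is not part of this excerpt. The standard technique is a bump-pointer allocation with a CAS on the space's top pointer, roughly as in this sketch (names and layout are stand-ins, with std::atomic in place of the VM's Atomic primitives):

    #include <atomic>
    #include <cstdio>

    struct Space {
      char*              start;
      char*              end;
      std::atomic<char*> top;
    };

    // Lock-free bump-pointer allocation: claim [old_top, old_top + bytes) by
    // CAS-ing the new top; on failure old_top is refreshed and we retry.
    static char* par_allocate(Space* s, size_t bytes) {
      char* old_top = s->top.load();
      do {
        if ((size_t)(s->end - old_top) < bytes) return nullptr;   // space exhausted
      } while (!s->top.compare_exchange_weak(old_top, old_top + bytes));
      return old_top;
    }

    int main() {
      static char eden[1024];
      Space s;
      s.start = eden;
      s.end   = eden + sizeof(eden);
      s.top.store(eden);

      char* a = par_allocate(&s, 64);
      char* b = par_allocate(&s, 64);
      std::printf("%d\n", a == eden && b == eden + 64);   // disjoint chunks
    }

Because each thread claims a disjoint chunk with a single CAS, the slow path above only has to deal with the soft-limit bookkeeping and the fallback into from-space.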
diff --git a/src/share/vm/memory/defNewGeneration.hpp b/src/share/vm/memory/defNewGeneration.hpp
index 289a5317e..893afc055 100644
--- a/src/share/vm/memory/defNewGeneration.hpp
+++ b/src/share/vm/memory/defNewGeneration.hpp
@@ -24,6 +24,7 @@
class EdenSpace;
class ContiguousSpace;
+class ScanClosure;
// DefNewGeneration is a young generation containing eden, from- and
// to-space.
@@ -155,17 +156,21 @@ protected:
protected:
ScanWeakRefClosure* _cl;
CardTableRS* _rs;
+ template <class T> void do_oop_work(T* p);
public:
KeepAliveClosure(ScanWeakRefClosure* cl);
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
class FastKeepAliveClosure: public KeepAliveClosure {
protected:
HeapWord* _boundary;
+ template <class T> void do_oop_work(T* p);
public:
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
- void do_oop(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
class EvacuateFollowersClosure: public VoidClosure {
@@ -206,7 +211,7 @@ protected:
ContiguousSpace* from() const { return _from_space; }
ContiguousSpace* to() const { return _to_space; }
- inline CompactibleSpace* first_compaction_space() const;
+ virtual CompactibleSpace* first_compaction_space() const;
// Space enquiries
size_t capacity() const;
@@ -226,8 +231,8 @@ protected:
// Thread-local allocation buffers
bool supports_tlab_allocation() const { return true; }
- inline size_t tlab_capacity() const;
- inline size_t unsafe_max_tlab_alloc() const;
+ size_t tlab_capacity() const;
+ size_t unsafe_max_tlab_alloc() const;
// Grow the generation by the specified number of bytes.
// The size of bytes is assumed to be properly aligned.
@@ -265,13 +270,13 @@ protected:
return result;
}
- inline HeapWord* allocate(size_t word_size, bool is_tlab);
+ HeapWord* allocate(size_t word_size, bool is_tlab);
HeapWord* allocate_from_space(size_t word_size);
- inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
+ HeapWord* par_allocate(size_t word_size, bool is_tlab);
// Prologue & Epilogue
- inline virtual void gc_prologue(bool full);
+ virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
// Doesn't require additional work during GC prologue and epilogue
@@ -307,7 +312,7 @@ protected:
bool is_tlab,
bool parallel = false);
- oop copy_to_survivor_space(oop old, oop* from);
+ oop copy_to_survivor_space(oop old);
int tenuring_threshold() { return _tenuring_threshold; }
// Performance Counter support
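
The header changes above follow the pattern used throughout this patch: each closure keeps a single template do_oop_work<T>, and the two now-virtual do_oop overloads (oop* and narrowOop*) simply forward to it. A bare sketch of that shape, with a made-up MyClosure and placeholder typedefs standing in for the real oop/narrowOop types:

class oopDesc; typedef oopDesc* oop;   // full-width reference (placeholder)
typedef unsigned int narrowOop;        // 32-bit compressed reference (placeholder)

class MyClosure /* : public OopClosure */ {
  template <class T> void do_oop_work(T* p) {
    (void)p;  // one implementation handles both widths; T is oop or narrowOop
  }
 public:
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual ~MyClosure() {}
};
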
diff --git a/src/share/vm/memory/defNewGeneration.inline.hpp b/src/share/vm/memory/defNewGeneration.inline.hpp
index dffc86b5a..23a969845 100644
--- a/src/share/vm/memory/defNewGeneration.inline.hpp
+++ b/src/share/vm/memory/defNewGeneration.inline.hpp
@@ -22,67 +22,60 @@
*
*/
-CompactibleSpace* DefNewGeneration::first_compaction_space() const {
- return eden();
-}
+// Methods of protected closure types
-HeapWord* DefNewGeneration::allocate(size_t word_size,
- bool is_tlab) {
- // This is the slow-path allocation for the DefNewGeneration.
- // Most allocations are fast-path in compiled code.
- // We try to allocate from the eden. If that works, we are happy.
- // Note that since DefNewGeneration supports lock-free allocation, we
- // have to use it here, as well.
- HeapWord* result = eden()->par_allocate(word_size);
- if (result != NULL) {
- return result;
+template <class T>
+inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) {
+#ifdef ASSERT
+ {
+ // We never expect to see a null reference being processed
+ // as a weak reference.
+ assert (!oopDesc::is_null(*p), "expected non-null ref");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ assert (obj->is_oop(), "expected an oop while scanning weak refs");
}
- do {
- HeapWord* old_limit = eden()->soft_end();
- if (old_limit < eden()->end()) {
- // Tell the next generation we reached a limit.
- HeapWord* new_limit =
- next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
- if (new_limit != NULL) {
- Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
- } else {
- assert(eden()->soft_end() == eden()->end(),
- "invalid state after allocation_limit_reached returned null");
- }
- } else {
- // The allocation failed and the soft limit is equal to the hard limit,
- // there are no reasons to do an attempt to allocate
- assert(old_limit == eden()->end(), "sanity check");
- break;
- }
- // Try to allocate until succeeded or the soft limit can't be adjusted
- result = eden()->par_allocate(word_size);
- } while (result == NULL);
+#endif // ASSERT
- // If the eden is full and the last collection bailed out, we are running
- // out of heap space, and we try to allocate the from-space, too.
- // allocate_from_space can't be inlined because that would introduce a
- // circular dependency at compile time.
- if (result == NULL) {
- result = allocate_from_space(word_size);
- }
- return result;
-}
+ _cl->do_oop_nv(p);
-HeapWord* DefNewGeneration::par_allocate(size_t word_size,
- bool is_tlab) {
- return eden()->par_allocate(word_size);
+ // Card marking is trickier for weak refs.
+ // This oop is a 'next' field which was filled in while we
+ // were discovering weak references. While we might not need
+ // to take a special action to keep this reference alive, we
+ // will need to dirty a card as the field was modified.
+ //
+ // Alternatively, we could create a method which iterates through
+ // each generation, allowing them in turn to examine the modified
+ // field.
+ //
+ // We could check that p is also in an older generation, but
+ // dirty cards in the youngest gen are never scanned, so the
+ // extra check probably isn't worthwhile.
+ if (Universe::heap()->is_in_reserved(p)) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ _rs->inline_write_ref_field_gc(p, obj);
+ }
}
-void DefNewGeneration::gc_prologue(bool full) {
- // Ensure that _end and _soft_end are the same in eden space.
- eden()->set_soft_end(eden()->end());
-}
+template <class T>
+inline void DefNewGeneration::FastKeepAliveClosure::do_oop_work(T* p) {
+#ifdef ASSERT
+ {
+ // We never expect to see a null reference being processed
+ // as a weak reference.
+ assert (!oopDesc::is_null(*p), "expected non-null ref");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ assert (obj->is_oop(), "expected an oop while scanning weak refs");
+ }
+#endif // ASSERT
-size_t DefNewGeneration::tlab_capacity() const {
- return eden()->capacity();
-}
+ _cl->do_oop_nv(p);
-size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
- return unsafe_max_alloc_nogc();
+  // Optimized for DefNew generation if it's the youngest generation:
+ // we set a younger_gen card if we have an older->youngest
+ // generation pointer.
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ if (((HeapWord*)obj < _boundary) && Universe::heap()->is_in_reserved(p)) {
+ _rs->inline_write_ref_field_gc(p, obj);
+ }
}
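
The bodies above lean on oopDesc::load_decode_heap_oop_not_null and friends. Conceptually a narrowOop is a scaled offset from a heap base; the sketch below shows that arithmetic under the assumption of a zero base and a 3-bit shift (the real base and shift are chosen at VM startup, and the real helpers also cover null and raw-store variants).

#include <cstdint>

class oopDesc; typedef oopDesc* oop;
typedef uint32_t narrowOop;

// Hypothetical base/shift; the real values are picked when the heap is reserved.
static char* const narrow_oop_base  = nullptr;   // zero-based heap assumed
static const int   narrow_oop_shift = 3;         // 8-byte object alignment

// Conceptual equivalents of oopDesc::decode/encode_heap_oop_not_null: a
// narrowOop is a scaled offset from the base, which is how 32 bits can
// address up to a 32 GB, 8-byte-aligned heap.
inline oop decode_not_null(narrowOop v) {
  return (oop)(narrow_oop_base + ((uintptr_t)v << narrow_oop_shift));
}
inline narrowOop encode_not_null(oop o) {
  return (narrowOop)(((char*)o - narrow_oop_base) >> narrow_oop_shift);
}

// load_decode_heap_oop_not_null then just reads the slot at its width:
inline oop load_decode(narrowOop* p) { return decode_not_null(*p); }
inline oop load_decode(oop* p)       { return *p; }
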
diff --git a/src/share/vm/memory/dump.cpp b/src/share/vm/memory/dump.cpp
index 4f75ca8e7..949933628 100644
--- a/src/share/vm/memory/dump.cpp
+++ b/src/share/vm/memory/dump.cpp
@@ -60,9 +60,9 @@ public:
hash_offset = java_lang_String::hash_offset_in_bytes();
}
- void do_oop(oop* pobj) {
- if (pobj != NULL) {
- oop obj = *pobj;
+ void do_oop(oop* p) {
+ if (p != NULL) {
+ oop obj = *p;
if (obj->klass() == SystemDictionary::string_klass()) {
int hash;
@@ -79,6 +79,7 @@ public:
}
}
}
+ void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
@@ -121,9 +122,8 @@ static bool mark_object(oop obj) {
class MarkObjectsOopClosure : public OopClosure {
public:
- void do_oop(oop* pobj) {
- mark_object(*pobj);
- }
+ void do_oop(oop* p) { mark_object(*p); }
+ void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
@@ -136,6 +136,7 @@ public:
mark_object(obj);
}
}
+ void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
};
@@ -554,6 +555,7 @@ public:
}
}
}
+ void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
};
@@ -690,6 +692,8 @@ public:
++top;
}
+ void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
+
void do_int(int* p) {
check_space();
*top = (oop)(intptr_t)*p;
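
The do_oop(narrowOop*) stubs added to these dump-time closures are deliberate dead ends: the overload became pure virtual in OopClosure, but archive dumping never runs with compressed oops (serialize.cpp below asserts as much), so the narrow path only needs to trap. A sketch of that defensive stub, with a hypothetical DumpTimeOopClosure:

#include <cassert>

class oopDesc; typedef oopDesc* oop;
typedef unsigned int narrowOop;

// Hypothetical dump-time closure: only the full-width overload does work; the
// narrowOop overload exists to satisfy the interface and trap misuse.
struct DumpTimeOopClosure /* : public OopClosure */ {
  virtual void do_oop(oop* p)       { (void)p; /* mark / patch *p here */ }
  virtual void do_oop(narrowOop* p) { (void)p; assert(false && "dump never uses compressed oops"); }
  virtual ~DumpTimeOopClosure() {}
};
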
diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp
index afc4f52ea..dc3ba9b3c 100644
--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp
@@ -624,6 +624,7 @@ public:
void do_oop(oop* p) {
assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
}
+ void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;
@@ -1300,8 +1301,7 @@ void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
oop obj,
- size_t obj_size,
- oop* ref) {
+ size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
HeapWord* result = NULL;
diff --git a/src/share/vm/memory/genCollectedHeap.hpp b/src/share/vm/memory/genCollectedHeap.hpp
index b3cf2de0f..ec147b309 100644
--- a/src/share/vm/memory/genCollectedHeap.hpp
+++ b/src/share/vm/memory/genCollectedHeap.hpp
@@ -452,8 +452,7 @@ public:
// gen; return the new location of obj if successful. Otherwise, return NULL.
oop handle_failed_promotion(Generation* gen,
oop obj,
- size_t obj_size,
- oop* ref);
+ size_t obj_size);
private:
// Accessor for memory state verification support
diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp
index 35e074403..e98f67930 100644
--- a/src/share/vm/memory/genMarkSweep.cpp
+++ b/src/share/vm/memory/genMarkSweep.cpp
@@ -73,8 +73,7 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
VALIDATE_MARK_SWEEP_ONLY(
if (ValidateMarkSweep) {
- guarantee(_root_refs_stack->length() == 0,
- "should be empty by now");
+ guarantee(_root_refs_stack->length() == 0, "should be empty by now");
}
)
@@ -165,9 +164,9 @@ void GenMarkSweep::allocate_stacks() {
#ifdef VALIDATE_MARK_SWEEP
if (ValidateMarkSweep) {
- _root_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<oop*>(100, true);
- _other_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<oop*>(100, true);
- _adjusted_pointers = new (ResourceObj::C_HEAP) GrowableArray<oop*>(100, true);
+ _root_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
+ _other_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
+ _adjusted_pointers = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
_live_oops = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
_live_oops_moved_to = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
_live_oops_size = new (ResourceObj::C_HEAP) GrowableArray<size_t>(100, true);
diff --git a/src/share/vm/memory/genOopClosures.hpp b/src/share/vm/memory/genOopClosures.hpp
index 137482c3c..d0f142f65 100644
--- a/src/share/vm/memory/genOopClosures.hpp
+++ b/src/share/vm/memory/genOopClosures.hpp
@@ -28,6 +28,11 @@ class CardTableRS;
class CardTableModRefBS;
class DefNewGeneration;
+template<class E> class GenericTaskQueue;
+typedef GenericTaskQueue<oop> OopTaskQueue;
+template<class E> class GenericTaskQueueSet;
+typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
+
// Closure for iterating roots from a particular generation
// Note: all classes deriving from this MUST call this do_barrier
// method at the end of their own do_oop method!
@@ -35,13 +40,13 @@ class DefNewGeneration;
class OopsInGenClosure : public OopClosure {
private:
- Generation* _orig_gen; // generation originally set in ctor
- Generation* _gen; // generation being scanned
+ Generation* _orig_gen; // generation originally set in ctor
+ Generation* _gen; // generation being scanned
protected:
// Some subtypes need access.
- HeapWord* _gen_boundary; // start of generation
- CardTableRS* _rs; // remembered set
+ HeapWord* _gen_boundary; // start of generation
+ CardTableRS* _rs; // remembered set
// For assertions
Generation* generation() { return _gen; }
@@ -49,7 +54,7 @@ class OopsInGenClosure : public OopClosure {
// Derived classes that modify oops so that they might be old-to-young
// pointers must call the method below.
- void do_barrier(oop* p);
+ template <class T> void do_barrier(T* p);
public:
OopsInGenClosure() : OopClosure(NULL),
@@ -75,14 +80,17 @@ class OopsInGenClosure : public OopClosure {
// This closure will perform barrier store calls for ALL
// pointers in scanned oops.
class ScanClosure: public OopsInGenClosure {
-protected:
+ protected:
DefNewGeneration* _g;
- HeapWord* _boundary;
- bool _gc_barrier;
-public:
+ HeapWord* _boundary;
+ bool _gc_barrier;
+ template <class T> inline void do_oop_work(T* p);
+ public:
ScanClosure(DefNewGeneration* g, bool gc_barrier);
- void do_oop(oop* p);
- void do_oop_nv(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p);
+ inline void do_oop_nv(narrowOop* p);
bool do_header() { return false; }
Prefetch::style prefetch_style() {
return Prefetch::do_write;
@@ -95,14 +103,17 @@ public:
// pointers into the DefNewGeneration. This is less
// precise, but faster, than a ScanClosure
class FastScanClosure: public OopsInGenClosure {
-protected:
+ protected:
DefNewGeneration* _g;
- HeapWord* _boundary;
- bool _gc_barrier;
-public:
+ HeapWord* _boundary;
+ bool _gc_barrier;
+ template <class T> inline void do_oop_work(T* p);
+ public:
FastScanClosure(DefNewGeneration* g, bool gc_barrier);
- void do_oop(oop* p);
- void do_oop_nv(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p);
+ inline void do_oop_nv(narrowOop* p);
bool do_header() { return false; }
Prefetch::style prefetch_style() {
return Prefetch::do_write;
@@ -110,19 +121,27 @@ public:
};
class FilteringClosure: public OopClosure {
- HeapWord* _boundary;
+ private:
+ HeapWord* _boundary;
OopClosure* _cl;
-public:
+ protected:
+ template <class T> inline void do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if ((HeapWord*)obj < _boundary) {
+ _cl->do_oop(p);
+ }
+ }
+ }
+ public:
FilteringClosure(HeapWord* boundary, OopClosure* cl) :
OopClosure(cl->_ref_processor), _boundary(boundary),
_cl(cl) {}
- void do_oop(oop* p);
- void do_oop_nv(oop* p) {
- oop obj = *p;
- if ((HeapWord*)obj < _boundary && obj != NULL) {
- _cl->do_oop(p);
- }
- }
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p) { FilteringClosure::do_oop_work(p); }
+ inline void do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p); }
bool do_header() { return false; }
};
@@ -131,19 +150,26 @@ public:
// OopsInGenClosure -- weak references are processed all
// at once, with no notion of which generation they were in.
class ScanWeakRefClosure: public OopClosure {
-protected:
- DefNewGeneration* _g;
- HeapWord* _boundary;
-public:
+ protected:
+ DefNewGeneration* _g;
+ HeapWord* _boundary;
+ template <class T> inline void do_oop_work(T* p);
+ public:
ScanWeakRefClosure(DefNewGeneration* g);
- void do_oop(oop* p);
- void do_oop_nv(oop* p);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+ inline void do_oop_nv(oop* p);
+ inline void do_oop_nv(narrowOop* p);
};
class VerifyOopClosure: public OopClosure {
-public:
- void do_oop(oop* p) {
- guarantee((*p)->is_oop_or_null(), "invalid oop");
+ protected:
+ template <class T> inline void do_oop_work(T* p) {
+ oop obj = oopDesc::load_decode_heap_oop(p);
+ guarantee(obj->is_oop_or_null(), "invalid oop");
}
+ public:
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
static VerifyOopClosure verify_oop;
};
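
FilteringClosure::do_oop_work above shows the canonical null handling for slots that may be compressed: load the raw value, test it for the encoded null (0), and only then decode with the not-null variant, since decoding 0 through a non-zero base would produce a bogus pointer. A sketch of that sequence, reusing the zero-base, shift-3 assumption from the earlier note; the names below are illustrative, not the real oopDesc API.

#include <cstdint>

class oopDesc; typedef oopDesc* oop;
typedef uint32_t narrowOop;

// Hypothetical helpers mirroring is_null / decode_heap_oop_not_null.
inline bool is_null(oop o)       { return o == nullptr; }
inline bool is_null(narrowOop v) { return v == 0; }
inline oop  decode(oop o)        { return o; }
inline oop  decode(narrowOop v)  { return (oop)((uintptr_t)v << 3); }  // zero-based heap assumed

// Shape of the filtering body: read the slot at its natural width, check the
// encoded value for null, decode exactly once, then filter on address.
template <class T> inline bool below_boundary(T* p, char* boundary) {
  T heap_oop = *p;                       // load_heap_oop(p)
  if (is_null(heap_oop)) return false;   // nothing to forward
  oop obj = decode(heap_oop);            // decode_heap_oop_not_null
  return (char*)obj < boundary;          // only referents below the boundary interest _cl
}
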
diff --git a/src/share/vm/memory/genOopClosures.inline.hpp b/src/share/vm/memory/genOopClosures.inline.hpp
index 3bbe76527..a6699d74b 100644
--- a/src/share/vm/memory/genOopClosures.inline.hpp
+++ b/src/share/vm/memory/genOopClosures.inline.hpp
@@ -38,10 +38,10 @@ inline void OopsInGenClosure::set_generation(Generation* gen) {
}
}
-inline void OopsInGenClosure::do_barrier(oop* p) {
+template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
- oop obj = *p;
- assert(obj != NULL, "expected non-null object");
+ assert(!oopDesc::is_null(*p), "expected non-null object");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < _gen_boundary) {
_rs->inline_write_ref_field_gc(p, obj);
@@ -49,18 +49,17 @@ inline void OopsInGenClosure::do_barrier(oop* p) {
}
// NOTE! Any changes made here should also be made
-// in FastScanClosure::do_oop();
-inline void ScanClosure::do_oop(oop* p) {
- oop obj = *p;
+// in FastScanClosure::do_oop_work()
+template <class T> inline void ScanClosure::do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
// Should we copy the obj?
- if (obj != NULL) {
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
- if (obj->is_forwarded()) {
- *p = obj->forwardee();
- } else {
- *p = _g->copy_to_survivor_space(obj, p);
- }
+ oop new_obj = obj->is_forwarded() ? obj->forwardee()
+ : _g->copy_to_survivor_space(obj);
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
if (_gc_barrier) {
// Now call parent closure
@@ -69,23 +68,21 @@ inline void ScanClosure::do_oop(oop* p) {
}
}
-inline void ScanClosure::do_oop_nv(oop* p) {
- ScanClosure::do_oop(p);
-}
+inline void ScanClosure::do_oop_nv(oop* p) { ScanClosure::do_oop_work(p); }
+inline void ScanClosure::do_oop_nv(narrowOop* p) { ScanClosure::do_oop_work(p); }
// NOTE! Any changes made here should also be made
-// in ScanClosure::do_oop();
-inline void FastScanClosure::do_oop(oop* p) {
- oop obj = *p;
+// in ScanClosure::do_oop_work()
+template <class T> inline void FastScanClosure::do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
// Should we copy the obj?
- if (obj != NULL) {
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
- if (obj->is_forwarded()) {
- *p = obj->forwardee();
- } else {
- *p = _g->copy_to_survivor_space(obj, p);
- }
+ oop new_obj = obj->is_forwarded() ? obj->forwardee()
+ : _g->copy_to_survivor_space(obj);
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
if (_gc_barrier) {
// Now call parent closure
do_barrier(p);
@@ -94,26 +91,22 @@ inline void FastScanClosure::do_oop(oop* p) {
}
}
-inline void FastScanClosure::do_oop_nv(oop* p) {
- FastScanClosure::do_oop(p);
-}
+inline void FastScanClosure::do_oop_nv(oop* p) { FastScanClosure::do_oop_work(p); }
+inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }
// Note similarity to ScanClosure; the difference is that
// the barrier set is taken care of outside this closure.
-inline void ScanWeakRefClosure::do_oop(oop* p) {
- oop obj = *p;
- assert (obj != NULL, "null weak reference?");
+template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {
+ assert(!oopDesc::is_null(*p), "null weak reference?");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// weak references are sometimes scanned twice; must check
// that to-space doesn't already contain this object
if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
- if (obj->is_forwarded()) {
- *p = obj->forwardee();
- } else {
- *p = _g->copy_to_survivor_space(obj, p);
- }
+ oop new_obj = obj->is_forwarded() ? obj->forwardee()
+ : _g->copy_to_survivor_space(obj);
+ oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
}
-inline void ScanWeakRefClosure::do_oop_nv(oop* p) {
- ScanWeakRefClosure::do_oop(p);
-}
+inline void ScanWeakRefClosure::do_oop_nv(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
+inline void ScanWeakRefClosure::do_oop_nv(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
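
The rewritten scan closures decode the slot once, pick either the existing forwardee or a fresh copy, and write the result back with encode_store_heap_oop_not_null so the slot is re-narrowed when compressed oops are on. A sketch of that store-back step; the forwarding and copy helpers here are stubs standing in for oopDesc::is_forwarded/forwardee and DefNewGeneration::copy_to_survivor_space.

#include <cstdint>

class oopDesc; typedef oopDesc* oop;
typedef uint32_t narrowOop;

// Hypothetical store helpers (zero-based heap, shift 3 assumed, as before).
inline void encode_store_not_null(oop* p, oop v)       { *p = v; }
inline void encode_store_not_null(narrowOop* p, oop v) { *p = (narrowOop)((uintptr_t)v >> 3); }

// Stubs only: the real versions consult the mark word and copy into to-space.
inline bool is_forwarded(oop)               { return false; }
inline oop  forwardee(oop obj)              { return obj; }
inline oop  copy_to_survivor_space(oop obj) { return obj; }

// Shape of ScanClosure::do_oop_work once obj is known non-null and young: the
// slot keeps its width through T, so the final store re-encodes the forwarded
// address correctly for either representation.
template <class T> inline void scavenge_slot(T* p, oop obj) {
  oop new_obj = is_forwarded(obj) ? forwardee(obj)
                                  : copy_to_survivor_space(obj);
  encode_store_not_null(p, new_obj);
}
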
diff --git a/src/share/vm/memory/genRemSet.hpp b/src/share/vm/memory/genRemSet.hpp
index 006eab3eb..c2ef23061 100644
--- a/src/share/vm/memory/genRemSet.hpp
+++ b/src/share/vm/memory/genRemSet.hpp
@@ -68,13 +68,13 @@ public:
// This method is used to notify the remembered set that "new_val" has
// been written into "field" by the garbage collector.
- void write_ref_field_gc(oop* field, oop new_val);
+ void write_ref_field_gc(void* field, oop new_val);
protected:
- virtual void write_ref_field_gc_work(oop* field, oop new_val) = 0;
+ virtual void write_ref_field_gc_work(void* field, oop new_val) = 0;
public:
// A version of the above suitable for use by parallel collectors.
- virtual void write_ref_field_gc_par(oop* field, oop new_val) = 0;
+ virtual void write_ref_field_gc_par(void* field, oop new_val) = 0;
// Resize one of the regions covered by the remembered set.
virtual void resize_covered_region(MemRegion new_region) = 0;
diff --git a/src/share/vm/memory/genRemSet.inline.hpp b/src/share/vm/memory/genRemSet.inline.hpp
index 448c18e22..3ae0e7f68 100644
--- a/src/share/vm/memory/genRemSet.inline.hpp
+++ b/src/share/vm/memory/genRemSet.inline.hpp
@@ -26,7 +26,7 @@
// performance-critical call when the rem set is the most common
// card-table kind.
-void GenRemSet::write_ref_field_gc(oop* field, oop new_val) {
+void GenRemSet::write_ref_field_gc(void* field, oop new_val) {
if (kind() == CardTableModRef) {
((CardTableRS*)this)->inline_write_ref_field_gc(field, new_val);
} else {
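
write_ref_field_gc now takes void* because the barrier only needs the slot's address: mapping an address to a card is the same computation whether the slot holds an oop or a narrowOop. A minimal card-table sketch under an assumed 512-byte card size (the real parameters live in CardTableModRefBS):

#include <cstdint>
#include <cstddef>

// Minimal, hypothetical card-table shape.
struct CardTableSketch {
  static const int card_shift = 9;   // 512-byte cards (assumption)
  uint8_t* table;                    // one byte per card
  char*    covered_base;             // start of the covered heap

  // The barrier never dereferences the field; it only maps the slot's address
  // to a card, so oop* and narrowOop* callers can share one void* entry point.
  void write_ref_field_gc(void* field /*, oop new_val */) {
    std::size_t card = (std::size_t)((char*)field - covered_base) >> card_shift;
    table[card] = 0;                 // 0 == dirty in this sketch
  }
};
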
diff --git a/src/share/vm/memory/generation.cpp b/src/share/vm/memory/generation.cpp
index d09238c19..5ed3ec09b 100644
--- a/src/share/vm/memory/generation.cpp
+++ b/src/share/vm/memory/generation.cpp
@@ -171,7 +171,7 @@ bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes,
}
// Ignores "ref" and calls allocate().
-oop Generation::promote(oop obj, size_t obj_size, oop* ref) {
+oop Generation::promote(oop obj, size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
#ifndef PRODUCT
@@ -186,7 +186,7 @@ oop Generation::promote(oop obj, size_t obj_size, oop* ref) {
return oop(result);
} else {
GenCollectedHeap* gch = GenCollectedHeap::heap();
- return gch->handle_failed_promotion(this, obj, obj_size, ref);
+ return gch->handle_failed_promotion(this, obj, obj_size);
}
}
diff --git a/src/share/vm/memory/generation.hpp b/src/share/vm/memory/generation.hpp
index b0921e766..2e146d538 100644
--- a/src/share/vm/memory/generation.hpp
+++ b/src/share/vm/memory/generation.hpp
@@ -295,13 +295,7 @@ class Generation: public CHeapObj {
//
// The "obj_size" argument is just obj->size(), passed along so the caller can
// avoid repeating the virtual call to retrieve it.
- //
- // The "ref" argument, if non-NULL, is the address of some reference to "obj"
- // (that is "*ref == obj"); some generations may use this information to, for
- // example, influence placement decisions.
- //
- // The default implementation ignores "ref" and calls allocate().
- virtual oop promote(oop obj, size_t obj_size, oop* ref);
+ virtual oop promote(oop obj, size_t obj_size);
// Thread "thread_num" (0 <= i < ParallelGCThreads) wants to promote
// object "obj", whose original mark word was "m", and whose size is
diff --git a/src/share/vm/memory/iterator.hpp b/src/share/vm/memory/iterator.hpp
index 29be107d6..1b92ddd4b 100644
--- a/src/share/vm/memory/iterator.hpp
+++ b/src/share/vm/memory/iterator.hpp
@@ -35,6 +35,8 @@ class OopClosure : public StackObj {
OopClosure() : _ref_processor(NULL) { }
virtual void do_oop(oop* o) = 0;
virtual void do_oop_v(oop* o) { do_oop(o); }
+ virtual void do_oop(narrowOop* o) = 0;
+ virtual void do_oop_v(narrowOop* o) { do_oop(o); }
// In support of post-processing of weak links of KlassKlass objects;
// see KlassKlass::oop_oop_iterate().
diff --git a/src/share/vm/memory/modRefBarrierSet.hpp b/src/share/vm/memory/modRefBarrierSet.hpp
index 85a943977..c85a18e7a 100644
--- a/src/share/vm/memory/modRefBarrierSet.hpp
+++ b/src/share/vm/memory/modRefBarrierSet.hpp
@@ -37,19 +37,19 @@ public:
bool has_write_ref_barrier() { return true; }
bool has_write_prim_barrier() { return false; }
- bool read_ref_needs_barrier(oop* field) { return false; }
+ bool read_ref_needs_barrier(void* field) { return false; }
bool read_prim_needs_barrier(HeapWord* field, size_t bytes) { return false; }
- virtual bool write_ref_needs_barrier(oop* field, oop new_val) = 0;
+ virtual bool write_ref_needs_barrier(void* field, oop new_val) = 0;
bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
juint val1, juint val2) { return false; }
void write_prim_field(oop obj, size_t offset, size_t bytes,
juint val1, juint val2) {}
- void read_ref_field(oop* field) {}
+ void read_ref_field(void* field) {}
void read_prim_field(HeapWord* field, size_t bytes) {}
protected:
- virtual void write_ref_field_work(oop* field, oop new_val) = 0;
+ virtual void write_ref_field_work(void* field, oop new_val) = 0;
public:
void write_prim_field(HeapWord* field, size_t bytes,
juint val1, juint val2) {}
diff --git a/src/share/vm/memory/referenceProcessor.cpp b/src/share/vm/memory/referenceProcessor.cpp
index 6aeeecd3e..c9ba9b81c 100644
--- a/src/share/vm/memory/referenceProcessor.cpp
+++ b/src/share/vm/memory/referenceProcessor.cpp
@@ -28,16 +28,32 @@
// List of discovered references.
class DiscoveredList {
public:
- DiscoveredList() : _head(NULL), _len(0) { }
- oop head() const { return _head; }
- oop* head_ptr() { return &_head; }
- void set_head(oop o) { _head = o; }
- bool empty() const { return _head == ReferenceProcessor::_sentinelRef; }
+ DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
+ oop head() const {
+ return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) :
+ _oop_head;
+ }
+ HeapWord* adr_head() {
+ return UseCompressedOops ? (HeapWord*)&_compressed_head :
+ (HeapWord*)&_oop_head;
+ }
+ void set_head(oop o) {
+ if (UseCompressedOops) {
+ // Must compress the head ptr.
+ _compressed_head = oopDesc::encode_heap_oop_not_null(o);
+ } else {
+ _oop_head = o;
+ }
+ }
+ bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); }
size_t length() { return _len; }
void set_length(size_t len) { _len = len; }
private:
+ // Set value depending on UseCompressedOops. This could be a template class
+ // but then we have to fix all the instantiations and declarations that use this class.
+ oop _oop_head;
+ narrowOop _compressed_head;
size_t _len;
- oop _head;
};
oop ReferenceProcessor::_sentinelRef = NULL;
@@ -49,11 +65,11 @@ void referenceProcessor_init() {
}
void ReferenceProcessor::init_statics() {
- assert(_sentinelRef == NULL, "should be initialized precsiely once");
+ assert(_sentinelRef == NULL, "should be initialized precisely once");
EXCEPTION_MARK;
_sentinelRef = instanceKlass::cast(
- SystemDictionary::object_klass())->
- allocate_permanent_instance(THREAD);
+ SystemDictionary::reference_klass())->
+ allocate_permanent_instance(THREAD);
// Initialize the master soft ref clock.
java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
@@ -69,15 +85,13 @@ void ReferenceProcessor::init_statics() {
"Unrecongnized RefDiscoveryPolicy");
}
-
-ReferenceProcessor* ReferenceProcessor::create_ref_processor(
- MemRegion span,
- bool atomic_discovery,
- bool mt_discovery,
- BoolObjectClosure* is_alive_non_header,
- int parallel_gc_threads,
- bool mt_processing)
-{
+ReferenceProcessor*
+ReferenceProcessor::create_ref_processor(MemRegion span,
+ bool atomic_discovery,
+ bool mt_discovery,
+ BoolObjectClosure* is_alive_non_header,
+ int parallel_gc_threads,
+ bool mt_processing) {
int mt_degree = 1;
if (parallel_gc_threads > 1) {
mt_degree = parallel_gc_threads;
@@ -93,10 +107,11 @@ ReferenceProcessor* ReferenceProcessor::create_ref_processor(
return rp;
}
-
ReferenceProcessor::ReferenceProcessor(MemRegion span,
- bool atomic_discovery, bool mt_discovery, int mt_degree,
- bool mt_processing) :
+ bool atomic_discovery,
+ bool mt_discovery,
+ int mt_degree,
+ bool mt_processing) :
_discovering_refs(false),
_enqueuing_is_done(false),
_is_alive_non_header(NULL),
@@ -114,10 +129,10 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
_discoveredWeakRefs = &_discoveredSoftRefs[_num_q];
_discoveredFinalRefs = &_discoveredWeakRefs[_num_q];
_discoveredPhantomRefs = &_discoveredFinalRefs[_num_q];
- assert(_sentinelRef != NULL, "_sentinelRef is NULL");
+ assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
// Initialize all entries to _sentinelRef
for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
- _discoveredSoftRefs[i].set_head(_sentinelRef);
+ _discoveredSoftRefs[i].set_head(sentinel_ref());
_discoveredSoftRefs[i].set_length(0);
}
}
@@ -134,16 +149,19 @@ void ReferenceProcessor::verify_no_references_recorded() {
void ReferenceProcessor::weak_oops_do(OopClosure* f) {
for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
- f->do_oop(_discoveredSoftRefs[i].head_ptr());
+ if (UseCompressedOops) {
+ f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
+ } else {
+ f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
+ }
}
}
void ReferenceProcessor::oops_do(OopClosure* f) {
- f->do_oop(&_sentinelRef);
+ f->do_oop(adr_sentinel_ref());
}
-void ReferenceProcessor::update_soft_ref_master_clock()
-{
+void ReferenceProcessor::update_soft_ref_master_clock() {
// Update (advance) the soft ref master clock field. This must be done
// after processing the soft ref list.
jlong now = os::javaTimeMillis();
@@ -164,9 +182,7 @@ void ReferenceProcessor::update_soft_ref_master_clock()
// past clock value.
}
-
-void
-ReferenceProcessor::process_discovered_references(
+void ReferenceProcessor::process_discovered_references(
ReferencePolicy* policy,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
@@ -223,15 +239,13 @@ ReferenceProcessor::process_discovered_references(
}
}
-
#ifndef PRODUCT
// Calculate the number of jni handles.
-unsigned int ReferenceProcessor::count_jni_refs()
-{
+uint ReferenceProcessor::count_jni_refs() {
class AlwaysAliveClosure: public BoolObjectClosure {
public:
- bool do_object_b(oop obj) { return true; }
- void do_object(oop obj) { assert(false, "Don't call"); }
+ virtual bool do_object_b(oop obj) { return true; }
+ virtual void do_object(oop obj) { assert(false, "Don't call"); }
};
class CountHandleClosure: public OopClosure {
@@ -239,9 +253,8 @@ unsigned int ReferenceProcessor::count_jni_refs()
int _count;
public:
CountHandleClosure(): _count(0) {}
- void do_oop(oop* unused) {
- _count++;
- }
+ void do_oop(oop* unused) { _count++; }
+ void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
int count() { return _count; }
};
CountHandleClosure global_handle_count;
@@ -262,36 +275,48 @@ void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
#endif
JNIHandles::weak_oops_do(is_alive, keep_alive);
// Finally remember to keep sentinel around
- keep_alive->do_oop(&_sentinelRef);
+ keep_alive->do_oop(adr_sentinel_ref());
complete_gc->do_void();
}
-bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
- NOT_PRODUCT(verify_ok_to_handle_reflists());
+
+template <class T>
+static bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
+ AbstractRefProcTaskExecutor* task_executor) {
+
// Remember old value of pending references list
- oop* pending_list_addr = java_lang_ref_Reference::pending_list_addr();
- oop old_pending_list_value = *pending_list_addr;
+ T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
+ T old_pending_list_value = *pending_list_addr;
// Enqueue references that are not made active again, and
// clear the decks for the next collection (cycle).
- enqueue_discovered_reflists(pending_list_addr, task_executor);
+ ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
// Do the oop-check on pending_list_addr missed in
// enqueue_discovered_reflist. We should probably
// do a raw oop_check so that future such idempotent
// oop_stores relying on the oop-check side-effect
// may be elided automatically and safely without
// affecting correctness.
- oop_store(pending_list_addr, *(pending_list_addr));
+ oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
// Stop treating discovered references specially.
- disable_discovery();
+ ref->disable_discovery();
// Return true if new pending references were added
return old_pending_list_value != *pending_list_addr;
}
+bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
+ NOT_PRODUCT(verify_ok_to_handle_reflists());
+ if (UseCompressedOops) {
+ return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
+ } else {
+ return enqueue_discovered_ref_helper<oop>(this, task_executor);
+ }
+}
+
void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
- oop* pending_list_addr) {
+ HeapWord* pending_list_addr) {
// Given a list of refs linked through the "discovered" field
// (java.lang.ref.Reference.discovered) chain them through the
// "next" field (java.lang.ref.Reference.next) and prepend
@@ -305,19 +330,19 @@ void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
// the next field and clearing it (except for the last
// non-sentinel object which is treated specially to avoid
// confusion with an active reference).
- while (obj != _sentinelRef) {
+ while (obj != sentinel_ref()) {
assert(obj->is_instanceRef(), "should be reference object");
oop next = java_lang_ref_Reference::discovered(obj);
if (TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
- (oopDesc*) obj, (oopDesc*) next);
+ gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
+ obj, next);
}
- assert(*java_lang_ref_Reference::next_addr(obj) == NULL,
- "The reference should not be enqueued");
- if (next == _sentinelRef) { // obj is last
+ assert(java_lang_ref_Reference::next(obj) == NULL,
+ "The reference should not be enqueued");
+ if (next == sentinel_ref()) { // obj is last
// Swap refs_list into pending_list_addr and
// set obj's next to what we read from pending_list_addr.
- oop old = (oop)Atomic::xchg_ptr(refs_list.head(), pending_list_addr);
+ oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
// Need oop_check on pending_list_addr above;
// see special oop-check code at the end of
// enqueue_discovered_reflists() further below.
@@ -341,15 +366,14 @@ class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
RefProcEnqueueTask(ReferenceProcessor& ref_processor,
DiscoveredList discovered_refs[],
- oop* pending_list_addr,
+ HeapWord* pending_list_addr,
oop sentinel_ref,
int n_queues)
: EnqueueTask(ref_processor, discovered_refs,
pending_list_addr, sentinel_ref, n_queues)
{ }
- virtual void work(unsigned int work_id)
- {
+ virtual void work(unsigned int work_id) {
assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
// Simplest first cut: static partitioning.
int index = work_id;
@@ -363,18 +387,18 @@ public:
};
// Enqueue references that are not made active again
-void ReferenceProcessor::enqueue_discovered_reflists(oop* pending_list_addr,
+void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
AbstractRefProcTaskExecutor* task_executor) {
if (_processing_is_mt && task_executor != NULL) {
// Parallel code
RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
- pending_list_addr, _sentinelRef, _num_q);
+ pending_list_addr, sentinel_ref(), _num_q);
task_executor->execute(tsk);
} else {
// Serial code: call the parent class's implementation
for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
- _discoveredSoftRefs[i].set_head(_sentinelRef);
+ _discoveredSoftRefs[i].set_head(sentinel_ref());
_discoveredSoftRefs[i].set_length(0);
}
}
@@ -388,14 +412,13 @@ public:
BoolObjectClosure* is_alive);
// End Of List.
- inline bool has_next() const
- { return _next != ReferenceProcessor::_sentinelRef; }
+ inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }
// Get oop to the Reference object.
- inline oop obj() const { return _ref; }
+ inline oop obj() const { return _ref; }
// Get oop to the referent object.
- inline oop referent() const { return _referent; }
+ inline oop referent() const { return _referent; }
// Returns true if referent is alive.
inline bool is_referent_alive() const;
@@ -417,13 +440,26 @@ public:
inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
// Make the referent alive.
- inline void make_referent_alive() { _keep_alive->do_oop(_referent_addr); }
+ inline void make_referent_alive() {
+ if (UseCompressedOops) {
+ _keep_alive->do_oop((narrowOop*)_referent_addr);
+ } else {
+ _keep_alive->do_oop((oop*)_referent_addr);
+ }
+ }
// Update the discovered field.
- inline void update_discovered() { _keep_alive->do_oop(_prev_next); }
+ inline void update_discovered() {
+ // First _prev_next ref actually points into DiscoveredList (gross).
+ if (UseCompressedOops) {
+ _keep_alive->do_oop((narrowOop*)_prev_next);
+ } else {
+ _keep_alive->do_oop((oop*)_prev_next);
+ }
+ }
// NULL out referent pointer.
- inline void clear_referent() { *_referent_addr = NULL; }
+ inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
// Statistics
NOT_PRODUCT(
@@ -436,11 +472,11 @@ private:
private:
DiscoveredList& _refs_list;
- oop* _prev_next;
+ HeapWord* _prev_next;
oop _ref;
- oop* _discovered_addr;
+ HeapWord* _discovered_addr;
oop _next;
- oop* _referent_addr;
+ HeapWord* _referent_addr;
oop _referent;
OopClosure* _keep_alive;
BoolObjectClosure* _is_alive;
@@ -457,7 +493,7 @@ inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList& refs_li
OopClosure* keep_alive,
BoolObjectClosure* is_alive)
: _refs_list(refs_list),
- _prev_next(refs_list.head_ptr()),
+ _prev_next(refs_list.adr_head()),
_ref(refs_list.head()),
#ifdef ASSERT
_first_seen(refs_list.head()),
@@ -471,19 +507,18 @@ inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList& refs_li
_is_alive(is_alive)
{ }
-inline bool DiscoveredListIterator::is_referent_alive() const
-{
+inline bool DiscoveredListIterator::is_referent_alive() const {
return _is_alive->do_object_b(_referent);
}
-inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent))
-{
+inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
_discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
- assert(_discovered_addr && (*_discovered_addr)->is_oop_or_null(),
+ oop discovered = java_lang_ref_Reference::discovered(_ref);
+ assert(_discovered_addr && discovered->is_oop_or_null(),
"discovered field is bad");
- _next = *_discovered_addr;
+ _next = discovered;
_referent_addr = java_lang_ref_Reference::referent_addr(_ref);
- _referent = *_referent_addr;
+ _referent = java_lang_ref_Reference::referent(_ref);
assert(Universe::heap()->is_in_reserved_or_null(_referent),
"Wrong oop found in java.lang.Reference object");
assert(allow_null_referent ?
@@ -492,32 +527,32 @@ inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referen
"bad referent");
}
-inline void DiscoveredListIterator::next()
-{
+inline void DiscoveredListIterator::next() {
_prev_next = _discovered_addr;
move_to_next();
}
-inline void DiscoveredListIterator::remove()
-{
+inline void DiscoveredListIterator::remove() {
assert(_ref->is_oop(), "Dropping a bad reference");
- // Clear the discovered_addr field so that the object does
- // not look like it has been discovered.
- *_discovered_addr = NULL;
- // Remove Reference object from list.
- *_prev_next = _next;
+ oop_store_raw(_discovered_addr, NULL);
+ // First _prev_next ref actually points into DiscoveredList (gross).
+ if (UseCompressedOops) {
+ // Remove Reference object from list.
+ oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
+ } else {
+ // Remove Reference object from list.
+ oopDesc::store_heap_oop((oop*)_prev_next, _next);
+ }
NOT_PRODUCT(_removed++);
move_to_next();
}
-inline void DiscoveredListIterator::move_to_next()
-{
+inline void DiscoveredListIterator::move_to_next() {
_ref = _next;
assert(_ref != _first_seen, "cyclic ref_list found");
NOT_PRODUCT(_processed++);
}
-
// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
@@ -531,13 +566,13 @@ inline void DiscoveredListIterator::move_to_next()
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
-ReferenceProcessor::process_phase1(DiscoveredList& refs_list_addr,
+ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
ReferencePolicy* policy,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc) {
assert(policy != NULL, "Must have a non-NULL policy");
- DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
// Decide which softly reachable refs should be kept alive.
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
@@ -545,7 +580,7 @@ ReferenceProcessor::process_phase1(DiscoveredList& refs_list_addr,
if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
- (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+ iter.obj(), iter.obj()->blueprint()->internal_name());
}
// Make the Reference object active again
iter.make_active();
@@ -570,20 +605,19 @@ ReferenceProcessor::process_phase1(DiscoveredList& refs_list_addr,
// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
-ReferenceProcessor::pp2_work(DiscoveredList& refs_list_addr,
+ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
- OopClosure* keep_alive)
-{
+ OopClosure* keep_alive) {
assert(discovery_is_atomic(), "Error");
- DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
- DEBUG_ONLY(oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());)
- assert(*next_addr == NULL, "Should not discover inactive Reference");
+ DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
+ assert(next == NULL, "Should not discover inactive Reference");
if (iter.is_referent_alive()) {
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
- (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+ iter.obj(), iter.obj()->blueprint()->internal_name());
}
// The referent is reachable after all.
// Update the referent pointer as necessary: Note that this
@@ -605,25 +639,28 @@ ReferenceProcessor::pp2_work(DiscoveredList& refs_list_addr,
}
void
-ReferenceProcessor::pp2_work_concurrent_discovery(
- DiscoveredList& refs_list_addr,
- BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- VoidClosure* complete_gc)
-{
+ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list,
+ BoolObjectClosure* is_alive,
+ OopClosure* keep_alive,
+ VoidClosure* complete_gc) {
assert(!discovery_is_atomic(), "Error");
- DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
- oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+ HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+ oop next = java_lang_ref_Reference::next(iter.obj());
if ((iter.referent() == NULL || iter.is_referent_alive() ||
- *next_addr != NULL)) {
- assert((*next_addr)->is_oop_or_null(), "bad next field");
+ next != NULL)) {
+ assert(next->is_oop_or_null(), "bad next field");
// Remove Reference object from list
iter.remove();
// Trace the cohorts
iter.make_referent_alive();
- keep_alive->do_oop(next_addr);
+ if (UseCompressedOops) {
+ keep_alive->do_oop((narrowOop*)next_addr);
+ } else {
+ keep_alive->do_oop((oop*)next_addr);
+ }
} else {
iter.next();
}
@@ -639,15 +676,15 @@ ReferenceProcessor::pp2_work_concurrent_discovery(
}
// Traverse the list and process the referents, by either
-// either clearing them or keeping them (and their reachable
+// clearing them or keeping them (and their reachable
// closure) alive.
void
-ReferenceProcessor::process_phase3(DiscoveredList& refs_list_addr,
+ReferenceProcessor::process_phase3(DiscoveredList& refs_list,
bool clear_referent,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc) {
- DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+ DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
while (iter.has_next()) {
iter.update_discovered();
iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
@@ -661,7 +698,7 @@ ReferenceProcessor::process_phase3(DiscoveredList& refs_list_addr,
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
clear_referent ? "cleared " : "",
- (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+ iter.obj(), iter.obj()->blueprint()->internal_name());
}
assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
// If discovery is concurrent, we may have objects with null referents,
@@ -679,15 +716,15 @@ ReferenceProcessor::process_phase3(DiscoveredList& refs_list_addr,
}
void
-ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& ref_list) {
- oop obj = ref_list.head();
- while (obj != _sentinelRef) {
- oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
- obj = *discovered_addr;
- *discovered_addr = NULL;
- }
- ref_list.set_head(_sentinelRef);
- ref_list.set_length(0);
+ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
+ oop obj = refs_list.head();
+ while (obj != sentinel_ref()) {
+ oop discovered = java_lang_ref_Reference::discovered(obj);
+ java_lang_ref_Reference::set_discovered_raw(obj, NULL);
+ obj = discovered;
+ }
+ refs_list.set_head(sentinel_ref());
+ refs_list.set_length(0);
}
void
@@ -777,7 +814,7 @@ void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
// find an element to split the list on
for (size_t j = 0; j < refs_to_move; ++j) {
move_tail = new_head;
- new_head = *java_lang_ref_Reference::discovered_addr(new_head);
+ new_head = java_lang_ref_Reference::discovered(new_head);
}
java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
ref_lists[to_idx].set_head(move_head);
@@ -875,17 +912,17 @@ void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list)
size_t length = refs_list.length();
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
- oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
- assert((*next_addr)->is_oop_or_null(), "bad next field");
+ oop next = java_lang_ref_Reference::next(iter.obj());
+ assert(next->is_oop_or_null(), "bad next field");
// If referent has been cleared or Reference is not active,
// drop it.
- if (iter.referent() == NULL || *next_addr != NULL) {
+ if (iter.referent() == NULL || next != NULL) {
debug_only(
if (PrintGCDetails && TraceReferenceGC) {
gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
INTPTR_FORMAT " with next field: " INTPTR_FORMAT
" and referent: " INTPTR_FORMAT,
- (address)iter.obj(), (address)*next_addr, (address)iter.referent());
+ iter.obj(), next, iter.referent());
}
)
// Remove Reference object from list
@@ -950,18 +987,21 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
return list;
}
-inline void ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& list,
- oop obj, oop* discovered_addr) {
+inline void
+ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
+ oop obj,
+ HeapWord* discovered_addr) {
assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
// First we must make sure this object is only enqueued once. CAS in a non null
// discovered_addr.
- oop retest = (oop)Atomic::cmpxchg_ptr(list.head(), discovered_addr, NULL);
+ oop retest = oopDesc::atomic_compare_exchange_oop(refs_list.head(), discovered_addr,
+ NULL);
if (retest == NULL) {
// This thread just won the right to enqueue the object.
// We have separate lists for enqueueing so no synchronization
// is necessary.
- list.set_head(obj);
- list.set_length(list.length() + 1);
+ refs_list.set_head(obj);
+ refs_list.set_length(refs_list.length() + 1);
} else {
// If retest was non NULL, another thread beat us to it:
// The reference has already been discovered...
@@ -972,7 +1012,6 @@ inline void ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& list,
}
}
-
// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
// (or part of the heap being collected, indicated by our "span"
@@ -1006,8 +1045,8 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
return false;
}
// We only enqueue active references.
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
- if (*next_addr != NULL) {
+ oop next = java_lang_ref_Reference::next(obj);
+ if (next != NULL) {
return false;
}
@@ -1034,14 +1073,14 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
}
}
- oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
- assert(discovered_addr != NULL && (*discovered_addr)->is_oop_or_null(),
- "bad discovered field");
- if (*discovered_addr != NULL) {
+ HeapWord* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
+ oop discovered = java_lang_ref_Reference::discovered(obj);
+ assert(discovered->is_oop_or_null(), "bad discovered field");
+ if (discovered != NULL) {
// The reference has already been discovered...
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
- (oopDesc*)obj, obj->blueprint()->internal_name());
+ obj, obj->blueprint()->internal_name());
}
if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
// assumes that an object is not processed twice;
@@ -1088,7 +1127,7 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
if (_discovery_is_mt) {
add_to_discovered_list_mt(*list, obj, discovered_addr);
} else {
- *discovered_addr = list->head();
+ oop_store_raw(discovered_addr, list->head());
list->set_head(obj);
list->set_length(list->length() + 1);
}
@@ -1106,7 +1145,7 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
oop referent = java_lang_ref_Reference::referent(obj);
if (PrintGCDetails) {
gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
- (oopDesc*) obj, obj->blueprint()->internal_name());
+ obj, obj->blueprint()->internal_name());
}
assert(referent->is_oop(), "Enqueued a bad referent");
}
@@ -1181,17 +1220,20 @@ void ReferenceProcessor::preclean_discovered_references(
// are not active (have a non-NULL next field). NOTE: For this to work
// correctly, refs discovery can not be happening concurrently with this
// step.
-void ReferenceProcessor::preclean_discovered_reflist(
- DiscoveredList& refs_list, BoolObjectClosure* is_alive,
- OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield) {
-
+void
+ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
+ BoolObjectClosure* is_alive,
+ OopClosure* keep_alive,
+ VoidClosure* complete_gc,
+ YieldClosure* yield) {
DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
size_t length = refs_list.length();
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
- oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+ oop obj = iter.obj();
+ oop next = java_lang_ref_Reference::next(obj);
if (iter.referent() == NULL || iter.is_referent_alive() ||
- *next_addr != NULL) {
+ next != NULL) {
// The referent has been cleared, or is alive, or the Reference is not
// active; we need to trace and mark its cohort.
if (TraceReferenceGC) {
@@ -1203,7 +1245,13 @@ void ReferenceProcessor::preclean_discovered_reflist(
--length;
// Keep alive its cohort.
iter.make_referent_alive();
- keep_alive->do_oop(next_addr);
+ if (UseCompressedOops) {
+ narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
+ keep_alive->do_oop(next_addr);
+ } else {
+ oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
+ keep_alive->do_oop(next_addr);
+ }
} else {
iter.next();
}
@@ -1241,7 +1289,7 @@ void ReferenceProcessor::verify_ok_to_handle_reflists() {
#endif
void ReferenceProcessor::verify() {
- guarantee(_sentinelRef != NULL && _sentinelRef->is_oop(), "Lost _sentinelRef");
+ guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}
#ifndef PRODUCT
@@ -1249,12 +1297,12 @@ void ReferenceProcessor::clear_discovered_references() {
guarantee(!_discovering_refs, "Discovering refs?");
for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
oop obj = _discoveredSoftRefs[i].head();
- while (obj != _sentinelRef) {
+ while (obj != sentinel_ref()) {
oop next = java_lang_ref_Reference::discovered(obj);
java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
obj = next;
}
- _discoveredSoftRefs[i].set_head(_sentinelRef);
+ _discoveredSoftRefs[i].set_head(sentinel_ref());
_discoveredSoftRefs[i].set_length(0);
}
}
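
The DiscoveredList rework above keeps one logical head in two physical forms and selects between them with UseCompressedOops at run time, handing out a type-erased HeapWord* when a caller needs the slot's address; weak_oops_do and enqueue_discovered_ref_helper<T> then cast that address back based on the same flag. A compact sketch of the idea, with stand-in types and the zero-base, shift-3 encode/decode used in the earlier notes:

#include <cstdint>

class oopDesc; typedef oopDesc* oop;
typedef uint32_t  narrowOop;
typedef uintptr_t HeapWord;              // stand-in for HotSpot's opaque heap word

static bool UseCompressedOops = false;   // stand-in for the real VM flag

inline oop       decode_not_null(narrowOop v) { return (oop)((uintptr_t)v << 3); }
inline narrowOop encode_not_null(oop o)       { return (narrowOop)((uintptr_t)o >> 3); }

// One logical head, two physical representations, chosen by the flag at run
// time rather than by templating the class (which would ripple through every
// declaration that names DiscoveredList).
class DiscoveredListSketch {
  oop       _oop_head;
  narrowOop _compressed_head;
 public:
  oop head() const {
    return UseCompressedOops ? decode_not_null(_compressed_head) : _oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) _compressed_head = encode_not_null(o);
    else                   _oop_head        = o;
  }
  // Type-erased address handed to closures; callers cast it back with the
  // same flag, e.g. f->do_oop((narrowOop*)adr_head()) vs. (oop*)adr_head().
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head
                             : (HeapWord*)&_oop_head;
  }
};
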
diff --git a/src/share/vm/memory/referenceProcessor.hpp b/src/share/vm/memory/referenceProcessor.hpp
index 29e2a7b77..e11f95645 100644
--- a/src/share/vm/memory/referenceProcessor.hpp
+++ b/src/share/vm/memory/referenceProcessor.hpp
@@ -45,8 +45,6 @@ class AbstractRefProcTaskExecutor;
class DiscoveredList;
class ReferenceProcessor : public CHeapObj {
- friend class DiscoveredList;
- friend class DiscoveredListIterator;
protected:
// End of list marker
static oop _sentinelRef;
@@ -70,16 +68,20 @@ class ReferenceProcessor : public CHeapObj {
BoolObjectClosure* _is_alive_non_header;
// The discovered ref lists themselves
- int _num_q; // the MT'ness degree of the queues below
- DiscoveredList* _discoveredSoftRefs; // pointer to array of oops
+
+ // The MT'ness degree of the queues below
+ int _num_q;
+ // Arrays of lists of oops, one per thread
+ DiscoveredList* _discoveredSoftRefs;
DiscoveredList* _discoveredWeakRefs;
DiscoveredList* _discoveredFinalRefs;
DiscoveredList* _discoveredPhantomRefs;
public:
- int num_q() { return _num_q; }
+ int num_q() { return _num_q; }
DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
- static oop* sentinel_ref() { return &_sentinelRef; }
+ static oop sentinel_ref() { return _sentinelRef; }
+ static oop* adr_sentinel_ref() { return &_sentinelRef; }
public:
// Process references with a certain reachability level.
@@ -98,45 +100,45 @@ class ReferenceProcessor : public CHeapObj {
// Work methods used by the method process_discovered_reflist
// Phase1: keep alive all those referents that are otherwise
// dead but which must be kept alive by policy (and their closure).
- void process_phase1(DiscoveredList& refs_list_addr,
+ void process_phase1(DiscoveredList& refs_list,
ReferencePolicy* policy,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc);
// Phase2: remove all those references whose referents are
// reachable.
- inline void process_phase2(DiscoveredList& refs_list_addr,
+ inline void process_phase2(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc) {
if (discovery_is_atomic()) {
// complete_gc is ignored in this case for this phase
- pp2_work(refs_list_addr, is_alive, keep_alive);
+ pp2_work(refs_list, is_alive, keep_alive);
} else {
assert(complete_gc != NULL, "Error");
- pp2_work_concurrent_discovery(refs_list_addr, is_alive,
+ pp2_work_concurrent_discovery(refs_list, is_alive,
keep_alive, complete_gc);
}
}
// Work methods in support of process_phase2
- void pp2_work(DiscoveredList& refs_list_addr,
+ void pp2_work(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
OopClosure* keep_alive);
void pp2_work_concurrent_discovery(
- DiscoveredList& refs_list_addr,
+ DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc);
// Phase3: process the referents by either clearing them
// or keeping them alive (and their closure)
- void process_phase3(DiscoveredList& refs_list_addr,
+ void process_phase3(DiscoveredList& refs_list,
bool clear_referent,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc);
// Enqueue references with a certain reachability level
- void enqueue_discovered_reflist(DiscoveredList& refs_list, oop* pending_list_addr);
+ void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);
// "Preclean" all the discovered reference lists
// by removing references with strongly reachable referents.
@@ -169,6 +171,8 @@ class ReferenceProcessor : public CHeapObj {
// occupying the i / _num_q slot.
const char* list_name(int i);
+ void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
+
protected:
// "Preclean" the given discovered reference list
// by removing references with strongly reachable referents.
@@ -179,7 +183,6 @@ class ReferenceProcessor : public CHeapObj {
VoidClosure* complete_gc,
YieldClosure* yield);
- void enqueue_discovered_reflists(oop* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
int next_id() {
int id = _next_id;
if (++_next_id == _num_q) {
@@ -189,7 +192,7 @@ class ReferenceProcessor : public CHeapObj {
}
DiscoveredList* get_discovered_list(ReferenceType rt);
inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
- oop* discovered_addr);
+ HeapWord* discovered_addr);
void verify_ok_to_handle_reflists() PRODUCT_RETURN;
void abandon_partial_discovered_list(DiscoveredList& refs_list);
@@ -477,7 +480,7 @@ class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
EnqueueTask(ReferenceProcessor& ref_processor,
DiscoveredList refs_lists[],
- oop* pending_list_addr,
+ HeapWord* pending_list_addr,
oop sentinel_ref,
int n_queues)
: _ref_processor(ref_processor),
@@ -493,7 +496,7 @@ public:
protected:
ReferenceProcessor& _ref_processor;
DiscoveredList* _refs_lists;
- oop* _pending_list_addr;
+ HeapWord* _pending_list_addr;
oop _sentinel_ref;
int _n_queues;
};
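The HeapWord* signatures above exist because, with compressed oops, a discovered or pending-list slot may hold either a full-width oop or a narrowOop, so callers now hand over an untyped address and the store picks the width at runtime; sentinel_ref() is likewise split into a value accessor plus adr_sentinel_ref() for code that needs the slot itself. A minimal sketch of the idea, with stand-in names rather than the real HotSpot types:

  // Illustrative sketch only (not part of the patch): the slot behind a
  // HeapWord* is reinterpreted according to the compressed-oops mode.
  #include <cstdint>

  typedef uintptr_t oop_t;        // models a full-width oop
  typedef uint32_t  narrow_t;     // models a narrowOop
  typedef char*     heapword_ptr; // models HeapWord*

  static bool use_compressed_oops = true;                        // the VM flag
  static narrow_t encode(oop_t o) { return (narrow_t)(o >> 3); } // assumed encoding

  void store_ref(heapword_ptr slot, oop_t value) {
    if (use_compressed_oops) {
      *reinterpret_cast<narrow_t*>(slot) = encode(value);        // 32-bit store
    } else {
      *reinterpret_cast<oop_t*>(slot) = value;                   // full-width store
    }
  }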
diff --git a/src/share/vm/memory/restore.cpp b/src/share/vm/memory/restore.cpp
index a677a8517..0a84749bc 100644
--- a/src/share/vm/memory/restore.cpp
+++ b/src/share/vm/memory/restore.cpp
@@ -50,6 +50,8 @@ public:
*p = obj;
}
+ void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
void do_ptr(void** p) {
assert(*p == NULL, "initializing previous initialized pointer.");
void* obj = nextOop();
diff --git a/src/share/vm/memory/serialize.cpp b/src/share/vm/memory/serialize.cpp
index ec4688d48..6f9ba38b1 100644
--- a/src/share/vm/memory/serialize.cpp
+++ b/src/share/vm/memory/serialize.cpp
@@ -41,17 +41,18 @@ void CompactingPermGenGen::serialize_oops(SerializeOopClosure* soc) {
int tag = 0;
soc->do_tag(--tag);
+ assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive");
// Verify the sizes of various oops in the system.
soc->do_tag(sizeof(oopDesc));
soc->do_tag(sizeof(instanceOopDesc));
soc->do_tag(sizeof(methodOopDesc));
soc->do_tag(sizeof(constMethodOopDesc));
soc->do_tag(sizeof(methodDataOopDesc));
- soc->do_tag(sizeof(arrayOopDesc));
+ soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
soc->do_tag(sizeof(constantPoolOopDesc));
soc->do_tag(sizeof(constantPoolCacheOopDesc));
- soc->do_tag(sizeof(objArrayOopDesc));
- soc->do_tag(sizeof(typeArrayOopDesc));
+ soc->do_tag(objArrayOopDesc::base_offset_in_bytes(T_BYTE));
+ soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
soc->do_tag(sizeof(symbolOopDesc));
soc->do_tag(sizeof(klassOopDesc));
soc->do_tag(sizeof(markOopDesc));
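The serialize.cpp tags switch from sizeof(arrayOopDesc) to base_offset_in_bytes(T_BYTE) because the array header size is now a runtime property of UseCompressedOops rather than a compile-time constant (hence also the assert that shared archives and compressed oops are mutually exclusive here). A rough model of why the two tag values diverge, with assumed field widths:

  // Illustrative only; the mark/klass/length widths below are assumptions.
  #include <cstdio>

  static int array_base_offset(bool compressed) {
    const int mark_bytes  = 8;
    const int klass_bytes = compressed ? 4 : 8;  // compressed klass reference
    const int len_bytes   = 4;                   // array length
    int hs = mark_bytes + klass_bytes + len_bytes;
    return (hs + 7) & ~7;                        // align to an 8-byte HeapWord
  }

  int main() {
    printf("wide oops:       %d\n", array_base_offset(false)); // prints 24
    printf("compressed oops: %d\n", array_base_offset(true));  // prints 16
    return 0;
  }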
diff --git a/src/share/vm/memory/sharedHeap.cpp b/src/share/vm/memory/sharedHeap.cpp
index c37bbf8a2..015f9807a 100644
--- a/src/share/vm/memory/sharedHeap.cpp
+++ b/src/share/vm/memory/sharedHeap.cpp
@@ -74,9 +74,10 @@ void SharedHeap::set_par_threads(int t) {
class AssertIsPermClosure: public OopClosure {
public:
- void do_oop(oop* p) {
+ virtual void do_oop(oop* p) {
assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
}
+ virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;
@@ -187,12 +188,13 @@ class SkipAdjustingSharedStrings: public OopClosure {
public:
SkipAdjustingSharedStrings(OopClosure* clo) : _clo(clo) {}
- void do_oop(oop* p) {
+ virtual void do_oop(oop* p) {
oop o = (*p);
if (!o->is_shared_readwrite()) {
_clo->do_oop(p);
}
}
+ virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
// Unmarked shared Strings in the StringTable (which got there due to
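Across the patch every OopClosure grows a narrowOop* overload: closures that can never see compressed references fail loudly with ShouldNotReachHere(), while the rest forward both overloads to one template worker. A stripped-down sketch of that shape (stand-in types, not the real class hierarchy):

  // Simplified model of the do_oop(oop*) / do_oop(narrowOop*) split.
  #include <cassert>
  #include <cstdint>

  typedef uintptr_t oop;       // stand-in for the real oop type
  typedef uint32_t  narrowOop; // stand-in for the compressed form

  struct OopClosureModel {
    virtual void do_oop(oop* p) = 0;
    virtual void do_oop(narrowOop* p) = 0;
    virtual ~OopClosureModel() {}
  };

  // Width-agnostic closures forward to a shared template worker.
  struct CountingClosure : OopClosureModel {
    int count;
    CountingClosure() : count(0) {}
    template <class T> void do_oop_work(T* p) { if (*p != 0) ++count; }
    virtual void do_oop(oop* p)       { do_oop_work(p); }
    virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  };

  // Closures that only ever walk wide oops trap on the narrow overload,
  // mirroring the ShouldNotReachHere() bodies above.
  struct WideOnlyClosure : OopClosureModel {
    virtual void do_oop(oop* p)       { (void)p; /* real work here */ }
    virtual void do_oop(narrowOop* p) { (void)p; assert(!"unexpected narrowOop"); }
  };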
diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
index dd4392091..eeab52e60 100644
--- a/src/share/vm/memory/space.cpp
+++ b/src/share/vm/memory/space.cpp
@@ -25,6 +25,9 @@
# include "incls/_precompiled.incl"
# include "incls/_space.cpp.incl"
+void SpaceMemRegionOopsIterClosure::do_oop(oop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
+void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
+
HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
HeapWord* top_obj) {
if (top_obj != NULL) {
@@ -150,10 +153,6 @@ DirtyCardToOopClosure* Space::new_dcto_cl(OopClosure* cl,
return new DirtyCardToOopClosure(this, cl, precision, boundary);
}
-void FilteringClosure::do_oop(oop* p) {
- do_oop_nv(p);
-}
-
HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
HeapWord* top_obj) {
if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
@@ -337,7 +336,7 @@ HeapWord* CompactibleSpace::forward(oop q, size_t size,
assert(q->forwardee() == NULL, "should be forwarded to NULL");
}
- debug_only(MarkSweep::register_live_oop(q, size));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, size));
compact_top += size;
// we need to update the offset table so that the beginnings of objects can be
@@ -406,13 +405,13 @@ void Space::adjust_pointers() {
if (oop(q)->is_gc_marked()) {
// q is alive
- debug_only(MarkSweep::track_interior_pointers(oop(q)));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
// point all the oops to the new location
size_t size = oop(q)->adjust_pointers();
- debug_only(MarkSweep::check_interior_pointers());
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
debug_only(prev_q = q);
- debug_only(MarkSweep::validate_live_oop(oop(q), size));
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
q += size;
} else {
@@ -884,10 +883,13 @@ OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOff
class VerifyOldOopClosure : public OopClosure {
public:
- oop the_obj;
- bool allow_dirty;
+ oop _the_obj;
+ bool _allow_dirty;
void do_oop(oop* p) {
- the_obj->verify_old_oop(p, allow_dirty);
+ _the_obj->verify_old_oop(p, _allow_dirty);
+ }
+ void do_oop(narrowOop* p) {
+ _the_obj->verify_old_oop(p, _allow_dirty);
}
};
@@ -898,7 +900,7 @@ void OffsetTableContigSpace::verify(bool allow_dirty) const {
HeapWord* p = bottom();
HeapWord* prev_p = NULL;
VerifyOldOopClosure blk; // Does this do anything?
- blk.allow_dirty = allow_dirty;
+ blk._allow_dirty = allow_dirty;
int objs = 0;
int blocks = 0;
@@ -919,7 +921,7 @@ void OffsetTableContigSpace::verify(bool allow_dirty) const {
if (objs == OBJ_SAMPLE_INTERVAL) {
oop(p)->verify();
- blk.the_obj = oop(p);
+ blk._the_obj = oop(p);
oop(p)->oop_iterate(&blk);
objs = 0;
} else {
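The debug_only(...) wrappers above become VALIDATE_MARK_SWEEP_ONLY(...), so the fairly expensive live-oop bookkeeping can be switched on independently of ordinary debug builds. Its exact definition lives elsewhere in the patch; assuming the usual conditional-expansion idiom, it would look roughly like:

  // Hedged sketch of the assumed macro shape, not the actual definition.
  #ifdef VALIDATE_MARK_SWEEP
    #define VALIDATE_MARK_SWEEP_ONLY(code) code          // run the checks
  #else
    #define VALIDATE_MARK_SWEEP_ONLY(code)               // compiled away
  #endif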
diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
index e036004ce..37f726e5b 100644
--- a/src/share/vm/memory/space.hpp
+++ b/src/share/vm/memory/space.hpp
@@ -52,21 +52,24 @@ class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;
-
// An oop closure that is circumscribed by a filtering memory region.
-class SpaceMemRegionOopsIterClosure: public virtual OopClosure {
- OopClosure* cl;
- MemRegion mr;
-public:
- void do_oop(oop* p) {
- if (mr.contains(p)) {
- cl->do_oop(p);
+class SpaceMemRegionOopsIterClosure: public OopClosure {
+ private:
+ OopClosure* _cl;
+ MemRegion _mr;
+ protected:
+ template <class T> void do_oop_work(T* p) {
+ if (_mr.contains(p)) {
+ _cl->do_oop(p);
}
}
- SpaceMemRegionOopsIterClosure(OopClosure* _cl, MemRegion _mr): cl(_cl), mr(_mr) {}
+ public:
+ SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr):
+ _cl(cl), _mr(mr) {}
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
};
-
// A Space describes a heap area. Class Space is an abstract
// base class.
//
@@ -279,7 +282,7 @@ protected:
CardTableModRefBS::PrecisionStyle _precision;
HeapWord* _boundary; // If non-NULL, process only non-NULL oops
// pointing below boundary.
- HeapWord* _min_done; // ObjHeadPreciseArray precision requires
+ HeapWord* _min_done; // ObjHeadPreciseArray precision requires
// a downwards traversal; this is the
// lowest location already done (or,
// alternatively, the lowest address that
@@ -508,7 +511,7 @@ protected:
/* prefetch beyond q */ \
Prefetch::write(q, interval); \
/* size_t size = oop(q)->size(); changing this for cms for perm gen */\
- size_t size = block_size(q); \
+ size_t size = block_size(q); \
compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
q += size; \
end_of_live = q; \
@@ -572,147 +575,149 @@ protected:
cp->space->set_compaction_top(compact_top); \
}
-#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
- /* adjust all the interior pointers to point at the new locations of objects \
- * Used by MarkSweep::mark_sweep_phase3() */ \
+#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
+ /* adjust all the interior pointers to point at the new locations of objects \
+ * Used by MarkSweep::mark_sweep_phase3() */ \
\
- HeapWord* q = bottom(); \
- HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
+ HeapWord* q = bottom(); \
+ HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
\
- assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
+ assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
\
- if (q < t && _first_dead > q && \
+ if (q < t && _first_dead > q && \
!oop(q)->is_gc_marked()) { \
/* we have a chunk of the space which hasn't moved and we've \
* reinitialized the mark word during the previous pass, so we can't \
- * use is_gc_marked for the traversal. */ \
+ * use is_gc_marked for the traversal. */ \
HeapWord* end = _first_dead; \
\
- while (q < end) { \
- /* I originally tried to conjoin "block_start(q) == q" to the \
- * assertion below, but that doesn't work, because you can't \
- * accurately traverse previous objects to get to the current one \
- * after their pointers (including pointers into permGen) have been \
- * updated, until the actual compaction is done. dld, 4/00 */ \
- assert(block_is_obj(q), \
- "should be at block boundaries, and should be looking at objs"); \
- \
- debug_only(MarkSweep::track_interior_pointers(oop(q))); \
+ while (q < end) { \
+ /* I originally tried to conjoin "block_start(q) == q" to the \
+ * assertion below, but that doesn't work, because you can't \
+ * accurately traverse previous objects to get to the current one \
+ * after their pointers (including pointers into permGen) have been \
+ * updated, until the actual compaction is done. dld, 4/00 */ \
+ assert(block_is_obj(q), \
+ "should be at block boundaries, and should be looking at objs"); \
\
- /* point all the oops to the new location */ \
- size_t size = oop(q)->adjust_pointers(); \
- size = adjust_obj_size(size); \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
\
- debug_only(MarkSweep::check_interior_pointers()); \
- \
- debug_only(MarkSweep::validate_live_oop(oop(q), size)); \
+ /* point all the oops to the new location */ \
+ size_t size = oop(q)->adjust_pointers(); \
+ size = adjust_obj_size(size); \
\
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
+ \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
+ \
q += size; \
- } \
+ } \
\
- if (_first_dead == t) { \
- q = t; \
- } else { \
- /* $$$ This is funky. Using this to read the previously written \
- * LiveRange. See also use below. */ \
+ if (_first_dead == t) { \
+ q = t; \
+ } else { \
+ /* $$$ This is funky. Using this to read the previously written \
+ * LiveRange. See also use below. */ \
q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
- } \
- } \
+ } \
+ } \
\
const intx interval = PrefetchScanIntervalInBytes; \
\
- debug_only(HeapWord* prev_q = NULL); \
- while (q < t) { \
- /* prefetch beyond q */ \
+ debug_only(HeapWord* prev_q = NULL); \
+ while (q < t) { \
+ /* prefetch beyond q */ \
Prefetch::write(q, interval); \
- if (oop(q)->is_gc_marked()) { \
- /* q is alive */ \
- debug_only(MarkSweep::track_interior_pointers(oop(q))); \
- /* point all the oops to the new location */ \
- size_t size = oop(q)->adjust_pointers(); \
- size = adjust_obj_size(size); \
- debug_only(MarkSweep::check_interior_pointers()); \
- debug_only(MarkSweep::validate_live_oop(oop(q), size)); \
- debug_only(prev_q = q); \
+ if (oop(q)->is_gc_marked()) { \
+ /* q is alive */ \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
+ /* point all the oops to the new location */ \
+ size_t size = oop(q)->adjust_pointers(); \
+ size = adjust_obj_size(size); \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
+ debug_only(prev_q = q); \
q += size; \
- } else { \
- /* q is not a live object, so its mark should point at the next \
- * live object */ \
- debug_only(prev_q = q); \
- q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
- assert(q > prev_q, "we should be moving forward through memory"); \
- } \
- } \
+ } else { \
+ /* q is not a live object, so its mark should point at the next \
+ * live object */ \
+ debug_only(prev_q = q); \
+ q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
+ assert(q > prev_q, "we should be moving forward through memory"); \
+ } \
+ } \
\
- assert(q == t, "just checking"); \
+ assert(q == t, "just checking"); \
}
-#define SCAN_AND_COMPACT(obj_size) { \
+#define SCAN_AND_COMPACT(obj_size) { \
/* Copy all live objects to their new location \
- * Used by MarkSweep::mark_sweep_phase4() */ \
+ * Used by MarkSweep::mark_sweep_phase4() */ \
\
- HeapWord* q = bottom(); \
- HeapWord* const t = _end_of_live; \
- debug_only(HeapWord* prev_q = NULL); \
+ HeapWord* q = bottom(); \
+ HeapWord* const t = _end_of_live; \
+ debug_only(HeapWord* prev_q = NULL); \
\
- if (q < t && _first_dead > q && \
+ if (q < t && _first_dead > q && \
!oop(q)->is_gc_marked()) { \
- debug_only( \
- /* we have a chunk of the space which hasn't moved and we've reinitialized the \
- * mark word during the previous pass, so we can't use is_gc_marked for the \
- * traversal. */ \
- HeapWord* const end = _first_dead; \
- \
- while (q < end) { \
+ debug_only( \
+ /* we have a chunk of the space which hasn't moved and we've reinitialized \
+ * the mark word during the previous pass, so we can't use is_gc_marked for \
+ * the traversal. */ \
+ HeapWord* const end = _first_dead; \
+ \
+ while (q < end) { \
size_t size = obj_size(q); \
- assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)"); \
- debug_only(MarkSweep::live_oop_moved_to(q, size, q)); \
- debug_only(prev_q = q); \
+ assert(!oop(q)->is_gc_marked(), \
+ "should be unmarked (special dense prefix handling)"); \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); \
+ debug_only(prev_q = q); \
q += size; \
- } \
- ) /* debug_only */ \
+ } \
+ ) /* debug_only */ \
+ \
+ if (_first_dead == t) { \
+ q = t; \
+ } else { \
+ /* $$$ Funky */ \
+ q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
+ } \
+ } \
\
- if (_first_dead == t) { \
- q = t; \
- } else { \
- /* $$$ Funky */ \
- q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
- } \
- } \
- \
- const intx scan_interval = PrefetchScanIntervalInBytes; \
- const intx copy_interval = PrefetchCopyIntervalInBytes; \
- while (q < t) { \
- if (!oop(q)->is_gc_marked()) { \
- /* mark is pointer to next marked oop */ \
- debug_only(prev_q = q); \
- q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
- assert(q > prev_q, "we should be moving forward through memory"); \
- } else { \
- /* prefetch beyond q */ \
+ const intx scan_interval = PrefetchScanIntervalInBytes; \
+ const intx copy_interval = PrefetchCopyIntervalInBytes; \
+ while (q < t) { \
+ if (!oop(q)->is_gc_marked()) { \
+ /* mark is pointer to next marked oop */ \
+ debug_only(prev_q = q); \
+ q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
+ assert(q > prev_q, "we should be moving forward through memory"); \
+ } else { \
+ /* prefetch beyond q */ \
Prefetch::read(q, scan_interval); \
\
/* size and destination */ \
size_t size = obj_size(q); \
HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
\
- /* prefetch beyond compaction_top */ \
+ /* prefetch beyond compaction_top */ \
Prefetch::write(compaction_top, copy_interval); \
\
- /* copy object and reinit its mark */ \
- debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top)); \
- assert(q != compaction_top, "everything in this pass should be moving"); \
- Copy::aligned_conjoint_words(q, compaction_top, size); \
- oop(compaction_top)->init_mark(); \
- assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
+ /* copy object and reinit its mark */ \
+ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \
+ compaction_top)); \
+ assert(q != compaction_top, "everything in this pass should be moving"); \
+ Copy::aligned_conjoint_words(q, compaction_top, size); \
+ oop(compaction_top)->init_mark(); \
+ assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
\
- debug_only(prev_q = q); \
+ debug_only(prev_q = q); \
q += size; \
- } \
- } \
+ } \
+ } \
\
/* Reset space after compaction is complete */ \
- reset_after_compaction(); \
+ reset_after_compaction(); \
/* We do this clear, below, since it has overloaded meanings for some */ \
/* space subtypes. For example, OffsetTableContigSpace's that were */ \
/* compacted into will have had their offset table thresholds updated */ \
diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp
index bbc3ee4aa..d69016370 100644
--- a/src/share/vm/memory/universe.cpp
+++ b/src/share/vm/memory/universe.cpp
@@ -99,6 +99,7 @@ size_t Universe::_heap_capacity_at_last_gc;
size_t Universe::_heap_used_at_last_gc;
CollectedHeap* Universe::_collectedHeap = NULL;
+address Universe::_heap_base = NULL;
void Universe::basic_type_classes_do(void f(klassOop)) {
@@ -464,7 +465,7 @@ void Universe::init_self_patching_vtbl_list(void** list, int count) {
class FixupMirrorClosure: public ObjectClosure {
public:
- void do_object(oop obj) {
+ virtual void do_object(oop obj) {
if (obj->is_klass()) {
EXCEPTION_MARK;
KlassHandle k(THREAD, klassOop(obj));
@@ -667,7 +668,7 @@ jint universe_init() {
"LogHeapWordSize is incorrect.");
guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
- "oop size is not not a multiple of HeapWord size");
+         "oop size is not a multiple of HeapWord size");
TraceTime timer("Genesis", TraceStartupTime);
GC_locker::lock(); // do not allow gc during bootstrapping
JavaClasses::compute_hard_coded_offsets();
@@ -759,6 +760,15 @@ jint Universe::initialize_heap() {
if (status != JNI_OK) {
return status;
}
+ if (UseCompressedOops) {
+ // Subtract a page because something can get allocated at heap base.
+ // This also makes implicit null checking work, because the
+ // memory+1 page below heap_base needs to cause a signal.
+ // See needs_explicit_null_check.
+ // Only set the heap base for compressed oops because it indicates
+ // compressed oops for pstack code.
+ Universe::_heap_base = Universe::heap()->base() - os::vm_page_size();
+ }
// We will never reach the CATCH below since Exceptions::_throw will cause
// the VM to exit if an exception is thrown during initialization
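Setting _heap_base one page below the real heap is what lets a compressed null decode, with no explicit check, to an address inside a protected page, so the subsequent load still traps and implicit null checks keep working. A hedged sketch of the base-plus-shift scheme this implies (the helper names and the 3-bit shift are assumptions derived from 8-byte object alignment, not code from this patch):

  #include <cstdint>

  typedef uintptr_t addr_t;   // stand-in for a raw oop address
  typedef uint32_t  narrow_t; // stand-in for narrowOop

  static const int log_min_obj_alignment = 3; // 8-byte aligned objects assumed
  static addr_t    heap_base = 0;             // heap()->base() - page_size

  static narrow_t encode(addr_t o) {
    return o == 0 ? 0 : (narrow_t)((o - heap_base) >> log_min_obj_alignment);
  }

  // Fast path without a null check: decode_not_null(0) == heap_base, which
  // lies in the protected page just below the heap, so dereferencing it
  // faults -- the implicit null check the comment above relies on.
  static addr_t decode_not_null(narrow_t n) {
    return heap_base + ((addr_t)n << log_min_obj_alignment);
  }
  static addr_t decode(narrow_t n) {          // null-safe variant
    return n == 0 ? 0 : decode_not_null(n);
  }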
diff --git a/src/share/vm/memory/universe.hpp b/src/share/vm/memory/universe.hpp
index 6fd557585..7cf8da13b 100644
--- a/src/share/vm/memory/universe.hpp
+++ b/src/share/vm/memory/universe.hpp
@@ -180,10 +180,13 @@ class Universe: AllStatic {
// The particular choice of collected heap.
static CollectedHeap* _collectedHeap;
+ // Base address for oop-within-java-object materialization.
+ // NULL if using wide oops. Doubles as heap oop null value.
+ static address _heap_base;
// array of dummy objects used with +FullGCAlot
debug_only(static objArrayOop _fullgc_alot_dummy_array;)
- // index of next entry to clear
+ // index of next entry to clear
debug_only(static int _fullgc_alot_dummy_next;)
// Compiler/dispatch support
@@ -323,6 +326,10 @@ class Universe: AllStatic {
// The particular choice of collected heap.
static CollectedHeap* heap() { return _collectedHeap; }
+ // For UseCompressedOops
+ static address heap_base() { return _heap_base; }
+ static address* heap_base_addr() { return &_heap_base; }
+
// Historic gc information
static size_t get_heap_capacity_at_last_gc() { return _heap_capacity_at_last_gc; }
static size_t get_heap_free_at_last_gc() { return _heap_capacity_at_last_gc - _heap_used_at_last_gc; }
diff --git a/src/share/vm/oops/arrayOop.hpp b/src/share/vm/oops/arrayOop.hpp
index 49fc566a9..5e54a86ee 100644
--- a/src/share/vm/oops/arrayOop.hpp
+++ b/src/share/vm/oops/arrayOop.hpp
@@ -22,34 +22,79 @@
*
*/
-// arrayOopDesc is the abstract baseclass for all arrays.
+// arrayOopDesc is the abstract baseclass for all arrays. It doesn't
+// declare pure virtual to enforce this because that would allocate a vtbl
+// in each instance, which we don't want.
+
+// The layout of array Oops is:
+//
+// markOop
+// klassOop // 32 bits if compressed but declared 64 in LP64.
+// length // shares klass memory or allocated after declared fields.
+
class arrayOopDesc : public oopDesc {
friend class VMStructs;
- private:
- int _length; // number of elements in the array
- public:
// Interpreter/Compiler offsets
- static int length_offset_in_bytes() { return offset_of(arrayOopDesc, _length); }
- static int base_offset_in_bytes(BasicType type) { return header_size(type) * HeapWordSize; }
+
+ // Header size computation.
+ // The header is considered the oop part of this type plus the length.
+ // Returns the aligned header_size_in_bytes. This is not equivalent to
+ // sizeof(arrayOopDesc) which should not appear in the code, except here.
+ static int header_size_in_bytes() {
+ size_t hs = UseCompressedOops ?
+ sizeof(arrayOopDesc) :
+ align_size_up(sizeof(arrayOopDesc) + sizeof(int), HeapWordSize);
+#ifdef ASSERT
+ // make sure it isn't called before UseCompressedOops is initialized.
+ static size_t arrayoopdesc_hs = 0;
+ if (arrayoopdesc_hs == 0) arrayoopdesc_hs = hs;
+ assert(arrayoopdesc_hs == hs, "header size can't change");
+#endif // ASSERT
+ return (int)hs;
+ }
+
+ public:
+ // The _length field is not declared in C++. It is allocated after the
+ // declared nonstatic fields in arrayOopDesc if not compressed, otherwise
+ // it occupies the second half of the _klass field in oopDesc.
+ static int length_offset_in_bytes() {
+ return UseCompressedOops ? klass_gap_offset_in_bytes() :
+ sizeof(arrayOopDesc);
+ }
+
+ // Returns the offset of the first element.
+ static int base_offset_in_bytes(BasicType type) {
+ return header_size(type) * HeapWordSize;
+ }
// Returns the address of the first element.
- void* base(BasicType type) const { return (void*) (((intptr_t) this) + base_offset_in_bytes(type)); }
+ void* base(BasicType type) const {
+ return (void*) (((intptr_t) this) + base_offset_in_bytes(type));
+ }
// Tells whether index is within bounds.
bool is_within_bounds(int index) const { return 0 <= index && index < length(); }
- // Accessores for instance variable
- int length() const { return _length; }
- void set_length(int length) { _length = length; }
+ // Accessors for instance variable which is not a C++ declared nonstatic
+ // field.
+ int length() const {
+ return *(int*)(((intptr_t)this) + length_offset_in_bytes());
+ }
+ void set_length(int length) {
+ *(int*)(((intptr_t)this) + length_offset_in_bytes()) = length;
+ }
- // Header size computation.
- // Should only be called with constants as argument (will not constant fold otherwise)
+ // Should only be called with constants as argument
+ // (will not constant fold otherwise)
+ // Returns the header size in words aligned to the requirements of the
+ // array object type.
static int header_size(BasicType type) {
- return Universe::element_type_should_be_aligned(type)
- ? align_object_size(sizeof(arrayOopDesc)/HeapWordSize)
- : sizeof(arrayOopDesc)/HeapWordSize;
+ size_t typesize_in_bytes = header_size_in_bytes();
+ return (int)(Universe::element_type_should_be_aligned(type)
+ ? align_object_size(typesize_in_bytes/HeapWordSize)
+ : typesize_in_bytes/HeapWordSize);
}
// This method returns the maximum length that can be passed into
@@ -62,7 +107,7 @@ class arrayOopDesc : public oopDesc {
// We use max_jint, since object_size is internally represented by an 'int'
// This gives us an upper bound of max_jint words for the size of the oop.
int32_t max_words = (max_jint - header_size(type) - 2);
- int elembytes = (type == T_OBJECT) ? T_OBJECT_aelem_bytes : type2aelembytes(type);
+ int elembytes = type2aelembytes(type);
jlong len = ((jlong)max_words * HeapWordSize) / elembytes;
return (len > max_jint) ? max_jint : (int32_t)len;
}
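The net effect of the arrayOopDesc rewrite is that _length is no longer a declared C++ field: with compressed oops it occupies the 32-bit gap in the klass slot, otherwise it is laid out just past the declared header and aligned up to a HeapWord. A toy model of the two resulting LP64 layouts (plain structs with assumed sizes, not the real oopDesc):

  // Hypothetical layouts for illustration; the real header is not built
  // from declared structs like this.
  #include <cstdint>
  #include <cstdio>

  struct WideArrayHeader {    // UseCompressedOops == false
    uint64_t mark;
    uint64_t klass;           // full-width klass pointer
    int32_t  length;          // allocated after the declared fields
    int32_t  pad;             // aligned up to a HeapWord boundary
  };

  struct NarrowArrayHeader {  // UseCompressedOops == true
    uint64_t mark;
    uint32_t klass;           // compressed klass reference
    int32_t  length;          // fills the "klass gap"
  };

  int main() {
    printf("wide header:   %u bytes\n", (unsigned)sizeof(WideArrayHeader));   // 24
    printf("narrow header: %u bytes\n", (unsigned)sizeof(NarrowArrayHeader)); // 16
    return 0;
  }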
diff --git a/src/share/vm/oops/constantPoolKlass.cpp b/src/share/vm/oops/constantPoolKlass.cpp
index b97618739..229007f62 100644
--- a/src/share/vm/oops/constantPoolKlass.cpp
+++ b/src/share/vm/oops/constantPoolKlass.cpp
@@ -29,8 +29,9 @@ constantPoolOop constantPoolKlass::allocate(int length, TRAPS) {
int size = constantPoolOopDesc::object_size(length);
KlassHandle klass (THREAD, as_klassOop());
constantPoolOop c =
- (constantPoolOop)CollectedHeap::permanent_array_allocate(klass, size, length, CHECK_NULL);
+ (constantPoolOop)CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL);
+ c->set_length(length);
c->set_tags(NULL);
c->set_cache(NULL);
c->set_pool_holder(NULL);
@@ -54,14 +55,14 @@ constantPoolOop constantPoolKlass::allocate(int length, TRAPS) {
klassOop constantPoolKlass::create_klass(TRAPS) {
constantPoolKlass o;
- KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj());
- arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL);
- arrayKlassHandle super (THREAD, k->super());
- complete_create_array_klass(k, super, CHECK_NULL);
+ KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+ KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
+ // Make sure size calculation is right
+ assert(k()->size() == align_object_size(header_size()), "wrong size for object");
+ java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror
return k();
}
-
int constantPoolKlass::oop_size(oop obj) const {
assert(obj->is_constantPool(), "must be constantPool");
return constantPoolOop(obj)->object_size();
@@ -275,7 +276,7 @@ void constantPoolKlass::oop_print_on(oop obj, outputStream* st) {
EXCEPTION_MARK;
oop anObj;
assert(obj->is_constantPool(), "must be constantPool");
- arrayKlass::oop_print_on(obj, st);
+ Klass::oop_print_on(obj, st);
constantPoolOop cp = constantPoolOop(obj);
// Temp. remove cache so we can do lookups with original indices.
diff --git a/src/share/vm/oops/constantPoolKlass.hpp b/src/share/vm/oops/constantPoolKlass.hpp
index ac01a7b71..b563f7ddd 100644
--- a/src/share/vm/oops/constantPoolKlass.hpp
+++ b/src/share/vm/oops/constantPoolKlass.hpp
@@ -24,7 +24,8 @@
// A constantPoolKlass is the klass of a constantPoolOop
-class constantPoolKlass : public arrayKlass {
+class constantPoolKlass : public Klass {
+ juint _alloc_size; // allocation profiling support
public:
// Dispatched klass operations
bool oop_is_constantPool() const { return true; }
@@ -44,7 +45,7 @@ class constantPoolKlass : public arrayKlass {
// Sizing
static int header_size() { return oopDesc::header_size() + sizeof(constantPoolKlass)/HeapWordSize; }
- int object_size() const { return arrayKlass::object_size(header_size()); }
+ int object_size() const { return align_object_size(header_size()); }
// Garbage collection
void oop_follow_contents(oop obj);
@@ -57,6 +58,11 @@ class constantPoolKlass : public arrayKlass {
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
+ // Allocation profiling support
+ // no idea why this is pure virtual and not in Klass ???
+ juint alloc_size() const { return _alloc_size; }
+ void set_alloc_size(juint n) { _alloc_size = n; }
+
#ifndef PRODUCT
public:
// Printing
diff --git a/src/share/vm/oops/constantPoolOop.hpp b/src/share/vm/oops/constantPoolOop.hpp
index 3083a3240..b10db6bb0 100644
--- a/src/share/vm/oops/constantPoolOop.hpp
+++ b/src/share/vm/oops/constantPoolOop.hpp
@@ -34,13 +34,14 @@
class SymbolHashMap;
-class constantPoolOopDesc : public arrayOopDesc {
+class constantPoolOopDesc : public oopDesc {
friend class VMStructs;
friend class BytecodeInterpreter; // Directly extracts an oop in the pool for fast instanceof/checkcast
private:
typeArrayOop _tags; // the tag array describing the constant pool's contents
constantPoolCacheOop _cache; // the cache holding interpreter runtime information
klassOop _pool_holder; // the corresponding class
+ int _length; // number of elements in the array
// only set to non-zero if constant pool is merged by RedefineClasses
int _orig_length;
@@ -330,6 +331,14 @@ class constantPoolOopDesc : public arrayOopDesc {
bool klass_name_at_matches(instanceKlassHandle k, int which);
// Sizing
+ int length() const { return _length; }
+ void set_length(int length) { _length = length; }
+
+ // Tells whether index is within bounds.
+ bool is_within_bounds(int index) const {
+ return 0 <= index && index < length();
+ }
+
static int header_size() { return sizeof(constantPoolOopDesc)/HeapWordSize; }
static int object_size(int length) { return align_object_size(header_size() + length); }
int object_size() { return object_size(length()); }
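With constantPoolOopDesc now a plain oopDesc subclass it carries its own _length and bounds check, and its size is just the declared header plus one word per entry, aligned up. A quick worked example of that arithmetic (the word size, header size, and alignment granule below are assumptions, not values from the real class):

  #include <cstdio>

  static const int heap_word_size = 8;                                  // assumed LP64 word
  static int align_object_size(int words) { return (words + 1) & ~1; }  // 2-word granule assumed
  static int object_size(int header_words, int length) {
    return align_object_size(header_words + length);                    // one word per entry
  }

  int main() {
    int words = object_size(5, 100);   // assumed 5-word header, 100 constants
    printf("%d words (%d bytes)\n", words, words * heap_word_size);     // 106 words, 848 bytes
    return 0;
  }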
diff --git a/src/share/vm/oops/cpCacheKlass.cpp b/src/share/vm/oops/cpCacheKlass.cpp
index c3f7d764f..b57ccda8c 100644
--- a/src/share/vm/oops/cpCacheKlass.cpp
+++ b/src/share/vm/oops/cpCacheKlass.cpp
@@ -37,18 +37,19 @@ constantPoolCacheOop constantPoolCacheKlass::allocate(int length, TRAPS) {
int size = constantPoolCacheOopDesc::object_size(length);
KlassHandle klass (THREAD, as_klassOop());
constantPoolCacheOop cache = (constantPoolCacheOop)
- CollectedHeap::permanent_array_allocate(klass, size, length, CHECK_NULL);
+ CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL);
+ cache->set_length(length);
cache->set_constant_pool(NULL);
return cache;
}
-
klassOop constantPoolCacheKlass::create_klass(TRAPS) {
constantPoolCacheKlass o;
- KlassHandle klassklass(THREAD, Universe::arrayKlassKlassObj());
- arrayKlassHandle k = base_create_array_klass(o.vtbl_value(), header_size(), klassklass, CHECK_NULL);
- KlassHandle super (THREAD, k->super());
- complete_create_array_klass(k, super, CHECK_NULL);
+ KlassHandle h_this_klass(THREAD, Universe::klassKlassObj());
+ KlassHandle k = base_create_klass(h_this_klass, header_size(), o.vtbl_value(), CHECK_NULL);
+ // Make sure size calculation is right
+ assert(k()->size() == align_object_size(header_size()), "wrong size for object");
+ java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror
return k();
}
@@ -183,7 +184,7 @@ void constantPoolCacheKlass::oop_print_on(oop obj, outputStream* st) {
assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
constantPoolCacheOop cache = (constantPoolCacheOop)obj;
// super print
- arrayKlass::oop_print_on(obj, st);
+ Klass::oop_print_on(obj, st);
// print constant pool cache entries
for (int i = 0; i < cache->length(); i++) cache->entry_at(i)->print(st, i);
}
@@ -194,7 +195,7 @@ void constantPoolCacheKlass::oop_verify_on(oop obj, outputStream* st) {
guarantee(obj->is_constantPoolCache(), "obj must be constant pool cache");
constantPoolCacheOop cache = (constantPoolCacheOop)obj;
// super verify
- arrayKlass::oop_verify_on(obj, st);
+ Klass::oop_verify_on(obj, st);
// print constant pool cache entries
for (int i = 0; i < cache->length(); i++) cache->entry_at(i)->verify(st);
}
diff --git a/src/share/vm/oops/cpCacheKlass.hpp b/src/share/vm/oops/cpCacheKlass.hpp
index 7eb8d4457..9c20eb9a9 100644
--- a/src/share/vm/oops/cpCacheKlass.hpp
+++ b/src/share/vm/oops/cpCacheKlass.hpp
@@ -22,7 +22,8 @@
*
*/
-class constantPoolCacheKlass: public arrayKlass {
+class constantPoolCacheKlass: public Klass {
+ juint _alloc_size; // allocation profiling support
public:
// Dispatched klass operations
bool oop_is_constantPoolCache() const { return true; }
@@ -41,8 +42,8 @@ class constantPoolCacheKlass: public arrayKlass {
}
// Sizing
- static int header_size() { return oopDesc::header_size() + sizeof(constantPoolCacheKlass)/HeapWordSize; }
- int object_size() const { return arrayKlass::object_size(header_size()); }
+ static int header_size() { return oopDesc::header_size() + sizeof(constantPoolCacheKlass)/HeapWordSize; }
+ int object_size() const { return align_object_size(header_size()); }
// Garbage collection
void oop_follow_contents(oop obj);
@@ -55,6 +56,10 @@ class constantPoolCacheKlass: public arrayKlass {
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
+ // Allocation profiling support
+ juint alloc_size() const { return _alloc_size; }
+ void set_alloc_size(juint n) { _alloc_size = n; }
+
#ifndef PRODUCT
public:
// Printing
diff --git a/src/share/vm/oops/cpCacheOop.cpp b/src/share/vm/oops/cpCacheOop.cpp
index 3ffee53be..a8f5c051b 100644
--- a/src/share/vm/oops/cpCacheOop.cpp
+++ b/src/share/vm/oops/cpCacheOop.cpp
@@ -218,6 +218,7 @@ class LocalOopClosure: public OopClosure {
public:
LocalOopClosure(void f(oop*)) { _f = f; }
virtual void do_oop(oop* o) { _f(o); }
+ virtual void do_oop(narrowOop *o) { ShouldNotReachHere(); }
};
diff --git a/src/share/vm/oops/cpCacheOop.hpp b/src/share/vm/oops/cpCacheOop.hpp
index 55f7fcbba..fc8103618 100644
--- a/src/share/vm/oops/cpCacheOop.hpp
+++ b/src/share/vm/oops/cpCacheOop.hpp
@@ -286,12 +286,17 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
// is created and initialized before a class is actively used (i.e., initialized), the indivi-
// dual cache entries are filled at resolution (i.e., "link") time (see also: rewriter.*).
-class constantPoolCacheOopDesc: public arrayOopDesc {
+class constantPoolCacheOopDesc: public oopDesc {
friend class VMStructs;
private:
+ int _length;
constantPoolOop _constant_pool; // the corresponding constant pool
// Sizing
+ debug_only(friend class ClassVerifier;)
+ int length() const { return _length; }
+ void set_length(int length) { _length = length; }
+
static int header_size() { return sizeof(constantPoolCacheOopDesc) / HeapWordSize; }
static int object_size(int length) { return align_object_size(header_size() + length * in_words(ConstantPoolCacheEntry::size())); }
int object_size() { return object_size(length()); }
diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp
index 45cd864b9..2ce607485 100644
--- a/src/share/vm/oops/instanceKlass.cpp
+++ b/src/share/vm/oops/instanceKlass.cpp
@@ -1255,218 +1255,298 @@ bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
#endif //PRODUCT
-void instanceKlass::follow_static_fields() {
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- while (start < end) {
- if (*start != NULL) {
- assert(Universe::heap()->is_in_closed_subset(*start),
- "should be in heap");
- MarkSweep::mark_and_push(start);
- }
- start++;
+#ifdef ASSERT
+template <class T> void assert_is_in(T *p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+ assert(Universe::heap()->is_in(o), "should be in heap");
}
}
+template <class T> void assert_is_in_closed_subset(T *p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+ assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
+ }
+}
+template <class T> void assert_is_in_reserved(T *p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+ assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
+ }
+}
+template <class T> void assert_nothing(T *p) {}
+
+#else
+template <class T> void assert_is_in(T *p) {}
+template <class T> void assert_is_in_closed_subset(T *p) {}
+template <class T> void assert_is_in_reserved(T *p) {}
+template <class T> void assert_nothing(T *p) {}
+#endif // ASSERT
+
+//
+// Macros that iterate over areas of oops which are specialized on type of
+// oop pointer either narrow or wide, depending on UseCompressedOops
+//
+// Parameters are:
+// T - type of oop to point to (either oop or narrowOop)
+// start_p - starting pointer for region to iterate over
+// count - number of oops or narrowOops to iterate over
+// do_oop - action to perform on each oop (it's arbitrary C code which
+// makes it more efficient to put in a macro rather than making
+// it a template function)
+// assert_fn - assert function which is template function because performance
+// doesn't matter when enabled.
+#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
+ T, start_p, count, do_oop, \
+ assert_fn) \
+{ \
+ T* p = (T*)(start_p); \
+ T* const end = p + (count); \
+ while (p < end) { \
+ (assert_fn)(p); \
+ do_oop; \
+ ++p; \
+ } \
+}
+
+#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
+ T, start_p, count, do_oop, \
+ assert_fn) \
+{ \
+ T* const start = (T*)(start_p); \
+ T* p = start + (count); \
+ while (start < p) { \
+ --p; \
+ (assert_fn)(p); \
+ do_oop; \
+ } \
+}
+
+#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
+ T, start_p, count, low, high, \
+ do_oop, assert_fn) \
+{ \
+ T* const l = (T*)(low); \
+ T* const h = (T*)(high); \
+ assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
+ mask_bits((intptr_t)h, sizeof(T)-1) == 0, \
+ "bounded region must be properly aligned"); \
+ T* p = (T*)(start_p); \
+ T* end = p + (count); \
+ if (p < l) p = l; \
+ if (end > h) end = h; \
+ while (p < end) { \
+ (assert_fn)(p); \
+ do_oop; \
+ ++p; \
+ } \
+}
+
+
+// The following macros call specialized macros, passing either oop or
+// narrowOop as the specialization type. These test the UseCompressedOops
+// flag.
+#define InstanceKlass_OOP_ITERATE(start_p, count, \
+ do_oop, assert_fn) \
+{ \
+ if (UseCompressedOops) { \
+ InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
+ start_p, count, \
+ do_oop, assert_fn) \
+ } else { \
+ InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
+ start_p, count, \
+ do_oop, assert_fn) \
+ } \
+}
+
+#define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \
+ do_oop, assert_fn) \
+{ \
+ if (UseCompressedOops) { \
+ InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
+ start_p, count, \
+ low, high, \
+ do_oop, assert_fn) \
+ } else { \
+ InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
+ start_p, count, \
+ low, high, \
+ do_oop, assert_fn) \
+ } \
+}
+
+#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \
+{ \
+ /* Compute oopmap block range. The common case \
+ is nonstatic_oop_map_size == 1. */ \
+ OopMapBlock* map = start_of_nonstatic_oop_maps(); \
+ OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
+ if (UseCompressedOops) { \
+ while (map < end_map) { \
+ InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
+ obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
+ do_oop, assert_fn) \
+ ++map; \
+ } \
+ } else { \
+ while (map < end_map) { \
+ InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
+ obj->obj_field_addr<oop>(map->offset()), map->length(), \
+ do_oop, assert_fn) \
+ ++map; \
+ } \
+ } \
+}
+
+#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \
+{ \
+ OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \
+ OopMapBlock* map = start_map + nonstatic_oop_map_size(); \
+ if (UseCompressedOops) { \
+ while (start_map < map) { \
+ --map; \
+ InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \
+ obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
+ do_oop, assert_fn) \
+ } \
+ } else { \
+ while (start_map < map) { \
+ --map; \
+ InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \
+ obj->obj_field_addr<oop>(map->offset()), map->length(), \
+ do_oop, assert_fn) \
+ } \
+ } \
+}
+
+#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \
+ assert_fn) \
+{ \
+ /* Compute oopmap block range. The common case is \
+ nonstatic_oop_map_size == 1, so we accept the \
+ usually non-existent extra overhead of examining \
+ all the maps. */ \
+ OopMapBlock* map = start_of_nonstatic_oop_maps(); \
+ OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
+ if (UseCompressedOops) { \
+ while (map < end_map) { \
+ InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
+ obj->obj_field_addr<narrowOop>(map->offset()), map->length(), \
+ low, high, \
+ do_oop, assert_fn) \
+ ++map; \
+ } \
+ } else { \
+ while (map < end_map) { \
+ InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
+ obj->obj_field_addr<oop>(map->offset()), map->length(), \
+ low, high, \
+ do_oop, assert_fn) \
+ ++map; \
+ } \
+ } \
+}
+
+void instanceKlass::follow_static_fields() {
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ MarkSweep::mark_and_push(p), \
+ assert_is_in_closed_subset)
+}
#ifndef SERIALGC
void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- while (start < end) {
- if (*start != NULL) {
- assert(Universe::heap()->is_in(*start), "should be in heap");
- PSParallelCompact::mark_and_push(cm, start);
- }
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ PSParallelCompact::mark_and_push(cm, p), \
+ assert_is_in)
}
#endif // SERIALGC
-
void instanceKlass::adjust_static_fields() {
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- while (start < end) {
- MarkSweep::adjust_pointer(start);
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ MarkSweep::adjust_pointer(p), \
+ assert_nothing)
}
#ifndef SERIALGC
void instanceKlass::update_static_fields() {
- oop* const start = start_of_static_fields();
- oop* const beg_oop = start;
- oop* const end_oop = start + static_oop_field_size();
- for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
- PSParallelCompact::adjust_pointer(cur_oop);
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ PSParallelCompact::adjust_pointer(p), \
+ assert_nothing)
}
-void
-instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
- oop* const start = start_of_static_fields();
- oop* const beg_oop = MAX2((oop*)beg_addr, start);
- oop* const end_oop = MIN2((oop*)end_addr, start + static_oop_field_size());
- for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
- PSParallelCompact::adjust_pointer(cur_oop);
- }
+void instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
+ InstanceKlass_BOUNDED_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ beg_addr, end_addr, \
+ PSParallelCompact::adjust_pointer(p), \
+ assert_nothing )
}
#endif // SERIALGC
void instanceKlass::oop_follow_contents(oop obj) {
- assert (obj!=NULL, "can't follow the content of NULL object");
+ assert(obj != NULL, "can't follow the content of NULL object");
obj->follow_header();
- OopMapBlock* map = start_of_nonstatic_oop_maps();
- OopMapBlock* end_map = map + nonstatic_oop_map_size();
- while (map < end_map) {
- oop* start = obj->obj_field_addr(map->offset());
- oop* end = start + map->length();
- while (start < end) {
- if (*start != NULL) {
- assert(Universe::heap()->is_in_closed_subset(*start),
- "should be in heap");
- MarkSweep::mark_and_push(start);
- }
- start++;
- }
- map++;
- }
+ InstanceKlass_OOP_MAP_ITERATE( \
+ obj, \
+ MarkSweep::mark_and_push(p), \
+ assert_is_in_closed_subset)
}
#ifndef SERIALGC
void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
oop obj) {
- assert (obj!=NULL, "can't follow the content of NULL object");
+ assert(obj != NULL, "can't follow the content of NULL object");
obj->follow_header(cm);
- OopMapBlock* map = start_of_nonstatic_oop_maps();
- OopMapBlock* end_map = map + nonstatic_oop_map_size();
- while (map < end_map) {
- oop* start = obj->obj_field_addr(map->offset());
- oop* end = start + map->length();
- while (start < end) {
- if (*start != NULL) {
- assert(Universe::heap()->is_in(*start), "should be in heap");
- PSParallelCompact::mark_and_push(cm, start);
- }
- start++;
- }
- map++;
- }
+ InstanceKlass_OOP_MAP_ITERATE( \
+ obj, \
+ PSParallelCompact::mark_and_push(cm, p), \
+ assert_is_in)
}
#endif // SERIALGC
-#define invoke_closure_on(start, closure, nv_suffix) { \
- oop obj = *(start); \
- if (obj != NULL) { \
- assert(Universe::heap()->is_in_closed_subset(obj), "should be in heap"); \
- (closure)->do_oop##nv_suffix(start); \
- } \
-}
-
// closure's do_header() method dictates whether the given closure should be
// applied to the klass ptr in the object header.
-#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
- \
-int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, \
- OopClosureType* closure) { \
- SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
- /* header */ \
- if (closure->do_header()) { \
- obj->oop_iterate_header(closure); \
- } \
- /* instance variables */ \
- OopMapBlock* map = start_of_nonstatic_oop_maps(); \
- OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
- const intx field_offset = PrefetchFieldsAhead; \
- if (field_offset > 0) { \
- while (map < end_map) { \
- oop* start = obj->obj_field_addr(map->offset()); \
- oop* const end = start + map->length(); \
- while (start < end) { \
- prefetch_beyond(start, (oop*)end, field_offset, \
- closure->prefetch_style()); \
- SpecializationStats:: \
- record_do_oop_call##nv_suffix(SpecializationStats::ik); \
- invoke_closure_on(start, closure, nv_suffix); \
- start++; \
- } \
- map++; \
- } \
- } else { \
- while (map < end_map) { \
- oop* start = obj->obj_field_addr(map->offset()); \
- oop* const end = start + map->length(); \
- while (start < end) { \
- SpecializationStats:: \
- record_do_oop_call##nv_suffix(SpecializationStats::ik); \
- invoke_closure_on(start, closure, nv_suffix); \
- start++; \
- } \
- map++; \
- } \
- } \
- return size_helper(); \
-}
-
-#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
- \
-int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
- OopClosureType* closure, \
- MemRegion mr) { \
- SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
- /* header */ \
- if (closure->do_header()) { \
- obj->oop_iterate_header(closure, mr); \
- } \
- /* instance variables */ \
- OopMapBlock* map = start_of_nonstatic_oop_maps(); \
- OopMapBlock* const end_map = map + nonstatic_oop_map_size(); \
- HeapWord* bot = mr.start(); \
- HeapWord* top = mr.end(); \
- oop* start = obj->obj_field_addr(map->offset()); \
- HeapWord* end = MIN2((HeapWord*)(start + map->length()), top); \
- /* Find the first map entry that extends onto mr. */ \
- while (map < end_map && end <= bot) { \
- map++; \
- start = obj->obj_field_addr(map->offset()); \
- end = MIN2((HeapWord*)(start + map->length()), top); \
- } \
- if (map != end_map) { \
- /* The current map's end is past the start of "mr". Skip up to the first \
- entry on "mr". */ \
- while ((HeapWord*)start < bot) { \
- start++; \
- } \
- const intx field_offset = PrefetchFieldsAhead; \
- for (;;) { \
- if (field_offset > 0) { \
- while ((HeapWord*)start < end) { \
- prefetch_beyond(start, (oop*)end, field_offset, \
- closure->prefetch_style()); \
- invoke_closure_on(start, closure, nv_suffix); \
- start++; \
- } \
- } else { \
- while ((HeapWord*)start < end) { \
- invoke_closure_on(start, closure, nv_suffix); \
- start++; \
- } \
- } \
- /* Go to the next map. */ \
- map++; \
- if (map == end_map) { \
- break; \
- } \
- /* Otherwise, */ \
- start = obj->obj_field_addr(map->offset()); \
- if ((HeapWord*)start >= top) { \
- break; \
- } \
- end = MIN2((HeapWord*)(start + map->length()), top); \
- } \
- } \
- return size_helper(); \
+#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+ \
+int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, \
+ OopClosureType* closure) {\
+ SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
+ /* header */ \
+ if (closure->do_header()) { \
+ obj->oop_iterate_header(closure); \
+ } \
+ InstanceKlass_OOP_MAP_ITERATE( \
+ obj, \
+ SpecializationStats:: \
+ record_do_oop_call##nv_suffix(SpecializationStats::ik); \
+ (closure)->do_oop##nv_suffix(p), \
+ assert_is_in_closed_subset) \
+ return size_helper(); \
+}
+
+#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
+ \
+int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
+ OopClosureType* closure, \
+ MemRegion mr) { \
+ SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
+ if (closure->do_header()) { \
+ obj->oop_iterate_header(closure, mr); \
+ } \
+ InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
+ obj, mr.start(), mr.end(), \
+ (closure)->do_oop##nv_suffix(p), \
+ assert_is_in_closed_subset) \
+ return size_helper(); \
}
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
@@ -1474,56 +1554,28 @@ ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
-
void instanceKlass::iterate_static_fields(OopClosure* closure) {
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- while (start < end) {
- assert(Universe::heap()->is_in_reserved_or_null(*start), "should be in heap");
- closure->do_oop(start);
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ closure->do_oop(p), \
+ assert_is_in_reserved)
}
void instanceKlass::iterate_static_fields(OopClosure* closure,
MemRegion mr) {
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- // I gather that the the static fields of reference types come first,
- // hence the name of "oop_field_size", and that is what makes this safe.
- assert((intptr_t)mr.start() ==
- align_size_up((intptr_t)mr.start(), sizeof(oop)) &&
- (intptr_t)mr.end() == align_size_up((intptr_t)mr.end(), sizeof(oop)),
- "Memregion must be oop-aligned.");
- if ((HeapWord*)start < mr.start()) start = (oop*)mr.start();
- if ((HeapWord*)end > mr.end()) end = (oop*)mr.end();
- while (start < end) {
- invoke_closure_on(start, closure,_v);
- start++;
- }
+ InstanceKlass_BOUNDED_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ mr.start(), mr.end(), \
+ (closure)->do_oop_v(p), \
+ assert_is_in_closed_subset)
}
-
int instanceKlass::oop_adjust_pointers(oop obj) {
int size = size_helper();
-
- // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
- OopMapBlock* map = start_of_nonstatic_oop_maps();
- OopMapBlock* const end_map = map + nonstatic_oop_map_size();
- // Iterate over oopmap blocks
- while (map < end_map) {
- // Compute oop range for this block
- oop* start = obj->obj_field_addr(map->offset());
- oop* end = start + map->length();
- // Iterate over oops
- while (start < end) {
- assert(Universe::heap()->is_in_or_null(*start), "should be in heap");
- MarkSweep::adjust_pointer(start);
- start++;
- }
- map++;
- }
-
+ InstanceKlass_OOP_MAP_ITERATE( \
+ obj, \
+ MarkSweep::adjust_pointer(p), \
+ assert_is_in)
obj->adjust_header();
return size;
}
@@ -1531,132 +1583,66 @@ int instanceKlass::oop_adjust_pointers(oop obj) {
#ifndef SERIALGC
void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
assert(!pm->depth_first(), "invariant");
- // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
- OopMapBlock* start_map = start_of_nonstatic_oop_maps();
- OopMapBlock* map = start_map + nonstatic_oop_map_size();
-
- // Iterate over oopmap blocks
- while (start_map < map) {
- --map;
- // Compute oop range for this block
- oop* start = obj->obj_field_addr(map->offset());
- oop* curr = start + map->length();
- // Iterate over oops
- while (start < curr) {
- --curr;
- if (PSScavenge::should_scavenge(*curr)) {
- assert(Universe::heap()->is_in(*curr), "should be in heap");
- pm->claim_or_forward_breadth(curr);
- }
- }
- }
+ InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
+ obj, \
+ if (PSScavenge::should_scavenge(p)) { \
+ pm->claim_or_forward_breadth(p); \
+ }, \
+ assert_nothing )
}
void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
assert(pm->depth_first(), "invariant");
- // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
- OopMapBlock* start_map = start_of_nonstatic_oop_maps();
- OopMapBlock* map = start_map + nonstatic_oop_map_size();
-
- // Iterate over oopmap blocks
- while (start_map < map) {
- --map;
- // Compute oop range for this block
- oop* start = obj->obj_field_addr(map->offset());
- oop* curr = start + map->length();
- // Iterate over oops
- while (start < curr) {
- --curr;
- if (PSScavenge::should_scavenge(*curr)) {
- assert(Universe::heap()->is_in(*curr), "should be in heap");
- pm->claim_or_forward_depth(curr);
- }
- }
- }
+ InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
+ obj, \
+ if (PSScavenge::should_scavenge(p)) { \
+ pm->claim_or_forward_depth(p); \
+ }, \
+ assert_nothing )
}
int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
- // Compute oopmap block range. The common case is nonstatic_oop_map_size==1.
- OopMapBlock* map = start_of_nonstatic_oop_maps();
- OopMapBlock* const end_map = map + nonstatic_oop_map_size();
- // Iterate over oopmap blocks
- while (map < end_map) {
- // Compute oop range for this oopmap block.
- oop* const map_start = obj->obj_field_addr(map->offset());
- oop* const beg_oop = map_start;
- oop* const end_oop = map_start + map->length();
- for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
- PSParallelCompact::adjust_pointer(cur_oop);
- }
- ++map;
- }
-
+ InstanceKlass_OOP_MAP_ITERATE( \
+ obj, \
+ PSParallelCompact::adjust_pointer(p), \
+ assert_nothing)
return size_helper();
}
int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
HeapWord* beg_addr, HeapWord* end_addr) {
- // Compute oopmap block range. The common case is nonstatic_oop_map_size==1.
- OopMapBlock* map = start_of_nonstatic_oop_maps();
- OopMapBlock* const end_map = map + nonstatic_oop_map_size();
- // Iterate over oopmap blocks
- while (map < end_map) {
- // Compute oop range for this oopmap block.
- oop* const map_start = obj->obj_field_addr(map->offset());
- oop* const beg_oop = MAX2((oop*)beg_addr, map_start);
- oop* const end_oop = MIN2((oop*)end_addr, map_start + map->length());
- for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
- PSParallelCompact::adjust_pointer(cur_oop);
- }
- ++map;
- }
-
+ InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
+ obj, beg_addr, end_addr, \
+ PSParallelCompact::adjust_pointer(p), \
+ assert_nothing)
return size_helper();
}
void instanceKlass::copy_static_fields(PSPromotionManager* pm) {
assert(!pm->depth_first(), "invariant");
- // Compute oop range
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- // Iterate over oops
- while (start < end) {
- if (PSScavenge::should_scavenge(*start)) {
- assert(Universe::heap()->is_in(*start), "should be in heap");
- pm->claim_or_forward_breadth(start);
- }
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ if (PSScavenge::should_scavenge(p)) { \
+ pm->claim_or_forward_breadth(p); \
+ }, \
+ assert_nothing )
}
void instanceKlass::push_static_fields(PSPromotionManager* pm) {
assert(pm->depth_first(), "invariant");
- // Compute oop range
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- // Iterate over oops
- while (start < end) {
- if (PSScavenge::should_scavenge(*start)) {
- assert(Universe::heap()->is_in(*start), "should be in heap");
- pm->claim_or_forward_depth(start);
- }
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ if (PSScavenge::should_scavenge(p)) { \
+ pm->claim_or_forward_depth(p); \
+ }, \
+ assert_nothing )
}
void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
- // Compute oop range
- oop* start = start_of_static_fields();
- oop* end = start + static_oop_field_size();
- // Iterate over oops
- while (start < end) {
- if (*start != NULL) {
- assert(Universe::heap()->is_in(*start), "should be in heap");
- // *start = (oop) cm->summary_data()->calc_new_pointer(*start);
- PSParallelCompact::adjust_pointer(start);
- }
- start++;
- }
+ InstanceKlass_OOP_ITERATE( \
+ start_of_static_fields(), static_oop_field_size(), \
+ PSParallelCompact::adjust_pointer(p), \
+ assert_is_in)
}
#endif // SERIALGC
@@ -1687,18 +1673,15 @@ void instanceKlass::follow_weak_klass_links(
Klass::follow_weak_klass_links(is_alive, keep_alive);
}
-
void instanceKlass::remove_unshareable_info() {
Klass::remove_unshareable_info();
init_implementor();
}
-
static void clear_all_breakpoints(methodOop m) {
m->clear_all_breakpoints();
}
-
void instanceKlass::release_C_heap_structures() {
// Deallocate oop map cache
if (_oop_map_cache != NULL) {
@@ -2047,29 +2030,30 @@ void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
obj->print_address_on(st);
}
-#endif
+#endif // ndef PRODUCT
const char* instanceKlass::internal_name() const {
return external_name();
}
-
-
// Verification
class VerifyFieldClosure: public OopClosure {
- public:
- void do_oop(oop* p) {
+ protected:
+ template <class T> void do_oop_work(T* p) {
guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
- if (!(*p)->is_oop_or_null()) {
- tty->print_cr("Failed: %p -> %p",p,(address)*p);
+ oop obj = oopDesc::load_decode_heap_oop(p);
+ if (!obj->is_oop_or_null()) {
+ tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
Universe::print();
guarantee(false, "boom");
}
}
+ public:
+ virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
};
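
For reference, the closure above shows the shape used throughout this patch to support both oop widths: two thin virtual overloads, one per slot type, both forwarding to a single template body. A minimal standalone sketch of that shape follows; the types and the closure name are stand-ins, not HotSpot's.

#include <cstdio>

typedef void*        oop;        // stand-in for HotSpot's oop
typedef unsigned int narrowOop;  // stand-in for the 32-bit compressed slot

class OopClosure {
 public:
  virtual void do_oop(oop* p)       = 0;
  virtual void do_oop(narrowOop* p) = 0;
  virtual ~OopClosure() {}
};

class CountingClosure : public OopClosure {
 protected:
  // One template body serves both slot widths.
  template <class T> void do_oop_work(T* p) {
    printf("visited a %d-byte slot\n", (int)sizeof(*p));
    ++_count;
  }
 public:
  int _count;
  CountingClosure() : _count(0) {}
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

int main() {
  oop wide = 0; narrowOop narrow = 0;
  CountingClosure cl;
  cl.do_oop(&wide);
  cl.do_oop(&narrow);
  printf("count = %d\n", cl._count);
  return 0;
}
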
-
void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
Klass::oop_verify_on(obj, st);
VerifyFieldClosure blk;
@@ -2110,26 +2094,28 @@ void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
}
}
-#endif
-
+#endif // ndef PRODUCT
-/* JNIid class for jfieldIDs only */
- JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
- _holder = holder;
- _offset = offset;
- _next = next;
- debug_only(_is_static_field_id = false;)
- }
+// JNIid class for jfieldIDs only
+// Note to reviewers:
+// These JNI functions are just moved over to column 1 and not changed
+// in the compressed oops workspace.
+JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
+ _holder = holder;
+ _offset = offset;
+ _next = next;
+ debug_only(_is_static_field_id = false;)
+}
- JNIid* JNIid::find(int offset) {
- JNIid* current = this;
- while (current != NULL) {
- if (current->offset() == offset) return current;
- current = current->next();
- }
- return NULL;
- }
+JNIid* JNIid::find(int offset) {
+ JNIid* current = this;
+ while (current != NULL) {
+ if (current->offset() == offset) return current;
+ current = current->next();
+ }
+ return NULL;
+}
void JNIid::oops_do(OopClosure* f) {
for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
@@ -2138,40 +2124,40 @@ void JNIid::oops_do(OopClosure* f) {
}
void JNIid::deallocate(JNIid* current) {
- while (current != NULL) {
- JNIid* next = current->next();
- delete current;
- current = next;
- }
- }
-
-
- void JNIid::verify(klassOop holder) {
- int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields();
- int end_field_offset;
- end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
-
- JNIid* current = this;
- while (current != NULL) {
- guarantee(current->holder() == holder, "Invalid klass in JNIid");
- #ifdef ASSERT
- int o = current->offset();
- if (current->is_static_field_id()) {
- guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
- }
- #endif
- current = current->next();
- }
- }
+ while (current != NULL) {
+ JNIid* next = current->next();
+ delete current;
+ current = next;
+ }
+}
+
+void JNIid::verify(klassOop holder) {
+ int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields();
+ int end_field_offset;
+ end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
+ JNIid* current = this;
+ while (current != NULL) {
+ guarantee(current->holder() == holder, "Invalid klass in JNIid");
#ifdef ASSERT
- void instanceKlass::set_init_state(ClassState state) {
- bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
- : (_init_state < state);
- assert(good_state || state == allocated, "illegal state transition");
- _init_state = state;
+ int o = current->offset();
+ if (current->is_static_field_id()) {
+ guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
+ }
+#endif
+ current = current->next();
}
+}
+
+
+#ifdef ASSERT
+void instanceKlass::set_init_state(ClassState state) {
+ bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
+ : (_init_state < state);
+ assert(good_state || state == allocated, "illegal state transition");
+ _init_state = state;
+}
#endif
@@ -2180,9 +2166,9 @@ void JNIid::deallocate(JNIid* current) {
// Add an information node that contains weak references to the
// interesting parts of the previous version of the_class.
void instanceKlass::add_previous_version(instanceKlassHandle ikh,
- BitMap * emcp_methods, int emcp_method_count) {
+ BitMap* emcp_methods, int emcp_method_count) {
assert(Thread::current()->is_VM_thread(),
- "only VMThread can add previous versions");
+ "only VMThread can add previous versions");
if (_previous_versions == NULL) {
// This is the first previous version so make some space.
diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp
index 285291dce..b7b71d930 100644
--- a/src/share/vm/oops/instanceKlass.hpp
+++ b/src/share/vm/oops/instanceKlass.hpp
@@ -180,12 +180,16 @@ class instanceKlass: public Klass {
// End of the oop block.
//
- int _nonstatic_field_size; // number of non-static fields in this klass (including inherited fields)
- int _static_field_size; // number of static fields (oop and non-oop) in this klass
+ // number of words used by non-static fields in this klass (including
+ // inherited fields but after header_size()). If fields are compressed into
+ // the header, this can be zero, so it is not the same as the non-static field count.
+ int _nonstatic_field_size;
+ int _static_field_size; // number of words used by static fields (oop and non-oop) in this klass
int _static_oop_field_size;// number of static oop fields in this klass
int _nonstatic_oop_map_size;// number of nonstatic oop-map blocks allocated at end of this klass
bool _is_marked_dependent; // used for marking during flushing and deoptimization
bool _rewritten; // methods rewritten.
+ bool _has_nonstatic_fields; // for sizing with UseCompressedOops
u2 _minor_version; // minor version number of class file
u2 _major_version; // major version number of class file
ClassState _init_state; // state of class
@@ -221,6 +225,9 @@ class instanceKlass: public Klass {
friend class SystemDictionary;
public:
+ bool has_nonstatic_fields() const { return _has_nonstatic_fields; }
+ void set_has_nonstatic_fields(bool b) { _has_nonstatic_fields = b; }
+
// field sizes
int nonstatic_field_size() const { return _nonstatic_field_size; }
void set_nonstatic_field_size(int size) { _nonstatic_field_size = size; }
@@ -340,8 +347,7 @@ class instanceKlass: public Klass {
// find a non-static or static field given its offset within the class.
bool contains_field_offset(int offset) {
- return ((offset/wordSize) >= instanceOopDesc::header_size() &&
- (offset/wordSize)-instanceOopDesc::header_size() < nonstatic_field_size());
+ return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size());
}
bool find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const;
@@ -570,12 +576,21 @@ class instanceKlass: public Klass {
intptr_t* start_of_itable() const { return start_of_vtable() + align_object_offset(vtable_length()); }
int itable_offset_in_words() const { return start_of_itable() - (intptr_t*)as_klassOop(); }
- oop* start_of_static_fields() const { return (oop*)(start_of_itable() + align_object_offset(itable_length())); }
+  // Static fields start at a HeapWord boundary; the address must be converted
+  // to oop* or narrowOop* based on UseCompressedOops before traversal.
+ HeapWord* start_of_static_fields() const {
+ return (HeapWord*)(start_of_itable() + align_object_offset(itable_length()));
+ }
+
intptr_t* end_of_itable() const { return start_of_itable() + itable_length(); }
- oop* end_of_static_fields() const { return start_of_static_fields() + static_field_size(); }
- int offset_of_static_fields() const { return (intptr_t)start_of_static_fields() - (intptr_t)as_klassOop(); }
- OopMapBlock* start_of_nonstatic_oop_maps() const { return (OopMapBlock*) (start_of_static_fields() + static_field_size()); }
+ int offset_of_static_fields() const {
+ return (intptr_t)start_of_static_fields() - (intptr_t)as_klassOop();
+ }
+
+ OopMapBlock* start_of_nonstatic_oop_maps() const {
+ return (OopMapBlock*) (start_of_static_fields() + static_field_size());
+ }
// Allocation profiling support
juint alloc_size() const { return _alloc_count * size_helper(); }
diff --git a/src/share/vm/oops/instanceKlassKlass.cpp b/src/share/vm/oops/instanceKlassKlass.cpp
index 144ced69e..f0a150408 100644
--- a/src/share/vm/oops/instanceKlassKlass.cpp
+++ b/src/share/vm/oops/instanceKlassKlass.cpp
@@ -286,17 +286,17 @@ void instanceKlassKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
ik->copy_static_fields(pm);
oop* loader_addr = ik->adr_class_loader();
- if (PSScavenge::should_scavenge(*loader_addr)) {
+ if (PSScavenge::should_scavenge(loader_addr)) {
pm->claim_or_forward_breadth(loader_addr);
}
oop* pd_addr = ik->adr_protection_domain();
- if (PSScavenge::should_scavenge(*pd_addr)) {
+ if (PSScavenge::should_scavenge(pd_addr)) {
pm->claim_or_forward_breadth(pd_addr);
}
oop* sg_addr = ik->adr_signers();
- if (PSScavenge::should_scavenge(*sg_addr)) {
+ if (PSScavenge::should_scavenge(sg_addr)) {
pm->claim_or_forward_breadth(sg_addr);
}
@@ -309,17 +309,17 @@ void instanceKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
ik->push_static_fields(pm);
oop* loader_addr = ik->adr_class_loader();
- if (PSScavenge::should_scavenge(*loader_addr)) {
+ if (PSScavenge::should_scavenge(loader_addr)) {
pm->claim_or_forward_depth(loader_addr);
}
oop* pd_addr = ik->adr_protection_domain();
- if (PSScavenge::should_scavenge(*pd_addr)) {
+ if (PSScavenge::should_scavenge(pd_addr)) {
pm->claim_or_forward_depth(pd_addr);
}
oop* sg_addr = ik->adr_signers();
- if (PSScavenge::should_scavenge(*sg_addr)) {
+ if (PSScavenge::should_scavenge(sg_addr)) {
pm->claim_or_forward_depth(sg_addr);
}
@@ -602,16 +602,18 @@ const char* instanceKlassKlass::internal_name() const {
// Verification
-
class VerifyFieldClosure: public OopClosure {
- public:
- void do_oop(oop* p) {
+ protected:
+ template <class T> void do_oop_work(T* p) {
guarantee(Universe::heap()->is_in(p), "should be in heap");
- guarantee((*p)->is_oop_or_null(), "should be in heap");
+ oop obj = oopDesc::load_decode_heap_oop(p);
+ guarantee(obj->is_oop_or_null(), "should be in heap");
}
+ public:
+ virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
};
-
void instanceKlassKlass::oop_verify_on(oop obj, outputStream* st) {
klassKlass::oop_verify_on(obj, st);
if (!obj->partially_loaded()) {
diff --git a/src/share/vm/oops/instanceOop.hpp b/src/share/vm/oops/instanceOop.hpp
index 49cab9379..e0f0cca1f 100644
--- a/src/share/vm/oops/instanceOop.hpp
+++ b/src/share/vm/oops/instanceOop.hpp
@@ -27,5 +27,26 @@
class instanceOopDesc : public oopDesc {
public:
+ // aligned header size.
static int header_size() { return sizeof(instanceOopDesc)/HeapWordSize; }
+
+  // With compressed oops the field area can start inside the header gap, so its offset is not necessarily word aligned.
+ static int base_offset_in_bytes() {
+ return UseCompressedOops ?
+ klass_gap_offset_in_bytes() :
+ sizeof(instanceOopDesc);
+ }
+
+ static bool contains_field_offset(int offset, int nonstatic_field_size) {
+ int base_in_bytes = base_offset_in_bytes();
+ if (UseCompressedOops) {
+ return (offset >= base_in_bytes &&
+ // field can be embedded in header, or is after header.
+ (offset < (int)sizeof(instanceOopDesc) ||
+ (offset-(int)sizeof(instanceOopDesc))/wordSize < nonstatic_field_size));
+ } else {
+ return (offset >= base_in_bytes &&
+ (offset-base_in_bytes)/wordSize < nonstatic_field_size);
+ }
+ }
};
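
The offset test above is easiest to check with concrete numbers. The sketch below hard-codes illustrative 64-bit values (12-byte compressed-klass base, 16-byte full header, 8-byte words); the constants and the free function are stand-ins for the real klass_gap_offset_in_bytes()/sizeof arithmetic, not taken from the patch.

#include <cstdio>

// Illustrative 64-bit layout assumptions (not taken from the patch):
// 8-byte mark word + 4-byte compressed klass, so fields may start at offset 12,
// while the uncompressed header is 16 bytes and a word is 8 bytes.
const int kCompressedBase  = 12;  // stand-in for klass_gap_offset_in_bytes()
const int kFullHeaderBytes = 16;  // stand-in for sizeof(instanceOopDesc)
const int kWordSize        = 8;

bool contains_field_offset(bool use_compressed_oops,
                           int offset, int nonstatic_field_size) {
  if (use_compressed_oops) {
    return offset >= kCompressedBase &&
           (offset < kFullHeaderBytes ||   // field embedded in the header gap
            (offset - kFullHeaderBytes) / kWordSize < nonstatic_field_size);
  }
  return offset >= kFullHeaderBytes &&
         (offset - kFullHeaderBytes) / kWordSize < nonstatic_field_size;
}

int main() {
  // A klass with one word of non-static fields.
  for (int off = 8; off <= 24; off += 4) {
    printf("offset %2d: compressed=%d uncompressed=%d\n", off,
           contains_field_offset(true,  off, 1),
           contains_field_offset(false, off, 1));
  }
  return 0;
}
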
diff --git a/src/share/vm/oops/instanceRefKlass.cpp b/src/share/vm/oops/instanceRefKlass.cpp
index d98ecd2cf..634a8c7fb 100644
--- a/src/share/vm/oops/instanceRefKlass.cpp
+++ b/src/share/vm/oops/instanceRefKlass.cpp
@@ -25,23 +25,24 @@
# include "incls/_precompiled.incl"
# include "incls/_instanceRefKlass.cpp.incl"
-void instanceRefKlass::oop_follow_contents(oop obj) {
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
- oop referent = *referent_addr;
+template <class T>
+static void specialized_oop_follow_contents(instanceRefKlass* ref, oop obj) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ oop referent = oopDesc::load_decode_heap_oop(referent_addr);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (address)obj);
+ gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
}
)
if (referent != NULL) {
if (!referent->is_gc_marked() &&
MarkSweep::ref_processor()->
- discover_reference(obj, reference_type())) {
+ discover_reference(obj, ref->reference_type())) {
// reference already enqueued, referent will be traversed later
- instanceKlass::oop_follow_contents(obj);
+ ref->instanceKlass::oop_follow_contents(obj);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (address)obj);
+ gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, obj);
}
)
return;
@@ -49,42 +50,52 @@ void instanceRefKlass::oop_follow_contents(oop obj) {
// treat referent as normal oop
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (address)obj);
+ gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, obj);
}
)
MarkSweep::mark_and_push(referent_addr);
}
}
// treat next as normal oop. next is a link in the pending list.
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" Process next as normal " INTPTR_FORMAT, next_addr);
}
)
MarkSweep::mark_and_push(next_addr);
- instanceKlass::oop_follow_contents(obj);
+ ref->instanceKlass::oop_follow_contents(obj);
+}
+
+void instanceRefKlass::oop_follow_contents(oop obj) {
+ if (UseCompressedOops) {
+ specialized_oop_follow_contents<narrowOop>(this, obj);
+ } else {
+ specialized_oop_follow_contents<oop>(this, obj);
+ }
}
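
The new entry point above selects a template instantiation once per call based on UseCompressedOops instead of branching per slot. A self-contained sketch of that dispatch shape, with stand-in types, a stand-in flag, and a hypothetical follow_slot helper:

#include <cstdio>

typedef void*        oop;        // stand-in for HotSpot's oop
typedef unsigned int narrowOop;  // stand-in for the compressed slot type

bool UseCompressedOops = true;   // stand-in for the VM flag

// One template body, instantiated for both slot widths.
template <class T>
static void follow_slot(T* p) {
  printf("following a %d-byte slot\n", (int)sizeof(T));
}

// Public entry point chooses the instantiation once, not per slot.
void follow_contents(void* referent_slot) {
  if (UseCompressedOops) {
    follow_slot((narrowOop*)referent_slot);
  } else {
    follow_slot((oop*)referent_slot);
  }
}

int main() {
  narrowOop n = 0; oop w = 0;
  UseCompressedOops = true;  follow_contents(&n);
  UseCompressedOops = false; follow_contents(&w);
  return 0;
}
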
#ifndef SERIALGC
-void instanceRefKlass::oop_follow_contents(ParCompactionManager* cm,
- oop obj) {
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
- oop referent = *referent_addr;
+template <class T>
+static void specialized_oop_follow_contents(instanceRefKlass* ref,
+ ParCompactionManager* cm,
+ oop obj) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ oop referent = oopDesc::load_decode_heap_oop(referent_addr);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (address)obj);
+ gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
}
)
if (referent != NULL) {
if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
PSParallelCompact::ref_processor()->
- discover_reference(obj, reference_type())) {
+ discover_reference(obj, ref->reference_type())) {
// reference already enqueued, referent will be traversed later
- instanceKlass::oop_follow_contents(cm, obj);
+ ref->instanceKlass::oop_follow_contents(cm, obj);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (address)obj);
+ gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, obj);
}
)
return;
@@ -92,70 +103,85 @@ void instanceRefKlass::oop_follow_contents(ParCompactionManager* cm,
// treat referent as normal oop
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (address)obj);
+ gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, obj);
}
)
PSParallelCompact::mark_and_push(cm, referent_addr);
}
}
// treat next as normal oop. next is a link in the pending list.
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" Process next as normal " INTPTR_FORMAT, next_addr);
}
)
PSParallelCompact::mark_and_push(cm, next_addr);
- instanceKlass::oop_follow_contents(cm, obj);
+ ref->instanceKlass::oop_follow_contents(cm, obj);
}
-#endif // SERIALGC
-
-int instanceRefKlass::oop_adjust_pointers(oop obj) {
- int size = size_helper();
- instanceKlass::oop_adjust_pointers(obj);
-
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
- MarkSweep::adjust_pointer(referent_addr);
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
- MarkSweep::adjust_pointer(next_addr);
- oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
- MarkSweep::adjust_pointer(discovered_addr);
+void instanceRefKlass::oop_follow_contents(ParCompactionManager* cm,
+ oop obj) {
+ if (UseCompressedOops) {
+ specialized_oop_follow_contents<narrowOop>(this, cm, obj);
+ } else {
+ specialized_oop_follow_contents<oop>(this, cm, obj);
+ }
+}
+#endif // SERIALGC
#ifdef ASSERT
+template <class T> void trace_reference_gc(const char *s, oop obj,
+ T* referent_addr,
+ T* next_addr,
+ T* discovered_addr) {
if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr("instanceRefKlass::oop_adjust_pointers obj "
- INTPTR_FORMAT, (address)obj);
+ gclog_or_tty->print_cr("%s obj " INTPTR_FORMAT, s, (address)obj);
gclog_or_tty->print_cr(" referent_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, referent_addr,
- referent_addr ? (address)*referent_addr : NULL);
+ INTPTR_FORMAT, referent_addr,
+ referent_addr ?
+ (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL);
gclog_or_tty->print_cr(" next_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, next_addr,
- next_addr ? (address)*next_addr : NULL);
+ INTPTR_FORMAT, next_addr,
+ next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL);
gclog_or_tty->print_cr(" discovered_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, discovered_addr,
- discovered_addr ? (address)*discovered_addr : NULL);
+ INTPTR_FORMAT, discovered_addr,
+ discovered_addr ?
+ (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL);
}
+}
#endif
+template <class T> void specialized_oop_adjust_pointers(instanceRefKlass *ref, oop obj) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ MarkSweep::adjust_pointer(referent_addr);
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ MarkSweep::adjust_pointer(next_addr);
+ T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+ MarkSweep::adjust_pointer(discovered_addr);
+ debug_only(trace_reference_gc("instanceRefKlass::oop_adjust_pointers", obj,
+ referent_addr, next_addr, discovered_addr);)
+}
+
+int instanceRefKlass::oop_adjust_pointers(oop obj) {
+ int size = size_helper();
+ instanceKlass::oop_adjust_pointers(obj);
+
+ if (UseCompressedOops) {
+ specialized_oop_adjust_pointers<narrowOop>(this, obj);
+ } else {
+ specialized_oop_adjust_pointers<oop>(this, obj);
+ }
return size;
}
-#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
- \
-int instanceRefKlass:: \
-oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
- /* Get size before changing pointers */ \
- SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\
- \
- int size = instanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \
- \
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj); \
- oop referent = *referent_addr; \
- if (referent != NULL) { \
+#define InstanceRefKlass_SPECIALIZED_OOP_ITERATE(T, nv_suffix, contains) \
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); \
+ oop referent = oopDesc::load_decode_heap_oop(referent_addr); \
+ if (referent != NULL && contains(referent_addr)) { \
ReferenceProcessor* rp = closure->_ref_processor; \
if (!referent->is_gc_marked() && (rp != NULL) && \
- rp->discover_reference(obj, reference_type())) { \
+ rp->discover_reference(obj, reference_type())) { \
return size; \
} else { \
/* treat referent as normal oop */ \
@@ -163,12 +189,34 @@ oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {
closure->do_oop##nv_suffix(referent_addr); \
} \
} \
- \
/* treat next as normal oop */ \
- oop* next_addr = java_lang_ref_Reference::next_addr(obj); \
- SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \
- closure->do_oop##nv_suffix(next_addr); \
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); \
+ if (contains(next_addr)) { \
+ SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \
+ closure->do_oop##nv_suffix(next_addr); \
+ } \
return size; \
+
+
+template <class T> bool contains(T *t) { return true; }
+
+// Macro to define instanceRefKlass::oop_oop_iterate for virtual/nonvirtual for
+// all closures. Macros calling macros above for each oop size.
+
+#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+ \
+int instanceRefKlass:: \
+oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
+ /* Get size before changing pointers */ \
+ SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\
+ \
+ int size = instanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \
+ \
+ if (UseCompressedOops) { \
+ InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, contains); \
+ } else { \
+ InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, contains); \
+ } \
}
#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
@@ -180,28 +228,11 @@ oop_oop_iterate##nv_suffix##_m(oop obj,
SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\
\
int size = instanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \
- \
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj); \
- oop referent = *referent_addr; \
- if (referent != NULL && mr.contains(referent_addr)) { \
- ReferenceProcessor* rp = closure->_ref_processor; \
- if (!referent->is_gc_marked() && (rp != NULL) && \
- rp->discover_reference(obj, reference_type())) { \
- return size; \
- } else { \
- /* treat referent as normal oop */ \
- SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
- closure->do_oop##nv_suffix(referent_addr); \
- } \
+ if (UseCompressedOops) { \
+ InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr.contains); \
+ } else { \
+ InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr.contains); \
} \
- \
- /* treat next as normal oop */ \
- oop* next_addr = java_lang_ref_Reference::next_addr(obj); \
- if (mr.contains(next_addr)) { \
- SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
- closure->do_oop##nv_suffix(next_addr); \
- } \
- return size; \
}
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN)
@@ -209,16 +240,17 @@ ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceRefKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
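
The file-scope contains() defined above is an always-true predicate, so the same macro body can be instantiated for the unbounded iterator and, with mr.contains, for the MemRegion-bounded one. The same idea reduced to plain functions, with illustrative names and a fixed element type:

#include <cstdio>

struct MemRegion {
  const int* lo;
  const int* hi;
  bool contains(const int* p) const { return p >= lo && p < hi; }
};

// Always-true predicate, mirroring the file-scope contains() above.
inline bool contains_all(const int*) { return true; }

// Predicate object for the MemRegion-bounded case.
struct RegionContains {
  MemRegion mr;
  bool operator()(const int* p) const { return mr.contains(p); }
};

// One iteration body serves both cases; the predicate decides what to visit.
template <class Pred>
int visit(const int* base, int length, Pred pred) {
  int visited = 0;
  for (const int* p = base; p < base + length; ++p) {
    if (pred(p)) ++visited;
  }
  return visited;
}

int main() {
  int slots[8] = {0};
  MemRegion mr = { slots + 2, slots + 6 };
  RegionContains rc = { mr };
  printf("unbounded: %d\n", visit(slots, 8, contains_all));  // prints 8
  printf("bounded:   %d\n", visit(slots, 8, rc));            // prints 4
  return 0;
}
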
-
#ifndef SERIALGC
-void instanceRefKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
+template <class T>
+void specialized_oop_copy_contents(instanceRefKlass *ref,
+ PSPromotionManager* pm, oop obj) {
assert(!pm->depth_first(), "invariant");
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
- if (PSScavenge::should_scavenge(*referent_addr)) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ if (PSScavenge::should_scavenge(referent_addr)) {
ReferenceProcessor* rp = PSScavenge::reference_processor();
- if (rp->discover_reference(obj, reference_type())) {
+ if (rp->discover_reference(obj, ref->reference_type())) {
// reference already enqueued, referent and next will be traversed later
- instanceKlass::oop_copy_contents(pm, obj);
+ ref->instanceKlass::oop_copy_contents(pm, obj);
return;
} else {
// treat referent as normal oop
@@ -226,21 +258,31 @@ void instanceRefKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
}
}
// treat next as normal oop
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
- if (PSScavenge::should_scavenge(*next_addr)) {
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ if (PSScavenge::should_scavenge(next_addr)) {
pm->claim_or_forward_breadth(next_addr);
}
- instanceKlass::oop_copy_contents(pm, obj);
+ ref->instanceKlass::oop_copy_contents(pm, obj);
}
-void instanceRefKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
+void instanceRefKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
+ if (UseCompressedOops) {
+ specialized_oop_copy_contents<narrowOop>(this, pm, obj);
+ } else {
+ specialized_oop_copy_contents<oop>(this, pm, obj);
+ }
+}
+
+template <class T>
+void specialized_oop_push_contents(instanceRefKlass *ref,
+ PSPromotionManager* pm, oop obj) {
assert(pm->depth_first(), "invariant");
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
- if (PSScavenge::should_scavenge(*referent_addr)) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+ if (PSScavenge::should_scavenge(referent_addr)) {
ReferenceProcessor* rp = PSScavenge::reference_processor();
- if (rp->discover_reference(obj, reference_type())) {
+ if (rp->discover_reference(obj, ref->reference_type())) {
// reference already enqueued, referent and next will be traversed later
- instanceKlass::oop_push_contents(pm, obj);
+ ref->instanceKlass::oop_push_contents(pm, obj);
return;
} else {
// treat referent as normal oop
@@ -248,71 +290,68 @@ void instanceRefKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
}
}
// treat next as normal oop
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
- if (PSScavenge::should_scavenge(*next_addr)) {
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ if (PSScavenge::should_scavenge(next_addr)) {
pm->claim_or_forward_depth(next_addr);
}
- instanceKlass::oop_push_contents(pm, obj);
+ ref->instanceKlass::oop_push_contents(pm, obj);
}
-int instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
- instanceKlass::oop_update_pointers(cm, obj);
+void instanceRefKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
+ if (UseCompressedOops) {
+ specialized_oop_push_contents<narrowOop>(this, pm, obj);
+ } else {
+ specialized_oop_push_contents<oop>(this, pm, obj);
+ }
+}
- oop* referent_addr = java_lang_ref_Reference::referent_addr(obj);
+template <class T>
+void specialized_oop_update_pointers(instanceRefKlass *ref,
+ ParCompactionManager* cm, oop obj) {
+ T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
PSParallelCompact::adjust_pointer(referent_addr);
- oop* next_addr = java_lang_ref_Reference::next_addr(obj);
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
PSParallelCompact::adjust_pointer(next_addr);
- oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
+ T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
PSParallelCompact::adjust_pointer(discovered_addr);
+ debug_only(trace_reference_gc("instanceRefKlass::oop_update_ptrs", obj,
+ referent_addr, next_addr, discovered_addr);)
+}
-#ifdef ASSERT
- if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr("instanceRefKlass::oop_update_pointers obj "
- INTPTR_FORMAT, (oopDesc*) obj);
- gclog_or_tty->print_cr(" referent_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, referent_addr,
- referent_addr ? (oopDesc*) *referent_addr : NULL);
- gclog_or_tty->print_cr(" next_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, next_addr,
- next_addr ? (oopDesc*) *next_addr : NULL);
- gclog_or_tty->print_cr(" discovered_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, discovered_addr,
- discovered_addr ? (oopDesc*) *discovered_addr : NULL);
+int instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
+ instanceKlass::oop_update_pointers(cm, obj);
+ if (UseCompressedOops) {
+ specialized_oop_update_pointers<narrowOop>(this, cm, obj);
+ } else {
+ specialized_oop_update_pointers<oop>(this, cm, obj);
}
-#endif
-
return size_helper();
}
-int
-instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
- HeapWord* beg_addr, HeapWord* end_addr) {
- instanceKlass::oop_update_pointers(cm, obj, beg_addr, end_addr);
- oop* p;
- oop* referent_addr = p = java_lang_ref_Reference::referent_addr(obj);
+template <class T> void
+specialized_oop_update_pointers(ParCompactionManager* cm, oop obj,
+ HeapWord* beg_addr, HeapWord* end_addr) {
+ T* p;
+ T* referent_addr = p = (T*)java_lang_ref_Reference::referent_addr(obj);
PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
- oop* next_addr = p = java_lang_ref_Reference::next_addr(obj);
+ T* next_addr = p = (T*)java_lang_ref_Reference::next_addr(obj);
PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
- oop* discovered_addr = p = java_lang_ref_Reference::discovered_addr(obj);
+ T* discovered_addr = p = (T*)java_lang_ref_Reference::discovered_addr(obj);
PSParallelCompact::adjust_pointer(p, beg_addr, end_addr);
+ debug_only(trace_reference_gc("instanceRefKlass::oop_update_ptrs", obj,
+ referent_addr, next_addr, discovered_addr);)
+}
-#ifdef ASSERT
- if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr("instanceRefKlass::oop_update_pointers obj "
- INTPTR_FORMAT, (oopDesc*) obj);
- gclog_or_tty->print_cr(" referent_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, referent_addr,
- referent_addr ? (oopDesc*) *referent_addr : NULL);
- gclog_or_tty->print_cr(" next_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, next_addr,
- next_addr ? (oopDesc*) *next_addr : NULL);
- gclog_or_tty->print_cr(" discovered_addr/* " INTPTR_FORMAT " / "
- INTPTR_FORMAT, discovered_addr,
- discovered_addr ? (oopDesc*) *discovered_addr : NULL);
+int
+instanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
+ HeapWord* beg_addr, HeapWord* end_addr) {
+ instanceKlass::oop_update_pointers(cm, obj, beg_addr, end_addr);
+ if (UseCompressedOops) {
+ specialized_oop_update_pointers<narrowOop>(cm, obj, beg_addr, end_addr);
+ } else {
+ specialized_oop_update_pointers<oop>(cm, obj, beg_addr, end_addr);
}
-#endif
-
return size_helper();
}
#endif // SERIALGC
@@ -338,7 +377,7 @@ void instanceRefKlass::update_nonstatic_oop_maps(klassOop k) {
// offset 2 (words) and has 4 map entries.
debug_only(int offset = java_lang_ref_Reference::referent_offset);
debug_only(int length = ((java_lang_ref_Reference::discovered_offset -
- java_lang_ref_Reference::referent_offset)/wordSize) + 1);
+ java_lang_ref_Reference::referent_offset)/heapOopSize) + 1);
if (UseSharedSpaces) {
assert(map->offset() == java_lang_ref_Reference::queue_offset &&
@@ -368,22 +407,35 @@ void instanceRefKlass::oop_verify_on(oop obj, outputStream* st) {
if (referent != NULL) {
guarantee(referent->is_oop(), "referent field heap failed");
- if (gch != NULL && !gch->is_in_youngest(obj))
+ if (gch != NULL && !gch->is_in_youngest(obj)) {
// We do a specific remembered set check here since the referent
// field is not part of the oop mask and therefore skipped by the
// regular verify code.
- obj->verify_old_oop(java_lang_ref_Reference::referent_addr(obj), true);
+ if (UseCompressedOops) {
+ narrowOop* referent_addr = (narrowOop*)java_lang_ref_Reference::referent_addr(obj);
+ obj->verify_old_oop(referent_addr, true);
+ } else {
+ oop* referent_addr = (oop*)java_lang_ref_Reference::referent_addr(obj);
+ obj->verify_old_oop(referent_addr, true);
+ }
+ }
}
// Verify next field
oop next = java_lang_ref_Reference::next(obj);
if (next != NULL) {
- guarantee(next->is_oop(), "next field verify failed");
    guarantee(next->is_oop(), "next field verify failed");
guarantee(next->is_instanceRef(), "next field verify failed");
if (gch != NULL && !gch->is_in_youngest(obj)) {
// We do a specific remembered set check here since the next field is
// not part of the oop mask and therefore skipped by the regular
// verify code.
- obj->verify_old_oop(java_lang_ref_Reference::next_addr(obj), true);
+ if (UseCompressedOops) {
+ narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
+ obj->verify_old_oop(next_addr, true);
+ } else {
+ oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
+ obj->verify_old_oop(next_addr, true);
+ }
}
}
}
diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp
index 33e800a3c..04d350197 100644
--- a/src/share/vm/oops/klass.cpp
+++ b/src/share/vm/oops/klass.cpp
@@ -542,11 +542,10 @@ void Klass::oop_verify_on(oop obj, outputStream* st) {
void Klass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) {
/* $$$ I think this functionality should be handled by verification of
-
RememberedSet::verify_old_oop(obj, p, allow_dirty, false);
-
the card table. */
}
+void Klass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) { }
#ifndef PRODUCT
diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp
index 76473cba3..d8aac3de6 100644
--- a/src/share/vm/oops/klass.hpp
+++ b/src/share/vm/oops/klass.hpp
@@ -757,6 +757,7 @@ class Klass : public Klass_vtbl {
virtual const char* internal_name() const = 0;
virtual void oop_verify_on(oop obj, outputStream* st);
virtual void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
+ virtual void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
// tells whether obj is partially constructed (gc during class loading)
virtual bool oop_partially_loaded(oop obj) const { return false; }
virtual void oop_set_partially_loaded(oop obj) {};
diff --git a/src/share/vm/oops/klassVtable.cpp b/src/share/vm/oops/klassVtable.cpp
index 84ce0c58a..447d6e929 100644
--- a/src/share/vm/oops/klassVtable.cpp
+++ b/src/share/vm/oops/klassVtable.cpp
@@ -1118,8 +1118,8 @@ void klassItable::setup_itable_offset_table(instanceKlassHandle klass) {
itableOffsetEntry* ioe = (itableOffsetEntry*)klass->start_of_itable();
itableMethodEntry* ime = (itableMethodEntry*)(ioe + nof_interfaces);
intptr_t* end = klass->end_of_itable();
- assert((oop*)(ime + nof_methods) <= klass->start_of_static_fields(), "wrong offset calculation (1)");
- assert((oop*)(end) == (oop*)(ime + nof_methods), "wrong offset calculation (2)");
+ assert((oop*)(ime + nof_methods) <= (oop*)klass->start_of_static_fields(), "wrong offset calculation (1)");
+ assert((oop*)(end) == (oop*)(ime + nof_methods), "wrong offset calculation (2)");
// Visit all interfaces and initialize itable offset table
SetupItableClosure sic((address)klass->as_klassOop(), ioe, ime);
diff --git a/src/share/vm/oops/markOop.hpp b/src/share/vm/oops/markOop.hpp
index 155fb1638..ac42fef74 100644
--- a/src/share/vm/oops/markOop.hpp
+++ b/src/share/vm/oops/markOop.hpp
@@ -89,7 +89,7 @@ class markOopDesc: public oopDesc {
enum { age_bits = 4,
lock_bits = 2,
biased_lock_bits = 1,
- max_hash_bits = BitsPerOop - age_bits - lock_bits - biased_lock_bits,
+ max_hash_bits = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits,
epoch_bits = 2
};
diff --git a/src/share/vm/oops/methodDataKlass.cpp b/src/share/vm/oops/methodDataKlass.cpp
index f3ee24122..feddbddb0 100644
--- a/src/share/vm/oops/methodDataKlass.cpp
+++ b/src/share/vm/oops/methodDataKlass.cpp
@@ -95,6 +95,7 @@ void methodDataKlass::oop_follow_contents(ParCompactionManager* cm,
}
#endif // SERIALGC
+
int methodDataKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
assert (obj->is_methodData(), "object must be method data");
methodDataOop m = methodDataOop(obj);
@@ -113,7 +114,6 @@ int methodDataKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
return size;
}
-
int methodDataKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) {
assert (obj->is_methodData(), "object must be method data");
methodDataOop m = methodDataOop(obj);
@@ -158,14 +158,14 @@ void methodDataKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
assert (obj->is_methodData(), "object must be method data");
methodDataOop m = methodDataOop(obj);
// This should never point into the young gen.
- assert(!PSScavenge::should_scavenge(oop(*m->adr_method())), "Sanity");
+ assert(!PSScavenge::should_scavenge(m->adr_method()), "Sanity");
}
void methodDataKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
assert (obj->is_methodData(), "object must be method data");
methodDataOop m = methodDataOop(obj);
// This should never point into the young gen.
- assert(!PSScavenge::should_scavenge(oop(*m->adr_method())), "Sanity");
+ assert(!PSScavenge::should_scavenge(m->adr_method()), "Sanity");
}
int methodDataKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
diff --git a/src/share/vm/oops/methodOop.cpp b/src/share/vm/oops/methodOop.cpp
index db4dc7735..83543fee2 100644
--- a/src/share/vm/oops/methodOop.cpp
+++ b/src/share/vm/oops/methodOop.cpp
@@ -430,11 +430,11 @@ bool methodOopDesc::can_be_statically_bound() const {
bool methodOopDesc::is_accessor() const {
if (code_size() != 5) return false;
if (size_of_parameters() != 1) return false;
- if (Bytecodes::java_code_at(code_base()+0) != Bytecodes::_aload_0 ) return false;
- if (Bytecodes::java_code_at(code_base()+1) != Bytecodes::_getfield) return false;
- Bytecodes::Code ret_bc = Bytecodes::java_code_at(code_base()+4);
- if (Bytecodes::java_code_at(code_base()+4) != Bytecodes::_areturn &&
- Bytecodes::java_code_at(code_base()+4) != Bytecodes::_ireturn ) return false;
+ methodOop m = (methodOop)this; // pass to code_at() to avoid method_from_bcp
+ if (Bytecodes::java_code_at(code_base()+0, m) != Bytecodes::_aload_0 ) return false;
+ if (Bytecodes::java_code_at(code_base()+1, m) != Bytecodes::_getfield) return false;
+ if (Bytecodes::java_code_at(code_base()+4, m) != Bytecodes::_areturn &&
+ Bytecodes::java_code_at(code_base()+4, m) != Bytecodes::_ireturn ) return false;
return true;
}
@@ -955,7 +955,7 @@ extern "C" {
// This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
static void reorder_based_on_method_index(objArrayOop methods,
objArrayOop annotations,
- oop* temp_array) {
+ GrowableArray<oop>* temp_array) {
if (annotations == NULL) {
return;
}
@@ -963,12 +963,15 @@ static void reorder_based_on_method_index(objArrayOop methods,
int length = methods->length();
int i;
// Copy to temp array
- memcpy(temp_array, annotations->obj_at_addr(0), length * sizeof(oop));
+ temp_array->clear();
+ for (i = 0; i < length; i++) {
+ temp_array->append(annotations->obj_at(i));
+ }
// Copy back using old method indices
for (i = 0; i < length; i++) {
methodOop m = (methodOop) methods->obj_at(i);
- annotations->obj_at_put(i, temp_array[m->method_idnum()]);
+ annotations->obj_at_put(i, temp_array->at(m->method_idnum()));
}
}
@@ -997,7 +1000,7 @@ void methodOopDesc::sort_methods(objArrayOop methods,
// Use a simple bubble sort for small number of methods since
// qsort requires a functional pointer call for each comparison.
- if (length < 8) {
+ if (UseCompressedOops || length < 8) {
bool sorted = true;
for (int i=length-1; i>0; i--) {
for (int j=0; j<i; j++) {
@@ -1010,11 +1013,14 @@ void methodOopDesc::sort_methods(objArrayOop methods,
}
}
if (sorted) break;
- sorted = true;
+ sorted = true;
}
} else {
+ // XXX This doesn't work for UseCompressedOops because the compare fn
+ // will have to decode the methodOop anyway making it not much faster
+ // than above.
compareFn compare = (compareFn) (idempotent ? method_compare_idempotent : method_compare);
- qsort(methods->obj_at_addr(0), length, oopSize, compare);
+ qsort(methods->base(), length, heapOopSize, compare);
}
// Sort annotations if necessary
@@ -1022,8 +1028,9 @@ void methodOopDesc::sort_methods(objArrayOop methods,
assert(methods_parameter_annotations == NULL || methods_parameter_annotations->length() == methods->length(), "");
assert(methods_default_annotations == NULL || methods_default_annotations->length() == methods->length(), "");
if (do_annotations) {
+ ResourceMark rm;
// Allocate temporary storage
- oop* temp_array = NEW_RESOURCE_ARRAY(oop, length);
+ GrowableArray<oop>* temp_array = new GrowableArray<oop>(length);
reorder_based_on_method_index(methods, methods_annotations, temp_array);
reorder_based_on_method_index(methods, methods_parameter_annotations, temp_array);
reorder_based_on_method_index(methods, methods_default_annotations, temp_array);
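
The replacement above copies the annotation oops out by index into a GrowableArray and writes them back through obj_at_put, rather than memcpy'ing raw slots that may now be narrowOops. The permute-by-old-index step itself, in a standalone sketch where std::vector stands in for GrowableArray<oop>:

#include <cstdio>
#include <vector>

// After the methods have been sorted, ids[i] is the original index of the
// method now at position i; annotations must be permuted the same way.
void reorder_by_id(std::vector<int>& annotations, const std::vector<int>& ids) {
  std::vector<int> temp(annotations);       // copy out by position
  for (size_t i = 0; i < annotations.size(); ++i) {
    annotations[i] = temp[ids[i]];          // copy back by original index
  }
}

int main() {
  std::vector<int> ann;
  for (int i = 0; i < 4; ++i) ann.push_back(100 + i);
  std::vector<int> ids;                     // methods sorted as 2, 0, 3, 1
  ids.push_back(2); ids.push_back(0); ids.push_back(3); ids.push_back(1);
  reorder_by_id(ann, ids);
  for (size_t i = 0; i < ann.size(); ++i) printf("%d ", ann[i]);
  printf("\n");                             // prints 102 100 103 101
  return 0;
}
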
diff --git a/src/share/vm/oops/objArrayKlass.cpp b/src/share/vm/oops/objArrayKlass.cpp
index e83ecd7bf..193249bcf 100644
--- a/src/share/vm/oops/objArrayKlass.cpp
+++ b/src/share/vm/oops/objArrayKlass.cpp
@@ -80,35 +80,11 @@ oop objArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) {
return h_array();
}
-void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
- int dst_pos, int length, TRAPS) {
- assert(s->is_objArray(), "must be obj array");
-
- if (!d->is_objArray()) {
- THROW(vmSymbols::java_lang_ArrayStoreException());
- }
-
- // Check is all offsets and lengths are non negative
- if (src_pos < 0 || dst_pos < 0 || length < 0) {
- THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
- }
- // Check if the ranges are valid
- if ( (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length())
- || (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length()) ) {
- THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
- }
+// Either oop or narrowOop depending on UseCompressedOops.
+template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
+ arrayOop d, T* dst, int length, TRAPS) {
- // Special case. Boundary cases must be checked first
- // This allows the following call: copy_array(s, s.length(), d.length(), 0).
- // This is correct, since the position is supposed to be an 'in between point', i.e., s.length(),
- // points to the right of the last element.
- if (length==0) {
- return;
- }
-
- oop* const src = objArrayOop(s)->obj_at_addr(src_pos);
- oop* const dst = objArrayOop(d)->obj_at_addr(dst_pos);
- const size_t word_len = length * HeapWordsPerOop;
+ const size_t word_len = objArrayOopDesc::array_size(length);
// For performance reasons, we assume we are using a card marking write
// barrier. The assert will fail if this is not the case.
@@ -129,16 +105,21 @@ void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
} else {
// slow case: need individual subtype checks
// note: don't use obj_at_put below because it includes a redundant store check
- oop* from = src;
- oop* end = from + length;
- for (oop* p = dst; from < end; from++, p++) {
- oop element = *from;
- if (element == NULL || Klass::cast(element->klass())->is_subtype_of(bound)) {
- *p = element;
+ T* from = src;
+ T* end = from + length;
+ for (T* p = dst; from < end; from++, p++) {
+ // XXX this is going to be slow.
+ T element = *from;
+ if (oopDesc::is_null(element) ||
+ Klass::cast(oopDesc::decode_heap_oop_not_null(element)->klass())->is_subtype_of(bound)) {
+ *p = *from;
} else {
// We must do a barrier to cover the partial copy.
- const size_t done_word_len = pointer_delta(p, dst, oopSize) *
- HeapWordsPerOop;
+ const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize);
+ // pointer delta is scaled to number of elements (length field in
+ // objArrayOop) which we assume is 32 bit.
+ assert(pd == (size_t)(int)pd, "length field overflow");
+ const size_t done_word_len = objArrayOopDesc::array_size((int)pd);
bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len));
THROW(vmSymbols::java_lang_ArrayStoreException());
return;
@@ -149,6 +130,42 @@ void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
}
+void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
+ int dst_pos, int length, TRAPS) {
+ assert(s->is_objArray(), "must be obj array");
+
+ if (!d->is_objArray()) {
+ THROW(vmSymbols::java_lang_ArrayStoreException());
+ }
+
+  // Check if all offsets and lengths are non-negative
+ if (src_pos < 0 || dst_pos < 0 || length < 0) {
+ THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
+ }
+ // Check if the ranges are valid
+ if ( (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length())
+ || (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length()) ) {
+ THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
+ }
+
+ // Special case. Boundary cases must be checked first
+ // This allows the following call: copy_array(s, s.length(), d.length(), 0).
+  // This is correct, since the position is supposed to be an 'in between point', i.e., s.length()
+ // points to the right of the last element.
+ if (length==0) {
+ return;
+ }
+ if (UseCompressedOops) {
+ narrowOop* const src = objArrayOop(s)->obj_at_addr<narrowOop>(src_pos);
+ narrowOop* const dst = objArrayOop(d)->obj_at_addr<narrowOop>(dst_pos);
+ do_copy<narrowOop>(s, src, d, dst, length, CHECK);
+ } else {
+ oop* const src = objArrayOop(s)->obj_at_addr<oop>(src_pos);
+ oop* const dst = objArrayOop(d)->obj_at_addr<oop>(dst_pos);
+ do_copy<oop> (s, src, d, dst, length, CHECK);
+ }
+}
+
klassOop objArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) {
objArrayKlassHandle h_this(THREAD, as_klassOop());
@@ -242,49 +259,75 @@ bool objArrayKlass::compute_is_subtype_of(klassOop k) {
return element_klass()->klass_part()->is_subtype_of(oak->element_klass());
}
-
void objArrayKlass::initialize(TRAPS) {
Klass::cast(bottom_klass())->initialize(THREAD); // dispatches to either instanceKlass or typeArrayKlass
}
+#define ObjArrayKlass_SPECIALIZED_OOP_ITERATE(T, a, p, do_oop) \
+{ \
+ T* p = (T*)(a)->base(); \
+ T* const end = p + (a)->length(); \
+ while (p < end) { \
+ do_oop; \
+ p++; \
+ } \
+}
+
+#define ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(T, a, p, low, high, do_oop) \
+{ \
+ T* const l = (T*)(low); \
+ T* const h = (T*)(high); \
+ T* p = (T*)(a)->base(); \
+ T* end = p + (a)->length(); \
+ if (p < l) p = l; \
+ if (end > h) end = h; \
+ while (p < end) { \
+ do_oop; \
+ ++p; \
+ } \
+}
+
+#define ObjArrayKlass_OOP_ITERATE(a, p, do_oop) \
+ if (UseCompressedOops) { \
+ ObjArrayKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
+ a, p, do_oop) \
+ } else { \
+ ObjArrayKlass_SPECIALIZED_OOP_ITERATE(oop, \
+ a, p, do_oop) \
+ }
+
+#define ObjArrayKlass_BOUNDED_OOP_ITERATE(a, p, low, high, do_oop) \
+ if (UseCompressedOops) { \
+ ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
+ a, p, low, high, do_oop) \
+ } else { \
+ ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
+ a, p, low, high, do_oop) \
+ }
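
The bounded macro above clamps the element walk to [low, high) before visiting anything. That clamping, reduced to a few lines with a fixed element type for illustration:

#include <cstdio>

// Visit only the array elements that fall inside [low, high).
int visit_bounded(int* base, int length, int* low, int* high) {
  int* p   = base;
  int* end = base + length;
  if (p < low)    p   = low;    // clamp the start up to the region
  if (end > high) end = high;   // clamp the end down to the region
  int visited = 0;
  for (; p < end; ++p) ++visited;
  return visited;
}

int main() {
  int a[10] = {0};
  printf("%d\n", visit_bounded(a, 10, a + 3, a + 7));  // prints 4
  return 0;
}
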
void objArrayKlass::oop_follow_contents(oop obj) {
assert (obj->is_array(), "obj must be array");
- arrayOop a = arrayOop(obj);
+ objArrayOop a = objArrayOop(obj);
a->follow_header();
- oop* base = (oop*)a->base(T_OBJECT);
- oop* const end = base + a->length();
- while (base < end) {
- if (*base != NULL)
- // we call mark_and_follow here to avoid excessive marking stack usage
- MarkSweep::mark_and_follow(base);
- base++;
- }
+ ObjArrayKlass_OOP_ITERATE( \
+ a, p, \
+ /* we call mark_and_follow here to avoid excessive marking stack usage */ \
+ MarkSweep::mark_and_follow(p))
}
#ifndef SERIALGC
void objArrayKlass::oop_follow_contents(ParCompactionManager* cm,
oop obj) {
assert (obj->is_array(), "obj must be array");
- arrayOop a = arrayOop(obj);
+ objArrayOop a = objArrayOop(obj);
a->follow_header(cm);
- oop* base = (oop*)a->base(T_OBJECT);
- oop* const end = base + a->length();
- while (base < end) {
- if (*base != NULL)
- // we call mark_and_follow here to avoid excessive marking stack usage
- PSParallelCompact::mark_and_follow(cm, base);
- base++;
- }
+ ObjArrayKlass_OOP_ITERATE( \
+ a, p, \
+ /* we call mark_and_follow here to avoid excessive marking stack usage */ \
+ PSParallelCompact::mark_and_follow(cm, p))
}
#endif // SERIALGC
-#define invoke_closure_on(base, closure, nv_suffix) { \
- if (*(base) != NULL) { \
- (closure)->do_oop##nv_suffix(base); \
- } \
-}
-
#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\
int objArrayKlass::oop_oop_iterate##nv_suffix(oop obj, \
@@ -298,21 +341,7 @@ int objArrayKlass::oop_oop_iterate##nv_suffix(oop obj,
if (closure->do_header()) { \
a->oop_iterate_header(closure); \
} \
- oop* base = a->base(); \
- oop* const end = base + a->length(); \
- const intx field_offset = PrefetchFieldsAhead; \
- if (field_offset > 0) { \
- while (base < end) { \
- prefetch_beyond(base, end, field_offset, closure->prefetch_style()); \
- invoke_closure_on(base, closure, nv_suffix); \
- base++; \
- } \
- } else { \
- while (base < end) { \
- invoke_closure_on(base, closure, nv_suffix); \
- base++; \
- } \
- } \
+ ObjArrayKlass_OOP_ITERATE(a, p, (closure)->do_oop##nv_suffix(p)) \
return size; \
}
@@ -330,28 +359,43 @@ int objArrayKlass::oop_oop_iterate##nv_suffix##_m(oop obj,
if (closure->do_header()) { \
a->oop_iterate_header(closure, mr); \
} \
- oop* bottom = (oop*)mr.start(); \
- oop* top = (oop*)mr.end(); \
- oop* base = a->base(); \
- oop* end = base + a->length(); \
- if (base < bottom) { \
- base = bottom; \
- } \
- if (end > top) { \
- end = top; \
- } \
- const intx field_offset = PrefetchFieldsAhead; \
- if (field_offset > 0) { \
- while (base < end) { \
- prefetch_beyond(base, end, field_offset, closure->prefetch_style()); \
- invoke_closure_on(base, closure, nv_suffix); \
- base++; \
+ ObjArrayKlass_BOUNDED_OOP_ITERATE( \
+ a, p, mr.start(), mr.end(), (closure)->do_oop##nv_suffix(p)) \
+ return size; \
+}
+
+// Like oop_oop_iterate but only iterates over a specified range and only used
+// for objArrayOops.
+#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix) \
+ \
+int objArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj, \
+ OopClosureType* closure, \
+ int start, int end) { \
+ SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::oa); \
+ assert(obj->is_array(), "obj must be array"); \
+ objArrayOop a = objArrayOop(obj); \
+ /* Get size before changing pointers. */ \
+ /* Don't call size() or oop_size() since that is a virtual call */ \
+ int size = a->object_size(); \
+ if (UseCompressedOops) { \
+ HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr<narrowOop>(start);\
+      /* this might be weird if end needs to be aligned on a HeapWord boundary */ \
+ HeapWord* high = (HeapWord*)((narrowOop*)a->base() + end); \
+ MemRegion mr(low, high); \
+ if (closure->do_header()) { \
+ a->oop_iterate_header(closure, mr); \
} \
+ ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
+ a, p, low, high, (closure)->do_oop##nv_suffix(p)) \
} else { \
- while (base < end) { \
- invoke_closure_on(base, closure, nv_suffix); \
- base++; \
+ HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr<oop>(start); \
+ HeapWord* high = (HeapWord*)((oop*)a->base() + end); \
+ MemRegion mr(low, high); \
+ if (closure->do_header()) { \
+ a->oop_iterate_header(closure, mr); \
} \
+ ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
+ a, p, low, high, (closure)->do_oop##nv_suffix(p)) \
} \
return size; \
}
@@ -360,6 +404,8 @@ ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m)
+ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r)
+ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r)
int objArrayKlass::oop_adjust_pointers(oop obj) {
assert(obj->is_objArray(), "obj must be obj array");
@@ -368,12 +414,7 @@ int objArrayKlass::oop_adjust_pointers(oop obj) {
// Don't call size() or oop_size() since that is a virtual call.
int size = a->object_size();
a->adjust_header();
- oop* base = a->base();
- oop* const end = base + a->length();
- while (base < end) {
- MarkSweep::adjust_pointer(base);
- base++;
- }
+ ObjArrayKlass_OOP_ITERATE(a, p, MarkSweep::adjust_pointer(p))
return size;
}
@@ -381,51 +422,27 @@ int objArrayKlass::oop_adjust_pointers(oop obj) {
void objArrayKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
assert(!pm->depth_first(), "invariant");
assert(obj->is_objArray(), "obj must be obj array");
- // Compute oop range
- oop* curr = objArrayOop(obj)->base();
- oop* end = curr + objArrayOop(obj)->length();
- // assert(align_object_size(end - (oop*)obj) == oop_size(obj), "checking size");
- assert(align_object_size(pointer_delta(end, obj, sizeof(oop*)))
- == oop_size(obj), "checking size");
-
- // Iterate over oops
- while (curr < end) {
- if (PSScavenge::should_scavenge(*curr)) {
- pm->claim_or_forward_breadth(curr);
- }
- ++curr;
- }
+ ObjArrayKlass_OOP_ITERATE( \
+ objArrayOop(obj), p, \
+ if (PSScavenge::should_scavenge(p)) { \
+ pm->claim_or_forward_breadth(p); \
+ })
}
void objArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
assert(pm->depth_first(), "invariant");
assert(obj->is_objArray(), "obj must be obj array");
- // Compute oop range
- oop* curr = objArrayOop(obj)->base();
- oop* end = curr + objArrayOop(obj)->length();
- // assert(align_object_size(end - (oop*)obj) == oop_size(obj), "checking size");
- assert(align_object_size(pointer_delta(end, obj, sizeof(oop*)))
- == oop_size(obj), "checking size");
-
- // Iterate over oops
- while (curr < end) {
- if (PSScavenge::should_scavenge(*curr)) {
- pm->claim_or_forward_depth(curr);
- }
- ++curr;
- }
+ ObjArrayKlass_OOP_ITERATE( \
+ objArrayOop(obj), p, \
+ if (PSScavenge::should_scavenge(p)) { \
+ pm->claim_or_forward_depth(p); \
+ })
}
int objArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
assert (obj->is_objArray(), "obj must be obj array");
objArrayOop a = objArrayOop(obj);
-
- oop* const base = a->base();
- oop* const beg_oop = base;
- oop* const end_oop = base + a->length();
- for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
- PSParallelCompact::adjust_pointer(cur_oop);
- }
+ ObjArrayKlass_OOP_ITERATE(a, p, PSParallelCompact::adjust_pointer(p))
return a->object_size();
}
@@ -433,13 +450,9 @@ int objArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
HeapWord* beg_addr, HeapWord* end_addr) {
assert (obj->is_objArray(), "obj must be obj array");
objArrayOop a = objArrayOop(obj);
-
- oop* const base = a->base();
- oop* const beg_oop = MAX2((oop*)beg_addr, base);
- oop* const end_oop = MIN2((oop*)end_addr, base + a->length());
- for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
- PSParallelCompact::adjust_pointer(cur_oop);
- }
+ ObjArrayKlass_BOUNDED_OOP_ITERATE( \
+ a, p, beg_addr, end_addr, \
+ PSParallelCompact::adjust_pointer(p))
return a->object_size();
}
#endif // SERIALGC
@@ -509,3 +522,4 @@ void objArrayKlass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) {
RememberedSet::verify_old_oop(obj, p, allow_dirty, true);
*/
}
+void objArrayKlass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) {}
diff --git a/src/share/vm/oops/objArrayKlass.hpp b/src/share/vm/oops/objArrayKlass.hpp
index a2915ef0d..6fabe837b 100644
--- a/src/share/vm/oops/objArrayKlass.hpp
+++ b/src/share/vm/oops/objArrayKlass.hpp
@@ -63,6 +63,11 @@ class objArrayKlass : public arrayKlass {
// Compute class loader
oop class_loader() const { return Klass::cast(bottom_klass())->class_loader(); }
+ private:
+ // Either oop or narrowOop depending on UseCompressedOops.
+ // must be called from within objArrayKlass.cpp
+ template <class T> void do_copy(arrayOop s, T* src, arrayOop d,
+ T* dst, int length, TRAPS);
protected:
// Returns the objArrayKlass for n'th dimension.
virtual klassOop array_klass_impl(bool or_null, int n, TRAPS);
@@ -101,7 +106,9 @@ class objArrayKlass : public arrayKlass {
#define ObjArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, \
- MemRegion mr);
+ MemRegion mr); \
+ int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* blk, \
+ int start, int end);
ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DECL)
ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayKlass_OOP_OOP_ITERATE_DECL)
@@ -124,5 +131,6 @@ class objArrayKlass : public arrayKlass {
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
+ void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
};
diff --git a/src/share/vm/oops/objArrayOop.cpp b/src/share/vm/oops/objArrayOop.cpp
index c339e2cd8..c1ae6830a 100644
--- a/src/share/vm/oops/objArrayOop.cpp
+++ b/src/share/vm/oops/objArrayOop.cpp
@@ -25,4 +25,12 @@
# include "incls/_precompiled.incl"
# include "incls/_objArrayOop.cpp.incl"
-// <<this page is intentionally left blank>>
+#define ObjArrayOop_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+ \
+int objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) { \
+ SpecializationStats::record_call(); \
+ return ((objArrayKlass*)blueprint())->oop_oop_iterate_range##nv_suffix(this, blk, start, end); \
+}
+
+ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DEFN)
+ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayOop_OOP_ITERATE_DEFN)
diff --git a/src/share/vm/oops/objArrayOop.hpp b/src/share/vm/oops/objArrayOop.hpp
index b61d7d4d6..6f12c0f1f 100644
--- a/src/share/vm/oops/objArrayOop.hpp
+++ b/src/share/vm/oops/objArrayOop.hpp
@@ -26,20 +26,67 @@
// Evaluating "String arg[10]" will create an objArrayOop.
class objArrayOopDesc : public arrayOopDesc {
+ friend class objArrayKlass;
+ friend class Runtime1;
+ friend class psPromotionManager;
+
+ template <class T> T* obj_at_addr(int index) const {
+ assert(is_within_bounds(index), "index out of bounds");
+ return &((T*)base())[index];
+ }
+
public:
+ // base is the address following the header.
+ HeapWord* base() const { return (HeapWord*) arrayOopDesc::base(T_OBJECT); }
+
// Accessing
- oop obj_at(int index) const { return *obj_at_addr(index); }
- void obj_at_put(int index, oop value) { oop_store(obj_at_addr(index), value); }
- oop* base() const { return (oop*) arrayOopDesc::base(T_OBJECT); }
+ oop obj_at(int index) const {
+ // With UseCompressedOops decode the narrow oop in the objArray to an
+ // uncompressed oop. Otherwise this is simply a "*" operator.
+ if (UseCompressedOops) {
+ return load_decode_heap_oop(obj_at_addr<narrowOop>(index));
+ } else {
+ return load_decode_heap_oop(obj_at_addr<oop>(index));
+ }
+ }
+ void obj_at_put(int index, oop value) {
+ if (UseCompressedOops) {
+ oop_store(obj_at_addr<narrowOop>(index), value);
+ } else {
+ oop_store(obj_at_addr<oop>(index), value);
+ }
+ }
// Sizing
- static int header_size() { return arrayOopDesc::header_size(T_OBJECT); }
- static int object_size(int length) { return align_object_size(header_size() + length); }
- int object_size() { return object_size(length()); }
+ static int header_size() { return arrayOopDesc::header_size(T_OBJECT); }
+ int object_size() { return object_size(length()); }
+ int array_size() { return array_size(length()); }
- // Returns the address of the index'th element
- oop* obj_at_addr(int index) const {
- assert(is_within_bounds(index), "index out of bounds");
- return &base()[index];
+ static int object_size(int length) {
+ // This returns the object size in HeapWords.
+ return align_object_size(header_size() + array_size(length));
+ }
+
+ // Give size of objArrayOop in HeapWords minus the header
+ static int array_size(int length) {
+ // Without UseCompressedOops, this is simply:
+ //   length * HeapWordsPerOop
+ // With narrowOops, HeapWordsPerOop is 1/2 of a HeapWord, which truncates
+ // to 0 as an integer, so the oop elements are aligned up to wordSize.
+ const int HeapWordsPerOop = heapOopSize/HeapWordSize;
+ if (HeapWordsPerOop > 0) {
+ return length * HeapWordsPerOop;
+ } else {
+ const int OopsPerHeapWord = HeapWordSize/heapOopSize;
+ int word_len = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
+ return word_len;
+ }
}
+
+ // special iterators for index ranges, returns size of object
+#define ObjArrayOop_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
+ int oop_iterate_range(OopClosureType* blk, int start, int end);
+
+ ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DECL)
+ ALL_OOP_OOP_ITERATE_CLOSURES_3(ObjArrayOop_OOP_ITERATE_DECL)
};
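The array_size() arithmetic above is easy to sanity-check numerically. The sketch below re-derives it outside the VM, assuming 8-byte HeapWords and a heapOopSize of either 8 (uncompressed) or 4 (narrowOops); the function name is made up for illustration.

    #include <cstdio>

    const int HeapWordSize = 8;                                   // assumed LP64
    int array_size_words(int length, int heapOopSize) {
      const int HeapWordsPerOop = heapOopSize / HeapWordSize;     // 1 or 0
      if (HeapWordsPerOop > 0) {
        return length * HeapWordsPerOop;                          // uncompressed case
      }
      const int OopsPerHeapWord = HeapWordSize / heapOopSize;     // 2 with narrowOops
      return (length + OopsPerHeapWord - 1) / OopsPerHeapWord;    // align_size_up
    }

    int main() {
      // A 10-element objArray body: 10 words uncompressed, 5 words compressed.
      printf("%d %d\n", array_size_words(10, 8), array_size_words(10, 4));
      return 0;
    }

An odd length such as 11 rounds up to 6 words in the compressed case, which is the alignment the comment in array_size() refers to.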
diff --git a/src/share/vm/oops/oop.cpp b/src/share/vm/oops/oop.cpp
index 6fbecdaf3..badba6d4f 100644
--- a/src/share/vm/oops/oop.cpp
+++ b/src/share/vm/oops/oop.cpp
@@ -105,10 +105,14 @@ void oopDesc::verify() {
}
+// XXX verify_old_oop doesn't do anything (should we remove?)
void oopDesc::verify_old_oop(oop* p, bool allow_dirty) {
blueprint()->oop_verify_old_oop(this, p, allow_dirty);
}
+void oopDesc::verify_old_oop(narrowOop* p, bool allow_dirty) {
+ blueprint()->oop_verify_old_oop(this, p, allow_dirty);
+}
bool oopDesc::partially_loaded() {
return blueprint()->oop_partially_loaded(this);
@@ -130,3 +134,6 @@ intptr_t oopDesc::slow_identity_hash() {
}
VerifyOopClosure VerifyOopClosure::verify_oop;
+
+void VerifyOopClosure::do_oop(oop* p) { VerifyOopClosure::do_oop_work(p); }
+void VerifyOopClosure::do_oop(narrowOop* p) { VerifyOopClosure::do_oop_work(p); }
diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp
index 2c4b07d54..fc70d4503 100644
--- a/src/share/vm/oops/oop.hpp
+++ b/src/share/vm/oops/oop.hpp
@@ -30,12 +30,12 @@
// no virtual functions allowed
// store into oop with store check
-void oop_store(oop* p, oop v);
-void oop_store(volatile oop* p, oop v);
+template <class T> void oop_store(T* p, oop v);
+template <class T> void oop_store(volatile T* p, oop v);
// store into oop without store check
-void oop_store_without_check(oop* p, oop v);
-void oop_store_without_check(volatile oop* p, oop v);
+template <class T> void oop_store_without_check(T* p, oop v);
+template <class T> void oop_store_without_check(volatile T* p, oop v);
extern bool always_do_update_barrier;
@@ -55,7 +55,10 @@ class oopDesc {
friend class VMStructs;
private:
volatile markOop _mark;
- klassOop _klass;
+ union _metadata {
+ wideKlassOop _klass;
+ narrowOop _compressed_klass;
+ } _metadata;
// Fast access to barrier set. Must be initialized.
static BarrierSet* _bs;
@@ -73,16 +76,16 @@ class oopDesc {
// objects during a GC) -- requires a valid klass pointer
void init_mark();
- klassOop klass() const { return _klass; }
- oop* klass_addr() const { return (oop*) &_klass; }
+ klassOop klass() const;
+ oop* klass_addr();
+ narrowOop* compressed_klass_addr();
void set_klass(klassOop k);
// For when the klass pointer is being used as a linked list "next" field.
void set_klass_to_list_ptr(oop k);
- // size of object header
- static int header_size() { return sizeof(oopDesc)/HeapWordSize; }
- static int header_size_in_bytes() { return sizeof(oopDesc); }
+ // size of object header, aligned to platform wordSize
+ static int header_size() { return sizeof(oopDesc)/HeapWordSize; }
Klass* blueprint() const;
@@ -119,7 +122,6 @@ class oopDesc {
private:
// field addresses in oop
- // byte/char/bool/short fields are always stored as full words
void* field_base(int offset) const;
jbyte* byte_field_addr(int offset) const;
@@ -130,13 +132,66 @@ class oopDesc {
jlong* long_field_addr(int offset) const;
jfloat* float_field_addr(int offset) const;
jdouble* double_field_addr(int offset) const;
+ address* address_field_addr(int offset) const;
public:
- // need this as public for garbage collection
- oop* obj_field_addr(int offset) const;
-
+ // Need this as public for garbage collection.
+ template <class T> T* obj_field_addr(int offset) const;
+
+ static bool is_null(oop obj);
+ static bool is_null(narrowOop obj);
+
+ // Decode an oop pointer from a narrowOop if compressed.
+ // These are overloaded for oop and narrowOop as are the other functions
+ // below so that they can be called in template functions.
+ static oop decode_heap_oop_not_null(oop v);
+ static oop decode_heap_oop_not_null(narrowOop v);
+ static oop decode_heap_oop(oop v);
+ static oop decode_heap_oop(narrowOop v);
+
+ // Encode an oop pointer to a narrow oop. The plain versions accept a
+ // null oop pointer; the _not_null versions do not, in order to eliminate
+ // null-checking branches.
+ static narrowOop encode_heap_oop_not_null(oop v);
+ static narrowOop encode_heap_oop(oop v);
+
+ // Load an oop out of the Java heap
+ static narrowOop load_heap_oop(narrowOop* p);
+ static oop load_heap_oop(oop* p);
+
+ // Load an oop out of Java heap and decode it to an uncompressed oop.
+ static oop load_decode_heap_oop_not_null(narrowOop* p);
+ static oop load_decode_heap_oop_not_null(oop* p);
+ static oop load_decode_heap_oop(narrowOop* p);
+ static oop load_decode_heap_oop(oop* p);
+
+ // Store an oop into the heap.
+ static void store_heap_oop(narrowOop* p, narrowOop v);
+ static void store_heap_oop(oop* p, oop v);
+
+ // Encode oop if UseCompressedOops and store into the heap.
+ static void encode_store_heap_oop_not_null(narrowOop* p, oop v);
+ static void encode_store_heap_oop_not_null(oop* p, oop v);
+ static void encode_store_heap_oop(narrowOop* p, oop v);
+ static void encode_store_heap_oop(oop* p, oop v);
+
+ static void release_store_heap_oop(volatile narrowOop* p, narrowOop v);
+ static void release_store_heap_oop(volatile oop* p, oop v);
+
+ static void release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v);
+ static void release_encode_store_heap_oop_not_null(volatile oop* p, oop v);
+ static void release_encode_store_heap_oop(volatile narrowOop* p, oop v);
+ static void release_encode_store_heap_oop(volatile oop* p, oop v);
+
+ static oop atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest);
+ static oop atomic_compare_exchange_oop(oop exchange_value,
+ volatile HeapWord *dest,
+ oop compare_value);
+
+ // Access to fields in an instanceOop through these methods.
oop obj_field(int offset) const;
void obj_field_put(int offset, oop value);
+ void obj_field_raw_put(int offset, oop value);
jbyte byte_field(int offset) const;
void byte_field_put(int offset, jbyte contents);
@@ -162,6 +217,9 @@ class oopDesc {
jdouble double_field(int offset) const;
void double_field_put(int offset, jdouble contents);
+ address address_field(int offset) const;
+ void address_field_put(int offset, address contents);
+
oop obj_field_acquire(int offset) const;
void release_obj_field_put(int offset, oop value);
@@ -207,6 +265,7 @@ class oopDesc {
void verify_on(outputStream* st);
void verify();
void verify_old_oop(oop* p, bool allow_dirty);
+ void verify_old_oop(narrowOop* p, bool allow_dirty);
// tells whether this oop is partially constructed (gc during class loading)
bool partially_loaded();
@@ -228,8 +287,8 @@ class oopDesc {
bool is_gc_marked() const;
// Apply "MarkSweep::mark_and_push" to (the address of) every non-NULL
// reference field in "this".
- void follow_contents();
- void follow_header();
+ void follow_contents(void);
+ void follow_header(void);
#ifndef SERIALGC
// Parallel Scavenge
@@ -317,6 +376,7 @@ class oopDesc {
void set_displaced_mark(markOop m);
// for code generation
- static int klass_offset_in_bytes() { return offset_of(oopDesc, _klass); }
static int mark_offset_in_bytes() { return offset_of(oopDesc, _mark); }
+ static int klass_offset_in_bytes() { return offset_of(oopDesc, _metadata._klass); }
+ static int klass_gap_offset_in_bytes();
};
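The _metadata union above is the heart of the header change: the klass slot can hold either a full-width klassOop or a 32-bit narrowOop plus a 4-byte gap. The structs below are a rough stand-in (field names invented) showing that the header stays the same size on LP64, while the gap becomes reusable space that klass_gap_offset_in_bytes() points at.

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t narrowOop;

    struct UncompressedHeader {   // assumed LP64 layout: mark + klass pointer
      uintptr_t mark;
      void*     klass;
    };

    struct CompressedHeader {     // mark + 32-bit klass reference + gap
      uintptr_t mark;
      narrowOop compressed_klass;
      uint32_t  klass_gap;        // zeroed on set_klass; may hold an int field
    };

    int main() {
      printf("%zu %zu\n", sizeof(UncompressedHeader), sizeof(CompressedHeader));
      return 0;                   // both print 16 on a 64-bit build
    }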
diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp
index f01dede10..e864a65d7 100644
--- a/src/share/vm/oops/oop.inline.hpp
+++ b/src/share/vm/oops/oop.inline.hpp
@@ -25,7 +25,6 @@
// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references
-
inline void oopDesc::release_set_mark(markOop m) {
OrderAccess::release_store_ptr(&_mark, m);
}
@@ -34,17 +33,54 @@ inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}
+inline klassOop oopDesc::klass() const {
+ if (UseCompressedOops) {
+ return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
+ // The klass reference can be NULL under CMS, but compressed oops aren't supported with CMS yet.
+ } else {
+ return _metadata._klass;
+ }
+}
+
+inline int oopDesc::klass_gap_offset_in_bytes() {
+ assert(UseCompressedOops, "only applicable to compressed headers");
+ return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
+}
+
+inline oop* oopDesc::klass_addr() {
+ // Only used internally and with CMS and will not work with
+ // UseCompressedOops
+ assert(!UseCompressedOops, "only supported with uncompressed oops");
+ return (oop*) &_metadata._klass;
+}
+
+inline narrowOop* oopDesc::compressed_klass_addr() {
+ assert(UseCompressedOops, "only called by compressed oops");
+ return (narrowOop*) &_metadata._compressed_klass;
+}
+
inline void oopDesc::set_klass(klassOop k) {
// since klasses are promoted no store check is needed
assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
- oop_store_without_check((oop*) &_klass, (oop) k);
+ if (UseCompressedOops) {
+ // Zero the gap when the klass is set, by zeroing the pointer-sized
+ // part of the union.
+ _metadata._klass = NULL;
+ oop_store_without_check(compressed_klass_addr(), (oop)k);
+ } else {
+ oop_store_without_check(klass_addr(), (oop) k);
+ }
}
inline void oopDesc::set_klass_to_list_ptr(oop k) {
// This is only to be used during GC, for from-space objects, so no
// barrier is needed.
- _klass = (klassOop)k;
+ if (UseCompressedOops) {
+ _metadata._compressed_klass = encode_heap_oop_not_null(k);
+ } else {
+ _metadata._klass = (klassOop)k;
+ }
}
inline void oopDesc::init_mark() { set_mark(markOopDesc::prototype_for_object(this)); }
@@ -70,7 +106,7 @@ inline bool oopDesc::is_compiledICHolder() const { return blueprint()->oop_is_
inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }
-inline oop* oopDesc::obj_field_addr(int offset) const { return (oop*) field_base(offset); }
+template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline jbyte* oopDesc::byte_field_addr(int offset) const { return (jbyte*) field_base(offset); }
inline jchar* oopDesc::char_field_addr(int offset) const { return (jchar*) field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset) const { return (jboolean*)field_base(offset); }
@@ -79,9 +115,156 @@ inline jshort* oopDesc::short_field_addr(int offset) const { return (jshort*)
inline jlong* oopDesc::long_field_addr(int offset) const { return (jlong*) field_base(offset); }
inline jfloat* oopDesc::float_field_addr(int offset) const { return (jfloat*) field_base(offset); }
inline jdouble* oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
+inline address* oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }
+
+
+// Functions for getting and setting oops within instance objects.
+// If the oops are compressed, the type passed to these overloaded functions
+// is narrowOop. All functions are overloaded so they can be called by
+// template functions without conditionals (the compiler instantiates via
+// the right type and inlines the appropriate code).
+
+inline bool oopDesc::is_null(oop obj) { return obj == NULL; }
+inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
+
+// Algorithm for encoding and decoding oops from 64-bit pointers to a 32-bit
+// offset from the heap base. Skipping the null check saves instructions in
+// inner GC loops, so the checked and unchecked variants are kept separate.
+
+inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
+ assert(!is_null(v), "oop value can never be zero");
+ address heap_base = Universe::heap_base();
+ uint64_t result = (uint64_t)(pointer_delta((void*)v, (void*)heap_base, 1) >> LogMinObjAlignmentInBytes);
+ assert((result & 0xffffffff00000000L) == 0, "narrow oop overflow");
+ return (narrowOop)result;
+}
+
+inline narrowOop oopDesc::encode_heap_oop(oop v) {
+ return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
+}
+
+inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
+ assert(!is_null(v), "narrow oop value can never be zero");
+ address heap_base = Universe::heap_base();
+ return (oop)(void*)((uintptr_t)heap_base + ((uintptr_t)v << LogMinObjAlignmentInBytes));
+}
+
+inline oop oopDesc::decode_heap_oop(narrowOop v) {
+ return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
+}
+
+inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
+inline oop oopDesc::decode_heap_oop(oop v) { return v; }
+
+// Load an oop out of the Java heap as is without decoding.
+// Called by GC to check for null before decoding.
+inline oop oopDesc::load_heap_oop(oop* p) { return *p; }
+inline narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }
+
+// Load and decode an oop out of the Java heap into a wide oop.
+inline oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
+inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
+ return decode_heap_oop_not_null(*p);
+}
+
+// Load and decode an oop out of the heap accepting null
+inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
+inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
+ return decode_heap_oop(*p);
+}
+
+// Store already encoded heap oop into the heap.
+inline void oopDesc::store_heap_oop(oop* p, oop v) { *p = v; }
+inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
+
+// Encode and store a heap oop.
+inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
+ *p = encode_heap_oop_not_null(v);
+}
+inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
+
+// Encode and store a heap oop allowing for null.
+inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
+ *p = encode_heap_oop(v);
+}
+inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }
+
+// Store heap oop as is for volatile fields.
+inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
+ OrderAccess::release_store_ptr(p, v);
+}
+inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
+ narrowOop v) {
+ OrderAccess::release_store(p, v);
+}
+
+inline void oopDesc::release_encode_store_heap_oop_not_null(
+ volatile narrowOop* p, oop v) {
+ // heap oop is not pointer sized.
+ OrderAccess::release_store(p, encode_heap_oop_not_null(v));
+}
+
+inline void oopDesc::release_encode_store_heap_oop_not_null(
+ volatile oop* p, oop v) {
+ OrderAccess::release_store_ptr(p, v);
+}
+
+inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
+ oop v) {
+ OrderAccess::release_store_ptr(p, v);
+}
+inline void oopDesc::release_encode_store_heap_oop(
+ volatile narrowOop* p, oop v) {
+ OrderAccess::release_store(p, encode_heap_oop(v));
+}
+
-inline oop oopDesc::obj_field(int offset) const { return *obj_field_addr(offset); }
-inline void oopDesc::obj_field_put(int offset, oop value) { oop_store(obj_field_addr(offset), value); }
+// These functions are only used to exchange oop fields in instances,
+// not headers.
+inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
+ if (UseCompressedOops) {
+ // encode exchange value from oop to T
+ narrowOop val = encode_heap_oop(exchange_value);
+ narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
+ // decode old from T to oop
+ return decode_heap_oop(old);
+ } else {
+ return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
+ }
+}
+
+inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
+ volatile HeapWord *dest,
+ oop compare_value) {
+ if (UseCompressedOops) {
+ // encode exchange and compare value from oop to T
+ narrowOop val = encode_heap_oop(exchange_value);
+ narrowOop cmp = encode_heap_oop(compare_value);
+
+ narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
+ // decode old from T to oop
+ return decode_heap_oop(old);
+ } else {
+ return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
+ }
+}
+
+// To get or put an oop field of an instance, we must first check whether
+// the field is compressed and, if so, decompress it.
+inline oop oopDesc::obj_field(int offset) const {
+ return UseCompressedOops ?
+ load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
+ load_decode_heap_oop(obj_field_addr<oop>(offset));
+}
+inline void oopDesc::obj_field_put(int offset, oop value) {
+ UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
+ oop_store(obj_field_addr<oop>(offset), value);
+}
+inline void oopDesc::obj_field_raw_put(int offset, oop value) {
+ UseCompressedOops ?
+ encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
+ encode_store_heap_oop(obj_field_addr<oop>(offset), value);
+}
inline jbyte oopDesc::byte_field(int offset) const { return (jbyte) *byte_field_addr(offset); }
inline void oopDesc::byte_field_put(int offset, jbyte contents) { *byte_field_addr(offset) = (jint) contents; }
@@ -107,8 +290,21 @@ inline void oopDesc::float_field_put(int offset, jfloat contents) { *float_fie
inline jdouble oopDesc::double_field(int offset) const { return *double_field_addr(offset); }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }
-inline oop oopDesc::obj_field_acquire(int offset) const { return (oop)OrderAccess::load_ptr_acquire(obj_field_addr(offset)); }
-inline void oopDesc::release_obj_field_put(int offset, oop value) { oop_store((volatile oop*)obj_field_addr(offset), value); }
+inline address oopDesc::address_field(int offset) const { return *address_field_addr(offset); }
+inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }
+
+inline oop oopDesc::obj_field_acquire(int offset) const {
+ return UseCompressedOops ?
+ decode_heap_oop((narrowOop)
+ OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
+ : decode_heap_oop((oop)
+ OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
+}
+inline void oopDesc::release_obj_field_put(int offset, oop value) {
+ UseCompressedOops ?
+ oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
+ oop_store((volatile oop*) obj_field_addr<oop>(offset), value);
+}
inline jbyte oopDesc::byte_field_acquire(int offset) const { return OrderAccess::load_acquire(byte_field_addr(offset)); }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents) { OrderAccess::release_store(byte_field_addr(offset), contents); }
@@ -134,7 +330,6 @@ inline void oopDesc::release_float_field_put(int offset, jfloat contents) { Or
inline jdouble oopDesc::double_field_acquire(int offset) const { return OrderAccess::load_acquire(double_field_addr(offset)); }
inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }
-
inline int oopDesc::size_given_klass(Klass* klass) {
int lh = klass->layout_helper();
int s = lh >> LogHeapWordSize; // deliver size scaled by wordSize
@@ -200,7 +395,7 @@ inline int oopDesc::size_given_klass(Klass* klass) {
// technique) we will need to suitably modify the assertion.
assert((s == klass->oop_size(this)) ||
(((UseParNewGC || UseParallelGC) &&
- Universe::heap()->is_gc_active()) &&
+ Universe::heap()->is_gc_active()) &&
(is_typeArray() ||
(is_objArray() && is_forwarded()))),
"wrong array object size");
@@ -224,52 +419,58 @@ inline bool oopDesc::is_parsable() {
return blueprint()->oop_is_parsable(this);
}
-
-inline void update_barrier_set(oop *p, oop v) {
+inline void update_barrier_set(void* p, oop v) {
assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
oopDesc::bs()->write_ref_field(p, v);
}
-
-inline void oop_store(oop* p, oop v) {
+template <class T> inline void oop_store(T* p, oop v) {
if (always_do_update_barrier) {
- oop_store((volatile oop*)p, v);
+ oop_store((volatile T*)p, v);
} else {
- *p = v;
+ oopDesc::encode_store_heap_oop(p, v);
update_barrier_set(p, v);
}
}
-inline void oop_store(volatile oop* p, oop v) {
+template <class T> inline void oop_store(volatile T* p, oop v) {
// Used by release_obj_field_put, so use release_store_ptr.
- OrderAccess::release_store_ptr(p, v);
- update_barrier_set((oop *)p, v);
+ oopDesc::release_encode_store_heap_oop(p, v);
+ update_barrier_set((void*)p, v);
}
-inline void oop_store_without_check(oop* p, oop v) {
+template <class T> inline void oop_store_without_check(T* p, oop v) {
// XXX YSR FIX ME!!!
if (always_do_update_barrier) {
- oop_store(p, v);
+ oop_store(p, v);
} else {
assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
"oop store without store check failed");
- *p = v;
+ oopDesc::encode_store_heap_oop(p, v);
}
}
// When it absolutely has to get there.
-inline void oop_store_without_check(volatile oop* p, oop v) {
+template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
// XXX YSR FIX ME!!!
if (always_do_update_barrier) {
oop_store(p, v);
} else {
- assert(!Universe::heap()->barrier_set()->
- write_ref_needs_barrier((oop *)p, v),
+ assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
"oop store without store check failed");
- OrderAccess::release_store_ptr(p, v);
+ oopDesc::release_encode_store_heap_oop(p, v);
}
}
+// Intended to replace '*addr = oop' assignments where the type of addr depends
+// on UseCompressedOops (without having to remember which function to call).
+inline void oop_store_raw(HeapWord* addr, oop value) {
+ if (UseCompressedOops) {
+ oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
+ } else {
+ oopDesc::encode_store_heap_oop((oop*)addr, value);
+ }
+}
// Used only for markSweep, scavenging
inline bool oopDesc::is_gc_marked() const {
@@ -340,15 +541,17 @@ inline bool oopDesc::is_unlocked_oop() const {
if (!Universe::heap()->is_in_reserved(this)) return false;
return mark()->is_unlocked();
}
-
-
#endif // PRODUCT
inline void oopDesc::follow_header() {
- MarkSweep::mark_and_push((oop*)&_klass);
+ if (UseCompressedOops) {
+ MarkSweep::mark_and_push(compressed_klass_addr());
+ } else {
+ MarkSweep::mark_and_push(klass_addr());
+ }
}
-inline void oopDesc::follow_contents() {
+inline void oopDesc::follow_contents(void) {
assert (is_gc_marked(), "should be marked");
blueprint()->oop_follow_contents(this);
}
@@ -362,7 +565,6 @@ inline bool oopDesc::is_forwarded() const {
return mark()->is_marked();
}
-
// Used by scavengers
inline void oopDesc::forward_to(oop p) {
assert(Universe::heap()->is_in_reserved(p),
@@ -384,8 +586,9 @@ inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
-inline oop oopDesc::forwardee() const { return (oop) mark()->decode_pointer(); }
-
+inline oop oopDesc::forwardee() const {
+ return (oop) mark()->decode_pointer();
+}
inline bool oopDesc::has_displaced_mark() const {
return mark()->has_displaced_mark_helper();
@@ -432,17 +635,24 @@ inline intptr_t oopDesc::identity_hash() {
}
}
-
inline void oopDesc::oop_iterate_header(OopClosure* blk) {
- blk->do_oop((oop*)&_klass);
+ if (UseCompressedOops) {
+ blk->do_oop(compressed_klass_addr());
+ } else {
+ blk->do_oop(klass_addr());
+ }
}
-
inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
- if (mr.contains(&_klass)) blk->do_oop((oop*)&_klass);
+ if (UseCompressedOops) {
+ if (mr.contains(compressed_klass_addr())) {
+ blk->do_oop(compressed_klass_addr());
+ }
+ } else {
+ if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
+ }
}
-
inline int oopDesc::adjust_pointers() {
debug_only(int check_size = size());
int s = blueprint()->oop_adjust_pointers(this);
@@ -451,7 +661,11 @@ inline int oopDesc::adjust_pointers() {
}
inline void oopDesc::adjust_header() {
- MarkSweep::adjust_pointer((oop*)&_klass);
+ if (UseCompressedOops) {
+ MarkSweep::adjust_pointer(compressed_klass_addr());
+ } else {
+ MarkSweep::adjust_pointer(klass_addr());
+ }
}
#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
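The encode/decode pair above is plain base-and-shift arithmetic: subtract the heap base, shift right by the object alignment to get a 32-bit offset, and reverse the steps to decode. The standalone sketch below, with an invented heap base and the usual 8-byte alignment, shows why this reaches roughly 32 GB of heap: 2^32 aligned slots times 8 bytes.

    #include <cassert>
    #include <cstdint>

    typedef uint32_t narrowOop;

    const uintptr_t heap_base = 0x100000000ULL;      // assumed, for illustration
    const int LogMinObjAlignmentInBytes = 3;         // 8-byte object alignment

    narrowOop encode(void* p) {
      uint64_t offset = ((uintptr_t)p - heap_base) >> LogMinObjAlignmentInBytes;
      assert((offset & 0xffffffff00000000ULL) == 0 && "narrow oop overflow");
      return (narrowOop)offset;
    }

    void* decode(narrowOop v) {
      return (void*)(heap_base + ((uintptr_t)v << LogMinObjAlignmentInBytes));
    }

    int main() {
      void* obj = (void*)(heap_base + 0x7fffffff8ULL);   // last encodable address
      assert(decode(encode(obj)) == obj);
      return 0;
    }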
diff --git a/src/share/vm/oops/oop.pcgc.inline.hpp b/src/share/vm/oops/oop.pcgc.inline.hpp
index 13f93e27d..1db005569 100644
--- a/src/share/vm/oops/oop.pcgc.inline.hpp
+++ b/src/share/vm/oops/oop.pcgc.inline.hpp
@@ -67,8 +67,8 @@ inline void oopDesc::update_contents(ParCompactionManager* cm,
// update_header();
// The klass has moved. Is the location of the klass
// within the limits?
- if ((((HeapWord*)&_klass) >= begin_limit) &&
- (((HeapWord*)&_klass) < end_limit)) {
+ if ((((HeapWord*)&_metadata._klass) >= begin_limit) &&
+ (((HeapWord*)&_metadata._klass) < end_limit)) {
set_klass(updated_klass);
}
@@ -89,7 +89,11 @@ inline void oopDesc::follow_contents(ParCompactionManager* cm) {
// Used by parallel old GC.
inline void oopDesc::follow_header(ParCompactionManager* cm) {
- PSParallelCompact::mark_and_push(cm, (oop*)&_klass);
+ if (UseCompressedOops) {
+ PSParallelCompact::mark_and_push(cm, compressed_klass_addr());
+ } else {
+ PSParallelCompact::mark_and_push(cm, klass_addr());
+ }
}
inline oop oopDesc::forward_to_atomic(oop p) {
@@ -114,9 +118,18 @@ inline oop oopDesc::forward_to_atomic(oop p) {
}
inline void oopDesc::update_header() {
- PSParallelCompact::adjust_pointer((oop*)&_klass);
+ if (UseCompressedOops) {
+ PSParallelCompact::adjust_pointer(compressed_klass_addr());
+ } else {
+ PSParallelCompact::adjust_pointer(klass_addr());
+ }
}
inline void oopDesc::update_header(HeapWord* beg_addr, HeapWord* end_addr) {
- PSParallelCompact::adjust_pointer((oop*)&_klass, beg_addr, end_addr);
+ if (UseCompressedOops) {
+ PSParallelCompact::adjust_pointer(compressed_klass_addr(),
+ beg_addr, end_addr);
+ } else {
+ PSParallelCompact::adjust_pointer(klass_addr(), beg_addr, end_addr);
+ }
}
diff --git a/src/share/vm/oops/oopsHierarchy.hpp b/src/share/vm/oops/oopsHierarchy.hpp
index 6aab383bb..32c982327 100644
--- a/src/share/vm/oops/oopsHierarchy.hpp
+++ b/src/share/vm/oops/oopsHierarchy.hpp
@@ -26,21 +26,25 @@
// This hierarchy is a representation hierarchy, i.e. if A is a superclass
// of B, A's representation is a prefix of B's representation.
+typedef juint narrowOop; // Offset instead of address for an oop within a Java object
+typedef class klassOopDesc* wideKlassOop; // to keep the SA and the unhandled-oop
+                                          // detector happy.
+
#ifndef CHECK_UNHANDLED_OOPS
-typedef class oopDesc* oop;
+typedef class oopDesc* oop;
typedef class instanceOopDesc* instanceOop;
-typedef class methodOopDesc* methodOop;
-typedef class constMethodOopDesc* constMethodOop;
-typedef class methodDataOopDesc* methodDataOop;
-typedef class arrayOopDesc* arrayOop;
-typedef class constantPoolOopDesc* constantPoolOop;
-typedef class constantPoolCacheOopDesc* constantPoolCacheOop;
-typedef class objArrayOopDesc* objArrayOop;
-typedef class typeArrayOopDesc* typeArrayOop;
-typedef class symbolOopDesc* symbolOop;
-typedef class klassOopDesc* klassOop;
-typedef class markOopDesc* markOop;
+typedef class methodOopDesc* methodOop;
+typedef class constMethodOopDesc* constMethodOop;
+typedef class methodDataOopDesc* methodDataOop;
+typedef class arrayOopDesc* arrayOop;
+typedef class objArrayOopDesc* objArrayOop;
+typedef class typeArrayOopDesc* typeArrayOop;
+typedef class constantPoolOopDesc* constantPoolOop;
+typedef class constantPoolCacheOopDesc* constantPoolCacheOop;
+typedef class symbolOopDesc* symbolOop;
+typedef class klassOopDesc* klassOop;
+typedef class markOopDesc* markOop;
typedef class compiledICHolderOopDesc* compiledICHolderOop;
#else
@@ -172,9 +176,9 @@ class arrayKlassKlass;
class objArrayKlassKlass;
class typeArrayKlassKlass;
class arrayKlass;
-class constantPoolKlass;
-class constantPoolCacheKlass;
class objArrayKlass;
class typeArrayKlass;
-class symbolKlass;
+class constantPoolKlass;
+class constantPoolCacheKlass;
+class symbolKlass;
class compiledICHolderKlass;
diff --git a/src/share/vm/opto/buildOopMap.cpp b/src/share/vm/opto/buildOopMap.cpp
index 2116c404d..8d3adc78d 100644
--- a/src/share/vm/opto/buildOopMap.cpp
+++ b/src/share/vm/opto/buildOopMap.cpp
@@ -315,6 +315,26 @@ OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, i
}
}
+ } else if( t->isa_narrowoop() ) {
+ assert( !OptoReg::is_valid(_callees[reg]), "oop can't be callee save" );
+ // Check for a legal reg name in the oopMap and bailout if it is not.
+ if (!omap->legal_vm_reg_name(r)) {
+ regalloc->C->record_method_not_compilable("illegal oopMap register name");
+ continue;
+ }
+ if( mcall ) {
+ // Outgoing argument GC mask responsibility belongs to the callee,
+ // not the caller. Inspect the inputs to the call, to see if
+ // this live-range is one of them.
+ uint cnt = mcall->tf()->domain()->cnt();
+ uint j;
+ for( j = TypeFunc::Parms; j < cnt; j++)
+ if( mcall->in(j) == def )
+ break; // reaching def is an argument oop
+ if( j < cnt ) // arg oops don't go in GC map
+ continue; // Continue on to the next register
+ }
+ omap->set_narrowoop(r);
} else if( OptoReg::is_valid(_callees[reg])) { // callee-save?
// It's a callee-save value
assert( dup_check[_callees[reg]]==0, "trying to callee save same reg twice" );
diff --git a/src/share/vm/opto/callnode.hpp b/src/share/vm/opto/callnode.hpp
index a6cc8e528..daa572e78 100644
--- a/src/share/vm/opto/callnode.hpp
+++ b/src/share/vm/opto/callnode.hpp
@@ -725,7 +725,8 @@ public:
// Conservatively small estimate of offset of first non-header byte.
int minimum_header_size() {
- return is_AllocateArray() ? sizeof(arrayOopDesc) : sizeof(oopDesc);
+ return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
+ instanceOopDesc::base_offset_in_bytes();
}
// Return the corresponding initialization barrier (or null if none).
diff --git a/src/share/vm/opto/cfgnode.cpp b/src/share/vm/opto/cfgnode.cpp
index f68e117b2..48bf400c8 100644
--- a/src/share/vm/opto/cfgnode.cpp
+++ b/src/share/vm/opto/cfgnode.cpp
@@ -848,7 +848,7 @@ const Type *PhiNode::Value( PhaseTransform *phase ) const {
// Until we have harmony between classes and interfaces in the type
// lattice, we must tread carefully around phis which implicitly
// convert the one to the other.
- const TypeInstPtr* ttip = _type->isa_instptr();
+ const TypeInstPtr* ttip = _type->isa_narrowoop() ? _type->isa_narrowoop()->make_oopptr()->isa_instptr() :_type->isa_instptr();
bool is_intf = false;
if (ttip != NULL) {
ciKlass* k = ttip->klass();
@@ -867,7 +867,7 @@ const Type *PhiNode::Value( PhaseTransform *phase ) const {
// of all the input types. The lattice is not distributive in
// such cases. Ward off asserts in type.cpp by refusing to do
// meets between interfaces and proper classes.
- const TypeInstPtr* tiip = ti->isa_instptr();
+ const TypeInstPtr* tiip = ti->isa_narrowoop() ? ti->is_narrowoop()->make_oopptr()->isa_instptr() : ti->isa_instptr();
if (tiip) {
bool ti_is_intf = false;
ciKlass* k = tiip->klass();
@@ -924,12 +924,15 @@ const Type *PhiNode::Value( PhaseTransform *phase ) const {
// class-typed Phi and an interface flows in, it's possible that the meet &
// join report an interface back out. This isn't possible but happens
// because the type system doesn't interact well with interfaces.
- const TypeInstPtr *jtip = jt->isa_instptr();
+ const TypeInstPtr *jtip = jt->isa_narrowoop() ? jt->isa_narrowoop()->make_oopptr()->isa_instptr() : jt->isa_instptr();
if( jtip && ttip ) {
if( jtip->is_loaded() && jtip->klass()->is_interface() &&
- ttip->is_loaded() && !ttip->klass()->is_interface() )
+ ttip->is_loaded() && !ttip->klass()->is_interface() ) {
// Happens in a CTW of rt.jar, 320-341, no extra flags
- { assert(ft == ttip->cast_to_ptr_type(jtip->ptr()), ""); jt = ft; }
+ assert(ft == ttip->cast_to_ptr_type(jtip->ptr()) ||
+ ft->isa_narrowoop() && ft->isa_narrowoop()->make_oopptr() == ttip->cast_to_ptr_type(jtip->ptr()), "");
+ jt = ft;
+ }
}
if (jt != ft && jt->base() == ft->base()) {
if (jt->isa_int() &&
diff --git a/src/share/vm/opto/chaitin.cpp b/src/share/vm/opto/chaitin.cpp
index 33ca24ba5..bc31285bf 100644
--- a/src/share/vm/opto/chaitin.cpp
+++ b/src/share/vm/opto/chaitin.cpp
@@ -682,6 +682,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
break;
case Op_RegF:
case Op_RegI:
+ case Op_RegN:
case Op_RegFlags:
case 0: // not an ideal register
lrg.set_num_regs(1);
diff --git a/src/share/vm/opto/classes.hpp b/src/share/vm/opto/classes.hpp
index 0c5f53ba3..7bfcf487e 100644
--- a/src/share/vm/opto/classes.hpp
+++ b/src/share/vm/opto/classes.hpp
@@ -64,6 +64,7 @@ macro(CMoveF)
macro(CMoveI)
macro(CMoveL)
macro(CMoveP)
+macro(CmpN)
macro(CmpD)
macro(CmpD3)
macro(CmpF)
@@ -77,7 +78,9 @@ macro(CmpU)
macro(CompareAndSwapI)
macro(CompareAndSwapL)
macro(CompareAndSwapP)
+macro(CompareAndSwapN)
macro(Con)
+macro(ConN)
macro(ConD)
macro(ConF)
macro(ConI)
@@ -100,6 +103,7 @@ macro(CosD)
macro(CountedLoop)
macro(CountedLoopEnd)
macro(CreateEx)
+macro(DecodeN)
macro(DivD)
macro(DivF)
macro(DivI)
@@ -107,6 +111,7 @@ macro(DivL)
macro(DivMod)
macro(DivModI)
macro(DivModL)
+macro(EncodeP)
macro(ExpD)
macro(FastLock)
macro(FastUnlock)
@@ -133,6 +138,7 @@ macro(LoadL_unaligned)
macro(LoadPLocked)
macro(LoadLLocked)
macro(LoadP)
+macro(LoadN)
macro(LoadRange)
macro(LoadS)
macro(Lock)
@@ -201,6 +207,7 @@ macro(StoreF)
macro(StoreI)
macro(StoreL)
macro(StoreP)
+macro(StoreN)
macro(StrComp)
macro(SubD)
macro(SubF)
diff --git a/src/share/vm/opto/compile.cpp b/src/share/vm/opto/compile.cpp
index 874235175..1e3c9fb43 100644
--- a/src/share/vm/opto/compile.cpp
+++ b/src/share/vm/opto/compile.cpp
@@ -1031,6 +1031,10 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset, ta->instance_id());
}
// Arrays of known objects become arrays of unknown objects.
+ if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
+ const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
+ tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset, ta->instance_id());
+ }
if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset, ta->instance_id());
@@ -1069,7 +1073,7 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
}
// Canonicalize the holder of this field
ciInstanceKlass *k = to->klass()->as_instance_klass();
- if (offset >= 0 && offset < oopDesc::header_size() * wordSize) {
+ if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
// First handle header references such as a LoadKlassNode, even if the
// object's klass is unloaded at compile time (4965979).
tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset, to->instance_id());
@@ -1310,7 +1314,7 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr
// Check for final instance fields.
const TypeInstPtr* tinst = flat->isa_instptr();
- if (tinst && tinst->offset() >= oopDesc::header_size() * wordSize) {
+ if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
ciInstanceKlass *k = tinst->klass()->as_instance_klass();
ciField* field = k->get_field_by_offset(tinst->offset(), false);
// Set field() and is_rewritable() attributes.
@@ -1731,6 +1735,8 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {
starts_bundle = '+';
}
+ if (WizardMode) n->dump();
+
if( !n->is_Region() && // Dont print in the Assembly
!n->is_Phi() && // a few noisely useless nodes
!n->is_Proj() &&
@@ -1755,6 +1761,8 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {
// then back up and print it
if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
assert(delay != NULL, "no unconditional delay instruction");
+ if (WizardMode) delay->dump();
+
if (node_bundling(delay)->starts_bundle())
starts_bundle = '+';
if (pcs && n->_idx < pc_limit)
@@ -1819,7 +1827,7 @@ struct Final_Reshape_Counts : public StackObj {
static bool oop_offset_is_sane(const TypeInstPtr* tp) {
ciInstanceKlass *k = tp->klass()->as_instance_klass();
// Make sure the offset goes inside the instance layout.
- return (uint)tp->offset() < (uint)(oopDesc::header_size() + k->nonstatic_field_size())*wordSize;
+ return k->contains_field_offset(tp->offset());
// Note that OffsetBot and OffsetTop are very negative.
}
@@ -1946,7 +1954,9 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
case Op_CompareAndSwapI:
case Op_CompareAndSwapL:
case Op_CompareAndSwapP:
+ case Op_CompareAndSwapN:
case Op_StoreP:
+ case Op_StoreN:
case Op_LoadB:
case Op_LoadC:
case Op_LoadI:
@@ -1956,6 +1966,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
case Op_LoadPLocked:
case Op_LoadLLocked:
case Op_LoadP:
+ case Op_LoadN:
case Op_LoadRange:
case Op_LoadS: {
handle_mem:
diff --git a/src/share/vm/opto/connode.cpp b/src/share/vm/opto/connode.cpp
index de3eed6f0..e0b728663 100644
--- a/src/share/vm/opto/connode.cpp
+++ b/src/share/vm/opto/connode.cpp
@@ -35,6 +35,7 @@ uint ConNode::hash() const {
//------------------------------make-------------------------------------------
ConNode *ConNode::make( Compile* C, const Type *t ) {
+ if (t->isa_narrowoop()) return new (C, 1) ConNNode( t->is_narrowoop() );
switch( t->basic_type() ) {
case T_INT: return new (C, 1) ConINode( t->is_int() );
case T_ARRAY: return new (C, 1) ConPNode( t->is_aryptr() );
@@ -461,7 +462,8 @@ static bool can_cause_alias(Node *n, PhaseTransform *phase) {
possible_alias = n->is_Phi() ||
opc == Op_CheckCastPP ||
opc == Op_StorePConditional ||
- opc == Op_CompareAndSwapP;
+ opc == Op_CompareAndSwapP ||
+ opc == Op_CompareAndSwapN;
}
return possible_alias;
}
@@ -549,6 +551,41 @@ Node *CheckCastPPNode::Ideal(PhaseGVN *phase, bool can_reshape){
return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
}
+
+Node* DecodeNNode::Identity(PhaseTransform* phase) {
+ const Type *t = phase->type( in(1) );
+ if( t == Type::TOP ) return in(1);
+
+ if (in(1)->Opcode() == Op_EncodeP) {
+ // (DecodeN (EncodeP p)) -> p
+ return in(1)->in(1);
+ }
+ return this;
+}
+
+Node* EncodePNode::Identity(PhaseTransform* phase) {
+ const Type *t = phase->type( in(1) );
+ if( t == Type::TOP ) return in(1);
+
+ if (in(1)->Opcode() == Op_DecodeN) {
+ // (EncodeP (DecodeN p)) -> p
+ return in(1)->in(1);
+ }
+ return this;
+}
+
+
+Node* EncodePNode::encode(PhaseGVN* phase, Node* value) {
+ const Type* newtype = value->bottom_type();
+ if (newtype == TypePtr::NULL_PTR) {
+ return phase->transform(new (phase->C, 1) ConNNode(TypeNarrowOop::NULL_PTR));
+ } else {
+ return phase->transform(new (phase->C, 2) EncodePNode(value,
+ newtype->is_oopptr()->make_narrowoop()));
+ }
+}
+
+
//=============================================================================
//------------------------------Identity---------------------------------------
Node *Conv2BNode::Identity( PhaseTransform *phase ) {
diff --git a/src/share/vm/opto/connode.hpp b/src/share/vm/opto/connode.hpp
index 1c1b96a19..63204ce6b 100644
--- a/src/share/vm/opto/connode.hpp
+++ b/src/share/vm/opto/connode.hpp
@@ -78,6 +78,20 @@ public:
};
+//------------------------------ConNNode--------------------------------------
+// Simple narrow oop constants
+class ConNNode : public ConNode {
+public:
+ ConNNode( const TypeNarrowOop *t ) : ConNode(t) {}
+ virtual int Opcode() const;
+
+ static ConNNode* make( Compile *C, ciObject* con ) {
+ return new (C, 1) ConNNode( TypeNarrowOop::make_from_constant(con) );
+ }
+
+};
+
+
//------------------------------ConLNode---------------------------------------
// Simple long constants
class ConLNode : public ConNode {
@@ -254,6 +268,41 @@ public:
//virtual Node *Ideal_DU_postCCP( PhaseCCP * );
};
+
+//------------------------------EncodeP--------------------------------
+// Encodes an oop pointer into its compressed form.
+// Takes an extra argument which is the real heap base as a long which
+// may be useful for code generation in the backend.
+class EncodePNode : public TypeNode {
+ public:
+ EncodePNode(Node* value, const Type* type):
+ TypeNode(type, 2) {
+ init_req(0, NULL);
+ init_req(1, value);
+ }
+ virtual int Opcode() const;
+ virtual Node *Identity( PhaseTransform *phase );
+ virtual uint ideal_reg() const { return Op_RegN; }
+
+ static Node* encode(PhaseGVN* phase, Node* value);
+};
+
+//------------------------------DecodeN--------------------------------
+// Converts a narrow oop into a real oop ptr.
+// Takes an extra argument which is the real heap base as a long which
+// may be useful for code generation in the backend.
+class DecodeNNode : public TypeNode {
+ public:
+ DecodeNNode(Node* value, const Type* type):
+ TypeNode(type, 2) {
+ init_req(0, NULL);
+ init_req(1, value);
+ }
+ virtual int Opcode() const;
+ virtual Node *Identity( PhaseTransform *phase );
+ virtual uint ideal_reg() const { return Op_RegP; }
+};
+
//------------------------------Conv2BNode-------------------------------------
// Convert int/pointer to a Boolean. Map zero to zero, all else to 1.
class Conv2BNode : public Node {
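EncodePNode and DecodeNNode are pure conversions, so connode.cpp folds a round trip away in Identity(): (DecodeN (EncodeP p)) is just p, and vice versa. The fragment below is a toy version of that peephole on an invented two-field node type, not the C2 Node API.

    #include <cstddef>

    enum Opcode { Op_Other, Op_EncodeP, Op_DecodeN };

    struct Node {
      Opcode op;
      Node*  in1;                  // single data input in this toy model
    };

    // Identity() for a DecodeN-like node: skip an EncodeP feeding it.
    Node* decode_identity(Node* self) {
      if (self->in1 != NULL && self->in1->op == Op_EncodeP) {
        return self->in1->in1;     // (DecodeN (EncodeP p)) -> p
      }
      return self;                 // nothing to fold
    }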
diff --git a/src/share/vm/opto/escape.cpp b/src/share/vm/opto/escape.cpp
index 301be9aaa..ea5f14f33 100644
--- a/src/share/vm/opto/escape.cpp
+++ b/src/share/vm/opto/escape.cpp
@@ -1749,15 +1749,28 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
break;
}
+ case Op_ConN:
+ {
+ // assume all narrow oop constants globally escape except for null
+ PointsToNode::EscapeState es;
+ if (phase->type(n) == TypeNarrowOop::NULL_PTR)
+ es = PointsToNode::NoEscape;
+ else
+ es = PointsToNode::GlobalEscape;
+
+ add_node(n, PointsToNode::JavaObject, es, true);
+ break;
+ }
case Op_LoadKlass:
{
add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
break;
}
case Op_LoadP:
+ case Op_LoadN:
{
const Type *t = phase->type(n);
- if (t->isa_ptr() == NULL) {
+ if (!t->isa_narrowoop() && t->isa_ptr() == NULL) {
_processed.set(n->_idx);
return;
}
@@ -1847,8 +1860,12 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
break;
}
case Op_StoreP:
+ case Op_StoreN:
{
const Type *adr_type = phase->type(n->in(MemNode::Address));
+ if (adr_type->isa_narrowoop()) {
+ adr_type = adr_type->is_narrowoop()->make_oopptr();
+ }
if (adr_type->isa_oopptr()) {
add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
} else {
@@ -1870,8 +1887,12 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
}
case Op_StorePConditional:
case Op_CompareAndSwapP:
+ case Op_CompareAndSwapN:
{
const Type *adr_type = phase->type(n->in(MemNode::Address));
+ if (adr_type->isa_narrowoop()) {
+ adr_type = adr_type->is_narrowoop()->make_oopptr();
+ }
if (adr_type->isa_oopptr()) {
add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
} else {
@@ -1927,6 +1948,8 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
}
case Op_CastPP:
case Op_CheckCastPP:
+ case Op_EncodeP:
+ case Op_DecodeN:
{
int ti = n->in(1)->_idx;
if (_nodes->adr_at(ti)->node_type() == PointsToNode::JavaObject) {
diff --git a/src/share/vm/opto/graphKit.cpp b/src/share/vm/opto/graphKit.cpp
index d44caf2e3..49e05b842 100644
--- a/src/share/vm/opto/graphKit.cpp
+++ b/src/share/vm/opto/graphKit.cpp
@@ -1328,7 +1328,7 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
if (require_atomic_access && bt == T_LONG) {
ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
} else {
- ld = LoadNode::make(C, ctl, mem, adr, adr_type, t, bt);
+ ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
}
return _gvn.transform(ld);
}
@@ -1344,7 +1344,7 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
if (require_atomic_access && bt == T_LONG) {
st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
} else {
- st = StoreNode::make(C, ctl, mem, adr, adr_type, val, bt);
+ st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt);
}
st = _gvn.transform(st);
set_memory(st, adr_idx);
diff --git a/src/share/vm/opto/idealKit.cpp b/src/share/vm/opto/idealKit.cpp
index ae65319f0..437fc105a 100644
--- a/src/share/vm/opto/idealKit.cpp
+++ b/src/share/vm/opto/idealKit.cpp
@@ -345,7 +345,7 @@ Node* IdealKit::load(Node* ctl,
if (require_atomic_access && bt == T_LONG) {
ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
} else {
- ld = LoadNode::make(C, ctl, mem, adr, adr_type, t, bt);
+ ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
}
return transform(ld);
}
@@ -361,7 +361,7 @@ Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
if (require_atomic_access && bt == T_LONG) {
st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
} else {
- st = StoreNode::make(C, ctl, mem, adr, adr_type, val, bt);
+ st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt);
}
st = transform(st);
set_memory(st, adr_idx);
diff --git a/src/share/vm/opto/lcm.cpp b/src/share/vm/opto/lcm.cpp
index 5b0ddae99..7aa32b524 100644
--- a/src/share/vm/opto/lcm.cpp
+++ b/src/share/vm/opto/lcm.cpp
@@ -110,6 +110,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
case Op_LoadI:
case Op_LoadL:
case Op_LoadP:
+ case Op_LoadN:
case Op_LoadS:
case Op_LoadKlass:
case Op_LoadRange:
@@ -124,6 +125,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
case Op_StoreI:
case Op_StoreL:
case Op_StoreP:
+ case Op_StoreN:
was_store = true; // Memory op is a store op
// Stores will have their address in slot 2 (memory in slot 1).
// If the value being null-checked is in another slot, it means we
diff --git a/src/share/vm/opto/library_call.cpp b/src/share/vm/opto/library_call.cpp
index b0587edfc..2e7688ed9 100644
--- a/src/share/vm/opto/library_call.cpp
+++ b/src/share/vm/opto/library_call.cpp
@@ -1847,7 +1847,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
// See if it is a narrow oop array.
if (adr_type->isa_aryptr()) {
- if (adr_type->offset() >= objArrayOopDesc::header_size() * wordSize) {
+ if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes(type)) {
const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
if (elem_type != NULL) {
sharpened_klass = elem_type->klass();
@@ -2164,10 +2164,19 @@ bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
cas = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
break;
case T_OBJECT:
- // reference stores need a store barrier.
+ // reference stores need a store barrier.
// (They don't if CAS fails, but it isn't worth checking.)
pre_barrier(control(), base, adr, alias_idx, newval, value_type, T_OBJECT);
- cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
+#ifdef _LP64
+ if (adr->bottom_type()->is_narrow()) {
+ cas = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr,
+ EncodePNode::encode(&_gvn, newval),
+ EncodePNode::encode(&_gvn, oldval)));
+ } else
+#endif
+ {
+ cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
+ }
post_barrier(control(), cas, base, adr, alias_idx, newval, T_OBJECT, true);
break;
default:
@@ -3824,7 +3833,15 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
Node* size = _gvn.transform(alloc_siz);
// Exclude the header.
- int base_off = sizeof(oopDesc);
+ int base_off = instanceOopDesc::base_offset_in_bytes();
+ if (UseCompressedOops) {
+ // copy the header gap though.
+ Node* sptr = basic_plus_adr(src, base_off);
+ Node* dptr = basic_plus_adr(dest, base_off);
+ Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, raw_adr_type);
+ store_to_memory(control(), dptr, sval, T_INT, raw_adr_type);
+ base_off += sizeof(int);
+ }
src = basic_plus_adr(src, base_off);
dest = basic_plus_adr(dest, base_off);
end = basic_plus_adr(end, size);
@@ -4389,7 +4406,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
// Let's see if we need card marks:
if (alloc != NULL && use_ReduceInitialCardMarks()) {
// If we do not need card marks, copy using the jint or jlong stub.
- copy_type = LP64_ONLY(T_LONG) NOT_LP64(T_INT);
+ copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
"sizes agree");
}
@@ -4715,23 +4732,25 @@ LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
int to_clear = (bump_bit | clear_low);
// Align up mod 8, then store a jint zero unconditionally
// just before the mod-8 boundary.
- // This would only fail if the first array element were immediately
- // after the length field, and were also at an even offset mod 8.
- assert(((abase + bump_bit) & ~to_clear) - BytesPerInt
- >= arrayOopDesc::length_offset_in_bytes() + BytesPerInt,
- "store must not trash length field");
-
- // Bump 'start' up to (or past) the next jint boundary:
- start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(bump_bit)) );
+ if (((abase + bump_bit) & ~to_clear) - bump_bit
+ < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
+ bump_bit = 0;
+ assert((abase & to_clear) == 0, "array base must be long-aligned");
+ } else {
+ // Bump 'start' up to (or past) the next jint boundary:
+ start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(bump_bit)) );
+ assert((abase & clear_low) == 0, "array base must be int-aligned");
+ }
// Round bumped 'start' down to jlong boundary in body of array.
start = _gvn.transform( new(C,3) AndXNode(start, MakeConX(~to_clear)) );
- // Store a zero to the immediately preceding jint:
- Node* x1 = _gvn.transform( new(C,3) AddXNode(start, MakeConX(-BytesPerInt)) );
- Node* p1 = basic_plus_adr(dest, x1);
- mem = StoreNode::make(C, control(), mem, p1, adr_type, intcon(0), T_INT);
- mem = _gvn.transform(mem);
+ if (bump_bit != 0) {
+ // Store a zero to the immediately preceding jint:
+ Node* x1 = _gvn.transform( new(C,3) AddXNode(start, MakeConX(-bump_bit)) );
+ Node* p1 = basic_plus_adr(dest, x1);
+ mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT);
+ mem = _gvn.transform(mem);
+ }
}
-
Node* end = dest_size; // pre-rounded
mem = ClearArrayNode::clear_memory(control(), mem, dest,
start, end, &_gvn);
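The CompareAndSwapN path in inline_unsafe_CAS and the runtime's atomic_compare_exchange_oop follow the same recipe: encode the compare and exchange values, CAS the 32-bit cell, then decode whatever was there before. A compressed-oop CAS can be sketched outside the VM as below (heap base and shift assumed as in the earlier sketch, null handling omitted for brevity).

    #include <atomic>
    #include <cstdint>

    typedef uint32_t narrowOop;
    static const uintptr_t heap_base = 0x100000000ULL;   // assumed
    static const int shift = 3;                          // LogMinObjAlignmentInBytes

    static narrowOop encode(void* p) {
      return (narrowOop)(((uintptr_t)p - heap_base) >> shift);
    }
    static void* decode(narrowOop v) {
      return (void*)(heap_base + ((uintptr_t)v << shift));
    }

    // Encode both operands, CAS the 32-bit slot, decode the previous value.
    void* cas_oop_field(std::atomic<narrowOop>* slot, void* expected, void* newval) {
      narrowOop cmp = encode(expected);
      slot->compare_exchange_strong(cmp, encode(newval));
      return decode(cmp);          // equals 'expected' iff the CAS succeeded
    }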
diff --git a/src/share/vm/opto/loopTransform.cpp b/src/share/vm/opto/loopTransform.cpp
index de54863b5..780766fb8 100644
--- a/src/share/vm/opto/loopTransform.cpp
+++ b/src/share/vm/opto/loopTransform.cpp
@@ -1513,7 +1513,8 @@ void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
(bol->in(1)->Opcode() == Op_StoreLConditional ) ||
(bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
(bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
- (bol->in(1)->Opcode() == Op_CompareAndSwapP )))
+ (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
+ (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
return; // Allocation loops RARELY take backedge
// Find the OTHER exit path from the IF
Node* ex = iff->proj_out(1-test_con);
diff --git a/src/share/vm/opto/machnode.cpp b/src/share/vm/opto/machnode.cpp
index 8b88f00b5..70815a280 100644
--- a/src/share/vm/opto/machnode.cpp
+++ b/src/share/vm/opto/machnode.cpp
@@ -263,6 +263,13 @@ const Node* MachNode::get_base_and_disp(intptr_t &offset, const TypePtr* &adr_ty
// See if it adds up to a base + offset.
if (index != NULL) {
if (!index->is_Con()) {
+ const TypeNarrowOop* narrowoop = index->bottom_type()->isa_narrowoop();
+ if (narrowoop != NULL) {
+ // Memory references through narrow oops have a
+ // funny base so grab the type from the index.
+ adr_type = narrowoop->make_oopptr();
+ return NULL;
+ }
disp = Type::OffsetBot;
} else if (disp != Type::OffsetBot) {
const TypeX* ti = index->bottom_type()->isa_intptr_t();
diff --git a/src/share/vm/opto/macro.cpp b/src/share/vm/opto/macro.cpp
index bc785ab35..8c9934f11 100644
--- a/src/share/vm/opto/macro.cpp
+++ b/src/share/vm/opto/macro.cpp
@@ -819,7 +819,7 @@ void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_ad
Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
Node* adr = basic_plus_adr(base, offset);
const TypePtr* adr_type = TypeRawPtr::BOTTOM;
- Node* value = LoadNode::make(C, ctl, mem, adr, adr_type, value_type, bt);
+ Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt);
transform_later(value);
return value;
}
@@ -827,7 +827,7 @@ Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset,
Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
Node* adr = basic_plus_adr(base, offset);
- mem = StoreNode::make(C, ctl, mem, adr, NULL, value, bt);
+ mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt);
transform_later(mem);
return mem;
}
@@ -1270,6 +1270,13 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc,
mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
}
rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
+
+ if (UseCompressedOops) {
+ Node *zeronode = makecon(TypeInt::ZERO);
+ // Store an uncompressed zero into the klass gap to clear it; the gap can
+ // be used for primitive fields and therefore has to be zeroed.
+ rawmem = make_store(control, rawmem, object, oopDesc::klass_gap_offset_in_bytes(), zeronode, T_INT);
+ }
rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_OBJECT);
int header_size = alloc->minimum_header_size(); // conservatively small
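The klass-gap store above is easier to see against the 64-bit header layout it targets. The sketch below is illustrative only; the struct and constants are assumptions (8-byte mark word, 4-byte compressed klass pointer), not HotSpot declarations.

  // Simplified instance header under UseCompressedOops on a 64-bit VM.
  #include <cstdint>

  struct CompressedHeaderSketch {
    uint64_t mark;          // mark word, offset 0
    uint32_t narrow_klass;  // compressed klass pointer, offset 8
    uint32_t klass_gap;     // offset 12: the class layout may place a
                            // primitive field here, so allocation zeroes it
                            // with the T_INT store emitted above
  };
  // sizeof == 16; without compressed oops the klass pointer itself fills
  // offsets 8..15 and there is no gap to clear.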
@@ -1277,7 +1284,7 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc,
if (length != NULL) { // Arrays need length field
rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
// conservatively small header size:
- header_size = sizeof(arrayOopDesc);
+ header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
if (k->is_array_klass()) // we know the exact header size in most cases:
header_size = Klass::layout_helper_header_size(k->layout_helper());
@@ -1306,7 +1313,6 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc,
rawmem = init->complete_stores(control, rawmem, object,
header_size, size_in_bytes, &_igvn);
}
-
// We have no more use for this link, since the AllocateNode goes away:
init->set_req(InitializeNode::RawAddress, top());
// (If we keep the link, it just confuses the register allocator,
@@ -1705,6 +1711,8 @@ bool PhaseMacroExpand::expand_macro_nodes() {
assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
if (C->failing()) return true;
}
+
+ _igvn.set_delay_transform(false);
_igvn.optimize();
return false;
}
diff --git a/src/share/vm/opto/macro.hpp b/src/share/vm/opto/macro.hpp
index 47e30ef8f..06e28119d 100644
--- a/src/share/vm/opto/macro.hpp
+++ b/src/share/vm/opto/macro.hpp
@@ -110,7 +110,9 @@ private:
Node* length);
public:
- PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn) {}
+ PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn) {
+ _igvn.set_delay_transform(true);
+ }
bool expand_macro_nodes();
};
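A standalone sketch of the delay-transform pattern this constructor enables; the classes below are illustrative stand-ins, not the VM's PhaseIterGVN. While the flag is set, transform() only records the node, and the real work happens in one batch once the flag is cleared and optimize() runs, mirroring expand_macro_nodes() above.

  #include <vector>

  struct NodeSketch { int op; };

  class DelayingGVNSketch {
    bool delay_transform_ = false;
    std::vector<NodeSketch*> worklist_;
  public:
    void set_delay_transform(bool delay) { delay_transform_ = delay; }
    NodeSketch* transform(NodeSketch* n) {
      if (delay_transform_) {       // just register the node for later
        worklist_.push_back(n);
        return n;
      }
      return idealize(n);           // normal eager path
    }
    void optimize() {               // one batch pass over registered nodes
      for (NodeSketch* n : worklist_) idealize(n);
      worklist_.clear();
    }
  private:
    NodeSketch* idealize(NodeSketch* n) { return n; } // placeholder for GVN work
  };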
diff --git a/src/share/vm/opto/matcher.cpp b/src/share/vm/opto/matcher.cpp
index 7d9cd51ef..0374d7c7a 100644
--- a/src/share/vm/opto/matcher.cpp
+++ b/src/share/vm/opto/matcher.cpp
@@ -30,7 +30,7 @@ OptoReg::Name OptoReg::c_frame_pointer;
const int Matcher::base2reg[Type::lastype] = {
- Node::NotAMachineReg,0,0, Op_RegI, Op_RegL, 0,
+ Node::NotAMachineReg,0,0, Op_RegI, Op_RegL, 0, Op_RegN,
Node::NotAMachineReg, Node::NotAMachineReg, /* tuple, array */
Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, /* the pointers */
0, 0/*abio*/,
@@ -70,12 +70,14 @@ Matcher::Matcher( Node_List &proj_list ) :
C->set_matcher(this);
idealreg2spillmask[Op_RegI] = NULL;
+ idealreg2spillmask[Op_RegN] = NULL;
idealreg2spillmask[Op_RegL] = NULL;
idealreg2spillmask[Op_RegF] = NULL;
idealreg2spillmask[Op_RegD] = NULL;
idealreg2spillmask[Op_RegP] = NULL;
idealreg2debugmask[Op_RegI] = NULL;
+ idealreg2debugmask[Op_RegN] = NULL;
idealreg2debugmask[Op_RegL] = NULL;
idealreg2debugmask[Op_RegF] = NULL;
idealreg2debugmask[Op_RegD] = NULL;
@@ -366,17 +368,19 @@ static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
void Matcher::init_first_stack_mask() {
// Allocate storage for spill masks as masks for the appropriate load type.
- RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask)*10);
- idealreg2spillmask[Op_RegI] = &rms[0];
- idealreg2spillmask[Op_RegL] = &rms[1];
- idealreg2spillmask[Op_RegF] = &rms[2];
- idealreg2spillmask[Op_RegD] = &rms[3];
- idealreg2spillmask[Op_RegP] = &rms[4];
- idealreg2debugmask[Op_RegI] = &rms[5];
- idealreg2debugmask[Op_RegL] = &rms[6];
- idealreg2debugmask[Op_RegF] = &rms[7];
- idealreg2debugmask[Op_RegD] = &rms[8];
- idealreg2debugmask[Op_RegP] = &rms[9];
+ RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask)*12);
+ idealreg2spillmask[Op_RegN] = &rms[0];
+ idealreg2spillmask[Op_RegI] = &rms[1];
+ idealreg2spillmask[Op_RegL] = &rms[2];
+ idealreg2spillmask[Op_RegF] = &rms[3];
+ idealreg2spillmask[Op_RegD] = &rms[4];
+ idealreg2spillmask[Op_RegP] = &rms[5];
+ idealreg2debugmask[Op_RegN] = &rms[6];
+ idealreg2debugmask[Op_RegI] = &rms[7];
+ idealreg2debugmask[Op_RegL] = &rms[8];
+ idealreg2debugmask[Op_RegF] = &rms[9];
+ idealreg2debugmask[Op_RegD] = &rms[10];
+ idealreg2debugmask[Op_RegP] = &rms[11];
OptoReg::Name i;
@@ -399,6 +403,10 @@ void Matcher::init_first_stack_mask() {
C->FIRST_STACK_mask().set_AllStack();
// Make spill masks. Registers for their class, plus FIRST_STACK_mask.
+#ifdef _LP64
+ *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
+ idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
+#endif
*idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
*idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
@@ -413,6 +421,7 @@ void Matcher::init_first_stack_mask() {
// Make up debug masks. Any spill slot plus callee-save registers.
// Caller-save registers are assumed to be trashable by the various
// inline-cache fixup routines.
+ *idealreg2debugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
*idealreg2debugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
*idealreg2debugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
*idealreg2debugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
@@ -428,6 +437,7 @@ void Matcher::init_first_stack_mask() {
if( _register_save_policy[i] == 'C' ||
_register_save_policy[i] == 'A' ||
(_register_save_policy[i] == 'E' && exclude_soe) ) {
+ idealreg2debugmask[Op_RegN]->Remove(i);
idealreg2debugmask[Op_RegI]->Remove(i); // Exclude save-on-call
idealreg2debugmask[Op_RegL]->Remove(i); // registers from debug
idealreg2debugmask[Op_RegF]->Remove(i); // masks
@@ -661,6 +671,9 @@ void Matcher::init_spill_mask( Node *ret ) {
set_shared(fp);
// Compute generic short-offset Loads
+#ifdef _LP64
+ MachNode *spillCP = match_tree(new (C, 3) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
+#endif
MachNode *spillI = match_tree(new (C, 3) LoadINode(NULL,mem,fp,atp));
MachNode *spillL = match_tree(new (C, 3) LoadLNode(NULL,mem,fp,atp));
MachNode *spillF = match_tree(new (C, 3) LoadFNode(NULL,mem,fp,atp));
@@ -670,6 +683,9 @@ void Matcher::init_spill_mask( Node *ret ) {
spillD != NULL && spillP != NULL, "");
// Get the ADLC notion of the right regmask, for each basic type.
+#ifdef _LP64
+ idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
+#endif
idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
idealreg2regmask[Op_RegL] = &spillL->out_RegMask();
idealreg2regmask[Op_RegF] = &spillF->out_RegMask();
@@ -1227,6 +1243,13 @@ static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool s
if( j == max_scan ) // No post-domination before scan end?
return true; // Then break the match tree up
}
+
+ if (m->Opcode() == Op_DecodeN && m->outcnt() == 2) {
+ // These are commonly used in address expressions and can
+ // efficiently fold into them in some cases, but because they are
+ // consumed by AddP they commonly have two users.
+ if (m->raw_out(0) == m->raw_out(1) && m->raw_out(0)->Opcode() == Op_AddP) return false;
+ }
}
// Not forceably cloning. If shared, put it into a register.
@@ -1714,6 +1737,7 @@ void Matcher::find_shared( Node *n ) {
case Op_StoreI:
case Op_StoreL:
case Op_StoreP:
+ case Op_StoreN:
case Op_Store16B:
case Op_Store8B:
case Op_Store4B:
@@ -1739,6 +1763,7 @@ void Matcher::find_shared( Node *n ) {
case Op_LoadL:
case Op_LoadS:
case Op_LoadP:
+ case Op_LoadN:
case Op_LoadRange:
case Op_LoadD_unaligned:
case Op_LoadL_unaligned:
@@ -1853,7 +1878,8 @@ void Matcher::find_shared( Node *n ) {
case Op_StoreLConditional:
case Op_CompareAndSwapI:
case Op_CompareAndSwapL:
- case Op_CompareAndSwapP: { // Convert trinary to binary-tree
+ case Op_CompareAndSwapP:
+ case Op_CompareAndSwapN: { // Convert trinary to binary-tree
Node *newval = n->in(MemNode::ValueIn );
Node *oldval = n->in(LoadStoreNode::ExpectedIn);
Node *pair = new (C, 3) BinaryNode( oldval, newval );
@@ -1905,22 +1931,25 @@ void Matcher::collect_null_checks( Node *proj ) {
// During matching If's have Bool & Cmp side-by-side
BoolNode *b = iff->in(1)->as_Bool();
Node *cmp = iff->in(2);
- if( cmp->Opcode() == Op_CmpP ) {
- if( cmp->in(2)->bottom_type() == TypePtr::NULL_PTR ) {
-
- if( proj->Opcode() == Op_IfTrue ) {
- extern int all_null_checks_found;
- all_null_checks_found++;
- if( b->_test._test == BoolTest::ne ) {
- _null_check_tests.push(proj);
- _null_check_tests.push(cmp->in(1));
- }
- } else {
- assert( proj->Opcode() == Op_IfFalse, "" );
- if( b->_test._test == BoolTest::eq ) {
- _null_check_tests.push(proj);
- _null_check_tests.push(cmp->in(1));
- }
+ int opc = cmp->Opcode();
+ if (opc != Op_CmpP && opc != Op_CmpN) return;
+
+ const Type* ct = cmp->in(2)->bottom_type();
+ if (ct == TypePtr::NULL_PTR ||
+ (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
+
+ if( proj->Opcode() == Op_IfTrue ) {
+ extern int all_null_checks_found;
+ all_null_checks_found++;
+ if( b->_test._test == BoolTest::ne ) {
+ _null_check_tests.push(proj);
+ _null_check_tests.push(cmp->in(1));
+ }
+ } else {
+ assert( proj->Opcode() == Op_IfFalse, "" );
+ if( b->_test._test == BoolTest::eq ) {
+ _null_check_tests.push(proj);
+ _null_check_tests.push(cmp->in(1));
}
}
}
@@ -2038,6 +2067,7 @@ bool Matcher::post_store_load_barrier(const Node *vmb) {
xop == Op_FastLock ||
xop == Op_CompareAndSwapL ||
xop == Op_CompareAndSwapP ||
+ xop == Op_CompareAndSwapN ||
xop == Op_CompareAndSwapI)
return true;
diff --git a/src/share/vm/opto/memnode.cpp b/src/share/vm/opto/memnode.cpp
index df47ccb0e..bb9790d95 100644
--- a/src/share/vm/opto/memnode.cpp
+++ b/src/share/vm/opto/memnode.cpp
@@ -549,6 +549,10 @@ Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
adr = adr->in(AddPNode::Base);
continue;
+ case Op_DecodeN: // No change to NULL-ness, so peek thru
+ adr = adr->in(1);
+ continue;
+
case Op_CastPP:
// If the CastPP is useless, just peek on through it.
if( ccp->type(adr) == ccp->type(adr->in(1)) ) {
@@ -605,6 +609,7 @@ Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
case Op_CastX2P: // no null checks on native pointers
case Op_Parm: // 'this' pointer is not null
case Op_LoadP: // Loading from within a klass
+ case Op_LoadN: // Loading from within a klass
case Op_LoadKlass: // Loading from within a klass
case Op_ConP: // Loading from a klass
case Op_CreateEx: // Sucking up the guts of an exception oop
@@ -669,7 +674,9 @@ void LoadNode::dump_spec(outputStream *st) const {
//----------------------------LoadNode::make-----------------------------------
// Polymorphic factory method:
-LoadNode *LoadNode::make( Compile *C, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) {
+Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) {
+ Compile* C = gvn.C;
+
// sanity check the alias category against the created node type
assert(!(adr_type->isa_oopptr() &&
adr_type->offset() == oopDesc::klass_offset_in_bytes()),
@@ -687,7 +694,25 @@ LoadNode *LoadNode::make( Compile *C, Node *ctl, Node *mem, Node *adr, const Typ
case T_FLOAT: return new (C, 3) LoadFNode(ctl, mem, adr, adr_type, rt );
case T_DOUBLE: return new (C, 3) LoadDNode(ctl, mem, adr, adr_type, rt );
case T_ADDRESS: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr() );
- case T_OBJECT: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
+ case T_OBJECT:
+#ifdef _LP64
+ if (adr->bottom_type()->is_narrow()) {
+ const TypeNarrowOop* narrowtype;
+ if (rt->isa_narrowoop()) {
+ narrowtype = rt->is_narrowoop();
+ rt = narrowtype->make_oopptr();
+ } else {
+ narrowtype = rt->is_oopptr()->make_narrowoop();
+ }
+ Node* load = gvn.transform(new (C, 3) LoadNNode(ctl, mem, adr, adr_type, narrowtype));
+
+ return new (C, 2) DecodeNNode(load, rt);
+ } else
+#endif
+ {
+ assert(!adr->bottom_type()->is_narrow(), "should have got back a narrow oop");
+ return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
+ }
}
ShouldNotReachHere();
return (LoadNode*)NULL;
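On 64-bit, the factory above now emits a LoadN feeding a DecodeN whenever the address type is narrow. At run time the pair amounts to widening a 32-bit, shift-scaled offset back to a full address; the constants below (heap base and a 3-bit shift for 8-byte alignment) are assumptions for illustration.

  #include <cstdint>

  typedef uint32_t narrowOopBits;             // 32-bit compressed reference

  const uintptr_t kHeapBaseSketch = 0;        // assumed heap base
  const int       kOopShiftSketch = 3;        // 8-byte alignment => shift of 3

  // What DecodeN computes: NULL stays NULL, everything else is rebased
  // and rescaled to a 64-bit address.
  inline uintptr_t decode_narrow_sketch(narrowOopBits n) {
    return n == 0 ? 0
                  : kHeapBaseSketch + (uintptr_t(n) << kOopShiftSketch);
  }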
@@ -1743,7 +1768,9 @@ Node* LoadRangeNode::Identity( PhaseTransform *phase ) {
//=============================================================================
//---------------------------StoreNode::make-----------------------------------
// Polymorphic factory method:
-StoreNode* StoreNode::make( Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
+StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
+ Compile* C = gvn.C;
+
switch (bt) {
case T_BOOLEAN:
case T_BYTE: return new (C, 4) StoreBNode(ctl, mem, adr, adr_type, val);
@@ -1754,7 +1781,27 @@ StoreNode* StoreNode::make( Compile *C, Node* ctl, Node* mem, Node* adr, const T
case T_FLOAT: return new (C, 4) StoreFNode(ctl, mem, adr, adr_type, val);
case T_DOUBLE: return new (C, 4) StoreDNode(ctl, mem, adr, adr_type, val);
case T_ADDRESS:
- case T_OBJECT: return new (C, 4) StorePNode(ctl, mem, adr, adr_type, val);
+ case T_OBJECT:
+#ifdef _LP64
+ if (adr->bottom_type()->is_narrow() ||
+ (UseCompressedOops && val->bottom_type()->isa_klassptr() &&
+ adr->bottom_type()->isa_rawptr())) {
+ const TypePtr* type = val->bottom_type()->is_ptr();
+ Node* cp;
+ if (type->isa_oopptr()) {
+ const TypeNarrowOop* etype = type->is_oopptr()->make_narrowoop();
+ cp = gvn.transform(new (C, 2) EncodePNode(val, etype));
+ } else if (type == TypePtr::NULL_PTR) {
+ cp = gvn.transform(new (C, 1) ConNNode(TypeNarrowOop::NULL_PTR));
+ } else {
+ ShouldNotReachHere();
+ }
+ return new (C, 4) StoreNNode(ctl, mem, adr, adr_type, cp);
+ } else
+#endif
+ {
+ return new (C, 4) StorePNode(ctl, mem, adr, adr_type, val);
+ }
}
ShouldNotReachHere();
return (StoreNode*)NULL;
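The store factory is the mirror image: an oop value is first narrowed by an EncodeP node and then written with StoreN, while a NULL store becomes a ConN(TypeNarrowOop::NULL_PTR) constant. A sketch of the encode arithmetic, under the same assumed heap base and shift as above:

  #include <cstdint>

  typedef uint32_t narrowOopBits;

  const uintptr_t kHeapBaseSketch = 0;        // assumed heap base
  const int       kOopShiftSketch = 3;        // 8-byte alignment

  // What EncodeP computes: compress a 64-bit heap address to a 32-bit,
  // shift-scaled offset; NULL encodes to 0.
  inline narrowOopBits encode_narrow_sketch(uintptr_t oop_addr) {
    return oop_addr == 0
        ? 0
        : narrowOopBits((oop_addr - kHeapBaseSketch) >> kOopShiftSketch);
  }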
@@ -2136,7 +2183,7 @@ Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(offset));
adr = phase->transform(adr);
const TypePtr* atp = TypeRawPtr::BOTTOM;
- mem = StoreNode::make(C, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
+ mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
mem = phase->transform(mem);
offset += BytesPerInt;
}
@@ -2199,7 +2246,7 @@ Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
Node* adr = new (C, 4) AddPNode(dest, dest, phase->MakeConX(done_offset));
adr = phase->transform(adr);
const TypePtr* atp = TypeRawPtr::BOTTOM;
- mem = StoreNode::make(C, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
+ mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
mem = phase->transform(mem);
done_offset += BytesPerInt;
}
@@ -2556,9 +2603,7 @@ int InitializeNode::captured_store_insertion_point(intptr_t start,
assert(allocation() != NULL, "must be present");
// no negatives, no header fields:
- if (start < (intptr_t) sizeof(oopDesc)) return FAIL;
- if (start < (intptr_t) sizeof(arrayOopDesc) &&
- start < (intptr_t) allocation()->minimum_header_size()) return FAIL;
+ if (start < (intptr_t) allocation()->minimum_header_size()) return FAIL;
// after a certain size, we bail out on tracking all the stores:
intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
@@ -2895,14 +2940,14 @@ InitializeNode::coalesce_subword_stores(intptr_t header_size,
if (!split) {
++new_long;
off[nst] = offset;
- st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp,
+ st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
phase->longcon(con), T_LONG);
} else {
// Omit either if it is a zero.
if (con0 != 0) {
++new_int;
off[nst] = offset;
- st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp,
+ st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
phase->intcon(con0), T_INT);
}
if (con1 != 0) {
@@ -2910,7 +2955,7 @@ InitializeNode::coalesce_subword_stores(intptr_t header_size,
offset += BytesPerInt;
adr = make_raw_address(offset, phase);
off[nst] = offset;
- st[nst++] = StoreNode::make(C, ctl, zmem, adr, atp,
+ st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
phase->intcon(con1), T_INT);
}
}
@@ -3018,9 +3063,10 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
Node* zmem = zero_memory(); // initially zero memory state
Node* inits = zmem; // accumulating a linearized chain of inits
#ifdef ASSERT
- intptr_t last_init_off = sizeof(oopDesc); // previous init offset
- intptr_t last_init_end = sizeof(oopDesc); // previous init offset+size
- intptr_t last_tile_end = sizeof(oopDesc); // previous tile offset+size
+ intptr_t first_offset = allocation()->minimum_header_size();
+ intptr_t last_init_off = first_offset; // previous init offset
+ intptr_t last_init_end = first_offset; // previous init offset+size
+ intptr_t last_tile_end = first_offset; // previous tile offset+size
#endif
intptr_t zeroes_done = header_size;
@@ -3155,7 +3201,8 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
if (is_complete())
return true; // stores could be anything at this point
- intptr_t last_off = sizeof(oopDesc);
+ assert(allocation() != NULL, "must be present");
+ intptr_t last_off = allocation()->minimum_header_size();
for (uint i = InitializeNode::RawStores; i < req(); i++) {
Node* st = in(i);
intptr_t st_off = get_store_offset(st, phase);
diff --git a/src/share/vm/opto/memnode.hpp b/src/share/vm/opto/memnode.hpp
index c32f9d6d3..0eb0da045 100644
--- a/src/share/vm/opto/memnode.hpp
+++ b/src/share/vm/opto/memnode.hpp
@@ -137,7 +137,8 @@ public:
}
// Polymorphic factory method:
- static LoadNode* make( Compile *C, Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, BasicType bt );
+ static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
+ const TypePtr* at, const Type *rt, BasicType bt );
virtual uint hash() const; // Check the type
@@ -330,6 +331,29 @@ public:
virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};
+
+//------------------------------LoadNNode--------------------------------------
+// Load a narrow oop from memory (either object or array)
+class LoadNNode : public LoadNode {
+public:
+ LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
+ : LoadNode(c,mem,adr,at,t) {}
+ virtual int Opcode() const;
+ virtual uint ideal_reg() const { return Op_RegN; }
+ virtual int store_Opcode() const { return Op_StoreN; }
+ virtual BasicType memory_type() const { return T_NARROWOOP; }
+ // depends_only_on_test is almost always true, and needs to be almost always
+ // true to enable key hoisting & commoning optimizations. However, for the
+ // special case of RawPtr loads from TLS top & end, the control edge carries
+ // the dependence preventing hoisting past a Safepoint instead of the memory
+ // edge. (An unfortunate consequence of having Safepoints not set Raw
+ // Memory; itself an unfortunate consequence of having Nodes which produce
+ // results (new raw memory state) inside of loops preventing all manner of
+ // other optimizations). Basically, it's ugly but so is the alternative.
+ // See comment in macro.cpp, around line 125 expand_allocate_common().
+ virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
+};
+
//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
@@ -376,7 +400,8 @@ public:
}
// Polymorphic factory method:
- static StoreNode* make( Compile *C, Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, BasicType bt );
+ static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
+ const TypePtr* at, Node *val, BasicType bt );
virtual uint hash() const; // Check the type
@@ -488,6 +513,15 @@ public:
virtual BasicType memory_type() const { return T_ADDRESS; }
};
+//------------------------------StoreNNode-------------------------------------
+// Store narrow oop to memory
+class StoreNNode : public StoreNode {
+public:
+ StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
+ virtual int Opcode() const;
+ virtual BasicType memory_type() const { return T_NARROWOOP; }
+};
+
//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
@@ -600,6 +634,13 @@ public:
virtual int Opcode() const;
};
+//------------------------------CompareAndSwapNNode---------------------------
+class CompareAndSwapNNode : public LoadStoreNode {
+public:
+ CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
+ virtual int Opcode() const;
+};
+
//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
diff --git a/src/share/vm/opto/node.cpp b/src/share/vm/opto/node.cpp
index 636063405..bc62abba3 100644
--- a/src/share/vm/opto/node.cpp
+++ b/src/share/vm/opto/node.cpp
@@ -1169,6 +1169,12 @@ intptr_t Node::get_ptr() const {
return ((ConPNode*)this)->type()->is_ptr()->get_con();
}
+// Get a narrow oop constant from a ConNNode.
+intptr_t Node::get_narrowcon() const {
+ assert( Opcode() == Op_ConN, "" );
+ return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
+}
+
// Get a long constant from a ConNode.
// Return a default value if there is no apparent constant here.
const TypeLong* Node::find_long_type() const {
diff --git a/src/share/vm/opto/node.hpp b/src/share/vm/opto/node.hpp
index 6dbd2a4cd..56800ae1d 100644
--- a/src/share/vm/opto/node.hpp
+++ b/src/share/vm/opto/node.hpp
@@ -917,6 +917,7 @@ public:
// These guys are called by code generated by ADLC:
intptr_t get_ptr() const;
+ intptr_t get_narrowcon() const;
jdouble getd() const;
jfloat getf() const;
diff --git a/src/share/vm/opto/opcodes.cpp b/src/share/vm/opto/opcodes.cpp
index 533cff06c..ddf5d4066 100644
--- a/src/share/vm/opto/opcodes.cpp
+++ b/src/share/vm/opto/opcodes.cpp
@@ -29,6 +29,7 @@
const char *NodeClassNames[] = {
"Node",
"Set",
+ "RegN",
"RegI",
"RegP",
"RegF",
diff --git a/src/share/vm/opto/opcodes.hpp b/src/share/vm/opto/opcodes.hpp
index 7c3e38a15..530f9e2cb 100644
--- a/src/share/vm/opto/opcodes.hpp
+++ b/src/share/vm/opto/opcodes.hpp
@@ -27,6 +27,7 @@
enum Opcodes {
Op_Node = 0,
macro(Set) // Instruction selection match rule
+ macro(RegN) // Machine narrow oop register
macro(RegI) // Machine integer register
macro(RegP) // Machine pointer register
macro(RegF) // Machine float register
diff --git a/src/share/vm/opto/parse2.cpp b/src/share/vm/opto/parse2.cpp
index d66a68741..e6eda3453 100644
--- a/src/share/vm/opto/parse2.cpp
+++ b/src/share/vm/opto/parse2.cpp
@@ -67,12 +67,16 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
const Type* elemtype = arytype->elem();
if (UseUniqueSubclasses && result2 != NULL) {
- const TypeInstPtr* toop = elemtype->isa_instptr();
+ const Type* el = elemtype;
+ if (elemtype->isa_narrowoop()) {
+ el = elemtype->is_narrowoop()->make_oopptr();
+ }
+ const TypeInstPtr* toop = el->isa_instptr();
if (toop) {
if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
// If we load from "AbstractClass[]" we must see "ConcreteSubClass".
const Type* subklass = Type::get_const_type(toop->klass());
- elemtype = subklass->join(elemtype);
+ elemtype = subklass->join(el);
}
}
}
diff --git a/src/share/vm/opto/parse3.cpp b/src/share/vm/opto/parse3.cpp
index cfd042959..3c0e0ff31 100644
--- a/src/share/vm/opto/parse3.cpp
+++ b/src/share/vm/opto/parse3.cpp
@@ -365,7 +365,7 @@ Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, in
const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
for (jint i = 0; i < length_con; i++) {
Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1);
- intptr_t offset = header + ((intptr_t)i << LogBytesPerWord);
+ intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
Node* eaddr = basic_plus_adr(array, offset);
store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
}
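The change above swaps the scale factor from the machine word size to the heap-oop size, since object-array elements are 4 bytes wide when oops are compressed. A minimal sketch (the log values are the usual 64-bit ones and are assumptions here):

  #include <cstdint>

  const int kLogBytesPerWordSketch    = 3;  // 8-byte machine words
  const int kLogBytesPerHeapOopSketch = 2;  // 4-byte heap oops when compressed

  // Element i of an object array lives at header + (i << log2(oop size)).
  inline intptr_t elem_offset_sketch(intptr_t header, intptr_t i,
                                     bool compressed_oops) {
    int shift = compressed_oops ? kLogBytesPerHeapOopSketch
                                : kLogBytesPerWordSketch;
    return header + (i << shift);
  }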
diff --git a/src/share/vm/opto/phaseX.cpp b/src/share/vm/opto/phaseX.cpp
index f462fe7d2..e58577451 100644
--- a/src/share/vm/opto/phaseX.cpp
+++ b/src/share/vm/opto/phaseX.cpp
@@ -744,20 +744,23 @@ void PhaseGVN::dead_loop_check( Node *n ) {
//=============================================================================
//------------------------------PhaseIterGVN-----------------------------------
// Initialize hash table to fresh and clean for +VerifyOpto
-PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ) {
+PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ),
+ _delay_transform(false) {
}
//------------------------------PhaseIterGVN-----------------------------------
// Initialize with previous PhaseIterGVN info; used by PhaseCCP
PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn ) : PhaseGVN(igvn),
- _worklist( igvn->_worklist )
+ _worklist( igvn->_worklist ),
+ _delay_transform(igvn->_delay_transform)
{
}
//------------------------------PhaseIterGVN-----------------------------------
// Initialize with previous PhaseGVN info from Parser
PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
- _worklist(*C->for_igvn())
+ _worklist(*C->for_igvn()),
+ _delay_transform(false)
{
uint max;
@@ -953,6 +956,12 @@ Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
//------------------------------transform--------------------------------------
// Non-recursive: idealize Node 'n' with respect to its inputs and its value
Node *PhaseIterGVN::transform( Node *n ) {
+ if (_delay_transform) {
+ // Register the node but don't optimize for now
+ register_new_node_with_optimizer(n);
+ return n;
+ }
+
// If brand new node, make space in type array, and give it a type.
ensure_type_or_null(n);
if (type_or_null(n) == NULL) {
diff --git a/src/share/vm/opto/phaseX.hpp b/src/share/vm/opto/phaseX.hpp
index ed5526ea0..e040ccc8b 100644
--- a/src/share/vm/opto/phaseX.hpp
+++ b/src/share/vm/opto/phaseX.hpp
@@ -383,6 +383,10 @@ public:
// Phase for iteratively performing local, pessimistic GVN-style optimizations.
// and ideal transformations on the graph.
class PhaseIterGVN : public PhaseGVN {
+ private:
+ bool _delay_transform; // When true simply register the node when calling transform
+ // instead of actually optimizing it
+
// Idealize old Node 'n' with respect to its inputs and its value
virtual Node *transform_old( Node *a_node );
protected:
@@ -446,6 +450,10 @@ public:
subsume_node(old, nn);
}
+ void set_delay_transform(bool delay) {
+ _delay_transform = delay;
+ }
+
#ifndef PRODUCT
protected:
// Sub-quadratic implementation of VerifyIterativeGVN.
diff --git a/src/share/vm/opto/subnode.cpp b/src/share/vm/opto/subnode.cpp
index 2fc002892..53413dfbf 100644
--- a/src/share/vm/opto/subnode.cpp
+++ b/src/share/vm/opto/subnode.cpp
@@ -736,6 +736,75 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
}
//=============================================================================
+//------------------------------sub--------------------------------------------
+// Simplify a CmpN (compare 2 narrow oops) node, based on local information.
+// If both inputs are constants, compare them.
+const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
+ const TypePtr *r0 = t1->is_narrowoop()->make_oopptr(); // Handy access
+ const TypePtr *r1 = t2->is_narrowoop()->make_oopptr();
+
+ // Undefined inputs makes for an undefined result
+ if( TypePtr::above_centerline(r0->_ptr) ||
+ TypePtr::above_centerline(r1->_ptr) )
+ return Type::TOP;
+
+ if (r0 == r1 && r0->singleton()) {
+ // Equal pointer constants (klasses, nulls, etc.)
+ return TypeInt::CC_EQ;
+ }
+
+ // See if it is 2 unrelated classes.
+ const TypeOopPtr* p0 = r0->isa_oopptr();
+ const TypeOopPtr* p1 = r1->isa_oopptr();
+ if (p0 && p1) {
+ ciKlass* klass0 = p0->klass();
+ bool xklass0 = p0->klass_is_exact();
+ ciKlass* klass1 = p1->klass();
+ bool xklass1 = p1->klass_is_exact();
+ int kps = (p0->isa_klassptr()?1:0) + (p1->isa_klassptr()?1:0);
+ if (klass0 && klass1 &&
+ kps != 1 && // both or neither are klass pointers
+ !klass0->is_interface() && // do not trust interfaces
+ !klass1->is_interface()) {
+ // See if neither subclasses the other, or if the class on top
+ // is precise. In either of these cases, the compare must fail.
+ if (klass0->equals(klass1) || // if types are unequal but klasses are
+ !klass0->is_java_klass() || // types not part of Java language?
+ !klass1->is_java_klass()) { // types not part of Java language?
+ // Do nothing; we know nothing for imprecise types
+ } else if (klass0->is_subtype_of(klass1)) {
+ // If klass1's type is PRECISE, then we can fail.
+ if (xklass1) return TypeInt::CC_GT;
+ } else if (klass1->is_subtype_of(klass0)) {
+ // If klass0's type is PRECISE, then we can fail.
+ if (xklass0) return TypeInt::CC_GT;
+ } else { // Neither subtypes the other
+ return TypeInt::CC_GT; // ...so always fail
+ }
+ }
+ }
+
+ // Known constants can be compared exactly
+ // Null can be distinguished from any NotNull pointers
+ // Unknown inputs makes an unknown result
+ if( r0->singleton() ) {
+ intptr_t bits0 = r0->get_con();
+ if( r1->singleton() )
+ return bits0 == r1->get_con() ? TypeInt::CC_EQ : TypeInt::CC_GT;
+ return ( r1->_ptr == TypePtr::NotNull && bits0==0 ) ? TypeInt::CC_GT : TypeInt::CC;
+ } else if( r1->singleton() ) {
+ intptr_t bits1 = r1->get_con();
+ return ( r0->_ptr == TypePtr::NotNull && bits1==0 ) ? TypeInt::CC_GT : TypeInt::CC;
+ } else
+ return TypeInt::CC;
+}
+
+//------------------------------Ideal------------------------------------------
+Node *CmpNNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
+ return NULL;
+}
+
+//=============================================================================
//------------------------------Value------------------------------------------
// Simplify an CmpF (compare 2 floats ) node, based on local information.
// If both inputs are constants, compare them.
diff --git a/src/share/vm/opto/subnode.hpp b/src/share/vm/opto/subnode.hpp
index 4992a59c5..8d01e2328 100644
--- a/src/share/vm/opto/subnode.hpp
+++ b/src/share/vm/opto/subnode.hpp
@@ -163,6 +163,16 @@ public:
virtual const Type *sub( const Type *, const Type * ) const;
};
+//------------------------------CmpNNode--------------------------------------
+// Compare 2 narrow oop values, returning condition codes (-1, 0 or 1).
+class CmpNNode : public CmpNode {
+public:
+ CmpNNode( Node *in1, Node *in2 ) : CmpNode(in1,in2) {}
+ virtual int Opcode() const;
+ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+ virtual const Type *sub( const Type *, const Type * ) const;
+};
+
//------------------------------CmpLNode---------------------------------------
// Compare 2 long values, returning condition codes (-1, 0 or 1).
class CmpLNode : public CmpNode {
diff --git a/src/share/vm/opto/superword.cpp b/src/share/vm/opto/superword.cpp
index b9b8a98bb..5e3a7bb70 100644
--- a/src/share/vm/opto/superword.cpp
+++ b/src/share/vm/opto/superword.cpp
@@ -1424,6 +1424,7 @@ int SuperWord::memory_alignment(MemNode* s, int iv_adjust_in_bytes) {
//---------------------------container_type---------------------------
// Smallest type containing range of values
const Type* SuperWord::container_type(const Type* t) {
+ if (t->isa_narrowoop()) t = t->is_narrowoop()->make_oopptr();
if (t->isa_aryptr()) {
t = t->is_aryptr()->elem();
}
diff --git a/src/share/vm/opto/type.cpp b/src/share/vm/opto/type.cpp
index 333fb476b..0715ef22a 100644
--- a/src/share/vm/opto/type.cpp
+++ b/src/share/vm/opto/type.cpp
@@ -40,6 +40,7 @@ const BasicType Type::_basic_type[Type::lastype] = {
T_INT, // Int
T_LONG, // Long
T_VOID, // Half
+ T_NARROWOOP, // NarrowOop
T_ILLEGAL, // Tuple
T_ARRAY, // Array
@@ -279,15 +280,6 @@ void Type::Initialize_shared(Compile* current) {
TypeRawPtr::BOTTOM = TypeRawPtr::make( TypePtr::BotPTR );
TypeRawPtr::NOTNULL= TypeRawPtr::make( TypePtr::NotNull );
- mreg2type[Op_Node] = Type::BOTTOM;
- mreg2type[Op_Set ] = 0;
- mreg2type[Op_RegI] = TypeInt::INT;
- mreg2type[Op_RegP] = TypePtr::BOTTOM;
- mreg2type[Op_RegF] = Type::FLOAT;
- mreg2type[Op_RegD] = Type::DOUBLE;
- mreg2type[Op_RegL] = TypeLong::LONG;
- mreg2type[Op_RegFlags] = TypeInt::CC;
-
const Type **fmembar = TypeTuple::fields(0);
TypeTuple::MEMBAR = TypeTuple::make(TypeFunc::Parms+0, fmembar);
@@ -305,6 +297,19 @@ void Type::Initialize_shared(Compile* current) {
false, 0, oopDesc::klass_offset_in_bytes());
TypeOopPtr::BOTTOM = TypeOopPtr::make(TypePtr::BotPTR, OffsetBot);
+ TypeNarrowOop::NULL_PTR = TypeNarrowOop::make( TypePtr::NULL_PTR );
+ TypeNarrowOop::BOTTOM = TypeNarrowOop::make( TypeInstPtr::BOTTOM );
+
+ mreg2type[Op_Node] = Type::BOTTOM;
+ mreg2type[Op_Set ] = 0;
+ mreg2type[Op_RegN] = TypeNarrowOop::BOTTOM;
+ mreg2type[Op_RegI] = TypeInt::INT;
+ mreg2type[Op_RegP] = TypePtr::BOTTOM;
+ mreg2type[Op_RegF] = Type::FLOAT;
+ mreg2type[Op_RegD] = Type::DOUBLE;
+ mreg2type[Op_RegL] = TypeLong::LONG;
+ mreg2type[Op_RegFlags] = TypeInt::CC;
+
TypeAryPtr::RANGE = TypeAryPtr::make( TypePtr::BotPTR, TypeAry::make(Type::BOTTOM,TypeInt::POS), current->env()->Object_klass(), false, arrayOopDesc::length_offset_in_bytes());
// There is no shared klass for Object[]. See note in TypeAryPtr::klass().
TypeAryPtr::OOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInstPtr::BOTTOM,TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Type::OffsetBot);
@@ -316,6 +321,7 @@ void Type::Initialize_shared(Compile* current) {
TypeAryPtr::FLOATS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::FLOAT ,TypeInt::POS), ciTypeArrayKlass::make(T_FLOAT), true, Type::OffsetBot);
TypeAryPtr::DOUBLES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::DOUBLE ,TypeInt::POS), ciTypeArrayKlass::make(T_DOUBLE), true, Type::OffsetBot);
+ TypeAryPtr::_array_body_type[T_NARROWOOP] = NULL; // what should this be?
TypeAryPtr::_array_body_type[T_OBJECT] = TypeAryPtr::OOPS;
TypeAryPtr::_array_body_type[T_ARRAY] = TypeAryPtr::OOPS; // arrays are stored in oop arrays
TypeAryPtr::_array_body_type[T_BYTE] = TypeAryPtr::BYTES;
@@ -345,6 +351,7 @@ void Type::Initialize_shared(Compile* current) {
longpair[1] = TypeLong::LONG;
TypeTuple::LONG_PAIR = TypeTuple::make(2, longpair);
+ _const_basic_type[T_NARROWOOP] = TypeNarrowOop::BOTTOM;
_const_basic_type[T_BOOLEAN] = TypeInt::BOOL;
_const_basic_type[T_CHAR] = TypeInt::CHAR;
_const_basic_type[T_BYTE] = TypeInt::BYTE;
@@ -359,6 +366,7 @@ void Type::Initialize_shared(Compile* current) {
_const_basic_type[T_ADDRESS] = TypeRawPtr::BOTTOM; // both interpreter return addresses & random raw ptrs
_const_basic_type[T_CONFLICT]= Type::BOTTOM; // why not?
+ _zero_type[T_NARROWOOP] = TypeNarrowOop::NULL_PTR;
_zero_type[T_BOOLEAN] = TypeInt::ZERO; // false == 0
_zero_type[T_CHAR] = TypeInt::ZERO; // '\0' == 0
_zero_type[T_BYTE] = TypeInt::ZERO; // 0x00 == 0
@@ -400,6 +408,10 @@ void Type::Initialize(Compile* current) {
Type* t = (Type*)i._value;
tdic->Insert(t,t); // New Type, insert into Type table
}
+
+#ifdef ASSERT
+ verify_lastype();
+#endif
}
//------------------------------hashcons---------------------------------------
@@ -467,7 +479,19 @@ bool Type::is_nan() const {
// Compute the MEET of two types. NOT virtual. It enforces that meet is
// commutative and the lattice is symmetric.
const Type *Type::meet( const Type *t ) const {
+ if (isa_narrowoop() && t->isa_narrowoop()) {
+ const Type* result = is_narrowoop()->make_oopptr()->meet(t->is_narrowoop()->make_oopptr());
+ if (result->isa_oopptr()) {
+ return result->isa_oopptr()->make_narrowoop();
+ } else if (result == TypePtr::NULL_PTR) {
+ return TypeNarrowOop::NULL_PTR;
+ } else {
+ return result;
+ }
+ }
+
const Type *mt = xmeet(t);
+ if (isa_narrowoop() || t->isa_narrowoop()) return mt;
#ifdef ASSERT
assert( mt == t->xmeet(this), "meet not commutative" );
const Type* dual_join = mt->_dual;
@@ -556,6 +580,9 @@ const Type *Type::xmeet( const Type *t ) const {
case AryPtr:
return t->xmeet(this);
+ case NarrowOop:
+ return t->xmeet(this);
+
case Bad: // Type check
default: // Bogus type not in lattice
typerr(t);
@@ -613,6 +640,7 @@ const Type::TYPES Type::dual_type[Type::lastype] = {
Bad, // Int - handled in v-call
Bad, // Long - handled in v-call
Half, // Half
+ Bad, // NarrowOop - handled in v-call
Bad, // Tuple - handled in v-call
Bad, // Array - handled in v-call
@@ -668,11 +696,14 @@ void Type::dump_on(outputStream *st) const {
ResourceMark rm;
Dict d(cmpkey,hashkey); // Stop recursive type dumping
dump2(d,1, st);
+ if (isa_ptr() && is_ptr()->is_narrow()) {
+ st->print(" [narrow]");
+ }
}
//------------------------------data-------------------------------------------
const char * const Type::msg[Type::lastype] = {
- "bad","control","top","int:","long:","half",
+ "bad","control","top","int:","long:","half", "narrowoop:",
"tuple:", "aryptr",
"anyptr:", "rawptr:", "java:", "inst:", "ary:", "klass:",
"func", "abIO", "return_address", "memory",
@@ -735,7 +766,7 @@ void Type::typerr( const Type *t ) const {
//------------------------------isa_oop_ptr------------------------------------
// Return true if type is an oop pointer type. False for raw pointers.
static char isa_oop_ptr_tbl[Type::lastype] = {
- 0,0,0,0,0,0,0/*tuple*/, 0/*ary*/,
+ 0,0,0,0,0,0,0/*narrowoop*/,0/*tuple*/, 0/*ary*/,
0/*anyptr*/,0/*rawptr*/,1/*OopPtr*/,1/*InstPtr*/,1/*AryPtr*/,1/*KlassPtr*/,
0/*func*/,0,0/*return_address*/,0,
/*floats*/0,0,0, /*doubles*/0,0,0,
@@ -1051,6 +1082,7 @@ const Type *TypeInt::xmeet( const Type *t ) const {
case DoubleTop:
case DoubleCon:
case DoubleBot:
+ case NarrowOop:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
default: // All else is a mistake
@@ -1718,6 +1750,9 @@ inline const TypeInt* normalize_array_size(const TypeInt* size) {
//------------------------------make-------------------------------------------
const TypeAry *TypeAry::make( const Type *elem, const TypeInt *size) {
+ if (UseCompressedOops && elem->isa_oopptr()) {
+ elem = elem->is_oopptr()->make_narrowoop();
+ }
size = normalize_array_size(size);
return (TypeAry*)(new TypeAry(elem,size))->hashcons();
}
@@ -1800,14 +1835,28 @@ bool TypeAry::ary_must_be_exact() const {
// In such cases, an array built on this ary must have no subclasses.
if (_elem == BOTTOM) return false; // general array not exact
if (_elem == TOP ) return false; // inverted general array not exact
- const TypeOopPtr* toop = _elem->isa_oopptr();
+ const TypeOopPtr* toop = NULL;
+ if (UseCompressedOops) {
+ const TypeNarrowOop* noop = _elem->isa_narrowoop();
+ if (noop) toop = noop->make_oopptr()->isa_oopptr();
+ } else {
+ toop = _elem->isa_oopptr();
+ }
if (!toop) return true; // a primitive type, like int
ciKlass* tklass = toop->klass();
if (tklass == NULL) return false; // unloaded class
if (!tklass->is_loaded()) return false; // unloaded class
- const TypeInstPtr* tinst = _elem->isa_instptr();
+ const TypeInstPtr* tinst;
+ if (_elem->isa_narrowoop())
+ tinst = _elem->is_narrowoop()->make_oopptr()->isa_instptr();
+ else
+ tinst = _elem->isa_instptr();
if (tinst) return tklass->as_instance_klass()->is_final();
- const TypeAryPtr* tap = _elem->isa_aryptr();
+ const TypeAryPtr* tap;
+ if (_elem->isa_narrowoop())
+ tap = _elem->is_narrowoop()->make_oopptr()->isa_aryptr();
+ else
+ tap = _elem->isa_aryptr();
if (tap) return tap->ary()->ary_must_be_exact();
return false;
}
@@ -1864,6 +1913,7 @@ const Type *TypePtr::xmeet( const Type *t ) const {
case DoubleTop:
case DoubleCon:
case DoubleBot:
+ case NarrowOop:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
@@ -2455,6 +2505,10 @@ const TypePtr *TypeOopPtr::add_offset( int offset ) const {
return make( _ptr, xadd_offset(offset) );
}
+const TypeNarrowOop* TypeOopPtr::make_narrowoop() const {
+ return TypeNarrowOop::make(this);
+}
+
int TypeOopPtr::meet_instance(int iid) const {
if (iid == 0) {
return (_instance_id < 0) ? _instance_id : UNKNOWN_INSTANCE;
@@ -2607,6 +2661,7 @@ const Type *TypeInstPtr::xmeet( const Type *t ) const {
case DoubleTop:
case DoubleCon:
case DoubleBot:
+ case NarrowOop:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
@@ -3021,6 +3076,9 @@ static jint max_array_length(BasicType etype) {
jint res = cache;
if (res == 0) {
switch (etype) {
+ case T_NARROWOOP:
+ etype = T_OBJECT;
+ break;
case T_CONFLICT:
case T_ILLEGAL:
case T_VOID:
@@ -3093,6 +3151,7 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const {
case DoubleTop:
case DoubleCon:
case DoubleBot:
+ case NarrowOop:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
@@ -3293,6 +3352,124 @@ const TypePtr *TypeAryPtr::add_offset( int offset ) const {
//=============================================================================
+const TypeNarrowOop *TypeNarrowOop::BOTTOM;
+const TypeNarrowOop *TypeNarrowOop::NULL_PTR;
+
+
+const TypeNarrowOop* TypeNarrowOop::make(const TypePtr* type) {
+ return (const TypeNarrowOop*)(new TypeNarrowOop(type))->hashcons();
+}
+
+//------------------------------hash-------------------------------------------
+// Type-specific hashing function.
+int TypeNarrowOop::hash(void) const {
+ return _ooptype->hash() + 7;
+}
+
+
+bool TypeNarrowOop::eq( const Type *t ) const {
+ const TypeNarrowOop* tc = t->isa_narrowoop();
+ if (tc != NULL) {
+ if (_ooptype->base() != tc->_ooptype->base()) {
+ return false;
+ }
+ return tc->_ooptype->eq(_ooptype);
+ }
+ return false;
+}
+
+bool TypeNarrowOop::singleton(void) const { // TRUE if type is a singleton
+ return _ooptype->singleton();
+}
+
+bool TypeNarrowOop::empty(void) const {
+ return _ooptype->empty();
+}
+
+//------------------------------meet-------------------------------------------
+// Compute the MEET of two types. It returns a new Type object.
+const Type *TypeNarrowOop::xmeet( const Type *t ) const {
+ // Perform a fast test for common case; meeting the same types together.
+ if( this == t ) return this; // Meeting same type-rep?
+
+
+ // Current "this->_base" is OopPtr
+ switch (t->base()) { // switch on original type
+
+ case Int: // Mixing ints & oops happens when javac
+ case Long: // reuses local variables
+ case FloatTop:
+ case FloatCon:
+ case FloatBot:
+ case DoubleTop:
+ case DoubleCon:
+ case DoubleBot:
+ case Bottom: // Ye Olde Default
+ return Type::BOTTOM;
+ case Top:
+ return this;
+
+ case NarrowOop: {
+ const Type* result = _ooptype->xmeet(t->is_narrowoop()->make_oopptr());
+ if (result->isa_ptr()) {
+ return TypeNarrowOop::make(result->is_ptr());
+ }
+ return result;
+ }
+
+ default: // All else is a mistake
+ typerr(t);
+
+ case RawPtr:
+ case AnyPtr:
+ case OopPtr:
+ case InstPtr:
+ case KlassPtr:
+ case AryPtr:
+ typerr(t);
+ return Type::BOTTOM;
+
+ } // End of switch
+}
+
+const Type *TypeNarrowOop::xdual() const { // Compute dual right now.
+ const TypePtr* odual = _ooptype->dual()->is_ptr();
+ return new TypeNarrowOop(odual);
+}
+
+const Type *TypeNarrowOop::filter( const Type *kills ) const {
+ if (kills->isa_narrowoop()) {
+ const Type* ft =_ooptype->filter(kills->is_narrowoop()->_ooptype);
+ if (ft->empty())
+ return Type::TOP; // Canonical empty value
+ if (ft->isa_ptr()) {
+ return make(ft->isa_ptr());
+ }
+ return ft;
+ } else if (kills->isa_ptr()) {
+ const Type* ft = _ooptype->join(kills);
+ if (ft->empty())
+ return Type::TOP; // Canonical empty value
+ return ft;
+ } else {
+ return Type::TOP;
+ }
+}
+
+
+intptr_t TypeNarrowOop::get_con() const {
+ return _ooptype->get_con();
+}
+
+#ifndef PRODUCT
+void TypeNarrowOop::dump2( Dict & d, uint depth, outputStream *st ) const {
+ tty->print("narrowoop: ");
+ _ooptype->dump2(d, depth, st);
+}
+#endif
+
+
+//=============================================================================
// Convenience common pre-built types.
// Not-null object klass or below
@@ -3341,28 +3518,33 @@ ciKlass* TypeAryPtr::klass() const {
ciKlass* k_ary = NULL;
const TypeInstPtr *tinst;
const TypeAryPtr *tary;
+ const Type* el = elem();
+ if (el->isa_narrowoop()) {
+ el = el->is_narrowoop()->make_oopptr();
+ }
+
// Get element klass
- if ((tinst = elem()->isa_instptr()) != NULL) {
+ if ((tinst = el->isa_instptr()) != NULL) {
// Compute array klass from element klass
k_ary = ciObjArrayKlass::make(tinst->klass());
- } else if ((tary = elem()->isa_aryptr()) != NULL) {
+ } else if ((tary = el->isa_aryptr()) != NULL) {
// Compute array klass from element klass
ciKlass* k_elem = tary->klass();
// If element type is something like bottom[], k_elem will be null.
if (k_elem != NULL)
k_ary = ciObjArrayKlass::make(k_elem);
- } else if ((elem()->base() == Type::Top) ||
- (elem()->base() == Type::Bottom)) {
+ } else if ((el->base() == Type::Top) ||
+ (el->base() == Type::Bottom)) {
// element type of Bottom occurs from meet of basic type
// and object; Top occurs when doing join on Bottom.
// Leave k_ary at NULL.
} else {
// Cannot compute array klass directly from basic type,
// since subtypes of TypeInt all have basic type T_INT.
- assert(!elem()->isa_int(),
+ assert(!el->isa_int(),
"integral arrays must be pre-equipped with a class");
// Compute array klass directly from basic type
- k_ary = ciTypeArrayKlass::make(elem()->basic_type());
+ k_ary = ciTypeArrayKlass::make(el->basic_type());
}
if( this != TypeAryPtr::OOPS )
@@ -3710,7 +3892,7 @@ void TypeFunc::dump2( Dict &d, uint depth, outputStream *st ) const {
//------------------------------print_flattened--------------------------------
// Print a 'flattened' signature
static const char * const flat_type_msg[Type::lastype] = {
- "bad","control","top","int","long","_",
+ "bad","control","top","int","long","_", "narrowoop",
"tuple:", "array:",
"ptr", "rawptr", "ptr", "ptr", "ptr", "ptr",
"func", "abIO", "return_address", "mem",
diff --git a/src/share/vm/opto/type.hpp b/src/share/vm/opto/type.hpp
index c68205f88..570a98582 100644
--- a/src/share/vm/opto/type.hpp
+++ b/src/share/vm/opto/type.hpp
@@ -41,6 +41,7 @@ class TypeD;
class TypeF;
class TypeInt;
class TypeLong;
+class TypeNarrowOop;
class TypeAry;
class TypeTuple;
class TypePtr;
@@ -64,6 +65,7 @@ public:
Int, // Integer range (lo-hi)
Long, // Long integer range (lo-hi)
Half, // Placeholder half of doubleword
+ NarrowOop, // Compressed oop pointer
Tuple, // Method signature or object layout
Array, // Array types
@@ -188,6 +190,11 @@ public:
// Currently, it also works around limitations involving interface types.
virtual const Type *filter( const Type *kills ) const;
+ // Returns true if this pointer points at memory which contains
+ // compressed oop references. In 32-bit builds it's non-virtual
+ // since we don't support compressed oops at all in that mode.
+ LP64_ONLY(virtual) bool is_narrow() const { return false; }
+
// Convenience access
float getf() const;
double getd() const;
@@ -204,15 +211,18 @@ public:
const TypeAry *is_ary() const; // Array, NOT array pointer
const TypePtr *is_ptr() const; // Asserts it is a ptr type
const TypePtr *isa_ptr() const; // Returns NULL if not ptr type
- const TypeRawPtr *is_rawptr() const; // NOT Java oop
- const TypeOopPtr *isa_oopptr() const; // Returns NULL if not ptr type
- const TypeKlassPtr *isa_klassptr() const; // Returns NULL if not KlassPtr
- const TypeKlassPtr *is_klassptr() const; // assert if not KlassPtr
- const TypeOopPtr *is_oopptr() const; // Java-style GC'd pointer
- const TypeInstPtr *isa_instptr() const; // Returns NULL if not InstPtr
- const TypeInstPtr *is_instptr() const; // Instance
- const TypeAryPtr *isa_aryptr() const; // Returns NULL if not AryPtr
- const TypeAryPtr *is_aryptr() const; // Array oop
+ const TypeRawPtr *isa_rawptr() const; // NOT Java oop
+ const TypeRawPtr *is_rawptr() const; // Asserts is rawptr
+ const TypeNarrowOop *is_narrowoop() const; // Java-style GC'd pointer
+ const TypeNarrowOop *isa_narrowoop() const; // Returns NULL if not oop ptr type
+ const TypeOopPtr *isa_oopptr() const; // Returns NULL if not oop ptr type
+ const TypeOopPtr *is_oopptr() const; // Java-style GC'd pointer
+ const TypeKlassPtr *isa_klassptr() const; // Returns NULL if not KlassPtr
+ const TypeKlassPtr *is_klassptr() const; // assert if not KlassPtr
+ const TypeInstPtr *isa_instptr() const; // Returns NULL if not InstPtr
+ const TypeInstPtr *is_instptr() const; // Instance
+ const TypeAryPtr *isa_aryptr() const; // Returns NULL if not AryPtr
+ const TypeAryPtr *is_aryptr() const; // Array oop
virtual bool is_finite() const; // Has a finite value
virtual bool is_nan() const; // Is not a number (NaN)
@@ -540,6 +550,7 @@ public:
// Otherwise the _base will indicate which subset of pointers is affected,
// and the class will be inherited from.
class TypePtr : public Type {
+ friend class TypeNarrowOop;
public:
enum PTR { TopPTR, AnyNull, Constant, Null, NotNull, BotPTR, lastPTR };
protected:
@@ -701,6 +712,15 @@ public:
virtual const TypePtr *add_offset( int offset ) const;
+ // returns the equivalent compressed version of this pointer type
+ virtual const TypeNarrowOop* make_narrowoop() const;
+
+#ifdef _LP64
+ virtual bool is_narrow() const {
+ return (UseCompressedOops && _offset != 0);
+ }
+#endif
+
virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now.
@@ -822,6 +842,12 @@ public:
virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now.
+#ifdef _LP64
+ virtual bool is_narrow() const {
+ return (UseCompressedOops && klass() != NULL && _offset != 0);
+ }
+#endif
+
// Convenience common pre-built types.
static const TypeAryPtr *RANGE;
static const TypeAryPtr *OOPS;
@@ -874,6 +900,18 @@ public:
virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now.
+#ifdef _LP64
+ // Perm objects don't use compressed references, except for static fields
+ // which are currently compressed
+ virtual bool is_narrow() const {
+ if (UseCompressedOops && _offset != 0 && _klass->is_instance_klass()) {
+ ciInstanceKlass* ik = _klass->as_instance_klass();
+ return ik != NULL && ik->get_field_by_offset(_offset, true) != NULL;
+ }
+ return false;
+ }
+#endif
+
// Convenience common pre-built types.
static const TypeKlassPtr* OBJECT; // Not-null object klass or below
static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same
@@ -882,6 +920,56 @@ public:
#endif
};
+//------------------------------TypeNarrowOop----------------------------------------
+// A compressed reference to some kind of Oop. This type wraps around
+// a preexisting TypeOopPtr and forwards most of its operations to
+// the underlying type. Its only real purpose is to track the
+// oopness of the compressed oop value when we expose the conversion
+// between the normal and the compressed form.
+class TypeNarrowOop : public Type {
+protected:
+ const TypePtr* _ooptype;
+
+ TypeNarrowOop( const TypePtr* ooptype): Type(NarrowOop),
+ _ooptype(ooptype) {
+ assert(ooptype->offset() == 0 ||
+ ooptype->offset() == OffsetBot ||
+ ooptype->offset() == OffsetTop, "no real offsets");
+ }
+public:
+ virtual bool eq( const Type *t ) const;
+ virtual int hash() const; // Type specific hashing
+ virtual bool singleton(void) const; // TRUE if type is a singleton
+
+ virtual const Type *xmeet( const Type *t ) const;
+ virtual const Type *xdual() const; // Compute dual right now.
+
+ virtual intptr_t get_con() const;
+
+ // Do not allow interface-vs.-noninterface joins to collapse to top.
+ virtual const Type *filter( const Type *kills ) const;
+
+ virtual bool empty(void) const; // TRUE if type is vacuous
+
+ static const TypeNarrowOop *make( const TypePtr* type);
+
+ static const TypeNarrowOop* make_from_constant(ciObject* con) {
+ return make(TypeOopPtr::make_from_constant(con));
+ }
+
+ // returns the equivalent oopptr type for this compressed pointer
+ virtual const TypePtr *make_oopptr() const {
+ return _ooptype;
+ }
+
+ static const TypeNarrowOop *BOTTOM;
+ static const TypeNarrowOop *NULL_PTR;
+
+#ifndef PRODUCT
+ virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
+#endif
+};
+
//------------------------------TypeFunc---------------------------------------
// Class of Array Types
class TypeFunc : public Type {
@@ -1002,6 +1090,10 @@ inline const TypeOopPtr *Type::isa_oopptr() const {
return (_base >= OopPtr && _base <= KlassPtr) ? (TypeOopPtr*)this : NULL;
}
+inline const TypeRawPtr *Type::isa_rawptr() const {
+ return (_base == RawPtr) ? (TypeRawPtr*)this : NULL;
+}
+
inline const TypeRawPtr *Type::is_rawptr() const {
assert( _base == RawPtr, "Not a raw pointer" );
return (TypeRawPtr*)this;
@@ -1025,6 +1117,17 @@ inline const TypeAryPtr *Type::is_aryptr() const {
return (TypeAryPtr*)this;
}
+inline const TypeNarrowOop *Type::is_narrowoop() const {
+ // OopPtr is the first and KlassPtr the last, with no non-oops between.
+ assert(_base == NarrowOop, "Not a narrow oop" ) ;
+ return (TypeNarrowOop*)this;
+}
+
+inline const TypeNarrowOop *Type::isa_narrowoop() const {
+ // OopPtr is the first and KlassPtr the last, with no non-oops between.
+ return (_base == NarrowOop) ? (TypeNarrowOop*)this : NULL;
+}
+
inline const TypeKlassPtr *Type::isa_klassptr() const {
return (_base == KlassPtr) ? (TypeKlassPtr*)this : NULL;
}
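TypeNarrowOop is deliberately a thin wrapper: it holds the full-width TypePtr, forwards queries such as singleton() and hash() to it, and exposes make_oopptr() to get back out. A standalone sketch of that shape, using stand-in classes rather than the VM's Type hierarchy:

  // Illustrative stand-ins; not the VM's Type classes.
  class OopTypeSketch {
  public:
    bool singleton() const { return false; }
  };

  class NarrowOopTypeSketch {
    const OopTypeSketch* _ooptype;   // the wrapped full-width pointer type
  public:
    explicit NarrowOopTypeSketch(const OopTypeSketch* t) : _ooptype(t) {}
    const OopTypeSketch* make_oopptr() const { return _ooptype; }  // unwrap
    bool singleton() const { return _ooptype->singleton(); }       // forward
  };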
diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp
index 6f31afc98..91fb8cb3c 100644
--- a/src/share/vm/prims/jni.cpp
+++ b/src/share/vm/prims/jni.cpp
@@ -135,7 +135,10 @@ intptr_t jfieldIDWorkaround::encode_klass_hash(klassOop k, intptr_t offset) {
if (offset <= small_offset_mask) {
klassOop field_klass = k;
klassOop super_klass = Klass::cast(field_klass)->super();
- while (instanceKlass::cast(super_klass)->contains_field_offset(offset)) {
+ // With compressed oops, the super-most class with nonstatic fields may
+ // be the owner of fields embedded in the header.
+ while (instanceKlass::cast(super_klass)->has_nonstatic_fields() &&
+ instanceKlass::cast(super_klass)->contains_field_offset(offset)) {
field_klass = super_klass; // super contains the field also
super_klass = Klass::cast(field_klass)->super();
}
diff --git a/src/share/vm/prims/jvmtiTagMap.cpp b/src/share/vm/prims/jvmtiTagMap.cpp
index 23e46ede8..30d9fee46 100644
--- a/src/share/vm/prims/jvmtiTagMap.cpp
+++ b/src/share/vm/prims/jvmtiTagMap.cpp
@@ -2662,6 +2662,7 @@ class SimpleRootsClosure : public OopClosure {
_continue = CallbackInvoker::report_simple_root(kind, o);
}
+ virtual void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
};
// A supporting closure used to process JNI locals
@@ -2704,6 +2705,7 @@ class JNILocalRootsClosure : public OopClosure {
// invoke the callback
_continue = CallbackInvoker::report_jni_local_root(_thread_tag, _tid, _depth, _method, o);
}
+ virtual void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
};
@@ -2878,9 +2880,11 @@ inline bool VM_HeapWalkOperation::iterate_over_type_array(oop o) {
}
// verify that a static oop field is in range
-static inline bool verify_static_oop(instanceKlass* ik, oop* obj_p) {
- oop* start = ik->start_of_static_fields();
- oop* end = start + ik->static_oop_field_size();
+static inline bool verify_static_oop(instanceKlass* ik,
+ klassOop k, int offset) {
+ address obj_p = (address)k + offset;
+ address start = (address)ik->start_of_static_fields();
+ address end = start + (ik->static_oop_field_size() * heapOopSize);
assert(end >= start, "sanity check");
if (obj_p >= start && obj_p < end) {
@@ -2981,10 +2985,8 @@ inline bool VM_HeapWalkOperation::iterate_over_class(klassOop k) {
ClassFieldDescriptor* field = field_map->field_at(i);
char type = field->field_type();
if (!is_primitive_field_type(type)) {
- address addr = (address)k + field->field_offset();
- oop* f = (oop*)addr;
- assert(verify_static_oop(ik, f), "sanity check");
- oop fld_o = *f;
+ oop fld_o = k->obj_field(field->field_offset());
+ assert(verify_static_oop(ik, k, field->field_offset()), "sanity check");
if (fld_o != NULL) {
int slot = field->field_index();
if (!CallbackInvoker::report_static_field_reference(mirror, fld_o, slot)) {
@@ -3026,9 +3028,7 @@ inline bool VM_HeapWalkOperation::iterate_over_object(oop o) {
ClassFieldDescriptor* field = field_map->field_at(i);
char type = field->field_type();
if (!is_primitive_field_type(type)) {
- address addr = (address)o + field->field_offset();
- oop* f = (oop*)addr;
- oop fld_o = *f;
+ oop fld_o = o->obj_field(field->field_offset());
if (fld_o != NULL) {
// reflection code may have a reference to a klassOop.
// - see sun.reflect.UnsafeStaticFieldAccessorImpl and sun.misc.Unsafe
diff --git a/src/share/vm/prims/unsafe.cpp b/src/share/vm/prims/unsafe.cpp
index adea25098..899a99296 100644
--- a/src/share/vm/prims/unsafe.cpp
+++ b/src/share/vm/prims/unsafe.cpp
@@ -100,7 +100,7 @@ inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) {
assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
if (byte_offset == (jint)byte_offset) {
void* ptr_plus_disp = (address)p + byte_offset;
- assert((void*)p->obj_field_addr((jint)byte_offset) == ptr_plus_disp,
+ assert((void*)p->obj_field_addr<oop>((jint)byte_offset) == ptr_plus_disp,
"raw [ptr+disp] must be consistent with oop::field_base");
}
}
@@ -146,13 +146,36 @@ jint Unsafe_invocation_key_to_method_slot(jint key) {
*(volatile type_name*)index_oop_from_field_offset_long(p, offset) = x; \
OrderAccess::fence();
+// Macros for oops that check UseCompressedOops
+
+#define GET_OOP_FIELD(obj, offset, v) \
+ oop p = JNIHandles::resolve(obj); \
+ oop v; \
+ if (UseCompressedOops) { \
+ narrowOop n = *(narrowOop*)index_oop_from_field_offset_long(p, offset); \
+ v = oopDesc::decode_heap_oop(n); \
+ } else { \
+ v = *(oop*)index_oop_from_field_offset_long(p, offset); \
+ }
+
+#define GET_OOP_FIELD_VOLATILE(obj, offset, v) \
+ oop p = JNIHandles::resolve(obj); \
+ volatile oop v; \
+ if (UseCompressedOops) { \
+ volatile narrowOop n = *(volatile narrowOop*)index_oop_from_field_offset_long(p, offset); \
+ v = oopDesc::decode_heap_oop(n); \
+ } else { \
+ v = *(volatile oop*)index_oop_from_field_offset_long(p, offset); \
+ }
+
+
// Get/SetObject must be special-cased, since it works with handles.
// The xxx140 variants for backward compatibility do not allow a full-width offset.
UNSAFE_ENTRY(jobject, Unsafe_GetObject140(JNIEnv *env, jobject unsafe, jobject obj, jint offset))
UnsafeWrapper("Unsafe_GetObject");
if (obj == NULL) THROW_0(vmSymbols::java_lang_NullPointerException());
- GET_FIELD(obj, offset, oop, v);
+ GET_OOP_FIELD(obj, offset, v)
return JNIHandles::make_local(env, v);
UNSAFE_END
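
For orientation, a rough standalone model of the encode/decode step the GET_OOP_FIELD macro relies on; heap_base and the 3-bit alignment shift are assumptions about the usual layout, not values taken from this patch:

    #include <cstdint>
    #include <cstddef>

    typedef uint32_t narrowOop_t;              // 32-bit compressed reference
    static char* heap_base = NULL;             // set once at VM startup (assumed)
    static const int oop_shift = 3;            // 8-byte object alignment (assumed)

    inline void* decode_heap_oop(narrowOop_t n) {
      if (n == 0) return NULL;                 // compressed null stays null
      return heap_base + ((uintptr_t)n << oop_shift);
    }

    inline narrowOop_t encode_heap_oop(void* p) {
      if (p == NULL) return 0;
      return (narrowOop_t)(((char*)p - heap_base) >> oop_shift);
    }
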
@@ -162,11 +185,21 @@ UNSAFE_ENTRY(void, Unsafe_SetObject140(JNIEnv *env, jobject unsafe, jobject obj,
oop x = JNIHandles::resolve(x_h);
//SET_FIELD(obj, offset, oop, x);
oop p = JNIHandles::resolve(obj);
- if (x != NULL) {
- // If there is a heap base pointer, we are obliged to emit a store barrier.
- oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+ if (UseCompressedOops) {
+ if (x != NULL) {
+ // If there is a heap base pointer, we are obliged to emit a store barrier.
+ oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
+ } else {
+      narrowOop n = oopDesc::encode_heap_oop(x);  // x is NULL here; use the null-safe encoder
+ *(narrowOop*)index_oop_from_field_offset_long(p, offset) = n;
+ }
} else {
- *(oop*)index_oop_from_field_offset_long(p, offset) = x;
+ if (x != NULL) {
+ // If there is a heap base pointer, we are obliged to emit a store barrier.
+ oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+ } else {
+ *(oop*)index_oop_from_field_offset_long(p, offset) = x;
+ }
}
UNSAFE_END
@@ -175,7 +208,7 @@ UNSAFE_END
// That is, it should be in the range [0, MAX_OBJECT_SIZE].
UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
UnsafeWrapper("Unsafe_GetObject");
- GET_FIELD(obj, offset, oop, v);
+ GET_OOP_FIELD(obj, offset, v)
return JNIHandles::make_local(env, v);
UNSAFE_END
@@ -183,12 +216,16 @@ UNSAFE_ENTRY(void, Unsafe_SetObject(JNIEnv *env, jobject unsafe, jobject obj, jl
UnsafeWrapper("Unsafe_SetObject");
oop x = JNIHandles::resolve(x_h);
oop p = JNIHandles::resolve(obj);
- oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+ if (UseCompressedOops) {
+ oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
+ } else {
+ oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+ }
UNSAFE_END
UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
UnsafeWrapper("Unsafe_GetObjectVolatile");
- GET_FIELD_VOLATILE(obj, offset, oop, v);
+ GET_OOP_FIELD_VOLATILE(obj, offset, v)
return JNIHandles::make_local(env, v);
UNSAFE_END
@@ -196,7 +233,11 @@ UNSAFE_ENTRY(void, Unsafe_SetObjectVolatile(JNIEnv *env, jobject unsafe, jobject
UnsafeWrapper("Unsafe_SetObjectVolatile");
oop x = JNIHandles::resolve(x_h);
oop p = JNIHandles::resolve(obj);
- oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+ if (UseCompressedOops) {
+ oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
+ } else {
+ oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+ }
OrderAccess::fence();
UNSAFE_END
@@ -311,7 +352,11 @@ UNSAFE_ENTRY(void, Unsafe_SetOrderedObject(JNIEnv *env, jobject unsafe, jobject
UnsafeWrapper("Unsafe_SetOrderedObject");
oop x = JNIHandles::resolve(x_h);
oop p = JNIHandles::resolve(obj);
- oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+ if (UseCompressedOops) {
+ oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
+ } else {
+ oop_store((oop*)index_oop_from_field_offset_long(p, offset), x);
+ }
OrderAccess::fence();
UNSAFE_END
@@ -647,7 +692,7 @@ static void getBaseAndScale(int& base, int& scale, jclass acls, TRAPS) {
THROW(vmSymbols::java_lang_InvalidClassException());
} else if (k->klass_part()->oop_is_objArray()) {
base = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
- scale = oopSize;
+ scale = heapOopSize;
} else if (k->klass_part()->oop_is_typeArray()) {
typeArrayKlass* tak = typeArrayKlass::cast(k);
base = tak->array_header_in_bytes();
@@ -845,11 +890,11 @@ UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe,
oop x = JNIHandles::resolve(x_h);
oop e = JNIHandles::resolve(e_h);
oop p = JNIHandles::resolve(obj);
- intptr_t* addr = (intptr_t *)index_oop_from_field_offset_long(p, offset);
- intptr_t res = Atomic::cmpxchg_ptr((intptr_t)x, addr, (intptr_t)e);
- jboolean success = (res == (intptr_t)e);
+ HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
+ oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e);
+ jboolean success = (res == e);
if (success)
- update_barrier_set((oop*)addr, x);
+ update_barrier_set((void*)addr, x);
return success;
UNSAFE_END
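
Unsafe_CompareAndSwapObject now defers to atomic_compare_exchange_oop, which compares and swaps a 32-bit slot when oops are compressed. A standalone sketch of that idea, reusing the assumed encoding from the earlier sketch (std::atomic stands in for the VM's Atomic primitives):

    #include <atomic>
    #include <cstdint>
    #include <cstddef>

    typedef uint32_t narrowOop_t;
    static char* cas_heap_base = NULL;        // assumed, set at startup
    static narrowOop_t enc(void* p) {
      return p ? (narrowOop_t)(((char*)p - cas_heap_base) >> 3) : 0;
    }
    static void* dec(narrowOop_t n) {
      return n ? cas_heap_base + ((uintptr_t)n << 3) : NULL;
    }

    // CAS the 32-bit slot; return the value actually observed there, so the
    // caller can test success by comparing against 'expected'.
    void* cas_heap_oop(void* new_val, std::atomic<narrowOop_t>* slot, void* expected) {
      narrowOop_t observed = enc(expected);
      slot->compare_exchange_strong(observed, enc(new_val));
      return dec(observed);
    }
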
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index a02fa0983..ad7dd375d 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -1163,6 +1163,31 @@ void Arguments::set_ergonomics_flags() {
no_shared_spaces();
}
}
+
+#ifdef _LP64
+ // Compressed Headers do not work with CMS, which uses a bit in the klass
+ // field offset to determine free list chunk markers.
+ // Check that UseCompressedOops can be set with the max heap size allocated
+ // by ergonomics.
+ if (!UseConcMarkSweepGC && MaxHeapSize <= (32*G - os::vm_page_size())) {
+ if (FLAG_IS_DEFAULT(UseCompressedOops)) {
+ FLAG_SET_ERGO(bool, UseCompressedOops, true);
+ }
+ } else {
+ if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
+ // If specified, give a warning
+ if (UseConcMarkSweepGC){
+ warning("Compressed Oops does not work with CMS");
+ } else {
+        warning("Max heap size too large for Compressed Oops");
+ }
+ FLAG_SET_DEFAULT(UseCompressedOops, false);
+ }
+ }
+  // Certain machines are slower with compressed oops; that case is checked
+  // in the vm_version initialization code.
+#endif // _LP64
}
void Arguments::set_parallel_gc_flags() {
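
The 32*G bound above follows from the representation: a 32-bit compressed reference with the assumed 3-bit (8-byte alignment) shift can span 2^35 bytes. A quick standalone check of that arithmetic; the page-size headroom mirrors the os::vm_page_size() term in the condition:

    #include <cstdint>
    #include <cassert>

    int main() {
      const uint64_t G = 1024ull * 1024 * 1024;
      const uint64_t max_compressed_heap = (1ull << 32) << 3;  // 2^32 slots * 8-byte alignment
      assert(max_compressed_heap == 32 * G);
      // Ergonomics enables UseCompressedOops only when
      // MaxHeapSize <= 32*G - page_size (page size is platform dependent).
      return 0;
    }
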
diff --git a/src/share/vm/runtime/atomic.cpp b/src/share/vm/runtime/atomic.cpp
index 299d2b00a..847cf0861 100644
--- a/src/share/vm/runtime/atomic.cpp
+++ b/src/share/vm/runtime/atomic.cpp
@@ -44,3 +44,15 @@ jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_
}
return cur_as_bytes[offset];
}
+
+unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
+ assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
+ return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+unsigned Atomic::cmpxchg(unsigned int exchange_value,
+ volatile unsigned int* dest, unsigned int compare_value) {
+ assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
+ return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
+ (jint)compare_value);
+}
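
The unsigned overloads above simply reinterpret the 32-bit value and forward to the jint primitives. A rough standalone equivalent using GCC's __atomic builtins (illustrative only, not HotSpot's Atomic class):

    static_assert(sizeof(unsigned int) == sizeof(int), "more work to do");

    inline unsigned int xchg_u(unsigned int exchange_value, volatile unsigned int* dest) {
      return (unsigned int)__atomic_exchange_n((volatile int*)dest, (int)exchange_value,
                                               __ATOMIC_SEQ_CST);
    }

    inline unsigned int cmpxchg_u(unsigned int exchange_value,
                                  volatile unsigned int* dest,
                                  unsigned int compare_value) {
      int expected = (int)compare_value;
      __atomic_compare_exchange_n((volatile int*)dest, &expected, (int)exchange_value,
                                  /*weak=*/false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
      return (unsigned int)expected;   // prior value of *dest, as Atomic::cmpxchg reports
    }
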
diff --git a/src/share/vm/runtime/atomic.hpp b/src/share/vm/runtime/atomic.hpp
index cfbda4ba7..a8b7dfa7a 100644
--- a/src/share/vm/runtime/atomic.hpp
+++ b/src/share/vm/runtime/atomic.hpp
@@ -55,7 +55,10 @@ class Atomic : AllStatic {
static void dec_ptr(volatile void* dest);
// Performs atomic exchange of *dest with exchange_value. Returns old prior value of *dest.
- static jint xchg (jint exchange_value, volatile jint* dest);
+ static jint xchg(jint exchange_value, volatile jint* dest);
+ static unsigned int xchg(unsigned int exchange_value,
+ volatile unsigned int* dest);
+
static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest);
static void* xchg_ptr(void* exchange_value, volatile void* dest);
@@ -65,6 +68,11 @@ class Atomic : AllStatic {
static jbyte cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);
static jint cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value);
static jlong cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value);
+
+ static unsigned int cmpxchg(unsigned int exchange_value,
+ volatile unsigned int* dest,
+ unsigned int compare_value);
+
static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value);
};
diff --git a/src/share/vm/runtime/frame.cpp b/src/share/vm/runtime/frame.cpp
index efc74a361..4db33fc54 100644
--- a/src/share/vm/runtime/frame.cpp
+++ b/src/share/vm/runtime/frame.cpp
@@ -1153,9 +1153,8 @@ oop* frame::oopmapreg_to_location(VMReg reg, const RegisterMap* reg_map) const {
// If it is passed in a register, it got spilled in the stub frame.
return (oop *)reg_map->location(reg);
} else {
- int sp_offset_in_stack_slots = reg->reg2stack();
- int sp_offset = sp_offset_in_stack_slots >> (LogBytesPerWord - LogBytesPerInt);
- return (oop *)&unextended_sp()[sp_offset];
+ int sp_offset_in_bytes = reg->reg2stack() * VMRegImpl::stack_slot_size;
+ return (oop*)(((address)unextended_sp()) + sp_offset_in_bytes);
}
}
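
The rewritten oopmapreg_to_location addresses stack slots by byte offset so that a single-slot (32-bit) narrow-oop location is not rounded to a word index. A toy illustration, assuming the usual 4-byte VMRegImpl::stack_slot_size:

    const int stack_slot_size = 4;               // VMRegImpl::stack_slot_size (assumed)

    inline void* slot_location(char* unextended_sp, int reg2stack_index) {
      int sp_offset_in_bytes = reg2stack_index * stack_slot_size;
      return unextended_sp + sp_offset_in_bytes; // valid for both oop* and narrowOop* slots
    }
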
@@ -1331,8 +1330,7 @@ void frame::zap_dead_compiled_locals(JavaThread* thread, const RegisterMap* reg_
ResourceMark rm(thread);
assert(_cb != NULL, "sanity check");
if (_cb->oop_maps() != NULL) {
- OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop,
- &_check_value, &_zap_dead);
+ OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, &_check_value);
}
}
diff --git a/src/share/vm/runtime/frame.hpp b/src/share/vm/runtime/frame.hpp
index 475a77517..ec13e57a2 100644
--- a/src/share/vm/runtime/frame.hpp
+++ b/src/share/vm/runtime/frame.hpp
@@ -250,7 +250,7 @@ class frame VALUE_OBJ_CLASS_SPEC {
oop interpreter_callee_receiver(symbolHandle signature) { return *interpreter_callee_receiver_addr(signature); }
- oop *interpreter_callee_receiver_addr(symbolHandle signature);
+ oop* interpreter_callee_receiver_addr(symbolHandle signature);
// expression stack (may go up or down, direction == 1 or -1)
@@ -402,19 +402,25 @@ class frame VALUE_OBJ_CLASS_SPEC {
# ifdef ENABLE_ZAP_DEAD_LOCALS
private:
class CheckValueClosure: public OopClosure {
- public: void do_oop(oop* p);
+ public:
+ void do_oop(oop* p);
+ void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static CheckValueClosure _check_value;
class CheckOopClosure: public OopClosure {
- public: void do_oop(oop* p);
+ public:
+ void do_oop(oop* p);
+ void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static CheckOopClosure _check_oop;
static void check_derived_oop(oop* base, oop* derived);
class ZapDeadClosure: public OopClosure {
- public: void do_oop(oop* p);
+ public:
+ void do_oop(oop* p);
+ void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static ZapDeadClosure _zap_dead;
diff --git a/src/share/vm/runtime/globals.cpp b/src/share/vm/runtime/globals.cpp
index f70d0c08b..ffd62d14a 100644
--- a/src/share/vm/runtime/globals.cpp
+++ b/src/share/vm/runtime/globals.cpp
@@ -29,7 +29,8 @@
RUNTIME_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \
MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \
MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG, \
- MATERIALIZE_MANAGEABLE_FLAG, MATERIALIZE_PRODUCT_RW_FLAG)
+ MATERIALIZE_MANAGEABLE_FLAG, MATERIALIZE_PRODUCT_RW_FLAG, \
+ MATERIALIZE_LP64_PRODUCT_FLAG)
RUNTIME_OS_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \
MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \
@@ -137,6 +138,12 @@ void Flag::print_as_flag(outputStream* st) {
#define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{notproduct}", DEFAULT },
#endif
+#ifdef _LP64
+ #define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{lp64_product}", DEFAULT },
+#else
+ #define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
+#endif // _LP64
+
#define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C1 product}", DEFAULT },
#define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, "{C1 pd product}", DEFAULT },
#ifdef PRODUCT
@@ -165,7 +172,7 @@ void Flag::print_as_flag(outputStream* st) {
static Flag flagTable[] = {
- RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
+ RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT, RUNTIME_LP64_PRODUCT_FLAG_STRUCT)
RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT)
#ifdef COMPILER1
C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index e1d23978d..d25dd756e 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -237,7 +237,6 @@ class CommandLineFlags {
#define falseInTiered true
#endif
-
// develop flags are settable / visible only during development and are constant in the PRODUCT version
// product flags are always settable / visible
// notproduct flags are settable / visible only during development and are not declared in the PRODUCT version
@@ -286,7 +285,11 @@ class CommandLineFlags {
// Note that when there is a need to support develop flags to be writeable,
// it can be done in the same way as product_rw.
-#define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, manageable, product_rw) \
+#define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, manageable, product_rw, lp64_product) \
+ \
+  lp64_product(bool, UseCompressedOops, false,                              \
+          "Use 32-bit object references in a 64-bit VM. "                   \
+          "lp64_product means the flag is constant in a 32-bit VM")         \
\
/* UseMembar is theoretically a temp flag used for memory barrier \
* removal testing. It was supposed to be removed before FCS but has \
@@ -3209,6 +3212,12 @@ class CommandLineFlags {
#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc) extern "C" type name;
#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc) extern "C" type name;
#endif
+// Special LP64 flags, product only needed for now.
+#ifdef _LP64
+#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) extern "C" type name;
+#else
+#define DECLARE_LP64_PRODUCT_FLAG(type, name, value, doc) const type name = value;
+#endif // _LP64
// Implementation macros
#define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc) type name = value;
@@ -3225,7 +3234,12 @@ class CommandLineFlags {
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type name = pd_##name;
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value;
#endif
+#ifdef _LP64
+#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value;
+#else
+#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */
+#endif // _LP64
-RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
+RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG, DECLARE_LP64_PRODUCT_FLAG)
RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
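
The new lp64_product flag class exists so that on 32-bit builds the flag collapses to a compile-time constant and compressed-oop branches fold away. A minimal standalone model of that trick (names here are illustrative):

    #ifdef _LP64
    bool UseCompressedOopsModel = false;         // real, settable global on 64-bit builds
    #else
    const bool UseCompressedOopsModel = false;   // constant on 32-bit; branches compile out
    #endif

    int heap_oop_size_model() {
      // On 32-bit builds the compiler can prove this branch dead and drop it.
      return UseCompressedOopsModel ? 4 : (int)sizeof(void*);
    }
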
diff --git a/src/share/vm/runtime/globals_extension.hpp b/src/share/vm/runtime/globals_extension.hpp
index d6cc1cf58..ffb8071b2 100644
--- a/src/share/vm/runtime/globals_extension.hpp
+++ b/src/share/vm/runtime/globals_extension.hpp
@@ -41,6 +41,11 @@
#define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
#define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
#endif
+#ifdef _LP64
+#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#else
+#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
+#endif // _LP64
#define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
#define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
@@ -71,7 +76,9 @@
typedef enum {
RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER,
RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER,
- RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
+ RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER,
+ RUNTIME_PRODUCT_RW_FLAG_MEMBER,
+ RUNTIME_LP64_PRODUCT_FLAG_MEMBER)
RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER,
RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER,
RUNTIME_NOTPRODUCT_FLAG_MEMBER)
@@ -116,6 +123,11 @@ typedef enum {
#define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
#define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
#endif
+#ifdef _LP64
+#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#else
+#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
+#endif // _LP64
#define C2_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
@@ -137,7 +149,8 @@ typedef enum {
RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE,
RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE,
- RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE)
+ RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE,
+ RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE)
RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
diff --git a/src/share/vm/runtime/hpi.cpp b/src/share/vm/runtime/hpi.cpp
index 18e4e7976..1b8e361df 100644
--- a/src/share/vm/runtime/hpi.cpp
+++ b/src/share/vm/runtime/hpi.cpp
@@ -27,7 +27,8 @@
extern "C" {
static void unimplemented_panic(const char *fmt, ...) {
- Unimplemented();
+ // mitigate testing damage from bug 6626677
+ warning("hpi::unimplemented_panic called");
}
static void unimplemented_monitorRegister(sys_mon_t *mid, char *info_str) {
diff --git a/src/share/vm/runtime/init.cpp b/src/share/vm/runtime/init.cpp
index b93099ecb..dfadab630 100644
--- a/src/share/vm/runtime/init.cpp
+++ b/src/share/vm/runtime/init.cpp
@@ -27,7 +27,6 @@
// Initialization done by VM thread in vm_init_globals()
void check_ThreadShadow();
-void check_basic_types();
void eventlog_init();
void mutex_init();
void chunkpool_init();
@@ -73,7 +72,7 @@ void ostream_exit();
void vm_init_globals() {
check_ThreadShadow();
- check_basic_types();
+ basic_types_init();
eventlog_init();
mutex_init();
chunkpool_init();
diff --git a/src/share/vm/runtime/jniHandles.cpp b/src/share/vm/runtime/jniHandles.cpp
index 05078c559..6f91a4dae 100644
--- a/src/share/vm/runtime/jniHandles.cpp
+++ b/src/share/vm/runtime/jniHandles.cpp
@@ -206,9 +206,10 @@ private:
int _count;
public:
CountHandleClosure(): _count(0) {}
- void do_oop(oop* unused) {
+ virtual void do_oop(oop* unused) {
_count++;
}
+ virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
int count() { return _count; }
};
@@ -230,9 +231,10 @@ void JNIHandles::print_on(outputStream* st) {
class VerifyHandleClosure: public OopClosure {
public:
- void do_oop(oop* root) {
+ virtual void do_oop(oop* root) {
(*root)->verify();
}
+ virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};
void JNIHandles::verify() {
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
index fbc424303..3a347fccf 100644
--- a/src/share/vm/runtime/vmStructs.cpp
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -71,7 +71,8 @@ static inline uint64_t cast_uint64_t(size_t x)
/******************************************************************/ \
\
volatile_nonstatic_field(oopDesc, _mark, markOop) \
- nonstatic_field(oopDesc, _klass, klassOop) \
+ volatile_nonstatic_field(oopDesc, _metadata._klass, wideKlassOop) \
+ volatile_nonstatic_field(oopDesc, _metadata._compressed_klass, narrowOop) \
static_field(oopDesc, _bs, BarrierSet*) \
nonstatic_field(arrayKlass, _dimension, int) \
nonstatic_field(arrayKlass, _higher_dimension, klassOop) \
@@ -79,13 +80,14 @@ static inline uint64_t cast_uint64_t(size_t x)
nonstatic_field(arrayKlass, _vtable_len, int) \
nonstatic_field(arrayKlass, _alloc_size, juint) \
nonstatic_field(arrayKlass, _component_mirror, oop) \
- nonstatic_field(arrayOopDesc, _length, int) \
nonstatic_field(compiledICHolderKlass, _alloc_size, juint) \
nonstatic_field(compiledICHolderOopDesc, _holder_method, methodOop) \
nonstatic_field(compiledICHolderOopDesc, _holder_klass, klassOop) \
nonstatic_field(constantPoolOopDesc, _tags, typeArrayOop) \
nonstatic_field(constantPoolOopDesc, _cache, constantPoolCacheOop) \
nonstatic_field(constantPoolOopDesc, _pool_holder, klassOop) \
+ nonstatic_field(constantPoolOopDesc, _length, int) \
+ nonstatic_field(constantPoolCacheOopDesc, _length, int) \
nonstatic_field(constantPoolCacheOopDesc, _constant_pool, constantPoolOop) \
nonstatic_field(instanceKlass, _array_klasses, klassOop) \
nonstatic_field(instanceKlass, _methods, objArrayOop) \
@@ -261,6 +263,7 @@ static inline uint64_t cast_uint64_t(size_t x)
static_field(Universe, _bootstrapping, bool) \
static_field(Universe, _fully_initialized, bool) \
static_field(Universe, _verify_count, int) \
+ static_field(Universe, _heap_base, address) \
\
/**********************************************************************************/ \
/* Generation and Space hierarchies */ \
@@ -305,8 +308,6 @@ static inline uint64_t cast_uint64_t(size_t x)
nonstatic_field(SharedHeap, _perm_gen, PermGen*) \
nonstatic_field(CollectedHeap, _barrier_set, BarrierSet*) \
nonstatic_field(CollectedHeap, _is_gc_active, bool) \
- nonstatic_field(CollectedHeap, _max_heap_capacity, size_t) \
- \
nonstatic_field(CompactibleSpace, _compaction_top, HeapWord*) \
nonstatic_field(CompactibleSpace, _first_dead, HeapWord*) \
nonstatic_field(CompactibleSpace, _end_of_live, HeapWord*) \
@@ -912,12 +913,12 @@ static inline uint64_t cast_uint64_t(size_t x)
declare_type(arrayKlass, Klass) \
declare_type(arrayKlassKlass, klassKlass) \
declare_type(arrayOopDesc, oopDesc) \
- declare_type(compiledICHolderKlass, Klass) \
- declare_type(compiledICHolderOopDesc, oopDesc) \
- declare_type(constantPoolKlass, arrayKlass) \
- declare_type(constantPoolOopDesc, arrayOopDesc) \
- declare_type(constantPoolCacheKlass, arrayKlass) \
- declare_type(constantPoolCacheOopDesc, arrayOopDesc) \
+ declare_type(compiledICHolderKlass, Klass) \
+ declare_type(compiledICHolderOopDesc, oopDesc) \
+ declare_type(constantPoolKlass, Klass) \
+ declare_type(constantPoolOopDesc, oopDesc) \
+ declare_type(constantPoolCacheKlass, Klass) \
+ declare_type(constantPoolCacheOopDesc, oopDesc) \
declare_type(instanceKlass, Klass) \
declare_type(instanceKlassKlass, klassKlass) \
declare_type(instanceOopDesc, oopDesc) \
@@ -949,9 +950,11 @@ static inline uint64_t cast_uint64_t(size_t x)
declare_oop_type(klassOop) \
declare_oop_type(markOop) \
declare_oop_type(methodOop) \
- declare_oop_type(methodDataOop) \
+ declare_oop_type(methodDataOop) \
declare_oop_type(objArrayOop) \
declare_oop_type(oop) \
+ declare_oop_type(narrowOop) \
+ declare_oop_type(wideKlassOop) \
declare_oop_type(constMethodOop) \
declare_oop_type(symbolOop) \
declare_oop_type(typeArrayOop) \
@@ -1307,6 +1310,7 @@ static inline uint64_t cast_uint64_t(size_t x)
/* Object sizes */ \
/****************/ \
\
+ declare_constant(oopSize) \
declare_constant(LogBytesPerWord) \
declare_constant(BytesPerLong) \
\
@@ -1314,7 +1318,9 @@ static inline uint64_t cast_uint64_t(size_t x)
/* Object alignment */ \
/********************/ \
\
+ declare_constant(MinObjAlignment) \
declare_constant(MinObjAlignmentInBytes) \
+ declare_constant(LogMinObjAlignmentInBytes) \
\
/********************************************/ \
/* Generation and Space Hierarchy Constants */ \
@@ -1361,7 +1367,6 @@ static inline uint64_t cast_uint64_t(size_t x)
\
declare_constant(HeapWordSize) \
declare_constant(LogHeapWordSize) \
- declare_constant(HeapWordsPerOop) \
\
/* constants from PermGen::Name enum */ \
\
@@ -1610,7 +1615,7 @@ static inline uint64_t cast_uint64_t(size_t x)
declare_constant(OopMapValue::unused_value) \
declare_constant(OopMapValue::oop_value) \
declare_constant(OopMapValue::value_value) \
- declare_constant(OopMapValue::dead_value) \
+ declare_constant(OopMapValue::narrowoop_value) \
declare_constant(OopMapValue::callee_saved_value) \
declare_constant(OopMapValue::derived_oop_value) \
\
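
The _metadata entries above expose the new object-header union to the serviceability agent. A rough standalone model of the layout they describe (field names follow the entries; markWord_t is a stand-in for markOop):

    #include <cstdint>

    typedef uintptr_t markWord_t;                // stand-in for markOop

    struct ObjectHeaderModel {
      volatile markWord_t _mark;
      union {
        void*    _klass;                         // full-width klass pointer
        uint32_t _compressed_klass;              // narrow klass when oops are compressed
      } _metadata;
    };
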
diff --git a/src/share/vm/services/heapDumper.cpp b/src/share/vm/services/heapDumper.cpp
index afb7d9cbb..1a30c05b5 100644
--- a/src/share/vm/services/heapDumper.cpp
+++ b/src/share/vm/services/heapDumper.cpp
@@ -670,8 +670,12 @@ void DumperSupport::dump_field_value(DumpWriter* writer, char type, address addr
switch (type) {
case JVM_SIGNATURE_CLASS :
case JVM_SIGNATURE_ARRAY : {
- oop* f = (oop*)addr;
- oop o = *f;
+ oop o;
+ if (UseCompressedOops) {
+ o = oopDesc::load_decode_heap_oop((narrowOop*)addr);
+ } else {
+ o = oopDesc::load_decode_heap_oop((oop*)addr);
+ }
// reflection and sun.misc.Unsafe classes may have a reference to a
// klassOop so filter it out.
@@ -1077,6 +1081,7 @@ class SymbolTableDumper : public OopClosure {
public:
SymbolTableDumper(DumpWriter* writer) { _writer = writer; }
void do_oop(oop* obj_p);
+ void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
};
void SymbolTableDumper::do_oop(oop* obj_p) {
@@ -1106,6 +1111,7 @@ class JNILocalsDumper : public OopClosure {
_thread_serial_num = thread_serial_num;
}
void do_oop(oop* obj_p);
+ void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
};
@@ -1133,6 +1139,7 @@ class JNIGlobalsDumper : public OopClosure {
_writer = writer;
}
void do_oop(oop* obj_p);
+ void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
};
void JNIGlobalsDumper::do_oop(oop* obj_p) {
@@ -1164,6 +1171,7 @@ class MonitorUsedDumper : public OopClosure {
writer()->write_u1(HPROF_GC_ROOT_MONITOR_USED);
writer()->write_objectID(*obj_p);
}
+ void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
};
@@ -1178,6 +1186,7 @@ class StickyClassDumper : public OopClosure {
_writer = writer;
}
void do_oop(oop* obj_p);
+ void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
};
void StickyClassDumper::do_oop(oop* obj_p) {
diff --git a/src/share/vm/utilities/copy.hpp b/src/share/vm/utilities/copy.hpp
index 1bbea38ad..f3f84d0f2 100644
--- a/src/share/vm/utilities/copy.hpp
+++ b/src/share/vm/utilities/copy.hpp
@@ -148,11 +148,19 @@ class Copy : AllStatic {
// oops, conjoint, atomic on each oop
static void conjoint_oops_atomic(oop* from, oop* to, size_t count) {
- assert_params_ok(from, to, LogBytesPerOop);
+ assert_params_ok(from, to, LogBytesPerHeapOop);
assert_non_zero(count);
pd_conjoint_oops_atomic(from, to, count);
}
+ // overloaded for UseCompressedOops
+ static void conjoint_oops_atomic(narrowOop* from, narrowOop* to, size_t count) {
+ assert(sizeof(narrowOop) == sizeof(jint), "this cast is wrong");
+ assert_params_ok(from, to, LogBytesPerInt);
+ assert_non_zero(count);
+ pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
+ }
+
// Copy a span of memory. If the span is an integral number of aligned
// longs, words, or ints, copy those units atomically.
// The largest atomic transfer unit is 8 bytes, or the largest power
@@ -188,7 +196,7 @@ class Copy : AllStatic {
// oops, conjoint array, atomic on each oop
static void arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
- assert_params_ok(from, to, LogBytesPerOop);
+ assert_params_ok(from, to, LogBytesPerHeapOop);
assert_non_zero(count);
pd_arrayof_conjoint_oops(from, to, count);
}
diff --git a/src/share/vm/utilities/debug.cpp b/src/share/vm/utilities/debug.cpp
index 8eb84cddc..053601077 100644
--- a/src/share/vm/utilities/debug.cpp
+++ b/src/share/vm/utilities/debug.cpp
@@ -669,6 +669,7 @@ public:
tty->print_cr("0x%08x", o);
}
}
+ void do_oop(narrowOop* o) { ShouldNotReachHere(); }
};
diff --git a/src/share/vm/utilities/globalDefinitions.cpp b/src/share/vm/utilities/globalDefinitions.cpp
index 89373ef28..acb8875ac 100644
--- a/src/share/vm/utilities/globalDefinitions.cpp
+++ b/src/share/vm/utilities/globalDefinitions.cpp
@@ -24,18 +24,23 @@
# include "incls/_precompiled.incl"
# include "incls/_globalDefinitions.cpp.incl"
-
-
// Basic error support
+// Info for oops within a java object. Defaults are zero so
+// things will break badly if incorrectly initialized.
+int heapOopSize = 0;
+int LogBytesPerHeapOop = 0;
+int LogBitsPerHeapOop = 0;
+int BytesPerHeapOop = 0;
+int BitsPerHeapOop = 0;
+
void basic_fatal(const char* msg) {
fatal(msg);
}
-
// Something to help porters sleep at night
-void check_basic_types() {
+void basic_types_init() {
#ifdef ASSERT
#ifdef _LP64
assert(min_intx == (intx)CONST64(0x8000000000000000), "correct constant");
@@ -92,6 +97,7 @@ void check_basic_types() {
case T_LONG:
case T_OBJECT:
case T_ADDRESS: // random raw pointer
+ case T_NARROWOOP: // compressed pointer
case T_CONFLICT: // might as well support a bottom type
case T_VOID: // padding or other unaddressed word
// layout type must map to itself
@@ -134,11 +140,30 @@ void check_basic_types() {
os::java_to_os_priority[9] = JavaPriority9_To_OSPriority;
if(JavaPriority10_To_OSPriority != -1 )
os::java_to_os_priority[10] = JavaPriority10_To_OSPriority;
+
+ // Set the size of basic types here (after argument parsing but before
+ // stub generation).
+ if (UseCompressedOops) {
+ // Size info for oops within java objects is fixed
+ heapOopSize = jintSize;
+ LogBytesPerHeapOop = LogBytesPerInt;
+ LogBitsPerHeapOop = LogBitsPerInt;
+ BytesPerHeapOop = BytesPerInt;
+ BitsPerHeapOop = BitsPerInt;
+ } else {
+ heapOopSize = oopSize;
+ LogBytesPerHeapOop = LogBytesPerWord;
+ LogBitsPerHeapOop = LogBitsPerWord;
+ BytesPerHeapOop = BytesPerWord;
+ BitsPerHeapOop = BitsPerWord;
+ }
+ _type2aelembytes[T_OBJECT] = heapOopSize;
+ _type2aelembytes[T_ARRAY] = heapOopSize;
}
// Map BasicType to signature character
-char type2char_tab[T_CONFLICT+1]={ 0, 0, 0, 0, 'Z', 'C', 'F', 'D', 'B', 'S', 'I', 'J', 'L', '[', 'V', 0, 0};
+char type2char_tab[T_CONFLICT+1]={ 0, 0, 0, 0, 'Z', 'C', 'F', 'D', 'B', 'S', 'I', 'J', 'L', '[', 'V', 0, 0, 0};
// Map BasicType to Java type name
const char* type2name_tab[T_CONFLICT+1] = {
@@ -155,6 +180,7 @@ const char* type2name_tab[T_CONFLICT+1] = {
"array",
"void",
"*address*",
+ "*narrowoop*",
"*conflict*"
};
@@ -170,7 +196,7 @@ BasicType name2type(const char* name) {
// Map BasicType to size in words
-int type2size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1};
+int type2size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, 1, -1};
BasicType type2field[T_CONFLICT+1] = {
(BasicType)0, // 0,
@@ -189,7 +215,8 @@ BasicType type2field[T_CONFLICT+1] = {
T_OBJECT, // T_ARRAY = 13,
T_VOID, // T_VOID = 14,
T_ADDRESS, // T_ADDRESS = 15,
- T_CONFLICT // T_CONFLICT = 16,
+ T_NARROWOOP, // T_NARROWOOP= 16,
+ T_CONFLICT // T_CONFLICT = 17,
};
@@ -210,7 +237,8 @@ BasicType type2wfield[T_CONFLICT+1] = {
T_OBJECT, // T_ARRAY = 13,
T_VOID, // T_VOID = 14,
T_ADDRESS, // T_ADDRESS = 15,
- T_CONFLICT // T_CONFLICT = 16,
+ T_NARROWOOP, // T_NARROWOOP = 16,
+ T_CONFLICT // T_CONFLICT = 17,
};
@@ -231,7 +259,8 @@ int _type2aelembytes[T_CONFLICT+1] = {
T_ARRAY_aelem_bytes, // T_ARRAY = 13,
0, // T_VOID = 14,
T_OBJECT_aelem_bytes, // T_ADDRESS = 15,
- 0 // T_CONFLICT = 16,
+ T_NARROWOOP_aelem_bytes,// T_NARROWOOP= 16,
+ 0 // T_CONFLICT = 17,
};
#ifdef ASSERT
@@ -245,7 +274,7 @@ int type2aelembytes(BasicType t, bool allow_address) {
// The following code is mostly taken from JVM typedefs_md.h and system_md.c
-static const jlong high_bit = (jlong)1 << (jlong)63;
+static const jlong high_bit = (jlong)1 << (jlong)63;
static const jlong other_bits = ~high_bit;
jlong float2long(jfloat f) {
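
basic_types_init() above fixes the in-object reference width once, after argument parsing but before stub generation. A standalone model of the selection (variable names are stand-ins, not the real globals):

    #include <cstdint>

    static int heapOopSize_model      = 0;       // defaults to 0 so misuse fails loudly
    static int LogBytesPerHeapOop_mdl = 0;

    void init_heap_oop_sizes(bool use_compressed_oops) {
      if (use_compressed_oops) {
        heapOopSize_model      = (int)sizeof(uint32_t);  // 4-byte in-object references
        LogBytesPerHeapOop_mdl = 2;
      } else {
        heapOopSize_model      = (int)sizeof(void*);     // full-width references
        LogBytesPerHeapOop_mdl = (sizeof(void*) == 8) ? 3 : 2;
      }
    }
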
diff --git a/src/share/vm/utilities/globalDefinitions.hpp b/src/share/vm/utilities/globalDefinitions.hpp
index 0feceb5de..93e1927db 100644
--- a/src/share/vm/utilities/globalDefinitions.hpp
+++ b/src/share/vm/utilities/globalDefinitions.hpp
@@ -59,23 +59,26 @@ const int LongAlignmentMask = (1 << LogBytesPerLong) - 1;
const int WordsPerLong = 2; // Number of stack entries for longs
-const int oopSize = sizeof(char*);
+const int oopSize = sizeof(char*); // Full-width oop
+extern int heapOopSize; // Oop within a java object
const int wordSize = sizeof(char*);
const int longSize = sizeof(jlong);
const int jintSize = sizeof(jint);
const int size_tSize = sizeof(size_t);
-// Size of a char[] needed to represent a jint as a string in decimal.
-const int jintAsStringSize = 12;
+const int BytesPerOop = BytesPerWord; // Full-width oop
-const int LogBytesPerOop = LogBytesPerWord;
-const int LogBitsPerOop = LogBitsPerWord;
-const int BytesPerOop = 1 << LogBytesPerOop;
-const int BitsPerOop = 1 << LogBitsPerOop;
+extern int LogBytesPerHeapOop; // Oop within a java object
+extern int LogBitsPerHeapOop;
+extern int BytesPerHeapOop;
+extern int BitsPerHeapOop;
const int BitsPerJavaInteger = 32;
const int BitsPerSize_t = size_tSize * BitsPerByte;
+// Size of a char[] needed to represent a jint as a string in decimal.
+const int jintAsStringSize = 12;
+
// In fact this should be
// log2_intptr(sizeof(class JavaThread)) - log2_intptr(64);
// see os::set_memory_serialize_page()
@@ -99,14 +102,14 @@ private:
};
// HeapWordSize must be 2^LogHeapWordSize.
-const int HeapWordSize = sizeof(HeapWord);
+const int HeapWordSize = sizeof(HeapWord);
#ifdef _LP64
-const int LogHeapWordSize = 3;
+const int LogHeapWordSize = 3;
#else
-const int LogHeapWordSize = 2;
+const int LogHeapWordSize = 2;
#endif
-const int HeapWordsPerOop = oopSize / HeapWordSize;
-const int HeapWordsPerLong = BytesPerLong / HeapWordSize;
+const int HeapWordsPerLong = BytesPerLong / HeapWordSize;
+const int LogHeapWordsPerLong = LogBytesPerLong - LogHeapWordSize;
// The larger HeapWordSize for 64bit requires larger heaps
// for the same application running in 64bit. See bug 4967770.
@@ -284,6 +287,9 @@ const int MinObjAlignment = HeapWordsPerLong;
const int MinObjAlignmentInBytes = MinObjAlignment * HeapWordSize;
const int MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;
+const int LogMinObjAlignment = LogHeapWordsPerLong;
+const int LogMinObjAlignmentInBytes = LogMinObjAlignment + LogHeapWordSize;
+
// Machine dependent stuff
#include "incls/_globalDefinitions_pd.hpp.incl"
@@ -371,7 +377,7 @@ union jlong_accessor {
jlong long_value;
};
-void check_basic_types(); // cannot define here; uses assert
+void basic_types_init(); // cannot define here; uses assert
// NOTE: replicated in SA in vm/agent/sun/jvm/hotspot/runtime/BasicType.java
@@ -388,7 +394,8 @@ enum BasicType {
T_ARRAY = 13,
T_VOID = 14,
T_ADDRESS = 15,
- T_CONFLICT = 16, // for stack value type with conflicting contents
+ T_NARROWOOP= 16,
+ T_CONFLICT = 17, // for stack value type with conflicting contents
T_ILLEGAL = 99
};
@@ -438,6 +445,7 @@ enum BasicTypeSize {
T_LONG_size = 2,
T_OBJECT_size = 1,
T_ARRAY_size = 1,
+ T_NARROWOOP_size = 1,
T_VOID_size = 0
};
@@ -465,6 +473,7 @@ enum ArrayElementSize {
T_OBJECT_aelem_bytes = 4,
T_ARRAY_aelem_bytes = 4,
#endif
+ T_NARROWOOP_aelem_bytes = 4,
T_VOID_aelem_bytes = 0
};
diff --git a/src/share/vm/utilities/taskqueue.hpp b/src/share/vm/utilities/taskqueue.hpp
index 7fa983f82..6e883d9e6 100644
--- a/src/share/vm/utilities/taskqueue.hpp
+++ b/src/share/vm/utilities/taskqueue.hpp
@@ -490,7 +490,31 @@ typedef oop Task;
typedef GenericTaskQueue<Task> OopTaskQueue;
typedef GenericTaskQueueSet<Task> OopTaskQueueSet;
-typedef oop* StarTask;
+
+#define COMPRESSED_OOP_MASK 1
+
+// This is a container class for either an oop* or a narrowOop*.
+// Both are pushed onto a task queue and the consumer will test is_narrow()
+// to determine which should be processed.
+class StarTask {
+  void* _holder;         // either a full-width oop* or a tag-bit-marked narrowOop*
+ public:
+ StarTask(narrowOop *p) { _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK); }
+ StarTask(oop *p) { _holder = (void*)p; }
+ StarTask() { _holder = NULL; }
+ operator oop*() { return (oop*)_holder; }
+ operator narrowOop*() {
+ return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
+ }
+
+ // Operators to preserve const/volatile in assignments required by gcc
+ void operator=(const volatile StarTask& t) volatile { _holder = t._holder; }
+
+ bool is_narrow() const {
+ return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
+ }
+};
+
typedef GenericTaskQueue<StarTask> OopStarTaskQueue;
typedef GenericTaskQueueSet<StarTask> OopStarTaskQueueSet;
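
Usage sketch for the StarTask container above: the low bit of the stored pointer marks a narrowOop*, which works because such pointers are at least 2-byte aligned. The stand-in types and process_* helpers below are hypothetical, not VM code:

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t narrowOop_t;

    static void process_wide(void** p)         { printf("wide slot   %p\n", (void*)p); }
    static void process_narrow(narrowOop_t* p) { printf("narrow slot %p\n", (void*)p); }

    struct StarTaskModel {
      void* _holder;
      StarTaskModel(narrowOop_t* p) : _holder((void*)((uintptr_t)p | 1)) {}
      StarTaskModel(void** p)       : _holder((void*)p) {}
      bool is_narrow() const { return ((uintptr_t)_holder & 1) != 0; }
      void**       as_wide()   const { return (void**)_holder; }
      narrowOop_t* as_narrow() const {
        return (narrowOop_t*)((uintptr_t)_holder & ~(uintptr_t)1);
      }
    };

    void drain(StarTaskModel t) {
      if (t.is_narrow()) process_narrow(t.as_narrow());
      else               process_wide(t.as_wide());
    }
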
diff --git a/src/share/vm/utilities/vmError.cpp b/src/share/vm/utilities/vmError.cpp
index 81b954625..f8df90421 100644
--- a/src/share/vm/utilities/vmError.cpp
+++ b/src/share/vm/utilities/vmError.cpp
@@ -332,11 +332,12 @@ void VMError::report(outputStream* st) {
// VM version
st->print_cr("#");
- st->print_cr("# Java VM: %s (%s %s %s)",
+ st->print_cr("# Java VM: %s (%s %s %s %s)",
Abstract_VM_Version::vm_name(),
Abstract_VM_Version::vm_release(),
Abstract_VM_Version::vm_info_string(),
- Abstract_VM_Version::vm_platform_string()
+ Abstract_VM_Version::vm_platform_string(),
+ UseCompressedOops ? "compressed oops" : ""
);
STEP(60, "(printing problematic frame)")