author    Edward Nevill <edward.nevill@linaro.org>  2013-10-11 12:06:22 +0100
committer Edward Nevill <edward.nevill@linaro.org>  2013-10-11 12:06:22 +0100
commit    c9c29ea453ff939e48745c1f879819b96e63fedd (patch)
tree      798259b00805bf284db2e1f4d32110b72e357364 /src/share
parent    afedba2e348cf9b3179325f96e1ff518f8a88c5b (diff)
parent    3ade2048da2eab1098270ecd3669cc49addaa142 (diff)
Merge up to jdk8-b110
Diffstat (limited to 'src/share')
-rw-r--r-- src/share/tools/LogCompilation/README | 6
-rw-r--r-- src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java | 6
-rw-r--r-- src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java | 26
-rw-r--r-- src/share/tools/ProjectCreator/BuildConfig.java | 111
-rw-r--r-- src/share/tools/ProjectCreator/FileTreeCreator.java | 8
-rw-r--r-- src/share/tools/ProjectCreator/FileTreeCreatorVC10.java | 64
-rw-r--r-- src/share/tools/ProjectCreator/FileTreeCreatorVC7.java | 2
-rw-r--r-- src/share/tools/ProjectCreator/ProjectCreator.java | 7
-rw-r--r-- src/share/tools/ProjectCreator/WinGammaPlatform.java | 25
-rw-r--r-- src/share/tools/ProjectCreator/WinGammaPlatformVC10.java | 37
-rw-r--r-- src/share/tools/ProjectCreator/WinGammaPlatformVC7.java | 29
-rw-r--r-- src/share/tools/hsdis/hsdis.c | 1
-rw-r--r-- src/share/tools/launcher/java.c | 2080
-rw-r--r-- src/share/tools/launcher/java.h | 110
-rw-r--r-- src/share/tools/launcher/jli_util.c | 89
-rw-r--r-- src/share/tools/launcher/wildcard.c | 496
-rw-r--r-- src/share/vm/adlc/archDesc.cpp | 4
-rw-r--r-- src/share/vm/adlc/arena.cpp | 6
-rw-r--r-- src/share/vm/adlc/arena.hpp | 10
-rw-r--r-- src/share/vm/adlc/dict2.cpp | 18
-rw-r--r-- src/share/vm/adlc/forms.hpp | 4
-rw-r--r-- src/share/vm/adlc/formssel.cpp | 96
-rw-r--r-- src/share/vm/adlc/formssel.hpp | 17
-rw-r--r-- src/share/vm/adlc/main.cpp | 4
-rw-r--r-- src/share/vm/adlc/output_c.cpp | 37
-rw-r--r-- src/share/vm/asm/codeBuffer.hpp | 6
-rw-r--r-- src/share/vm/c1/c1_CodeStubs.hpp | 5
-rw-r--r-- src/share/vm/c1/c1_Compilation.cpp | 9
-rw-r--r-- src/share/vm/c1/c1_Compilation.hpp | 6
-rw-r--r-- src/share/vm/c1/c1_Compiler.cpp | 32
-rw-r--r-- src/share/vm/c1/c1_Compiler.hpp | 2
-rw-r--r-- src/share/vm/c1/c1_GraphBuilder.cpp | 44
-rw-r--r-- src/share/vm/c1/c1_IR.cpp | 6
-rw-r--r-- src/share/vm/c1/c1_Instruction.hpp | 6
-rw-r--r-- src/share/vm/c1/c1_LIR.cpp | 49
-rw-r--r-- src/share/vm/c1/c1_LIR.hpp | 30
-rw-r--r-- src/share/vm/c1/c1_LIRAssembler.cpp | 11
-rw-r--r-- src/share/vm/c1/c1_LIRAssembler.hpp | 5
-rw-r--r-- src/share/vm/c1/c1_LIRGenerator.cpp | 9
-rw-r--r-- src/share/vm/c1/c1_LIRGenerator.hpp | 3
-rw-r--r-- src/share/vm/c1/c1_Runtime1.cpp | 168
-rw-r--r-- src/share/vm/c1/c1_Runtime1.hpp | 2
-rw-r--r-- src/share/vm/c1/c1_globals.cpp | 2
-rw-r--r-- src/share/vm/c1/c1_globals.hpp | 10
-rw-r--r-- src/share/vm/ci/bcEscapeAnalyzer.cpp | 15
-rw-r--r-- src/share/vm/ci/bcEscapeAnalyzer.hpp | 1
-rw-r--r-- src/share/vm/ci/ciArray.cpp | 79
-rw-r--r-- src/share/vm/ci/ciArray.hpp | 19
-rw-r--r-- src/share/vm/ci/ciConstant.hpp | 15
-rw-r--r-- src/share/vm/ci/ciEnv.cpp | 4
-rw-r--r-- src/share/vm/ci/ciEnv.hpp | 1
-rw-r--r-- src/share/vm/ci/ciField.cpp | 38
-rw-r--r-- src/share/vm/ci/ciField.hpp | 11
-rw-r--r-- src/share/vm/ci/ciFlags.hpp | 1
-rw-r--r-- src/share/vm/ci/ciInstance.cpp | 10
-rw-r--r-- src/share/vm/ci/ciInstanceKlass.cpp | 36
-rw-r--r-- src/share/vm/ci/ciInstanceKlass.hpp | 6
-rw-r--r-- src/share/vm/ci/ciMethod.cpp | 47
-rw-r--r-- src/share/vm/ci/ciMethod.hpp | 6
-rw-r--r-- src/share/vm/ci/ciObjectFactory.cpp | 7
-rw-r--r-- src/share/vm/ci/ciObjectFactory.hpp | 2
-rw-r--r-- src/share/vm/ci/ciReplay.cpp | 10
-rw-r--r-- src/share/vm/ci/ciSymbol.hpp | 3
-rw-r--r-- src/share/vm/ci/ciTypeArray.cpp | 7
-rw-r--r-- src/share/vm/ci/ciUtilities.hpp | 4
-rw-r--r-- src/share/vm/classfile/altHashing.cpp | 6
-rw-r--r-- src/share/vm/classfile/classFileParser.cpp | 335
-rw-r--r-- src/share/vm/classfile/classFileParser.hpp | 34
-rw-r--r-- src/share/vm/classfile/classLoader.cpp | 96
-rw-r--r-- src/share/vm/classfile/classLoader.hpp | 20
-rw-r--r-- src/share/vm/classfile/classLoaderData.cpp | 61
-rw-r--r-- src/share/vm/classfile/classLoaderData.hpp | 16
-rw-r--r-- src/share/vm/classfile/defaultMethods.cpp | 528
-rw-r--r-- src/share/vm/classfile/dictionary.cpp | 18
-rw-r--r-- src/share/vm/classfile/dictionary.hpp | 1
-rw-r--r-- src/share/vm/classfile/genericSignatures.cpp | 1279
-rw-r--r-- src/share/vm/classfile/genericSignatures.hpp | 467
-rw-r--r-- src/share/vm/classfile/javaClasses.cpp | 137
-rw-r--r-- src/share/vm/classfile/javaClasses.hpp | 55
-rw-r--r-- src/share/vm/classfile/symbolTable.cpp | 262
-rw-r--r-- src/share/vm/classfile/symbolTable.hpp | 42
-rw-r--r-- src/share/vm/classfile/systemDictionary.cpp | 61
-rw-r--r-- src/share/vm/classfile/systemDictionary.hpp | 11
-rw-r--r-- src/share/vm/classfile/verifier.cpp | 23
-rw-r--r-- src/share/vm/classfile/verifier.hpp | 20
-rw-r--r-- src/share/vm/classfile/vmSymbols.cpp | 4
-rw-r--r-- src/share/vm/classfile/vmSymbols.hpp | 26
-rw-r--r-- src/share/vm/code/codeBlob.cpp | 8
-rw-r--r-- src/share/vm/code/codeBlob.hpp | 8
-rw-r--r-- src/share/vm/code/codeCache.cpp | 30
-rw-r--r-- src/share/vm/code/codeCache.hpp | 10
-rw-r--r-- src/share/vm/code/compiledIC.cpp | 37
-rw-r--r-- src/share/vm/code/compiledIC.hpp | 5
-rw-r--r-- src/share/vm/code/debugInfo.hpp | 2
-rw-r--r-- src/share/vm/code/debugInfoRec.cpp | 4
-rw-r--r-- src/share/vm/code/dependencies.cpp | 6
-rw-r--r-- src/share/vm/code/nmethod.cpp | 87
-rw-r--r-- src/share/vm/code/nmethod.hpp | 6
-rw-r--r-- src/share/vm/code/relocInfo.cpp | 145
-rw-r--r-- src/share/vm/code/relocInfo.hpp | 130
-rw-r--r-- src/share/vm/code/vtableStubs.cpp | 17
-rw-r--r-- src/share/vm/code/vtableStubs.hpp | 8
-rw-r--r-- src/share/vm/compiler/compileBroker.cpp | 78
-rw-r--r-- src/share/vm/compiler/compileBroker.hpp | 19
-rw-r--r-- src/share/vm/compiler/compileLog.cpp | 15
-rw-r--r-- src/share/vm/compiler/compileLog.hpp | 2
-rw-r--r-- src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp | 8
-rw-r--r-- src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp | 1
-rw-r--r-- src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp | 10
-rw-r--r-- src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp | 6
-rw-r--r-- src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp | 4
-rw-r--r-- src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp | 16
-rw-r--r-- src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp | 11
-rw-r--r-- src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp | 1
-rw-r--r-- src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp | 545
-rw-r--r-- src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp | 52
-rw-r--r-- src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp | 4
-rw-r--r-- src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp | 20
-rw-r--r-- src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp | 12
-rw-r--r-- src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp | 9
-rw-r--r-- src/share/vm/gc_implementation/g1/concurrentMark.cpp | 91
-rw-r--r-- src/share/vm/gc_implementation/g1/concurrentMark.hpp | 13
-rw-r--r-- src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp | 8
-rw-r--r-- src/share/vm/gc_implementation/g1/evacuationInfo.hpp | 81
-rw-r--r-- src/share/vm/gc_implementation/g1/g1BiasedArray.cpp | 141
-rw-r--r-- src/share/vm/gc_implementation/g1/g1BiasedArray.hpp | 181
-rw-r--r-- src/share/vm/gc_implementation/g1/g1CardCounts.cpp | 28
-rw-r--r-- src/share/vm/gc_implementation/g1/g1CardCounts.hpp | 22
-rw-r--r-- src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp | 874
-rw-r--r-- src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp | 361
-rw-r--r-- src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp | 133
-rw-r--r-- src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp | 24
-rw-r--r-- src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp | 20
-rw-r--r-- src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp | 25
-rw-r--r-- src/share/vm/gc_implementation/g1/g1MarkSweep.cpp | 28
-rw-r--r-- src/share/vm/gc_implementation/g1/g1MarkSweep.hpp | 5
-rw-r--r-- src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp | 1
-rw-r--r-- src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp | 3
-rw-r--r-- src/share/vm/gc_implementation/g1/g1RemSet.cpp | 155
-rw-r--r-- src/share/vm/gc_implementation/g1/g1RemSet.hpp | 47
-rw-r--r-- src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp | 239
-rw-r--r-- src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp | 118
-rw-r--r-- src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp | 2
-rw-r--r-- src/share/vm/gc_implementation/g1/g1YCTypes.hpp | 51
-rw-r--r-- src/share/vm/gc_implementation/g1/g1_globals.hpp | 23
-rw-r--r-- src/share/vm/gc_implementation/g1/heapRegion.cpp | 472
-rw-r--r-- src/share/vm/gc_implementation/g1/heapRegion.hpp | 29
-rw-r--r-- src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp | 155
-rw-r--r-- src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp | 41
-rw-r--r-- src/share/vm/gc_implementation/g1/heapRegionSeq.cpp | 49
-rw-r--r-- src/share/vm/gc_implementation/g1/heapRegionSeq.hpp | 59
-rw-r--r-- src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp | 22
-rw-r--r-- src/share/vm/gc_implementation/g1/ptrQueue.hpp | 1
-rw-r--r-- src/share/vm/gc_implementation/g1/vmStructs_g1.hpp | 15
-rw-r--r-- src/share/vm/gc_implementation/g1/vm_operations_g1.cpp | 9
-rw-r--r-- src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp | 5
-rw-r--r-- src/share/vm/gc_implementation/parNew/parNewGeneration.cpp | 126
-rw-r--r-- src/share/vm/gc_implementation/parNew/parNewGeneration.hpp | 24
-rw-r--r-- src/share/vm/gc_implementation/parNew/parOopClosures.hpp | 3
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp | 11
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp | 2
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp | 10
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp | 5
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp | 41
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp | 76
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp | 34
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp | 18
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp | 37
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp | 3
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp | 462
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp | 51
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp | 9
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp | 4
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp | 80
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp | 3
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp | 2
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp | 347
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp | 171
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp | 43
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp | 22
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp | 8
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp | 122
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp | 49
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp | 8
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp | 13
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp | 7
-rw-r--r-- src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp | 12
-rw-r--r-- src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp | 4
-rw-r--r-- src/share/vm/gc_implementation/shared/allocationStats.hpp | 6
-rw-r--r-- src/share/vm/gc_implementation/shared/copyFailedInfo.hpp | 90
-rw-r--r-- src/share/vm/gc_implementation/shared/gcHeapSummary.hpp | 142
-rw-r--r-- src/share/vm/gc_implementation/shared/gcTimer.cpp | 374
-rw-r--r-- src/share/vm/gc_implementation/shared/gcTimer.hpp | 195
-rw-r--r-- src/share/vm/gc_implementation/shared/gcTrace.cpp | 222
-rw-r--r-- src/share/vm/gc_implementation/shared/gcTrace.hpp | 233
-rw-r--r-- src/share/vm/gc_implementation/shared/gcTraceSend.cpp | 304
-rw-r--r-- src/share/vm/gc_implementation/shared/gcTraceTime.cpp | 79
-rw-r--r-- src/share/vm/gc_implementation/shared/gcTraceTime.hpp (renamed from src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.inline.hpp) | 28
-rw-r--r-- src/share/vm/gc_implementation/shared/gcUtil.hpp | 6
-rw-r--r-- src/share/vm/gc_implementation/shared/gcWhen.hpp | 48
-rw-r--r-- src/share/vm/gc_implementation/shared/hSpaceCounters.hpp | 4
-rw-r--r-- src/share/vm/gc_implementation/shared/markSweep.cpp | 16
-rw-r--r-- src/share/vm/gc_implementation/shared/markSweep.hpp | 15
-rw-r--r-- src/share/vm/gc_implementation/shared/objectCountEventSender.cpp | 57
-rw-r--r-- src/share/vm/gc_implementation/shared/objectCountEventSender.hpp | 44
-rw-r--r-- src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp | 2
-rw-r--r-- src/share/vm/gc_implementation/shared/vmGCOperations.cpp | 41
-rw-r--r-- src/share/vm/gc_implementation/shared/vmGCOperations.hpp | 7
-rw-r--r-- src/share/vm/gc_interface/allocTracer.cpp | 48
-rw-r--r-- src/share/vm/gc_interface/allocTracer.hpp (renamed from src/share/tools/launcher/jli_util.h) | 20
-rw-r--r-- src/share/vm/gc_interface/collectedHeap.cpp | 101
-rw-r--r-- src/share/vm/gc_interface/collectedHeap.hpp | 67
-rw-r--r-- src/share/vm/gc_interface/collectedHeap.inline.hpp | 22
-rw-r--r-- src/share/vm/gc_interface/gcCause.cpp | 3
-rw-r--r-- src/share/vm/gc_interface/gcCause.hpp | 1
-rw-r--r-- src/share/vm/gc_interface/gcName.hpp | 61
-rw-r--r-- src/share/vm/interpreter/abstractInterpreter.hpp | 5
-rw-r--r-- src/share/vm/interpreter/bytecodeInterpreter.cpp | 35
-rw-r--r-- src/share/vm/interpreter/interpreter.cpp | 16
-rw-r--r-- src/share/vm/interpreter/interpreterRuntime.cpp | 75
-rw-r--r-- src/share/vm/interpreter/interpreterRuntime.hpp | 3
-rw-r--r-- src/share/vm/interpreter/linkResolver.cpp | 213
-rw-r--r-- src/share/vm/interpreter/linkResolver.hpp | 111
-rw-r--r-- src/share/vm/interpreter/templateInterpreter.cpp | 8
-rw-r--r-- src/share/vm/libadt/port.hpp | 6
-rw-r--r-- src/share/vm/memory/allocation.cpp | 138
-rw-r--r-- src/share/vm/memory/allocation.hpp | 218
-rw-r--r-- src/share/vm/memory/allocation.inline.hpp | 38
-rw-r--r-- src/share/vm/memory/binaryTreeDictionary.cpp | 4
-rw-r--r-- src/share/vm/memory/cardTableModRefBS.cpp | 57
-rw-r--r-- src/share/vm/memory/cardTableModRefBS.hpp | 3
-rw-r--r-- src/share/vm/memory/cardTableRS.cpp | 58
-rw-r--r-- src/share/vm/memory/cardTableRS.hpp | 7
-rw-r--r-- src/share/vm/memory/collectorPolicy.cpp | 97
-rw-r--r-- src/share/vm/memory/collectorPolicy.hpp | 6
-rw-r--r-- src/share/vm/memory/defNewGeneration.cpp | 68
-rw-r--r-- src/share/vm/memory/defNewGeneration.hpp | 14
-rw-r--r-- src/share/vm/memory/filemap.cpp | 34
-rw-r--r-- src/share/vm/memory/filemap.hpp | 12
-rw-r--r-- src/share/vm/memory/freeList.cpp | 11
-rw-r--r-- src/share/vm/memory/freeList.hpp | 5
-rw-r--r-- src/share/vm/memory/gcLocker.cpp | 2
-rw-r--r-- src/share/vm/memory/gcLocker.hpp | 2
-rw-r--r-- src/share/vm/memory/genCollectedHeap.cpp | 83
-rw-r--r-- src/share/vm/memory/genCollectedHeap.hpp | 32
-rw-r--r-- src/share/vm/memory/genMarkSweep.cpp | 47
-rw-r--r-- src/share/vm/memory/genRemSet.hpp | 9
-rw-r--r-- src/share/vm/memory/generation.cpp | 30
-rw-r--r-- src/share/vm/memory/generation.hpp | 8
-rw-r--r-- src/share/vm/memory/heap.cpp | 7
-rw-r--r-- src/share/vm/memory/heapInspection.cpp | 79
-rw-r--r-- src/share/vm/memory/heapInspection.hpp | 46
-rw-r--r-- src/share/vm/memory/iterator.cpp | 2
-rw-r--r-- src/share/vm/memory/iterator.hpp | 2
-rw-r--r-- src/share/vm/memory/memRegion.cpp | 20
-rw-r--r-- src/share/vm/memory/memRegion.hpp | 16
-rw-r--r-- src/share/vm/memory/metablock.cpp | 7
-rw-r--r-- src/share/vm/memory/metablock.hpp | 2
-rw-r--r-- src/share/vm/memory/metaspace.cpp | 1151
-rw-r--r-- src/share/vm/memory/metaspace.hpp | 187
-rw-r--r-- src/share/vm/memory/metaspaceCounters.cpp | 150
-rw-r--r-- src/share/vm/memory/metaspaceCounters.hpp | 39
-rw-r--r-- src/share/vm/memory/metaspaceShared.cpp | 188
-rw-r--r-- src/share/vm/memory/oopFactory.hpp | 3
-rw-r--r-- src/share/vm/memory/padded.hpp | 93
-rw-r--r-- src/share/vm/memory/padded.inline.hpp | 49
-rw-r--r-- src/share/vm/memory/referenceProcessor.cpp | 87
-rw-r--r-- src/share/vm/memory/referenceProcessor.hpp | 38
-rw-r--r-- src/share/vm/memory/referenceProcessorStats.hpp | 73
-rw-r--r-- src/share/vm/memory/referenceType.hpp | 41
-rw-r--r-- src/share/vm/memory/resourceArea.hpp | 27
-rw-r--r-- src/share/vm/memory/sharedHeap.cpp | 34
-rw-r--r-- src/share/vm/memory/sharedHeap.hpp | 5
-rw-r--r-- src/share/vm/memory/space.hpp | 5
-rw-r--r-- src/share/vm/memory/universe.cpp | 317
-rw-r--r-- src/share/vm/memory/universe.hpp | 138
-rw-r--r-- src/share/vm/oops/annotations.cpp | 2
-rw-r--r-- src/share/vm/oops/arrayKlass.cpp | 13
-rw-r--r-- src/share/vm/oops/arrayKlass.hpp | 8
-rw-r--r-- src/share/vm/oops/arrayOop.hpp | 2
-rw-r--r-- src/share/vm/oops/compiledICHolder.cpp | 2
-rw-r--r-- src/share/vm/oops/constMethod.cpp | 3
-rw-r--r-- src/share/vm/oops/constantPool.cpp | 79
-rw-r--r-- src/share/vm/oops/constantPool.hpp | 41
-rw-r--r-- src/share/vm/oops/cpCache.cpp | 31
-rw-r--r-- src/share/vm/oops/cpCache.hpp | 33
-rw-r--r-- src/share/vm/oops/fieldInfo.hpp | 8
-rw-r--r-- src/share/vm/oops/fieldStreams.hpp | 13
-rw-r--r-- src/share/vm/oops/generateOopMap.cpp | 20
-rw-r--r-- src/share/vm/oops/instanceKlass.cpp | 428
-rw-r--r-- src/share/vm/oops/instanceKlass.hpp | 182
-rw-r--r-- src/share/vm/oops/instanceOop.hpp | 4
-rw-r--r-- src/share/vm/oops/klass.cpp | 48
-rw-r--r-- src/share/vm/oops/klass.hpp | 36
-rw-r--r-- src/share/vm/oops/klass.inline.hpp | 40
-rw-r--r-- src/share/vm/oops/klassVtable.cpp | 239
-rw-r--r-- src/share/vm/oops/klassVtable.hpp | 8
-rw-r--r-- src/share/vm/oops/method.cpp | 76
-rw-r--r-- src/share/vm/oops/method.hpp | 36
-rw-r--r-- src/share/vm/oops/methodCounters.cpp | 2
-rw-r--r-- src/share/vm/oops/methodData.cpp | 3
-rw-r--r-- src/share/vm/oops/methodData.hpp | 6
-rw-r--r-- src/share/vm/oops/objArrayKlass.cpp | 6
-rw-r--r-- src/share/vm/oops/objArrayKlass.hpp | 4
-rw-r--r-- src/share/vm/oops/oop.hpp | 13
-rw-r--r-- src/share/vm/oops/oop.inline.hpp | 73
-rw-r--r-- src/share/vm/oops/oopsHierarchy.hpp | 6
-rw-r--r-- src/share/vm/oops/symbol.cpp | 12
-rw-r--r-- src/share/vm/oops/symbol.hpp | 27
-rw-r--r-- src/share/vm/oops/typeArrayKlass.hpp | 2
-rw-r--r-- src/share/vm/opto/block.cpp | 474
-rw-r--r-- src/share/vm/opto/block.hpp | 327
-rw-r--r-- src/share/vm/opto/buildOopMap.cpp | 80
-rw-r--r-- src/share/vm/opto/bytecodeInfo.cpp | 87
-rw-r--r-- src/share/vm/opto/c2_globals.hpp | 21
-rw-r--r-- src/share/vm/opto/c2compiler.cpp | 9
-rw-r--r-- src/share/vm/opto/callGenerator.cpp | 62
-rw-r--r-- src/share/vm/opto/callGenerator.hpp | 8
-rw-r--r-- src/share/vm/opto/callnode.cpp | 98
-rw-r--r-- src/share/vm/opto/callnode.hpp | 84
-rw-r--r-- src/share/vm/opto/cfgnode.cpp | 6
-rw-r--r-- src/share/vm/opto/chaitin.cpp | 273
-rw-r--r-- src/share/vm/opto/chaitin.hpp | 61
-rw-r--r-- src/share/vm/opto/coalesce.cpp | 124
-rw-r--r-- src/share/vm/opto/coalesce.hpp | 1
-rw-r--r-- src/share/vm/opto/compile.cpp | 228
-rw-r--r-- src/share/vm/opto/compile.hpp | 114
-rw-r--r-- src/share/vm/opto/connode.cpp | 2
-rw-r--r-- src/share/vm/opto/doCall.cpp | 18
-rw-r--r-- src/share/vm/opto/domgraph.cpp | 73
-rw-r--r-- src/share/vm/opto/escape.cpp | 111
-rw-r--r-- src/share/vm/opto/gcm.cpp | 437
-rw-r--r-- src/share/vm/opto/generateOptoStub.cpp | 1
-rw-r--r-- src/share/vm/opto/graphKit.cpp | 76
-rw-r--r-- src/share/vm/opto/graphKit.hpp | 7
-rw-r--r-- src/share/vm/opto/idealGraphPrinter.cpp | 30
-rw-r--r-- src/share/vm/opto/idealGraphPrinter.hpp | 11
-rw-r--r-- src/share/vm/opto/ifg.cpp | 176
-rw-r--r-- src/share/vm/opto/ifnode.cpp | 2
-rw-r--r-- src/share/vm/opto/lcm.cpp | 310
-rw-r--r-- src/share/vm/opto/library_call.cpp | 469
-rw-r--r-- src/share/vm/opto/live.cpp | 119
-rw-r--r-- src/share/vm/opto/live.hpp | 22
-rw-r--r-- src/share/vm/opto/loopPredicate.cpp | 4
-rw-r--r-- src/share/vm/opto/loopTransform.cpp | 4
-rw-r--r-- src/share/vm/opto/loopnode.cpp | 10
-rw-r--r-- src/share/vm/opto/loopnode.hpp | 2
-rw-r--r-- src/share/vm/opto/loopopts.cpp | 16
-rw-r--r-- src/share/vm/opto/machnode.cpp | 4
-rw-r--r-- src/share/vm/opto/machnode.hpp | 4
-rw-r--r-- src/share/vm/opto/macro.cpp | 113
-rw-r--r-- src/share/vm/opto/macro.hpp | 3
-rw-r--r-- src/share/vm/opto/matcher.cpp | 111
-rw-r--r-- src/share/vm/opto/matcher.hpp | 26
-rw-r--r-- src/share/vm/opto/memnode.cpp | 719
-rw-r--r-- src/share/vm/opto/memnode.hpp | 6
-rw-r--r-- src/share/vm/opto/multnode.cpp | 33
-rw-r--r-- src/share/vm/opto/multnode.hpp | 2
-rw-r--r-- src/share/vm/opto/node.cpp | 65
-rw-r--r-- src/share/vm/opto/node.hpp | 9
-rw-r--r-- src/share/vm/opto/output.cpp | 365
-rw-r--r-- src/share/vm/opto/output.hpp | 3
-rw-r--r-- src/share/vm/opto/parse.hpp | 8
-rw-r--r-- src/share/vm/opto/parse1.cpp | 6
-rw-r--r-- src/share/vm/opto/parse2.cpp | 12
-rw-r--r-- src/share/vm/opto/parse3.cpp | 113
-rw-r--r-- src/share/vm/opto/parseHelper.cpp | 5
-rw-r--r-- src/share/vm/opto/phase.cpp | 2
-rw-r--r-- src/share/vm/opto/phase.hpp | 1
-rw-r--r-- src/share/vm/opto/phaseX.cpp | 18
-rw-r--r-- src/share/vm/opto/phasetype.hpp | 98
-rw-r--r-- src/share/vm/opto/postaloc.cpp | 115
-rw-r--r-- src/share/vm/opto/reg_split.cpp | 102
-rw-r--r-- src/share/vm/opto/runtime.cpp | 30
-rw-r--r-- src/share/vm/opto/runtime.hpp | 4
-rw-r--r-- src/share/vm/opto/subnode.cpp | 7
-rw-r--r-- src/share/vm/opto/type.cpp | 206
-rw-r--r-- src/share/vm/opto/type.hpp | 51
-rw-r--r-- src/share/vm/precompiled/precompiled.hpp | 1
-rw-r--r-- src/share/vm/prims/forte.cpp | 49
-rw-r--r-- src/share/vm/prims/jni.cpp | 127
-rw-r--r-- src/share/vm/prims/jniCheck.cpp | 66
-rw-r--r-- src/share/vm/prims/jvm.cpp | 272
-rw-r--r-- src/share/vm/prims/jvm.h | 3
-rw-r--r-- src/share/vm/prims/jvmti.xml | 121
-rw-r--r-- src/share/vm/prims/jvmtiEnvBase.hpp | 6
-rw-r--r-- src/share/vm/prims/jvmtiExport.cpp | 101
-rw-r--r-- src/share/vm/prims/jvmtiExport.hpp | 3
-rw-r--r-- src/share/vm/prims/jvmtiGen.java | 4
-rw-r--r-- src/share/vm/prims/jvmtiImpl.cpp | 110
-rw-r--r-- src/share/vm/prims/jvmtiImpl.hpp | 77
-rw-r--r-- src/share/vm/prims/jvmtiRedefineClasses.cpp | 279
-rw-r--r-- src/share/vm/prims/jvmtiRedefineClasses.hpp | 46
-rw-r--r-- src/share/vm/prims/jvmtiTagMap.cpp | 4
-rw-r--r-- src/share/vm/prims/methodHandles.cpp | 326
-rw-r--r-- src/share/vm/prims/methodHandles.hpp | 24
-rw-r--r-- src/share/vm/prims/nativeLookup.cpp | 5
-rw-r--r-- src/share/vm/prims/unsafe.cpp | 13
-rw-r--r-- src/share/vm/prims/whitebox.cpp | 115
-rw-r--r-- src/share/vm/runtime/advancedThresholdPolicy.cpp | 13
-rw-r--r-- src/share/vm/runtime/advancedThresholdPolicy.hpp | 3
-rw-r--r-- src/share/vm/runtime/aprofiler.cpp | 143
-rw-r--r-- src/share/vm/runtime/aprofiler.hpp | 71
-rw-r--r-- src/share/vm/runtime/arguments.cpp | 635
-rw-r--r-- src/share/vm/runtime/arguments.hpp | 36
-rw-r--r-- src/share/vm/runtime/atomic.cpp | 29
-rw-r--r-- src/share/vm/runtime/atomic.hpp | 22
-rw-r--r-- src/share/vm/runtime/compilationPolicy.cpp | 21
-rw-r--r-- src/share/vm/runtime/compilationPolicy.hpp | 2
-rw-r--r-- src/share/vm/runtime/deoptimization.cpp | 19
-rw-r--r-- src/share/vm/runtime/fieldDescriptor.cpp | 18
-rw-r--r-- src/share/vm/runtime/fieldDescriptor.hpp | 12
-rw-r--r-- src/share/vm/runtime/fprofiler.cpp | 6
-rw-r--r-- src/share/vm/runtime/frame.cpp | 43
-rw-r--r-- src/share/vm/runtime/frame.hpp | 10
-rw-r--r-- src/share/vm/runtime/frame.inline.hpp | 6
-rw-r--r-- src/share/vm/runtime/globals.cpp | 59
-rw-r--r-- src/share/vm/runtime/globals.hpp | 122
-rw-r--r-- src/share/vm/runtime/globals_extension.hpp | 5
-rw-r--r-- src/share/vm/runtime/handles.cpp | 18
-rw-r--r-- src/share/vm/runtime/handles.hpp | 15
-rw-r--r-- src/share/vm/runtime/handles.inline.hpp | 2
-rw-r--r-- src/share/vm/runtime/init.cpp | 3
-rw-r--r-- src/share/vm/runtime/interfaceSupport.hpp | 14
-rw-r--r-- src/share/vm/runtime/java.cpp | 36
-rw-r--r-- src/share/vm/runtime/javaCalls.hpp | 2
-rw-r--r-- src/share/vm/runtime/jniHandles.cpp | 1
-rw-r--r-- src/share/vm/runtime/mutex.cpp | 4
-rw-r--r-- src/share/vm/runtime/mutexLocker.cpp | 29
-rw-r--r-- src/share/vm/runtime/mutexLocker.hpp | 11
-rw-r--r-- src/share/vm/runtime/objectMonitor.cpp | 58
-rw-r--r-- src/share/vm/runtime/objectMonitor.hpp | 27
-rw-r--r-- src/share/vm/runtime/os.cpp | 185
-rw-r--r-- src/share/vm/runtime/os.hpp | 190
-rw-r--r-- src/share/vm/runtime/park.cpp | 4
-rw-r--r-- src/share/vm/runtime/park.hpp | 4
-rw-r--r-- src/share/vm/runtime/perfData.cpp | 4
-rw-r--r-- src/share/vm/runtime/perfData.hpp | 3
-rw-r--r-- src/share/vm/runtime/reflection.cpp | 23
-rw-r--r-- src/share/vm/runtime/reflectionUtils.hpp | 14
-rw-r--r-- src/share/vm/runtime/sharedRuntime.cpp | 60
-rw-r--r-- src/share/vm/runtime/sharedRuntime.hpp | 2
-rw-r--r-- src/share/vm/runtime/stubRoutines.cpp | 12
-rw-r--r-- src/share/vm/runtime/stubRoutines.hpp | 57
-rw-r--r-- src/share/vm/runtime/sweeper.cpp | 83
-rw-r--r-- src/share/vm/runtime/sweeper.hpp | 31
-rw-r--r-- src/share/vm/runtime/synchronizer.cpp | 4
-rw-r--r-- src/share/vm/runtime/task.cpp | 10
-rw-r--r-- src/share/vm/runtime/thread.cpp | 139
-rw-r--r-- src/share/vm/runtime/thread.hpp | 35
-rw-r--r-- src/share/vm/runtime/timer.cpp | 46
-rw-r--r-- src/share/vm/runtime/timer.hpp | 18
-rw-r--r-- src/share/vm/runtime/unhandledOops.hpp | 4
-rw-r--r-- src/share/vm/runtime/virtualspace.cpp | 376
-rw-r--r-- src/share/vm/runtime/virtualspace.hpp | 17
-rw-r--r-- src/share/vm/runtime/vmStructs.cpp | 168
-rw-r--r-- src/share/vm/runtime/vmThread.cpp | 19
-rw-r--r-- src/share/vm/runtime/vm_operations.cpp | 21
-rw-r--r-- src/share/vm/runtime/vm_operations.hpp | 4
-rw-r--r-- src/share/vm/runtime/vm_version.cpp | 2
-rw-r--r-- src/share/vm/runtime/vm_version.hpp | 8
-rw-r--r-- src/share/vm/services/attachListener.cpp | 26
-rw-r--r-- src/share/vm/services/attachListener.hpp | 1
-rw-r--r-- src/share/vm/services/diagnosticArgument.cpp | 33
-rw-r--r-- src/share/vm/services/diagnosticCommand.cpp | 6
-rw-r--r-- src/share/vm/services/gcNotifier.cpp | 10
-rw-r--r-- src/share/vm/services/management.cpp | 25
-rw-r--r-- src/share/vm/services/memBaseline.cpp | 5
-rw-r--r-- src/share/vm/services/memPtr.cpp | 6
-rw-r--r-- src/share/vm/services/memPtr.hpp | 11
-rw-r--r-- src/share/vm/services/memRecorder.cpp | 14
-rw-r--r-- src/share/vm/services/memRecorder.hpp | 8
-rw-r--r-- src/share/vm/services/memReporter.cpp | 57
-rw-r--r-- src/share/vm/services/memTrackWorker.cpp | 6
-rw-r--r-- src/share/vm/services/memTrackWorker.hpp | 6
-rw-r--r-- src/share/vm/services/memTracker.cpp | 390
-rw-r--r-- src/share/vm/services/memTracker.hpp | 219
-rw-r--r-- src/share/vm/services/memoryManager.cpp | 4
-rw-r--r-- src/share/vm/services/memoryManager.hpp | 10
-rw-r--r-- src/share/vm/services/memoryPool.cpp | 31
-rw-r--r-- src/share/vm/services/memoryPool.hpp | 15
-rw-r--r-- src/share/vm/services/memoryService.cpp | 25
-rw-r--r-- src/share/vm/services/memoryService.hpp | 4
-rw-r--r-- src/share/vm/services/memoryUsage.hpp | 4
-rw-r--r-- src/share/vm/services/threadService.cpp | 43
-rw-r--r-- src/share/vm/shark/sharkBuilder.cpp | 2
-rw-r--r-- src/share/vm/trace/noTraceBackend.hpp (renamed from src/share/tools/launcher/wildcard.h) | 30
-rw-r--r-- src/share/vm/trace/trace.dtd | 86
-rw-r--r-- src/share/vm/trace/trace.xml | 367
-rw-r--r-- src/share/vm/trace/traceBackend.hpp | 70
-rw-r--r-- src/share/vm/trace/traceDataTypes.hpp | 69
-rw-r--r-- src/share/vm/trace/traceEvent.hpp | 150
-rw-r--r-- src/share/vm/trace/traceEventClasses.xsl | 246
-rw-r--r-- src/share/vm/trace/traceEventIds.xsl | 74
-rw-r--r-- src/share/vm/trace/traceMacros.hpp | 17
-rw-r--r-- src/share/vm/trace/traceStream.hpp | 121
-rw-r--r-- src/share/vm/trace/traceTime.hpp (renamed from src/share/vm/trace/traceEventTypes.hpp) | 11
-rw-r--r-- src/share/vm/trace/traceTypes.xsl | 72
-rw-r--r-- src/share/vm/trace/tracetypes.xml | 356
-rw-r--r-- src/share/vm/trace/tracing.hpp | 5
-rw-r--r-- src/share/vm/trace/xinclude.mod | 37
-rw-r--r-- src/share/vm/trace/xsl_util.xsl | 78
-rw-r--r-- src/share/vm/utilities/accessFlags.hpp | 3
-rw-r--r-- src/share/vm/utilities/array.hpp | 6
-rw-r--r-- src/share/vm/utilities/bitMap.cpp | 10
-rw-r--r-- src/share/vm/utilities/bitMap.hpp | 3
-rw-r--r-- src/share/vm/utilities/bitMap.inline.hpp | 20
-rw-r--r-- src/share/vm/utilities/debug.cpp | 167
-rw-r--r-- src/share/vm/utilities/debug.hpp | 20
-rw-r--r-- src/share/vm/utilities/decoder.cpp | 18
-rw-r--r-- src/share/vm/utilities/decoder.hpp | 14
-rw-r--r-- src/share/vm/utilities/events.hpp | 4
-rw-r--r-- src/share/vm/utilities/exceptions.cpp | 12
-rw-r--r-- src/share/vm/utilities/exceptions.hpp | 12
-rw-r--r-- src/share/vm/utilities/globalDefinitions.hpp | 70
-rw-r--r-- src/share/vm/utilities/growableArray.hpp | 1
-rw-r--r-- src/share/vm/utilities/hashtable.cpp | 52
-rw-r--r-- src/share/vm/utilities/hashtable.hpp | 13
-rw-r--r-- src/share/vm/utilities/macros.hpp | 4
-rw-r--r-- src/share/vm/utilities/ostream.cpp | 393
-rw-r--r-- src/share/vm/utilities/ostream.hpp | 21
-rw-r--r-- src/share/vm/utilities/quickSort.cpp | 14
-rw-r--r-- src/share/vm/utilities/taskqueue.hpp | 75
-rw-r--r-- src/share/vm/utilities/vmError.cpp | 35
-rw-r--r-- src/share/vm/utilities/vmError.hpp | 9
-rw-r--r-- src/share/vm/utilities/workgroup.cpp | 5
-rw-r--r-- src/share/vm/utilities/workgroup.hpp | 4
-rw-r--r-- src/share/vm/utilities/yieldingWorkgroup.hpp | 5
528 files changed, 22136 insertions, 14771 deletions
diff --git a/src/share/tools/LogCompilation/README b/src/share/tools/LogCompilation/README
index 90dc3b893..aa18fe891 100644
--- a/src/share/tools/LogCompilation/README
+++ b/src/share/tools/LogCompilation/README
@@ -4,14 +4,14 @@ It's main purpose is to recreate output similar to
requires a 1.5 JDK to build and simply typing make should build it.
It produces a jar file, logc.jar, that can be run on the
-hotspot.log from LogCompilation output like this:
+HotSpot log (by default, hotspot_pid{pid}.log) from LogCompilation output like this:
- java -jar logc.jar hotspot.log
+ java -jar logc.jar hotspot_pid1234.log
This will produce something like the normal PrintCompilation output.
Adding the -i option will also report inlining like PrintInlining.
-More information about the LogCompilation output can be found at
+More information about the LogCompilation output can be found at
https://wikis.oracle.com/display/HotSpotInternals/LogCompilation+overview
https://wikis.oracle.com/display/HotSpotInternals/PrintCompilation
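For example, combining the new default log name with the inlining report described in the README above (log file name hypothetical, and assuming the option precedes the file argument):

    java -jar logc.jar -i hotspot_pid1234.log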
diff --git a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java
index 4870dffec..c4a77c02a 100644
--- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java
+++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java
@@ -106,10 +106,12 @@ public class CallSite {
" (" + getMethod().getBytes() + " bytes) " + getReason());
}
}
+ stream.printf(" (end time: %6.4f", getTimeStamp());
if (getEndNodes() > 0) {
- stream.printf(" (end time: %6.4f nodes: %d live: %d)", getTimeStamp(), getEndNodes(), getEndLiveNodes());
+ stream.printf(" nodes: %d live: %d", getEndNodes(), getEndLiveNodes());
}
- stream.println("");
+ stream.println(")");
+
if (getReceiver() != null) {
emit(stream, indent + 4);
// stream.println("type profile " + method.holder + " -> " + receiver + " (" +
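The net effect of the CallSite change, shown with hypothetical values: previously the whole timing line was skipped when no node counts were recorded, whereas now the end time is always printed and the counts are appended only when present:

    (end time: 0.1234)                        <- no node counts recorded
    (end time: 0.1234 nodes: 482 live: 236)   <- counts present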
diff --git a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java
index 80a00364c..de80e9a10 100644
--- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java
+++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java
@@ -207,7 +207,12 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
}
String search(Attributes attr, String name) {
- return search(attr, name, null);
+ String result = attr.getValue(name);
+ if (result != null) {
+ return result;
+ } else {
+ throw new InternalError("can't find " + name);
+ }
}
String search(Attributes attr, String name, String defaultValue) {
@@ -215,13 +220,7 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
if (result != null) {
return result;
}
- if (defaultValue != null) {
- return defaultValue;
- }
- for (int i = 0; i < attr.getLength(); i++) {
- System.out.println(attr.getQName(i) + " " + attr.getValue(attr.getQName(i)));
- }
- throw new InternalError("can't find " + name);
+ return defaultValue;
}
int indent = 0;
@@ -268,17 +267,18 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
Phase p = new Phase(search(atts, "name"),
Double.parseDouble(search(atts, "stamp")),
Integer.parseInt(search(atts, "nodes", "0")),
- Integer.parseInt(search(atts, "live")));
+ Integer.parseInt(search(atts, "live", "0")));
phaseStack.push(p);
} else if (qname.equals("phase_done")) {
Phase p = phaseStack.pop();
- if (! p.getId().equals(search(atts, "name"))) {
+ String phaseName = search(atts, "name", null);
+ if (phaseName != null && !p.getId().equals(phaseName)) {
System.out.println("phase: " + p.getId());
throw new InternalError("phase name mismatch");
}
p.setEnd(Double.parseDouble(search(atts, "stamp")));
p.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
- p.setEndLiveNodes(Integer.parseInt(search(atts, "live")));
+ p.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
compile.getPhases().add(p);
} else if (qname.equals("task")) {
compile = new Compilation(Integer.parseInt(search(atts, "compile_id", "-1")));
@@ -413,8 +413,8 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
}
} else if (qname.equals("parse_done")) {
CallSite call = scopes.pop();
- call.setEndNodes(Integer.parseInt(search(atts, "nodes", "1")));
- call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "1")));
+ call.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
+ call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
call.setTimeStamp(Double.parseDouble(search(atts, "stamp")));
scopes.push(call);
}
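A minimal standalone sketch of the attribute lookup behavior after this LogParser change (the Attributes interface is from org.xml.sax; the attribute values here are hypothetical):

    import org.xml.sax.Attributes;
    import org.xml.sax.helpers.AttributesImpl;

    public class SearchDemo {
        // One-argument form: a missing attribute is an immediate error.
        static String search(Attributes attr, String name) {
            String result = attr.getValue(name);
            if (result != null) {
                return result;
            }
            throw new InternalError("can't find " + name);
        }

        // Two-argument form: a missing attribute yields the caller's
        // default, which may itself be null (used above for "name"
        // in the phase_done handler).
        static String search(Attributes attr, String name, String defaultValue) {
            String result = attr.getValue(name);
            return (result != null) ? result : defaultValue;
        }

        public static void main(String[] args) {
            AttributesImpl atts = new AttributesImpl();
            atts.addAttribute("", "stamp", "stamp", "CDATA", "0.1234");
            System.out.println(search(atts, "stamp"));      // 0.1234
            System.out.println(search(atts, "live", "0"));  // 0 (default)
        }
    }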
diff --git a/src/share/tools/ProjectCreator/BuildConfig.java b/src/share/tools/ProjectCreator/BuildConfig.java
index 953967fcc..7c95db62f 100644
--- a/src/share/tools/ProjectCreator/BuildConfig.java
+++ b/src/share/tools/ProjectCreator/BuildConfig.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -65,6 +65,7 @@ class BuildConfig {
String sourceBase = getFieldString(null, "SourceBase");
String buildSpace = getFieldString(null, "BuildSpace");
String outDir = buildBase;
+ String jdkTargetRoot = getFieldString(null, "JdkTargetRoot");
put("Id", flavourBuild);
put("OutputDir", outDir);
@@ -72,6 +73,7 @@ class BuildConfig {
put("BuildBase", buildBase);
put("BuildSpace", buildSpace);
put("OutputDll", outDir + Util.sep + outDll);
+ put("JdkTargetRoot", jdkTargetRoot);
context = new String [] {flavourBuild, flavour, build, null};
}
@@ -140,6 +142,69 @@ class BuildConfig {
return rv;
}
+ // Returns true if the specified path refers to a relative alternate
+ // source file. RelativeAltSrcInclude is usually "src\closed".
+ public static boolean matchesRelativeAltSrcInclude(String path) {
+ String relativeAltSrcInclude =
+ getFieldString(null, "RelativeAltSrcInclude");
+ Vector<String> v = getFieldVector(null, "AltRelativeInclude");
+ for (String pathPart : v) {
+ if (path.contains(relativeAltSrcInclude + Util.sep + pathPart)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Returns the relative alternate source file for the specified path.
+ // Null is returned if the specified path does not have a matching
+ // alternate source file.
+ public static String getMatchingRelativeAltSrcFile(String path) {
+ Vector<String> v = getFieldVector(null, "RelativeAltSrcFileList");
+ if (v == null) {
+ return null;
+ }
+ for (String pathPart : v) {
+ if (path.endsWith(pathPart)) {
+ String relativeAltSrcInclude =
+ getFieldString(null, "RelativeAltSrcInclude");
+ return relativeAltSrcInclude + Util.sep + pathPart;
+ }
+ }
+ return null;
+ }
+
+ // Returns true if the specified path has a matching alternate
+ // source file.
+ public static boolean matchesRelativeAltSrcFile(String path) {
+ return getMatchingRelativeAltSrcFile(path) != null;
+ }
+
+ // Track the specified alternate source file. The source file is
+ // tracked without the leading .*<sep><RelativeAltSrcFileList><sep>
+ // part to make matching regular source files easier.
+ public static void trackRelativeAltSrcFile(String path) {
+ String pattern = getFieldString(null, "RelativeAltSrcInclude") +
+ Util.sep;
+ int altSrcInd = path.indexOf(pattern);
+ if (altSrcInd == -1) {
+ // not an AltSrc path
+ return;
+ }
+
+ altSrcInd += pattern.length();
+ if (altSrcInd >= path.length()) {
+ // not a valid AltSrc path
+ return;
+ }
+
+ String altSrcFile = path.substring(altSrcInd);
+ Vector v = getFieldVector(null, "RelativeAltSrcFileList");
+ if (v == null || !v.contains(altSrcFile)) {
+ addFieldVector(null, "RelativeAltSrcFileList", altSrcFile);
+ }
+ }
+
void addTo(Hashtable ht, String key, String value) {
ht.put(expandFormat(key), expandFormat(value));
}
@@ -150,7 +215,7 @@ class BuildConfig {
sysDefines.add("_WINDOWS");
sysDefines.add("HOTSPOT_BUILD_USER=\\\""+System.getProperty("user.name")+"\\\"");
sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\"");
- sysDefines.add("INCLUDE_TRACE");
+ sysDefines.add("INCLUDE_TRACE=1");
sysDefines.add("_JNI_IMPLEMENTATION_");
if (vars.get("PlatformName").equals("Win32")) {
sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i386\\\"");
@@ -270,8 +335,19 @@ class BuildConfig {
private Vector getSourceIncludes() {
Vector<String> rv = new Vector<String>();
- Vector<String> ri = new Vector<String>();
String sourceBase = getFieldString(null, "SourceBase");
+
+ // add relative alternate source include values:
+ String relativeAltSrcInclude =
+ getFieldString(null, "RelativeAltSrcInclude");
+ Vector<String> asri = new Vector<String>();
+ collectRelevantVectors(asri, "AltRelativeInclude");
+ for (String f : asri) {
+ rv.add(sourceBase + Util.sep + relativeAltSrcInclude +
+ Util.sep + f);
+ }
+
+ Vector<String> ri = new Vector<String>();
collectRelevantVectors(ri, "RelativeInclude");
for (String f : ri) {
rv.add(sourceBase + Util.sep + f);
@@ -539,35 +615,6 @@ class TieredProductConfig extends ProductConfig {
}
}
-class CoreDebugConfig extends GenericDebugNonKernelConfig {
- String getOptFlag() {
- return getCI().getNoOptFlag();
- }
-
- CoreDebugConfig() {
- initNames("core", "debug", "jvm.dll");
- init(getIncludes(), getDefines());
- }
-}
-
-class CoreFastDebugConfig extends GenericDebugNonKernelConfig {
- String getOptFlag() {
- return getCI().getOptFlag();
- }
-
- CoreFastDebugConfig() {
- initNames("core", "fastdebug", "jvm.dll");
- init(getIncludes(), getDefines());
- }
-}
-
-class CoreProductConfig extends ProductConfig {
- CoreProductConfig() {
- initNames("core", "product", "jvm.dll");
- init(getIncludes(), getDefines());
- }
-}
-
abstract class CompilerInterface {
abstract Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir);
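To illustrate the suffix matching the new alternate-source helpers perform, here is a self-contained sketch (the separator and "src\closed" value stand in for Util.sep and the RelativeAltSrcInclude field; the paths are invented for illustration):

    public class AltSrcDemo {
        static final String SEP = "\\";
        static final String REL_ALT_SRC = "src" + SEP + "closed";

        // Mirrors trackRelativeAltSrcFile: strip everything up to and
        // including "src\closed\" so the remainder can later be compared
        // against regular source paths with endsWith().
        static String relativeAltSrcFile(String path) {
            String pattern = REL_ALT_SRC + SEP;
            int ind = path.indexOf(pattern);
            if (ind == -1) {
                return null;               // not an alternate source path
            }
            ind += pattern.length();
            return ind < path.length() ? path.substring(ind) : null;
        }

        public static void main(String[] args) {
            // Prints "share\vm\foo.cpp"; a regular file ending with this
            // suffix is then treated as replaced by the alt-src copy.
            System.out.println(
                relativeAltSrcFile("C:\\hs\\src\\closed\\share\\vm\\foo.cpp"));
        }
    }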
diff --git a/src/share/tools/ProjectCreator/FileTreeCreator.java b/src/share/tools/ProjectCreator/FileTreeCreator.java
index 3643c572e..a81132c70 100644
--- a/src/share/tools/ProjectCreator/FileTreeCreator.java
+++ b/src/share/tools/ProjectCreator/FileTreeCreator.java
@@ -12,11 +12,15 @@ public class FileTreeCreator extends SimpleFileVisitor<Path>
final int startDirLength;
Stack<DirAttributes> attributes = new Stack<DirAttributes>();
Vector<BuildConfig> allConfigs;
- WinGammaPlatformVC10 wg;
+ WinGammaPlatform wg;
+ WinGammaPlatformVC10 wg10;
- public FileTreeCreator(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatformVC10 wg) {
+ public FileTreeCreator(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatform wg) {
super();
this.wg = wg;
+ if (wg instanceof WinGammaPlatformVC10) {
+ wg10 = (WinGammaPlatformVC10)wg;
+ }
this.allConfigs = allConfigs;
this.startDir = startDir;
startDirLength = startDir.toAbsolutePath().toString().length();
diff --git a/src/share/tools/ProjectCreator/FileTreeCreatorVC10.java b/src/share/tools/ProjectCreator/FileTreeCreatorVC10.java
index 837eef1a5..cb0fe3357 100644
--- a/src/share/tools/ProjectCreator/FileTreeCreatorVC10.java
+++ b/src/share/tools/ProjectCreator/FileTreeCreatorVC10.java
@@ -1,3 +1,27 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
import static java.nio.file.FileVisitResult.CONTINUE;
import java.io.IOException;
@@ -21,6 +45,8 @@ public class FileTreeCreatorVC10 extends FileTreeCreator {
boolean usePch = false;
boolean disablePch = false;
boolean useIgnore = false;
+ boolean isAltSrc = false; // only needed as a debugging crumb
+ boolean isReplacedByAltSrc = false;
String fileName = file.getFileName().toString();
// TODO hideFile
@@ -30,6 +56,26 @@ public class FileTreeCreatorVC10 extends FileTreeCreator {
usePch = true;
}
+ String fileLoc = vcProjLocation.relativize(file).toString();
+
+ // isAltSrc and isReplacedByAltSrc applies to all configs for a file
+ if (BuildConfig.matchesRelativeAltSrcInclude(
+ file.toAbsolutePath().toString())) {
+ // current file is an alternate source file so track it
+ isAltSrc = true;
+ BuildConfig.trackRelativeAltSrcFile(
+ file.toAbsolutePath().toString());
+ } else if (BuildConfig.matchesRelativeAltSrcFile(
+ file.toAbsolutePath().toString())) {
+ // current file is a regular file that matches an alternate
+ // source file so yack about replacing the regular file
+ isReplacedByAltSrc = true;
+ System.out.println("INFO: alternate source file '" +
+ BuildConfig.getMatchingRelativeAltSrcFile(
+ file.toAbsolutePath().toString()) +
+ "' replaces '" + fileLoc + "'");
+ }
+
for (BuildConfig cfg : allConfigs) {
if (cfg.lookupHashFieldInContext("IgnoreFile", fileName) != null) {
useIgnore = true;
@@ -57,10 +103,9 @@ public class FileTreeCreatorVC10 extends FileTreeCreator {
}
}
- String tagName = wg.getFileTagFromSuffix(fileName);
- String fileLoc = vcProjLocation.relativize(file).toString();
+ String tagName = wg10.getFileTagFromSuffix(fileName);
- if (!useIgnore && !disablePch && !usePch) {
+ if (!useIgnore && !disablePch && !usePch && !isReplacedByAltSrc) {
wg.tag(tagName, new String[] { "Include", fileLoc});
} else {
wg.startTag(
@@ -78,12 +123,17 @@ public class FileTreeCreatorVC10 extends FileTreeCreator {
if (disablePch) {
wg.tag("PrecompiledHeader", "Condition", "'$(Configuration)|$(Platform)'=='" + cfg.get("Name") + "'");
}
+ if (isReplacedByAltSrc) {
+ wg.tagData("ExcludedFromBuild", "true", "Condition",
+ "'$(Configuration)|$(Platform)'=='" +
+ cfg.get("Name") + "'");
+ }
}
wg.endTag();
}
String filter = startDir.relativize(file.getParent().toAbsolutePath()).toString();
- wg.addFilterDependency(fileLoc, filter);
+ wg10.addFilterDependency(fileLoc, filter);
return CONTINUE;
}
@@ -112,7 +162,7 @@ public class FileTreeCreatorVC10 extends FileTreeCreator {
if (!hide) {
String name = startDir.relativize(path.toAbsolutePath()).toString();
if (!"".equals(name)) {
- wg.addFilter(name);
+ wg10.addFilter(name);
}
attributes.push(newAttr);
@@ -137,6 +187,4 @@ public class FileTreeCreatorVC10 extends FileTreeCreator {
public void writeFileTree() throws IOException {
Files.walkFileTree(this.startDir, this);
}
-
-
- }
\ No newline at end of file
+}
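For a regular file shadowed by an alternate source file, the tag/tagData calls above emit a per-configuration exclusion entry in the generated .vcxproj roughly like the following (the enclosing element name for a .cpp file and the configuration string are assumptions, not taken from this diff):

    <ClCompile Include="..\..\src\share\vm\foo.cpp">
      <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='product|Win32'">true</ExcludedFromBuild>
    </ClCompile>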
diff --git a/src/share/tools/ProjectCreator/FileTreeCreatorVC7.java b/src/share/tools/ProjectCreator/FileTreeCreatorVC7.java
index b36e0121f..9a4318457 100644
--- a/src/share/tools/ProjectCreator/FileTreeCreatorVC7.java
+++ b/src/share/tools/ProjectCreator/FileTreeCreatorVC7.java
@@ -12,7 +12,7 @@ import java.util.Vector;
public class FileTreeCreatorVC7 extends FileTreeCreator {
public FileTreeCreatorVC7(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatform wg) {
- super(startDir, allConfigs, null);
+ super(startDir, allConfigs, wg);
}
@Override
diff --git a/src/share/tools/ProjectCreator/ProjectCreator.java b/src/share/tools/ProjectCreator/ProjectCreator.java
index a3065e512..f1e16ea8a 100644
--- a/src/share/tools/ProjectCreator/ProjectCreator.java
+++ b/src/share/tools/ProjectCreator/ProjectCreator.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,10 +39,15 @@ public class ProjectCreator {
+ "jvm.dll; no trailing slash>");
System.err.println(" If any of the above are specified, "
+ "they must all be.");
+ System.err.println(" Note: if '-altRelativeInclude' option below is "
+ + "used, then the '-relativeAltSrcInclude' option must be used "
+ + "to specify the alternate source dir, e.g., 'src\\closed'");
System.err.println(" Additional, optional arguments, which can be "
+ "specified multiple times:");
System.err.println(" -absoluteInclude <string containing absolute "
+ "path to include directory>");
+ System.err.println(" -altRelativeInclude <string containing "
+ + "alternate include directory relative to -envVar>");
System.err.println(" -relativeInclude <string containing include "
+ "directory relative to -envVar>");
System.err.println(" -define <preprocessor flag to be #defined "
diff --git a/src/share/tools/ProjectCreator/WinGammaPlatform.java b/src/share/tools/ProjectCreator/WinGammaPlatform.java
index db93a27cf..be17959fc 100644
--- a/src/share/tools/ProjectCreator/WinGammaPlatform.java
+++ b/src/share/tools/ProjectCreator/WinGammaPlatform.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -140,10 +140,17 @@ public abstract class WinGammaPlatform {
"already exist>");
System.err.println(" If any of the above are specified, "+
"they must all be.");
+ System.err.println(" Note: if '-altRelativeInclude' option below " +
+ "is used, then the '-relativeAltSrcInclude' " +
+ "option must be used to specify the alternate " +
+ "source dir, e.g., 'src\\closed'");
System.err.println(" Additional, optional arguments, which can be " +
"specified multiple times:");
System.err.println(" -absoluteInclude <string containing absolute " +
"path to include directory>");
+ System.err.println(" -altRelativeInclude <string containing " +
+ "alternate include directory relative to " +
+ "-sourceBase>");
System.err.println(" -relativeInclude <string containing include " +
"directory relative to -sourceBase>");
System.err.println(" -define <preprocessor flag to be #defined " +
@@ -343,6 +350,12 @@ public abstract class WinGammaPlatform {
HsArgHandler.VECTOR
),
+ new HsArgRule("-altRelativeInclude",
+ "AltRelativeInclude",
+ null,
+ HsArgHandler.VECTOR
+ ),
+
new HsArgRule("-relativeInclude",
"RelativeInclude",
null,
@@ -355,6 +368,12 @@ public abstract class WinGammaPlatform {
HsArgHandler.VECTOR
),
+ new HsArgRule("-relativeAltSrcInclude",
+ "RelativeAltSrcInclude",
+ null,
+ HsArgHandler.STRING
+ ),
+
new HsArgRule("-relativeSrcInclude",
"RelativeSrcInclude",
null,
@@ -560,10 +579,6 @@ public abstract class WinGammaPlatform {
allConfigs.add(new TieredFastDebugConfig());
allConfigs.add(new TieredProductConfig());
- allConfigs.add(new CoreDebugConfig());
- allConfigs.add(new CoreFastDebugConfig());
- allConfigs.add(new CoreProductConfig());
-
return allConfigs;
}
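Putting the new argument rules together: -relativeAltSrcInclude is a single string naming the alternate source root, while -altRelativeInclude is a vector and may be repeated per include directory. A hypothetical fragment of a ProjectCreator command line (all paths invented for illustration):

    ... -sourceBase C:\hs -relativeAltSrcInclude src\closed -altRelativeInclude share\vm -altRelativeInclude os\windows\vm ...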
diff --git a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java
index 137abdc30..6216a84ad 100644
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java
@@ -1,3 +1,27 @@
+/*
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -24,7 +48,7 @@ public class WinGammaPlatformVC10 extends WinGammaPlatformVC7 {
public void writeProjectFile(String projectFileName, String projectName,
Vector<BuildConfig> allConfigs) throws IOException {
System.out.println();
- System.out.print(" Writing .vcxproj file: " + projectFileName);
+ System.out.println(" Writing .vcxproj file: " + projectFileName);
String projDir = Util.normalize(new File(projectFileName).getParent());
@@ -98,11 +122,6 @@ public class WinGammaPlatformVC10 extends WinGammaPlatformVC7 {
tagV(cfg.getV("LinkerFlags"));
endTag();
- startTag("PostBuildEvent");
- tagData("Message", BuildConfig.getFieldString(null, "PostbuildDescription"));
- tagData("Command", cfg.expandFormat(BuildConfig.getFieldString(null, "PostbuildCommand").replace("\t", "\r\n")));
- endTag();
-
startTag("PreLinkEvent");
tagData("Message", BuildConfig.getFieldString(null, "PrelinkDescription"));
tagData("Command", cfg.expandFormat(BuildConfig.getFieldString(null, "PrelinkCommand").replace("\t", "\r\n")));
@@ -119,7 +138,7 @@ public class WinGammaPlatformVC10 extends WinGammaPlatformVC7 {
endTag();
printWriter.close();
- System.out.println(" Done.");
+ System.out.println(" Done writing .vcxproj file.");
writeFilterFile(projectFileName, projectName, allConfigs, projDir);
writeUserFile(projectFileName, allConfigs);
@@ -141,7 +160,9 @@ public class WinGammaPlatformVC10 extends WinGammaPlatformVC7 {
for (BuildConfig cfg : allConfigs) {
startTag(cfg, "PropertyGroup");
- tagData("LocalDebuggerCommand", "$(TargetDir)/hotspot.exe");
+ tagData("LocalDebuggerCommand", cfg.get("JdkTargetRoot") + "\\bin\\java.exe");
+ tagData("LocalDebuggerCommandArguments", "-XXaltjvm=$(TargetDir) -Dsun.java.launcher=gamma");
+ tagData("LocalDebuggerEnvironment", "JAVA_HOME=" + cfg.get("JdkTargetRoot"));
endTag();
}
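The rewritten debugger settings launch the freshly built VM through the target JDK's regular launcher instead of the removed hotspot.exe. Outside Visual Studio, the equivalent launch would be roughly (JdkTargetRoot value hypothetical):

    set JAVA_HOME=C:\jdk8
    C:\jdk8\bin\java.exe -XXaltjvm=$(TargetDir) -Dsun.java.launcher=gamma <main class>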
diff --git a/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java b/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
index 09e961cbf..b7a99a652 100644
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
@@ -139,19 +139,22 @@ public class WinGammaPlatformVC7 extends WinGammaPlatform {
tagV("Tool", cfg.getV("LinkerFlags"));
- tag("Tool",
- new String[] {
- "Name",
- "VCPostBuildEventTool",
- "Description",
- BuildConfig
- .getFieldString(null, "PostbuildDescription"),
- // Caution: String.replace(String,String) is available
- // from JDK5 onwards only
- "CommandLine",
- cfg.expandFormat(BuildConfig.getFieldString(null,
- "PostbuildCommand").replace("\t",
- "&#x0D;&#x0A;")) });
+ String postBuildCmd = BuildConfig.getFieldString(null,
+ "PostbuildCommand");
+ if (postBuildCmd != null) {
+ tag("Tool",
+ new String[] {
+ "Name",
+ "VCPostBuildEventTool",
+ "Description",
+ BuildConfig
+ .getFieldString(null, "PostbuildDescription"),
+ // Caution: String.replace(String,String) is available
+ // from JDK5 onwards only
+ "CommandLine",
+ cfg.expandFormat(postBuildCmd.replace("\t",
+ "&#x0D;&#x0A;")) });
+ }
tag("Tool", new String[] { "Name", "VCPreBuildEventTool" });
diff --git a/src/share/tools/hsdis/hsdis.c b/src/share/tools/hsdis/hsdis.c
index 527401377..98803d6ba 100644
--- a/src/share/tools/hsdis/hsdis.c
+++ b/src/share/tools/hsdis/hsdis.c
@@ -27,7 +27,6 @@
HotSpot PrintAssembly option.
*/
-#ifdef TARGET_ARCH_aarch64
#include <config.h>
#endif
#include <libiberty.h>
diff --git a/src/share/tools/launcher/java.c b/src/share/tools/launcher/java.c
deleted file mode 100644
index 63f11d77c..000000000
--- a/src/share/tools/launcher/java.c
+++ /dev/null
@@ -1,2080 +0,0 @@
-/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-/*
- * Gamma (Hotspot internal engineering test) launcher based on 6.0u22 JDK,
- * search "GAMMA" for gamma specific changes.
- *
- * GAMMA: gamma launcher is much simpler than regular java launcher in that
- * JVM is either statically linked in or it is installed in the
- * same directory where the launcher exists, so we don't have to
- * worry about choosing the right JVM based on command line flag, jar
- * file and/or ergonomics. Intead of removing unused logic from source
- * they are commented out with #ifndef GAMMA, hopefully it'll be easier
- * to maintain this file in sync with regular JDK launcher.
- */
-
-/*
- * Shared source for 'java' command line tool.
- *
- * If JAVA_ARGS is defined, then acts as a launcher for applications. For
- * instance, the JDK command line tools such as javac and javadoc (see
- * makefiles for more details) are built with this program. Any arguments
- * prefixed with '-J' will be passed directly to the 'java' command.
- */
-
-#ifdef GAMMA
-# ifdef JAVA_ARGS
-# error Do NOT define JAVA_ARGS when building gamma launcher
-# endif
-# if !defined(LINK_INTO_AOUT) && !defined(LINK_INTO_LIBJVM)
-# error Either LINK_INTO_AOUT or LINK_INTO_LIBJVM must be defined
-# endif
-#endif
-
-/*
- * One job of the launcher is to remove command line options which the
- * vm does not understand and will not process. These options include
- * options which select which style of vm is run (e.g. -client and
- * -server) as well as options which select the data model to use.
- * Additionally, for tools which invoke an underlying vm "-J-foo"
- * options are turned into "-foo" options to the vm. This option
- * filtering is handled in a number of places in the launcher, some of
- * it in machine-dependent code. In this file, the function
- * CheckJVMType removes vm style options and TranslateApplicationArgs
- * removes "-J" prefixes. On unix platforms, the
- * CreateExecutionEnvironment function from the unix java_md.c file
- * processes and removes -d<n> options. However, in case
- * CreateExecutionEnvironment does not need to exec because
- * LD_LIBRARY_PATH is set acceptably and the data model does not need
- * to be changed, ParseArguments will screen out the redundant -d<n>
- * options and prevent them from being passed to the vm; this is done
- * by using the machine-dependent call
- * RemovableMachineDependentOption.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <jni.h>
-#include <jvm.h>
-#include "java.h"
-#ifndef GAMMA
-#include "manifest_info.h"
-#include "version_comp.h"
-#include "splashscreen.h"
-#endif
-#include "wildcard.h"
-
-#ifndef FULL_VERSION
-#define FULL_VERSION JDK_MAJOR_VERSION "." JDK_MINOR_VERSION
-#endif
-
-/*
- * The following environment variable is used to influence the behavior
- * of the jre exec'd through the SelectVersion routine. The command line
- * options which specify the version are not passed to the exec'd version,
- * because that jre may be an older version which wouldn't recognize them.
- * This environment variable is known to this (and later) version and serves
- * to suppress the version selection code. This is not only for efficiency,
- * but also for correctness, since any command line options have been
- * removed which would cause any value found in the manifest to be used.
- * This would be incorrect because the command line options are defined
- * to take precedence.
- *
- * The value associated with this environment variable is the MainClass
- * name from within the executable jar file (if any). This is strictly a
- * performance enhancement to avoid re-reading the jar file manifest.
- *
- * A NOTE TO DEVELOPERS: For performance reasons it is important that
- * the program image remain relatively small until after SelectVersion
- * and CreateExecutionEnvironment have finished their possibly recursive
- * processing. Watch everything, but resist all temptations to use Java
- * interfaces.
- */
-#define ENV_ENTRY "_JAVA_VERSION_SET"
-
-#ifndef GAMMA
-#define SPLASH_FILE_ENV_ENTRY "_JAVA_SPLASH_FILE"
-#define SPLASH_JAR_ENV_ENTRY "_JAVA_SPLASH_JAR"
-#endif
-
-static jboolean printVersion = JNI_FALSE; /* print and exit */
-static jboolean showVersion = JNI_FALSE; /* print but continue */
-static char *progname;
-jboolean _launcher_debug = JNI_FALSE;
-
-#ifndef GAMMA
-/*
- * Entries for splash screen environment variables.
- * putenv is performed in SelectVersion. We need
- * them in memory until UnsetEnv, so they are made static
- * global instead of auto local.
- */
-static char* splash_file_entry = NULL;
-static char* splash_jar_entry = NULL;
-#endif
-
-/*
- * List of VM options to be specified when the VM is created.
- */
-static JavaVMOption *options;
-static int numOptions, maxOptions;
-
-/*
- * Prototypes for functions internal to launcher.
- */
-static void SetClassPath(const char *s);
-static void SelectVersion(int argc, char **argv, char **main_class);
-static jboolean ParseArguments(int *pargc, char ***pargv, char **pjarfile,
- char **pclassname, int *pret, const char *jvmpath);
-static jboolean InitializeJVM(JavaVM **pvm, JNIEnv **penv,
- InvocationFunctions *ifn);
-static jstring NewPlatformString(JNIEnv *env, char *s);
-static jobjectArray NewPlatformStringArray(JNIEnv *env, char **strv, int strc);
-static jclass LoadClass(JNIEnv *env, char *name);
-static jstring GetMainClassName(JNIEnv *env, char *jarname);
-static void SetJavaCommandLineProp(char* classname, char* jarfile, int argc, char** argv);
-static void SetJavaLauncherProp(void);
-
-#ifdef JAVA_ARGS
-static void TranslateApplicationArgs(int *pargc, char ***pargv);
-static jboolean AddApplicationOptions(void);
-#endif
-
-static void PrintJavaVersion(JNIEnv *env);
-static void PrintUsage(void);
-static jint PrintXUsage(const char *jvmpath);
-
-static void SetPaths(int argc, char **argv);
-
-#ifndef GAMMA
-
-/* Maximum supported entries from jvm.cfg. */
-#define INIT_MAX_KNOWN_VMS 10
-/* Values for vmdesc.flag */
-#define VM_UNKNOWN -1
-#define VM_KNOWN 0
-#define VM_ALIASED_TO 1
-#define VM_WARN 2
-#define VM_ERROR 3
-#define VM_IF_SERVER_CLASS 4
-#define VM_IGNORE 5
-struct vmdesc {
- char *name;
- int flag;
- char *alias;
- char *server_class;
-};
-static struct vmdesc *knownVMs = NULL;
-static int knownVMsCount = 0;
-static int knownVMsLimit = 0;
-
-static void GrowKnownVMs();
-static int KnownVMIndex(const char* name);
-static void FreeKnownVMs();
-static void ShowSplashScreen();
-
-#endif /* ifndef GAMMA */
-
-jboolean ServerClassMachine();
-
-/* flag which if set suppresses error messages from the launcher */
-static int noExitErrorMessage = 0;
-
-/*
- * Running Java code in the primordial thread caused many problems. We
- * will create a new thread to invoke the JVM. See 6316197 for more
- * information.
- */
-static jlong threadStackSize = 0; /* stack size of the new thread */
-
-int JNICALL JavaMain(void * args); /* entry point */
-
-struct JavaMainArgs {
- int argc;
- char ** argv;
- char * jarfile;
- char * classname;
- InvocationFunctions ifn;
-};
-
-/*
- * Entry point.
- */
-int
-main(int argc, char ** argv)
-{
- char *jarfile = 0;
- char *classname = 0;
- char *s = 0;
- char *main_class = NULL;
- int ret;
- InvocationFunctions ifn;
- jlong start, end;
- char jrepath[MAXPATHLEN], jvmpath[MAXPATHLEN];
- char ** original_argv = argv;
-
- if (getenv("_JAVA_LAUNCHER_DEBUG") != 0) {
- _launcher_debug = JNI_TRUE;
- printf("----_JAVA_LAUNCHER_DEBUG----\n");
- }
-
-#ifndef GAMMA
- /*
- * Make sure the specified version of the JRE is running.
- *
- * There are three things to note about the SelectVersion() routine:
- * 1) If the version running isn't correct, this routine doesn't
- * return (either the correct version has been exec'd or an error
- * was issued).
- * 2) Argc and Argv in this scope are *not* altered by this routine.
- * It is the responsibility of subsequent code to ignore the
- * arguments handled by this routine.
- * 3) As a side-effect, the variable "main_class" is guaranteed to
- * be set (if it should ever be set). This isn't exactly the
- * poster child for structured programming, but it is a small
- * price to pay for not processing a jar file operand twice.
- * (Note: This side effect has been disabled. See comment on
- * bugid 5030265 below.)
- */
- SelectVersion(argc, argv, &main_class);
-#endif /* ifndef GAMMA */
-
- /* copy original argv */
- {
- int i;
- original_argv = (char**)JLI_MemAlloc(sizeof(char*)*(argc+1));
- for(i = 0; i < argc+1; i++)
- original_argv[i] = argv[i];
- }
-
- CreateExecutionEnvironment(&argc, &argv,
- jrepath, sizeof(jrepath),
- jvmpath, sizeof(jvmpath),
- original_argv);
-
- printf("Using java runtime at: %s\n", jrepath);
-
- ifn.CreateJavaVM = 0;
- ifn.GetDefaultJavaVMInitArgs = 0;
-
- if (_launcher_debug)
- start = CounterGet();
- if (!LoadJavaVM(jvmpath, &ifn)) {
- exit(6);
- }
- if (_launcher_debug) {
- end = CounterGet();
- printf("%ld micro seconds to LoadJavaVM\n",
- (long)(jint)Counter2Micros(end-start));
- }
-
-#ifdef JAVA_ARGS /* javac, jar and friends. */
- progname = "java";
-#else /* java, oldjava, javaw and friends */
-#ifdef PROGNAME
- progname = PROGNAME;
-#else
- progname = *argv;
- if ((s = strrchr(progname, FILE_SEPARATOR)) != 0) {
- progname = s + 1;
- }
-#endif /* PROGNAME */
-#endif /* JAVA_ARGS */
- ++argv;
- --argc;
-
-#ifdef JAVA_ARGS
- /* Preprocess wrapper arguments */
- TranslateApplicationArgs(&argc, &argv);
- if (!AddApplicationOptions()) {
- exit(1);
- }
-#endif
-
- /* Set default CLASSPATH */
- if ((s = getenv("CLASSPATH")) == 0) {
- s = ".";
- }
-#ifndef JAVA_ARGS
- SetClassPath(s);
-#endif
-
- /*
- * Parse command line options; if the return value of
- * ParseArguments is false, the program should exit.
- */
- if (!ParseArguments(&argc, &argv, &jarfile, &classname, &ret, jvmpath)) {
- exit(ret);
- }
-
- /* Override class path if -jar flag was specified */
- if (jarfile != 0) {
- SetClassPath(jarfile);
- }
-
- /* set the -Dsun.java.command pseudo property */
- SetJavaCommandLineProp(classname, jarfile, argc, argv);
-
- /* Set the -Dsun.java.launcher pseudo property */
- SetJavaLauncherProp();
-
- /* set the -Dsun.java.launcher.* platform properties */
- SetJavaLauncherPlatformProps();
-
-#ifndef GAMMA
- /* Show the splash screen if needed */
- ShowSplashScreen();
-#endif
-
- /*
- * Done with all command line processing and potential re-execs so
- * clean up the environment.
- */
- (void)UnsetEnv(ENV_ENTRY);
-#ifndef GAMMA
- (void)UnsetEnv(SPLASH_FILE_ENV_ENTRY);
- (void)UnsetEnv(SPLASH_JAR_ENV_ENTRY);
-
- JLI_MemFree(splash_jar_entry);
- JLI_MemFree(splash_file_entry);
-#endif
-
- /*
- * If user doesn't specify stack size, check if VM has a preference.
- * Note that HotSpot no longer supports JNI_VERSION_1_1 but it will
- * return its default stack size through the init args structure.
- */
- if (threadStackSize == 0) {
- struct JDK1_1InitArgs args1_1;
- memset((void*)&args1_1, 0, sizeof(args1_1));
- args1_1.version = JNI_VERSION_1_1;
- ifn.GetDefaultJavaVMInitArgs(&args1_1); /* ignore return value */
- if (args1_1.javaStackSize > 0) {
- threadStackSize = args1_1.javaStackSize;
- }
- }
-
- { /* Create a new thread to create JVM and invoke main method */
- struct JavaMainArgs args;
-
- args.argc = argc;
- args.argv = argv;
- args.jarfile = jarfile;
- args.classname = classname;
- args.ifn = ifn;
-
- return ContinueInNewThread(JavaMain, threadStackSize, (void*)&args);
- }
-}
-
-int JNICALL
-JavaMain(void * _args)
-{
- struct JavaMainArgs *args = (struct JavaMainArgs *)_args;
- int argc = args->argc;
- char **argv = args->argv;
- char *jarfile = args->jarfile;
- char *classname = args->classname;
- InvocationFunctions ifn = args->ifn;
-
- JavaVM *vm = 0;
- JNIEnv *env = 0;
- jstring mainClassName;
- jclass mainClass;
- jmethodID mainID;
- jobjectArray mainArgs;
- int ret = 0;
- jlong start, end;
-
- /*
- * Error message to print or display; by default the message will
- * only be displayed in a window.
- */
- char * message = "Fatal exception occurred. Program will exit.";
- jboolean messageDest = JNI_FALSE;
-
- /* Initialize the virtual machine */
-
- if (_launcher_debug)
- start = CounterGet();
- if (!InitializeJVM(&vm, &env, &ifn)) {
- ReportErrorMessage("Could not create the Java virtual machine.",
- JNI_TRUE);
- exit(1);
- }
-
- if (printVersion || showVersion) {
- PrintJavaVersion(env);
- if ((*env)->ExceptionOccurred(env)) {
- ReportExceptionDescription(env);
- goto leave;
- }
- if (printVersion) {
- ret = 0;
- message = NULL;
- goto leave;
- }
- if (showVersion) {
- fprintf(stderr, "\n");
- }
- }
-
- /* If the user specified neither a class name nor a JAR file */
- if (jarfile == 0 && classname == 0) {
- PrintUsage();
- message = NULL;
- goto leave;
- }
-
-#ifndef GAMMA
- FreeKnownVMs(); /* after last possible PrintUsage() */
-#endif
-
- if (_launcher_debug) {
- end = CounterGet();
- printf("%ld micro seconds to InitializeJVM\n",
- (long)(jint)Counter2Micros(end-start));
- }
-
- /* At this stage, argc/argv have the applications' arguments */
- if (_launcher_debug) {
- int i = 0;
- printf("Main-Class is '%s'\n", classname ? classname : "");
- printf("Apps' argc is %d\n", argc);
- for (; i < argc; i++) {
- printf(" argv[%2d] = '%s'\n", i, argv[i]);
- }
- }
-
- ret = 1;
-
- /*
- * Get the application's main class.
- *
- * See bugid 5030265. The Main-Class name has already been parsed
- * from the manifest, but not parsed properly for UTF-8 support.
- * Hence the code here ignores the value previously extracted and
- * uses the pre-existing code to reextract the value. This is
- * possibly an end of release cycle expedient. However, it has
- * also been discovered that passing some character sets through
- * the environment has "strange" behavior on some variants of
- * Windows. Hence, maybe the manifest parsing code local to the
- * launcher should never be enhanced.
- *
- * Hence, future work should either:
- * 1) Correct the local parsing code and verify that the
- * Main-Class attribute gets properly passed through
- * all environments,
-     *    2) Remove the vestiges of maintaining main_class through
- * the environment (and remove these comments).
- */
- if (jarfile != 0) {
- mainClassName = GetMainClassName(env, jarfile);
- if ((*env)->ExceptionOccurred(env)) {
- ReportExceptionDescription(env);
- goto leave;
- }
- if (mainClassName == NULL) {
- const char * format = "Failed to load Main-Class manifest "
- "attribute from\n%s";
- message = (char*)JLI_MemAlloc((strlen(format) + strlen(jarfile)) *
- sizeof(char));
- sprintf(message, format, jarfile);
- messageDest = JNI_TRUE;
- goto leave;
- }
- classname = (char *)(*env)->GetStringUTFChars(env, mainClassName, 0);
- if (classname == NULL) {
- ReportExceptionDescription(env);
- goto leave;
- }
- mainClass = LoadClass(env, classname);
-        if (mainClass == NULL) { /* exception occurred */
- const char * format = "Could not find the main class: %s. Program will exit.";
- ReportExceptionDescription(env);
- message = (char *)JLI_MemAlloc((strlen(format) +
- strlen(classname)) * sizeof(char) );
- messageDest = JNI_TRUE;
- sprintf(message, format, classname);
- goto leave;
- }
- (*env)->ReleaseStringUTFChars(env, mainClassName, classname);
- } else {
- mainClassName = NewPlatformString(env, classname);
- if (mainClassName == NULL) {
- const char * format = "Failed to load Main Class: %s";
- message = (char *)JLI_MemAlloc((strlen(format) + strlen(classname)) *
- sizeof(char) );
- sprintf(message, format, classname);
- messageDest = JNI_TRUE;
- goto leave;
- }
- classname = (char *)(*env)->GetStringUTFChars(env, mainClassName, 0);
- if (classname == NULL) {
- ReportExceptionDescription(env);
- goto leave;
- }
- mainClass = LoadClass(env, classname);
-        if (mainClass == NULL) { /* exception occurred */
- const char * format = "Could not find the main class: %s. Program will exit.";
- ReportExceptionDescription(env);
- message = (char *)JLI_MemAlloc((strlen(format) +
- strlen(classname)) * sizeof(char) );
- messageDest = JNI_TRUE;
- sprintf(message, format, classname);
- goto leave;
- }
- (*env)->ReleaseStringUTFChars(env, mainClassName, classname);
- }
-
- /* Get the application's main method */
- mainID = (*env)->GetStaticMethodID(env, mainClass, "main",
- "([Ljava/lang/String;)V");
- if (mainID == NULL) {
- if ((*env)->ExceptionOccurred(env)) {
- ReportExceptionDescription(env);
- } else {
- message = "No main method found in specified class.";
- messageDest = JNI_TRUE;
- }
- goto leave;
- }
-
- { /* Make sure the main method is public */
- jint mods;
- jmethodID mid;
- jobject obj = (*env)->ToReflectedMethod(env, mainClass,
- mainID, JNI_TRUE);
-
- if( obj == NULL) { /* exception occurred */
- ReportExceptionDescription(env);
- goto leave;
- }
-
- mid =
- (*env)->GetMethodID(env,
- (*env)->GetObjectClass(env, obj),
- "getModifiers", "()I");
- if ((*env)->ExceptionOccurred(env)) {
- ReportExceptionDescription(env);
- goto leave;
- }
-
- mods = (*env)->CallIntMethod(env, obj, mid);
- if ((mods & 1) == 0) { /* if (!Modifier.isPublic(mods)) ... */
- message = "Main method not public.";
- messageDest = JNI_TRUE;
- goto leave;
- }
- }
-
- /* Build argument array */
- mainArgs = NewPlatformStringArray(env, argv, argc);
- if (mainArgs == NULL) {
- ReportExceptionDescription(env);
- goto leave;
- }
-
- /* Invoke main method. */
- (*env)->CallStaticVoidMethod(env, mainClass, mainID, mainArgs);
-
- /*
- * The launcher's exit code (in the absence of calls to
- * System.exit) will be non-zero if main threw an exception.
- */
- ret = (*env)->ExceptionOccurred(env) == NULL ? 0 : 1;
-
- /*
- * Detach the main thread so that it appears to have ended when
- * the application's main method exits. This will invoke the
- * uncaught exception handler machinery if main threw an
- * exception. An uncaught exception handler cannot change the
- * launcher's return code except by calling System.exit.
- */
- if ((*vm)->DetachCurrentThread(vm) != 0) {
- message = "Could not detach main thread.";
- messageDest = JNI_TRUE;
- ret = 1;
- goto leave;
- }
-
- message = NULL;
-
- leave:
- /*
- * Wait for all non-daemon threads to end, then destroy the VM.
- * This will actually create a trivial new Java waiter thread
- * named "DestroyJavaVM", but this will be seen as a different
- * thread from the one that executed main, even though they are
- * the same C thread. This allows mainThread.join() and
- * mainThread.isAlive() to work as expected.
- */
- (*vm)->DestroyJavaVM(vm);
-
- if(message != NULL && !noExitErrorMessage)
- ReportErrorMessage(message, messageDest);
- return ret;
-}
-
-#ifndef GAMMA
-/*
- * Checks the command line options to find which JVM type was
- * specified. If no command line option was given for the JVM type,
- * the default type is used. The environment variable
- * JDK_ALTERNATE_VM and the command line option -XXaltjvm= are also
- * checked as ways of specifying which JVM type to invoke.
- */
-char *
-CheckJvmType(int *pargc, char ***argv, jboolean speculative) {
- int i, argi;
- int argc;
- char **newArgv;
- int newArgvIdx = 0;
- int isVMType;
- int jvmidx = -1;
- char *jvmtype = getenv("JDK_ALTERNATE_VM");
-
- argc = *pargc;
-
- /* To make things simpler we always copy the argv array */
- newArgv = JLI_MemAlloc((argc + 1) * sizeof(char *));
-
- /* The program name is always present */
- newArgv[newArgvIdx++] = (*argv)[0];
-
- for (argi = 1; argi < argc; argi++) {
- char *arg = (*argv)[argi];
- isVMType = 0;
-
-#ifdef JAVA_ARGS
- if (arg[0] != '-') {
- newArgv[newArgvIdx++] = arg;
- continue;
- }
-#else
- if (strcmp(arg, "-classpath") == 0 ||
- strcmp(arg, "-cp") == 0) {
- newArgv[newArgvIdx++] = arg;
- argi++;
- if (argi < argc) {
- newArgv[newArgvIdx++] = (*argv)[argi];
- }
- continue;
- }
- if (arg[0] != '-') break;
-#endif
-
- /* Did the user pass an explicit VM type? */
- i = KnownVMIndex(arg);
- if (i >= 0) {
- jvmtype = knownVMs[jvmidx = i].name + 1; /* skip the - */
- isVMType = 1;
- *pargc = *pargc - 1;
- }
-
- /* Did the user specify an "alternate" VM? */
- else if (strncmp(arg, "-XXaltjvm=", 10) == 0 || strncmp(arg, "-J-XXaltjvm=", 12) == 0) {
- isVMType = 1;
- jvmtype = arg+((arg[1]=='X')? 10 : 12);
- jvmidx = -1;
- }
-
- if (!isVMType) {
- newArgv[newArgvIdx++] = arg;
- }
- }
-
- /*
- * Finish copying the arguments if we aborted the above loop.
- * NOTE that if we aborted via "break" then we did NOT copy the
- * last argument above, and in addition argi will be less than
- * argc.
- */
- while (argi < argc) {
- newArgv[newArgvIdx++] = (*argv)[argi];
- argi++;
- }
-
- /* argv is null-terminated */
- newArgv[newArgvIdx] = 0;
-
- /* Copy back argv */
- *argv = newArgv;
- *pargc = newArgvIdx;
-
- /* use the default VM type if not specified (no alias processing) */
- if (jvmtype == NULL) {
- char* result = knownVMs[0].name+1;
- /* Use a different VM type if we are on a server class machine? */
- if ((knownVMs[0].flag == VM_IF_SERVER_CLASS) &&
- (ServerClassMachine() == JNI_TRUE)) {
- result = knownVMs[0].server_class+1;
- }
- if (_launcher_debug) {
- printf("Default VM: %s\n", result);
- }
- return result;
- }
-
- /* if using an alternate VM, no alias processing */
- if (jvmidx < 0)
- return jvmtype;
-
- /* Resolve aliases first */
- {
- int loopCount = 0;
- while (knownVMs[jvmidx].flag == VM_ALIASED_TO) {
- int nextIdx = KnownVMIndex(knownVMs[jvmidx].alias);
-
- if (loopCount > knownVMsCount) {
- if (!speculative) {
- ReportErrorMessage("Error: Corrupt jvm.cfg file; cycle in alias list.",
- JNI_TRUE);
- exit(1);
- } else {
- return "ERROR";
- /* break; */
- }
- }
-
- if (nextIdx < 0) {
- if (!speculative) {
- ReportErrorMessage2("Error: Unable to resolve VM alias %s",
- knownVMs[jvmidx].alias, JNI_TRUE);
- exit(1);
- } else {
- return "ERROR";
- }
- }
- jvmidx = nextIdx;
- jvmtype = knownVMs[jvmidx].name+1;
- loopCount++;
- }
- }
-
- switch (knownVMs[jvmidx].flag) {
- case VM_WARN:
- if (!speculative) {
- fprintf(stderr, "Warning: %s VM not supported; %s VM will be used\n",
- jvmtype, knownVMs[0].name + 1);
- }
- /* fall through */
- case VM_IGNORE:
- jvmtype = knownVMs[jvmidx=0].name + 1;
- /* fall through */
- case VM_KNOWN:
- break;
- case VM_ERROR:
- if (!speculative) {
- ReportErrorMessage2("Error: %s VM not supported", jvmtype, JNI_TRUE);
- exit(1);
- } else {
- return "ERROR";
- }
- }
-
- return jvmtype;
-}
-#endif /* ifndef GAMMA */
-
-# define KB (1024UL)
-# define MB (1024UL * KB)
-# define GB (1024UL * MB)
-
-/* copied from HotSpot function "atomll()" */
-static int
-parse_stack_size(const char *s, jlong *result) {
- jlong n = 0;
- int args_read = sscanf(s, JLONG_FORMAT, &n);
- if (args_read != 1) {
- return 0;
- }
- while (*s != '\0' && *s >= '0' && *s <= '9') {
- s++;
- }
-  /* 4705540: illegal if more characters are found after the first non-digit */
- if (strlen(s) > 1) {
- return 0;
- }
- switch (*s) {
- case 'T': case 't':
- *result = n * GB * KB;
- return 1;
- case 'G': case 'g':
- *result = n * GB;
- return 1;
- case 'M': case 'm':
- *result = n * MB;
- return 1;
- case 'K': case 'k':
- *result = n * KB;
- return 1;
- case '\0':
- *result = n;
- return 1;
- default:
-    /* Create JVM with default stack and let VM handle malformed -Xss string */
- return 0;
- }
-}
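-
-/*
- * Illustrative examples (not in the original source):
- *   parse_stack_size("512k", &n) sets n to 524288 and returns 1;
- *   parse_stack_size("1m", &n) sets n to 1048576 and returns 1;
- *   parse_stack_size("2km", &n) returns 0, because a character remains
- *   after the first suffix (see 4705540 above).
- */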
-
-/*
- * Adds a new VM option with the given name and value.
- */
-void
-AddOption(char *str, void *info)
-{
- /*
- * Expand options array if needed to accommodate at least one more
- * VM option.
- */
- if (numOptions >= maxOptions) {
- if (options == 0) {
- maxOptions = 4;
- options = JLI_MemAlloc(maxOptions * sizeof(JavaVMOption));
- } else {
- JavaVMOption *tmp;
- maxOptions *= 2;
- tmp = JLI_MemAlloc(maxOptions * sizeof(JavaVMOption));
- memcpy(tmp, options, numOptions * sizeof(JavaVMOption));
- JLI_MemFree(options);
- options = tmp;
- }
- }
- options[numOptions].optionString = str;
- options[numOptions++].extraInfo = info;
-
- if (strncmp(str, "-Xss", 4) == 0) {
- jlong tmp;
- if (parse_stack_size(str + 4, &tmp)) {
- threadStackSize = tmp;
- }
- }
-}
-
-static void
-SetClassPath(const char *s)
-{
- char *def;
- s = JLI_WildcardExpandClasspath(s);
- def = JLI_MemAlloc(strlen(s) + 40);
- sprintf(def, "-Djava.class.path=%s", s);
- AddOption(def, NULL);
-}
-
-#ifndef GAMMA
-/*
- * The SelectVersion() routine ensures that an appropriate version of
- * the JRE is running. The specification for the appropriate version
- * is obtained from either the manifest of a jar file (preferred) or
- * from command line options.
- * The routine also parses splash screen command line options and
- * passes on their values in private environment variables.
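- *
- * For example (hypothetical manifest, for illustration only), a jar
- * whose manifest contains
- *     Main-Class: com.example.Main
- *     JRE-Version: 1.5+
- * causes the launcher to locate a JRE of version 1.5 or later and exec
- * it, unless a -version: option on the command line overrides the
- * manifest value.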
- */
-static void
-SelectVersion(int argc, char **argv, char **main_class)
-{
- char *arg;
- char **new_argv;
- char **new_argp;
- char *operand;
- char *version = NULL;
- char *jre = NULL;
- int jarflag = 0;
- int headlessflag = 0;
- int restrict_search = -1; /* -1 implies not known */
- manifest_info info;
- char env_entry[MAXNAMELEN + 24] = ENV_ENTRY "=";
- char *splash_file_name = NULL;
- char *splash_jar_name = NULL;
- char *env_in;
- int res;
-
- /*
- * If the version has already been selected, set *main_class
- * with the value passed through the environment (if any) and
- * simply return.
- */
- if ((env_in = getenv(ENV_ENTRY)) != NULL) {
- if (*env_in != '\0')
- *main_class = JLI_StringDup(env_in);
- return;
- }
-
- /*
- * Scan through the arguments for options relevant to multiple JRE
- * support. For reference, the command line syntax is defined as:
- *
- * SYNOPSIS
- * java [options] class [argument...]
- *
- * java [options] -jar file.jar [argument...]
- *
- * As the scan is performed, make a copy of the argument list with
- * the version specification options (new to 1.5) removed, so that
- * a version less than 1.5 can be exec'd.
- *
- * Note that due to the syntax of the native Windows interface
- * CreateProcess(), processing similar to the following exists in
- * the Windows platform specific routine ExecJRE (in java_md.c).
- * Changes here should be reproduced there.
- */
- new_argv = JLI_MemAlloc((argc + 1) * sizeof(char*));
- new_argv[0] = argv[0];
- new_argp = &new_argv[1];
- argc--;
- argv++;
- while ((arg = *argv) != 0 && *arg == '-') {
- if (strncmp(arg, "-version:", 9) == 0) {
- version = arg + 9;
- } else if (strcmp(arg, "-jre-restrict-search") == 0) {
- restrict_search = 1;
- } else if (strcmp(arg, "-no-jre-restrict-search") == 0) {
- restrict_search = 0;
- } else {
- if (strcmp(arg, "-jar") == 0)
- jarflag = 1;
- /* deal with "unfortunate" classpath syntax */
- if ((strcmp(arg, "-classpath") == 0 || strcmp(arg, "-cp") == 0) &&
- (argc >= 2)) {
- *new_argp++ = arg;
- argc--;
- argv++;
- arg = *argv;
- }
-
- /*
-             * Check for the headless toolkit option in the same way AWT does:
- * "true" means true and any other value means false
- */
- if (strcmp(arg, "-Djava.awt.headless=true") == 0) {
- headlessflag = 1;
- } else if (strncmp(arg, "-Djava.awt.headless=", 20) == 0) {
- headlessflag = 0;
- } else if (strncmp(arg, "-splash:", 8) == 0) {
- splash_file_name = arg+8;
- }
- *new_argp++ = arg;
- }
- argc--;
- argv++;
- }
- if (argc <= 0) { /* No operand? Possibly legit with -[full]version */
- operand = NULL;
- } else {
- argc--;
- *new_argp++ = operand = *argv++;
- }
- while (argc-- > 0) /* Copy over [argument...] */
- *new_argp++ = *argv++;
- *new_argp = NULL;
-
- /*
- * If there is a jar file, read the manifest. If the jarfile can't be
- * read, the manifest can't be read from the jar file, or the manifest
- * is corrupt, issue the appropriate error messages and exit.
- *
- * Even if there isn't a jar file, construct a manifest_info structure
- * containing the command line information. It's a convenient way to carry
- * this data around.
- */
- if (jarflag && operand) {
- if ((res = JLI_ParseManifest(operand, &info)) != 0) {
- if (res == -1)
- ReportErrorMessage2("Unable to access jarfile %s",
- operand, JNI_TRUE);
- else
- ReportErrorMessage2("Invalid or corrupt jarfile %s",
- operand, JNI_TRUE);
- exit(1);
- }
-
- /*
- * Command line splash screen option should have precedence
- * over the manifest, so the manifest data is used only if
- * splash_file_name has not been initialized above during command
- * line parsing
- */
- if (!headlessflag && !splash_file_name && info.splashscreen_image_file_name) {
- splash_file_name = info.splashscreen_image_file_name;
- splash_jar_name = operand;
- }
- } else {
- info.manifest_version = NULL;
- info.main_class = NULL;
- info.jre_version = NULL;
- info.jre_restrict_search = 0;
- }
-
- /*
- * Passing on splash screen info in environment variables
- */
- if (splash_file_name && !headlessflag) {
-        splash_file_entry = JLI_MemAlloc(strlen(SPLASH_FILE_ENV_ENTRY "=")+strlen(splash_file_name)+1);
- strcpy(splash_file_entry, SPLASH_FILE_ENV_ENTRY "=");
- strcat(splash_file_entry, splash_file_name);
- putenv(splash_file_entry);
- }
- if (splash_jar_name && !headlessflag) {
-        splash_jar_entry = JLI_MemAlloc(strlen(SPLASH_JAR_ENV_ENTRY "=")+strlen(splash_jar_name)+1);
- strcpy(splash_jar_entry, SPLASH_JAR_ENV_ENTRY "=");
- strcat(splash_jar_entry, splash_jar_name);
- putenv(splash_jar_entry);
- }
-
- /*
- * The JRE-Version and JRE-Restrict-Search values (if any) from the
- * manifest are overwritten by any specified on the command line.
- */
- if (version != NULL)
- info.jre_version = version;
- if (restrict_search != -1)
- info.jre_restrict_search = restrict_search;
-
- /*
- * "Valid" returns (other than unrecoverable errors) follow. Set
- * main_class as a side-effect of this routine.
- */
- if (info.main_class != NULL)
- *main_class = JLI_StringDup(info.main_class);
-
- /*
- * If no version selection information is found either on the command
- * line or in the manifest, simply return.
- */
- if (info.jre_version == NULL) {
- JLI_FreeManifest();
- JLI_MemFree(new_argv);
- return;
- }
-
- /*
- * Check for correct syntax of the version specification (JSR 56).
- */
- if (!JLI_ValidVersionString(info.jre_version)) {
- ReportErrorMessage2("Syntax error in version specification \"%s\"",
- info.jre_version, JNI_TRUE);
- exit(1);
- }
-
- /*
- * Find the appropriate JVM on the system. Just to be as forgiving as
- * possible, if the standard algorithms don't locate an appropriate
- * jre, check to see if the one running will satisfy the requirements.
- * This can happen on systems which haven't been set up for multiple
- * JRE support.
- */
- jre = LocateJRE(&info);
- if (_launcher_debug)
- printf("JRE-Version = %s, JRE-Restrict-Search = %s Selected = %s\n",
- (info.jre_version?info.jre_version:"null"),
- (info.jre_restrict_search?"true":"false"), (jre?jre:"null"));
- if (jre == NULL) {
- if (JLI_AcceptableRelease(FULL_VERSION, info.jre_version)) {
- JLI_FreeManifest();
- JLI_MemFree(new_argv);
- return;
- } else {
- ReportErrorMessage2(
- "Unable to locate JRE meeting specification \"%s\"",
- info.jre_version, JNI_TRUE);
- exit(1);
- }
- }
-
- /*
- * If I'm not the chosen one, exec the chosen one. Returning from
- * ExecJRE indicates that I am indeed the chosen one.
- *
- * The private environment variable _JAVA_VERSION_SET is used to
- * prevent the chosen one from re-reading the manifest file and
- * using the values found within to override the (potential) command
- * line flags stripped from argv (because the target may not
- * understand them). Passing the MainClass value is an optimization
- * to avoid locating, expanding and parsing the manifest extra
- * times.
- */
- if (info.main_class != NULL) {
- if (strlen(info.main_class) <= MAXNAMELEN) {
- (void)strcat(env_entry, info.main_class);
- } else {
- ReportErrorMessage("Error: main-class: attribute exceeds system limits\n", JNI_TRUE);
- exit(1);
- }
- }
- (void)putenv(env_entry);
- ExecJRE(jre, new_argv);
- JLI_FreeManifest();
- JLI_MemFree(new_argv);
- return;
-}
-#endif /* ifndef GAMMA */
-
-/*
- * Parses command line arguments. Returns JNI_FALSE if launcher
- * should exit without starting vm (e.g. certain version and usage
- * options); returns JNI_TRUE if vm needs to be started to process
- * given options. *pret (the launcher process return value) is set to
- * 0 for a normal exit.
- */
-static jboolean
-ParseArguments(int *pargc, char ***pargv, char **pjarfile,
- char **pclassname, int *pret, const char *jvmpath)
-{
- int argc = *pargc;
- char **argv = *pargv;
- jboolean jarflag = JNI_FALSE;
- char *arg;
-
- *pret = 1;
- while ((arg = *argv) != 0 && *arg == '-') {
- argv++; --argc;
- if (strcmp(arg, "-classpath") == 0 || strcmp(arg, "-cp") == 0) {
- if (argc < 1) {
- ReportErrorMessage2("%s requires class path specification",
- arg, JNI_TRUE);
- PrintUsage();
- return JNI_FALSE;
- }
- SetClassPath(*argv);
- argv++; --argc;
- } else if (strcmp(arg, "-jar") == 0) {
- jarflag = JNI_TRUE;
- } else if (strcmp(arg, "-help") == 0 ||
- strcmp(arg, "-h") == 0 ||
- strcmp(arg, "-?") == 0) {
- PrintUsage();
- *pret = 0;
- return JNI_FALSE;
- } else if (strcmp(arg, "-version") == 0) {
- printVersion = JNI_TRUE;
- return JNI_TRUE;
- } else if (strcmp(arg, "-showversion") == 0) {
- showVersion = JNI_TRUE;
- } else if (strcmp(arg, "-X") == 0) {
- *pret = PrintXUsage(jvmpath);
- return JNI_FALSE;
-/*
- * The following cases provide backward compatibility with old-style
- * command line options.
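- * (For example, "-verbosegc" is rewritten below as "-verbose:gc" and
- * "-ms32m" as "-Xms32m".)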
- */
- } else if (strcmp(arg, "-fullversion") == 0) {
- fprintf(stderr, "%s full version \"%s\"\n", progname,
- FULL_VERSION);
- *pret = 0;
- return JNI_FALSE;
- } else if (strcmp(arg, "-verbosegc") == 0) {
- AddOption("-verbose:gc", NULL);
- } else if (strcmp(arg, "-t") == 0) {
- AddOption("-Xt", NULL);
- } else if (strcmp(arg, "-tm") == 0) {
- AddOption("-Xtm", NULL);
- } else if (strcmp(arg, "-debug") == 0) {
- AddOption("-Xdebug", NULL);
- } else if (strcmp(arg, "-noclassgc") == 0) {
- AddOption("-Xnoclassgc", NULL);
- } else if (strcmp(arg, "-Xfuture") == 0) {
- AddOption("-Xverify:all", NULL);
- } else if (strcmp(arg, "-verify") == 0) {
- AddOption("-Xverify:all", NULL);
- } else if (strcmp(arg, "-verifyremote") == 0) {
- AddOption("-Xverify:remote", NULL);
- } else if (strcmp(arg, "-noverify") == 0) {
- AddOption("-Xverify:none", NULL);
- } else if (strcmp(arg, "-XXsuppressExitMessage") == 0) {
- noExitErrorMessage = 1;
- } else if (strncmp(arg, "-prof", 5) == 0) {
- char *p = arg + 5;
- char *tmp = JLI_MemAlloc(strlen(arg) + 50);
- if (*p) {
- sprintf(tmp, "-Xrunhprof:cpu=old,file=%s", p + 1);
- } else {
- sprintf(tmp, "-Xrunhprof:cpu=old,file=java.prof");
- }
- AddOption(tmp, NULL);
- } else if (strncmp(arg, "-ss", 3) == 0 ||
- strncmp(arg, "-oss", 4) == 0 ||
- strncmp(arg, "-ms", 3) == 0 ||
- strncmp(arg, "-mx", 3) == 0) {
- char *tmp = JLI_MemAlloc(strlen(arg) + 6);
- sprintf(tmp, "-X%s", arg + 1); /* skip '-' */
- AddOption(tmp, NULL);
- } else if (strcmp(arg, "-checksource") == 0 ||
- strcmp(arg, "-cs") == 0 ||
- strcmp(arg, "-noasyncgc") == 0) {
- /* No longer supported */
- fprintf(stderr,
- "Warning: %s option is no longer supported.\n",
- arg);
- } else if (strncmp(arg, "-version:", 9) == 0 ||
- strcmp(arg, "-no-jre-restrict-search") == 0 ||
- strcmp(arg, "-jre-restrict-search") == 0 ||
- strncmp(arg, "-splash:", 8) == 0) {
- ; /* Ignore machine independent options already handled */
- } else if (RemovableMachineDependentOption(arg) ) {
- ; /* Do not pass option to vm. */
- }
- else {
- AddOption(arg, NULL);
- }
- }
-
- if (--argc >= 0) {
- if (jarflag) {
- *pjarfile = *argv++;
- *pclassname = 0;
- } else {
- *pjarfile = 0;
- *pclassname = *argv++;
- }
- *pargc = argc;
- *pargv = argv;
- }
-
- return JNI_TRUE;
-}
-
-/*
- * Initializes the Java Virtual Machine. Also frees options array when
- * finished.
- */
-static jboolean
-InitializeJVM(JavaVM **pvm, JNIEnv **penv, InvocationFunctions *ifn)
-{
- JavaVMInitArgs args;
- jint r;
-
- memset(&args, 0, sizeof(args));
- args.version = JNI_VERSION_1_2;
- args.nOptions = numOptions;
- args.options = options;
- args.ignoreUnrecognized = JNI_FALSE;
-
- if (_launcher_debug) {
- int i = 0;
- printf("JavaVM args:\n ");
- printf("version 0x%08lx, ", (long)args.version);
- printf("ignoreUnrecognized is %s, ",
- args.ignoreUnrecognized ? "JNI_TRUE" : "JNI_FALSE");
- printf("nOptions is %ld\n", (long)args.nOptions);
- for (i = 0; i < numOptions; i++)
- printf(" option[%2d] = '%s'\n",
- i, args.options[i].optionString);
- }
-
- r = ifn->CreateJavaVM(pvm, (void **)penv, &args);
- JLI_MemFree(options);
- return r == JNI_OK;
-}
-
-
-#define NULL_CHECK0(e) if ((e) == 0) return 0
-#define NULL_CHECK(e) if ((e) == 0) return
-
-static jstring platformEncoding = NULL;
-static jstring getPlatformEncoding(JNIEnv *env) {
- if (platformEncoding == NULL) {
- jstring propname = (*env)->NewStringUTF(env, "sun.jnu.encoding");
- if (propname) {
- jclass cls;
- jmethodID mid;
- NULL_CHECK0 (cls = (*env)->FindClass(env, "java/lang/System"));
- NULL_CHECK0 (mid = (*env)->GetStaticMethodID(
- env, cls,
- "getProperty",
- "(Ljava/lang/String;)Ljava/lang/String;"));
- platformEncoding = (*env)->CallStaticObjectMethod (
- env, cls, mid, propname);
- }
- }
- return platformEncoding;
-}
-
-static jboolean isEncodingSupported(JNIEnv *env, jstring enc) {
- jclass cls;
- jmethodID mid;
- NULL_CHECK0 (cls = (*env)->FindClass(env, "java/nio/charset/Charset"));
- NULL_CHECK0 (mid = (*env)->GetStaticMethodID(
- env, cls,
- "isSupported",
- "(Ljava/lang/String;)Z"));
- return (*env)->CallStaticBooleanMethod(env, cls, mid, enc);
-}
-
-/*
- * Returns a new Java string object for the specified platform string.
- */
-static jstring
-NewPlatformString(JNIEnv *env, char *s)
-{
- int len = (int)strlen(s);
- jclass cls;
- jmethodID mid;
- jbyteArray ary;
- jstring enc;
-
- if (s == NULL)
- return 0;
- enc = getPlatformEncoding(env);
-
- ary = (*env)->NewByteArray(env, len);
- if (ary != 0) {
- jstring str = 0;
- (*env)->SetByteArrayRegion(env, ary, 0, len, (jbyte *)s);
- if (!(*env)->ExceptionOccurred(env)) {
- if (isEncodingSupported(env, enc) == JNI_TRUE) {
- NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String"));
- NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>",
- "([BLjava/lang/String;)V"));
- str = (*env)->NewObject(env, cls, mid, ary, enc);
- } else {
-                /* If the encoding specified in sun.jnu.encoding is not
-                   supported by "Charset.isSupported", we have to fall back
-                   to using String(byte[]) explicitly here, without specifying
-                   the encoding name, in which case the StringCoding class
-                   will pick up ISO-8859-1 as the fallback converter for us.
-                */
- NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String"));
- NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>",
- "([B)V"));
- str = (*env)->NewObject(env, cls, mid, ary);
- }
- (*env)->DeleteLocalRef(env, ary);
- return str;
- }
- }
- return 0;
-}
-
-/*
- * Returns a new array of Java string objects for the specified
- * array of platform strings.
- */
-static jobjectArray
-NewPlatformStringArray(JNIEnv *env, char **strv, int strc)
-{
- jarray cls;
- jarray ary;
- int i;
-
- NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String"));
- NULL_CHECK0(ary = (*env)->NewObjectArray(env, strc, cls, 0));
- for (i = 0; i < strc; i++) {
- jstring str = NewPlatformString(env, *strv++);
- NULL_CHECK0(str);
- (*env)->SetObjectArrayElement(env, ary, i, str);
- (*env)->DeleteLocalRef(env, str);
- }
- return ary;
-}
-
-/*
- * Loads a class, converting '.' to '/'.
- */
-static jclass
-LoadClass(JNIEnv *env, char *name)
-{
- char *buf = JLI_MemAlloc(strlen(name) + 1);
- char *s = buf, *t = name, c;
- jclass cls;
- jlong start, end;
-
- if (_launcher_debug)
- start = CounterGet();
-
- do {
- c = *t++;
- *s++ = (c == '.') ? '/' : c;
- } while (c != '\0');
- cls = (*env)->FindClass(env, buf);
- JLI_MemFree(buf);
-
- if (_launcher_debug) {
- end = CounterGet();
- printf("%ld micro seconds to load main class\n",
- (long)(jint)Counter2Micros(end-start));
- printf("----_JAVA_LAUNCHER_DEBUG----\n");
- }
-
- return cls;
-}
-
-
-/*
- * Returns the main class name for the specified jar file.
- */
-static jstring
-GetMainClassName(JNIEnv *env, char *jarname)
-{
-#define MAIN_CLASS "Main-Class"
- jclass cls;
- jmethodID mid;
- jobject jar, man, attr;
- jstring str, result = 0;
-
- NULL_CHECK0(cls = (*env)->FindClass(env, "java/util/jar/JarFile"));
- NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>",
- "(Ljava/lang/String;)V"));
- NULL_CHECK0(str = NewPlatformString(env, jarname));
- NULL_CHECK0(jar = (*env)->NewObject(env, cls, mid, str));
- NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "getManifest",
- "()Ljava/util/jar/Manifest;"));
- man = (*env)->CallObjectMethod(env, jar, mid);
- if (man != 0) {
- NULL_CHECK0(mid = (*env)->GetMethodID(env,
- (*env)->GetObjectClass(env, man),
- "getMainAttributes",
- "()Ljava/util/jar/Attributes;"));
- attr = (*env)->CallObjectMethod(env, man, mid);
- if (attr != 0) {
- NULL_CHECK0(mid = (*env)->GetMethodID(env,
- (*env)->GetObjectClass(env, attr),
- "getValue",
- "(Ljava/lang/String;)Ljava/lang/String;"));
- NULL_CHECK0(str = NewPlatformString(env, MAIN_CLASS));
- result = (*env)->CallObjectMethod(env, attr, mid, str);
- }
- }
- return result;
-}
-
-#ifdef JAVA_ARGS
-static char *java_args[] = JAVA_ARGS;
-static char *app_classpath[] = APP_CLASSPATH;
-
-/*
- * For tools, convert command line args thus:
- * javac -cp foo:foo/"*" -J-ms32m ...
- * java -ms32m -cp JLI_WildcardExpandClasspath(foo:foo/"*") ...
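- * (Illustration, assuming directory foo contains a.jar and b.jar: the
- * wildcard element foo/"*" expands to foo/a.jar:foo/b.jar, joined with
- * the platform path separator.)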
- */
-static void
-TranslateApplicationArgs(int *pargc, char ***pargv)
-{
- const int NUM_ARGS = (sizeof(java_args) / sizeof(char *));
- int argc = *pargc;
- char **argv = *pargv;
- int nargc = argc + NUM_ARGS;
- char **nargv = JLI_MemAlloc((nargc + 1) * sizeof(char *));
- int i;
-
- *pargc = nargc;
- *pargv = nargv;
-
- /* Copy the VM arguments (i.e. prefixed with -J) */
- for (i = 0; i < NUM_ARGS; i++) {
- char *arg = java_args[i];
- if (arg[0] == '-' && arg[1] == 'J') {
- *nargv++ = arg + 2;
- }
- }
-
- for (i = 0; i < argc; i++) {
- char *arg = argv[i];
- if (arg[0] == '-' && arg[1] == 'J') {
- if (arg[2] == '\0') {
- ReportErrorMessage("Error: the -J option should not be "
- "followed by a space.", JNI_TRUE);
- exit(1);
- }
- *nargv++ = arg + 2;
- }
- }
-
- /* Copy the rest of the arguments */
- for (i = 0; i < NUM_ARGS; i++) {
- char *arg = java_args[i];
- if (arg[0] != '-' || arg[1] != 'J') {
- *nargv++ = arg;
- }
- }
- for (i = 0; i < argc; i++) {
- char *arg = argv[i];
- if (arg[0] == '-') {
- if (arg[1] == 'J')
- continue;
-#ifdef EXPAND_CLASSPATH_WILDCARDS
- if (arg[1] == 'c'
- && (strcmp(arg, "-cp") == 0 ||
- strcmp(arg, "-classpath") == 0)
- && i < argc - 1) {
- *nargv++ = arg;
- *nargv++ = (char *) JLI_WildcardExpandClasspath(argv[i+1]);
- i++;
- continue;
- }
-#endif
- }
- *nargv++ = arg;
- }
- *nargv = 0;
-}
-
-/*
- * For our tools, we try to add 3 VM options:
- * -Denv.class.path=<envcp>
- * -Dapplication.home=<apphome>
- * -Djava.class.path=<appcp>
- * <envcp> is the user's setting of CLASSPATH -- for instance the user
- * tells javac where to find binary classes through this environment
- * variable. Notice that users will be able to compile against our
- * tools classes (sun.tools.javac.Main) only if they explicitly add
- * tools.jar to CLASSPATH.
- * <apphome> is the directory where the application is installed.
- * <appcp> is the classpath to where our apps' classfiles are.
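- *
- * Illustration with hypothetical values: with CLASSPATH=/home/user/classes,
- * an application home of /opt/app, and APP_CLASSPATH { "/lib/tools.jar" },
- * the options added would be:
- *   -Denv.class.path=/home/user/classes
- *   -Dapplication.home=/opt/app
- *   -Djava.class.path=/opt/app/lib/tools.jar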
- */
-static jboolean
-AddApplicationOptions()
-{
- const int NUM_APP_CLASSPATH = (sizeof(app_classpath) / sizeof(char *));
- char *envcp, *appcp, *apphome;
- char home[MAXPATHLEN]; /* application home */
- char separator[] = { PATH_SEPARATOR, '\0' };
- int size, i;
- int strlenHome;
-
- {
- const char *s = getenv("CLASSPATH");
- if (s) {
- s = (char *) JLI_WildcardExpandClasspath(s);
- /* 40 for -Denv.class.path= */
- envcp = (char *)JLI_MemAlloc(strlen(s) + 40);
- sprintf(envcp, "-Denv.class.path=%s", s);
- AddOption(envcp, NULL);
- }
- }
-
- if (!GetApplicationHome(home, sizeof(home))) {
- ReportErrorMessage("Can't determine application home", JNI_TRUE);
- return JNI_FALSE;
- }
-
- /* 40 for '-Dapplication.home=' */
- apphome = (char *)JLI_MemAlloc(strlen(home) + 40);
- sprintf(apphome, "-Dapplication.home=%s", home);
- AddOption(apphome, NULL);
-
- /* How big is the application's classpath? */
- size = 40; /* 40: "-Djava.class.path=" */
- strlenHome = (int)strlen(home);
- for (i = 0; i < NUM_APP_CLASSPATH; i++) {
- size += strlenHome + (int)strlen(app_classpath[i]) + 1; /* 1: separator */
- }
- appcp = (char *)JLI_MemAlloc(size + 1);
- strcpy(appcp, "-Djava.class.path=");
- for (i = 0; i < NUM_APP_CLASSPATH; i++) {
- strcat(appcp, home); /* c:\program files\myapp */
- strcat(appcp, app_classpath[i]); /* \lib\myapp.jar */
- strcat(appcp, separator); /* ; */
- }
- appcp[strlen(appcp)-1] = '\0'; /* remove trailing path separator */
- AddOption(appcp, NULL);
- return JNI_TRUE;
-}
-#endif /* JAVA_ARGS */
-
-/*
- * Inject the -Dsun.java.command pseudo property into the args structure.
- * This pseudo property is used in the HotSpot VM to expose the
- * Java class name and arguments to the main method to the VM. The
- * HotSpot VM uses this pseudo property to store the Java class name
- * (or jar file name) and the arguments to the class's main method
- * to the instrumentation memory region. The sun.java.command pseudo
- * property is not exported by HotSpot to the Java layer.
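- *
- * For example (hypothetical invocation, for illustration only),
- *     java -cp apps com.example.Main one two
- * results in the pseudo property
- *     -Dsun.java.command=com.example.Main one two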
- */
-void
-SetJavaCommandLineProp(char *classname, char *jarfile,
- int argc, char **argv)
-{
-
- int i = 0;
- size_t len = 0;
- char* javaCommand = NULL;
- char* dashDstr = "-Dsun.java.command=";
-
- if (classname == NULL && jarfile == NULL) {
- /* unexpected, one of these should be set. just return without
- * setting the property
- */
- return;
- }
-
- /* if the class name is not set, then use the jarfile name */
- if (classname == NULL) {
- classname = jarfile;
- }
-
- /* determine the amount of memory to allocate assuming
- * the individual components will be space separated
- */
- len = strlen(classname);
- for (i = 0; i < argc; i++) {
- len += strlen(argv[i]) + 1;
- }
-
- /* allocate the memory */
- javaCommand = (char*) JLI_MemAlloc(len + strlen(dashDstr) + 1);
-
- /* build the -D string */
- *javaCommand = '\0';
- strcat(javaCommand, dashDstr);
- strcat(javaCommand, classname);
-
- for (i = 0; i < argc; i++) {
- /* the components of the string are space separated. In
- * the case of embedded white space, the relationship of
- * the white space separated components to their true
- * positional arguments will be ambiguous. This issue may
- * be addressed in a future release.
- */
- strcat(javaCommand, " ");
- strcat(javaCommand, argv[i]);
- }
-
- AddOption(javaCommand, NULL);
-}
-
-/*
- * The JVM would like to know whether it was created by a standard Sun
- * launcher or by a user's native application; the following property
- * indicates the former.
- */
-void SetJavaLauncherProp() {
- AddOption("-Dsun.java.launcher=" LAUNCHER_TYPE, NULL);
-}
-
-/*
- * Prints the version information from the java.version and other properties.
- */
-static void
-PrintJavaVersion(JNIEnv *env)
-{
- jclass ver;
- jmethodID print;
-
- NULL_CHECK(ver = (*env)->FindClass(env, "sun/misc/Version"));
- NULL_CHECK(print = (*env)->GetStaticMethodID(env, ver, "print", "()V"));
-
- (*env)->CallStaticVoidMethod(env, ver, print);
-}
-
-/*
- * Prints default usage message.
- */
-static void
-PrintUsage(void)
-{
-#ifndef GAMMA
- int i;
-#endif
-
- fprintf(stdout,
- "Usage: %s [-options] class [args...]\n"
- " (to execute a class)\n"
- " or %s [-options] -jar jarfile [args...]\n"
- " (to execute a jar file)\n"
- "\n"
- "where options include:\n",
- progname,
- progname);
-
-#ifndef GAMMA
- PrintMachineDependentOptions();
-
- if ((knownVMs[0].flag == VM_KNOWN) ||
- (knownVMs[0].flag == VM_IF_SERVER_CLASS)) {
- fprintf(stdout, " %s\t to select the \"%s\" VM\n",
- knownVMs[0].name, knownVMs[0].name+1);
- }
- for (i=1; i<knownVMsCount; i++) {
- if (knownVMs[i].flag == VM_KNOWN)
- fprintf(stdout, " %s\t to select the \"%s\" VM\n",
- knownVMs[i].name, knownVMs[i].name+1);
- }
- for (i=1; i<knownVMsCount; i++) {
- if (knownVMs[i].flag == VM_ALIASED_TO)
- fprintf(stdout, " %s\t is a synonym for "
- "the \"%s\" VM [deprecated]\n",
- knownVMs[i].name, knownVMs[i].alias+1);
- }
- /* The first known VM is the default */
- {
- const char* defaultVM = knownVMs[0].name+1;
- const char* punctuation = ".";
- const char* reason = "";
- if ((knownVMs[0].flag == VM_IF_SERVER_CLASS) &&
- (ServerClassMachine() == JNI_TRUE)) {
- defaultVM = knownVMs[0].server_class+1;
- punctuation = ", ";
- reason = "because you are running on a server-class machine.\n";
- }
- fprintf(stdout, " The default VM is %s%s\n",
- defaultVM, punctuation);
- fprintf(stdout, " %s\n",
- reason);
- }
-#endif /* ifndef GAMMA */
-
- fprintf(stdout,
-" -cp <class search path of directories and zip/jar files>\n"
-" -classpath <class search path of directories and zip/jar files>\n"
-" A %c separated list of directories, JAR archives,\n"
-" and ZIP archives to search for class files.\n"
-" -D<name>=<value>\n"
-" set a system property\n"
-" -verbose[:class|gc|jni]\n"
-" enable verbose output\n"
-" -version print product version and exit\n"
-" -version:<value>\n"
-" require the specified version to run\n"
-" -showversion print product version and continue\n"
-" -jre-restrict-search | -jre-no-restrict-search\n"
-" include/exclude user private JREs in the version search\n"
-" -? -help print this help message\n"
-" -X print help on non-standard options\n"
-" -ea[:<packagename>...|:<classname>]\n"
-" -enableassertions[:<packagename>...|:<classname>]\n"
-" enable assertions\n"
-" -da[:<packagename>...|:<classname>]\n"
-" -disableassertions[:<packagename>...|:<classname>]\n"
-" disable assertions\n"
-" -esa | -enablesystemassertions\n"
-" enable system assertions\n"
-" -dsa | -disablesystemassertions\n"
-" disable system assertions\n"
-" -agentlib:<libname>[=<options>]\n"
-" load native agent library <libname>, e.g. -agentlib:hprof\n"
-" see also, -agentlib:jdwp=help and -agentlib:hprof=help\n"
-" -agentpath:<pathname>[=<options>]\n"
-" load native agent library by full pathname\n"
-" -javaagent:<jarpath>[=<options>]\n"
-" load Java programming language agent, see java.lang.instrument\n"
-" -splash:<imagepath>\n"
-" show splash screen with specified image\n"
-
- ,PATH_SEPARATOR);
-}
-
-/*
- * Print usage message for -X options.
- */
-static jint
-PrintXUsage(const char *jvmpath)
-{
- /*
-      A 32-byte cushion to prevent buffer overrun, noting that
- fopen(3C) may fail if the buffer exceeds MAXPATHLEN.
- */
- char path[MAXPATHLEN+32];
- char buf[128];
- size_t n;
- FILE *fp;
- static const char Xusage_txt[] = "/Xusage.txt";
-
- strcpy(path, jvmpath);
- /* Note the FILE_SEPARATOR is platform dependent */
- strcpy(strrchr(path, FILE_SEPARATOR), Xusage_txt);
- fp = fopen(path, "r");
- if (fp == 0) {
- fprintf(stderr, "Can't open %s\n", path);
- return 1;
- }
- while ((n = fread(buf, 1, sizeof(buf), fp)) != 0) {
- fwrite(buf, 1, n, stdout);
- }
- fclose(fp);
- return 0;
-}
-
-#ifndef GAMMA
-/*
- * Read the jvm.cfg file and fill the knownVMs[] array.
- *
- * The functionality of the jvm.cfg file is subject to change without
- * notice and the mechanism will be removed in the future.
- *
- * The lexical structure of the jvm.cfg file is as follows:
- *
- * jvmcfg := { vmLine }
- * vmLine := knownLine
- * | aliasLine
- * | warnLine
- * | ignoreLine
- * | errorLine
- * | predicateLine
- * | commentLine
- * knownLine := flag "KNOWN" EOL
- * warnLine := flag "WARN" EOL
- * ignoreLine := flag "IGNORE" EOL
- * errorLine := flag "ERROR" EOL
- * aliasLine := flag "ALIASED_TO" flag EOL
- * predicateLine := flag "IF_SERVER_CLASS" flag EOL
- * commentLine := "#" text EOL
- * flag := "-" identifier
- *
- * The semantics are that when someone specifies a flag on the command line:
- * - if the flag appears on a knownLine, then the identifier is used as
- * the name of the directory holding the JVM library (the name of the JVM).
- * - if the flag appears as the first flag on an aliasLine, the identifier
- * of the second flag is used as the name of the JVM.
- * - if the flag appears on a warnLine, the identifier is used as the
- * name of the JVM, but a warning is generated.
- * - if the flag appears on an ignoreLine, the identifier is recognized as the
- * name of a JVM, but the identifier is ignored and the default vm is used.
- * - if the flag appears on an errorLine, an error is generated.
- * - if the flag appears as the first flag on a predicateLine, and
- * the machine on which you are running passes the predicate indicated,
- * then the identifier of the second flag is used as the name of the JVM,
- * otherwise the identifier of the first flag is used as the name of the JVM.
- * If no flag is given on the command line, the first vmLine of the jvm.cfg
- * file determines the name of the JVM.
- * PredicateLines are only interpreted on the first vmLine of a jvm.cfg file,
- * since they only make sense if someone hasn't specified the name of the
- * JVM on the command line.
- *
- * The intent of the jvm.cfg file is to allow several JVM libraries to
- * be installed in different subdirectories of a single JRE installation,
- * for space-savings and convenience in testing.
- * The intent is explicitly not to provide a full aliasing or predicate
- * mechanism.
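- *
- * A hypothetical jvm.cfg, for illustration only:
- *     # comment line
- *     -client IF_SERVER_CLASS -server
- *     -server KNOWN
- *     -hotspot ALIASED_TO -client
- *     -classic WARN
- *     -native ERROR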
- */
-jint
-ReadKnownVMs(const char *jrepath, char * arch, jboolean speculative)
-{
- FILE *jvmCfg;
- char jvmCfgName[MAXPATHLEN+20];
- char line[MAXPATHLEN+20];
- int cnt = 0;
- int lineno = 0;
- jlong start, end;
- int vmType;
- char *tmpPtr;
- char *altVMName = NULL;
- char *serverClassVMName = NULL;
- static char *whiteSpace = " \t";
- if (_launcher_debug) {
- start = CounterGet();
- }
-
- strcpy(jvmCfgName, jrepath);
- strcat(jvmCfgName, FILESEP "lib" FILESEP);
- strcat(jvmCfgName, arch);
- strcat(jvmCfgName, FILESEP "jvm.cfg");
-
- jvmCfg = fopen(jvmCfgName, "r");
- if (jvmCfg == NULL) {
- if (!speculative) {
- ReportErrorMessage2("Error: could not open `%s'", jvmCfgName,
- JNI_TRUE);
- exit(1);
- } else {
- return -1;
- }
- }
- while (fgets(line, sizeof(line), jvmCfg) != NULL) {
- vmType = VM_UNKNOWN;
- lineno++;
- if (line[0] == '#')
- continue;
- if (line[0] != '-') {
- fprintf(stderr, "Warning: no leading - on line %d of `%s'\n",
- lineno, jvmCfgName);
- }
- if (cnt >= knownVMsLimit) {
- GrowKnownVMs(cnt);
- }
- line[strlen(line)-1] = '\0'; /* remove trailing newline */
- tmpPtr = line + strcspn(line, whiteSpace);
- if (*tmpPtr == 0) {
- fprintf(stderr, "Warning: missing VM type on line %d of `%s'\n",
- lineno, jvmCfgName);
- } else {
- /* Null-terminate this string for JLI_StringDup below */
- *tmpPtr++ = 0;
- tmpPtr += strspn(tmpPtr, whiteSpace);
- if (*tmpPtr == 0) {
- fprintf(stderr, "Warning: missing VM type on line %d of `%s'\n",
- lineno, jvmCfgName);
- } else {
- if (!strncmp(tmpPtr, "KNOWN", strlen("KNOWN"))) {
- vmType = VM_KNOWN;
- } else if (!strncmp(tmpPtr, "ALIASED_TO", strlen("ALIASED_TO"))) {
- tmpPtr += strcspn(tmpPtr, whiteSpace);
- if (*tmpPtr != 0) {
- tmpPtr += strspn(tmpPtr, whiteSpace);
- }
- if (*tmpPtr == 0) {
- fprintf(stderr, "Warning: missing VM alias on line %d of `%s'\n",
- lineno, jvmCfgName);
- } else {
- /* Null terminate altVMName */
- altVMName = tmpPtr;
- tmpPtr += strcspn(tmpPtr, whiteSpace);
- *tmpPtr = 0;
- vmType = VM_ALIASED_TO;
- }
- } else if (!strncmp(tmpPtr, "WARN", strlen("WARN"))) {
- vmType = VM_WARN;
- } else if (!strncmp(tmpPtr, "IGNORE", strlen("IGNORE"))) {
- vmType = VM_IGNORE;
- } else if (!strncmp(tmpPtr, "ERROR", strlen("ERROR"))) {
- vmType = VM_ERROR;
- } else if (!strncmp(tmpPtr,
- "IF_SERVER_CLASS",
- strlen("IF_SERVER_CLASS"))) {
- tmpPtr += strcspn(tmpPtr, whiteSpace);
- if (*tmpPtr != 0) {
- tmpPtr += strspn(tmpPtr, whiteSpace);
- }
- if (*tmpPtr == 0) {
- fprintf(stderr, "Warning: missing server class VM on line %d of `%s'\n",
- lineno, jvmCfgName);
- } else {
- /* Null terminate server class VM name */
- serverClassVMName = tmpPtr;
- tmpPtr += strcspn(tmpPtr, whiteSpace);
- *tmpPtr = 0;
- vmType = VM_IF_SERVER_CLASS;
- }
- } else {
- fprintf(stderr, "Warning: unknown VM type on line %d of `%s'\n",
- lineno, &jvmCfgName[0]);
- vmType = VM_KNOWN;
- }
- }
- }
-
- if (_launcher_debug)
- printf("jvm.cfg[%d] = ->%s<-\n", cnt, line);
- if (vmType != VM_UNKNOWN) {
- knownVMs[cnt].name = JLI_StringDup(line);
- knownVMs[cnt].flag = vmType;
- switch (vmType) {
- default:
- break;
- case VM_ALIASED_TO:
- knownVMs[cnt].alias = JLI_StringDup(altVMName);
- if (_launcher_debug) {
- printf(" name: %s vmType: %s alias: %s\n",
- knownVMs[cnt].name, "VM_ALIASED_TO", knownVMs[cnt].alias);
- }
- break;
- case VM_IF_SERVER_CLASS:
- knownVMs[cnt].server_class = JLI_StringDup(serverClassVMName);
- if (_launcher_debug) {
- printf(" name: %s vmType: %s server_class: %s\n",
- knownVMs[cnt].name, "VM_IF_SERVER_CLASS", knownVMs[cnt].server_class);
- }
- break;
- }
- cnt++;
- }
- }
- fclose(jvmCfg);
- knownVMsCount = cnt;
-
- if (_launcher_debug) {
- end = CounterGet();
- printf("%ld micro seconds to parse jvm.cfg\n",
- (long)(jint)Counter2Micros(end-start));
- }
-
- return cnt;
-}
-
-
-static void
-GrowKnownVMs(int minimum)
-{
- struct vmdesc* newKnownVMs;
- int newMax;
-
- newMax = (knownVMsLimit == 0 ? INIT_MAX_KNOWN_VMS : (2 * knownVMsLimit));
- if (newMax <= minimum) {
- newMax = minimum;
- }
- newKnownVMs = (struct vmdesc*) JLI_MemAlloc(newMax * sizeof(struct vmdesc));
- if (knownVMs != NULL) {
- memcpy(newKnownVMs, knownVMs, knownVMsLimit * sizeof(struct vmdesc));
- }
- JLI_MemFree(knownVMs);
- knownVMs = newKnownVMs;
- knownVMsLimit = newMax;
-}
-
-
-/* Returns index of VM or -1 if not found */
-static int
-KnownVMIndex(const char* name)
-{
- int i;
- if (strncmp(name, "-J", 2) == 0) name += 2;
- for (i = 0; i < knownVMsCount; i++) {
- if (!strcmp(name, knownVMs[i].name)) {
- return i;
- }
- }
- return -1;
-}
-
-static void
-FreeKnownVMs()
-{
- int i;
- for (i = 0; i < knownVMsCount; i++) {
- JLI_MemFree(knownVMs[i].name);
- knownVMs[i].name = NULL;
- }
- JLI_MemFree(knownVMs);
-}
-
-
-/*
- * Displays the splash screen according to the jar file name
- * and image file names stored in environment variables
- */
-static void
-ShowSplashScreen()
-{
- const char *jar_name = getenv(SPLASH_JAR_ENV_ENTRY);
- const char *file_name = getenv(SPLASH_FILE_ENV_ENTRY);
- int data_size;
- void *image_data;
- if (jar_name) {
- image_data = JLI_JarUnpackFile(jar_name, file_name, &data_size);
- if (image_data) {
- DoSplashInit();
- DoSplashLoadMemory(image_data, data_size);
- JLI_MemFree(image_data);
- }
- } else if (file_name) {
- DoSplashInit();
- DoSplashLoadFile(file_name);
- } else {
- return;
- }
- DoSplashSetFileJarName(file_name, jar_name);
-}
-
-#endif /* ifndef GAMMA */
diff --git a/src/share/tools/launcher/java.h b/src/share/tools/launcher/java.h
deleted file mode 100644
index 1dd79618c..000000000
--- a/src/share/tools/launcher/java.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-
-#ifndef _JAVA_H_
-#define _JAVA_H_
-
-/*
- * Get system specific defines.
- */
-#include "jni.h"
-#include "java_md.h"
-#include "jli_util.h"
-
-/*
- * Pointers to the needed JNI invocation API, initialized by LoadJavaVM.
- */
-typedef jint (JNICALL *CreateJavaVM_t)(JavaVM **pvm, void **env, void *args);
-typedef jint (JNICALL *GetDefaultJavaVMInitArgs_t)(void *args);
-
-typedef struct {
- CreateJavaVM_t CreateJavaVM;
- GetDefaultJavaVMInitArgs_t GetDefaultJavaVMInitArgs;
-} InvocationFunctions;
-
-/*
- * Prototypes for launcher functions in the system specific java_md.c.
- */
-
-jboolean
-LoadJavaVM(const char *jvmpath, InvocationFunctions *ifn);
-
-void
-GetXUsagePath(char *buf, jint bufsize);
-
-jboolean
-GetApplicationHome(char *buf, jint bufsize);
-
-const char *
-GetArch();
-
-void CreateExecutionEnvironment(int *_argc,
- char ***_argv,
- char jrepath[],
- jint so_jrepath,
- char jvmpath[],
- jint so_jvmpath,
- char **original_argv);
-
-/*
- * Report an error message to stderr or a window as appropriate. The
- * "always" flag is set to JNI_TRUE if the message is to be reported to
- * both stderr and a window, and to JNI_FALSE if the message should only
- * be sent to a window.
- */
-void ReportErrorMessage(char * message, jboolean always);
-void ReportErrorMessage2(char * format, char * string, jboolean always);
-
-/*
- * Report an exception which terminates the vm to stderr or a window
- * as appropriate.
- */
-void ReportExceptionDescription(JNIEnv * env);
-
-jboolean RemovableMachineDependentOption(char * option);
-void PrintMachineDependentOptions();
-
-/*
- * Block current thread and continue execution in new thread
- */
-int ContinueInNewThread(int (JNICALL *continuation)(void *),
- jlong stack_size, void * args);
-
-/* sun.java.launcher.* platform properties. */
-void SetJavaLauncherPlatformProps(void);
-
-/*
- * Functions defined in java.c and used in java_md.c.
- */
-jint ReadKnownVMs(const char *jrepath, char * arch, jboolean speculative);
-char *CheckJvmType(int *argc, char ***argv, jboolean speculative);
-void AddOption(char *str, void *info);
-
-/*
- * Make launcher spit debug output.
- */
-extern jboolean _launcher_debug;
-
-#endif /* _JAVA_H_ */
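
java.h only declares LoadJavaVM; the implementation lives in the platform-specific java_md.c. A plausible POSIX-only sketch of what it does -- resolve the two JNI invocation entry points out of libjvm into the InvocationFunctions table -- assuming this header's types (the real code adds more error reporting and platform variants):

    #include <dlfcn.h>
    #include <stdio.h>
    #include "java.h"   /* InvocationFunctions, CreateJavaVM_t, ... */

    jboolean LoadJavaVM(const char *jvmpath, InvocationFunctions *ifn)
    {
        void *handle = dlopen(jvmpath, RTLD_NOW | RTLD_GLOBAL);
        if (handle == NULL) {
            fprintf(stderr, "dlopen of %s failed: %s\n", jvmpath, dlerror());
            return JNI_FALSE;
        }
        /* JNI_CreateJavaVM and JNI_GetDefaultJavaVMInitArgs are the
         * standard exported symbols of the JNI invocation API. */
        ifn->CreateJavaVM = (CreateJavaVM_t) dlsym(handle, "JNI_CreateJavaVM");
        ifn->GetDefaultJavaVMInitArgs =
            (GetDefaultJavaVMInitArgs_t) dlsym(handle, "JNI_GetDefaultJavaVMInitArgs");
        return (ifn->CreateJavaVM != NULL && ifn->GetDefaultJavaVMInitArgs != NULL)
            ? JNI_TRUE : JNI_FALSE;
    }
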
diff --git a/src/share/tools/launcher/jli_util.c b/src/share/tools/launcher/jli_util.c
deleted file mode 100644
index 36b164e3d..000000000
--- a/src/share/tools/launcher/jli_util.c
+++ /dev/null
@@ -1,89 +0,0 @@
-
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include <stdio.h>
-#include <string.h>
-#include "jli_util.h"
-
-#ifdef GAMMA
-#ifdef TARGET_OS_FAMILY_windows
-#define strdup _strdup
-#endif
-#endif
-
-/*
- * Returns a pointer to a block of at least 'size' bytes of memory.
- * Prints error message and exits if the memory could not be allocated.
- */
-void *
-JLI_MemAlloc(size_t size)
-{
- void *p = malloc(size);
- if (p == 0) {
- perror("malloc");
- exit(1);
- }
- return p;
-}
-
-/*
- * Equivalent to realloc(ptr, size).
- * Prints error message and exits if the memory could not be reallocated.
- */
-void *
-JLI_MemRealloc(void *ptr, size_t size)
-{
- void *p = realloc(ptr, size);
- if (p == 0) {
- perror("realloc");
- exit(1);
- }
- return p;
-}
-
-/*
- * Wrapper over strdup(3C) which prints an error message and exits if memory
- * could not be allocated.
- */
-char *
-JLI_StringDup(const char *s1)
-{
- char *s = strdup(s1);
- if (s == NULL) {
- perror("strdup");
- exit(1);
- }
- return s;
-}
-
-/*
- * Equivalent to free(ptr).
- * Here to maintain pairing with the above routines.
- */
-void
-JLI_MemFree(void *ptr)
-{
- free(ptr);
-}
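
The point of these wrappers is the calling convention they buy: since every failure path calls exit(1), call sites can use the result unconditionally, with no NULL checks. A small hypothetical example:

    #include <stdio.h>
    #include <string.h>
    #include "jli_util.h"

    /* Join two strings with ':'; never returns NULL because the JLI_
     * allocators abort on failure. Caller releases with JLI_MemFree. */
    static char *join_with_colon(const char *a, const char *b)
    {
        char *s = (char *) JLI_MemAlloc(strlen(a) + strlen(b) + 2);
        sprintf(s, "%s:%s", a, b);   /* buffer sized above, so this fits */
        return s;
    }
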
diff --git a/src/share/tools/launcher/wildcard.c b/src/share/tools/launcher/wildcard.c
deleted file mode 100644
index 8b3cbcd69..000000000
--- a/src/share/tools/launcher/wildcard.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-/*
- * Class-Path Wildcards
- *
- * The syntax for wildcards is a single asterisk. The class path
- * foo/"*", e.g., loads all jar files in the directory named foo.
- * (This requires careful quotation when used in shell scripts.)
- *
- * Only files whose names end in .jar or .JAR are matched.
- * Files whose names end in .zip, or which have a particular
- * magic number, regardless of filename extension, are not
- * matched.
- *
- * Files are considered regardless of whether or not they are
- * "hidden" in the UNIX sense, i.e., have names beginning with '.'.
- *
- * A wildcard only matches jar files, not class files in the same
- * directory. If you want to load both class files and jar files from
- * a single directory foo then you can say foo:foo/"*", or foo/"*":foo
- * if you want the jar files to take precedence.
- *
- * Subdirectories are not searched recursively, i.e., foo/"*" only
- * looks for jar files in foo, not in foo/bar, foo/baz, etc.
- *
- * Expansion of wildcards is done early, prior to the invocation of a
- * program's main method, rather than late, during the class-loading
- * process itself. Each element of the input class path containing a
- * wildcard is replaced by the (possibly empty) sequence of elements
- * generated by enumerating the jar files in the named directory. If
- * the directory foo contains a.jar, b.jar, and c.jar,
- * e.g., then the class path foo/"*" is expanded into
- * foo/a.jar:foo/b.jar:foo/c.jar, and that string would be the value
- * of the system property java.class.path.
- *
- * The order in which the jar files in a directory are enumerated in
- * the expanded class path is not specified and may vary from platform
- * to platform and even from moment to moment on the same machine. A
- * well-constructed application should not depend upon any particular
- * order. If a specific order is required then the jar files can be
- * enumerated explicitly in the class path.
- *
- * The CLASSPATH environment variable is not treated any differently
- * from the -classpath (equiv. -cp) command-line option,
- * i.e. wildcards are honored in all these cases.
- *
- * Class-path wildcards are not honored in the Class-Path jar-manifest
- * header.
- *
- * Class-path wildcards are honored not only by the Java launcher but
- * also by most other command-line tools that accept class paths, and
- * in particular by javac and javadoc.
- *
- * Class-path wildcards are not honored in any other kind of path, and
- * especially not in the bootstrap class path, which is a mere
- * artifact of our implementation and not something that developers
- * should use.
- *
- * Class-path wildcards are only expanded in the Java launcher code,
- * supporting the use of wildcards on the command line and in the
- * CLASSPATH environment variable. We do not support the use of
- * wildcards by applications that embed the JVM.
- */
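
A concrete driver for the entry point defined below (hypothetical, and assuming a lib/ directory holding a.jar and b.jar):

    #include <stdio.h>

    extern const char *JLI_WildcardExpandClasspath(const char *classpath);

    int main(void)
    {
        /* Prints something like "lib/a.jar:lib/b.jar" -- ';' on Windows,
         * and, as stressed above, in no particular order. */
        printf("%s\n", JLI_WildcardExpandClasspath("lib/*"));
        return 0;
    }
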
-
-#include <stddef.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include "java.h" /* Strictly for PATH_SEPARATOR/FILE_SEPARATOR */
-#include "jli_util.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else /* Unix */
-#include <unistd.h>
-#include <dirent.h>
-#endif /* Unix */
-
-static int
-exists(const char* filename)
-{
-#ifdef _WIN32
- return _access(filename, 0) == 0;
-#else
- return access(filename, F_OK) == 0;
-#endif
-}
-
-#define NEW_(TYPE) ((TYPE) JLI_MemAlloc(sizeof(struct TYPE##_)))
-
-/*
- * Wildcard directory iteration.
- * WildcardIterator_for(wildcard) returns an iterator.
- * Each call to that iterator's next() method returns the basename
- * of an entry in the wildcard's directory. The basename's memory
- * belongs to the iterator. The caller is responsible for prepending
- * the directory name and file separator, if necessary.
- * When done with the iterator, call the close method to clean up.
- */
-typedef struct WildcardIterator_* WildcardIterator;
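
The intended call pattern, sketched here for clarity (the real consumer is wildcardFileList further down; these functions are static, so the sketch is illustrative only and does not link outside this file):

    /* Enumerate and print the entries behind a wildcard. */
    static void print_dir_entries(const char *wildcard)
    {
        WildcardIterator it = WildcardIterator_for(wildcard);
        char *basename;
        if (it == NULL)
            return;                      /* directory could not be opened */
        while ((basename = WildcardIterator_next(it)) != NULL)
            printf("%s\n", basename);    /* basename only; caller adds the dir */
        WildcardIterator_close(it);
    }
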
-
-#ifdef _WIN32
-struct WildcardIterator_
-{
- HANDLE handle;
- char *firstFile; /* Stupid FindFirstFile...FindNextFile */
-};
-
-static WildcardIterator
-WildcardIterator_for(const char *wildcard)
-{
- WIN32_FIND_DATA find_data;
- WildcardIterator it = NEW_(WildcardIterator);
- HANDLE handle = FindFirstFile(wildcard, &find_data);
- if (handle == INVALID_HANDLE_VALUE)
- return NULL;
- it->handle = handle;
- it->firstFile = find_data.cFileName;
- return it;
-}
-
-static char *
-WildcardIterator_next(WildcardIterator it)
-{
- WIN32_FIND_DATA find_data;
- if (it->firstFile != NULL) {
- char *firstFile = it->firstFile;
- it->firstFile = NULL;
- return firstFile;
- }
- return FindNextFile(it->handle, &find_data)
- ? find_data.cFileName : NULL;
-}
-
-static void
-WildcardIterator_close(WildcardIterator it)
-{
- if (it) {
- FindClose(it->handle);
- JLI_MemFree(it->firstFile);
- JLI_MemFree(it);
- }
-}
-
-#else /* Unix */
-struct WildcardIterator_
-{
- DIR *dir;
-};
-
-static WildcardIterator
-WildcardIterator_for(const char *wildcard)
-{
- DIR *dir;
- int wildlen = strlen(wildcard);
- if (wildlen < 2) {
- dir = opendir(".");
- } else {
- char *dirname = JLI_StringDup(wildcard);
- dirname[wildlen - 1] = '\0';
- dir = opendir(dirname);
- JLI_MemFree(dirname);
- }
- if (dir == NULL)
- return NULL;
- else {
- WildcardIterator it = NEW_(WildcardIterator);
- it->dir = dir;
- return it;
- }
-}
-
-static char *
-WildcardIterator_next(WildcardIterator it)
-{
- struct dirent* dirp = readdir(it->dir);
- return dirp ? dirp->d_name : NULL;
-}
-
-static void
-WildcardIterator_close(WildcardIterator it)
-{
- if (it) {
- closedir(it->dir);
- JLI_MemFree(it);
- }
-}
-#endif /* Unix */
-
-static int
-equal(const char *s1, const char *s2)
-{
- return strcmp(s1, s2) == 0;
-}
-
-/*
- * FileList ADT - a dynamic list of C filenames
- */
-struct FileList_
-{
- char **files;
- int size;
- int capacity;
-};
-typedef struct FileList_ *FileList;
-
-static FileList
-FileList_new(int capacity)
-{
- FileList fl = NEW_(FileList);
- fl->capacity = capacity;
- fl->files = (char **) JLI_MemAlloc(capacity * sizeof(fl->files[0]));
- fl->size = 0;
- return fl;
-}
-
-#ifdef DEBUG_WILDCARD
-static void
-FileList_print(FileList fl)
-{
- int i;
- putchar('[');
- for (i = 0; i < fl->size; i++) {
- if (i > 0) printf(", ");
- printf("\"%s\"",fl->files[i]);
- }
- putchar(']');
-}
-#endif
-
-static void
-FileList_free(FileList fl)
-{
- if (fl) {
- if (fl->files) {
- int i;
- for (i = 0; i < fl->size; i++)
- JLI_MemFree(fl->files[i]);
- JLI_MemFree(fl->files);
- }
- JLI_MemFree(fl);
- }
-}
-
-static void
-FileList_ensureCapacity(FileList fl, int capacity)
-{
- if (fl->capacity < capacity) {
- while (fl->capacity < capacity)
- fl->capacity *= 2;
- fl->files = JLI_MemRealloc(fl->files,
- fl->capacity * sizeof(fl->files[0]));
- }
-}
-
-static void
-FileList_add(FileList fl, char *file)
-{
- FileList_ensureCapacity(fl, fl->size+1);
- fl->files[fl->size++] = file;
-}
-
-static void
-FileList_addSubstring(FileList fl, const char *beg, int len)
-{
- char *filename = (char *) JLI_MemAlloc(len+1);
- memcpy(filename, beg, len);
- filename[len] = '\0';
- FileList_ensureCapacity(fl, fl->size+1);
- fl->files[fl->size++] = filename;
-}
-
-static char *
-FileList_join(FileList fl, char sep)
-{
- int i;
- int size;
- char *path;
- char *p;
- for (i = 0, size = 1; i < fl->size; i++)
- size += strlen(fl->files[i]) + 1;
-
- path = JLI_MemAlloc(size);
-
- for (i = 0, p = path; i < fl->size; i++) {
- int len = strlen(fl->files[i]);
- if (i > 0) *p++ = sep;
- memcpy(p, fl->files[i], len);
- p += len;
- }
- *p = '\0';
-
- return path;
-}
-
-static FileList
-FileList_split(const char *path, char sep)
-{
- const char *p, *q;
- int len = strlen(path);
- int count;
- FileList fl;
- for (count = 1, p = path; p < path + len; p++)
- count += (*p == sep);
- fl = FileList_new(count);
- for (p = path;;) {
- for (q = p; q <= path + len; q++) {
- if (*q == sep || *q == '\0') {
- FileList_addSubstring(fl, p, q - p);
- if (*q == '\0')
- return fl;
- p = q + 1;
- }
- }
- }
-}
-
-static int
-isJarFileName(const char *filename)
-{
- int len = strlen(filename);
- return (len >= 4) &&
- (filename[len - 4] == '.') &&
- (equal(filename + len - 3, "jar") ||
- equal(filename + len - 3, "JAR")) &&
- /* Paranoia: Maybe filename is "DIR:foo.jar" */
- (strchr(filename, PATH_SEPARATOR) == NULL);
-}
-
-static char *
-wildcardConcat(const char *wildcard, const char *basename)
-{
- int wildlen = strlen(wildcard);
- int baselen = strlen(basename);
- char *filename = (char *) JLI_MemAlloc(wildlen + baselen);
- /* Replace the trailing '*' with basename */
- memcpy(filename, wildcard, wildlen-1);
- memcpy(filename+wildlen-1, basename, baselen+1);
- return filename;
-}
-
-static FileList
-wildcardFileList(const char *wildcard)
-{
- const char *basename;
- FileList fl = FileList_new(16);
- WildcardIterator it = WildcardIterator_for(wildcard);
- if (it == NULL) {
- FileList_free(fl);
- return NULL;
- }
- while ((basename = WildcardIterator_next(it)) != NULL)
- if (isJarFileName(basename))
- FileList_add(fl, wildcardConcat(wildcard, basename));
- WildcardIterator_close(it);
- return fl;
-}
-
-static int
-isWildcard(const char *filename)
-{
- int len = strlen(filename);
- return (len > 0) &&
- (filename[len - 1] == '*') &&
- (len == 1 || IS_FILE_SEPARATOR(filename[len - 2])) &&
- (! exists(filename));
-}
-
-static void
-FileList_expandWildcards(FileList fl)
-{
- int i, j;
- for (i = 0; i < fl->size; i++) {
- if (isWildcard(fl->files[i])) {
- FileList expanded = wildcardFileList(fl->files[i]);
- if (expanded != NULL && expanded->size > 0) {
- JLI_MemFree(fl->files[i]);
- FileList_ensureCapacity(fl, fl->size + expanded->size);
- for (j = fl->size - 1; j >= i+1; j--)
- fl->files[j+expanded->size-1] = fl->files[j];
- for (j = 0; j < expanded->size; j++)
- fl->files[i+j] = expanded->files[j];
- i += expanded->size - 1;
- fl->size += expanded->size - 1;
- /* fl expropriates expanded's elements. */
- expanded->size = 0;
- }
- FileList_free(expanded);
- }
- }
-}
-
-const char *
-JLI_WildcardExpandClasspath(const char *classpath)
-{
- char *expanded;
- FileList fl;
-
- if (strchr(classpath, '*') == NULL)
- return classpath;
- fl = FileList_split(classpath, PATH_SEPARATOR);
- FileList_expandWildcards(fl);
- expanded = FileList_join(fl, PATH_SEPARATOR);
- FileList_free(fl);
- if (getenv("_JAVA_LAUNCHER_DEBUG") != 0)
- printf("Expanded wildcards:\n"
- " before: \"%s\"\n"
- " after : \"%s\"\n",
- classpath, expanded);
- return expanded;
-}
-
-#ifdef DEBUG_WILDCARD
-static void
-wildcardExpandArgv(const char ***argv)
-{
- int i;
- for (i = 0; (*argv)[i]; i++) {
- if (equal((*argv)[i], "-cp") ||
- equal((*argv)[i], "-classpath")) {
- i++;
-            (*argv)[i] = JLI_WildcardExpandClasspath((*argv)[i]);
- }
- }
-}
-
-static void
-debugPrintArgv(char *argv[])
-{
- int i;
- putchar('[');
- for (i = 0; argv[i]; i++) {
- if (i > 0) printf(", ");
- printf("\"%s\"", argv[i]);
- }
- printf("]\n");
-}
-
-int
-main(int argc, char *argv[])
-{
- argv[0] = "java";
- wildcardExpandArgv((const char***)&argv);
- debugPrintArgv(argv);
- /* execvp("java", argv); */
- return 0;
-}
-#endif /* DEBUG_WILDCARD */
-
-/* Cute little perl prototype implementation....
-
-my $sep = ($^O =~ /^(Windows|cygwin)/) ? ";" : ":";
-
-sub expand($) {
- opendir DIR, $_[0] or return $_[0];
- join $sep, map {"$_[0]/$_"} grep {/\.(jar|JAR)$/} readdir DIR;
-}
-
-sub munge($) {
- join $sep,
- map {(! -r $_ and s/[\/\\]+\*$//) ? expand $_ : $_} split $sep, $_[0];
-}
-
-for (my $i = 0; $i < @ARGV - 1; $i++) {
- $ARGV[$i+1] = munge $ARGV[$i+1] if $ARGV[$i] =~ /^-c(p|lasspath)$/;
-}
-
-$ENV{CLASSPATH} = munge $ENV{CLASSPATH} if exists $ENV{CLASSPATH};
-@ARGV = ("java", @ARGV);
-print "@ARGV\n";
-exec @ARGV;
-
-*/
diff --git a/src/share/vm/adlc/archDesc.cpp b/src/share/vm/adlc/archDesc.cpp
index 7e272e4d0..412a72822 100644
--- a/src/share/vm/adlc/archDesc.cpp
+++ b/src/share/vm/adlc/archDesc.cpp
@@ -29,8 +29,8 @@
static FILE *errfile = stderr;
//--------------------------- utility functions -----------------------------
-inline char toUpper(char lower) {
- return (('a' <= lower && lower <= 'z') ? (lower + ('A'-'a')) : lower);
+inline char toUpper(char lower) {
+ return (('a' <= lower && lower <= 'z') ? ((char) (lower + ('A'-'a'))) : lower);
}
char *toUpper(const char *str) {
char *upper = new char[strlen(str)+1];
diff --git a/src/share/vm/adlc/arena.cpp b/src/share/vm/adlc/arena.cpp
index b8bb1a0bd..d7e4fc6eb 100644
--- a/src/share/vm/adlc/arena.cpp
+++ b/src/share/vm/adlc/arena.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
#include "adlc.hpp"
-void* Chunk::operator new(size_t requested_size, size_t length) {
+void* Chunk::operator new(size_t requested_size, size_t length) throw() {
return CHeapObj::operator new(requested_size + length);
}
@@ -163,7 +163,7 @@ bool Arena::contains( const void *ptr ) const {
//-----------------------------------------------------------------------------
// CHeapObj
-void* CHeapObj::operator new(size_t size){
+void* CHeapObj::operator new(size_t size) throw() {
return (void *) malloc(size);
}
diff --git a/src/share/vm/adlc/arena.hpp b/src/share/vm/adlc/arena.hpp
index a92857e13..7f09511d6 100644
--- a/src/share/vm/adlc/arena.hpp
+++ b/src/share/vm/adlc/arena.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
class CHeapObj {
public:
- void* operator new(size_t size);
+ void* operator new(size_t size) throw();
void operator delete(void* p);
void* new_array(size_t size);
};
@@ -53,7 +53,7 @@ class CHeapObj {
class ValueObj {
public:
- void* operator new(size_t size);
+ void* operator new(size_t size) throw();
void operator delete(void* p);
};
@@ -61,7 +61,7 @@ class ValueObj {
class AllStatic {
public:
- void* operator new(size_t size);
+ void* operator new(size_t size) throw();
void operator delete(void* p);
};
@@ -70,7 +70,7 @@ class AllStatic {
// Linked list of raw memory chunks
class Chunk: public CHeapObj {
public:
- void* operator new(size_t size, size_t length);
+ void* operator new(size_t size, size_t length) throw();
void operator delete(void* p, size_t length);
Chunk(size_t length);
diff --git a/src/share/vm/adlc/dict2.cpp b/src/share/vm/adlc/dict2.cpp
index c7797c742..24d7fcba0 100644
--- a/src/share/vm/adlc/dict2.cpp
+++ b/src/share/vm/adlc/dict2.cpp
@@ -64,18 +64,18 @@ void Dict::init() {
int i;
// Precompute table of null character hashes
- if( !initflag ) { // Not initializated yet?
- xsum[0] = (1<<shft[0])+1; // Initialize
+  if (!initflag) {                // Not initialized yet?
+ xsum[0] = (short) ((1 << shft[0]) + 1); // Initialize
for( i = 1; i < MAXID; i++) {
- xsum[i] = (1<<shft[i])+1+xsum[i-1];
+ xsum[i] = (short) ((1 << shft[i]) + 1 + xsum[i-1]);
}
initflag = 1; // Never again
}
_size = 16; // Size is a power of 2
_cnt = 0; // Dictionary is empty
- _bin = (bucket*)_arena->Amalloc_4(sizeof(bucket)*_size);
- memset(_bin,0,sizeof(bucket)*_size);
+ _bin = (bucket*)_arena->Amalloc_4(sizeof(bucket) * _size);
+ memset(_bin, 0, sizeof(bucket) * _size);
}
//------------------------------~Dict------------------------------------------
@@ -287,11 +287,11 @@ int hashstr(const void *t) {
register int sum = 0;
register const char *s = (const char *)t;
- while( ((c = s[k]) != '\0') && (k < MAXID-1) ) { // Get characters till nul
- c = (c<<1)+1; // Characters are always odd!
- sum += c + (c<<shft[k++]); // Universal hash function
+ while (((c = s[k]) != '\0') && (k < MAXID-1)) { // Get characters till nul
+ c = (char) ((c << 1) + 1); // Characters are always odd!
+ sum += c + (c << shft[k++]); // Universal hash function
}
- assert( k < (MAXID), "Exceeded maximum name length");
+ assert(k < (MAXID), "Exceeded maximum name length");
return (int)((sum+xsum[k]) >> 1); // Hash key, un-modulo'd table size
}
diff --git a/src/share/vm/adlc/forms.hpp b/src/share/vm/adlc/forms.hpp
index a682e65a3..63e367dd7 100644
--- a/src/share/vm/adlc/forms.hpp
+++ b/src/share/vm/adlc/forms.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -146,7 +146,7 @@ public:
// Public Methods
Form(int formType=0, int line=0)
: _next(NULL), _linenum(line), _ftype(formType) { };
- ~Form() {};
+ virtual ~Form() {};
virtual bool ideal_only() const {
assert(0,"Check of ideal status on non-instruction/operand form.\n");
diff --git a/src/share/vm/adlc/formssel.cpp b/src/share/vm/adlc/formssel.cpp
index 42f1e3fad..3d4a562e4 100644
--- a/src/share/vm/adlc/formssel.cpp
+++ b/src/share/vm/adlc/formssel.cpp
@@ -235,6 +235,9 @@ bool InstructForm::is_parm(FormDict &globals) {
return false;
}
+bool InstructForm::is_ideal_negD() const {
+ return (_matrule && _matrule->_rChild && strcmp(_matrule->_rChild->_opType, "NegD") == 0);
+}
// Return 'true' if this instruction matches an ideal 'Copy*' node
int InstructForm::is_ideal_copy() const {
@@ -533,6 +536,12 @@ bool InstructForm::rematerialize(FormDict &globals, RegisterForm *registers ) {
if( data_type != Form::none )
rematerialize = true;
+ // Ugly: until a better fix is implemented, disable rematerialization for
+  // negD nodes because they have proven to be problematic.
+ if (is_ideal_negD()) {
+ return false;
+ }
+
// Constants
if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) )
rematerialize = true;
@@ -796,11 +805,11 @@ uint InstructForm::num_opnds() {
return num_opnds;
}
-const char *InstructForm::opnd_ident(int idx) {
+const char* InstructForm::opnd_ident(int idx) {
return _components.at(idx)->_name;
}
-const char *InstructForm::unique_opnd_ident(int idx) {
+const char* InstructForm::unique_opnd_ident(uint idx) {
uint i;
for (i = 1; i < num_opnds(); ++i) {
if (unique_opnds_idx(i) == idx) {
@@ -1315,36 +1324,36 @@ void InstructForm::rep_var_format(FILE *fp, const char *rep_var) {
// Search through operands to determine parameters' unique positions.
void InstructForm::set_unique_opnds() {
uint* uniq_idx = NULL;
- int nopnds = num_opnds();
+ uint nopnds = num_opnds();
uint num_uniq = nopnds;
- int i;
+ uint i;
_uniq_idx_length = 0;
- if ( nopnds > 0 ) {
+ if (nopnds > 0) {
// Allocate index array. Worst case we're mapping from each
// component back to an index and any DEF always goes at 0 so the
// length of the array has to be the number of components + 1.
_uniq_idx_length = _components.count() + 1;
- uniq_idx = (uint*) malloc(sizeof(uint)*(_uniq_idx_length));
- for( i = 0; i < _uniq_idx_length; i++ ) {
+ uniq_idx = (uint*) malloc(sizeof(uint) * _uniq_idx_length);
+ for (i = 0; i < _uniq_idx_length; i++) {
uniq_idx[i] = i;
}
}
// Do it only if there is a match rule and no expand rule. With an
// expand rule it is done by creating new mach node in Expand()
// method.
- if ( nopnds > 0 && _matrule != NULL && _exprule == NULL ) {
+ if (nopnds > 0 && _matrule != NULL && _exprule == NULL) {
const char *name;
uint count;
bool has_dupl_use = false;
_parameters.reset();
- while( (name = _parameters.iter()) != NULL ) {
+ while ((name = _parameters.iter()) != NULL) {
count = 0;
- int position = 0;
- int uniq_position = 0;
+ uint position = 0;
+ uint uniq_position = 0;
_components.reset();
Component *comp = NULL;
- if( sets_result() ) {
+ if (sets_result()) {
comp = _components.iter();
position++;
}
@@ -1352,11 +1361,11 @@ void InstructForm::set_unique_opnds() {
for (; (comp = _components.iter()) != NULL; ++position) {
// When the first component is not a DEF,
// leave space for the result operand!
- if ( position==0 && (! comp->isa(Component::DEF)) ) {
+ if (position==0 && (!comp->isa(Component::DEF))) {
++position;
}
- if( strcmp(name, comp->_name)==0 ) {
- if( ++count > 1 ) {
+ if (strcmp(name, comp->_name) == 0) {
+ if (++count > 1) {
assert(position < _uniq_idx_length, "out of bounds");
uniq_idx[position] = uniq_position;
has_dupl_use = true;
@@ -1364,22 +1373,25 @@ void InstructForm::set_unique_opnds() {
uniq_position = position;
}
}
- if( comp->isa(Component::DEF)
- && comp->isa(Component::USE) ) {
+ if (comp->isa(Component::DEF) && comp->isa(Component::USE)) {
++position;
- if( position != 1 )
+ if (position != 1)
--position; // only use two slots for the 1st USE_DEF
}
}
}
- if( has_dupl_use ) {
- for( i = 1; i < nopnds; i++ )
- if( i != uniq_idx[i] )
+ if (has_dupl_use) {
+ for (i = 1; i < nopnds; i++) {
+ if (i != uniq_idx[i]) {
break;
- int j = i;
- for( ; i < nopnds; i++ )
- if( i == uniq_idx[i] )
+ }
+ }
+ uint j = i;
+ for (; i < nopnds; i++) {
+ if (i == uniq_idx[i]) {
uniq_idx[i] = j++;
+ }
+ }
num_uniq = j;
}
}
@@ -2216,21 +2228,27 @@ RegClass* OperandForm::get_RegClass() const {
bool OperandForm::is_bound_register() const {
- RegClass *reg_class = get_RegClass();
- if (reg_class == NULL) return false;
-
- const char * name = ideal_type(globalAD->globalNames());
- if (name == NULL) return false;
-
- int size = 0;
- if (strcmp(name,"RegFlags")==0) size = 1;
- if (strcmp(name,"RegI")==0) size = 1;
- if (strcmp(name,"RegF")==0) size = 1;
- if (strcmp(name,"RegD")==0) size = 2;
- if (strcmp(name,"RegL")==0) size = 2;
- if (strcmp(name,"RegN")==0) size = 1;
- if (strcmp(name,"RegP")==0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
- if (size == 0) return false;
+ RegClass* reg_class = get_RegClass();
+ if (reg_class == NULL) {
+ return false;
+ }
+
+ const char* name = ideal_type(globalAD->globalNames());
+ if (name == NULL) {
+ return false;
+ }
+
+ uint size = 0;
+ if (strcmp(name, "RegFlags") == 0) size = 1;
+ if (strcmp(name, "RegI") == 0) size = 1;
+ if (strcmp(name, "RegF") == 0) size = 1;
+ if (strcmp(name, "RegD") == 0) size = 2;
+ if (strcmp(name, "RegL") == 0) size = 2;
+ if (strcmp(name, "RegN") == 0) size = 1;
+ if (strcmp(name, "RegP") == 0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
+ if (size == 0) {
+ return false;
+ }
return size == reg_class->size();
}
diff --git a/src/share/vm/adlc/formssel.hpp b/src/share/vm/adlc/formssel.hpp
index 6f2975fc3..fa3e9ff8f 100644
--- a/src/share/vm/adlc/formssel.hpp
+++ b/src/share/vm/adlc/formssel.hpp
@@ -106,7 +106,7 @@ public:
const char *_ins_pipe; // Instruction Scheduling description class
uint *_uniq_idx; // Indexes of unique operands
- int _uniq_idx_length; // Length of _uniq_idx array
+ uint _uniq_idx_length; // Length of _uniq_idx array
uint _num_uniq; // Number of unique operands
ComponentList _components; // List of Components matches MachNode's
// operand structure
@@ -147,6 +147,7 @@ public:
virtual int is_empty_encoding() const; // _size=0 and/or _insencode empty
virtual int is_tls_instruction() const; // tlsLoadP rule or ideal ThreadLocal
virtual int is_ideal_copy() const; // node matches ideal 'Copy*'
+ virtual bool is_ideal_negD() const; // node matches ideal 'NegD'
virtual bool is_ideal_if() const; // node matches ideal 'If'
virtual bool is_ideal_fastlock() const; // node matches 'FastLock'
virtual bool is_ideal_membar() const; // node matches ideal 'MemBarXXX'
@@ -272,14 +273,14 @@ public:
void set_unique_opnds();
uint num_unique_opnds() { return _num_uniq; }
uint unique_opnds_idx(int idx) {
- if( _uniq_idx != NULL && idx > 0 ) {
- assert(idx < _uniq_idx_length, "out of bounds");
- return _uniq_idx[idx];
- } else {
- return idx;
- }
+ if (_uniq_idx != NULL && idx > 0) {
+ assert((uint)idx < _uniq_idx_length, "out of bounds");
+ return _uniq_idx[idx];
+ } else {
+ return idx;
+ }
}
- const char *unique_opnd_ident(int idx); // Name of operand at unique idx.
+ const char *unique_opnd_ident(uint idx); // Name of operand at unique idx.
// Operands which are only KILLs aren't part of the input array and
// require special handling in some cases. Their position in this
diff --git a/src/share/vm/adlc/main.cpp b/src/share/vm/adlc/main.cpp
index 1bf98e603..7278bcfce 100644
--- a/src/share/vm/adlc/main.cpp
+++ b/src/share/vm/adlc/main.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -490,7 +490,7 @@ int get_legal_text(FileBuff &fbuf, char **legal_text)
// VS2005 has its own definition, identical to this one.
#if !defined(_WIN32) || defined(_WIN64) || _MSC_VER < 1400
-void *operator new( size_t size, int, const char *, int ) {
+void *operator new( size_t size, int, const char *, int ) throw() {
return ::operator new( size );
}
#endif
diff --git a/src/share/vm/adlc/output_c.cpp b/src/share/vm/adlc/output_c.cpp
index f86dd21fc..b89785913 100644
--- a/src/share/vm/adlc/output_c.cpp
+++ b/src/share/vm/adlc/output_c.cpp
@@ -463,8 +463,9 @@ static int pipeline_res_mask_initializer(
uint resources_used_exclusively = 0;
for (pipeclass->_resUsage.reset();
- (piperesource = (const PipeClassResourceForm *)pipeclass->_resUsage.iter()) != NULL; )
+ (piperesource = (const PipeClassResourceForm*)pipeclass->_resUsage.iter()) != NULL; ) {
element_count++;
+ }
// Pre-compute the string length
int templen;
@@ -482,8 +483,8 @@ static int pipeline_res_mask_initializer(
for (i = rescount; i > 0; i /= 10)
maskdigit++;
- static const char * pipeline_use_cycle_mask = "Pipeline_Use_Cycle_Mask";
- static const char * pipeline_use_element = "Pipeline_Use_Element";
+ static const char* pipeline_use_cycle_mask = "Pipeline_Use_Cycle_Mask";
+ static const char* pipeline_use_element = "Pipeline_Use_Element";
templen = 1 +
(int)(strlen(pipeline_use_cycle_mask) + (int)strlen(pipeline_use_element) +
@@ -496,11 +497,12 @@ static int pipeline_res_mask_initializer(
templen = 0;
for (pipeclass->_resUsage.reset();
- (piperesource = (const PipeClassResourceForm *)pipeclass->_resUsage.iter()) != NULL; ) {
+ (piperesource = (const PipeClassResourceForm*)pipeclass->_resUsage.iter()) != NULL; ) {
int used_mask = pipeline->_resdict[piperesource->_resource]->is_resource()->mask();
- if (!used_mask)
+ if (!used_mask) {
fprintf(stderr, "*** used_mask is 0 ***\n");
+ }
resources_used |= used_mask;
@@ -509,8 +511,9 @@ static int pipeline_res_mask_initializer(
for (lb = 0; (used_mask & (1 << lb)) == 0; lb++);
for (ub = 31; (used_mask & (1 << ub)) == 0; ub--);
- if (lb == ub)
+ if (lb == ub) {
resources_used_exclusively |= used_mask;
+ }
int formatlen =
sprintf(&resource_mask[templen], " %s(0x%0*x, %*d, %*d, %s %s(",
@@ -526,7 +529,7 @@ static int pipeline_res_mask_initializer(
int cycles = piperesource->_cycles;
uint stage = pipeline->_stages.index(piperesource->_stage);
- if (NameList::Not_in_list == stage) {
+ if ((uint)NameList::Not_in_list == stage) {
fprintf(stderr,
"pipeline_res_mask_initializer: "
"semantic error: "
@@ -534,8 +537,8 @@ static int pipeline_res_mask_initializer(
piperesource->_stage);
exit(1);
}
- uint upper_limit = stage+cycles-1;
- uint lower_limit = stage-1;
+ uint upper_limit = stage + cycles - 1;
+ uint lower_limit = stage - 1;
uint upper_idx = upper_limit >> 5;
uint lower_idx = lower_limit >> 5;
uint upper_position = upper_limit & 0x1f;
@@ -543,7 +546,7 @@ static int pipeline_res_mask_initializer(
uint mask = (((uint)1) << upper_position) - 1;
- while ( upper_idx > lower_idx ) {
+ while (upper_idx > lower_idx) {
res_mask[upper_idx--] |= mask;
mask = (uint)-1;
}
@@ -565,8 +568,9 @@ static int pipeline_res_mask_initializer(
}
resource_mask[templen] = 0;
- if (last_comma)
+ if (last_comma) {
last_comma[0] = ' ';
+ }
// See if the same string is in the table
int ndx = pipeline_res_mask.index(resource_mask);
@@ -580,7 +584,7 @@ static int pipeline_res_mask_initializer(
fprintf(fp_cpp, "static const Pipeline_Use_Element pipeline_res_mask_%03d[%d] = {\n%s};\n\n",
ndx+1, element_count, resource_mask);
- char * args = new char [9 + 2*masklen + maskdigit];
+ char* args = new char [9 + 2*masklen + maskdigit];
sprintf(args, "0x%0*x, 0x%0*x, %*d",
masklen, resources_used,
@@ -589,8 +593,9 @@ static int pipeline_res_mask_initializer(
pipeline_res_args.addName(args);
}
- else
+ else {
delete [] resource_mask;
+ }
delete [] res_mask;
//delete [] res_masks;
@@ -1090,7 +1095,7 @@ static void check_peepmatch_instruction_sequence(FILE *fp, PeepMatch *pmatch, Pe
fprintf(fp, " // Identify previous instruction if inside this block\n");
fprintf(fp, " if( ");
print_block_index(fp, inst_position);
- fprintf(fp, " > 0 ) {\n Node *n = block->_nodes.at(");
+ fprintf(fp, " > 0 ) {\n Node *n = block->get_node(");
print_block_index(fp, inst_position);
fprintf(fp, ");\n inst%d = (n->is_Mach()) ? ", inst_position);
fprintf(fp, "n->as_Mach() : NULL;\n }\n");
@@ -1787,7 +1792,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
// Skip first unique operands.
for( i = 1; i < cur_num_opnds; i++ ) {
comp = node->_components.iter();
- if( (int)i != node->unique_opnds_idx(i) ) {
+ if (i != node->unique_opnds_idx(i)) {
break;
}
new_num_opnds++;
@@ -1795,7 +1800,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
// Replace not unique operands with next unique operands.
for( ; i < cur_num_opnds; i++ ) {
comp = node->_components.iter();
- int j = node->unique_opnds_idx(i);
+ uint j = node->unique_opnds_idx(i);
// unique_opnds_idx(i) is unique if unique_opnds_idx(j) is not unique.
if( j != node->unique_opnds_idx(j) ) {
fprintf(fp," set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
diff --git a/src/share/vm/asm/codeBuffer.hpp b/src/share/vm/asm/codeBuffer.hpp
index fdbc861e9..5399d3432 100644
--- a/src/share/vm/asm/codeBuffer.hpp
+++ b/src/share/vm/asm/codeBuffer.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -296,8 +296,8 @@ class CodeBuffer: public StackObj {
// CodeBuffers must be allocated on the stack except for a single
// special case during expansion which is handled internally. This
// is done to guarantee proper cleanup of resources.
- void* operator new(size_t size) { return ResourceObj::operator new(size); }
- void operator delete(void* p) { ShouldNotCallThis(); }
+ void* operator new(size_t size) throw() { return ResourceObj::operator new(size); }
+ void operator delete(void* p) { ShouldNotCallThis(); }
public:
typedef int csize_t; // code size type; would be size_t except for history
diff --git a/src/share/vm/c1/c1_CodeStubs.hpp b/src/share/vm/c1/c1_CodeStubs.hpp
index 7235cd6c3..5f4a04c5b 100644
--- a/src/share/vm/c1/c1_CodeStubs.hpp
+++ b/src/share/vm/c1/c1_CodeStubs.hpp
@@ -364,7 +364,8 @@ class PatchingStub: public CodeStub {
enum PatchID {
access_field_id,
load_klass_id,
- load_mirror_id
+ load_mirror_id,
+ load_appendix_id
};
enum constants {
patch_info_size = 3
@@ -417,7 +418,7 @@ class PatchingStub: public CodeStub {
}
NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start());
n_move->set_offset(field_offset);
- } else if (_id == load_klass_id || _id == load_mirror_id) {
+ } else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
assert(_obj != noreg, "must have register object for load_klass/load_mirror");
#ifdef ASSERT
// verify that we're pointing at a NativeMovConstReg
diff --git a/src/share/vm/c1/c1_Compilation.cpp b/src/share/vm/c1/c1_Compilation.cpp
index 3c74f10a3..7824307b2 100644
--- a/src/share/vm/c1/c1_Compilation.cpp
+++ b/src/share/vm/c1/c1_Compilation.cpp
@@ -77,16 +77,19 @@ class PhaseTraceTime: public TraceTime {
private:
JavaThread* _thread;
CompileLog* _log;
+ TimerName _timer;
public:
PhaseTraceTime(TimerName timer)
- : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose), _log(NULL) {
+ : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose),
+ _log(NULL), _timer(timer)
+ {
if (Compilation::current() != NULL) {
_log = Compilation::current()->log();
}
if (_log != NULL) {
- _log->begin_head("phase name='%s'", timer_name[timer]);
+ _log->begin_head("phase name='%s'", timer_name[_timer]);
_log->stamp();
_log->end_head();
}
@@ -94,7 +97,7 @@ class PhaseTraceTime: public TraceTime {
~PhaseTraceTime() {
if (_log != NULL)
- _log->done("phase");
+ _log->done("phase name='%s'", timer_name[_timer]);
}
};
diff --git a/src/share/vm/c1/c1_Compilation.hpp b/src/share/vm/c1/c1_Compilation.hpp
index 897da9762..f98ae97f3 100644
--- a/src/share/vm/c1/c1_Compilation.hpp
+++ b/src/share/vm/c1/c1_Compilation.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -279,8 +279,8 @@ class InstructionMark: public StackObj {
// Base class for objects allocated by the compiler in the compilation arena
class CompilationResourceObj ALLOCATION_SUPER_CLASS_SPEC {
public:
- void* operator new(size_t size) { return Compilation::current()->arena()->Amalloc(size); }
- void* operator new(size_t size, Arena* arena) {
+ void* operator new(size_t size) throw() { return Compilation::current()->arena()->Amalloc(size); }
+ void* operator new(size_t size, Arena* arena) throw() {
return arena->Amalloc(size);
}
void operator delete(void* p) {} // nothing to do
diff --git a/src/share/vm/c1/c1_Compiler.cpp b/src/share/vm/c1/c1_Compiler.cpp
index 282050618..ecace4dad 100644
--- a/src/share/vm/c1/c1_Compiler.cpp
+++ b/src/share/vm/c1/c1_Compiler.cpp
@@ -77,30 +77,42 @@ void Compiler::initialize() {
}
-BufferBlob* Compiler::build_buffer_blob() {
+BufferBlob* Compiler::get_buffer_blob(ciEnv* env) {
+ // Allocate buffer blob once at startup since allocation for each
+ // compilation seems to be too expensive (at least on Intel win32).
+ BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
+ if (buffer_blob != NULL) {
+ return buffer_blob;
+ }
+
// setup CodeBuffer. Preallocate a BufferBlob of size
// NMethodSizeLimit plus some extra space for constants.
int code_buffer_size = Compilation::desired_max_code_buffer_size() +
Compilation::desired_max_constant_size();
- BufferBlob* blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
- code_buffer_size);
- guarantee(blob != NULL, "must create initial code buffer");
- return blob;
+
+ buffer_blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
+ code_buffer_size);
+ if (buffer_blob == NULL) {
+ CompileBroker::handle_full_code_cache();
+ env->record_failure("CodeCache is full");
+ } else {
+ CompilerThread::current()->set_buffer_blob(buffer_blob);
+ }
+
+ return buffer_blob;
}
void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) {
- // Allocate buffer blob once at startup since allocation for each
- // compilation seems to be too expensive (at least on Intel win32).
- BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
+ BufferBlob* buffer_blob = Compiler::get_buffer_blob(env);
if (buffer_blob == NULL) {
- buffer_blob = build_buffer_blob();
- CompilerThread::current()->set_buffer_blob(buffer_blob);
+ return;
}
if (!is_initialized()) {
initialize();
}
+
// invoke compilation
{
// We are nested here because we need for the destructor
diff --git a/src/share/vm/c1/c1_Compiler.hpp b/src/share/vm/c1/c1_Compiler.hpp
index 9702a15ee..6e209d003 100644
--- a/src/share/vm/c1/c1_Compiler.hpp
+++ b/src/share/vm/c1/c1_Compiler.hpp
@@ -46,7 +46,7 @@ class Compiler: public AbstractCompiler {
virtual bool is_c1() { return true; };
- BufferBlob* build_buffer_blob();
+ BufferBlob* get_buffer_blob(ciEnv* env);
// Missing feature tests
virtual bool supports_native() { return true; }
diff --git a/src/share/vm/c1/c1_GraphBuilder.cpp b/src/share/vm/c1/c1_GraphBuilder.cpp
index 8d7619eed..03d0d75fa 100644
--- a/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1583,7 +1583,7 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
ObjectType* obj_type = obj->type()->as_ObjectType();
if (obj_type->is_constant() && !PatchALot) {
ciObject* const_oop = obj_type->constant_value();
- if (!const_oop->is_null_object()) {
+ if (!const_oop->is_null_object() && const_oop->is_loaded()) {
if (field->is_constant()) {
ciConstant field_val = field->constant_value_of(const_oop);
BasicType field_type = field_val.basic_type();
@@ -1667,9 +1667,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
assert(declared_signature != NULL, "cannot be null");
- // FIXME bail out for now
- if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
- BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
+ if (!C1PatchInvokeDynamic && Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
+ BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)");
}
// we have to make sure the argument size (incl. the receiver)
@@ -1713,10 +1712,23 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
break;
}
+ } else {
+ if (bc_raw == Bytecodes::_invokehandle) {
+ assert(!will_link, "should come here only for unlinked call");
+ code = Bytecodes::_invokespecial;
+ }
}
// Push appendix argument (MethodType, CallSite, etc.), if one.
- if (stream()->has_appendix()) {
+ bool patch_for_appendix = false;
+ int patching_appendix_arg = 0;
+ if (C1PatchInvokeDynamic &&
+ (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot))) {
+ Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
+ apush(arg);
+ patch_for_appendix = true;
+ patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
+ } else if (stream()->has_appendix()) {
ciObject* appendix = stream()->get_appendix();
Value arg = append(new Constant(new ObjectConstant(appendix)));
apush(arg);
@@ -1732,7 +1744,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
!(// %%% FIXME: Are both of these relevant?
target->is_method_handle_intrinsic() ||
- target->is_compiled_lambda_form())) {
+ target->is_compiled_lambda_form()) &&
+ !patch_for_appendix) {
Value receiver = NULL;
ciInstanceKlass* receiver_klass = NULL;
bool type_is_exact = false;
@@ -1850,7 +1863,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
// check if we could do inlining
if (!PatchALot && Inline && klass->is_loaded() &&
(klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
- && target->is_loaded()) {
+ && target->is_loaded()
+ && !patch_for_appendix) {
// callee is known => check if we have static binding
assert(target->is_loaded(), "callee must be known");
if (code == Bytecodes::_invokestatic ||
@@ -1901,7 +1915,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code == Bytecodes::_invokespecial ||
code == Bytecodes::_invokevirtual ||
code == Bytecodes::_invokeinterface;
- Values* args = state()->pop_arguments(target->arg_size_no_receiver());
+ Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
Value recv = has_receiver ? apop() : NULL;
int vtable_index = Method::invalid_vtable_index;
@@ -3461,6 +3475,14 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
preserves_state = true;
break;
+ case vmIntrinsics::_updateCRC32:
+ case vmIntrinsics::_updateBytesCRC32:
+ case vmIntrinsics::_updateByteBufferCRC32:
+ if (!UseCRC32Intrinsics) return false;
+ cantrap = false;
+ preserves_state = true;
+ break;
+
case vmIntrinsics::_loadFence :
case vmIntrinsics::_storeFence:
case vmIntrinsics::_fullFence :
@@ -4197,7 +4219,9 @@ void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool succes
}
}
- if (!PrintInlining) return;
+ if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
+ return;
+ }
CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
if (success && CIPrintMethodCodes) {
callee->print_codes();
diff --git a/src/share/vm/c1/c1_IR.cpp b/src/share/vm/c1/c1_IR.cpp
index e9e73db0c..13a7f790f 100644
--- a/src/share/vm/c1/c1_IR.cpp
+++ b/src/share/vm/c1/c1_IR.cpp
@@ -506,7 +506,7 @@ ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start
_loop_map(0, 0), // initialized later with correct size
_compilation(c)
{
- TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order");
+ TRACE_LINEAR_SCAN(2, tty->print_cr("***** computing linear-scan block order"));
init_visited();
count_edges(start_block, NULL);
@@ -683,7 +683,7 @@ void ComputeLinearScanOrder::clear_non_natural_loops(BlockBegin* start_block) {
}
void ComputeLinearScanOrder::assign_loop_depth(BlockBegin* start_block) {
- TRACE_LINEAR_SCAN(3, "----- computing loop-depth and weight");
+ TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing loop-depth and weight"));
init_visited();
assert(_work_list.is_empty(), "work list must be empty before processing");
@@ -868,7 +868,7 @@ void ComputeLinearScanOrder::append_block(BlockBegin* cur) {
}
void ComputeLinearScanOrder::compute_order(BlockBegin* start_block) {
- TRACE_LINEAR_SCAN(3, "----- computing final block order");
+ TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing final block order"));
// the start block is always the first block in the linear scan order
_linear_scan_order = new BlockList(_num_blocks);
diff --git a/src/share/vm/c1/c1_Instruction.hpp b/src/share/vm/c1/c1_Instruction.hpp
index 6b1f6ddd3..9563b720a 100644
--- a/src/share/vm/c1/c1_Instruction.hpp
+++ b/src/share/vm/c1/c1_Instruction.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -323,7 +323,7 @@ class Instruction: public CompilationResourceObj {
}
public:
- void* operator new(size_t size) {
+ void* operator new(size_t size) throw() {
Compilation* c = Compilation::current();
void* res = c->arena()->Amalloc(size);
((Instruction*)res)->_id = c->get_next_id();
@@ -1611,7 +1611,7 @@ LEAF(BlockBegin, StateSplit)
friend class SuxAndWeightAdjuster;
public:
- void* operator new(size_t size) {
+ void* operator new(size_t size) throw() {
Compilation* c = Compilation::current();
void* res = c->arena()->Amalloc(size);
((BlockBegin*)res)->_id = c->get_next_id();
diff --git a/src/share/vm/c1/c1_LIR.cpp b/src/share/vm/c1/c1_LIR.cpp
index 4036cf213..3d0963255 100644
--- a/src/share/vm/c1/c1_LIR.cpp
+++ b/src/share/vm/c1/c1_LIR.cpp
@@ -205,23 +205,24 @@ void LIR_OprDesc::validate_type() const {
#ifdef ASSERT
if (!is_pointer() && !is_illegal()) {
+ OprKind kindfield = kind_field(); // Factored out because of compiler bug, see 8002160
switch (as_BasicType(type_field())) {
case T_LONG:
- assert((kind_field() == cpu_register || kind_field() == stack_value) &&
+ assert((kindfield == cpu_register || kindfield == stack_value) &&
size_field() == double_size, "must match");
break;
case T_FLOAT:
// FP return values can be also in CPU registers on ARM and PPC (softfp ABI)
- assert((kind_field() == fpu_register || kind_field() == stack_value
- ARM_ONLY(|| kind_field() == cpu_register)
- PPC_ONLY(|| kind_field() == cpu_register) ) &&
+ assert((kindfield == fpu_register || kindfield == stack_value
+ ARM_ONLY(|| kindfield == cpu_register)
+ PPC_ONLY(|| kindfield == cpu_register) ) &&
size_field() == single_size, "must match");
break;
case T_DOUBLE:
// FP return values can be also in CPU registers on ARM and PPC (softfp ABI)
- assert((kind_field() == fpu_register || kind_field() == stack_value
- ARM_ONLY(|| kind_field() == cpu_register)
- PPC_ONLY(|| kind_field() == cpu_register) ) &&
+ assert((kindfield == fpu_register || kindfield == stack_value
+ ARM_ONLY(|| kindfield == cpu_register)
+ PPC_ONLY(|| kindfield == cpu_register) ) &&
size_field() == double_size, "must match");
break;
case T_BOOLEAN:
@@ -233,7 +234,7 @@ void LIR_OprDesc::validate_type() const {
case T_OBJECT:
case T_METADATA:
case T_ARRAY:
- assert((kind_field() == cpu_register || kind_field() == stack_value) &&
+ assert((kindfield == cpu_register || kindfield == stack_value) &&
size_field() == single_size, "must match");
break;
@@ -433,6 +434,11 @@ LIR_OpArrayCopy::LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_
_stub = new ArrayCopyStub(this);
}
+LIR_OpUpdateCRC32::LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res)
+ : LIR_Op(lir_updatecrc32, res, NULL)
+ , _crc(crc)
+ , _val(val) {
+}
//-------------------verify--------------------------
@@ -879,6 +885,20 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
}
+// LIR_OpUpdateCRC32
+ case lir_updatecrc32: {
+ assert(op->as_OpUpdateCRC32() != NULL, "must be");
+ LIR_OpUpdateCRC32* opUp = (LIR_OpUpdateCRC32*)op;
+
+ assert(opUp->_crc->is_valid(), "used"); do_input(opUp->_crc); do_temp(opUp->_crc);
+ assert(opUp->_val->is_valid(), "used"); do_input(opUp->_val); do_temp(opUp->_val);
+ assert(opUp->_result->is_valid(), "used"); do_output(opUp->_result);
+ assert(opUp->_info == NULL, "no info for LIR_OpUpdateCRC32");
+
+ break;
+ }
+
+
// LIR_OpLock
case lir_lock:
case lir_unlock: {
@@ -1059,6 +1079,10 @@ void LIR_OpArrayCopy::emit_code(LIR_Assembler* masm) {
masm->emit_code_stub(stub());
}
+void LIR_OpUpdateCRC32::emit_code(LIR_Assembler* masm) {
+ masm->emit_updatecrc32(this);
+}
+
void LIR_Op0::emit_code(LIR_Assembler* masm) {
masm->emit_op0(this);
}
@@ -1771,6 +1795,8 @@ const char * LIR_Op::name() const {
case lir_dynamic_call: s = "dynamic"; break;
// LIR_OpArrayCopy
case lir_arraycopy: s = "arraycopy"; break;
+ // LIR_OpUpdateCRC32
+ case lir_updatecrc32: s = "updatecrc32"; break;
// LIR_OpLock
case lir_lock: s = "lock"; break;
case lir_unlock: s = "unlock"; break;
@@ -1823,6 +1849,13 @@ void LIR_OpArrayCopy::print_instr(outputStream* out) const {
tmp()->print(out); out->print(" ");
}
+// LIR_OpUpdateCRC32
+void LIR_OpUpdateCRC32::print_instr(outputStream* out) const {
+ crc()->print(out); out->print(" ");
+ val()->print(out); out->print(" ");
+ result_opr()->print(out); out->print(" ");
+}
+
// LIR_OpCompareAndSwap
void LIR_OpCompareAndSwap::print_instr(outputStream* out) const {
addr()->print(out); out->print(" ");
diff --git a/src/share/vm/c1/c1_LIR.hpp b/src/share/vm/c1/c1_LIR.hpp
index 45f7e882c..ee8c542af 100644
--- a/src/share/vm/c1/c1_LIR.hpp
+++ b/src/share/vm/c1/c1_LIR.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -879,6 +879,7 @@ class LIR_OpCall;
class LIR_OpJavaCall;
class LIR_OpRTCall;
class LIR_OpArrayCopy;
+class LIR_OpUpdateCRC32;
class LIR_OpLock;
class LIR_OpTypeCheck;
class LIR_OpCompareAndSwap;
@@ -984,6 +985,9 @@ enum LIR_Code {
, begin_opArrayCopy
, lir_arraycopy
, end_opArrayCopy
+ , begin_opUpdateCRC32
+ , lir_updatecrc32
+ , end_opUpdateCRC32
, begin_opLock
, lir_lock
, lir_unlock
@@ -1139,6 +1143,7 @@ class LIR_Op: public CompilationResourceObj {
virtual LIR_Op2* as_Op2() { return NULL; }
virtual LIR_Op3* as_Op3() { return NULL; }
virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
+ virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; }
virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
@@ -1208,8 +1213,6 @@ class LIR_OpJavaCall: public LIR_OpCall {
bool is_invokedynamic() const { return code() == lir_dynamic_call; }
bool is_method_handle_invoke() const {
return
- is_invokedynamic() // An invokedynamic is always a MethodHandle call site.
- ||
method()->is_compiled_lambda_form() // Java-generated adapter
||
method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic
@@ -1295,6 +1298,25 @@ public:
void print_instr(outputStream* out) const PRODUCT_RETURN;
};
+// LIR_OpUpdateCRC32
+class LIR_OpUpdateCRC32: public LIR_Op {
+ friend class LIR_OpVisitState;
+
+private:
+ LIR_Opr _crc;
+ LIR_Opr _val;
+
+public:
+
+ LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res);
+
+ LIR_Opr crc() const { return _crc; }
+ LIR_Opr val() const { return _val; }
+
+ virtual void emit_code(LIR_Assembler* masm);
+ virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return this; }
+ void print_instr(outputStream* out) const PRODUCT_RETURN;
+};
// --------------------------------------------------
// LIR_Op0
@@ -2221,6 +2243,8 @@ class LIR_List: public CompilationResourceObj {
void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
+ void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res) { append(new LIR_OpUpdateCRC32(crc, val, res)); }
+
void fpop_raw() { append(new LIR_Op0(lir_fpop_raw)); }
void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
diff --git a/src/share/vm/c1/c1_LIRAssembler.cpp b/src/share/vm/c1/c1_LIRAssembler.cpp
index 4d41e7972..5e20257ff 100644
--- a/src/share/vm/c1/c1_LIRAssembler.cpp
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp
@@ -97,12 +97,23 @@ void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_cod
default:
ShouldNotReachHere();
}
+ } else if (patch->id() == PatchingStub::load_appendix_id) {
+ Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
+ assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
} else {
ShouldNotReachHere();
}
#endif
}
+PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
+ IRScope* scope = info->scope();
+ Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
+ if (Bytecodes::has_optional_appendix(bc_raw)) {
+ return PatchingStub::load_appendix_id;
+ }
+ return PatchingStub::load_mirror_id;
+}
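patching_id lets shared code choose between the mirror- and appendix-patching stubs based on the raw bytecode at the call site. A platform assembler would consume it roughly as follows (a hypothetical sketch; the real call sites are in the per-architecture LIR_Assembler files, which are not part of this diff):

    // Sketch: materialize a yet-unknown oop, deferring to a patching stub.
    void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
      jobject o = NULL;  // placeholder oop, patched at runtime
      PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
      __ movoop(reg, o);
      patching_epilog(patch, lir_patch_normal, reg, info);
    }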
//---------------------------------------------------------------
diff --git a/src/share/vm/c1/c1_LIRAssembler.hpp b/src/share/vm/c1/c1_LIRAssembler.hpp
index 747d4fcb5..c82bf3b37 100644
--- a/src/share/vm/c1/c1_LIRAssembler.hpp
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -119,6 +119,8 @@ class LIR_Assembler: public CompilationResourceObj {
void comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op);
+ PatchingStub::PatchID patching_id(CodeEmitInfo* info);
+
public:
LIR_Assembler(Compilation* c);
~LIR_Assembler();
@@ -195,6 +197,7 @@ class LIR_Assembler: public CompilationResourceObj {
void emit_opBranch(LIR_OpBranch* op);
void emit_opLabel(LIR_OpLabel* op);
void emit_arraycopy(LIR_OpArrayCopy* op);
+ void emit_updatecrc32(LIR_OpUpdateCRC32* op);
void emit_opConvert(LIR_OpConvert* op);
void emit_alloc_obj(LIR_OpAllocObj* op);
void emit_alloc_array(LIR_OpAllocArray* op);
diff --git a/src/share/vm/c1/c1_LIRGenerator.cpp b/src/share/vm/c1/c1_LIRGenerator.cpp
index f1438a8b6..e6ea67265 100644
--- a/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -2248,6 +2248,7 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
// We still need to continue with the checks.
if (src.is_constant()) {
ciObject* src_con = src.get_jobject_constant();
+ guarantee(src_con != NULL, "no source constant");
if (src_con->is_null_object()) {
// The constant src object is null - We can skip
@@ -2310,7 +2311,7 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
if (gen_type_check) {
// We have determined that offset == referent_offset && src != null.
// if (src->_klass->_reference_type == REF_NONE) -> continue
- __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), UseCompressedKlassPointers ? T_OBJECT : T_ADDRESS), src_klass);
+ __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
LIR_Opr reference_type = new_register(T_INT);
__ move(reference_type_addr, reference_type);
@@ -3009,6 +3010,12 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
do_Reference_get(x);
break;
+ case vmIntrinsics::_updateCRC32:
+ case vmIntrinsics::_updateBytesCRC32:
+ case vmIntrinsics::_updateByteBufferCRC32:
+ do_update_CRC32(x);
+ break;
+
default: ShouldNotReachHere(); break;
}
}
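The three new cases map java.util.zip.CRC32.update(int) and its two bulk variants onto do_update_CRC32. For reference, the value the generated code must produce is ordinary CRC-32 over the reflected polynomial 0xEDB88320, exactly as java.util.zip.CRC32 defines it; a minimal self-contained bitwise sketch of the single-byte case (the real stub, StubRoutines::updateBytesCRC32, is table- or instruction-driven):

    #include <cstdint>
    #include <cstdio>

    // One CRC-32 byte step, matching java.util.zip.CRC32 semantics
    // (illustration only, not the VM's implementation).
    static uint32_t crc32_update_byte(uint32_t crc, uint8_t b) {
      crc = ~crc;                 // the update works on the inverted register
      crc ^= b;
      for (int i = 0; i < 8; i++) {
        crc = (crc >> 1) ^ ((crc & 1u) ? 0xEDB88320u : 0u);
      }
      return ~crc;
    }

    int main() {
      const char* s = "abc";
      uint32_t crc = 0;
      for (int i = 0; s[i] != '\0'; i++) {
        crc = crc32_update_byte(crc, (uint8_t)s[i]);
      }
      printf("%08x\n", crc);      // prints 352441c2, the CRC-32 of "abc"
      return 0;
    }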
diff --git a/src/share/vm/c1/c1_LIRGenerator.hpp b/src/share/vm/c1/c1_LIRGenerator.hpp
index d3c76865d..0a0292073 100644
--- a/src/share/vm/c1/c1_LIRGenerator.hpp
+++ b/src/share/vm/c1/c1_LIRGenerator.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -247,6 +247,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_NIOCheckIndex(Intrinsic* x);
void do_FPIntrinsics(Intrinsic* x);
void do_Reference_get(Intrinsic* x);
+ void do_update_CRC32(Intrinsic* x);
void do_UnsafePrefetch(UnsafePrefetch* x, bool is_store);
diff --git a/src/share/vm/c1/c1_Runtime1.cpp b/src/share/vm/c1/c1_Runtime1.cpp
index fa97b7490..c1e4d4589 100644
--- a/src/share/vm/c1/c1_Runtime1.cpp
+++ b/src/share/vm/c1/c1_Runtime1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -327,6 +327,7 @@ const char* Runtime1::name_for_address(address entry) {
#ifdef TRACE_HAVE_INTRINSICS
FUNCTION_CASE(entry, TRACE_TIME_METHOD);
#endif
+ FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
#undef FUNCTION_CASE
@@ -736,10 +737,10 @@ static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
Bytecodes::Code code = field_access.code();
// We must load the class, initialize the class, and resolve the field
- FieldAccessInfo result; // initialize class if needed
+ fieldDescriptor result; // initialize class if needed
constantPoolHandle constants(THREAD, caller->constants());
- LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL);
- return result.klass()();
+ LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
+ return result.field_holder();
}
@@ -846,17 +847,18 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code
+ Handle appendix(THREAD, NULL); // oop needed by appendix_patching code
bool load_klass_or_mirror_patch_id =
(stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
if (stub_id == Runtime1::access_field_patching_id) {
Bytecode_field field_access(caller_method, bci);
- FieldAccessInfo result; // initialize class if needed
+ fieldDescriptor result; // initialize class if needed
Bytecodes::Code code = field_access.code();
constantPoolHandle constants(THREAD, caller_method->constants());
- LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK);
- patch_field_offset = result.field_offset();
+ LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
+ patch_field_offset = result.offset();
// If we're patching a field which is volatile then at compile time it
// must not have been known to be volatile, so the generated code
@@ -915,10 +917,32 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
mirror = Handle(THREAD, m);
}
break;
- default: Unimplemented();
+ default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
}
// convert to handle
load_klass = KlassHandle(THREAD, k);
+ } else if (stub_id == load_appendix_patching_id) {
+ Bytecode_invoke bytecode(caller_method, bci);
+ Bytecodes::Code bc = bytecode.invoke_code();
+
+ CallInfo info;
+ constantPoolHandle pool(thread, caller_method->constants());
+ int index = bytecode.index();
+ LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
+ appendix = info.resolved_appendix();
+ switch (bc) {
+ case Bytecodes::_invokehandle: {
+ int cache_index = ConstantPool::decode_cpcache_index(index, true);
+ assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
+ pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
+ break;
+ }
+ case Bytecodes::_invokedynamic: {
+ pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
+ break;
+ }
+ default: fatal("unexpected bytecode for load_appendix_patching_id");
+ }
} else {
ShouldNotReachHere();
}
@@ -942,16 +966,6 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
// Return to the now deoptimized frame.
}
- // If we are patching in a non-perm oop, make sure the nmethod
- // is on the right list.
- if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
- MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
- nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
- guarantee(nm != NULL, "only nmethods can contain non-perm oops");
- if (!nm->on_scavenge_root_list())
- CodeCache::add_scavenge_root_nmethod(nm);
- }
-
// Now copy code back
{
MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
@@ -1028,8 +1042,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
n_copy->data() == (intptr_t)Universe::non_oop_word(),
"illegal init value");
if (stub_id == Runtime1::load_klass_patching_id) {
- assert(load_klass() != NULL, "klass not set");
- n_copy->set_data((intx) (load_klass()));
+ assert(load_klass() != NULL, "klass not set");
+ n_copy->set_data((intx) (load_klass()));
} else {
assert(mirror() != NULL, "klass not set");
n_copy->set_data((intx) (mirror()));
@@ -1038,43 +1052,55 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
if (TracePatching) {
Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
}
-
-#if defined(SPARC) || defined(PPC) || defined(TARGET_ARCH_aarch64)
- // Update the location in the nmethod with the proper
- // metadata. When the code was generated, a NULL was stuffed
- // in the metadata table and that table needs to be update to
- // have the right value. On intel the value is kept
- // directly in the instruction instead of in the metadata
- // table, so set_data above effectively updated the value.
- nmethod* nm = CodeCache::find_nmethod(instr_pc);
- assert(nm != NULL, "invalid nmethod_pc");
- RelocIterator mds(nm, copy_buff, copy_buff + 1);
- bool found = false;
- while (mds.next() && !found) {
- if (mds.type() == relocInfo::oop_type) {
- assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
- oop_Relocation* r = mds.oop_reloc();
- oop* oop_adr = r->oop_addr();
- *oop_adr = mirror();
- r->fix_oop_relocation();
- found = true;
- } else if (mds.type() == relocInfo::metadata_type) {
- assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
- metadata_Relocation* r = mds.metadata_reloc();
- Metadata** metadata_adr = r->metadata_addr();
- *metadata_adr = load_klass();
- r->fix_metadata_relocation();
- found = true;
- }
- }
- assert(found, "the metadata must exist!");
-#endif
-
+ }
+ } else if (stub_id == Runtime1::load_appendix_patching_id) {
+ NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
+ assert(n_copy->data() == 0 ||
+ n_copy->data() == (intptr_t)Universe::non_oop_word(),
+ "illegal init value");
+ n_copy->set_data((intx) (appendix()));
+
+ if (TracePatching) {
+ Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
}
} else {
ShouldNotReachHere();
}
+#if defined(SPARC) || defined(PPC) || defined(TARGET_ARCH_aarch64)
+ if (load_klass_or_mirror_patch_id ||
+ stub_id == Runtime1::load_appendix_patching_id) {
+ // Update the location in the nmethod with the proper
+ // metadata. When the code was generated, a NULL was stuffed
+ // in the metadata table and that table needs to be updated to
+ // have the right value. On Intel the value is kept
+ // directly in the instruction instead of in the metadata
+ // table, so set_data above effectively updated the value.
+ nmethod* nm = CodeCache::find_nmethod(instr_pc);
+ assert(nm != NULL, "invalid nmethod_pc");
+ RelocIterator mds(nm, copy_buff, copy_buff + 1);
+ bool found = false;
+ while (mds.next() && !found) {
+ if (mds.type() == relocInfo::oop_type) {
+ assert(stub_id == Runtime1::load_mirror_patching_id ||
+ stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
+ oop_Relocation* r = mds.oop_reloc();
+ oop* oop_adr = r->oop_addr();
+ *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
+ r->fix_oop_relocation();
+ found = true;
+ } else if (mds.type() == relocInfo::metadata_type) {
+ assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
+ metadata_Relocation* r = mds.metadata_reloc();
+ Metadata** metadata_adr = r->metadata_addr();
+ *metadata_adr = load_klass();
+ r->fix_metadata_relocation();
+ found = true;
+ }
+ }
+ assert(found, "the metadata must exist!");
+ }
+#endif
if (do_patch) {
// replace instructions
// first replace the tail, then the call
@@ -1113,8 +1139,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
ICache::invalidate_range(instr_pc, *byte_count);
NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
- if (load_klass_or_mirror_patch_id
- || stub_id == Runtime1::access_field_patching_id) {
+ if (load_klass_or_mirror_patch_id) {
relocInfo::relocType rtype;
switch(stub_id) {
case Runtime1::load_klass_patching_id:
@@ -1183,6 +1208,21 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
}
}
+ // If we are patching in a non-perm oop, make sure the nmethod
+ // is on the right list.
+ if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
+ (appendix.not_null() && appendix->is_scavengable()))) {
+ MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
+ guarantee(nm != NULL, "only nmethods can contain non-perm oops");
+ if (!nm->on_scavenge_root_list()) {
+ CodeCache::add_scavenge_root_nmethod(nm);
+ }
+
+ // Since we've patched some oops in the nmethod,
+ // (re)register it with the heap.
+ Universe::heap()->register_nmethod(nm);
+ }
JRT_END
//
@@ -1233,6 +1273,24 @@ int Runtime1::move_mirror_patching(JavaThread* thread) {
return caller_is_deopted();
}
+int Runtime1::move_appendix_patching(JavaThread* thread) {
+//
+// NOTE: we are still in Java
+//
+ Thread* THREAD = thread;
+ debug_only(NoHandleMark nhm;)
+ {
+ // Enter VM mode
+
+ ResetNoHandleMark rnhm;
+ patch_code(thread, load_appendix_patching_id);
+ }
+ // Back in Java: use no oops and DON'T safepoint
+
+ // Return true if calling code is deoptimized
+
+ return caller_is_deopted();
+}
//
// Entry point for compiled code. We want to patch a nmethod.
// We don't do a normal VM transition here because we want to
@@ -1321,7 +1379,7 @@ JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int d
if (length == 0) return ac_ok;
if (src->is_typeArray()) {
- Klass* const klass_oop = src->klass();
+ Klass* klass_oop = src->klass();
if (klass_oop != dst->klass()) return ac_failed;
TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop);
const int l2es = klass->log2_element_size();
diff --git a/src/share/vm/c1/c1_Runtime1.hpp b/src/share/vm/c1/c1_Runtime1.hpp
index 3262e6506..dedbd1246 100644
--- a/src/share/vm/c1/c1_Runtime1.hpp
+++ b/src/share/vm/c1/c1_Runtime1.hpp
@@ -67,6 +67,7 @@ class StubAssembler;
stub(access_field_patching) \
stub(load_klass_patching) \
stub(load_mirror_patching) \
+ stub(load_appendix_patching) \
stub(g1_pre_barrier_slow) \
stub(g1_post_barrier_slow) \
stub(fpu2long_stub) \
@@ -160,6 +161,7 @@ class Runtime1: public AllStatic {
static int access_field_patching(JavaThread* thread);
static int move_klass_patching(JavaThread* thread);
static int move_mirror_patching(JavaThread* thread);
+ static int move_appendix_patching(JavaThread* thread);
static void patch_code(JavaThread* thread, StubID stub_id);
#ifdef TARGET_ARCH_aarch64
diff --git a/src/share/vm/c1/c1_globals.cpp b/src/share/vm/c1/c1_globals.cpp
index a611f033e..553b9aa43 100644
--- a/src/share/vm/c1/c1_globals.cpp
+++ b/src/share/vm/c1/c1_globals.cpp
@@ -25,4 +25,4 @@
#include "precompiled.hpp"
#include "c1/c1_globals.hpp"
-C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
diff --git a/src/share/vm/c1/c1_globals.hpp b/src/share/vm/c1/c1_globals.hpp
index d31eeb9c1..7f9cb8032 100644
--- a/src/share/vm/c1/c1_globals.hpp
+++ b/src/share/vm/c1/c1_globals.hpp
@@ -57,7 +57,7 @@
//
// Defines all global flags used by the client compiler.
//
-#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct) \
+#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
\
/* Printing */ \
notproduct(bool, PrintC1Statistics, false, \
@@ -336,15 +336,19 @@
"Use CHA and exact type results at call sites when updating MDOs")\
\
product(bool, C1UpdateMethodData, trueInTiered, \
- "Update MethodData*s in Tier1-generated code") \
+ "Update MethodData*s in Tier1-generated code") \
\
develop(bool, PrintCFGToFile, false, \
"print control flow graph to a separate file during compilation") \
\
+ diagnostic(bool, C1PatchInvokeDynamic, true, \
+ "Patch invokedynamic appendix not known at compile time") \
+ \
+ \
// Read default values for c1 globals
-C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
#endif // SHARE_VM_C1_C1_GLOBALS_HPP
diff --git a/src/share/vm/ci/bcEscapeAnalyzer.cpp b/src/share/vm/ci/bcEscapeAnalyzer.cpp
index 32dc35d80..e2fca4845 100644
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp
@@ -138,6 +138,16 @@ bool BCEscapeAnalyzer::is_arg_stack(ArgumentMap vars){
return false;
}
+// return true if all argument elements of vars are returned
+bool BCEscapeAnalyzer::returns_all(ArgumentMap vars) {
+ for (int i = 0; i < _arg_size; i++) {
+ if (vars.contains(i) && !_arg_returned.test(i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, VectorSet &bm) {
for (int i = 0; i < _arg_size; i++) {
if (vars.contains(i)) {
@@ -166,6 +176,11 @@ void BCEscapeAnalyzer::set_global_escape(ArgumentMap vars, bool merge) {
if (vars.contains_unknown() || vars.contains_vars()) {
_return_allocated = false;
}
+ if (_return_local && vars.contains_vars() && !returns_all(vars)) {
+ // The return result should be invalidated if args in the new
+ // state are not recorded in the return state.
+ _return_local = false;
+ }
}
}
diff --git a/src/share/vm/ci/bcEscapeAnalyzer.hpp b/src/share/vm/ci/bcEscapeAnalyzer.hpp
index 99a8adccb..3c701b6a4 100644
--- a/src/share/vm/ci/bcEscapeAnalyzer.hpp
+++ b/src/share/vm/ci/bcEscapeAnalyzer.hpp
@@ -80,6 +80,7 @@ class BCEscapeAnalyzer : public ResourceObj {
void set_returned(ArgumentMap vars);
bool is_argument(ArgumentMap vars);
bool is_arg_stack(ArgumentMap vars);
+ bool returns_all(ArgumentMap vars);
void clear_bits(ArgumentMap vars, VectorSet &bs);
void set_method_escape(ArgumentMap vars);
void set_global_escape(ArgumentMap vars, bool merge = false);
diff --git a/src/share/vm/ci/ciArray.cpp b/src/share/vm/ci/ciArray.cpp
index 584b1aeb5..fdcc63a0d 100644
--- a/src/share/vm/ci/ciArray.cpp
+++ b/src/share/vm/ci/ciArray.cpp
@@ -24,13 +24,92 @@
#include "precompiled.hpp"
#include "ci/ciArray.hpp"
+#include "ci/ciArrayKlass.hpp"
+#include "ci/ciConstant.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciUtilities.hpp"
+#include "oops/objArrayOop.hpp"
+#include "oops/typeArrayOop.hpp"
// ciArray
//
// This class represents an arrayOop in the HotSpot virtual
// machine.
+static BasicType fixup_element_type(BasicType bt) {
+ if (bt == T_ARRAY) return T_OBJECT;
+ if (bt == T_BOOLEAN) return T_BYTE;
+ return bt;
+}
+
+ciConstant ciArray::element_value_impl(BasicType elembt,
+ arrayOop ary,
+ int index) {
+ if (ary == NULL)
+ return ciConstant();
+ assert(ary->is_array(), "");
+ if (index < 0 || index >= ary->length())
+ return ciConstant();
+ ArrayKlass* ak = (ArrayKlass*) ary->klass();
+ BasicType abt = ak->element_type();
+ if (fixup_element_type(elembt) !=
+ fixup_element_type(abt))
+ return ciConstant();
+ switch (elembt) {
+ case T_ARRAY:
+ case T_OBJECT:
+ {
+ assert(ary->is_objArray(), "");
+ objArrayOop objary = (objArrayOop) ary;
+ oop elem = objary->obj_at(index);
+ ciEnv* env = CURRENT_ENV;
+ ciObject* box = env->get_object(elem);
+ return ciConstant(T_OBJECT, box);
+ }
+ }
+ assert(ary->is_typeArray(), "");
+ typeArrayOop tary = (typeArrayOop) ary;
+ jint value = 0;
+ switch (elembt) {
+ case T_LONG: return ciConstant(tary->long_at(index));
+ case T_FLOAT: return ciConstant(tary->float_at(index));
+ case T_DOUBLE: return ciConstant(tary->double_at(index));
+ default: return ciConstant();
+ case T_BYTE: value = tary->byte_at(index); break;
+ case T_BOOLEAN: value = tary->byte_at(index) & 1; break;
+ case T_SHORT: value = tary->short_at(index); break;
+ case T_CHAR: value = tary->char_at(index); break;
+ case T_INT: value = tary->int_at(index); break;
+ }
+ return ciConstant(elembt, value);
+}
+
+// ------------------------------------------------------------------
+// ciArray::element_value
+//
+// Current value of an element.
+// Returns T_ILLEGAL if there is no element at the given index.
+ciConstant ciArray::element_value(int index) {
+ BasicType elembt = element_basic_type();
+ GUARDED_VM_ENTRY(
+ return element_value_impl(elembt, get_arrayOop(), index);
+ )
+}
+
+// ------------------------------------------------------------------
+// ciArray::element_value_by_offset
+//
+// Current value of an element at the specified offset.
+// Returns T_ILLEGAL if there is no element at the given offset.
+ciConstant ciArray::element_value_by_offset(intptr_t element_offset) {
+ BasicType elembt = element_basic_type();
+ intptr_t shift = exact_log2(type2aelembytes(elembt));
+ intptr_t header = arrayOopDesc::base_offset_in_bytes(elembt);
+ intptr_t index = (element_offset - header) >> shift;
+ intptr_t offset = header + ((intptr_t)index << shift);
+ if (offset != element_offset || index != (jint)index)
+ return ciConstant();
+ return element_value((jint) index);
+}
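element_value_by_offset validates its input by a decode/re-encode round trip: derive a candidate index from the byte offset, rebuild the offset from that index, and give up (returning T_ILLEGAL via the default ciConstant) unless the two agree, which happens only for a header-relative, element-aligned offset. The same arithmetic in a standalone sketch, with hypothetical layout constants (the real values come from arrayOopDesc::base_offset_in_bytes and type2aelembytes):

    #include <cstdint>
    #include <cstdio>

    static const intptr_t kHeader = 16; // assumed array header size in bytes
    static const intptr_t kShift  = 2;  // assumed log2 of element size (4-byte ints)

    // Returns the element index, or -1 for a misaligned or unrepresentable
    // offset, mirroring the round-trip check above. (Negative indexes are
    // likewise rejected later, by element_value's bounds check.)
    static intptr_t offset_to_index(intptr_t element_offset) {
      intptr_t index  = (element_offset - kHeader) >> kShift;
      intptr_t offset = kHeader + (index << kShift);
      if (offset != element_offset || index != (int32_t)index) return -1;
      return index;
    }

    int main() {
      printf("%ld\n", (long)offset_to_index(16)); // 0, the first element
      printf("%ld\n", (long)offset_to_index(24)); // 2
      printf("%ld\n", (long)offset_to_index(25)); // -1, not element-aligned
      return 0;
    }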
// ------------------------------------------------------------------
// ciArray::print_impl
diff --git a/src/share/vm/ci/ciArray.hpp b/src/share/vm/ci/ciArray.hpp
index 440e407a5..c5c86265d 100644
--- a/src/share/vm/ci/ciArray.hpp
+++ b/src/share/vm/ci/ciArray.hpp
@@ -25,6 +25,8 @@
#ifndef SHARE_VM_CI_CIARRAY_HPP
#define SHARE_VM_CI_CIARRAY_HPP
+#include "ci/ciArrayKlass.hpp"
+#include "ci/ciConstant.hpp"
#include "ci/ciObject.hpp"
#include "oops/arrayOop.hpp"
#include "oops/objArrayOop.hpp"
@@ -45,15 +47,30 @@ protected:
ciArray(ciKlass* klass, int len) : ciObject(klass), _length(len) {}
- arrayOop get_arrayOop() { return (arrayOop)get_oop(); }
+ arrayOop get_arrayOop() const { return (arrayOop)get_oop(); }
const char* type_string() { return "ciArray"; }
void print_impl(outputStream* st);
+ ciConstant element_value_impl(BasicType elembt, arrayOop ary, int index);
+
public:
int length() { return _length; }
+ // Convenience routines.
+ ciArrayKlass* array_type() { return klass()->as_array_klass(); }
+ ciType* element_type() { return array_type()->element_type(); }
+ BasicType element_basic_type() { return element_type()->basic_type(); }
+
+ // Current value of an element.
+ // Returns T_ILLEGAL if there is no element at the given index.
+ ciConstant element_value(int index);
+
+ // Current value of an element at the specified offset.
+ // Returns T_ILLEGAL if there is no element at the given offset.
+ ciConstant element_value_by_offset(intptr_t element_offset);
+
// What kind of ciObject is this?
bool is_array() { return true; }
bool is_java_object() { return true; }
diff --git a/src/share/vm/ci/ciConstant.hpp b/src/share/vm/ci/ciConstant.hpp
index 8cdc893fd..7a72a7de1 100644
--- a/src/share/vm/ci/ciConstant.hpp
+++ b/src/share/vm/ci/ciConstant.hpp
@@ -41,7 +41,6 @@ private:
union {
jint _int;
jlong _long;
- jint _long_half[2];
jfloat _float;
jdouble _double;
ciObject* _object;
@@ -111,6 +110,20 @@ public:
return _value._object;
}
+ bool is_null_or_zero() const {
+ if (!is_java_primitive(basic_type())) {
+ return as_object()->is_null_object();
+ } else if (type2size[basic_type()] == 1) {
+ // treat float bits as int, to avoid comparison with -0 and NaN
+ return (_value._int == 0);
+ } else if (type2size[basic_type()] == 2) {
+ // treat double bits as long, to avoid comparison with -0 and NaN
+ return (_value._long == 0);
+ } else {
+ return false;
+ }
+ }
+
// Debugging output
void print();
};
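The comments inside is_null_or_zero carry the subtle part: comparing _float or _double directly would treat -0.0 as zero and would never match NaN, so the union is read through its integer members instead. A standalone illustration of why the bit view and the numeric view disagree:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      float neg_zero = -0.0f;
      uint32_t bits;
      memcpy(&bits, &neg_zero, sizeof(bits)); // reread the float as raw bits

      // Numerically, -0.0f compares equal to 0.0f...
      printf("-0.0f == 0.0f : %d\n", neg_zero == 0.0f ? 1 : 0); // 1
      // ...but its bit pattern is nonzero (sign bit set: 0x80000000), so a
      // @Stable field initialized to -0.0f still counts as initialized.
      printf("bits == 0     : %d\n", bits == 0u ? 1 : 0);       // 0
      return 0;
    }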
diff --git a/src/share/vm/ci/ciEnv.cpp b/src/share/vm/ci/ciEnv.cpp
index 7776db5eb..0102b2b21 100644
--- a/src/share/vm/ci/ciEnv.cpp
+++ b/src/share/vm/ci/ciEnv.cpp
@@ -1150,6 +1150,10 @@ void ciEnv::record_out_of_memory_failure() {
record_method_not_compilable("out of memory");
}
+ciInstance* ciEnv::unloaded_ciinstance() {
+ GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
+}
+
void ciEnv::dump_replay_data(outputStream* out) {
VM_ENTRY_MARK;
MutexLocker ml(Compile_lock);
diff --git a/src/share/vm/ci/ciEnv.hpp b/src/share/vm/ci/ciEnv.hpp
index 45dd42eb2..01f417d2f 100644
--- a/src/share/vm/ci/ciEnv.hpp
+++ b/src/share/vm/ci/ciEnv.hpp
@@ -400,6 +400,7 @@ public:
static ciInstanceKlass* unloaded_ciinstance_klass() {
return _unloaded_ciinstance_klass;
}
+ ciInstance* unloaded_ciinstance();
ciKlass* find_system_klass(ciSymbol* klass_name);
// Note: To find a class from its name string, use ciSymbol::make,
diff --git a/src/share/vm/ci/ciField.cpp b/src/share/vm/ci/ciField.cpp
index fe967554c..b08ec3616 100644
--- a/src/share/vm/ci/ciField.cpp
+++ b/src/share/vm/ci/ciField.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,6 @@ ciField::ciField(ciInstanceKlass* klass, int index): _known_to_link_with_put(NUL
assert(klass->get_instanceKlass()->is_linked(), "must be linked before using its constant pool");
- _cp_index = index;
constantPoolHandle cpool(thread, klass->get_instanceKlass()->constants());
// Get the field's name, signature, and type.
@@ -116,7 +115,7 @@ ciField::ciField(ciInstanceKlass* klass, int index): _known_to_link_with_put(NUL
// The declared holder of this field may not have been loaded.
// Bail out with partial field information.
if (!holder_is_accessible) {
- // _cp_index and _type have already been set.
+ // _type has already been set.
// The default values for _flags and _constant_value will suffice.
// We need values for _holder, _offset, and _is_constant.
_holder = declared_holder;
@@ -146,8 +145,6 @@ ciField::ciField(ciInstanceKlass* klass, int index): _known_to_link_with_put(NUL
ciField::ciField(fieldDescriptor *fd): _known_to_link_with_put(NULL), _known_to_link_with_get(NULL) {
ASSERT_IN_VM;
- _cp_index = -1;
-
// Get the field's name, signature, and type.
ciEnv* env = CURRENT_ENV;
_name = env->get_symbol(fd->name());
@@ -189,12 +186,14 @@ void ciField::initialize_from(fieldDescriptor* fd) {
_holder = CURRENT_ENV->get_instance_klass(fd->field_holder());
// Check to see if the field is constant.
- if (_holder->is_initialized() && this->is_final()) {
+ bool is_final = this->is_final();
+ bool is_stable = FoldStableValues && this->is_stable();
+ if (_holder->is_initialized() && (is_final || is_stable)) {
if (!this->is_static()) {
// A field can be constant if it's a final static field or if
// it's a final non-static field of a trusted class (classes in
// java.lang.invoke and sun.invoke packages and subpackages).
- if (trust_final_non_static_fields(_holder)) {
+ if (is_stable || trust_final_non_static_fields(_holder)) {
_is_constant = true;
return;
}
@@ -227,7 +226,6 @@ void ciField::initialize_from(fieldDescriptor* fd) {
Handle mirror = k->java_mirror();
- _is_constant = true;
switch(type()->basic_type()) {
case T_BYTE:
_constant_value = ciConstant(type()->basic_type(), mirror->byte_field(_offset));
@@ -273,6 +271,12 @@ void ciField::initialize_from(fieldDescriptor* fd) {
}
}
}
+ if (is_stable && _constant_value.is_null_or_zero()) {
+ // It is not a constant after all; treat it as uninitialized.
+ _is_constant = false;
+ } else {
+ _is_constant = true;
+ }
} else {
_is_constant = false;
}
@@ -344,12 +348,11 @@ bool ciField::will_link(ciInstanceKlass* accessing_klass,
}
}
- FieldAccessInfo result;
- constantPoolHandle c_pool(THREAD,
- accessing_klass->get_instanceKlass()->constants());
- LinkResolver::resolve_field(result, c_pool, _cp_index,
- Bytecodes::java_code(bc),
- true, false, KILL_COMPILE_ON_FATAL_(false));
+ fieldDescriptor result;
+ LinkResolver::resolve_field(result, _holder->get_instanceKlass(),
+ _name->get_symbol(), _signature->get_symbol(),
+ accessing_klass->get_Klass(), bc, true, false,
+ KILL_COMPILE_ON_FATAL_(false));
// update the hit-cache, unless there is a problem with memory scoping:
if (accessing_klass->is_shared() || !is_shared()) {
@@ -373,8 +376,11 @@ void ciField::print() {
tty->print(" signature=");
_signature->print_symbol();
tty->print(" offset=%d type=", _offset);
- if (_type != NULL) _type->print_name();
- else tty->print("(reference)");
+ if (_type != NULL)
+ _type->print_name();
+ else
+ tty->print("(reference)");
+ tty->print(" flags=%04x", flags().as_int());
tty->print(" is_constant=%s", bool_to_str(_is_constant));
if (_is_constant && is_static()) {
tty->print(" constant_value=");
diff --git a/src/share/vm/ci/ciField.hpp b/src/share/vm/ci/ciField.hpp
index ff96c9931..75263e3f2 100644
--- a/src/share/vm/ci/ciField.hpp
+++ b/src/share/vm/ci/ciField.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,9 +53,6 @@ private:
ciInstanceKlass* _known_to_link_with_get;
ciConstant _constant_value;
- // Used for will_link
- int _cp_index;
-
ciType* compute_type();
ciType* compute_type_impl();
@@ -139,7 +136,10 @@ public:
// non-constant fields. These are java.lang.System.in
// and java.lang.System.out. Abomination.
//
- // Note: the check for case 4 is not yet implemented.
+ // A field is also considered constant if it is marked @Stable
+ // and is non-null (or non-zero, if a primitive).
+ // For non-static fields, the null/zero check must be
+ // arranged by the user, as constant_value().is_null_or_zero().
bool is_constant() { return _is_constant; }
// Get the constant value of this field.
@@ -173,6 +173,7 @@ public:
bool is_protected () { return flags().is_protected(); }
bool is_static () { return flags().is_static(); }
bool is_final () { return flags().is_final(); }
+ bool is_stable () { return flags().is_stable(); }
bool is_volatile () { return flags().is_volatile(); }
bool is_transient () { return flags().is_transient(); }
diff --git a/src/share/vm/ci/ciFlags.hpp b/src/share/vm/ci/ciFlags.hpp
index 6dc50d25a..87e19466f 100644
--- a/src/share/vm/ci/ciFlags.hpp
+++ b/src/share/vm/ci/ciFlags.hpp
@@ -59,6 +59,7 @@ public:
bool is_interface () const { return (_flags & JVM_ACC_INTERFACE ) != 0; }
bool is_abstract () const { return (_flags & JVM_ACC_ABSTRACT ) != 0; }
bool is_strict () const { return (_flags & JVM_ACC_STRICT ) != 0; }
+ bool is_stable () const { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
// Conversion
jint as_int() { return _flags; }
diff --git a/src/share/vm/ci/ciInstance.cpp b/src/share/vm/ci/ciInstance.cpp
index 2d29c0dc8..8b48b1b37 100644
--- a/src/share/vm/ci/ciInstance.cpp
+++ b/src/share/vm/ci/ciInstance.cpp
@@ -60,10 +60,10 @@ ciType* ciInstance::java_mirror_type() {
//
// Constant value of a field.
ciConstant ciInstance::field_value(ciField* field) {
- assert(is_loaded() &&
- field->holder()->is_loaded() &&
- klass()->is_subclass_of(field->holder()),
- "invalid access");
+ assert(is_loaded(), "invalid access - must be loaded");
+ assert(field->holder()->is_loaded(), "invalid access - holder must be loaded");
+ assert(klass()->is_subclass_of(field->holder()), "invalid access - must be subclass");
+
VM_ENTRY_MARK;
ciConstant result;
Handle obj = get_oop();
@@ -127,6 +127,8 @@ ciConstant ciInstance::field_value(ciField* field) {
ciConstant ciInstance::field_value_by_offset(int field_offset) {
ciInstanceKlass* ik = klass()->as_instance_klass();
ciField* field = ik->get_field_by_offset(field_offset, false);
+ if (field == NULL)
+ return ciConstant(); // T_ILLEGAL
return field_value(field);
}
diff --git a/src/share/vm/ci/ciInstanceKlass.cpp b/src/share/vm/ci/ciInstanceKlass.cpp
index d74e3693a..c689efa46 100644
--- a/src/share/vm/ci/ciInstanceKlass.cpp
+++ b/src/share/vm/ci/ciInstanceKlass.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -211,13 +211,42 @@ bool ciInstanceKlass::is_java_lang_Object() const {
// ------------------------------------------------------------------
// ciInstanceKlass::uses_default_loader
-bool ciInstanceKlass::uses_default_loader() {
+bool ciInstanceKlass::uses_default_loader() const {
// Note: We do not need to resolve the handle or enter the VM
// in order to test null-ness.
return _loader == NULL;
}
// ------------------------------------------------------------------
+
+/**
+ * Return the basic type of the boxed value if this is a box klass, or T_OBJECT if it is not.
+ */
+BasicType ciInstanceKlass::box_klass_type() const {
+ if (uses_default_loader() && is_loaded()) {
+ return SystemDictionary::box_klass_type(get_Klass());
+ } else {
+ return T_OBJECT;
+ }
+}
+
+/**
+ * Is this a boxing klass?
+ */
+bool ciInstanceKlass::is_box_klass() const {
+ return is_java_primitive(box_klass_type());
+}
+
+/**
+ * Does the given offset refer to this klass's boxed value field?
+ */
+bool ciInstanceKlass::is_boxed_value_offset(int offset) const {
+ BasicType bt = box_klass_type();
+ return is_java_primitive(bt) &&
+ (offset == java_lang_boxing_object::value_offset_in_bytes(bt));
+}
+
+// ------------------------------------------------------------------
// ciInstanceKlass::is_in_package
//
// Is this klass in the given package?
@@ -493,8 +522,7 @@ ciInstanceKlass::compute_nonstatic_fields_impl(GrowableArray<ciField*>*
for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) continue;
- fieldDescriptor fd;
- fd.initialize(k, fs.index());
+ fieldDescriptor& fd = fs.field_descriptor();
ciField* field = new (arena) ciField(&fd);
fields->append(field);
}
diff --git a/src/share/vm/ci/ciInstanceKlass.hpp b/src/share/vm/ci/ciInstanceKlass.hpp
index f7aeba2df..0b244a297 100644
--- a/src/share/vm/ci/ciInstanceKlass.hpp
+++ b/src/share/vm/ci/ciInstanceKlass.hpp
@@ -217,10 +217,14 @@ public:
ciInstanceKlass* implementor();
// Is the defining class loader of this class the default loader?
- bool uses_default_loader();
+ bool uses_default_loader() const;
bool is_java_lang_Object() const;
+ BasicType box_klass_type() const;
+ bool is_box_klass() const;
+ bool is_boxed_value_offset(int offset) const;
+
// Is this klass in the given package?
bool is_in_package(const char* packagename) {
return is_in_package(packagename, (int) strlen(packagename));
diff --git a/src/share/vm/ci/ciMethod.cpp b/src/share/vm/ci/ciMethod.cpp
index 780f4ad86..486d8c499 100644
--- a/src/share/vm/ci/ciMethod.cpp
+++ b/src/share/vm/ci/ciMethod.cpp
@@ -286,7 +286,10 @@ int ciMethod::itable_index() {
check_is_loaded();
assert(holder()->is_linked(), "must be linked");
VM_ENTRY_MARK;
- return klassItable::compute_itable_index(get_Method());
+ Method* m = get_Method();
+ if (!m->has_itable_index())
+ return Method::nonvirtual_vtable_index;
+ return m->itable_index();
}
#endif // SHARK
@@ -1137,6 +1140,10 @@ bool ciMethod::is_klass_loaded(int refinfo_index, bool must_be_resolved) const {
// ------------------------------------------------------------------
// ciMethod::check_call
bool ciMethod::check_call(int refinfo_index, bool is_static) const {
+ // This method is used only in C2 from InlineTree::ok_to_inline,
+ // and is only used under -Xcomp or -XX:CompileTheWorld.
+ // It appears to fail when applied to an invokeinterface call site.
+ // FIXME: Remove this method and resolve_method_statically; refactor to use the other LinkResolver entry points.
VM_ENTRY_MARK;
{
EXCEPTION_MARK;
@@ -1179,6 +1186,44 @@ bool ciMethod::has_jsrs () const { FETCH_FLAG_FROM_VM(has_jsrs);
bool ciMethod::is_accessor () const { FETCH_FLAG_FROM_VM(is_accessor); }
bool ciMethod::is_initializer () const { FETCH_FLAG_FROM_VM(is_initializer); }
+bool ciMethod::is_boxing_method() const {
+ if (holder()->is_box_klass()) {
+ switch (intrinsic_id()) {
+ case vmIntrinsics::_Boolean_valueOf:
+ case vmIntrinsics::_Byte_valueOf:
+ case vmIntrinsics::_Character_valueOf:
+ case vmIntrinsics::_Short_valueOf:
+ case vmIntrinsics::_Integer_valueOf:
+ case vmIntrinsics::_Long_valueOf:
+ case vmIntrinsics::_Float_valueOf:
+ case vmIntrinsics::_Double_valueOf:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+bool ciMethod::is_unboxing_method() const {
+ if (holder()->is_box_klass()) {
+ switch (intrinsic_id()) {
+ case vmIntrinsics::_booleanValue:
+ case vmIntrinsics::_byteValue:
+ case vmIntrinsics::_charValue:
+ case vmIntrinsics::_shortValue:
+ case vmIntrinsics::_intValue:
+ case vmIntrinsics::_longValue:
+ case vmIntrinsics::_floatValue:
+ case vmIntrinsics::_doubleValue:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
BCEscapeAnalyzer *ciMethod::get_bcea() {
#ifdef COMPILER2
if (_bcea == NULL) {
diff --git a/src/share/vm/ci/ciMethod.hpp b/src/share/vm/ci/ciMethod.hpp
index 46ea500b5..ddff0ac9b 100644
--- a/src/share/vm/ci/ciMethod.hpp
+++ b/src/share/vm/ci/ciMethod.hpp
@@ -177,6 +177,10 @@ class ciMethod : public ciMetadata {
address bcp = code() + bci;
return Bytecodes::java_code_at(NULL, bcp);
}
+ Bytecodes::Code raw_code_at_bci(int bci) {
+ address bcp = code() + bci;
+ return Bytecodes::code_at(NULL, bcp);
+ }
BCEscapeAnalyzer *get_bcea();
ciMethodBlocks *get_method_blocks();
@@ -298,6 +302,8 @@ class ciMethod : public ciMetadata {
bool is_initializer () const;
bool can_be_statically_bound() const { return _can_be_statically_bound; }
void dump_replay_data(outputStream* st);
+ bool is_boxing_method() const;
+ bool is_unboxing_method() const;
// Print the bytecodes of this method.
void print_codes_on(outputStream* st);
diff --git a/src/share/vm/ci/ciObjectFactory.cpp b/src/share/vm/ci/ciObjectFactory.cpp
index 8fb6e12b7..a22fcf62c 100644
--- a/src/share/vm/ci/ciObjectFactory.cpp
+++ b/src/share/vm/ci/ciObjectFactory.cpp
@@ -265,8 +265,6 @@ ciObject* ciObjectFactory::get(oop key) {
ciMetadata* ciObjectFactory::get_metadata(Metadata* key) {
ASSERT_IN_VM;
- assert(key == NULL || key->is_metadata(), "must be");
-
#ifdef ASSERT
if (CIObjectFactoryVerify) {
Metadata* last = NULL;
@@ -565,7 +563,10 @@ ciInstance* ciObjectFactory::get_unloaded_method_type_constant(ciSymbol* signatu
return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
}
-
+ciInstance* ciObjectFactory::get_unloaded_object_constant() {
+ if (ciEnv::_Object_klass == NULL) return NULL;
+ return get_unloaded_instance(ciEnv::_Object_klass->as_instance_klass());
+}
//------------------------------------------------------------------
// ciObjectFactory::get_empty_methodData
diff --git a/src/share/vm/ci/ciObjectFactory.hpp b/src/share/vm/ci/ciObjectFactory.hpp
index 29de514b2..ba3d88c12 100644
--- a/src/share/vm/ci/ciObjectFactory.hpp
+++ b/src/share/vm/ci/ciObjectFactory.hpp
@@ -131,6 +131,8 @@ public:
ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);
+ ciInstance* get_unloaded_object_constant();
+
// Get the ciMethodData representing the methodData for a method
// with none.
ciMethodData* get_empty_methodData();
diff --git a/src/share/vm/ci/ciReplay.cpp b/src/share/vm/ci/ciReplay.cpp
index a314f35f8..837d529ca 100644
--- a/src/share/vm/ci/ciReplay.cpp
+++ b/src/share/vm/ci/ciReplay.cpp
@@ -299,7 +299,7 @@ class CompileReplay : public StackObj {
Symbol* method_signature = parse_symbol(CHECK_NULL);
Method* m = k->find_method(method_name, method_signature);
if (m == NULL) {
- report_error("can't find method");
+ report_error("Can't find method");
}
return m;
}
@@ -398,8 +398,8 @@ class CompileReplay : public StackObj {
// compile <klass> <name> <signature> <entry_bci> <comp_level>
void process_compile(TRAPS) {
- // methodHandle method;
Method* method = parse_method(CHECK);
+ if (had_error()) return;
int entry_bci = parse_int("entry_bci");
const char* comp_level_label = "comp_level";
int comp_level = parse_int(comp_level_label);
@@ -440,6 +440,7 @@ class CompileReplay : public StackObj {
//
void process_ciMethod(TRAPS) {
Method* method = parse_method(CHECK);
+ if (had_error()) return;
ciMethodRecord* rec = new_ciMethod(method);
rec->invocation_counter = parse_int("invocation_counter");
rec->backedge_counter = parse_int("backedge_counter");
@@ -451,6 +452,7 @@ class CompileReplay : public StackObj {
// ciMethodData <klass> <name> <signature> <state> <current mileage> orig <length> # # ... data <length> # # ... oops <length>
void process_ciMethodData(TRAPS) {
Method* method = parse_method(CHECK);
+ if (had_error()) return;
/* just copied from Method, to build interpreter data */
if (InstanceRefKlass::owns_pending_list_lock((JavaThread*)THREAD)) {
return;
@@ -492,7 +494,9 @@ class CompileReplay : public StackObj {
}
Klass* k = parse_klass(CHECK);
rec->oops_offsets[i] = offset;
- rec->oops_handles[i] = (jobject)(new KlassHandle(THREAD, k));
+ KlassHandle *kh = NEW_C_HEAP_OBJ(KlassHandle, mtCompiler);
+ ::new ((void*)kh) KlassHandle(THREAD, k);
+ rec->oops_handles[i] = (jobject)kh;
}
}
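The replay fix replaces a plain new KlassHandle(...) with two explicit steps: NEW_C_HEAP_OBJ obtains raw storage from the VM's C heap, and placement new runs the constructor in that storage (HotSpot types are not meant to go through the global operator new). The general idiom in self-contained form, with malloc standing in for the C-heap allocator:

    #include <cstdio>
    #include <cstdlib>
    #include <new>   // for placement new

    struct Handle {            // stand-in for KlassHandle
      int id;
      explicit Handle(int i) : id(i) {}
    };

    int main() {
      // Step 1: raw, untyped storage (the NEW_C_HEAP_OBJ(KlassHandle, mtCompiler) part).
      void* raw = malloc(sizeof(Handle));
      // Step 2: construct in place (the ::new ((void*)kh) KlassHandle(THREAD, k) part).
      Handle* h = ::new (raw) Handle(42);
      printf("id = %d\n", h->id);   // 42
      // Placement new demands a matching explicit destructor call before freeing.
      h->~Handle();
      free(raw);
      return 0;
    }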
diff --git a/src/share/vm/ci/ciSymbol.hpp b/src/share/vm/ci/ciSymbol.hpp
index d54b54009..3c974cf27 100644
--- a/src/share/vm/ci/ciSymbol.hpp
+++ b/src/share/vm/ci/ciSymbol.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@ class ciSymbol : public ciBaseObject {
friend class ciInstanceKlass;
friend class ciSignature;
friend class ciMethod;
+ friend class ciField;
friend class ciObjArrayKlass;
private:
diff --git a/src/share/vm/ci/ciTypeArray.cpp b/src/share/vm/ci/ciTypeArray.cpp
index d4a6eff6f..2d013e21c 100644
--- a/src/share/vm/ci/ciTypeArray.cpp
+++ b/src/share/vm/ci/ciTypeArray.cpp
@@ -39,5 +39,10 @@
jchar ciTypeArray::char_at(int index) {
VM_ENTRY_MARK;
assert(index >= 0 && index < length(), "out of range");
- return get_typeArrayOop()->char_at(index);
+ jchar c = get_typeArrayOop()->char_at(index);
+#ifdef ASSERT
+ jchar d = element_value(index).as_char();
+ assert(c == d, "");
+#endif //ASSERT
+ return c;
}
diff --git a/src/share/vm/ci/ciUtilities.hpp b/src/share/vm/ci/ciUtilities.hpp
index 1a075bf6e..2032a8f35 100644
--- a/src/share/vm/ci/ciUtilities.hpp
+++ b/src/share/vm/ci/ciUtilities.hpp
@@ -96,7 +96,7 @@
CLEAR_PENDING_EXCEPTION; \
return (result); \
} \
- (0
+ (void)(0
#define KILL_COMPILE_ON_ANY \
THREAD); \
@@ -104,7 +104,7 @@
fatal("unhandled ci exception"); \
CLEAR_PENDING_EXCEPTION; \
} \
-(0
+(void)(0
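Both macros deliberately end in an unbalanced "(0": each is written as the trailing argument of a call (see the KILL_COMPILE_ON_FATAL_(false) uses earlier in this diff), so the call site's own ");" completes the expression. Casting to (void) only silences "expression result unused" warnings; the trick itself is unchanged. A self-contained sketch of the idiom, with hypothetical names:

    #include <cstdio>

    static bool g_failed = false;

    // Hypothetical macro in the KILL_COMPILE_ON_FATAL_ style: it supplies the
    // final call argument, smuggles in error handling, and leaves "(void)(0"
    // open for the caller's ");" to close.
    #define ON_FAIL_RETURN_(result) \
      &g_failed);                   \
      if (g_failed) {               \
        return (result);            \
      }                             \
      (void)(0

    static void risky(int x, bool* failed) { *failed = (x < 0); }

    static int demo(int x) {
      // Expands to: risky(x, &g_failed); if (g_failed) { return (-1); } (void)(0);
      risky(x, ON_FAIL_RETURN_(-1));
      return x * 2;
    }

    int main() {
      printf("%d %d\n", demo(3), demo(-3)); // prints "6 -1"
      return 0;
    }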
inline const char* bool_to_str(bool b) {
diff --git a/src/share/vm/classfile/altHashing.cpp b/src/share/vm/classfile/altHashing.cpp
index df2c53e55..8dfc3153c 100644
--- a/src/share/vm/classfile/altHashing.cpp
+++ b/src/share/vm/classfile/altHashing.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -242,8 +242,8 @@ static const juint MURMUR3_32_X86_CHECK_VALUE = 0xB0F57EE3;
void AltHashing::testMurmur3_32_ByteArray() {
// printf("testMurmur3_32_ByteArray\n");
- jbyte* vector = new jbyte[256];
- jbyte* hashes = new jbyte[4 * 256];
+ jbyte vector[256];
+ jbyte hashes[4 * 256];
for (int i = 0; i < 256; i++) {
vector[i] = (jbyte) i;
diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
index c93d72de6..70662d3e8 100644
--- a/src/share/vm/classfile/classFileParser.cpp
+++ b/src/share/vm/classfile/classFileParser.cpp
@@ -28,7 +28,6 @@
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/defaultMethods.hpp"
-#include "classfile/genericSignatures.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
@@ -39,6 +38,7 @@
#include "memory/gcLocker.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
+#include "memory/referenceType.hpp"
#include "memory/universe.inline.hpp"
#include "oops/constantPool.hpp"
#include "oops/fieldStreams.hpp"
@@ -444,8 +444,8 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
break;
case JVM_REF_invokeStatic:
case JVM_REF_invokeSpecial:
- check_property(
- tag.is_method() || tag.is_interface_method(),
+ check_property(tag.is_method() ||
+ ((_major_version >= JAVA_8_VERSION) && tag.is_interface_method()),
"Invalid constant pool index %u in class file %s (not a method)",
ref_index, CHECK_(nullHandle));
break;
@@ -888,6 +888,7 @@ void ClassFileParser::parse_field_attributes(u2 attributes_count,
int runtime_visible_type_annotations_length = 0;
u1* runtime_invisible_type_annotations = NULL;
int runtime_invisible_type_annotations_length = 0;
+ bool runtime_invisible_type_annotations_exists = false;
while (attributes_count--) {
cfs->guarantee_more(6, CHECK); // attribute_name_index, attribute_length
u2 attribute_name_index = cfs->get_u2_fast();
@@ -946,15 +947,27 @@ void ClassFileParser::parse_field_attributes(u2 attributes_count,
assert(runtime_invisible_annotations != NULL, "null invisible annotations");
cfs->skip_u1(runtime_invisible_annotations_length, CHECK);
} else if (attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
+ if (runtime_visible_type_annotations != NULL) {
+ classfile_parse_error(
+ "Multiple RuntimeVisibleTypeAnnotations attributes for field in class file %s", CHECK);
+ }
runtime_visible_type_annotations_length = attribute_length;
runtime_visible_type_annotations = cfs->get_u1_buffer();
assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
- } else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
- runtime_invisible_type_annotations_length = attribute_length;
- runtime_invisible_type_annotations = cfs->get_u1_buffer();
- assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
- cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
+ } else if (attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
+ if (runtime_invisible_type_annotations_exists) {
+ classfile_parse_error(
+ "Multiple RuntimeInvisibleTypeAnnotations attributes for field in class file %s", CHECK);
+ } else {
+ runtime_invisible_type_annotations_exists = true;
+ }
+ if (PreserveAllAnnotations) {
+ runtime_invisible_type_annotations_length = attribute_length;
+ runtime_invisible_type_annotations = cfs->get_u1_buffer();
+ assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+ }
+ cfs->skip_u1(attribute_length, CHECK);
} else {
cfs->skip_u1(attribute_length, CHECK); // Skip unknown attributes
}
@@ -1719,15 +1732,28 @@ void ClassFileParser::parse_annotations(u1* buffer, int limit,
coll->set_annotation(id);
if (id == AnnotationCollector::_sun_misc_Contended) {
+ // @Contended can optionally specify the contention group.
+ //
+ // Contended group defines the equivalence class over the fields:
+ // the fields within the same contended group are not treated distinct.
+ // The only exception is default group, which does not incur the
+ // equivalence. Naturally, contention group for classes is meaningless.
+ //
+ // While the contention group is specified as String, annotation
+ // values are already interned, and we might as well use the constant
+ // pool index as the group tag.
+ //
+ u2 group_index = 0; // default contended group
if (count == 1
&& s_size == (index - index0) // match size
&& s_tag_val == *(abase + tag_off)
&& member == vmSymbols::value_name()) {
- u2 group_index = Bytes::get_Java_u2(abase + s_con_off);
- coll->set_contended_group(group_index);
- } else {
- coll->set_contended_group(0); // default contended group
+ group_index = Bytes::get_Java_u2(abase + s_con_off);
+ if (_cp->symbol_at(group_index)->utf8_length() == 0) {
+ group_index = 0; // default contended group
+ }
}
+ coll->set_contended_group(group_index);
}
}
}
@@ -1761,6 +1787,10 @@ ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_d
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
return _method_LambdaForm_Hidden;
+ case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_invoke_Stable_signature):
+ if (_location != _in_field) break; // only allow for fields
+ if (!privileged) break; // only allow in privileged code
+ return _field_Stable;
case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_misc_Contended_signature):
if (_location != _in_field && _location != _in_class) break; // only allow for fields and classes
if (!EnableContended || (RestrictContended && !privileged)) break; // honor privileges
@@ -1773,6 +1803,8 @@ ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_d
void ClassFileParser::FieldAnnotationCollector::apply_to(FieldInfo* f) {
if (is_contended())
f->set_contended_group(contended_group());
+ if (is_stable())
+ f->set_stable(true);
}
ClassFileParser::FieldAnnotationCollector::~FieldAnnotationCollector() {
@@ -2047,6 +2079,7 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
int runtime_visible_type_annotations_length = 0;
u1* runtime_invisible_type_annotations = NULL;
int runtime_invisible_type_annotations_length = 0;
+ bool runtime_invisible_type_annotations_exists = false;
u1* annotation_default = NULL;
int annotation_default_length = 0;
@@ -2303,16 +2336,30 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
assert(annotation_default != NULL, "null annotation default");
cfs->skip_u1(annotation_default_length, CHECK_(nullHandle));
} else if (method_attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
+ if (runtime_visible_type_annotations != NULL) {
+ classfile_parse_error(
+ "Multiple RuntimeVisibleTypeAnnotations attributes for method in class file %s",
+ CHECK_(nullHandle));
+ }
runtime_visible_type_annotations_length = method_attribute_length;
runtime_visible_type_annotations = cfs->get_u1_buffer();
assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
// No need for the VM to parse Type annotations
cfs->skip_u1(runtime_visible_type_annotations_length, CHECK_(nullHandle));
- } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
- runtime_invisible_type_annotations_length = method_attribute_length;
- runtime_invisible_type_annotations = cfs->get_u1_buffer();
- assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
- cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK_(nullHandle));
+ } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
+ if (runtime_invisible_type_annotations_exists) {
+ classfile_parse_error(
+ "Multiple RuntimeInvisibleTypeAnnotations attributes for method in class file %s",
+ CHECK_(nullHandle));
+ } else {
+ runtime_invisible_type_annotations_exists = true;
+ }
+ if (PreserveAllAnnotations) {
+ runtime_invisible_type_annotations_length = method_attribute_length;
+ runtime_invisible_type_annotations = cfs->get_u1_buffer();
+ assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+ }
+ cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
} else {
// Skip unknown attributes
cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
@@ -2576,7 +2623,7 @@ void ClassFileParser::parse_classfile_sourcefile_attribute(TRAPS) {
valid_symbol_at(sourcefile_index),
"Invalid SourceFile attribute at constant pool index %u in class file %s",
sourcefile_index, CHECK);
- set_class_sourcefile(_cp->symbol_at(sourcefile_index));
+ set_class_sourcefile_index(sourcefile_index);
}
@@ -2714,7 +2761,7 @@ void ClassFileParser::parse_classfile_signature_attribute(TRAPS) {
valid_symbol_at(signature_index),
"Invalid constant pool index %u in Signature attribute in class file %s",
signature_index, CHECK);
- set_class_generic_signature(_cp->symbol_at(signature_index));
+ set_class_generic_signature_index(signature_index);
}
void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_byte_length, TRAPS) {
@@ -2805,6 +2852,7 @@ void ClassFileParser::parse_classfile_attributes(ClassFileParser::ClassAnnotatio
int runtime_visible_type_annotations_length = 0;
u1* runtime_invisible_type_annotations = NULL;
int runtime_invisible_type_annotations_length = 0;
+ bool runtime_invisible_type_annotations_exists = false;
u1* inner_classes_attribute_start = NULL;
u4 inner_classes_attribute_length = 0;
u2 enclosing_method_class_index = 0;
@@ -2908,16 +2956,28 @@ void ClassFileParser::parse_classfile_attributes(ClassFileParser::ClassAnnotatio
parsed_bootstrap_methods_attribute = true;
parse_classfile_bootstrap_methods_attribute(attribute_length, CHECK);
} else if (tag == vmSymbols::tag_runtime_visible_type_annotations()) {
+ if (runtime_visible_type_annotations != NULL) {
+ classfile_parse_error(
+ "Multiple RuntimeVisibleTypeAnnotations attributes in class file %s", CHECK);
+ }
runtime_visible_type_annotations_length = attribute_length;
runtime_visible_type_annotations = cfs->get_u1_buffer();
assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
// No need for the VM to parse Type annotations
cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
- } else if (PreserveAllAnnotations && tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
- runtime_invisible_type_annotations_length = attribute_length;
- runtime_invisible_type_annotations = cfs->get_u1_buffer();
- assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
- cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
+ } else if (tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
+ if (runtime_invisible_type_annotations_exists) {
+ classfile_parse_error(
+ "Multiple RuntimeInvisibleTypeAnnotations attributes in class file %s", CHECK);
+ } else {
+ runtime_invisible_type_annotations_exists = true;
+ }
+ if (PreserveAllAnnotations) {
+ runtime_invisible_type_annotations_length = attribute_length;
+ runtime_invisible_type_annotations = cfs->get_u1_buffer();
+ assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+ }
+ cfs->skip_u1(attribute_length, CHECK);
} else {
// Unknown attribute
cfs->skip_u1(attribute_length, CHECK);
@@ -2961,13 +3021,11 @@ void ClassFileParser::parse_classfile_attributes(ClassFileParser::ClassAnnotatio
void ClassFileParser::apply_parsed_class_attributes(instanceKlassHandle k) {
if (_synthetic_flag)
k->set_is_synthetic();
- if (_sourcefile != NULL) {
- _sourcefile->increment_refcount();
- k->set_source_file_name(_sourcefile);
+ if (_sourcefile_index != 0) {
+ k->set_source_file_name_index(_sourcefile_index);
}
- if (_generic_signature != NULL) {
- _generic_signature->increment_refcount();
- k->set_generic_signature(_generic_signature);
+ if (_generic_signature_index != 0) {
+ k->set_generic_signature_index(_generic_signature_index);
}
if (_sde_buffer != NULL) {
k->set_source_debug_extension(_sde_buffer, _sde_length);
@@ -3027,35 +3085,6 @@ AnnotationArray* ClassFileParser::assemble_annotations(u1* runtime_visible_annot
return annotations;
}
-
-#ifndef PRODUCT
-static void parseAndPrintGenericSignatures(
- instanceKlassHandle this_klass, TRAPS) {
- assert(ParseAllGenericSignatures == true, "Shouldn't call otherwise");
- ResourceMark rm;
-
- if (this_klass->generic_signature() != NULL) {
- using namespace generic;
- ClassDescriptor* spec = ClassDescriptor::parse_generic_signature(this_klass(), CHECK);
-
- tty->print_cr("Parsing %s", this_klass->generic_signature()->as_C_string());
- spec->print_on(tty);
-
- for (int i = 0; i < this_klass->methods()->length(); ++i) {
- Method* m = this_klass->methods()->at(i);
- MethodDescriptor* method_spec = MethodDescriptor::parse_generic_signature(m, spec);
- Symbol* sig = m->generic_signature();
- if (sig == NULL) {
- sig = m->signature();
- }
- tty->print_cr("Parsing %s", sig->as_C_string());
- method_spec->print_on(tty);
- }
- }
-}
-#endif // ndef PRODUCT
-
-
instanceKlassHandle ClassFileParser::parse_super_class(int super_class_index,
TRAPS) {
instanceKlassHandle super_klass;
@@ -3108,15 +3137,8 @@ void ClassFileParser::layout_fields(Handle class_loader,
FieldLayoutInfo* info,
TRAPS) {
- // get the padding width from the option
- // TODO: Ask VM about specific CPU we are running on
- int pad_size = ContendedPaddingWidth;
-
// Field size and offset computation
int nonstatic_field_size = _super_klass() == NULL ? 0 : _super_klass()->nonstatic_field_size();
-#ifndef PRODUCT
- int orig_nonstatic_field_size = 0;
-#endif
int next_static_oop_offset;
int next_static_double_offset;
int next_static_word_offset;
@@ -3127,13 +3149,14 @@ void ClassFileParser::layout_fields(Handle class_loader,
int next_nonstatic_word_offset;
int next_nonstatic_short_offset;
int next_nonstatic_byte_offset;
- int next_nonstatic_type_offset;
int first_nonstatic_oop_offset;
- int first_nonstatic_field_offset;
int next_nonstatic_field_offset;
int next_nonstatic_padded_offset;
// Count the contended fields by type.
+ //
+ // We ignore static fields, because @Contended is not supported for them.
+ // The layout code below will also ignore the static fields.
int nonstatic_contended_count = 0;
FieldAllocationCount fac_contended;
for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) {
@@ -3145,7 +3168,6 @@ void ClassFileParser::layout_fields(Handle class_loader,
}
}
}
- int contended_count = nonstatic_contended_count;
// Calculate the starting byte offsets
@@ -3165,61 +3187,60 @@ void ClassFileParser::layout_fields(Handle class_loader,
next_static_byte_offset = next_static_short_offset +
((fac->count[STATIC_SHORT]) * BytesPerShort);
- first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() +
- nonstatic_field_size * heapOopSize;
+ int nonstatic_fields_start = instanceOopDesc::base_offset_in_bytes() +
+ nonstatic_field_size * heapOopSize;
- // class is contended, pad before all the fields
- if (parsed_annotations->is_contended()) {
- first_nonstatic_field_offset += pad_size;
- }
+ next_nonstatic_field_offset = nonstatic_fields_start;
+
+ bool is_contended_class = parsed_annotations->is_contended();
- next_nonstatic_field_offset = first_nonstatic_field_offset;
+ // Class is contended, pad before all the fields
+ if (is_contended_class) {
+ next_nonstatic_field_offset += ContendedPaddingWidth;
+ }
+ // Compute the non-contended fields count.
+ // The packing code below relies on these counts to determine if some field
+ // can be squeezed into the alignment gap. Contended fields are obviously
+ // exempt from that.
unsigned int nonstatic_double_count = fac->count[NONSTATIC_DOUBLE] - fac_contended.count[NONSTATIC_DOUBLE];
unsigned int nonstatic_word_count = fac->count[NONSTATIC_WORD] - fac_contended.count[NONSTATIC_WORD];
unsigned int nonstatic_short_count = fac->count[NONSTATIC_SHORT] - fac_contended.count[NONSTATIC_SHORT];
unsigned int nonstatic_byte_count = fac->count[NONSTATIC_BYTE] - fac_contended.count[NONSTATIC_BYTE];
unsigned int nonstatic_oop_count = fac->count[NONSTATIC_OOP] - fac_contended.count[NONSTATIC_OOP];
+ // Total non-static fields count, including every contended field
+ unsigned int nonstatic_fields_count = fac->count[NONSTATIC_DOUBLE] + fac->count[NONSTATIC_WORD] +
+ fac->count[NONSTATIC_SHORT] + fac->count[NONSTATIC_BYTE] +
+ fac->count[NONSTATIC_OOP];
+
bool super_has_nonstatic_fields =
(_super_klass() != NULL && _super_klass->has_nonstatic_fields());
- bool has_nonstatic_fields = super_has_nonstatic_fields ||
- ((nonstatic_double_count + nonstatic_word_count +
- nonstatic_short_count + nonstatic_byte_count +
- nonstatic_oop_count) != 0);
+ bool has_nonstatic_fields = super_has_nonstatic_fields || (nonstatic_fields_count != 0);
// Prepare list of oops for oop map generation.
+ //
+ // "offset" and "count" lists are describing the set of contiguous oop
+ // regions. offset[i] is the start of the i-th region, which then has
+ // count[i] oops following. Before we know how many regions are required,
+ // we pessimistically allocate the maps to fit all the oops into the
+ // distinct regions.
+ //
+ // TODO: We add +1 to always allocate non-zero resource arrays; we need
+ // to figure out if we still need to do this.
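+ //
+ // For example (assuming 4-byte heap oops), oop fields at offsets 16, 20
+ // and 32 form two contiguous regions: offsets = {16, 32}, counts = {2, 1}.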
int* nonstatic_oop_offsets;
unsigned int* nonstatic_oop_counts;
unsigned int nonstatic_oop_map_count = 0;
+ unsigned int max_nonstatic_oop_maps = fac->count[NONSTATIC_OOP] + 1;
nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
- THREAD, int, nonstatic_oop_count + 1);
+ THREAD, int, max_nonstatic_oop_maps);
nonstatic_oop_counts = NEW_RESOURCE_ARRAY_IN_THREAD(
- THREAD, unsigned int, nonstatic_oop_count + 1);
+ THREAD, unsigned int, max_nonstatic_oop_maps);
first_nonstatic_oop_offset = 0; // will be set for first oop field
-#ifndef PRODUCT
- if( PrintCompactFieldsSavings ) {
- next_nonstatic_double_offset = next_nonstatic_field_offset +
- (nonstatic_oop_count * heapOopSize);
- if ( nonstatic_double_count > 0 ) {
- next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong);
- }
- next_nonstatic_word_offset = next_nonstatic_double_offset +
- (nonstatic_double_count * BytesPerLong);
- next_nonstatic_short_offset = next_nonstatic_word_offset +
- (nonstatic_word_count * BytesPerInt);
- next_nonstatic_byte_offset = next_nonstatic_short_offset +
- (nonstatic_short_count * BytesPerShort);
- next_nonstatic_type_offset = align_size_up((next_nonstatic_byte_offset +
- nonstatic_byte_count ), heapOopSize );
- orig_nonstatic_field_size = nonstatic_field_size +
- ((next_nonstatic_type_offset - first_nonstatic_field_offset)/heapOopSize);
- }
-#endif
bool compact_fields = CompactFields;
int allocation_style = FieldsAllocationStyle;
if( allocation_style < 0 || allocation_style > 2 ) { // Out of range?
@@ -3251,6 +3272,7 @@ void ClassFileParser::layout_fields(Handle class_loader,
compact_fields = false; // Don't compact fields
}
+ // Rearrange fields for a given allocation style
if( allocation_style == 0 ) {
// Fields order: oops, longs/doubles, ints, shorts/chars, bytes, padded fields
next_nonstatic_oop_offset = next_nonstatic_field_offset;
@@ -3291,6 +3313,8 @@ void ClassFileParser::layout_fields(Handle class_loader,
int nonstatic_short_space_offset;
int nonstatic_byte_space_offset;
+ // Try to squeeze some of the fields into the gaps due to
+ // long/double alignment.
if( nonstatic_double_count > 0 ) {
int offset = next_nonstatic_double_offset;
next_nonstatic_double_offset = align_size_up(offset, BytesPerLong);
@@ -3400,9 +3424,11 @@ void ClassFileParser::layout_fields(Handle class_loader,
int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
heapOopSize ) {
// Extend current oop map
+ assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check");
nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
} else {
// Create new oop map
+ assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
nonstatic_oop_map_count += 1;
@@ -3460,12 +3486,10 @@ void ClassFileParser::layout_fields(Handle class_loader,
//
// Additionally, this should not break alignment for the fields, so we round the alignment up
// for each field.
- if (contended_count > 0) {
+ if (nonstatic_contended_count > 0) {
// if there is at least one contended field, we need to have pre-padding for them
- if (nonstatic_contended_count > 0) {
- next_nonstatic_padded_offset += pad_size;
- }
+ next_nonstatic_padded_offset += ContendedPaddingWidth;
// collect all contended groups
BitMap bm(_cp->size());
@@ -3526,6 +3550,7 @@ void ClassFileParser::layout_fields(Handle class_loader,
next_nonstatic_padded_offset += heapOopSize;
// Create new oop map
+ assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
nonstatic_oop_map_count += 1;
@@ -3543,7 +3568,7 @@ void ClassFileParser::layout_fields(Handle class_loader,
// the fields within the same contended group are not inter-padded.
// The only exception is default group, which does not incur the
// equivalence, and so requires intra-padding.
- next_nonstatic_padded_offset += pad_size;
+ next_nonstatic_padded_offset += ContendedPaddingWidth;
}
fs.set_offset(real_offset);
@@ -3555,67 +3580,59 @@ void ClassFileParser::layout_fields(Handle class_loader,
// subclass fields and/or adjacent object.
// If this was the default group, the padding is already in place.
if (current_group != 0) {
- next_nonstatic_padded_offset += pad_size;
+ next_nonstatic_padded_offset += ContendedPaddingWidth;
}
}
// handle static fields
}
- // Size of instances
- int notaligned_offset = next_nonstatic_padded_offset;
-
// Entire class is contended, pad in the back.
// This helps to alleviate memory contention effects for subclass fields
// and/or adjacent object.
- if (parsed_annotations->is_contended()) {
- notaligned_offset += pad_size;
+ if (is_contended_class) {
+ next_nonstatic_padded_offset += ContendedPaddingWidth;
}
- int next_static_type_offset = align_size_up(next_static_byte_offset, wordSize);
- int static_field_size = (next_static_type_offset -
- InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
+ int notaligned_nonstatic_fields_end = next_nonstatic_padded_offset;
- next_nonstatic_type_offset = align_size_up(notaligned_offset, heapOopSize );
- nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
- - first_nonstatic_field_offset)/heapOopSize);
+ int nonstatic_fields_end = align_size_up(notaligned_nonstatic_fields_end, heapOopSize);
+ int instance_end = align_size_up(notaligned_nonstatic_fields_end, wordSize);
+ int static_fields_end = align_size_up(next_static_byte_offset, wordSize);
- next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize );
- int instance_size = align_object_size(next_nonstatic_type_offset / wordSize);
+ int static_field_size = (static_fields_end -
+ InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
+ nonstatic_field_size = nonstatic_field_size +
+ (nonstatic_fields_end - nonstatic_fields_start) / heapOopSize;
+
+ int instance_size = align_object_size(instance_end / wordSize);
assert(instance_size == align_object_size(align_size_up(
- (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize + ((parsed_annotations->is_contended()) ? pad_size : 0)),
+ (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize),
wordSize) / wordSize), "consistent layout helper value");
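+ // Worked example (hypothetical numbers; 8-byte words and the default
+ // 8-byte object alignment): if the last field ends at byte 41, then
+ // instance_end = align_size_up(41, wordSize) = 48, so instance_size is
+ // align_object_size(48 / 8) = 6 words.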
+ // Invariant: nonstatic_field end/start should only change if there are
+ // nonstatic fields in the class, or if the class is contended. We compare
+ // against the non-aligned value, so that end alignment will not fail the
+ // assert without actually having the fields.
+ assert((notaligned_nonstatic_fields_end == nonstatic_fields_start) ||
+ is_contended_class ||
+ (nonstatic_fields_count > 0), "double-check nonstatic start/end");
+
// Number of non-static oop map blocks allocated at end of klass.
const unsigned int total_oop_map_count =
compute_oop_map_count(_super_klass, nonstatic_oop_map_count,
first_nonstatic_oop_offset);
#ifndef PRODUCT
- if( PrintCompactFieldsSavings ) {
- ResourceMark rm;
- if( nonstatic_field_size < orig_nonstatic_field_size ) {
- tty->print("[Saved %d of %d bytes in %s]\n",
- (orig_nonstatic_field_size - nonstatic_field_size)*heapOopSize,
- orig_nonstatic_field_size*heapOopSize,
- _class_name);
- } else if( nonstatic_field_size > orig_nonstatic_field_size ) {
- tty->print("[Wasted %d over %d bytes in %s]\n",
- (nonstatic_field_size - orig_nonstatic_field_size)*heapOopSize,
- orig_nonstatic_field_size*heapOopSize,
- _class_name);
- }
- }
-
if (PrintFieldLayout) {
print_field_layout(_class_name,
_fields,
_cp,
instance_size,
- first_nonstatic_field_offset,
- next_nonstatic_field_offset,
- next_static_type_offset);
+ nonstatic_fields_start,
+ nonstatic_fields_end,
+ static_fields_end);
}
#endif
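A hedged Java illustration of the class-level @Contended handling above (a hypothetical class; assumes the default ContendedPaddingWidth of 128 bytes, and -XX:-RestrictContended for code outside the bootclasspath):

    // With @Contended on the class itself, the layout code pads both before
    // the first instance field and after the last one, so the instance
    // becomes [header][128-byte pad][value][128-byte pad] and "value" cannot
    // share a cache line with a neighboring object's fields.
    @sun.misc.Contended
    class PaddedCounter {
        volatile long value;
    }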
@@ -3645,8 +3662,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
// If RedefineClasses() was used before the retransformable
// agent attached, then the cached class bytes may not be the
// original class bytes.
- unsigned char *cached_class_file_bytes = NULL;
- jint cached_class_file_length;
+ JvmtiCachedClassFileData *cached_class_file = NULL;
Handle class_loader(THREAD, loader_data->class_loader());
bool has_default_methods = false;
ResourceMark rm(THREAD);
@@ -3678,10 +3694,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
if (h_class_being_redefined != NULL) {
instanceKlassHandle ikh_class_being_redefined =
instanceKlassHandle(THREAD, (*h_class_being_redefined)());
- cached_class_file_bytes =
- ikh_class_being_redefined->get_cached_class_file_bytes();
- cached_class_file_length =
- ikh_class_being_redefined->get_cached_class_file_len();
+ cached_class_file = ikh_class_being_redefined->get_cached_class_file();
}
}
@@ -3689,9 +3702,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
unsigned char* end_ptr = cfs->buffer() + cfs->length();
JvmtiExport::post_class_file_load_hook(name, class_loader(), protection_domain,
- &ptr, &end_ptr,
- &cached_class_file_bytes,
- &cached_class_file_length);
+ &ptr, &end_ptr, &cached_class_file);
if (ptr != cfs->buffer()) {
// JVMTI agent has modified class file data.
@@ -3984,9 +3995,8 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
this_klass->set_has_final_method();
}
this_klass->copy_method_ordering(method_ordering, CHECK_NULL);
- // The InstanceKlass::_methods_jmethod_ids cache and the
- // InstanceKlass::_methods_cached_itable_indices cache are
- // both managed on the assumption that the initial cache
+ // The InstanceKlass::_methods_jmethod_ids cache
+ // is managed on the assumption that the initial cache
// size is equal to the number of methods in the class. If
// that changes, then InstanceKlass::idnum_can_increment()
// has to be changed accordingly.
@@ -4009,10 +4019,9 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
}
}
- if (cached_class_file_bytes != NULL) {
+ if (cached_class_file != NULL) {
// JVMTI: we have an InstanceKlass now, tell it about the cached bytes
- this_klass->set_cached_class_file(cached_class_file_bytes,
- cached_class_file_length);
+ this_klass->set_cached_class_file(cached_class_file);
}
// Fill in field values obtained by parse_classfile_attributes
@@ -4063,12 +4072,9 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
}
}
+ // Allocate mirror and initialize static fields
+ java_lang_Class::create_mirror(this_klass, protection_domain, CHECK_(nullHandle));
-#ifdef ASSERT
- if (ParseAllGenericSignatures) {
- parseAndPrintGenericSignatures(this_klass, CHECK_(nullHandle));
- }
-#endif
// Generate any default methods - default methods are interface methods
// that have a default implementation. This is new with Lambda project.
@@ -4078,17 +4084,6 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
this_klass(), &all_mirandas, CHECK_(nullHandle));
}
- // Allocate mirror and initialize static fields
- java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle));
-
- // Allocate a simple java object for locking during class initialization.
- // This needs to be a java object because it can be held across a java call.
- typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_NULL);
- this_klass->set_init_lock(r);
-
- // TODO: Move these oops to the mirror
- this_klass->set_protection_domain(protection_domain());
-
// Update the loader_data graph.
record_defined_class_dependencies(this_klass, CHECK_NULL);
diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp
index 8f0707472..02a4ce20d 100644
--- a/src/share/vm/classfile/classFileParser.hpp
+++ b/src/share/vm/classfile/classFileParser.hpp
@@ -62,8 +62,8 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
bool _synthetic_flag;
int _sde_length;
char* _sde_buffer;
- Symbol* _sourcefile;
- Symbol* _generic_signature;
+ u2 _sourcefile_index;
+ u2 _generic_signature_index;
// Metadata created before the instance klass is created. Must be deallocated
// if not transferred to the InstanceKlass upon successful class loading
@@ -81,16 +81,16 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
Array<AnnotationArray*>* _fields_type_annotations;
InstanceKlass* _klass; // InstanceKlass once created.
- void set_class_synthetic_flag(bool x) { _synthetic_flag = x; }
- void set_class_sourcefile(Symbol* x) { _sourcefile = x; }
- void set_class_generic_signature(Symbol* x) { _generic_signature = x; }
- void set_class_sde_buffer(char* x, int len) { _sde_buffer = x; _sde_length = len; }
+ void set_class_synthetic_flag(bool x) { _synthetic_flag = x; }
+ void set_class_sourcefile_index(u2 x) { _sourcefile_index = x; }
+ void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; }
+ void set_class_sde_buffer(char* x, int len) { _sde_buffer = x; _sde_length = len; }
void init_parsed_class_attributes(ClassLoaderData* loader_data) {
_loader_data = loader_data;
_synthetic_flag = false;
- _sourcefile = NULL;
- _generic_signature = NULL;
+ _sourcefile_index = 0;
+ _generic_signature_index = 0;
_sde_buffer = NULL;
_sde_length = 0;
// initialize the other flags too:
@@ -125,6 +125,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
_method_LambdaForm_Compiled,
_method_LambdaForm_Hidden,
_sun_misc_Contended,
+ _field_Stable,
_annotation_LIMIT
};
const Location _location;
@@ -143,14 +144,23 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
_annotations_present |= nth_bit((int)id);
}
+
+ void remove_annotation(ID id) {
+ assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
+ _annotations_present &= ~nth_bit((int)id);
+ }
+
// Report if the annotation is present.
- bool has_any_annotations() { return _annotations_present != 0; }
- bool has_annotation(ID id) { return (nth_bit((int)id) & _annotations_present) != 0; }
+ bool has_any_annotations() const { return _annotations_present != 0; }
+ bool has_annotation(ID id) const { return (nth_bit((int)id) & _annotations_present) != 0; }
void set_contended_group(u2 group) { _contended_group = group; }
- u2 contended_group() { return _contended_group; }
+ u2 contended_group() const { return _contended_group; }
+
+ bool is_contended() const { return has_annotation(_sun_misc_Contended); }
- bool is_contended() { return has_annotation(_sun_misc_Contended); }
+ void set_stable(bool stable) { set_annotation(_field_Stable); }
+ bool is_stable() const { return has_annotation(_field_Stable); }
};
// This class also doubles as a holder for metadata cleanup.
diff --git a/src/share/vm/classfile/classLoader.cpp b/src/share/vm/classfile/classLoader.cpp
index 650a299f3..0ab350d90 100644
--- a/src/share/vm/classfile/classLoader.cpp
+++ b/src/share/vm/classfile/classLoader.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -197,7 +197,7 @@ ClassPathDirEntry::ClassPathDirEntry(char* dir) : ClassPathEntry() {
}
-ClassFileStream* ClassPathDirEntry::open_stream(const char* name) {
+ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
// construct full path name
char path[JVM_MAXPATHLEN];
if (jio_snprintf(path, sizeof(path), "%s%s%s", _dir, os::file_separator(), name) == -1) {
@@ -240,7 +240,7 @@ ClassPathZipEntry::~ClassPathZipEntry() {
FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
}
-ClassFileStream* ClassPathZipEntry::open_stream(const char* name) {
+ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
// enable call to C land
JavaThread* thread = JavaThread::current();
ThreadToNativeFromVM ttn(thread);
@@ -284,24 +284,24 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi
}
}
-LazyClassPathEntry::LazyClassPathEntry(char* path, struct stat st) : ClassPathEntry() {
+LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
_path = strdup(path);
- _st = st;
+ _st = *st;
_meta_index = NULL;
_resolved_entry = NULL;
+ _has_error = false;
}
bool LazyClassPathEntry::is_jar_file() {
return ((_st.st_mode & S_IFREG) == S_IFREG);
}
-ClassPathEntry* LazyClassPathEntry::resolve_entry() {
+ClassPathEntry* LazyClassPathEntry::resolve_entry(TRAPS) {
if (_resolved_entry != NULL) {
return (ClassPathEntry*) _resolved_entry;
}
ClassPathEntry* new_entry = NULL;
- ClassLoader::create_class_path_entry(_path, _st, &new_entry, false);
- assert(new_entry != NULL, "earlier code should have caught this");
+ new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL);
{
ThreadCritical tc;
if (_resolved_entry == NULL) {
@@ -314,12 +314,21 @@ ClassPathEntry* LazyClassPathEntry::resolve_entry() {
return (ClassPathEntry*) _resolved_entry;
}
-ClassFileStream* LazyClassPathEntry::open_stream(const char* name) {
+ClassFileStream* LazyClassPathEntry::open_stream(const char* name, TRAPS) {
if (_meta_index != NULL &&
!_meta_index->may_contain(name)) {
return NULL;
}
- return resolve_entry()->open_stream(name);
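+ // If resolving this lazy entry failed once already, fail fast instead of
+ // re-attempting (and re-throwing) on every subsequent lookup.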
+ if (_has_error) {
+ return NULL;
+ }
+ ClassPathEntry* cpe = resolve_entry(THREAD);
+ if (cpe == NULL) {
+ _has_error = true;
+ return NULL;
+ } else {
+ return cpe->open_stream(name, THREAD);
+ }
}
bool LazyClassPathEntry::is_lazy() {
@@ -465,20 +474,19 @@ void ClassLoader::setup_bootstrap_search_path() {
}
}
-void ClassLoader::create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy) {
+ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
JavaThread* thread = JavaThread::current();
if (lazy) {
- *new_entry = new LazyClassPathEntry(path, st);
- return;
+ return new LazyClassPathEntry(path, st);
}
- if ((st.st_mode & S_IFREG) == S_IFREG) {
+ ClassPathEntry* new_entry = NULL;
+ if ((st->st_mode & S_IFREG) == S_IFREG) {
// Regular file, should be a zip file
// Canonicalized filename
char canonical_path[JVM_MAXPATHLEN];
if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
// This matches the classic VM
- EXCEPTION_MARK;
- THROW_MSG(vmSymbols::java_io_IOException(), "Bad pathname");
+ THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
}
char* error_msg = NULL;
jzfile* zip;
@@ -489,7 +497,7 @@ void ClassLoader::create_class_path_entry(char *path, struct stat st, ClassPathE
zip = (*ZipOpen)(canonical_path, &error_msg);
}
if (zip != NULL && error_msg == NULL) {
- *new_entry = new ClassPathZipEntry(zip, path);
+ new_entry = new ClassPathZipEntry(zip, path);
if (TraceClassLoading) {
tty->print_cr("[Opened %s]", path);
}
@@ -504,16 +512,16 @@ void ClassLoader::create_class_path_entry(char *path, struct stat st, ClassPathE
msg = NEW_RESOURCE_ARRAY(char, len);
jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
}
- EXCEPTION_MARK;
- THROW_MSG(vmSymbols::java_lang_ClassNotFoundException(), msg);
+ THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
}
} else {
// Directory
- *new_entry = new ClassPathDirEntry(path);
+ new_entry = new ClassPathDirEntry(path);
if (TraceClassLoading) {
tty->print_cr("[Path %s]", path);
}
}
+ return new_entry;
}
@@ -572,13 +580,14 @@ void ClassLoader::add_to_list(ClassPathEntry *new_entry) {
}
}
-void ClassLoader::update_class_path_entry_list(const char *path,
+void ClassLoader::update_class_path_entry_list(char *path,
bool check_for_duplicates) {
struct stat st;
- if (os::stat((char *)path, &st) == 0) {
+ if (os::stat(path, &st) == 0) {
// File or directory found
ClassPathEntry* new_entry = NULL;
- create_class_path_entry((char *)path, st, &new_entry, LazyBootClassLoader);
+ Thread* THREAD = Thread::current();
+ new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK);
// The kernel VM adds dynamically to the end of the classloader path and
// doesn't reorder the bootclasspath which would break java.lang.Package
// (see PackageInfo).
@@ -878,7 +887,7 @@ objArrayOop ClassLoader::get_system_packages(TRAPS) {
instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
ResourceMark rm(THREAD);
- EventMark m("loading class " INTPTR_FORMAT, (address)h_name);
+ EventMark m("loading class %s", h_name->as_C_string());
ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
stringStream st;
@@ -897,7 +906,7 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
PerfClassTraceTime::CLASS_LOAD);
ClassPathEntry* e = _first_entry;
while (e != NULL) {
- stream = e->open_stream(name);
+ stream = e->open_stream(name, CHECK_NULL);
if (stream != NULL) {
break;
}
@@ -1257,11 +1266,16 @@ bool ClassPathZipEntry::is_rt_jar12() {
}
void LazyClassPathEntry::compile_the_world(Handle loader, TRAPS) {
- resolve_entry()->compile_the_world(loader, CHECK);
+ ClassPathEntry* cpe = resolve_entry(THREAD);
+ if (cpe != NULL) {
+ cpe->compile_the_world(loader, CHECK);
+ }
}
bool LazyClassPathEntry::is_rt_jar() {
- return resolve_entry()->is_rt_jar();
+ Thread* THREAD = Thread::current();
+ ClassPathEntry* cpe = resolve_entry(THREAD);
+ return (cpe != NULL) ? cpe->is_jar_file() : false;
}
void ClassLoader::compile_the_world() {
@@ -1305,6 +1319,25 @@ static void clear_pending_exception_if_not_oom(TRAPS) {
// The CHECK at the caller will propagate the exception out
}
+/**
+ * Returns whether the given method should be compiled when doing compile-the-world.
+ *
+ * TODO: This should be a private method in a CompileTheWorld class.
+ */
+static bool can_be_compiled(methodHandle m, int comp_level) {
+ assert(CompileTheWorld, "must be");
+
+ // It's not valid to compile a native wrapper for MethodHandle methods
+ // that take a MemberName appendix since the bytecode signature is not
+ // correct.
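+ // (For example, the MethodHandle.linkTo* intrinsics, whose trailing
+ // argument is the MemberName appendix.)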
+ vmIntrinsics::ID iid = m->intrinsic_id();
+ if (MethodHandles::is_signature_polymorphic(iid) && MethodHandles::has_member_arg(iid)) {
+ return false;
+ }
+
+ return CompilationPolicy::can_be_compiled(m, comp_level);
+}
+
void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
int len = (int)strlen(name);
if (len > 6 && strcmp(".class", name + len - 6) == 0) {
@@ -1348,8 +1381,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
int comp_level = CompilationPolicy::policy()->initial_compile_level();
for (int n = 0; n < k->methods()->length(); n++) {
methodHandle m (THREAD, k->methods()->at(n));
- if (CompilationPolicy::can_be_compiled(m, comp_level)) {
-
+ if (can_be_compiled(m, comp_level)) {
if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
// Give sweeper a chance to keep up with CTW
VM_ForceSafepoint op;
@@ -1361,7 +1393,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
methodHandle(), 0, "CTW", THREAD);
if (HAS_PENDING_EXCEPTION) {
clear_pending_exception_if_not_oom(CHECK);
- tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
+ tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
} else {
_compile_the_world_method_counter++;
}
@@ -1377,11 +1409,13 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
methodHandle(), 0, "CTW", THREAD);
if (HAS_PENDING_EXCEPTION) {
clear_pending_exception_if_not_oom(CHECK);
- tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
+ tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
} else {
_compile_the_world_method_counter++;
}
}
+ } else {
+ tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
}
nmethod* nm = m->code();
diff --git a/src/share/vm/classfile/classLoader.hpp b/src/share/vm/classfile/classLoader.hpp
index 786914cad..e03cfad1b 100644
--- a/src/share/vm/classfile/classLoader.hpp
+++ b/src/share/vm/classfile/classLoader.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,7 @@ class ClassPathEntry: public CHeapObj<mtClass> {
ClassPathEntry();
// Attempt to locate file_name through this class path entry.
// Returns a class file parsing stream if successful.
- virtual ClassFileStream* open_stream(const char* name) = 0;
+ virtual ClassFileStream* open_stream(const char* name, TRAPS) = 0;
// Debugging
NOT_PRODUCT(virtual void compile_the_world(Handle loader, TRAPS) = 0;)
NOT_PRODUCT(virtual bool is_rt_jar() = 0;)
@@ -77,7 +77,7 @@ class ClassPathDirEntry: public ClassPathEntry {
bool is_jar_file() { return false; }
const char* name() { return _dir; }
ClassPathDirEntry(char* dir);
- ClassFileStream* open_stream(const char* name);
+ ClassFileStream* open_stream(const char* name, TRAPS);
// Debugging
NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
NOT_PRODUCT(bool is_rt_jar();)
@@ -107,7 +107,7 @@ class ClassPathZipEntry: public ClassPathEntry {
const char* name() { return _zip_name; }
ClassPathZipEntry(jzfile* zip, const char* zip_name);
~ClassPathZipEntry();
- ClassFileStream* open_stream(const char* name);
+ ClassFileStream* open_stream(const char* name, TRAPS);
void contents_do(void f(const char* name, void* context), void* context);
// Debugging
NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
@@ -125,13 +125,14 @@ class LazyClassPathEntry: public ClassPathEntry {
char* _path; // dir or file
struct stat _st;
MetaIndex* _meta_index;
+ bool _has_error;
volatile ClassPathEntry* _resolved_entry;
- ClassPathEntry* resolve_entry();
+ ClassPathEntry* resolve_entry(TRAPS);
public:
bool is_jar_file();
const char* name() { return _path; }
- LazyClassPathEntry(char* path, struct stat st);
- ClassFileStream* open_stream(const char* name);
+ LazyClassPathEntry(char* path, const struct stat* st);
+ ClassFileStream* open_stream(const char* name, TRAPS);
void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
virtual bool is_lazy();
// Debugging
@@ -207,14 +208,15 @@ class ClassLoader: AllStatic {
static void setup_meta_index();
static void setup_bootstrap_search_path();
static void load_zip_library();
- static void create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy);
+ static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
+ bool lazy, TRAPS);
// Canonicalizes path names, so strcmp will work properly. This is mainly
// to avoid confusing the zip library
static bool get_canonical_path(char* orig, char* out, int len);
public:
// Used by the kernel jvm.
- static void update_class_path_entry_list(const char *path,
+ static void update_class_path_entry_list(char *path,
bool check_for_duplicates);
static void print_bootclasspath();
diff --git a/src/share/vm/classfile/classLoaderData.cpp b/src/share/vm/classfile/classLoaderData.cpp
index c030645f6..1d39afd7f 100644
--- a/src/share/vm/classfile/classLoaderData.cpp
+++ b/src/share/vm/classfile/classLoaderData.cpp
@@ -64,6 +64,11 @@
#include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp"
+#if INCLUDE_TRACE
+ #include "trace/tracing.hpp"
+#endif
+
+
ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
@@ -120,6 +125,12 @@ void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
}
}
+void ClassLoaderData::classes_do(void f(Klass * const)) {
+ for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+ f(k);
+ }
+}
+
void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
if (k->oop_is_instance()) {
@@ -583,6 +594,19 @@ void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
}
}
+void ClassLoaderDataGraph::classes_do(void f(Klass* const)) {
+ for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+ cld->classes_do(f);
+ }
+}
+
+void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
+ for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
+ cld->classes_do(f);
+ }
+}
+
GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
assert(_head == NULL || _saved_head != NULL, "remember_new_clds(true) not called?");
@@ -687,6 +711,11 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
dead->set_next(_unloading);
_unloading = dead;
}
+
+ if (seen_dead_loader) {
+ post_class_unload_events();
+ }
+
return seen_dead_loader;
}
@@ -702,6 +731,20 @@ void ClassLoaderDataGraph::purge() {
Metaspace::purge();
}
+void ClassLoaderDataGraph::post_class_unload_events(void) {
+#if INCLUDE_TRACE
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
+ if (Tracing::enabled()) {
+ if (Tracing::is_event_enabled(TraceClassUnloadEvent)) {
+ assert(_unloading != NULL, "need class loader data unload list!");
+ _class_unload_time = Tracing::time();
+ classes_unloading_do(&class_unload_event);
+ }
+ Tracing::on_unloading_classes();
+ }
+#endif
+}
+
// CDS support
// Global metaspaces for writing information to the shared archive. When
@@ -769,3 +812,21 @@ void ClassLoaderData::print_value_on(outputStream* out) const {
class_loader()->print_value_on(out);
}
}
+
+#if INCLUDE_TRACE
+
+TracingTime ClassLoaderDataGraph::_class_unload_time;
+
+void ClassLoaderDataGraph::class_unload_event(Klass* const k) {
+
+ // post class unload event
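+ // UNTIMED: every unload event posted from this safepoint shares the
+ // timestamp captured in post_class_unload_events(), supplied below
+ // via set_endtime().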
+ EventClassUnload event(UNTIMED);
+ event.set_endtime(_class_unload_time);
+ event.set_unloadedClass(k);
+ oop defining_class_loader = k->class_loader();
+ event.set_definingClassLoader(defining_class_loader != NULL ?
+ defining_class_loader->klass() : (Klass*)NULL);
+ event.commit();
+}
+
+#endif /* INCLUDE_TRACE */
diff --git a/src/share/vm/classfile/classLoaderData.hpp b/src/share/vm/classfile/classLoaderData.hpp
index 2a7e43082..6d5747483 100644
--- a/src/share/vm/classfile/classLoaderData.hpp
+++ b/src/share/vm/classfile/classLoaderData.hpp
@@ -32,6 +32,10 @@
#include "runtime/mutex.hpp"
#include "utilities/growableArray.hpp"
+#if INCLUDE_TRACE
+# include "trace/traceTime.hpp"
+#endif
+
//
// A class loader represents a linkset. Conceptually, a linkset identifies
// the complete transitive closure of resolved links that a dynamic linker can
@@ -49,6 +53,7 @@ class ClassLoaderData;
class JNIMethodBlock;
class JNIHandleBlock;
class Metadebug;
+
// GC root for walking class loader data created
class ClassLoaderDataGraph : public AllStatic {
@@ -63,6 +68,7 @@ class ClassLoaderDataGraph : public AllStatic {
static ClassLoaderData* _saved_head;
static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
+ static void post_class_unload_events(void);
public:
static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
static void purge();
@@ -71,6 +77,8 @@ class ClassLoaderDataGraph : public AllStatic {
static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void classes_do(KlassClosure* klass_closure);
+ static void classes_do(void f(Klass* const));
+ static void classes_unloading_do(void f(Klass* const));
static bool do_unloading(BoolObjectClosure* is_alive);
// CMS support.
@@ -86,6 +94,12 @@ class ClassLoaderDataGraph : public AllStatic {
static bool contains(address x);
static bool contains_loader_data(ClassLoaderData* loader_data);
#endif
+
+#if INCLUDE_TRACE
+ private:
+ static TracingTime _class_unload_time;
+ static void class_unload_event(Klass* const k);
+#endif
};
// ClassLoaderData class
@@ -171,7 +185,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
void unload();
bool keep_alive() const { return _keep_alive; }
bool is_alive(BoolObjectClosure* is_alive_closure) const;
-
+ void classes_do(void f(Klass*));
void classes_do(void f(InstanceKlass*));
// Deallocate free list during class unloading.
diff --git a/src/share/vm/classfile/defaultMethods.cpp b/src/share/vm/classfile/defaultMethods.cpp
index 1977b07ea..99a45b55b 100644
--- a/src/share/vm/classfile/defaultMethods.cpp
+++ b/src/share/vm/classfile/defaultMethods.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "classfile/bytecodeAssembler.hpp"
#include "classfile/defaultMethods.hpp"
-#include "classfile/genericSignatures.hpp"
#include "classfile/symbolTable.hpp"
#include "memory/allocation.hpp"
#include "memory/metadataFactory.hpp"
@@ -75,14 +74,6 @@ class PseudoScope : public ResourceObj {
}
};
-class ContextMark : public PseudoScopeMark {
- private:
- generic::Context::Mark _mark;
- public:
- ContextMark(const generic::Context::Mark& cm) : _mark(cm) {}
- virtual void destroy() { _mark.destroy(); }
-};
-
#ifndef PRODUCT
static void print_slot(outputStream* str, Symbol* name, Symbol* signature) {
ResourceMark rm;
@@ -318,17 +309,17 @@ class KeepAliveVisitor : public HierarchyVisitor<KeepAliveVisitor> {
}
};
+
// A method family contains a set of all methods that implement a single
-// language-level method. Because of erasure, these methods may have different
-// signatures. As members of the set are collected while walking over the
+// erased method. As members of the set are collected while walking over the
// hierarchy, they are tagged with a qualification state. The qualification
// state for an erased method is set to disqualified if there exists a path
// from the root of the hierarchy to the method that contains an interleaving
-// language-equivalent method defined in an interface.
+// erased method defined in an interface.
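+// For example (hypothetical types): given
+//   interface I { default void f() {} }
+//   interface J extends I { default void f() {} }
+//   class K implements I, J {}
+// the walk from K records J.f() as qualified, while I.f() is disqualified
+// because the path K -> J -> I passes through J's redeclaration; J.f() is
+// therefore the unique candidate for K.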
+
class MethodFamily : public ResourceObj {
private:
- generic::MethodDescriptor* _descriptor; // language-level description
GrowableArray<Pair<Method*,QualifiedState> > _members;
ResourceHashtable<Method*, int> _member_index;
@@ -358,15 +349,8 @@ class MethodFamily : public ResourceObj {
public:
- MethodFamily(generic::MethodDescriptor* canonical_desc)
- : _descriptor(canonical_desc), _selected_target(NULL),
- _exception_message(NULL) {}
-
- generic::MethodDescriptor* descriptor() const { return _descriptor; }
-
- bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
- return descriptor()->covariant_match(md, ctx);
- }
+ MethodFamily()
+ : _selected_target(NULL), _exception_message(NULL) {}
void set_target_if_empty(Method* m) {
if (_selected_target == NULL && !m->is_overpass()) {
@@ -441,16 +425,10 @@ class MethodFamily : public ResourceObj {
}
#ifndef PRODUCT
- void print_on(outputStream* str) const {
- print_on(str, 0);
- }
-
- void print_on(outputStream* str, int indent) const {
+ void print_sig_on(outputStream* str, Symbol* signature, int indent) const {
streamIndentor si(str, indent * 2);
- generic::Context ctx(NULL); // empty, as _descriptor already canonicalized
- TempNewSymbol family = descriptor()->reify_signature(&ctx, Thread::current());
- str->indent().print_cr("Logical Method %s:", family->as_C_string());
+ str->indent().print_cr("Logical Method %s:", signature->as_C_string());
streamIndentor si2(str);
for (int i = 0; i < _members.length(); ++i) {
@@ -472,6 +450,10 @@ class MethodFamily : public ResourceObj {
streamIndentor si(str, indent * 2);
str->indent().print("Selected method: ");
print_method(str, _selected_target);
+ Klass* method_holder = _selected_target->method_holder();
+ if (!method_holder->is_interface()) {
+ tty->print(" : in superclass");
+ }
str->print_cr("");
}
@@ -516,34 +498,38 @@ Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods
return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
}
+
class StateRestorer;
-// StatefulMethodFamily is a wrapper around MethodFamily that maintains the
+// StatefulMethodFamily is a wrapper around a MethodFamily that maintains the
// qualification state during hierarchy visitation, and applies that state
-// when adding members to the MethodFamily.
+// when adding members to the MethodFamily
class StatefulMethodFamily : public ResourceObj {
friend class StateRestorer;
private:
- MethodFamily* _method;
QualifiedState _qualification_state;
void set_qualification_state(QualifiedState state) {
_qualification_state = state;
}
+ protected:
+ MethodFamily* _method_family;
+
public:
- StatefulMethodFamily(generic::MethodDescriptor* md, generic::Context* ctx) {
- _method = new MethodFamily(md->canonicalize(ctx));
- _qualification_state = QUALIFIED;
+ StatefulMethodFamily() {
+ _method_family = new MethodFamily();
+ _qualification_state = QUALIFIED;
}
- void set_target_if_empty(Method* m) { _method->set_target_if_empty(m); }
+ StatefulMethodFamily(MethodFamily* mf) {
+ _method_family = mf;
+ _qualification_state = QUALIFIED;
+ }
- MethodFamily* get_method_family() { return _method; }
+ void set_target_if_empty(Method* m) { _method_family->set_target_if_empty(m); }
- bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
- return _method->descriptor_matches(md, ctx);
- }
+ MethodFamily* get_method_family() { return _method_family; }
StateRestorer* record_method_and_dq_further(Method* mo);
};
@@ -563,9 +549,9 @@ class StateRestorer : public PseudoScopeMark {
StateRestorer* StatefulMethodFamily::record_method_and_dq_further(Method* mo) {
StateRestorer* mark = new StateRestorer(this, _qualification_state);
if (_qualification_state == QUALIFIED) {
- _method->record_qualified_method(mo);
+ _method_family->record_qualified_method(mo);
} else {
- _method->record_disqualified_method(mo);
+ _method_family->record_disqualified_method(mo);
}
// Everything found "above"??? this method in the hierarchy walk is set to
// disqualified
@@ -573,39 +559,6 @@ StateRestorer* StatefulMethodFamily::record_method_and_dq_further(Method* mo) {
return mark;
}
-class StatefulMethodFamilies : public ResourceObj {
- private:
- GrowableArray<StatefulMethodFamily*> _methods;
-
- public:
- StatefulMethodFamily* find_matching(
- generic::MethodDescriptor* md, generic::Context* ctx) {
- for (int i = 0; i < _methods.length(); ++i) {
- StatefulMethodFamily* existing = _methods.at(i);
- if (existing->descriptor_matches(md, ctx)) {
- return existing;
- }
- }
- return NULL;
- }
-
- StatefulMethodFamily* find_matching_or_create(
- generic::MethodDescriptor* md, generic::Context* ctx) {
- StatefulMethodFamily* method = find_matching(md, ctx);
- if (method == NULL) {
- method = new StatefulMethodFamily(md, ctx);
- _methods.append(method);
- }
- return method;
- }
-
- void extract_families_into(GrowableArray<MethodFamily*>* array) {
- for (int i = 0; i < _methods.length(); ++i) {
- array->append(_methods.at(i)->get_method_family());
- }
- }
-};
-
// Represents a location corresponding to a vtable slot for methods that
// neither the class nor any of its ancestors provide an implementation.
// Default methods may be present to fill this slot.
@@ -683,27 +636,26 @@ static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
return slots;
}
-// Iterates over the type hierarchy looking for all methods with a specific
-// method name. The result of this is a set of method families each of
-// which is populated with a set of methods that implement the same
-// language-level signature.
-class FindMethodsByName : public HierarchyVisitor<FindMethodsByName> {
+// Iterates over the superinterface type hierarchy looking for all methods
+// with a specific erased signature.
+class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
private:
// Context data
- Thread* THREAD;
- generic::DescriptorCache* _cache;
Symbol* _method_name;
- generic::Context* _ctx;
- StatefulMethodFamilies _families;
+ Symbol* _method_signature;
+ StatefulMethodFamily* _family;
public:
+ FindMethodsByErasedSig(Symbol* name, Symbol* signature) :
+ _method_name(name), _method_signature(signature),
+ _family(NULL) {}
- FindMethodsByName(generic::DescriptorCache* cache, Symbol* name,
- generic::Context* ctx, Thread* thread) :
- _cache(cache), _method_name(name), _ctx(ctx), THREAD(thread) {}
-
- void get_discovered_families(GrowableArray<MethodFamily*>* methods) {
- _families.extract_families_into(methods);
+ void get_discovered_family(MethodFamily** family) {
+ if (_family != NULL) {
+ *family = _family->get_method_family();
+ } else {
+ *family = NULL;
+ }
}
void* new_node_data(InstanceKlass* cls) { return new PseudoScope(); }
@@ -711,92 +663,69 @@ class FindMethodsByName : public HierarchyVisitor<FindMethodsByName> {
PseudoScope::cast(node_data)->destroy();
}
+ // Find all methods on this hierarchy that match this
+ // method's erased (name, signature)
bool visit() {
PseudoScope* scope = PseudoScope::cast(current_data());
- InstanceKlass* klass = current_class();
- InstanceKlass* sub = current_depth() > 0 ? class_at_depth(1) : NULL;
-
- ContextMark* cm = new ContextMark(_ctx->mark());
- scope->add_mark(cm); // will restore context when scope is freed
-
- _ctx->apply_type_arguments(sub, klass, THREAD);
-
- int start, end = 0;
- start = klass->find_method_by_name(_method_name, &end);
- if (start != -1) {
- for (int i = start; i < end; ++i) {
- Method* m = klass->methods()->at(i);
- // This gets the method's parameter list with its generic type
- // parameters resolved
- generic::MethodDescriptor* md = _cache->descriptor_for(m, THREAD);
-
- // Find all methods on this hierarchy that match this method
- // (name, signature). This class collects other families of this
- // method name.
- StatefulMethodFamily* family =
- _families.find_matching_or_create(md, _ctx);
-
- if (klass->is_interface()) {
- // ???
- StateRestorer* restorer = family->record_method_and_dq_further(m);
- scope->add_mark(restorer);
- } else {
- // This is the rule that methods in classes "win" (bad word) over
- // methods in interfaces. This works because of single inheritance
- family->set_target_if_empty(m);
- }
+ InstanceKlass* iklass = current_class();
+
+ Method* m = iklass->find_method(_method_name, _method_signature);
+ if (m != NULL) {
+ if (_family == NULL) {
+ _family = new StatefulMethodFamily();
+ }
+
+ if (iklass->is_interface()) {
+ StateRestorer* restorer = _family->record_method_and_dq_further(m);
+ scope->add_mark(restorer);
+ } else {
+ // This is the rule that methods in classes "win" (bad word) over
+ // methods in interfaces. This works because of single inheritance
+ _family->set_target_if_empty(m);
}
}
return true;
}
+
};
-#ifndef PRODUCT
-static void print_families(
- GrowableArray<MethodFamily*>* methods, Symbol* match) {
- streamIndentor si(tty, 4);
- if (methods->length() == 0) {
- tty->indent();
- tty->print_cr("No Logical Method found");
- }
- for (int i = 0; i < methods->length(); ++i) {
- tty->indent();
- MethodFamily* lm = methods->at(i);
- if (lm->contains_signature(match)) {
- tty->print_cr("<Matching>");
- } else {
- tty->print_cr("<Non-Matching>");
- }
- lm->print_on(tty, 1);
+
+
+static void create_overpasses(
+ GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
+
+static void generate_erased_defaults(
+ InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
+ EmptyVtableSlot* slot, TRAPS) {
+
+ // sets up a set of methods with the same exact erased signature
+ FindMethodsByErasedSig visitor(slot->name(), slot->signature());
+ visitor.run(klass);
+
+ MethodFamily* family;
+ visitor.get_discovered_family(&family);
+ if (family != NULL) {
+ family->determine_target(klass, CHECK);
+ slot->bind_family(family);
}
}
-#endif // ndef PRODUCT
static void merge_in_new_methods(InstanceKlass* klass,
GrowableArray<Method*>* new_methods, TRAPS);
-static void create_overpasses(
- GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
// This is the guts of the default methods implementation. This is called just
// after the classfile has been parsed if some ancestor has default methods.
//
// First it finds any name/signature slots that need an implementation (either
// because they are miranda or a superclass's implementation is an overpass
-// itself). For each slot, iterate over the hierarchy, using generic signature
-// information to partition any methods that match the name into method families
-// where each family contains methods whose signatures are equivalent at the
-// language level (i.e., their reified parameters match and return values are
-// covariant). Check those sets to see if they contain a signature that matches
-// the slot we're looking at (if we're lucky, there might be other empty slots
-// that we can fill using the same analysis).
+// itself). For each slot, iterate over the hierarchy to see if it contains a
+// signature that matches the slot we are looking at.
//
// For each slot filled, we generate an overpass method that either calls the
// unique default method candidate using invokespecial, or throws an exception
// (in the case of no default method candidates, or more than one valid
-// candidate). These methods are then added to the class's method list. If
-// the method set we're using contains methods (qualified or not) with a
-// different runtime signature than the method we're creating, then we have to
-// create bridges with those signatures too.
+// candidate). These methods are then added to the class's method list.
+// The JVM does not create bridges nor handle generic signatures here.
void DefaultMethods::generate_default_methods(
InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
@@ -807,8 +736,6 @@ void DefaultMethods::generate_default_methods(
// whatever scope it's in.
ResourceMark rm(THREAD);
- generic::DescriptorCache cache;
-
// Keep entire hierarchy alive for the duration of the computation
KeepAliveRegistrar keepAlive(THREAD);
KeepAliveVisitor loadKeepAlive(&keepAlive);
@@ -837,47 +764,9 @@ void DefaultMethods::generate_default_methods(
tty->print_cr("");
}
#endif // ndef PRODUCT
- if (slot->is_bound()) {
-#ifndef PRODUCT
- if (TraceDefaultMethods) {
- streamIndentor si(tty, 4);
- tty->indent().print_cr("Already bound to logical method:");
- slot->get_binding()->print_on(tty, 1);
- }
-#endif // ndef PRODUCT
- continue; // covered by previous processing
- }
-
- generic::Context ctx(&cache);
- FindMethodsByName visitor(&cache, slot->name(), &ctx, CHECK);
- visitor.run(klass);
-
- GrowableArray<MethodFamily*> discovered_families;
- visitor.get_discovered_families(&discovered_families);
-
-#ifndef PRODUCT
- if (TraceDefaultMethods) {
- print_families(&discovered_families, slot->signature());
- }
-#endif // ndef PRODUCT
-
- // Find and populate any other slots that match the discovered families
- for (int j = i; j < empty_slots->length(); ++j) {
- EmptyVtableSlot* open_slot = empty_slots->at(j);
-
- if (slot->name() == open_slot->name()) {
- for (int k = 0; k < discovered_families.length(); ++k) {
- MethodFamily* lm = discovered_families.at(k);
-
- if (lm->contains_signature(open_slot->signature())) {
- lm->determine_target(klass, CHECK);
- open_slot->bind_family(lm);
- }
- }
- }
- }
- }
+ generate_erased_defaults(klass, empty_slots, slot, CHECK);
+ }
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr("Creating overpasses...");
@@ -893,15 +782,14 @@ void DefaultMethods::generate_default_methods(
#endif // ndef PRODUCT
}
-
/**
- * Generic analysis was used upon interface '_target' and found a unique
- * default method candidate with generic signature '_method_desc'. This
+ * Interface inheritance rules were used to find a unique default method
+ * candidate for the resolved class. This
* method is only viable if it would also be in the set of default method
* candidates if we ran a full analysis on the current class.
*
* The only reason that the method would not be in the set of candidates for
- * the current class is if that there's another covariantly matching method
+ * the current class is if that there's another matching method
* which is "more specific" than the found method -- i.e., one could find a
* path in the interface hierarchy in which the matching method appears
* before we get to '_target'.
@@ -912,49 +800,22 @@ void DefaultMethods::generate_default_methods(
* the selected method along that path.
*/
class ShadowChecker : public HierarchyVisitor<ShadowChecker> {
- private:
- generic::DescriptorCache* _cache;
+ protected:
Thread* THREAD;
InstanceKlass* _target;
Symbol* _method_name;
InstanceKlass* _method_holder;
- generic::MethodDescriptor* _method_desc;
bool _found_shadow;
- bool path_has_shadow() {
- generic::Context ctx(_cache);
-
- for (int i = current_depth() - 1; i > 0; --i) {
- InstanceKlass* ik = class_at_depth(i);
- InstanceKlass* sub = class_at_depth(i + 1);
- ctx.apply_type_arguments(sub, ik, THREAD);
-
- if (ik->is_interface()) {
- int end;
- int start = ik->find_method_by_name(_method_name, &end);
- if (start != -1) {
- for (int j = start; j < end; ++j) {
- Method* mo = ik->methods()->at(j);
- generic::MethodDescriptor* md = _cache->descriptor_for(mo, THREAD);
- if (_method_desc->covariant_match(md, &ctx)) {
- return true;
- }
- }
- }
- }
- }
- return false;
- }
public:
- ShadowChecker(generic::DescriptorCache* cache, Thread* thread,
- Symbol* name, InstanceKlass* holder, generic::MethodDescriptor* desc,
- InstanceKlass* target)
- : _cache(cache), THREAD(thread), _method_name(name), _method_holder(holder),
- _method_desc(desc), _target(target), _found_shadow(false) {}
+ ShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder,
+ InstanceKlass* target)
+ : THREAD(thread), _method_name(name), _method_holder(holder),
+ _target(target), _found_shadow(false) {}
void* new_node_data(InstanceKlass* cls) { return NULL; }
void free_node_data(void* data) { return; }
@@ -975,14 +836,105 @@ class ShadowChecker : public HierarchyVisitor<ShadowChecker> {
return true;
}
+ virtual bool path_has_shadow() = 0;
bool found_shadow() { return _found_shadow; }
};
+// Used for Invokespecial.
+// Invokespecial is allowed to invoke a concrete interface method
+// and can be used to disambiguate among qualified candidates,
+// which are methods in immediate superinterfaces,
+// but may not be used to invoke a candidate that would be shadowed
+// from the perspective of the caller.
+// Invokespecial is also used in overpass generation today.
+// We re-run the ShadowChecker because we can't distinguish this case,
+// but it should return the same answer, since the overpass target
+// is now the invokespecial caller.
+class ErasedShadowChecker : public ShadowChecker {
+ private:
+ bool path_has_shadow() {
+
+ for (int i = current_depth() - 1; i > 0; --i) {
+ InstanceKlass* ik = class_at_depth(i);
+
+ if (ik->is_interface()) {
+ int end;
+ int start = ik->find_method_by_name(_method_name, &end);
+ if (start != -1) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+ public:
+
+ ErasedShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder,
+ InstanceKlass* target)
+ : ShadowChecker(thread, name, holder, target) {}
+};
+
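A hedged Java sketch of the shadowing rule enforced here (hypothetical types; javac rejects the qualified call at compile time, but other bytecode generators can emit the equivalent invokespecial, which the VM must then reject at link time):

    interface A { default String m() { return "A"; } }
    interface B extends A { default String m() { return "B"; } }
    class C implements A, B {
        // A class file that performs invokespecial of A.m from C must fail
        // with AbstractMethodError: on the path C -> B -> A, B.m shadows
        // A.m, and invokespecial may not skip over B's declaration.
    }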
+// Find the unique qualified candidate from the perspective of the super_class
+// which is the resolved_klass, which must be an immediate superinterface
+// of klass
+Method* find_erased_super_default(InstanceKlass* current_class, InstanceKlass* super_class, Symbol* method_name, Symbol* sig, TRAPS) {
+
+ FindMethodsByErasedSig visitor(method_name, sig);
+ visitor.run(super_class); // find candidates from resolved_klass
+
+ MethodFamily* family;
+ visitor.get_discovered_family(&family);
+
+ if (family != NULL) {
+ family->determine_target(current_class, CHECK_NULL); // get target from current_class
+
+ if (family->has_target()) {
+ Method* target = family->get_selected_target();
+ InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
+
+ // Verify that the identified method is valid from the context of
+ // the current class, which is the caller class for invokespecial
+      // link resolution, i.e. ensure that it is not shadowed.
+      // You can use invokespecial to disambiguate interface methods, but
+      // you cannot use it to skip over an interface method that would shadow it.
+ ErasedShadowChecker checker(THREAD, target->name(), holder, super_class);
+ checker.run(current_class);
+
+ if (checker.found_shadow()) {
+#ifndef PRODUCT
+ if (TraceDefaultMethods) {
+ tty->print_cr(" Only candidate found was shadowed.");
+ }
+#endif // ndef PRODUCT
+ THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
+ "Accessible default method not found", NULL);
+ } else {
+#ifndef PRODUCT
+ if (TraceDefaultMethods) {
+ family->print_sig_on(tty, target->signature(), 1);
+ }
+#endif // ndef PRODUCT
+ return target;
+ }
+ } else {
+ assert(family->throws_exception(), "must have target or throw");
+ THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
+ family->get_exception_message()->as_C_string(), NULL);
+ }
+ } else {
+ // no method found
+ ResourceMark rm(THREAD);
+ THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(),
+ Method::name_and_sig_as_C_string(current_class,
+ method_name, sig), NULL);
+ }
+}
// This is called during linktime when we find an invokespecial call that
// refers to a direct superinterface. It indicates that we should find the
// default method in the hierarchy of that superinterface, and if that method
// would have been a candidate from the point of view of 'this' class, then we
// return that method.
+// This logic assumes that the super is a direct superinterface of the caller.
Method* DefaultMethods::find_super_default(
Klass* cls, Klass* super, Symbol* method_name, Symbol* sig, TRAPS) {
@@ -991,90 +943,52 @@ Method* DefaultMethods::find_super_default(
assert(cls != NULL && super != NULL, "Need real classes");
InstanceKlass* current_class = InstanceKlass::cast(cls);
- InstanceKlass* direction = InstanceKlass::cast(super);
+ InstanceKlass* super_class = InstanceKlass::cast(super);
// Keep entire hierarchy alive for the duration of the computation
KeepAliveRegistrar keepAlive(THREAD);
KeepAliveVisitor loadKeepAlive(&keepAlive);
- loadKeepAlive.run(current_class);
+ loadKeepAlive.run(current_class); // get hierarchy from current class
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr("Finding super default method %s.%s%s from %s",
- direction->name()->as_C_string(),
+ super_class->name()->as_C_string(),
method_name->as_C_string(), sig->as_C_string(),
current_class->name()->as_C_string());
}
#endif // ndef PRODUCT
- if (!direction->is_interface()) {
- // We should not be here
- return NULL;
- }
-
- generic::DescriptorCache cache;
- generic::Context ctx(&cache);
-
- // Prime the initial generic context for current -> direction
- ctx.apply_type_arguments(current_class, direction, CHECK_NULL);
+ assert(super_class->is_interface(), "only call for default methods");
- FindMethodsByName visitor(&cache, method_name, &ctx, CHECK_NULL);
- visitor.run(direction);
-
- GrowableArray<MethodFamily*> families;
- visitor.get_discovered_families(&families);
+ Method* target = NULL;
+ target = find_erased_super_default(current_class, super_class,
+ method_name, sig, CHECK_NULL);
#ifndef PRODUCT
- if (TraceDefaultMethods) {
- print_families(&families, sig);
- }
-#endif // ndef PRODUCT
-
- MethodFamily* selected_family = NULL;
-
- for (int i = 0; i < families.length(); ++i) {
- MethodFamily* lm = families.at(i);
- if (lm->contains_signature(sig)) {
- lm->determine_target(current_class, CHECK_NULL);
- selected_family = lm;
+ if (target != NULL) {
+ if (TraceDefaultMethods) {
+ tty->print(" Returning ");
+ print_method(tty, target, true);
+ tty->print_cr("");
}
}
-
- if (selected_family->has_target()) {
- Method* target = selected_family->get_selected_target();
- InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
-
- // Verify that the identified method is valid from the context of
- // the current class
- ShadowChecker checker(&cache, THREAD, target->name(),
- holder, selected_family->descriptor(), direction);
- checker.run(current_class);
-
- if (checker.found_shadow()) {
-#ifndef PRODUCT
- if (TraceDefaultMethods) {
- tty->print_cr(" Only candidate found was shadowed.");
- }
#endif // ndef PRODUCT
- THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
- "Accessible default method not found", NULL);
- } else {
+ return target;
+}
+
#ifndef PRODUCT
- if (TraceDefaultMethods) {
- tty->print(" Returning ");
- print_method(tty, target, true);
- tty->print_cr("");
- }
-#endif // ndef PRODUCT
- return target;
- }
- } else {
- assert(selected_family->throws_exception(), "must have target or throw");
- THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
- selected_family->get_exception_message()->as_C_string(), NULL);
+// Return true if the broad type is a covariant return of the narrow type.
+static bool covariant_return_type(BasicType narrow, BasicType broad) {
+ if (narrow == broad) {
+ return true;
}
+ if (broad == T_OBJECT) {
+ return true;
+ }
+ return false;
}
-
+#endif // ndef PRODUCT
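As a rough Java illustration of the erased covariant case this debug-only check accepts (hypothetical types, not from this patch): an override may narrow a reference return type, so a redirecting method's declared return can be Object while the selected target returns the subtype.

    interface Source { Object get(); }

    class StringSource implements Source {
        // Covariant override: narrows Object to String. After erasure the
        // redirecting method's return is T_OBJECT while the target returns
        // the narrower reference type -- the pair the check above permits.
        public String get() { return "value"; }
    }
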
static int assemble_redirect(
BytecodeConstantPool* cp, BytecodeBuffer* buffer,
@@ -1103,7 +1017,7 @@ static int assemble_redirect(
out.next();
}
assert(out.at_return_type(), "Parameter counts do not match");
- assert(in.type() == out.type(), "Return types are not compatible");
+ assert(covariant_return_type(out.type(), in.type()), "Return types are not compatible");
if (parameter_count == 1 && (in.type() == T_LONG || in.type() == T_DOUBLE)) {
++parameter_count; // need room for return value
@@ -1144,10 +1058,15 @@ static Method* new_method(
Symbol* sig, AccessFlags flags, int max_stack, int params,
ConstMethod::MethodType mt, TRAPS) {
- address code_start = static_cast<address>(bytecodes->adr_at(0));
- int code_length = bytecodes->length();
+ address code_start = 0;
+ int code_length = 0;
InlineTableSizes sizes;
+ if (bytecodes != NULL && bytecodes->length() > 0) {
+ code_start = static_cast<address>(bytecodes->adr_at(0));
+ code_length = bytecodes->length();
+ }
+
Method* m = Method::allocate(cp->pool_holder()->class_loader_data(),
code_length, flags, &sizes,
mt, CHECK_NULL);
@@ -1226,19 +1145,23 @@ static void create_overpasses(
#endif // ndef PRODUCT
if (method->has_target()) {
Method* selected = method->get_selected_target();
- max_stack = assemble_redirect(
+ if (selected->method_holder()->is_interface()) {
+ max_stack = assemble_redirect(
&bpool, &buffer, slot->signature(), selected, CHECK);
+ }
} else if (method->throws_exception()) {
max_stack = assemble_abstract_method_error(
&bpool, &buffer, method->get_exception_message(), CHECK);
}
- AccessFlags flags = accessFlags_from(
+ if (max_stack != 0) {
+ AccessFlags flags = accessFlags_from(
JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
- Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
+ Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
flags, max_stack, slot->size_of_parameters(),
ConstMethod::OVERPASS, CHECK);
- if (m != NULL) {
- overpasses.push(m);
+ if (m != NULL) {
+ overpasses.push(m);
+ }
}
}
}
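For context, the exception-throwing overpass path above typically arises only with separately compiled classes, since javac rejects an unresolved default conflict at compile time; a rough Java sketch with hypothetical types:

    interface Left  { default String who() { return "left"; } }
    interface Right { default String who() { return "right"; } }

    // If Both had been compiled before Right.who() existed, the VM would see
    // two unrelated defaults for who() and install an exception-throwing
    // overpass, failing at invocation time rather than at class load.
    class Both implements Left, Right {
        // Overriding at the source level resolves the conflict.
        public String who() { return Left.super.who() + "+" + Right.super.who(); }
    }
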
@@ -1349,6 +1272,7 @@ static void merge_in_new_methods(InstanceKlass* klass,
// Replace klass methods with new merged lists
klass->set_methods(merged_methods);
+ klass->set_initial_method_idnum(new_size);
ClassLoaderData* cld = klass->class_loader_data();
MetadataFactory::free_array(cld, original_methods);
diff --git a/src/share/vm/classfile/dictionary.cpp b/src/share/vm/classfile/dictionary.cpp
index 92d9fb438..26e06c8a0 100644
--- a/src/share/vm/classfile/dictionary.cpp
+++ b/src/share/vm/classfile/dictionary.cpp
@@ -253,22 +253,6 @@ void Dictionary::classes_do(void f(Klass*, TRAPS), TRAPS) {
}
}
-
-// All classes, and their class loaders
-// (added for helpers that use HandleMarks and ResourceMarks)
-// Don't iterate over placeholders
-void Dictionary::classes_do(void f(Klass*, ClassLoaderData*, TRAPS), TRAPS) {
- for (int index = 0; index < table_size(); index++) {
- for (DictionaryEntry* probe = bucket(index);
- probe != NULL;
- probe = probe->next()) {
- Klass* k = probe->klass();
- f(k, probe->loader_data(), CHECK);
- }
- }
-}
-
-
// All classes, and their class loaders
// Don't iterate over placeholders
void Dictionary::classes_do(void f(Klass*, ClassLoaderData*)) {
@@ -571,7 +555,7 @@ void Dictionary::verify() {
loader_data->class_loader() == NULL ||
loader_data->class_loader()->is_instance(),
"checking type of class_loader");
- e->verify();
+ e->verify(/*check_dictionary*/false);
probe->verify_protection_domain_set();
element_count++;
}
diff --git a/src/share/vm/classfile/dictionary.hpp b/src/share/vm/classfile/dictionary.hpp
index 8820b2517..53629a015 100644
--- a/src/share/vm/classfile/dictionary.hpp
+++ b/src/share/vm/classfile/dictionary.hpp
@@ -90,7 +90,6 @@ public:
void classes_do(void f(Klass*));
void classes_do(void f(Klass*, TRAPS), TRAPS);
void classes_do(void f(Klass*, ClassLoaderData*));
- void classes_do(void f(Klass*, ClassLoaderData*, TRAPS), TRAPS);
void methods_do(void f(Method*));
diff --git a/src/share/vm/classfile/genericSignatures.cpp b/src/share/vm/classfile/genericSignatures.cpp
deleted file mode 100644
index 33fbca051..000000000
--- a/src/share/vm/classfile/genericSignatures.cpp
+++ /dev/null
@@ -1,1279 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-
-#include "classfile/genericSignatures.hpp"
-#include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "memory/resourceArea.hpp"
-
-namespace generic {
-
-// Helper class for parsing the generic signature Symbol in klass and methods
-class DescriptorStream : public ResourceObj {
- private:
- Symbol* _symbol;
- int _offset;
- int _mark;
- const char* _parse_error;
-
- void set_parse_error(const char* error) {
- assert(error != NULL, "Can't set NULL error string");
- _parse_error = error;
- }
-
- public:
- DescriptorStream(Symbol* sym)
- : _symbol(sym), _offset(0), _mark(-1), _parse_error(NULL) {}
-
- const char* parse_error() const {
- return _parse_error;
- }
-
- bool at_end() { return _offset >= _symbol->utf8_length(); }
-
- char peek() {
- if (at_end()) {
- set_parse_error("Peeking past end of signature");
- return '\0';
- } else {
- return _symbol->byte_at(_offset);
- }
- }
-
- char read() {
- if (at_end()) {
- set_parse_error("Reading past end of signature");
- return '\0';
- } else {
- return _symbol->byte_at(_offset++);
- }
- }
-
- void read(char expected) {
- char c = read();
- assert_char(c, expected, 0);
- }
-
- void assert_char(char c, char expected, int pos = -1) {
- if (c != expected) {
- const char* fmt = "Parse error at %d: expected %c but got %c";
- size_t len = strlen(fmt) + 5;
- char* buffer = NEW_RESOURCE_ARRAY(char, len);
- jio_snprintf(buffer, len, fmt, _offset + pos, expected, c);
- set_parse_error(buffer);
- }
- }
-
- void push(char c) {
- assert(c == _symbol->byte_at(_offset - 1), "Pushing back wrong value");
- --_offset;
- }
-
- void expect_end() {
- if (!at_end()) {
- set_parse_error("Unexpected data trailing signature");
- }
- }
-
- bool has_mark() { return _mark != -1; }
-
- void set_mark() {
- _mark = _offset;
- }
-
- Identifier* identifier_from_mark() {
- assert(has_mark(), "Mark should be set");
- if (!has_mark()) {
- set_parse_error("Expected mark to be set");
- return NULL;
- } else {
- Identifier* id = new Identifier(_symbol, _mark, _offset - 1);
- _mark = -1;
- return id;
- }
- }
-};
-
-
-#define CHECK_FOR_PARSE_ERROR() \
- if (STREAM->parse_error() != NULL) { \
- if (VerifyGenericSignatures) { \
- fatal(STREAM->parse_error()); \
- } \
- return NULL; \
- } 0
-
-#define READ() STREAM->read(); CHECK_FOR_PARSE_ERROR()
-#define PEEK() STREAM->peek(); CHECK_FOR_PARSE_ERROR()
-#define PUSH(c) STREAM->push(c)
-#define EXPECT(c) STREAM->read(c); CHECK_FOR_PARSE_ERROR()
-#define EXPECTED(c, ch) STREAM->assert_char(c, ch); CHECK_FOR_PARSE_ERROR()
-#define EXPECT_END() STREAM->expect_end(); CHECK_FOR_PARSE_ERROR()
-
-#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); (0
-
-#ifndef PRODUCT
-void Identifier::print_on(outputStream* str) const {
- for (int i = _begin; i < _end; ++i) {
- str->print("%c", (char)_sym->byte_at(i));
- }
-}
-#endif // ndef PRODUCT
-
-bool Identifier::equals(Identifier* other) {
- if (_sym == other->_sym && _begin == other->_begin && _end == other->_end) {
- return true;
- } else if (_end - _begin != other->_end - other->_begin) {
- return false;
- } else {
- size_t len = _end - _begin;
- char* addr = ((char*)_sym->bytes()) + _begin;
- char* oaddr = ((char*)other->_sym->bytes()) + other->_begin;
- return strncmp(addr, oaddr, len) == 0;
- }
-}
-
-bool Identifier::equals(Symbol* sym) {
- Identifier id(sym, 0, sym->utf8_length());
- return equals(&id);
-}
-
-/**
- * A formal type parameter may be found in the enclosing class, but it could
- * also come from an enclosing method or outer class, in the case of inner-outer
- * classes or anonymous classes. For example:
- *
- * class Outer<T,V> {
- * class Inner<W> {
- * void m(T t, V v, W w);
- * }
- * }
- *
- * In this case, the type variables in m()'s signature are not all found in the
- * immediate enclosing class (Inner). class Inner has only type parameter W,
- * but its outer_class field will reference Outer's descriptor which contains
- * T & V (no outer_method in this case).
- *
- * If you have an anonymous class, it has both an enclosing method *and* an
- * enclosing class where type parameters can be declared:
- *
- * class MOuter<T> {
- * <V> void bar(V v) {
- * Runnable r = new Runnable() {
- * public void run() {}
- * public void foo(T t, V v) { ... }
- * };
- * }
- * }
- *
- * In this case, foo will be a member of some class, Runnable$1, which has no
- * formal parameters itself, but has an outer_method (bar()) which provides
- * type parameter V, and an outer class MOuter with type parameter T.
- *
- * It is also possible that the outer class is itself an inner class to some
- * other class (or an anonymous class with an enclosing method), so we need to
- * follow the outer_class/outer_method chain to its end when looking for a
- * type parameter.
- */
-TypeParameter* Descriptor::find_type_parameter(Identifier* id, int* depth) {
-
- int current_depth = 0;
-
- MethodDescriptor* outer_method = as_method_signature();
- ClassDescriptor* outer_class = as_class_signature();
-
- if (outer_class == NULL) { // 'this' is a method signature; use the holder
- outer_class = outer_method->outer_class();
- }
-
- while (outer_method != NULL || outer_class != NULL) {
- if (outer_method != NULL) {
- for (int i = 0; i < outer_method->type_parameters().length(); ++i) {
- TypeParameter* p = outer_method->type_parameters().at(i);
- if (p->identifier()->equals(id)) {
-          *depth = -1; // indicates that this is a method parameter
- return p;
- }
- }
- }
- if (outer_class != NULL) {
- for (int i = 0; i < outer_class->type_parameters().length(); ++i) {
- TypeParameter* p = outer_class->type_parameters().at(i);
- if (p->identifier()->equals(id)) {
- *depth = current_depth;
- return p;
- }
- }
- outer_method = outer_class->outer_method();
- outer_class = outer_class->outer_class();
- ++current_depth;
- }
- }
-
- if (VerifyGenericSignatures) {
- fatal("Could not resolve identifier");
- }
-
- return NULL;
-}
-
-ClassDescriptor* ClassDescriptor::parse_generic_signature(Klass* klass, TRAPS) {
- return parse_generic_signature(klass, NULL, CHECK_NULL);
-}
-
-ClassDescriptor* ClassDescriptor::parse_generic_signature(
- Klass* klass, Symbol* original_name, TRAPS) {
-
- InstanceKlass* ik = InstanceKlass::cast(klass);
- Symbol* sym = ik->generic_signature();
-
- ClassDescriptor* spec;
-
- if (sym == NULL || (spec = ClassDescriptor::parse_generic_signature(sym)) == NULL) {
- spec = ClassDescriptor::placeholder(ik);
- }
-
- u2 outer_index = get_outer_class_index(ik, CHECK_NULL);
- if (outer_index != 0) {
- if (original_name == NULL) {
- original_name = ik->name();
- }
- Handle class_loader = Handle(THREAD, ik->class_loader());
- Handle protection_domain = Handle(THREAD, ik->protection_domain());
-
- Symbol* outer_name = ik->constants()->klass_name_at(outer_index);
- Klass* outer = SystemDictionary::find(
- outer_name, class_loader, protection_domain, CHECK_NULL);
- if (outer == NULL && !THREAD->is_Compiler_thread()) {
- if (outer_name == ik->super()->name()) {
- outer = SystemDictionary::resolve_super_or_fail(original_name, outer_name,
- class_loader, protection_domain,
- false, CHECK_NULL);
- }
- else {
- outer = SystemDictionary::resolve_or_fail(outer_name, class_loader,
- protection_domain, false, CHECK_NULL);
- }
- }
-
- InstanceKlass* outer_ik;
- ClassDescriptor* outer_spec = NULL;
- if (outer == NULL) {
- outer_spec = ClassDescriptor::placeholder(ik);
- assert(false, "Outer class not loaded and not loadable from here");
- } else {
- outer_ik = InstanceKlass::cast(outer);
- outer_spec = parse_generic_signature(outer, original_name, CHECK_NULL);
- }
- spec->set_outer_class(outer_spec);
-
- u2 encl_method_idx = ik->enclosing_method_method_index();
- if (encl_method_idx != 0 && outer_ik != NULL) {
- ConstantPool* cp = ik->constants();
- u2 name_index = cp->name_ref_index_at(encl_method_idx);
- u2 sig_index = cp->signature_ref_index_at(encl_method_idx);
- Symbol* name = cp->symbol_at(name_index);
- Symbol* sig = cp->symbol_at(sig_index);
- Method* m = outer_ik->find_method(name, sig);
- if (m != NULL) {
- Symbol* gsig = m->generic_signature();
- if (gsig != NULL) {
- MethodDescriptor* gms = MethodDescriptor::parse_generic_signature(gsig, outer_spec);
- spec->set_outer_method(gms);
- }
- } else if (VerifyGenericSignatures) {
- ResourceMark rm;
- stringStream ss;
- ss.print("Could not find method %s %s in class %s",
- name->as_C_string(), sig->as_C_string(), outer_name->as_C_string());
- fatal(ss.as_string());
- }
- }
- }
-
- spec->bind_variables_to_parameters();
- return spec;
-}
-
-ClassDescriptor* ClassDescriptor::placeholder(InstanceKlass* klass) {
- GrowableArray<TypeParameter*> formals;
- GrowableArray<ClassType*> interfaces;
- ClassType* super_type = NULL;
-
- Klass* super_klass = klass->super();
- if (super_klass != NULL) {
- InstanceKlass* super = InstanceKlass::cast(super_klass);
- super_type = ClassType::from_symbol(super->name());
- }
-
- for (int i = 0; i < klass->local_interfaces()->length(); ++i) {
- InstanceKlass* iface = InstanceKlass::cast(klass->local_interfaces()->at(i));
- interfaces.append(ClassType::from_symbol(iface->name()));
- }
- return new ClassDescriptor(formals, super_type, interfaces);
-}
-
-ClassDescriptor* ClassDescriptor::parse_generic_signature(Symbol* sym) {
-
- DescriptorStream ds(sym);
- DescriptorStream* STREAM = &ds;
-
- GrowableArray<TypeParameter*> parameters(8);
- char c = READ();
- if (c == '<') {
- c = READ();
- while (c != '>') {
- PUSH(c);
- TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
- parameters.append(ftp);
- c = READ();
- }
- } else {
- PUSH(c);
- }
-
- EXPECT('L');
- ClassType* super = ClassType::parse_generic_signature(CHECK_STREAM);
-
- GrowableArray<ClassType*> signatures(2);
- while (!STREAM->at_end()) {
- EXPECT('L');
- ClassType* iface = ClassType::parse_generic_signature(CHECK_STREAM);
- signatures.append(iface);
- }
-
- EXPECT_END();
-
- return new ClassDescriptor(parameters, super, signatures);
-}
-
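For reference, the grammar parsed above is the standard JVM class Signature attribute (JVMS 4.7.9.1); a small Java example with a hypothetical class Foo:

    import java.util.ArrayList;

    // Foo carries the class signature:
    //   <T:Ljava/lang/Object;>Ljava/util/ArrayList<TT;>;Ljava/lang/Comparable<LFoo<TT;>;>;
    // i.e. formal parameters in '<...>', then the superclass type, then one
    // entry per superinterface -- the three phases read by the parser above.
    class Foo<T> extends ArrayList<T> implements Comparable<Foo<T>> {
        public int compareTo(Foo<T> other) { return 0; }
    }
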
-#ifndef PRODUCT
-void ClassDescriptor::print_on(outputStream* str) const {
- str->indent().print_cr("ClassDescriptor {");
- {
- streamIndentor si(str);
- if (_type_parameters.length() > 0) {
- str->indent().print_cr("Formals {");
- {
- streamIndentor si(str);
- for (int i = 0; i < _type_parameters.length(); ++i) {
- _type_parameters.at(i)->print_on(str);
- }
- }
- str->indent().print_cr("}");
- }
- if (_super != NULL) {
- str->indent().print_cr("Superclass: ");
- {
- streamIndentor si(str);
- _super->print_on(str);
- }
- }
- if (_interfaces.length() > 0) {
- str->indent().print_cr("SuperInterfaces: {");
- {
- streamIndentor si(str);
- for (int i = 0; i < _interfaces.length(); ++i) {
- _interfaces.at(i)->print_on(str);
- }
- }
- str->indent().print_cr("}");
- }
- if (_outer_method != NULL) {
- str->indent().print_cr("Outer Method: {");
- {
- streamIndentor si(str);
- _outer_method->print_on(str);
- }
- str->indent().print_cr("}");
- }
- if (_outer_class != NULL) {
- str->indent().print_cr("Outer Class: {");
- {
- streamIndentor si(str);
- _outer_class->print_on(str);
- }
- str->indent().print_cr("}");
- }
- }
- str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-ClassType* ClassDescriptor::interface_desc(Symbol* sym) {
- for (int i = 0; i < _interfaces.length(); ++i) {
- if (_interfaces.at(i)->identifier()->equals(sym)) {
- return _interfaces.at(i);
- }
- }
- if (VerifyGenericSignatures) {
- fatal("Did not find expected interface");
- }
- return NULL;
-}
-
-void ClassDescriptor::bind_variables_to_parameters() {
- if (_outer_class != NULL) {
- _outer_class->bind_variables_to_parameters();
- }
- if (_outer_method != NULL) {
- _outer_method->bind_variables_to_parameters();
- }
- for (int i = 0; i < _type_parameters.length(); ++i) {
- _type_parameters.at(i)->bind_variables_to_parameters(this, i);
- }
- if (_super != NULL) {
- _super->bind_variables_to_parameters(this);
- }
- for (int i = 0; i < _interfaces.length(); ++i) {
- _interfaces.at(i)->bind_variables_to_parameters(this);
- }
-}
-
-ClassDescriptor* ClassDescriptor::canonicalize(Context* ctx) {
-
- GrowableArray<TypeParameter*> type_params(_type_parameters.length());
- for (int i = 0; i < _type_parameters.length(); ++i) {
- type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0));
- }
-
- ClassDescriptor* outer = _outer_class == NULL ? NULL :
- _outer_class->canonicalize(ctx);
-
- ClassType* super = _super == NULL ? NULL : _super->canonicalize(ctx, 0);
-
- GrowableArray<ClassType*> interfaces(_interfaces.length());
- for (int i = 0; i < _interfaces.length(); ++i) {
- interfaces.append(_interfaces.at(i)->canonicalize(ctx, 0));
- }
-
- MethodDescriptor* md = _outer_method == NULL ? NULL :
- _outer_method->canonicalize(ctx);
-
- return new ClassDescriptor(type_params, super, interfaces, outer, md);
-}
-
-u2 ClassDescriptor::get_outer_class_index(InstanceKlass* klass, TRAPS) {
- int inner_index = InstanceKlass::inner_class_inner_class_info_offset;
- int outer_index = InstanceKlass::inner_class_outer_class_info_offset;
- int name_offset = InstanceKlass::inner_class_inner_name_offset;
- int next_offset = InstanceKlass::inner_class_next_offset;
-
- if (klass->inner_classes() == NULL || klass->inner_classes()->length() == 0) {
- // No inner class info => no declaring class
- return 0;
- }
-
- Array<u2>* i_icls = klass->inner_classes();
- ConstantPool* i_cp = klass->constants();
- int i_length = i_icls->length();
-
- // Find inner_klass attribute
- for (int i = 0; i + next_offset < i_length; i += next_offset) {
- u2 ioff = i_icls->at(i + inner_index);
- u2 ooff = i_icls->at(i + outer_index);
- u2 noff = i_icls->at(i + name_offset);
- if (ioff != 0) {
- // Check to see if the name matches the class we're looking for
- // before attempting to find the class.
- if (i_cp->klass_name_at_matches(klass, ioff) && ooff != 0) {
- return ooff;
- }
- }
- }
-
- // It may be anonymous; try for that.
- u2 encl_method_class_idx = klass->enclosing_method_class_index();
- if (encl_method_class_idx != 0) {
- return encl_method_class_idx;
- }
-
- return 0;
-}
-
-MethodDescriptor* MethodDescriptor::parse_generic_signature(Method* m, ClassDescriptor* outer) {
- Symbol* generic_sig = m->generic_signature();
- MethodDescriptor* md = NULL;
- if (generic_sig == NULL || (md = parse_generic_signature(generic_sig, outer)) == NULL) {
- md = parse_generic_signature(m->signature(), outer);
- }
- assert(md != NULL, "Could not parse method signature");
- md->bind_variables_to_parameters();
- return md;
-}
-
-MethodDescriptor* MethodDescriptor::parse_generic_signature(Symbol* sym, ClassDescriptor* outer) {
-
- DescriptorStream ds(sym);
- DescriptorStream* STREAM = &ds;
-
- GrowableArray<TypeParameter*> params(8);
- char c = READ();
- if (c == '<') {
- c = READ();
- while (c != '>') {
- PUSH(c);
- TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
- params.append(ftp);
- c = READ();
- }
- } else {
- PUSH(c);
- }
-
- EXPECT('(');
-
- GrowableArray<Type*> parameters(8);
- c = READ();
- while (c != ')') {
- PUSH(c);
- Type* arg = Type::parse_generic_signature(CHECK_STREAM);
- parameters.append(arg);
- c = READ();
- }
-
- Type* rt = Type::parse_generic_signature(CHECK_STREAM);
-
- GrowableArray<Type*> throws;
- while (!STREAM->at_end()) {
- EXPECT('^');
- Type* spec = Type::parse_generic_signature(CHECK_STREAM);
- throws.append(spec);
- }
-
- return new MethodDescriptor(params, outer, parameters, rt, throws);
-}
-
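Similarly, the method form parsed above follows the JVMS method Signature grammar; a small Java example with a hypothetical class Picker:

    import java.io.IOException;
    import java.util.List;

    class Picker {
        // pick carries the method signature:
        //   <V:Ljava/lang/Object;>(Ljava/util/List<TV;>;)TV;^Ljava/io/IOException;
        // i.e. formals, '(' parameters ')', return type, then '^'-prefixed
        // throws clauses, matching the parse order above.
        <V> V pick(List<V> from) throws IOException {
            if (from.isEmpty()) throw new IOException("empty");
            return from.get(0);
        }
    }
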
-void MethodDescriptor::bind_variables_to_parameters() {
- for (int i = 0; i < _type_parameters.length(); ++i) {
- _type_parameters.at(i)->bind_variables_to_parameters(this, i);
- }
- for (int i = 0; i < _parameters.length(); ++i) {
- _parameters.at(i)->bind_variables_to_parameters(this);
- }
- _return_type->bind_variables_to_parameters(this);
- for (int i = 0; i < _throws.length(); ++i) {
- _throws.at(i)->bind_variables_to_parameters(this);
- }
-}
-
-bool MethodDescriptor::covariant_match(MethodDescriptor* other, Context* ctx) {
-
- if (_parameters.length() == other->_parameters.length()) {
- for (int i = 0; i < _parameters.length(); ++i) {
- if (!_parameters.at(i)->covariant_match(other->_parameters.at(i), ctx)) {
- return false;
- }
- }
-
- if (_return_type->as_primitive() != NULL) {
- return _return_type->covariant_match(other->_return_type, ctx);
- } else {
- // return type is a reference
- return other->_return_type->as_class() != NULL ||
- other->_return_type->as_variable() != NULL ||
- other->_return_type->as_array() != NULL;
- }
- } else {
- return false;
- }
-}
-
-MethodDescriptor* MethodDescriptor::canonicalize(Context* ctx) {
-
- GrowableArray<TypeParameter*> type_params(_type_parameters.length());
- for (int i = 0; i < _type_parameters.length(); ++i) {
- type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0));
- }
-
- ClassDescriptor* outer = _outer_class == NULL ? NULL :
- _outer_class->canonicalize(ctx);
-
- GrowableArray<Type*> params(_parameters.length());
- for (int i = 0; i < _parameters.length(); ++i) {
- params.append(_parameters.at(i)->canonicalize(ctx, 0));
- }
-
- Type* rt = _return_type->canonicalize(ctx, 0);
-
- GrowableArray<Type*> throws(_throws.length());
- for (int i = 0; i < _throws.length(); ++i) {
- throws.append(_throws.at(i)->canonicalize(ctx, 0));
- }
-
- return new MethodDescriptor(type_params, outer, params, rt, throws);
-}
-
-#ifndef PRODUCT
-TempNewSymbol MethodDescriptor::reify_signature(Context* ctx, TRAPS) {
- stringStream ss(256);
-
- ss.print("(");
- for (int i = 0; i < _parameters.length(); ++i) {
- _parameters.at(i)->reify_signature(&ss, ctx);
- }
- ss.print(")");
- _return_type->reify_signature(&ss, ctx);
- return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
-}
-
-void MethodDescriptor::print_on(outputStream* str) const {
- str->indent().print_cr("MethodDescriptor {");
- {
- streamIndentor si(str);
- if (_type_parameters.length() > 0) {
- str->indent().print_cr("Formals: {");
- {
- streamIndentor si(str);
- for (int i = 0; i < _type_parameters.length(); ++i) {
- _type_parameters.at(i)->print_on(str);
- }
- }
- str->indent().print_cr("}");
- }
- str->indent().print_cr("Parameters: {");
- {
- streamIndentor si(str);
- for (int i = 0; i < _parameters.length(); ++i) {
- _parameters.at(i)->print_on(str);
- }
- }
- str->indent().print_cr("}");
- str->indent().print_cr("Return Type: ");
- {
- streamIndentor si(str);
- _return_type->print_on(str);
- }
-
- if (_throws.length() > 0) {
- str->indent().print_cr("Throws: {");
- {
- streamIndentor si(str);
- for (int i = 0; i < _throws.length(); ++i) {
- _throws.at(i)->print_on(str);
- }
- }
- str->indent().print_cr("}");
- }
- }
- str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-TypeParameter* TypeParameter::parse_generic_signature(DescriptorStream* STREAM) {
- STREAM->set_mark();
- char c = READ();
- while (c != ':') {
- c = READ();
- }
-
- Identifier* id = STREAM->identifier_from_mark();
-
- ClassType* class_bound = NULL;
- GrowableArray<ClassType*> interface_bounds(8);
-
- c = READ();
- if (c != '>') {
- if (c != ':') {
- EXPECTED(c, 'L');
- class_bound = ClassType::parse_generic_signature(CHECK_STREAM);
- c = READ();
- }
-
- while (c == ':') {
- EXPECT('L');
- ClassType* fts = ClassType::parse_generic_signature(CHECK_STREAM);
- interface_bounds.append(fts);
- c = READ();
- }
- }
- PUSH(c);
-
- return new TypeParameter(id, class_bound, interface_bounds);
-}
-
-void TypeParameter::bind_variables_to_parameters(Descriptor* sig, int position) {
- if (_class_bound != NULL) {
- _class_bound->bind_variables_to_parameters(sig);
- }
- for (int i = 0; i < _interface_bounds.length(); ++i) {
- _interface_bounds.at(i)->bind_variables_to_parameters(sig);
- }
- _position = position;
-}
-
-Type* TypeParameter::resolve(
- Context* ctx, int inner_depth, int ctx_depth) {
-
- if (inner_depth == -1) {
- // This indicates that the parameter is a method type parameter, which
- // isn't resolveable using the class hierarchy context
- return bound();
- }
-
- ClassType* provider = ctx->at_depth(ctx_depth);
- if (provider != NULL) {
- for (int i = 0; i < inner_depth && provider != NULL; ++i) {
- provider = provider->outer_class();
- }
- if (provider != NULL) {
- TypeArgument* arg = provider->type_argument_at(_position);
- if (arg != NULL) {
- Type* value = arg->lower_bound();
- return value->canonicalize(ctx, ctx_depth + 1);
- }
- }
- }
-
- return bound();
-}
-
-TypeParameter* TypeParameter::canonicalize(Context* ctx, int ctx_depth) {
- ClassType* bound = _class_bound == NULL ? NULL :
- _class_bound->canonicalize(ctx, ctx_depth);
-
- GrowableArray<ClassType*> ifaces(_interface_bounds.length());
- for (int i = 0; i < _interface_bounds.length(); ++i) {
- ifaces.append(_interface_bounds.at(i)->canonicalize(ctx, ctx_depth));
- }
-
- TypeParameter* ret = new TypeParameter(_identifier, bound, ifaces);
- ret->_position = _position;
- return ret;
-}
-
-ClassType* TypeParameter::bound() {
- if (_class_bound != NULL) {
- return _class_bound;
- }
-
- if (_interface_bounds.length() == 1) {
- return _interface_bounds.at(0);
- }
-
- return ClassType::java_lang_Object(); // TODO: investigate this case
-}
-
-#ifndef PRODUCT
-void TypeParameter::print_on(outputStream* str) const {
- str->indent().print_cr("Formal: {");
- {
- streamIndentor si(str);
-
- str->indent().print("Identifier: ");
- _identifier->print_on(str);
- str->print_cr("");
- if (_class_bound != NULL) {
- str->indent().print_cr("Class Bound: ");
- streamIndentor si(str);
- _class_bound->print_on(str);
- }
- if (_interface_bounds.length() > 0) {
- str->indent().print_cr("Interface Bounds: {");
- {
- streamIndentor si(str);
- for (int i = 0; i < _interface_bounds.length(); ++i) {
- _interface_bounds.at(i)->print_on(str);
- }
- }
- str->indent().print_cr("}");
- }
- str->indent().print_cr("Ordinal Position: %d", _position);
- }
- str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-Type* Type::parse_generic_signature(DescriptorStream* STREAM) {
- char c = READ();
- switch (c) {
- case 'L':
- return ClassType::parse_generic_signature(CHECK_STREAM);
- case 'T':
- return TypeVariable::parse_generic_signature(CHECK_STREAM);
- case '[':
- return ArrayType::parse_generic_signature(CHECK_STREAM);
- default:
- return new PrimitiveType(c);
- }
-}
-
-Identifier* ClassType::parse_generic_signature_simple(GrowableArray<TypeArgument*>* args,
- bool* has_inner, DescriptorStream* STREAM) {
- STREAM->set_mark();
-
- char c = READ();
- while (c != ';' && c != '.' && c != '<') { c = READ(); }
- Identifier* id = STREAM->identifier_from_mark();
-
- if (c == '<') {
- c = READ();
- while (c != '>') {
- PUSH(c);
- TypeArgument* arg = TypeArgument::parse_generic_signature(CHECK_STREAM);
- args->append(arg);
- c = READ();
- }
- c = READ();
- }
-
- *has_inner = (c == '.');
- if (!(*has_inner)) {
- EXPECTED(c, ';');
- }
-
- return id;
-}
-
-ClassType* ClassType::parse_generic_signature(DescriptorStream* STREAM) {
- return parse_generic_signature(NULL, CHECK_STREAM);
-}
-
-ClassType* ClassType::parse_generic_signature(ClassType* outer, DescriptorStream* STREAM) {
- GrowableArray<TypeArgument*> args;
- ClassType* gct = NULL;
- bool has_inner = false;
-
- Identifier* id = parse_generic_signature_simple(&args, &has_inner, STREAM);
- if (id != NULL) {
- gct = new ClassType(id, args, outer);
-
- if (has_inner) {
- gct = parse_generic_signature(gct, CHECK_STREAM);
- }
- }
- return gct;
-}
-
-ClassType* ClassType::from_symbol(Symbol* sym) {
- assert(sym != NULL, "Must not be null");
- GrowableArray<TypeArgument*> args;
- Identifier* id = new Identifier(sym, 0, sym->utf8_length());
- return new ClassType(id, args, NULL);
-}
-
-ClassType* ClassType::java_lang_Object() {
- return from_symbol(vmSymbols::java_lang_Object());
-}
-
-void ClassType::bind_variables_to_parameters(Descriptor* sig) {
- for (int i = 0; i < _type_arguments.length(); ++i) {
- _type_arguments.at(i)->bind_variables_to_parameters(sig);
- }
- if (_outer_class != NULL) {
- _outer_class->bind_variables_to_parameters(sig);
- }
-}
-
-TypeArgument* ClassType::type_argument_at(int i) {
- if (i >= 0 && i < _type_arguments.length()) {
- return _type_arguments.at(i);
- } else {
- return NULL;
- }
-}
-
-#ifndef PRODUCT
-void ClassType::reify_signature(stringStream* ss, Context* ctx) {
- ss->print("L");
- _identifier->print_on(ss);
- ss->print(";");
-}
-
-void ClassType::print_on(outputStream* str) const {
- str->indent().print_cr("Class {");
- {
- streamIndentor si(str);
- str->indent().print("Name: ");
- _identifier->print_on(str);
- str->print_cr("");
- if (_type_arguments.length() != 0) {
- str->indent().print_cr("Type Arguments: {");
- {
- streamIndentor si(str);
- for (int j = 0; j < _type_arguments.length(); ++j) {
- _type_arguments.at(j)->print_on(str);
- }
- }
- str->indent().print_cr("}");
- }
- if (_outer_class != NULL) {
- str->indent().print_cr("Outer Class: ");
- streamIndentor sir(str);
- _outer_class->print_on(str);
- }
- }
- str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-bool ClassType::covariant_match(Type* other, Context* ctx) {
-
- if (other == this) {
- return true;
- }
-
- TypeVariable* variable = other->as_variable();
- if (variable != NULL) {
- other = variable->resolve(ctx, 0);
- }
-
- ClassType* outer = outer_class();
- ClassType* other_class = other->as_class();
-
- if (other_class == NULL ||
- (outer == NULL) != (other_class->outer_class() == NULL)) {
- return false;
- }
-
- if (!_identifier->equals(other_class->_identifier)) {
- return false;
- }
-
- if (outer != NULL && !outer->covariant_match(other_class->outer_class(), ctx)) {
- return false;
- }
-
- return true;
-}
-
-ClassType* ClassType::canonicalize(Context* ctx, int ctx_depth) {
-
- GrowableArray<TypeArgument*> args(_type_arguments.length());
- for (int i = 0; i < _type_arguments.length(); ++i) {
- args.append(_type_arguments.at(i)->canonicalize(ctx, ctx_depth));
- }
-
- ClassType* outer = _outer_class == NULL ? NULL :
- _outer_class->canonicalize(ctx, ctx_depth);
-
- return new ClassType(_identifier, args, outer);
-}
-
-TypeVariable* TypeVariable::parse_generic_signature(DescriptorStream* STREAM) {
- STREAM->set_mark();
- char c = READ();
- while (c != ';') {
- c = READ();
- }
- Identifier* id = STREAM->identifier_from_mark();
-
- return new TypeVariable(id);
-}
-
-void TypeVariable::bind_variables_to_parameters(Descriptor* sig) {
- _parameter = sig->find_type_parameter(_id, &_inner_depth);
- if (VerifyGenericSignatures && _parameter == NULL) {
- fatal("Could not find formal parameter");
- }
-}
-
-Type* TypeVariable::resolve(Context* ctx, int ctx_depth) {
- if (parameter() != NULL) {
- return parameter()->resolve(ctx, inner_depth(), ctx_depth);
- } else {
- if (VerifyGenericSignatures) {
- fatal("Type variable matches no parameter");
- }
- return NULL;
- }
-}
-
-bool TypeVariable::covariant_match(Type* other, Context* ctx) {
-
- if (other == this) {
- return true;
- }
-
- Context my_context(NULL); // empty, results in erasure
- Type* my_type = resolve(&my_context, 0);
- if (my_type == NULL) {
- return false;
- }
-
- return my_type->covariant_match(other, ctx);
-}
-
-Type* TypeVariable::canonicalize(Context* ctx, int ctx_depth) {
- return resolve(ctx, ctx_depth);
-}
-
-#ifndef PRODUCT
-void TypeVariable::reify_signature(stringStream* ss, Context* ctx) {
- Type* type = resolve(ctx, 0);
- if (type != NULL) {
- type->reify_signature(ss, ctx);
- }
-}
-
-void TypeVariable::print_on(outputStream* str) const {
- str->indent().print_cr("Type Variable {");
- {
- streamIndentor si(str);
- str->indent().print("Name: ");
- _id->print_on(str);
- str->print_cr("");
- str->indent().print_cr("Inner depth: %d", _inner_depth);
- }
- str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-ArrayType* ArrayType::parse_generic_signature(DescriptorStream* STREAM) {
- Type* base = Type::parse_generic_signature(CHECK_STREAM);
- return new ArrayType(base);
-}
-
-void ArrayType::bind_variables_to_parameters(Descriptor* sig) {
- assert(_base != NULL, "Invalid base");
- _base->bind_variables_to_parameters(sig);
-}
-
-bool ArrayType::covariant_match(Type* other, Context* ctx) {
- assert(_base != NULL, "Invalid base");
-
- if (other == this) {
- return true;
- }
-
- ArrayType* other_array = other->as_array();
- return (other_array != NULL && _base->covariant_match(other_array->_base, ctx));
-}
-
-ArrayType* ArrayType::canonicalize(Context* ctx, int ctx_depth) {
- assert(_base != NULL, "Invalid base");
- return new ArrayType(_base->canonicalize(ctx, ctx_depth));
-}
-
-#ifndef PRODUCT
-void ArrayType::reify_signature(stringStream* ss, Context* ctx) {
- assert(_base != NULL, "Invalid base");
- ss->print("[");
- _base->reify_signature(ss, ctx);
-}
-
-void ArrayType::print_on(outputStream* str) const {
- str->indent().print_cr("Array {");
- {
- streamIndentor si(str);
- _base->print_on(str);
- }
- str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-bool PrimitiveType::covariant_match(Type* other, Context* ctx) {
-
- PrimitiveType* other_prim = other->as_primitive();
- return (other_prim != NULL && _type == other_prim->_type);
-}
-
-PrimitiveType* PrimitiveType::canonicalize(Context* ctx, int ctxd) {
- return this;
-}
-
-#ifndef PRODUCT
-void PrimitiveType::reify_signature(stringStream* ss, Context* ctx) {
- ss->print("%c", _type);
-}
-
-void PrimitiveType::print_on(outputStream* str) const {
- str->indent().print_cr("Primitive: '%c'", _type);
-}
-#endif // ndef PRODUCT
-
-void PrimitiveType::bind_variables_to_parameters(Descriptor* sig) {
-}
-
-TypeArgument* TypeArgument::parse_generic_signature(DescriptorStream* STREAM) {
- char c = READ();
- Type* type = NULL;
-
- switch (c) {
- case '*':
- return new TypeArgument(ClassType::java_lang_Object(), NULL);
- break;
- default:
- PUSH(c);
- // fall-through
- case '+':
- case '-':
- type = Type::parse_generic_signature(CHECK_STREAM);
- if (c == '+') {
- return new TypeArgument(type, NULL);
- } else if (c == '-') {
- return new TypeArgument(ClassType::java_lang_Object(), type);
- } else {
- return new TypeArgument(type, type);
- }
- }
-}
-
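The '*', '+' and '-' cases above correspond to Java wildcard type arguments; for example (standard signature encoding, hypothetical class):

    import java.util.List;

    class Wildcards {
        List<?> any;                    // Ljava/util/List<*>;
        List<? extends Number> upper;   // Ljava/util/List<+Ljava/lang/Number;>;
        List<? super Integer> lower;    // Ljava/util/List<-Ljava/lang/Integer;>;
    }
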
-void TypeArgument::bind_variables_to_parameters(Descriptor* sig) {
- assert(_lower_bound != NULL, "Invalid lower bound");
- _lower_bound->bind_variables_to_parameters(sig);
- if (_upper_bound != NULL && _upper_bound != _lower_bound) {
- _upper_bound->bind_variables_to_parameters(sig);
- }
-}
-
-bool TypeArgument::covariant_match(TypeArgument* other, Context* ctx) {
- assert(_lower_bound != NULL, "Invalid lower bound");
-
- if (other == this) {
- return true;
- }
-
- if (!_lower_bound->covariant_match(other->lower_bound(), ctx)) {
- return false;
- }
- return true;
-}
-
-TypeArgument* TypeArgument::canonicalize(Context* ctx, int ctx_depth) {
- assert(_lower_bound != NULL, "Invalid lower bound");
- Type* lower = _lower_bound->canonicalize(ctx, ctx_depth);
- Type* upper = NULL;
-
- if (_upper_bound == _lower_bound) {
- upper = lower;
- } else if (_upper_bound != NULL) {
- upper = _upper_bound->canonicalize(ctx, ctx_depth);
- }
-
- return new TypeArgument(lower, upper);
-}
-
-#ifndef PRODUCT
-void TypeArgument::print_on(outputStream* str) const {
- str->indent().print_cr("TypeArgument {");
- {
- streamIndentor si(str);
- if (_lower_bound != NULL) {
- str->indent().print("Lower bound: ");
- _lower_bound->print_on(str);
- }
- if (_upper_bound != NULL) {
- str->indent().print("Upper bound: ");
- _upper_bound->print_on(str);
- }
- }
- str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-void Context::Mark::destroy() {
- if (is_active()) {
- _context->reset_to_mark(_marked_size);
- }
- deactivate();
-}
-
-void Context::apply_type_arguments(
- InstanceKlass* current, InstanceKlass* super, TRAPS) {
- assert(_cache != NULL, "Cannot use an empty context");
- ClassType* spec = NULL;
- if (current != NULL) {
- ClassDescriptor* descriptor = _cache->descriptor_for(current, CHECK);
- if (super == current->super()) {
- spec = descriptor->super();
- } else {
- spec = descriptor->interface_desc(super->name());
- }
- if (spec != NULL) {
- _type_arguments.push(spec);
- }
- }
-}
-
-void Context::reset_to_mark(int size) {
- _type_arguments.trunc_to(size);
-}
-
-ClassType* Context::at_depth(int i) const {
- if (i < _type_arguments.length()) {
- return _type_arguments.at(_type_arguments.length() - 1 - i);
- }
- return NULL;
-}
-
-#ifndef PRODUCT
-void Context::print_on(outputStream* str) const {
- str->indent().print_cr("Context {");
- for (int i = 0; i < _type_arguments.length(); ++i) {
- streamIndentor si(str);
-    str->indent().print("level %d: ", i);
- ClassType* ct = at_depth(i);
- if (ct == NULL) {
- str->print_cr("<empty>");
- continue;
- } else {
- str->print_cr("{");
- }
-
- for (int j = 0; j < ct->type_arguments_length(); ++j) {
- streamIndentor si(str);
- TypeArgument* ta = ct->type_argument_at(j);
- Type* bound = ta->lower_bound();
- bound->print_on(str);
- }
- str->indent().print_cr("}");
- }
- str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-ClassDescriptor* DescriptorCache::descriptor_for(InstanceKlass* ik, TRAPS) {
-
- ClassDescriptor** existing = _class_descriptors.get(ik);
- if (existing == NULL) {
- ClassDescriptor* cd = ClassDescriptor::parse_generic_signature(ik, CHECK_NULL);
- _class_descriptors.put(ik, cd);
- return cd;
- } else {
- return *existing;
- }
-}
-
-MethodDescriptor* DescriptorCache::descriptor_for(
- Method* mh, ClassDescriptor* cd, TRAPS) {
- assert(mh != NULL && cd != NULL, "Should not be NULL");
- MethodDescriptor** existing = _method_descriptors.get(mh);
- if (existing == NULL) {
- MethodDescriptor* md = MethodDescriptor::parse_generic_signature(mh, cd);
- _method_descriptors.put(mh, md);
- return md;
- } else {
- return *existing;
- }
-}
-MethodDescriptor* DescriptorCache::descriptor_for(Method* mh, TRAPS) {
- ClassDescriptor* cd = descriptor_for(
- InstanceKlass::cast(mh->method_holder()), CHECK_NULL);
- return descriptor_for(mh, cd, THREAD);
-}
-
-} // namespace generic
diff --git a/src/share/vm/classfile/genericSignatures.hpp b/src/share/vm/classfile/genericSignatures.hpp
deleted file mode 100644
index 07eb47593..000000000
--- a/src/share/vm/classfile/genericSignatures.hpp
+++ /dev/null
@@ -1,467 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
-#define SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
-
-#include "classfile/symbolTable.hpp"
-#include "memory/allocation.hpp"
-#include "runtime/signature.hpp"
-#include "utilities/growableArray.hpp"
-#include "utilities/resourceHash.hpp"
-
-class stringStream;
-
-namespace generic {
-
-class Identifier;
-class ClassDescriptor;
-class MethodDescriptor;
-
-class TypeParameter; // a formal type parameter declared in generic signatures
-class TypeArgument; // The "type value" passed to fill parameters in supertypes
-class TypeVariable; // A usage of a type parameter as a value
-/**
- * Example:
- *
- * <T, V> class Foo extends Bar<String> { int m(V v) {} }
- * ^^^^^^ ^^^^^^ ^^
- * type parameters type argument type variable
- *
- * Note that a type variable could be passed as an argument too:
- * <T, V> class Foo extends Bar<T> { int m(V v) {} }
- * ^^^
- * type argument's value is a type variable
- */
-
-
-class Type;
-class ClassType;
-class ArrayType;
-class PrimitiveType;
-class Context;
-class DescriptorCache;
-
-class DescriptorStream;
-
-class Identifier : public ResourceObj {
- private:
- Symbol* _sym;
- int _begin;
- int _end;
-
- public:
- Identifier(Symbol* sym, int begin, int end) :
- _sym(sym), _begin(begin), _end(end) {}
-
- bool equals(Identifier* other);
- bool equals(Symbol* sym);
-
-#ifndef PRODUCT
- void print_on(outputStream* str) const;
-#endif // ndef PRODUCT
-};
-
-class Descriptor : public ResourceObj {
- protected:
- GrowableArray<TypeParameter*> _type_parameters;
- ClassDescriptor* _outer_class;
-
- Descriptor(GrowableArray<TypeParameter*>& params,
- ClassDescriptor* outer)
- : _type_parameters(params), _outer_class(outer) {}
-
- public:
-
- ClassDescriptor* outer_class() { return _outer_class; }
- void set_outer_class(ClassDescriptor* sig) { _outer_class = sig; }
-
- virtual ClassDescriptor* as_class_signature() { return NULL; }
- virtual MethodDescriptor* as_method_signature() { return NULL; }
-
- bool is_class_signature() { return as_class_signature() != NULL; }
- bool is_method_signature() { return as_method_signature() != NULL; }
-
- GrowableArray<TypeParameter*>& type_parameters() {
- return _type_parameters;
- }
-
- TypeParameter* find_type_parameter(Identifier* id, int* param_depth);
-
- virtual void bind_variables_to_parameters() = 0;
-
-#ifndef PRODUCT
- virtual void print_on(outputStream* str) const = 0;
-#endif
-};
-
-class ClassDescriptor : public Descriptor {
- private:
- ClassType* _super;
- GrowableArray<ClassType*> _interfaces;
- MethodDescriptor* _outer_method;
-
- ClassDescriptor(GrowableArray<TypeParameter*>& ftp, ClassType* scs,
- GrowableArray<ClassType*>& sis, ClassDescriptor* outer_class = NULL,
- MethodDescriptor* outer_method = NULL)
- : Descriptor(ftp, outer_class), _super(scs), _interfaces(sis),
- _outer_method(outer_method) {}
-
- static u2 get_outer_class_index(InstanceKlass* k, TRAPS);
- static ClassDescriptor* parse_generic_signature(Klass* k, Symbol* original_name, TRAPS);
-
- public:
-
- virtual ClassDescriptor* as_class_signature() { return this; }
-
- MethodDescriptor* outer_method() { return _outer_method; }
- void set_outer_method(MethodDescriptor* m) { _outer_method = m; }
-
- ClassType* super() { return _super; }
- ClassType* interface_desc(Symbol* sym);
-
- static ClassDescriptor* parse_generic_signature(Klass* k, TRAPS);
- static ClassDescriptor* parse_generic_signature(Symbol* sym);
-
-  // For use in superclass chains in positions where there is no generic info
- static ClassDescriptor* placeholder(InstanceKlass* klass);
-
-#ifndef PRODUCT
- void print_on(outputStream* str) const;
-#endif
-
- ClassDescriptor* canonicalize(Context* ctx);
-
- // Linking sets the position index in any contained TypeVariable type
- // to correspond to the location of that identifier in the formal type
- // parameters.
- void bind_variables_to_parameters();
-};
-
-class MethodDescriptor : public Descriptor {
- private:
- GrowableArray<Type*> _parameters;
- Type* _return_type;
- GrowableArray<Type*> _throws;
-
- MethodDescriptor(GrowableArray<TypeParameter*>& ftp, ClassDescriptor* outer,
- GrowableArray<Type*>& sigs, Type* rt, GrowableArray<Type*>& throws)
- : Descriptor(ftp, outer), _parameters(sigs), _return_type(rt),
- _throws(throws) {}
-
- public:
-
- static MethodDescriptor* parse_generic_signature(Method* m, ClassDescriptor* outer);
- static MethodDescriptor* parse_generic_signature(Symbol* sym, ClassDescriptor* outer);
-
- MethodDescriptor* as_method_signature() { return this; }
-
- // Performs generic analysis on the method parameters to determine
- // if both methods refer to the same argument types.
- bool covariant_match(MethodDescriptor* other, Context* ctx);
-
- // Returns a new method descriptor with all generic variables
- // removed and replaced with whatever is indicated using the Context.
- MethodDescriptor* canonicalize(Context* ctx);
-
- void bind_variables_to_parameters();
-
-#ifndef PRODUCT
- TempNewSymbol reify_signature(Context* ctx, TRAPS);
- void print_on(outputStream* str) const;
-#endif
-};
-
-class TypeParameter : public ResourceObj {
- private:
- Identifier* _identifier;
- ClassType* _class_bound;
- GrowableArray<ClassType*> _interface_bounds;
-
- // The position is the ordinal location of the parameter within the
- // formal parameter list (excluding outer classes). It is only set for
- // formal type parameters that are associated with a class -- method
- // type parameters are left as -1. When resolving a generic variable to
- // find the actual type, this index is used to access the generic type
- // argument in the provided context object.
- int _position; // Assigned during variable linking
-
- TypeParameter(Identifier* id, ClassType* class_bound,
- GrowableArray<ClassType*>& interface_bounds) :
- _identifier(id), _class_bound(class_bound),
- _interface_bounds(interface_bounds), _position(-1) {}
-
- public:
- static TypeParameter* parse_generic_signature(DescriptorStream* str);
-
- ClassType* bound();
- int position() { return _position; }
-
- void bind_variables_to_parameters(Descriptor* sig, int position);
- Identifier* identifier() { return _identifier; }
-
- Type* resolve(Context* ctx, int inner_depth, int ctx_depth);
- TypeParameter* canonicalize(Context* ctx, int ctx_depth);
-
-#ifndef PRODUCT
- void print_on(outputStream* str) const;
-#endif
-};
-
-class Type : public ResourceObj {
- public:
- static Type* parse_generic_signature(DescriptorStream* str);
-
- virtual ClassType* as_class() { return NULL; }
- virtual TypeVariable* as_variable() { return NULL; }
- virtual ArrayType* as_array() { return NULL; }
- virtual PrimitiveType* as_primitive() { return NULL; }
-
- virtual bool covariant_match(Type* gt, Context* ctx) = 0;
- virtual Type* canonicalize(Context* ctx, int ctx_depth) = 0;
-
- virtual void bind_variables_to_parameters(Descriptor* sig) = 0;
-
-#ifndef PRODUCT
- virtual void reify_signature(stringStream* ss, Context* ctx) = 0;
- virtual void print_on(outputStream* str) const = 0;
-#endif
-};
-
-class ClassType : public Type {
- friend class ClassDescriptor;
- protected:
- Identifier* _identifier;
- GrowableArray<TypeArgument*> _type_arguments;
- ClassType* _outer_class;
-
- ClassType(Identifier* identifier,
- GrowableArray<TypeArgument*>& args,
- ClassType* outer)
- : _identifier(identifier), _type_arguments(args), _outer_class(outer) {}
-
-  // Sets *has_inner to true if there are inner classes still to read
- static Identifier* parse_generic_signature_simple(
- GrowableArray<TypeArgument*>* args,
- bool* has_inner, DescriptorStream* str);
-
- static ClassType* parse_generic_signature(ClassType* outer,
- DescriptorStream* str);
- static ClassType* from_symbol(Symbol* sym);
-
- public:
- ClassType* as_class() { return this; }
-
- static ClassType* parse_generic_signature(DescriptorStream* str);
- static ClassType* java_lang_Object();
-
- Identifier* identifier() { return _identifier; }
- int type_arguments_length() { return _type_arguments.length(); }
- TypeArgument* type_argument_at(int i);
-
- virtual ClassType* outer_class() { return _outer_class; }
-
- bool covariant_match(Type* gt, Context* ctx);
- ClassType* canonicalize(Context* ctx, int context_depth);
-
- void bind_variables_to_parameters(Descriptor* sig);
-
-#ifndef PRODUCT
- void reify_signature(stringStream* ss, Context* ctx);
- void print_on(outputStream* str) const;
-#endif
-};
-
-class TypeVariable : public Type {
- private:
- Identifier* _id;
- TypeParameter* _parameter; // assigned during linking
-
- // how many steps "out" from inner classes, -1 if method
- int _inner_depth;
-
- TypeVariable(Identifier* id)
- : _id(id), _parameter(NULL), _inner_depth(0) {}
-
- public:
- TypeVariable* as_variable() { return this; }
-
- static TypeVariable* parse_generic_signature(DescriptorStream* str);
-
- Identifier* identifier() { return _id; }
- TypeParameter* parameter() { return _parameter; }
- int inner_depth() { return _inner_depth; }
-
- void bind_variables_to_parameters(Descriptor* sig);
-
- Type* resolve(Context* ctx, int ctx_depth);
- bool covariant_match(Type* gt, Context* ctx);
- Type* canonicalize(Context* ctx, int ctx_depth);
-
-#ifndef PRODUCT
- void reify_signature(stringStream* ss, Context* ctx);
- void print_on(outputStream* str) const;
-#endif
-};
-
-class ArrayType : public Type {
- private:
- Type* _base;
-
- ArrayType(Type* base) : _base(base) {}
-
- public:
- ArrayType* as_array() { return this; }
-
- static ArrayType* parse_generic_signature(DescriptorStream* str);
-
- bool covariant_match(Type* gt, Context* ctx);
- ArrayType* canonicalize(Context* ctx, int ctx_depth);
-
- void bind_variables_to_parameters(Descriptor* sig);
-
-#ifndef PRODUCT
- void reify_signature(stringStream* ss, Context* ctx);
- void print_on(outputStream* str) const;
-#endif
-};
-
-class PrimitiveType : public Type {
- friend class Type;
- private:
- char _type; // includes V for void
-
- PrimitiveType(char& type) : _type(type) {}
-
- public:
- PrimitiveType* as_primitive() { return this; }
-
- bool covariant_match(Type* gt, Context* ctx);
- PrimitiveType* canonicalize(Context* ctx, int ctx_depth);
-
- void bind_variables_to_parameters(Descriptor* sig);
-
-#ifndef PRODUCT
- void reify_signature(stringStream* ss, Context* ctx);
- void print_on(outputStream* str) const;
-#endif
-};
-
-class TypeArgument : public ResourceObj {
- private:
- Type* _lower_bound;
- Type* _upper_bound; // may be null or == _lower_bound
-
- TypeArgument(Type* lower_bound, Type* upper_bound)
- : _lower_bound(lower_bound), _upper_bound(upper_bound) {}
-
- public:
-
- static TypeArgument* parse_generic_signature(DescriptorStream* str);
-
- Type* lower_bound() { return _lower_bound; }
- Type* upper_bound() { return _upper_bound; }
-
- void bind_variables_to_parameters(Descriptor* sig);
- TypeArgument* canonicalize(Context* ctx, int ctx_depth);
-
- bool covariant_match(TypeArgument* a, Context* ctx);
-
-#ifndef PRODUCT
- void print_on(outputStream* str) const;
-#endif
-};
-
-
-class Context : public ResourceObj {
- private:
- DescriptorCache* _cache;
- GrowableArray<ClassType*> _type_arguments;
-
- void reset_to_mark(int size);
-
- public:
- // When this object goes out of scope or 'destroy' is
- // called, then the application of the type to the
- // context is wound-back (unless it's been deactivated).
- class Mark : public StackObj {
- private:
- mutable Context* _context;
- int _marked_size;
-
- bool is_active() const { return _context != NULL; }
- void deactivate() const { _context = NULL; }
-
- public:
- Mark() : _context(NULL), _marked_size(0) {}
- Mark(Context* ctx, int sz) : _context(ctx), _marked_size(sz) {}
- Mark(const Mark& m) : _context(m._context), _marked_size(m._marked_size) {
- m.deactivate(); // Ownership is transferred
- }
-
- Mark& operator=(const Mark& cm) {
- destroy();
- _context = cm._context;
- _marked_size = cm._marked_size;
- cm.deactivate();
- return *this;
- }
-
- void destroy();
- ~Mark() { destroy(); }
- };
-
- Context(DescriptorCache* cache) : _cache(cache) {}
-
- Mark mark() { return Mark(this, _type_arguments.length()); }
- void apply_type_arguments(InstanceKlass* current, InstanceKlass* super,TRAPS);
-
- ClassType* at_depth(int i) const;
-
-#ifndef PRODUCT
- void print_on(outputStream* str) const;
-#endif
-};
-
-/**
- * Contains a cache of descriptors for classes and methods so they can be
- * looked-up instead of reparsing each time they are needed.
- */
-class DescriptorCache : public ResourceObj {
- private:
- ResourceHashtable<InstanceKlass*, ClassDescriptor*> _class_descriptors;
- ResourceHashtable<Method*, MethodDescriptor*> _method_descriptors;
-
- public:
- ClassDescriptor* descriptor_for(InstanceKlass* ikh, TRAPS);
-
- MethodDescriptor* descriptor_for(Method* mh, ClassDescriptor* cd, TRAPS);
- // Class descriptor derived from method holder
- MethodDescriptor* descriptor_for(Method* mh, TRAPS);
-};
-
-} // namespace generic
-
-#endif // SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
-
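The removed Context::Mark above is a pre-C++11 "transfer-on-copy" scope guard: copying a Mark deactivates the source, so exactly one owner winds the context back. A minimal standalone sketch of the same idiom (RollbackMark and its fields are illustrative names, not HotSpot code):

    // Pre-C++11 ownership-transfer guard: copying deactivates the source,
    // so exactly one copy performs the rollback in its destructor.
    class RollbackMark {
     private:
      mutable int* _counter;  // guarded state; NULL once deactivated
      int          _saved;    // value restored on destruction

     public:
      RollbackMark(int* counter) : _counter(counter), _saved(*counter) {}
      RollbackMark(const RollbackMark& m)
          : _counter(m._counter), _saved(m._saved) {
        m._counter = NULL;    // ownership transferred; source becomes inert
      }
      ~RollbackMark() {
        if (_counter != NULL) *_counter = _saved;  // wind back unless transferred
      }
    };

The mutable member and const copy constructor exist only because C++98 has no move semantics; Context::Mark uses the same trick so that mark() can return a Mark by value.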
diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp
index bb02a6d86..9331cc124 100644
--- a/src/share/vm/classfile/javaClasses.cpp
+++ b/src/share/vm/classfile/javaClasses.cpp
@@ -438,6 +438,29 @@ bool java_lang_String::equals(oop java_string, jchar* chars, int len) {
return true;
}
+bool java_lang_String::equals(oop str1, oop str2) {
+ assert(str1->klass() == SystemDictionary::String_klass(),
+ "must be java String");
+ assert(str2->klass() == SystemDictionary::String_klass(),
+ "must be java String");
+ typeArrayOop value1 = java_lang_String::value(str1);
+ int offset1 = java_lang_String::offset(str1);
+ int length1 = java_lang_String::length(str1);
+ typeArrayOop value2 = java_lang_String::value(str2);
+ int offset2 = java_lang_String::offset(str2);
+ int length2 = java_lang_String::length(str2);
+
+ if (length1 != length2) {
+ return false;
+ }
+ for (int i = 0; i < length1; i++) {
+ if (value1->char_at(i + offset1) != value2->char_at(i + offset2)) {
+ return false;
+ }
+ }
+ return true;
+}
+
void java_lang_String::print(Handle java_string, outputStream* st) {
oop obj = java_string();
assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
@@ -512,22 +535,22 @@ void java_lang_Class::fixup_mirror(KlassHandle k, TRAPS) {
// If the offset was read from the shared archive, it was fixed up already
if (!k->is_shared()) {
- if (k->oop_is_instance()) {
- // During bootstrap, java.lang.Class wasn't loaded so static field
- // offsets were computed without the size added it. Go back and
- // update all the static field offsets to included the size.
- for (JavaFieldStream fs(InstanceKlass::cast(k())); !fs.done(); fs.next()) {
- if (fs.access_flags().is_static()) {
- int real_offset = fs.offset() + InstanceMirrorKlass::offset_of_static_fields();
- fs.set_offset(real_offset);
+ if (k->oop_is_instance()) {
+ // During bootstrap, java.lang.Class wasn't loaded so static field
+ // offsets were computed without the size added in. Go back and
+ // update all the static field offsets to include the size.
+ for (JavaFieldStream fs(InstanceKlass::cast(k())); !fs.done(); fs.next()) {
+ if (fs.access_flags().is_static()) {
+ int real_offset = fs.offset() + InstanceMirrorKlass::offset_of_static_fields();
+ fs.set_offset(real_offset);
+ }
}
}
}
- }
- create_mirror(k, CHECK);
+ create_mirror(k, Handle(NULL), CHECK);
}
-oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
+oop java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
assert(k->java_mirror() == NULL, "should only assign mirror once");
// Use this moment of initialization to cache modifier_flags also,
// to support Class.getModifiers(). Instance classes recalculate
@@ -563,6 +586,16 @@ oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
set_array_klass(comp_mirror(), k());
} else {
assert(k->oop_is_instance(), "Must be");
+
+ // Allocate a simple java object for a lock.
+ // This needs to be a java object because during class initialization
+ // it can be held across a java call.
+ typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_NULL);
+ set_init_lock(mirror(), r);
+
+ // Set protection domain also
+ set_protection_domain(mirror(), protection_domain());
+
// Initialize static fields
InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL);
}
@@ -597,6 +630,34 @@ void java_lang_Class::set_static_oop_field_count(oop java_class, int size) {
java_class->int_field_put(_static_oop_field_count_offset, size);
}
+oop java_lang_Class::protection_domain(oop java_class) {
+ assert(_protection_domain_offset != 0, "must be set");
+ return java_class->obj_field(_protection_domain_offset);
+}
+void java_lang_Class::set_protection_domain(oop java_class, oop pd) {
+ assert(_protection_domain_offset != 0, "must be set");
+ java_class->obj_field_put(_protection_domain_offset, pd);
+}
+
+oop java_lang_Class::init_lock(oop java_class) {
+ assert(_init_lock_offset != 0, "must be set");
+ return java_class->obj_field(_init_lock_offset);
+}
+void java_lang_Class::set_init_lock(oop java_class, oop init_lock) {
+ assert(_init_lock_offset != 0, "must be set");
+ java_class->obj_field_put(_init_lock_offset, init_lock);
+}
+
+objArrayOop java_lang_Class::signers(oop java_class) {
+ assert(_signers_offset != 0, "must be set");
+ return (objArrayOop)java_class->obj_field(_signers_offset);
+}
+void java_lang_Class::set_signers(oop java_class, objArrayOop signers) {
+ assert(_signers_offset != 0, "must be set");
+ java_class->obj_field_put(_signers_offset, (oop)signers);
+}
+
+
oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
// This should be improved by adding a field at the Java level or by
// introducing a new VM klass (see comment in ClassFileParser)
@@ -923,7 +984,7 @@ void java_lang_Thread::set_thread_status(oop java_thread,
// Read thread status value from threadStatus field in java.lang.Thread java class.
java_lang_Thread::ThreadStatus java_lang_Thread::get_thread_status(oop java_thread) {
- assert(Thread::current()->is_VM_thread() ||
+ assert(Thread::current()->is_Watcher_thread() || Thread::current()->is_VM_thread() ||
JavaThread::current()->thread_state() == _thread_in_vm,
"Java Thread is not running in vm");
// The threadStatus is only present starting in 1.5
@@ -2519,6 +2580,26 @@ void java_lang_ref_SoftReference::set_clock(jlong value) {
*offset = value;
}
+// Support for java_lang_invoke_DirectMethodHandle
+
+int java_lang_invoke_DirectMethodHandle::_member_offset;
+
+oop java_lang_invoke_DirectMethodHandle::member(oop dmh) {
+ oop member_name = NULL;
+ bool is_dmh = dmh->is_oop() && java_lang_invoke_DirectMethodHandle::is_instance(dmh);
+ assert(is_dmh, "a DirectMethodHandle oop is expected");
+ if (is_dmh) {
+ member_name = dmh->obj_field(member_offset_in_bytes());
+ }
+ return member_name;
+}
+
+void java_lang_invoke_DirectMethodHandle::compute_offsets() {
+ Klass* klass_oop = SystemDictionary::DirectMethodHandle_klass();
+ if (klass_oop != NULL && EnableInvokeDynamic) {
+ compute_offset(_member_offset, klass_oop, vmSymbols::member_name(), vmSymbols::java_lang_invoke_MemberName_signature());
+ }
+}
// Support for java_lang_invoke_MethodHandle
@@ -2787,6 +2868,7 @@ void java_lang_invoke_CallSite::compute_offsets() {
int java_security_AccessControlContext::_context_offset = 0;
int java_security_AccessControlContext::_privilegedContext_offset = 0;
int java_security_AccessControlContext::_isPrivileged_offset = 0;
+int java_security_AccessControlContext::_isAuthorized_offset = -1;
void java_security_AccessControlContext::compute_offsets() {
assert(_isPrivileged_offset == 0, "offsets should be initialized only once");
@@ -2807,9 +2889,20 @@ void java_security_AccessControlContext::compute_offsets() {
fatal("Invalid layout of java.security.AccessControlContext");
}
_isPrivileged_offset = fd.offset();
+
+ // The offset may not be present when bootstrapping with an older JDK.
+ if (ik->find_local_field(vmSymbols::isAuthorized_name(), vmSymbols::bool_signature(), &fd)) {
+ _isAuthorized_offset = fd.offset();
+ }
}
+bool java_security_AccessControlContext::is_authorized(Handle context) {
+ assert(context.not_null() && context->klass() == SystemDictionary::AccessControlContext_klass(), "Invalid type");
+ assert(_isAuthorized_offset != -1, "should be set");
+ return context->bool_field(_isAuthorized_offset) != 0;
+}
+
oop java_security_AccessControlContext::create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS) {
assert(_isPrivileged_offset != 0, "offsets should have been initialized");
// Ensure klass is initialized
@@ -2820,6 +2913,10 @@ oop java_security_AccessControlContext::create(objArrayHandle context, bool isPr
result->obj_field_put(_context_offset, context());
result->obj_field_put(_privilegedContext_offset, privileged_context());
result->bool_field_put(_isPrivileged_offset, isPrivileged);
+ // Whitelist AccessControlContexts created by the JVM, if the isAuthorized field is present
+ if (_isAuthorized_offset != -1) {
+ result->bool_field_put(_isAuthorized_offset, true);
+ }
return result;
}
@@ -2929,11 +3026,23 @@ int java_lang_System::err_offset_in_bytes() {
}
+bool java_lang_System::has_security_manager() {
+ InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::System_klass());
+ address addr = ik->static_field_addr(static_security_offset);
+ if (UseCompressedOops) {
+ return oopDesc::load_decode_heap_oop((narrowOop *)addr) != NULL;
+ } else {
+ return oopDesc::load_decode_heap_oop((oop*)addr) != NULL;
+ }
+}
int java_lang_Class::_klass_offset;
int java_lang_Class::_array_klass_offset;
int java_lang_Class::_oop_size_offset;
int java_lang_Class::_static_oop_field_count_offset;
+int java_lang_Class::_protection_domain_offset;
+int java_lang_Class::_init_lock_offset;
+int java_lang_Class::_signers_offset;
GrowableArray<Klass*>* java_lang_Class::_fixup_mirror_list = NULL;
int java_lang_Throwable::backtrace_offset;
int java_lang_Throwable::detailMessage_offset;
@@ -2989,6 +3098,7 @@ int java_lang_ClassLoader::parent_offset;
int java_lang_System::static_in_offset;
int java_lang_System::static_out_offset;
int java_lang_System::static_err_offset;
+int java_lang_System::static_security_offset;
int java_lang_StackTraceElement::declaringClass_offset;
int java_lang_StackTraceElement::methodName_offset;
int java_lang_StackTraceElement::fileName_offset;
@@ -3114,6 +3224,7 @@ void JavaClasses::compute_hard_coded_offsets() {
java_lang_System::static_in_offset = java_lang_System::hc_static_in_offset * x;
java_lang_System::static_out_offset = java_lang_System::hc_static_out_offset * x;
java_lang_System::static_err_offset = java_lang_System::hc_static_err_offset * x;
+ java_lang_System::static_security_offset = java_lang_System::hc_static_security_offset * x;
// java_lang_StackTraceElement
java_lang_StackTraceElement::declaringClass_offset = java_lang_StackTraceElement::hc_declaringClass_offset * x + header;
@@ -3137,6 +3248,7 @@ void JavaClasses::compute_offsets() {
java_lang_ThreadGroup::compute_offsets();
if (EnableInvokeDynamic) {
java_lang_invoke_MethodHandle::compute_offsets();
+ java_lang_invoke_DirectMethodHandle::compute_offsets();
java_lang_invoke_MemberName::compute_offsets();
java_lang_invoke_LambdaForm::compute_offsets();
java_lang_invoke_MethodType::compute_offsets();
@@ -3313,6 +3425,7 @@ void JavaClasses::check_offsets() {
CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, in, "Ljava/io/InputStream;");
CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, out, "Ljava/io/PrintStream;");
CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, err, "Ljava/io/PrintStream;");
+ CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, security, "Ljava/lang/SecurityManager;");
// java.lang.StackTraceElement
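The new has_security_manager above loads a static oop field through either a narrowOop* or an oop* depending on UseCompressedOops. A simplified sketch of that dual-width load, with VM types replaced by plain integers (the decode parameters and names here are illustrative; the real code goes through oopDesc::load_decode_heap_oop):

    #include <stdint.h>

    typedef uint32_t  narrowOop;  // compressed heap reference
    typedef uintptr_t oopWord;    // full-width heap reference

    // Illustrative decode: base + (narrow << shift); NULL stays NULL.
    static oopWord decode(narrowOop v, oopWord heap_base, int shift) {
      return (v == 0) ? 0 : heap_base + ((oopWord)v << shift);
    }

    // Mirrors the shape of has_security_manager(): the field is only 32 bits
    // wide when compressed oops are on, so the read must branch on the mode.
    static bool static_oop_is_set(const void* field_addr, bool use_compressed,
                                  oopWord heap_base, int shift) {
      if (use_compressed) {
        return decode(*(const narrowOop*)field_addr, heap_base, shift) != 0;
      }
      return *(const oopWord*)field_addr != 0;
    }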
diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp
index 8e4dd46f3..ffccf2f0e 100644
--- a/src/share/vm/classfile/javaClasses.hpp
+++ b/src/share/vm/classfile/javaClasses.hpp
@@ -182,6 +182,7 @@ class java_lang_String : AllStatic {
static unsigned int hash_string(oop java_string);
static bool equals(oop java_string, jchar* chars, int len);
+ static bool equals(oop str1, oop str2);
// Conversion between '.' and '/' formats
static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
@@ -208,7 +209,10 @@ class java_lang_String : AllStatic {
macro(java_lang_Class, klass, intptr_signature, false) \
macro(java_lang_Class, array_klass, intptr_signature, false) \
macro(java_lang_Class, oop_size, int_signature, false) \
- macro(java_lang_Class, static_oop_field_count, int_signature, false)
+ macro(java_lang_Class, static_oop_field_count, int_signature, false) \
+ macro(java_lang_Class, protection_domain, object_signature, false) \
+ macro(java_lang_Class, init_lock, object_signature, false) \
+ macro(java_lang_Class, signers, object_signature, false)
class java_lang_Class : AllStatic {
friend class VMStructs;
@@ -222,15 +226,21 @@ class java_lang_Class : AllStatic {
static int _oop_size_offset;
static int _static_oop_field_count_offset;
+ static int _protection_domain_offset;
+ static int _init_lock_offset;
+ static int _signers_offset;
+
static bool offsets_computed;
static int classRedefinedCount_offset;
static GrowableArray<Klass*>* _fixup_mirror_list;
+ static void set_init_lock(oop java_class, oop init_lock);
+ static void set_protection_domain(oop java_class, oop protection_domain);
public:
static void compute_offsets();
// Instance creation
- static oop create_mirror(KlassHandle k, TRAPS);
+ static oop create_mirror(KlassHandle k, Handle protection_domain, TRAPS);
static void fixup_mirror(KlassHandle k, TRAPS);
static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
// Conversion
@@ -262,6 +272,12 @@ class java_lang_Class : AllStatic {
static int classRedefinedCount(oop the_class_mirror);
static void set_classRedefinedCount(oop the_class_mirror, int value);
+ // Support for embedded per-class oops
+ static oop protection_domain(oop java_class);
+ static oop init_lock(oop java_class);
+ static objArrayOop signers(oop java_class);
+ static void set_signers(oop java_class, objArrayOop signers);
+
static int oop_size(oop java_class);
static void set_oop_size(oop java_class, int size);
static int static_oop_field_count(oop java_class);
@@ -961,6 +977,32 @@ class java_lang_invoke_MethodHandle: AllStatic {
static int form_offset_in_bytes() { return _form_offset; }
};
+// Interface to java.lang.invoke.DirectMethodHandle objects
+
+class java_lang_invoke_DirectMethodHandle: AllStatic {
+ friend class JavaClasses;
+
+ private:
+ static int _member_offset; // the MemberName of this DMH
+
+ static void compute_offsets();
+
+ public:
+ // Accessors
+ static oop member(oop mh);
+
+ // Testers
+ static bool is_subclass(Klass* klass) {
+ return klass->is_subclass_of(SystemDictionary::DirectMethodHandle_klass());
+ }
+ static bool is_instance(oop obj) {
+ return obj != NULL && is_subclass(obj->klass());
+ }
+
+ // Accessors for code generation:
+ static int member_offset_in_bytes() { return _member_offset; }
+};
+
// Interface to java.lang.invoke.LambdaForm objects
// (These are a private interface for managing adapter code generation.)
@@ -1152,11 +1194,14 @@ class java_security_AccessControlContext: AllStatic {
static int _context_offset;
static int _privilegedContext_offset;
static int _isPrivileged_offset;
+ static int _isAuthorized_offset;
static void compute_offsets();
public:
static oop create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS);
+ static bool is_authorized(Handle context);
+
// Debugging/initialization
friend class JavaClasses;
};
@@ -1216,18 +1261,22 @@ class java_lang_System : AllStatic {
enum {
hc_static_in_offset = 0,
hc_static_out_offset = 1,
- hc_static_err_offset = 2
+ hc_static_err_offset = 2,
+ hc_static_security_offset = 3
};
static int static_in_offset;
static int static_out_offset;
static int static_err_offset;
+ static int static_security_offset;
public:
static int in_offset_in_bytes();
static int out_offset_in_bytes();
static int err_offset_in_bytes();
+ static bool has_security_manager();
+
// Debugging
friend class JavaClasses;
};
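The hc_static_* enum values above are field indices, not byte offsets; JavaClasses::compute_hard_coded_offsets (in the .cpp diff earlier) multiplies them by a scale factor x, which appears to be the heap-oop size. A worked example under that assumption:

    // Assuming x == heapOopSize. With 8-byte heap oops:
    //   static_security_offset = hc_static_security_offset * 8 = 3 * 8 = 24
    // With 4-byte compressed oops the same index yields 3 * 4 = 12.
    const int heap_oop_size             = 8;  // assumed, uncompressed oops
    const int hc_static_security_offset = 3;  // from the enum above
    const int static_security_offset =
        hc_static_security_offset * heap_oop_size;  // byte offset of the field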
diff --git a/src/share/vm/classfile/symbolTable.cpp b/src/share/vm/classfile/symbolTable.cpp
index b36432a3c..62988c7bf 100644
--- a/src/share/vm/classfile/symbolTable.cpp
+++ b/src/share/vm/classfile/symbolTable.cpp
@@ -35,7 +35,6 @@
#include "oops/oop.inline2.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/hashtable.inline.hpp"
-#include "utilities/numberSeq.hpp"
// --------------------------------------------------------------------------
@@ -342,7 +341,7 @@ Symbol* SymbolTable::new_permanent_symbol(const char* name, TRAPS) {
Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
unsigned int hashValue_arg, bool c_heap, TRAPS) {
- assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+ assert(!Universe::heap()->is_in_reserved(name),
"proposed name of symbol must be stable");
// Don't allow symbols to be created which cannot fit in a Symbol*.
@@ -451,21 +450,7 @@ void SymbolTable::verify() {
}
void SymbolTable::dump(outputStream* st) {
- NumberSeq summary;
- for (int i = 0; i < the_table()->table_size(); ++i) {
- int count = 0;
- for (HashtableEntry<Symbol*, mtSymbol>* e = the_table()->bucket(i);
- e != NULL; e = e->next()) {
- count++;
- }
- summary.add((double)count);
- }
- st->print_cr("SymbolTable statistics:");
- st->print_cr("Number of buckets : %7d", summary.num());
- st->print_cr("Average bucket size : %7.0f", summary.avg());
- st->print_cr("Variance of bucket size : %7.0f", summary.variance());
- st->print_cr("Std. dev. of bucket size: %7.0f", summary.sd());
- st->print_cr("Maximum bucket size : %7.0f", summary.maximum());
+ the_table()->dump_table(st, "SymbolTable");
}
@@ -613,6 +598,8 @@ StringTable* StringTable::_the_table = NULL;
bool StringTable::_needs_rehashing = false;
+volatile int StringTable::_parallel_claimed_idx = 0;
+
// Pick hashing algorithm
unsigned int StringTable::hash_string(const jchar* s, int len) {
return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) :
@@ -698,7 +685,7 @@ oop StringTable::intern(Handle string_or_null, jchar* name,
if (found_string != NULL) return found_string;
debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
- assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+ assert(!Universe::heap()->is_in_reserved(name),
"proposed name of symbol must be stable");
Handle string;
@@ -752,7 +739,7 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
return result;
}
-void StringTable::unlink(BoolObjectClosure* is_alive) {
+void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
// Readers of the table are unlocked, so we should only be removing
// entries at a safepoint.
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
@@ -760,45 +747,68 @@ void StringTable::unlink(BoolObjectClosure* is_alive) {
HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
while (entry != NULL) {
- // Shared entries are normally at the end of the bucket and if we run into
- // a shared entry, then there is nothing more to remove. However, if we
- // have rehashed the table, then the shared entries are no longer at the
- // end of the bucket.
- if (entry->is_shared() && !use_alternate_hashcode()) {
- break;
- }
- assert(entry->literal() != NULL, "just checking");
- if (entry->is_shared() || is_alive->do_object_b(entry->literal())) {
+ assert(!entry->is_shared(), "CDS not used for the StringTable");
+
+ if (is_alive->do_object_b(entry->literal())) {
+ if (f != NULL) {
+ f->do_oop((oop*)entry->literal_addr());
+ }
p = entry->next_addr();
} else {
*p = entry->next();
the_table()->free_entry(entry);
}
- entry = (HashtableEntry<oop, mtSymbol>*)HashtableEntry<oop, mtSymbol>::make_ptr(*p);
+ entry = *p;
}
}
}
-void StringTable::oops_do(OopClosure* f) {
- for (int i = 0; i < the_table()->table_size(); ++i) {
- HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
+void StringTable::buckets_do(OopClosure* f, int start_idx, int end_idx) {
+ const int limit = the_table()->table_size();
+
+ assert(0 <= start_idx && start_idx <= limit,
+ err_msg("start_idx (" INT32_FORMAT ") oob?", start_idx));
+ assert(0 <= end_idx && end_idx <= limit,
+ err_msg("end_idx (" INT32_FORMAT ") oob?", end_idx));
+ assert(start_idx <= end_idx,
+ err_msg("Ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
+ start_idx, end_idx));
+
+ for (int i = start_idx; i < end_idx; i += 1) {
HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
while (entry != NULL) {
+ assert(!entry->is_shared(), "CDS not used for the StringTable");
+
f->do_oop((oop*)entry->literal_addr());
- // Did the closure remove the literal from the table?
- if (entry->literal() == NULL) {
- assert(!entry->is_shared(), "immutable hashtable entry?");
- *p = entry->next();
- the_table()->free_entry(entry);
- } else {
- p = entry->next_addr();
- }
- entry = (HashtableEntry<oop, mtSymbol>*)HashtableEntry<oop, mtSymbol>::make_ptr(*p);
+ entry = entry->next();
}
}
}
+void StringTable::oops_do(OopClosure* f) {
+ buckets_do(f, 0, the_table()->table_size());
+}
+
+void StringTable::possibly_parallel_oops_do(OopClosure* f) {
+ const int ClaimChunkSize = 32;
+ const int limit = the_table()->table_size();
+
+ for (;;) {
+ // Grab next set of buckets to scan
+ int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
+ if (start_idx >= limit) {
+ // End of table
+ break;
+ }
+
+ int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
+ buckets_do(f, start_idx, end_idx);
+ }
+}
+
+// This verification is part of Universe::verify() and needs to be quick.
+// See StringTable::verify_and_compare_entries() below for exhaustive verification.
void StringTable::verify() {
for (int i = 0; i < the_table()->table_size(); ++i) {
HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
@@ -814,23 +824,165 @@ void StringTable::verify() {
}
void StringTable::dump(outputStream* st) {
- NumberSeq summary;
- for (int i = 0; i < the_table()->table_size(); ++i) {
- HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
- int count = 0;
- for ( ; p != NULL; p = p->next()) {
- count++;
- }
- summary.add((double)count);
+ the_table()->dump_table(st, "StringTable");
+}
+
+StringTable::VerifyRetTypes StringTable::compare_entries(
+ int bkt1, int e_cnt1,
+ HashtableEntry<oop, mtSymbol>* e_ptr1,
+ int bkt2, int e_cnt2,
+ HashtableEntry<oop, mtSymbol>* e_ptr2) {
+ // These entries are sanity checked by verify_and_compare_entries()
+ // before this function is called.
+ oop str1 = e_ptr1->literal();
+ oop str2 = e_ptr2->literal();
+
+ if (str1 == str2) {
+ tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") "
+ "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]",
+ str1, bkt1, e_cnt1, bkt2, e_cnt2);
+ return _verify_fail_continue;
+ }
+
+ if (java_lang_String::equals(str1, str2)) {
+ tty->print_cr("ERROR: identical String values in entry @ "
+ "bucket[%d][%d] and entry @ bucket[%d][%d]",
+ bkt1, e_cnt1, bkt2, e_cnt2);
+ return _verify_fail_continue;
}
- st->print_cr("StringTable statistics:");
- st->print_cr("Number of buckets : %7d", summary.num());
- st->print_cr("Average bucket size : %7.0f", summary.avg());
- st->print_cr("Variance of bucket size : %7.0f", summary.variance());
- st->print_cr("Std. dev. of bucket size: %7.0f", summary.sd());
- st->print_cr("Maximum bucket size : %7.0f", summary.maximum());
+
+ return _verify_pass;
}
+StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt,
+ HashtableEntry<oop, mtSymbol>* e_ptr,
+ StringTable::VerifyMesgModes mesg_mode) {
+
+ VerifyRetTypes ret = _verify_pass; // be optimistic
+
+ oop str = e_ptr->literal();
+ if (str == NULL) {
+ if (mesg_mode == _verify_with_mesgs) {
+ tty->print_cr("ERROR: NULL oop value in entry @ bucket[%d][%d]", bkt,
+ e_cnt);
+ }
+ // NULL oop means no more verifications are possible
+ return _verify_fail_done;
+ }
+
+ if (str->klass() != SystemDictionary::String_klass()) {
+ if (mesg_mode == _verify_with_mesgs) {
+ tty->print_cr("ERROR: oop is not a String in entry @ bucket[%d][%d]",
+ bkt, e_cnt);
+ }
+ // not a String means no more verifications are possible
+ return _verify_fail_done;
+ }
+
+ unsigned int h = java_lang_String::hash_string(str);
+ if (e_ptr->hash() != h) {
+ if (mesg_mode == _verify_with_mesgs) {
+ tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], "
+ "bkt_hash=%d, str_hash=%d", bkt, e_cnt, e_ptr->hash(), h);
+ }
+ ret = _verify_fail_continue;
+ }
+
+ if (the_table()->hash_to_index(h) != bkt) {
+ if (mesg_mode == _verify_with_mesgs) {
+ tty->print_cr("ERROR: wrong index value for entry @ bucket[%d][%d], "
+ "str_hash=%d, hash_to_index=%d", bkt, e_cnt, h,
+ the_table()->hash_to_index(h));
+ }
+ ret = _verify_fail_continue;
+ }
+
+ return ret;
+}
+
+// See StringTable::verify() above for the quick verification that is
+// part of Universe::verify(). This verification is exhaustive and
+// reports on every issue that is found. StringTable::verify() only
+// reports on the first issue that is found.
+//
+// StringTable::verify_entry() checks:
+// - oop value != NULL (same as verify())
+// - oop value is a String
+// - hash(String) == hash in entry (same as verify())
+// - index for hash == index of entry (same as verify())
+//
+// StringTable::compare_entries() checks:
+// - oops are unique across all entries
+// - String values are unique across all entries
+//
+int StringTable::verify_and_compare_entries() {
+ assert(StringTable_lock->is_locked(), "sanity check");
+
+ int fail_cnt = 0;
+
+ // first, verify all the entries individually:
+ for (int bkt = 0; bkt < the_table()->table_size(); bkt++) {
+ HashtableEntry<oop, mtSymbol>* e_ptr = the_table()->bucket(bkt);
+ for (int e_cnt = 0; e_ptr != NULL; e_ptr = e_ptr->next(), e_cnt++) {
+ VerifyRetTypes ret = verify_entry(bkt, e_cnt, e_ptr, _verify_with_mesgs);
+ if (ret != _verify_pass) {
+ fail_cnt++;
+ }
+ }
+ }
+
+ // Optimization: if the above check did not find any failures, then
+ // the comparison loop below does not need to call verify_entry()
+ // before calling compare_entries(). If there were failures, then we
+ // have to call verify_entry() to see if the entry can be passed to
+ // compare_entries() safely. When we call verify_entry() in the loop
+ // below, we do so quietly to avoid duplicate messages and we don't
+ // increment fail_cnt because the failures have already been counted.
+ bool need_entry_verify = (fail_cnt != 0);
+
+ // second, verify all entries relative to each other:
+ for (int bkt1 = 0; bkt1 < the_table()->table_size(); bkt1++) {
+ HashtableEntry<oop, mtSymbol>* e_ptr1 = the_table()->bucket(bkt1);
+ for (int e_cnt1 = 0; e_ptr1 != NULL; e_ptr1 = e_ptr1->next(), e_cnt1++) {
+ if (need_entry_verify) {
+ VerifyRetTypes ret = verify_entry(bkt1, e_cnt1, e_ptr1,
+ _verify_quietly);
+ if (ret == _verify_fail_done) {
+ // cannot use the current entry to compare against other entries
+ continue;
+ }
+ }
+
+ for (int bkt2 = bkt1; bkt2 < the_table()->table_size(); bkt2++) {
+ HashtableEntry<oop, mtSymbol>* e_ptr2 = the_table()->bucket(bkt2);
+ int e_cnt2;
+ for (e_cnt2 = 0; e_ptr2 != NULL; e_ptr2 = e_ptr2->next(), e_cnt2++) {
+ if (bkt1 == bkt2 && e_cnt2 <= e_cnt1) {
+ // skip the entries up to and including the one that
+ // we're comparing against
+ continue;
+ }
+
+ if (need_entry_verify) {
+ VerifyRetTypes ret = verify_entry(bkt2, e_cnt2, e_ptr2,
+ _verify_quietly);
+ if (ret == _verify_fail_done) {
+ // cannot compare against this entry
+ continue;
+ }
+ }
+
+ // compare two entries, report and count any failures:
+ if (compare_entries(bkt1, e_cnt1, e_ptr1, bkt2, e_cnt2, e_ptr2)
+ != _verify_pass) {
+ fail_cnt++;
+ }
+ }
+ }
+ }
+ }
+ return fail_cnt;
+}
// Create a new table and using alternate hash code, populate the new table
// with the existing strings. Set flag to use the alternate hash code afterwards.
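The chunk-claiming loop in possibly_parallel_oops_do above lets any number of GC worker threads share one table scan without locks: each worker atomically bumps _parallel_claimed_idx and owns the buckets it claimed. A self-contained sketch using std::atomic in place of HotSpot's Atomic::add (note that Atomic::add returns the new value, hence the "- ClaimChunkSize" correction in the patch, while fetch_add returns the previous value and needs none):

    #include <atomic>
    #include <algorithm>

    static std::atomic<int> claimed_idx(0);  // shared claim high-water mark

    // Each caller (worker thread) claims chunks of buckets until the table
    // is exhausted; no two workers ever visit the same bucket.
    template <typename Visitor>
    void parallel_scan(int table_size, int chunk, Visitor visit_bucket) {
      for (;;) {
        int start = claimed_idx.fetch_add(chunk);  // value before the add
        if (start >= table_size) break;            // end of table
        int end = std::min(table_size, start + chunk);
        for (int i = start; i < end; i++) {
          visit_bucket(i);
        }
      }
    }

As in the patch, the index is never reset inside the loop; the caller must clear it between scans (clear_parallel_claimed_index in the .hpp diff below).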
diff --git a/src/share/vm/classfile/symbolTable.hpp b/src/share/vm/classfile/symbolTable.hpp
index a2896382f..dc7d0337a 100644
--- a/src/share/vm/classfile/symbolTable.hpp
+++ b/src/share/vm/classfile/symbolTable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -246,12 +246,19 @@ private:
// Set if one bucket is out of balance due to hash algorithm deficiency
static bool _needs_rehashing;
+ // Claimed high water mark for parallel chunked scanning
+ static volatile int _parallel_claimed_idx;
+
static oop intern(Handle string_or_null, jchar* chars, int length, TRAPS);
oop basic_add(int index, Handle string_or_null, jchar* name, int len,
unsigned int hashValue, TRAPS);
oop lookup(int index, jchar* chars, int length, unsigned int hashValue);
+ // Apply the given oop closure to the entries in the buckets
+ // in the range [start_idx, end_idx).
+ static void buckets_do(OopClosure* f, int start_idx, int end_idx);
+
StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
sizeof (HashtableEntry<oop, mtSymbol>)) {}
@@ -272,11 +279,17 @@ public:
// GC support
// Delete pointers to otherwise-unreachable objects.
- static void unlink(BoolObjectClosure* cl);
+ static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f);
+ static void unlink(BoolObjectClosure* cl) {
+ unlink_or_oops_do(cl, NULL);
+ }
- // Invoke "f->do_oop" on the locations of all oops in the table.
+ // Serially invoke "f->do_oop" on the locations of all oops in the table.
static void oops_do(OopClosure* f);
+ // Possibly parallel version of the above
+ static void possibly_parallel_oops_do(OopClosure* f);
+
// Hashing algorithm, used as the hash value used by the
// StringTable for bucket selection and comparison (stored in the
// HashtableEntry structures). This is used in the String.intern() method.
@@ -298,6 +311,26 @@ public:
static void verify();
static void dump(outputStream* st);
+ enum VerifyMesgModes {
+ _verify_quietly = 0,
+ _verify_with_mesgs = 1
+ };
+
+ enum VerifyRetTypes {
+ _verify_pass = 0,
+ _verify_fail_continue = 1,
+ _verify_fail_done = 2
+ };
+
+ static VerifyRetTypes compare_entries(int bkt1, int e_cnt1,
+ HashtableEntry<oop, mtSymbol>* e_ptr1,
+ int bkt2, int e_cnt2,
+ HashtableEntry<oop, mtSymbol>* e_ptr2);
+ static VerifyRetTypes verify_entry(int bkt, int e_cnt,
+ HashtableEntry<oop, mtSymbol>* e_ptr,
+ VerifyMesgModes mesg_mode);
+ static int verify_and_compare_entries();
+
// Sharing
static void copy_buckets(char** top, char*end) {
the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);
@@ -312,5 +345,8 @@ public:
// Rehash the symbol table if it gets out of balance
static void rehash_table();
static bool needs_rehashing() { return _needs_rehashing; }
+
+ // Parallel chunked scanning
+ static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
};
#endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
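Because _parallel_claimed_idx only ever moves forward during a scan, a collector has to reset it before each parallel oops_do phase. A hedged usage sketch (the GC-side call site is hypothetical; only the two StringTable methods are from this patch):

    // Hypothetical GC-side usage:
    //   StringTable::clear_parallel_claimed_index();          // once, before workers start
    //   // then, concurrently in each worker thread:
    //   StringTable::possibly_parallel_oops_do(&keep_alive);  // drains chunks until done

Forgetting the reset means every worker immediately sees an already-exhausted table and the scan silently visits nothing, so the reset is a correctness requirement, not an optimization.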
diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp
index 3aa1b77c9..c0d50ca9a 100644
--- a/src/share/vm/classfile/systemDictionary.cpp
+++ b/src/share/vm/classfile/systemDictionary.cpp
@@ -56,6 +56,11 @@
#include "services/classLoadingService.hpp"
#include "services/threadService.hpp"
+#if INCLUDE_TRACE
+ #include "trace/tracing.hpp"
+ #include "trace/traceMacros.hpp"
+#endif
+
Dictionary* SystemDictionary::_dictionary = NULL;
PlaceholderTable* SystemDictionary::_placeholders = NULL;
@@ -586,10 +591,15 @@ instanceKlassHandle SystemDictionary::handle_parallel_super_load(
}
-Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle class_loader, Handle protection_domain, TRAPS) {
+Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
+ Handle class_loader,
+ Handle protection_domain,
+ TRAPS) {
assert(name != NULL && !FieldType::is_array(name) &&
!FieldType::is_obj(name), "invalid class name");
+ TracingTime class_load_start_time = Tracing::time();
+
// UseNewReflection
// Fix for 4474172; see evaluation for more details
class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
@@ -804,8 +814,9 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
// during compilations.
MutexLocker mu(Compile_lock, THREAD);
update_dictionary(d_index, d_hash, p_index, p_hash,
- k, class_loader, THREAD);
+ k, class_loader, THREAD);
}
+
if (JvmtiExport::should_post_class_load()) {
Thread *thread = THREAD;
assert(thread->is_Java_thread(), "thread->is_Java_thread()");
@@ -861,8 +872,8 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
// This brackets the SystemDictionary updates for both defining
// and initiating loaders
MutexLocker mu(SystemDictionary_lock, THREAD);
- placeholders()->find_and_remove(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, THREAD);
- SystemDictionary_lock->notify_all();
+ placeholders()->find_and_remove(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, THREAD);
+ SystemDictionary_lock->notify_all();
}
}
@@ -870,6 +881,8 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
return NULL;
}
+ post_class_load_event(class_load_start_time, k, class_loader);
+
#ifdef ASSERT
{
ClassLoaderData* loader_data = k->class_loader_data();
@@ -993,6 +1006,8 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
TRAPS) {
TempNewSymbol parsed_name = NULL;
+ TracingTime class_load_start_time = Tracing::time();
+
ClassLoaderData* loader_data;
if (host_klass.not_null()) {
// Create a new CLD for anonymous class, that uses the same class loader
@@ -1048,6 +1063,8 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
assert(THREAD->is_Java_thread(), "thread->is_Java_thread()");
JvmtiExport::post_class_load((JavaThread *) THREAD, k());
}
+
+ post_class_load_event(class_load_start_time, k, class_loader);
}
assert(host_klass.not_null() || cp_patches == NULL,
"cp_patches only found with host_klass");
@@ -1435,6 +1452,7 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {
JvmtiExport::post_class_load((JavaThread *) THREAD, k());
}
+
}
// Support parallel classloading
@@ -1678,6 +1696,7 @@ int SystemDictionary::calculate_systemdictionary_size(int classcount) {
}
return newsize;
}
+
// Assumes classes in the SystemDictionary are only unloaded at a safepoint
// Note: anonymous classes are not in the SD.
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
@@ -1747,13 +1766,6 @@ void SystemDictionary::classes_do(void f(Klass*, ClassLoaderData*)) {
dictionary()->classes_do(f);
}
-// All classes, and their class loaders
-// (added for helpers that use HandleMarks and ResourceMarks)
-// Don't iterate over placeholders
-void SystemDictionary::classes_do(void f(Klass*, ClassLoaderData*, TRAPS), TRAPS) {
- dictionary()->classes_do(f, CHECK);
-}
-
void SystemDictionary::placeholders_do(void f(Symbol*)) {
placeholders()->entries_do(f);
}
@@ -2031,12 +2043,6 @@ void SystemDictionary::update_dictionary(int d_index, unsigned int d_hash,
}
}
- // Assign a classid if one has not already been assigned. The
- // counter does not need to be atomically incremented since this
- // is only done while holding the SystemDictionary_lock.
- // All loaded classes get a unique ID.
- TRACE_INIT_ID(k);
-
// Make a new system dictionary entry.
Klass* sd_check = find_class(d_index, d_hash, name, loader_data);
if (sd_check == NULL) {
@@ -2619,6 +2625,27 @@ void SystemDictionary::verify_obj_klass_present(Symbol* class_name,
"Loaded klasses should be in SystemDictionary");
}
+// Utility function for posting the class load event.
+void SystemDictionary::post_class_load_event(TracingTime start_time,
+ instanceKlassHandle k,
+ Handle initiating_loader) {
+#if INCLUDE_TRACE
+ EventClassLoad event(UNTIMED);
+ if (event.should_commit()) {
+ event.set_endtime(Tracing::time());
+ event.set_starttime(start_time);
+ event.set_loadedClass(k());
+ oop defining_class_loader = k->class_loader();
+ event.set_definingClassLoader(defining_class_loader != NULL ?
+ defining_class_loader->klass() : (Klass*)NULL);
+ oop class_loader = initiating_loader.is_null() ? (oop)NULL : initiating_loader();
+ event.set_initiatingClassLoader(class_loader != NULL ?
+ class_loader->klass() : (Klass*)NULL);
+ event.commit();
+ }
+#endif /* INCLUDE_TRACE */
+}
+
#ifndef PRODUCT
// statistics code
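post_class_load_event above follows the common event-tracing shape: construct the event UNTIMED, gate all payload work behind should_commit() so disabled tracing costs almost nothing, backfill the start and end timestamps, then commit. A generic sketch of that shape (the Event type here is a stand-in, not the HotSpot tracing API):

    // Stand-in event type illustrating the UNTIMED commit pattern.
    struct Event {
      bool enabled;
      long start_time, end_time;
      bool should_commit() const { return enabled; }
      void set_starttime(long t) { start_time = t; }
      void set_endtime(long t)   { end_time = t; }
      void commit()              { /* write to the trace buffer */ }
    };

    void post_event(Event& e, long start, long now) {
      if (e.should_commit()) {   // cheap guard: skip field setup when disabled
        e.set_endtime(now);      // end sampled when the operation finished
        e.set_starttime(start);  // start sampled before the slow operation
        e.commit();
      }
    }

Sampling the start time unconditionally (as resolve_instance_class_or_null and parse_stream do with Tracing::time()) keeps the timed region honest even though the event itself is only materialized at the end.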
diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp
index f1ac0b4e6..85735a732 100644
--- a/src/share/vm/classfile/systemDictionary.hpp
+++ b/src/share/vm/classfile/systemDictionary.hpp
@@ -31,9 +31,11 @@
#include "oops/symbol.hpp"
#include "runtime/java.hpp"
#include "runtime/reflectionUtils.hpp"
+#include "trace/traceTime.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/hashtable.inline.hpp"
+
// The system dictionary stores all loaded classes and maps:
//
// [class name,class loader] -> class i.e. [Symbol*,oop] -> Klass*
@@ -149,6 +151,7 @@ class SymbolPropertyTable;
do_klass(reflect_CallerSensitive_klass, sun_reflect_CallerSensitive, Opt ) \
\
/* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
+ do_klass(DirectMethodHandle_klass, java_lang_invoke_DirectMethodHandle, Opt ) \
do_klass(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292 ) \
do_klass(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292 ) \
do_klass(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292 ) \
@@ -313,10 +316,7 @@ public:
static void classes_do(void f(Klass*, TRAPS), TRAPS);
// All classes, and their class loaders
static void classes_do(void f(Klass*, ClassLoaderData*));
- // All classes, and their class loaders
- // (added for helpers that use HandleMarks and ResourceMarks)
- static void classes_do(void f(Klass*, ClassLoaderData*, TRAPS), TRAPS);
- // All entries in the placeholder table and their class loaders
+
static void placeholders_do(void f(Symbol*));
// Iterate over all methods in all klasses in dictionary
@@ -639,6 +639,9 @@ private:
// Setup link to hierarchy
static void add_to_hierarchy(instanceKlassHandle k, TRAPS);
+ // event based tracing
+ static void post_class_load_event(TracingTime start_time, instanceKlassHandle k,
+ Handle initiating_loader);
// We pass in the hashtable index so we can calculate it outside of
// the SystemDictionary_lock.
diff --git a/src/share/vm/classfile/verifier.cpp b/src/share/vm/classfile/verifier.cpp
index f2ab496fc..1b8a2701b 100644
--- a/src/share/vm/classfile/verifier.cpp
+++ b/src/share/vm/classfile/verifier.cpp
@@ -191,6 +191,10 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul
bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class) {
Symbol* name = klass->name();
Klass* refl_magic_klass = SystemDictionary::reflect_MagicAccessorImpl_klass();
+ Klass* lambda_magic_klass = SystemDictionary::lambda_MagicLambdaImpl_klass();
+
+ bool is_reflect = refl_magic_klass != NULL && klass->is_subtype_of(refl_magic_klass);
+ bool is_lambda = lambda_magic_klass != NULL && klass->is_subtype_of(lambda_magic_klass);
return (should_verify_for(klass->class_loader(), should_verify_class) &&
// return if the class is a bootstrapping class
@@ -213,9 +217,9 @@ bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool shou
// sun/reflect/SerializationConstructorAccessor.
// NOTE: this is called too early in the bootstrapping process to be
// guarded by Universe::is_gte_jdk14x_version()/UseNewReflection.
- (refl_magic_klass == NULL ||
- !klass->is_subtype_of(refl_magic_klass) ||
- VerifyReflectionBytecodes)
+ // Also applies to lambda-generated code (class file version >= JDK 8)
+ (!is_reflect || VerifyReflectionBytecodes) &&
+ (!is_lambda || VerifyLambdaBytecodes)
);
}
@@ -365,7 +369,7 @@ void TypeOrigin::print_on(outputStream* str) const {
}
#endif
-void ErrorContext::details(outputStream* ss, Method* method) const {
+void ErrorContext::details(outputStream* ss, const Method* method) const {
if (is_valid()) {
ss->print_cr("");
ss->print_cr("Exception Details:");
@@ -438,7 +442,7 @@ void ErrorContext::reason_details(outputStream* ss) const {
ss->print_cr("");
}
-void ErrorContext::location_details(outputStream* ss, Method* method) const {
+void ErrorContext::location_details(outputStream* ss, const Method* method) const {
if (_bci != -1 && method != NULL) {
streamIndentor si(ss);
const char* bytecode_name = "<invalid>";
@@ -473,7 +477,7 @@ void ErrorContext::frame_details(outputStream* ss) const {
}
}
-void ErrorContext::bytecode_details(outputStream* ss, Method* method) const {
+void ErrorContext::bytecode_details(outputStream* ss, const Method* method) const {
if (method != NULL) {
streamIndentor si(ss);
ss->indent().print_cr("Bytecode:");
@@ -482,7 +486,7 @@ void ErrorContext::bytecode_details(outputStream* ss, Method* method) const {
}
}
-void ErrorContext::handler_details(outputStream* ss, Method* method) const {
+void ErrorContext::handler_details(outputStream* ss, const Method* method) const {
if (method != NULL) {
streamIndentor si(ss);
ExceptionTable table(method);
@@ -497,7 +501,7 @@ void ErrorContext::handler_details(outputStream* ss, Method* method) const {
}
}
-void ErrorContext::stackmap_details(outputStream* ss, Method* method) const {
+void ErrorContext::stackmap_details(outputStream* ss, const Method* method) const {
if (method != NULL && method->has_stackmap_table()) {
streamIndentor si(ss);
ss->indent().print_cr("Stackmap Table:");
@@ -2321,9 +2325,6 @@ void ClassVerifier::verify_invoke_instructions(
types = 1 << JVM_CONSTANT_InvokeDynamic;
break;
case Bytecodes::_invokespecial:
- types = (1 << JVM_CONSTANT_InterfaceMethodref) |
- (1 << JVM_CONSTANT_Methodref);
- break;
case Bytecodes::_invokestatic:
types = (_klass->major_version() < STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION) ?
(1 << JVM_CONSTANT_Methodref) :
diff --git a/src/share/vm/classfile/verifier.hpp b/src/share/vm/classfile/verifier.hpp
index bdab7a7e2..9da0ac34a 100644
--- a/src/share/vm/classfile/verifier.hpp
+++ b/src/share/vm/classfile/verifier.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,10 @@
class Verifier : AllStatic {
public:
enum {
+ STRICTER_ACCESS_CTRL_CHECK_VERSION = 49,
STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50,
- INVOKEDYNAMIC_MAJOR_VERSION = 51
+ INVOKEDYNAMIC_MAJOR_VERSION = 51,
+ NO_RELAX_ACCESS_CTRL_CHECK_VERSION = 52
};
typedef enum { ThrowException, NoException } Mode;
@@ -84,9 +86,9 @@ class StackMapTable;
// These macros are used similarly to CHECK macros but also check
// the status of the verifier and return if that has an error.
#define CHECK_VERIFY(verifier) \
- CHECK); if ((verifier)->has_error()) return; (0
+ CHECK); if ((verifier)->has_error()) return; ((void)0
#define CHECK_VERIFY_(verifier, result) \
- CHECK_(result)); if ((verifier)->has_error()) return (result); (0
+ CHECK_(result)); if ((verifier)->has_error()) return (result); ((void)0
class TypeOrigin VALUE_OBJ_CLASS_SPEC {
private:
@@ -224,7 +226,7 @@ class ErrorContext VALUE_OBJ_CLASS_SPEC {
_expected.reset_frame();
}
- void details(outputStream* ss, Method* method) const;
+ void details(outputStream* ss, const Method* method) const;
#ifdef ASSERT
void print_on(outputStream* str) const {
@@ -237,12 +239,12 @@ class ErrorContext VALUE_OBJ_CLASS_SPEC {
#endif
private:
- void location_details(outputStream* ss, Method* method) const;
+ void location_details(outputStream* ss, const Method* method) const;
void reason_details(outputStream* ss) const;
void frame_details(outputStream* ss) const;
- void bytecode_details(outputStream* ss, Method* method) const;
- void handler_details(outputStream* ss, Method* method) const;
- void stackmap_details(outputStream* ss, Method* method) const;
+ void bytecode_details(outputStream* ss, const Method* method) const;
+ void handler_details(outputStream* ss, const Method* method) const;
+ void stackmap_details(outputStream* ss, const Method* method) const;
};
// A new instance of this class is created for each class being verified
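The CHECK_VERIFY macros above are designed to be pasted into a call's argument list, which is why their bodies begin with a stray ')' and end with an unbalanced '('. Assuming CHECK expands to roughly "THREAD); if (HAS_PENDING_EXCEPTION) return; ((void)0" (its usual HotSpot shape), a call written as

    verify_something(bci, CHECK_VERIFY(this));

expands approximately to:

    verify_something(bci, THREAD);
    if (HAS_PENDING_EXCEPTION) return;   // exception check from CHECK
    if ((this)->has_error()) return;     // verifier-status check added here
    ((void)0);                           // absorbs the trailing tokens

The patch's change from "(0" to "((void)0" silences unused-value warnings without altering the expansion's behavior.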
diff --git a/src/share/vm/classfile/vmSymbols.cpp b/src/share/vm/classfile/vmSymbols.cpp
index 92939ea24..cc38c6d2c 100644
--- a/src/share/vm/classfile/vmSymbols.cpp
+++ b/src/share/vm/classfile/vmSymbols.cpp
@@ -49,7 +49,7 @@ extern "C" {
}
}
-#ifndef PRODUCT
+#ifdef ASSERT
#define VM_SYMBOL_ENUM_NAME_BODY(name, string) #name "\0"
static const char* vm_symbol_enum_names =
VM_SYMBOLS_DO(VM_SYMBOL_ENUM_NAME_BODY, VM_ALIAS_IGNORE)
@@ -64,7 +64,7 @@ static const char* vm_symbol_enum_name(vmSymbols::SID sid) {
}
return string;
}
-#endif //PRODUCT
+#endif //ASSERT
// Put all the VM symbol strings in one place.
// Makes for a more compact libjvm.
diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp
index 428b2940f..f1758f866 100644
--- a/src/share/vm/classfile/vmSymbols.hpp
+++ b/src/share/vm/classfile/vmSymbols.hpp
@@ -53,8 +53,6 @@
template(java_lang_Object, "java/lang/Object") \
template(java_lang_Class, "java/lang/Class") \
template(java_lang_String, "java/lang/String") \
- template(java_lang_StringValue, "java/lang/StringValue") \
- template(java_lang_StringCache, "java/lang/StringValue$StringCache") \
template(java_lang_Thread, "java/lang/Thread") \
template(java_lang_ThreadGroup, "java/lang/ThreadGroup") \
template(java_lang_Cloneable, "java/lang/Cloneable") \
@@ -68,7 +66,7 @@
template(java_lang_Float, "java/lang/Float") \
template(java_lang_Double, "java/lang/Double") \
template(java_lang_Byte, "java/lang/Byte") \
- template(java_lang_Byte_Cache, "java/lang/Byte$ByteCache") \
+ template(java_lang_Byte_ByteCache, "java/lang/Byte$ByteCache") \
template(java_lang_Short, "java/lang/Short") \
template(java_lang_Short_ShortCache, "java/lang/Short$ShortCache") \
template(java_lang_Integer, "java/lang/Integer") \
@@ -94,6 +92,7 @@
template(java_lang_SecurityManager, "java/lang/SecurityManager") \
template(java_security_AccessControlContext, "java/security/AccessControlContext") \
template(java_security_ProtectionDomain, "java/security/ProtectionDomain") \
+ template(impliesCreateAccessControlContext_name, "impliesCreateAccessControlContext") \
template(java_io_OutputStream, "java/io/OutputStream") \
template(java_io_Reader, "java/io/Reader") \
template(java_io_BufferedReader, "java/io/BufferedReader") \
@@ -105,7 +104,6 @@
template(java_util_Vector, "java/util/Vector") \
template(java_util_AbstractList, "java/util/AbstractList") \
template(java_util_Hashtable, "java/util/Hashtable") \
- template(java_util_HashMap, "java/util/HashMap") \
template(java_lang_Compiler, "java/lang/Compiler") \
template(sun_misc_Signal, "sun/misc/Signal") \
template(java_lang_AssertionStatusDirectives, "java/lang/AssertionStatusDirectives") \
@@ -257,6 +255,7 @@
/* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \
template(java_lang_invoke_CallSite, "java/lang/invoke/CallSite") \
template(java_lang_invoke_ConstantCallSite, "java/lang/invoke/ConstantCallSite") \
+ template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \
template(java_lang_invoke_MutableCallSite, "java/lang/invoke/MutableCallSite") \
template(java_lang_invoke_VolatileCallSite, "java/lang/invoke/VolatileCallSite") \
template(java_lang_invoke_MethodHandle, "java/lang/invoke/MethodHandle") \
@@ -271,6 +270,7 @@
template(java_lang_invoke_LambdaForm, "java/lang/invoke/LambdaForm") \
template(java_lang_invoke_ForceInline_signature, "Ljava/lang/invoke/ForceInline;") \
template(java_lang_invoke_DontInline_signature, "Ljava/lang/invoke/DontInline;") \
+ template(sun_invoke_Stable_signature, "Lsun/invoke/Stable;") \
template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;") \
template(java_lang_invoke_MagicLambdaImpl, "java/lang/invoke/MagicLambdaImpl") \
@@ -346,6 +346,7 @@
template(contextClassLoader_name, "contextClassLoader") \
template(inheritedAccessControlContext_name, "inheritedAccessControlContext") \
template(isPrivileged_name, "isPrivileged") \
+ template(isAuthorized_name, "isAuthorized") \
template(getClassContext_name, "getClassContext") \
template(wait_name, "wait") \
template(checkPackageAccess_name, "checkPackageAccess") \
@@ -353,6 +354,7 @@
template(thread_id_name, "tid") \
template(newInstance0_name, "newInstance0") \
template(limit_name, "limit") \
+ template(member_name, "member") \
template(forName_name, "forName") \
template(forName0_name, "forName0") \
template(isJavaIdentifierStart_name, "isJavaIdentifierStart") \
@@ -365,8 +367,6 @@
template(offset_name, "offset") \
template(count_name, "count") \
template(hash_name, "hash") \
- template(frontCacheEnabled_name, "frontCacheEnabled") \
- template(stringCacheEnabled_name, "stringCacheEnabled") \
template(numberOfLeadingZeros_name, "numberOfLeadingZeros") \
template(numberOfTrailingZeros_name, "numberOfTrailingZeros") \
template(bitCount_name, "bitCount") \
@@ -392,6 +392,9 @@
template(array_klass_name, "array_klass") \
template(oop_size_name, "oop_size") \
template(static_oop_field_count_name, "static_oop_field_count") \
+ template(protection_domain_name, "protection_domain") \
+ template(init_lock_name, "init_lock") \
+ template(signers_name, "signers_name") \
template(loader_data_name, "loader_data") \
template(dependencies_name, "dependencies") \
\
@@ -766,6 +769,17 @@
do_name( decrypt_name, "decrypt") \
do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)V") \
\
+ /* support for java.util.zip */ \
+ do_class(java_util_zip_CRC32, "java/util/zip/CRC32") \
+ do_intrinsic(_updateCRC32, java_util_zip_CRC32, update_name, int2_int_signature, F_SN) \
+ do_name( update_name, "update") \
+ do_intrinsic(_updateBytesCRC32, java_util_zip_CRC32, updateBytes_name, updateBytes_signature, F_SN) \
+ do_name( updateBytes_name, "updateBytes") \
+ do_signature(updateBytes_signature, "(I[BII)I") \
+ do_intrinsic(_updateByteBufferCRC32, java_util_zip_CRC32, updateByteBuffer_name, updateByteBuffer_signature, F_SN) \
+ do_name( updateByteBuffer_name, "updateByteBuffer") \
+ do_signature(updateByteBuffer_signature, "(IJII)I") \
+ \
/* support for sun.misc.Unsafe */ \
do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \
\
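The new java.util.zip entries map CRC32.update, updateBytes, and updateByteBuffer onto VM intrinsics; the descriptor "(I[BII)I" is JVM shorthand for int f(int crc, byte[] buf, int off, int len). For reference, a plain bitwise CRC-32 (reflected polynomial 0xEDB88320, the zlib/java.util.zip polynomial) showing the computation these intrinsics accelerate; this is a sketch of the algorithm, not HotSpot's implementation:

    #include <stdint.h>
    #include <stddef.h>

    // Bitwise CRC-32 over a byte buffer. Intrinsified versions replace this
    // loop with table-driven or hardware (e.g. SSE4.2/ARMv8 CRC) code.
    uint32_t crc32_update(uint32_t crc, const uint8_t* buf, size_t len) {
      crc = ~crc;                                   // state is kept inverted
      for (size_t i = 0; i < len; i++) {
        crc ^= buf[i];
        for (int b = 0; b < 8; b++) {
          crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
        }
      }
      return ~crc;
    }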
diff --git a/src/share/vm/code/codeBlob.cpp b/src/share/vm/code/codeBlob.cpp
index db12102ca..95f263c13 100644
--- a/src/share/vm/code/codeBlob.cpp
+++ b/src/share/vm/code/codeBlob.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -248,7 +248,7 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
}
-void* BufferBlob::operator new(size_t s, unsigned size) {
+void* BufferBlob::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size);
return p;
}
@@ -350,14 +350,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
}
-void* RuntimeStub::operator new(size_t s, unsigned size) {
+void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size, true);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
}
// operator new shared by all singletons:
-void* SingletonBlob::operator new(size_t s, unsigned size) {
+void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size, true);
if (!p) fatal("Initial size of CodeCache is too small");
return p;
diff --git a/src/share/vm/code/codeBlob.hpp b/src/share/vm/code/codeBlob.hpp
index e59ae7f1f..6587b2d2e 100644
--- a/src/share/vm/code/codeBlob.hpp
+++ b/src/share/vm/code/codeBlob.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -209,7 +209,7 @@ class BufferBlob: public CodeBlob {
BufferBlob(const char* name, int size);
BufferBlob(const char* name, int size, CodeBuffer* cb);
- void* operator new(size_t s, unsigned size);
+ void* operator new(size_t s, unsigned size) throw();
public:
// Creation
@@ -283,7 +283,7 @@ class RuntimeStub: public CodeBlob {
bool caller_must_gc_arguments
);
- void* operator new(size_t s, unsigned size);
+ void* operator new(size_t s, unsigned size) throw();
public:
// Creation
@@ -321,7 +321,7 @@ class SingletonBlob: public CodeBlob {
friend class VMStructs;
protected:
- void* operator new(size_t s, unsigned size);
+ void* operator new(size_t s, unsigned size) throw();
public:
SingletonBlob(
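Adding throw() to these operator new overloads (codeBlob.cpp above) matters because they return NULL when CodeCache::allocate fails: under C++98/03, an allocation function not declared throw() is assumed never to return NULL, so compilers may elide the caller's NULL check. A minimal sketch of the non-throwing placement-allocator shape (Arena and Blob are illustrative stand-ins, not the HotSpot classes):

    #include <cstdlib>

    struct Arena {                                   // stand-in for CodeCache
      void* alloc(std::size_t n) { return std::malloc(n); }  // may return NULL
    };

    struct Blob {
      // throw() tells the compiler this allocator can yield NULL, so a
      // caller's "if (!p) fatal(...)" check must be preserved.
      void* operator new(std::size_t s, Arena& a, unsigned size) throw() {
        (void)s;                                     // header size unused here
        return a.alloc(size);
      }
      // Matching placement delete, invoked if the constructor throws.
      void operator delete(void* p, Arena&, unsigned) { std::free(p); }
    };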
diff --git a/src/share/vm/code/codeCache.cpp b/src/share/vm/code/codeCache.cpp
index f7be307b2..65c1e5f2e 100644
--- a/src/share/vm/code/codeCache.cpp
+++ b/src/share/vm/code/codeCache.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,7 @@
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
+#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
// Helper class for printing in CodeCache
@@ -114,7 +115,6 @@ class CodeBlob_sizes {
}
};
-
// CodeCache implementation
CodeHeap * CodeCache::_heap = new CodeHeap();
@@ -126,6 +126,7 @@ bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;
+int CodeCache::_codemem_full_count = 0;
CodeBlob* CodeCache::first() {
assert_locked_or_safepoint(CodeCache_lock);
@@ -622,6 +623,15 @@ address CodeCache::last_address() {
return (address)_heap->high();
}
+/**
+ * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
+ * is free, reverse_free_ratio() returns 4.
+ */
+double CodeCache::reverse_free_ratio() {
+ double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
+ double max_capacity = (double)CodeCache::max_capacity();
+ return max_capacity / unallocated_capacity;
+}
void icache_init();
@@ -820,6 +830,22 @@ void CodeCache::verify() {
}
}
+void CodeCache::report_codemem_full() {
+ _codemem_full_count++;
+ EventCodeCacheFull event;
+ if (event.should_commit()) {
+ event.set_startAddress((u8)low_bound());
+ event.set_commitedTopAddress((u8)high());
+ event.set_reservedTopAddress((u8)high_bound());
+ event.set_entryCount(nof_blobs());
+ event.set_methodCount(nof_nmethods());
+ event.set_adaptorCount(nof_adapters());
+ event.set_unallocatedCapacity(unallocated_capacity()/K);
+ event.set_fullCount(_codemem_full_count);
+ event.commit();
+ }
+}
+
//------------------------------------------------------------------------------------------------
// Non-product version
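
[Editor's note: report_codemem_full() above follows a count-then-conditionally-commit tracing idiom: the counter is always bumped, but event fields are populated only when tracing is enabled. The sketch below is a hedged, self-contained analogue; EventCacheFullSketch is a stand-in type, not the real trace/tracing.hpp API.]

    #include <cstdio>

    // Stand-in for a tracing event: fields are set only when the event is
    // enabled, then commit() hands the record to the trace backend.
    struct EventCacheFullSketch {
      bool enabled;
      int  full_count;
      bool should_commit() const { return enabled; }
      void set_fullCount(int n)  { full_count = n; }
      void commit() const { printf("event: fullCount=%d\n", full_count); }
    };

    static int codemem_full_count = 0;

    static void report_full(bool tracing_enabled) {
      codemem_full_count++;                    // always count
      EventCacheFullSketch event = { tracing_enabled, 0 };
      if (event.should_commit()) {             // field setup only if enabled
        event.set_fullCount(codemem_full_count);
        event.commit();
      }
    }

    int main() {
      report_full(false);  // counted, not traced
      report_full(true);   // counted and traced: prints fullCount=2
      return 0;
    }
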
diff --git a/src/share/vm/code/codeCache.hpp b/src/share/vm/code/codeCache.hpp
index 38799019b..3e8eda6e2 100644
--- a/src/share/vm/code/codeCache.hpp
+++ b/src/share/vm/code/codeCache.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,11 +64,15 @@ class CodeCache : AllStatic {
static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
+ static int _codemem_full_count;
+
public:
// Initialization
static void initialize();
+ static void report_codemem_full();
+
// Allocation/administration
static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
@@ -155,6 +159,7 @@ class CodeCache : AllStatic {
// The full limits of the codeCache
static address low_bound() { return (address) _heap->low_boundary(); }
static address high_bound() { return (address) _heap->high_boundary(); }
+ static address high() { return (address) _heap->high(); }
// Profiling
static address first_address(); // first address used for CodeBlobs
@@ -163,6 +168,7 @@ class CodeCache : AllStatic {
static size_t max_capacity() { return _heap->max_capacity(); }
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
+ static double reverse_free_ratio();
static bool needs_cache_clean() { return _needs_cache_clean; }
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
@@ -185,6 +191,8 @@ class CodeCache : AllStatic {
// tells how many nmethods have dependencies
static int number_of_nmethods_with_dependencies();
+
+ static int get_codemem_full_count() { return _codemem_full_count; }
};
#endif // SHARE_VM_CODE_CODECACHE_HPP
diff --git a/src/share/vm/code/compiledIC.cpp b/src/share/vm/code/compiledIC.cpp
index e9a63b866..489e649fa 100644
--- a/src/share/vm/code/compiledIC.cpp
+++ b/src/share/vm/code/compiledIC.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -160,32 +160,42 @@ address CompiledIC::stub_address() const {
// High-level access to an inline cache. Guaranteed to be MT-safe.
-void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
- methodHandle method = call_info->selected_method();
- bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
+bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
address entry;
- if (is_invoke_interface) {
- int index = klassItable::compute_itable_index(call_info->resolved_method()());
- entry = VtableStubs::create_stub(false, index, method());
- assert(entry != NULL, "entry not computed");
+ if (call_info->call_kind() == CallInfo::itable_call) {
+ assert(bytecode == Bytecodes::_invokeinterface, "");
+ int itable_index = call_info->itable_index();
+ entry = VtableStubs::find_itable_stub(itable_index);
+ if (entry == NULL) {
+ return false;
+ }
+#ifdef ASSERT
+ int index = call_info->resolved_method()->itable_index();
+ assert(index == itable_index, "CallInfo pre-computes this");
+#endif //ASSERT
InstanceKlass* k = call_info->resolved_method()->method_holder();
- assert(k->is_interface(), "sanity check");
+ assert(k->verify_itable_index(itable_index), "sanity check");
InlineCacheBuffer::create_transition_stub(this, k, entry);
} else {
- // Can be different than method->vtable_index(), due to package-private etc.
+ assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
+ // Can be different than selected_method->vtable_index(), due to package-private etc.
int vtable_index = call_info->vtable_index();
- entry = VtableStubs::create_stub(true, vtable_index, method());
- InlineCacheBuffer::create_transition_stub(this, method(), entry);
+ assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
+ entry = VtableStubs::find_vtable_stub(vtable_index);
+ if (entry == NULL) {
+ return false;
+ }
+ InlineCacheBuffer::create_transition_stub(this, NULL, entry);
}
if (TraceICs) {
ResourceMark rm;
tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
- instruction_address(), method->print_value_string(), entry);
+ instruction_address(), call_info->selected_method()->print_value_string(), entry);
}
// We can't check this anymore. With lazy deopt we could have already
@@ -195,6 +205,7 @@ void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
// race because the IC entry was complete when we safepointed so
// cleaning it immediately is harmless.
// assert(is_megamorphic(), "sanity check");
+ return true;
}
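
[Editor's note: the new bool return changes the caller contract: instead of exiting the VM when a stub cannot be allocated, the megamorphic transition is skipped and retried on a later invocation. A hedged standalone analogue follows; the function name and parameter are illustrative, not the VM's.]

    #include <cstdio>

    // Returns false when stub allocation fails, mirroring the contract
    // change above; the caller leaves the inline cache unchanged and the
    // next invocation retries.
    static bool try_set_megamorphic_sketch(bool code_cache_has_room) {
      if (!code_cache_has_room) {
        return false;  // no stub could be allocated
      }
      // ... install the stub and flip the call site ...
      return true;
    }

    int main() {
      if (!try_set_megamorphic_sketch(false)) {
        printf("stub allocation failed; call site stays as-is, retries later\n");
      }
      return 0;
    }
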
diff --git a/src/share/vm/code/compiledIC.hpp b/src/share/vm/code/compiledIC.hpp
index 8eec502b7..a2ddb1aab 100644
--- a/src/share/vm/code/compiledIC.hpp
+++ b/src/share/vm/code/compiledIC.hpp
@@ -229,7 +229,10 @@ class CompiledIC: public ResourceObj {
//
void set_to_clean(); // Can only be called during a safepoint operation
void set_to_monomorphic(CompiledICInfo& info);
- void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
+
+ // Returns true if successful and false otherwise. The call can fail if memory
+ // allocation in the code cache fails.
+ bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
diff --git a/src/share/vm/code/debugInfo.hpp b/src/share/vm/code/debugInfo.hpp
index 3f65652d8..7a4f7e2d4 100644
--- a/src/share/vm/code/debugInfo.hpp
+++ b/src/share/vm/code/debugInfo.hpp
@@ -274,7 +274,7 @@ class DebugInfoReadStream : public CompressedReadStream {
Method* read_method() {
Method* o = (Method*)(code()->metadata_at(read_int()));
assert(o == NULL ||
- o->is_metadata(), "meta data only");
+ o->is_metaspace_object(), "meta data only");
return o;
}
ScopeValue* read_object_value();
diff --git a/src/share/vm/code/debugInfoRec.cpp b/src/share/vm/code/debugInfoRec.cpp
index eaa718875..fa0cb70fc 100644
--- a/src/share/vm/code/debugInfoRec.cpp
+++ b/src/share/vm/code/debugInfoRec.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@ class DIR_Chunk {
int _length; // number of bytes in the stream
int _hash; // hash of stream bytes (for quicker reuse)
- void* operator new(size_t ignore, DebugInformationRecorder* dir) {
+ void* operator new(size_t ignore, DebugInformationRecorder* dir) throw() {
assert(ignore == sizeof(DIR_Chunk), "");
if (dir->_next_chunk >= dir->_next_chunk_limit) {
const int CHUNK = 100;
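
[Editor's note: this changeset adds throw() to every placement-style operator new that may return NULL (BufferBlob, RuntimeStub, SingletonBlob, DIR_Chunk, nmethod, Relocation, VtableStub). The reason: without a no-throw exception specification the compiler may assume operator new never returns NULL and elide the caller's NULL check; with throw(), the new-expression itself tests the pointer before running the constructor. A hedged, self-contained illustration with a toy arena (all names and sizes are illustrative):]

    #include <cstddef>
    #include <cstdio>

    struct Arena { char buf[64]; size_t used; };

    struct Chunk {
      int payload;
      // Declared throw() (C++98 style, matching the patch) so the
      // new-expression checks for NULL before invoking the constructor;
      // without it, a NULL return would be undefined behavior.
      void* operator new(size_t size, Arena& a) throw() {
        if (a.used + size > sizeof(a.buf)) return NULL;
        void* p = a.buf + a.used;
        a.used += size;
        return p;
      }
      Chunk() : payload(42) {}
    };

    int main() {
      Arena a = { {0}, 0 };
      for (int i = 0; i < 20; i++) {
        Chunk* c = new (a) Chunk();   // eventually returns NULL, safely
        if (c == NULL) { printf("arena exhausted at chunk %d\n", i); break; }
      }
      return 0;
    }
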
diff --git a/src/share/vm/code/dependencies.cpp b/src/share/vm/code/dependencies.cpp
index b790c3597..df269430e 100644
--- a/src/share/vm/code/dependencies.cpp
+++ b/src/share/vm/code/dependencies.cpp
@@ -655,8 +655,8 @@ inline Metadata* Dependencies::DepStream::recorded_metadata_at(int i) {
} else {
o = _deps->oop_recorder()->metadata_at(i);
}
- assert(o == NULL || o->is_metadata(),
- err_msg("Should be perm " PTR_FORMAT, o));
+ assert(o == NULL || o->is_metaspace_object(),
+ err_msg("Should be metadata " PTR_FORMAT, o));
return o;
}
@@ -989,7 +989,7 @@ Klass* ClassHierarchyWalker::find_witness_in(KlassDepChange& changes,
assert(changes.involves_context(context_type), "irrelevant dependency");
Klass* new_type = changes.new_type();
- count_find_witness_calls();
+ (void)count_find_witness_calls();
NOT_PRODUCT(deps_find_witness_singles++);
// Current thread must be in VM (not native mode, as in CI):
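
[Editor's note: the (void) cast on count_find_witness_calls() records that its boolean result is deliberately ignored at this call site and silences unused-result warnings without changing behavior. A hedged one-line illustration, with an invented helper name:]

    #include <cstdio>

    // A function whose result matters at some call sites but not others.
    static bool count_calls_sketch() {
      static int calls = 0;
      return ++calls > 10;  // "too many calls" signal, unused below
    }

    int main() {
      (void)count_calls_sketch();  // result intentionally discarded
      printf("counted without inspecting the result\n");
      return 0;
    }
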
diff --git a/src/share/vm/code/nmethod.cpp b/src/share/vm/code/nmethod.cpp
index cc3847e98..9b35368e2 100644
--- a/src/share/vm/code/nmethod.cpp
+++ b/src/share/vm/code/nmethod.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -97,18 +97,21 @@ HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
#endif
bool nmethod::is_compiled_by_c1() const {
- if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing
- if (is_native_method()) return false;
+ if (compiler() == NULL) {
+ return false;
+ }
return compiler()->is_c1();
}
bool nmethod::is_compiled_by_c2() const {
- if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing
- if (is_native_method()) return false;
+ if (compiler() == NULL) {
+ return false;
+ }
return compiler()->is_c2();
}
bool nmethod::is_compiled_by_shark() const {
- if (is_native_method()) return false;
- assert(compiler() != NULL, "must be");
+ if (compiler() == NULL) {
+ return false;
+ }
return compiler()->is_shark();
}
@@ -691,6 +694,7 @@ nmethod::nmethod(
code_buffer->copy_values_to(this);
if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
CodeCache::add_scavenge_root_nmethod(this);
+ Universe::heap()->register_nmethod(this);
}
debug_only(verify_scavenge_root_oops());
@@ -812,7 +816,7 @@ nmethod::nmethod(
}
#endif // def HAVE_DTRACE_H
-void* nmethod::operator new(size_t size, int nmethod_size) throw () {
+void* nmethod::operator new(size_t size, int nmethod_size) throw() {
// Not critical, may return null if there is too little contiguous memory
return CodeCache::allocate(nmethod_size);
}
@@ -894,6 +898,7 @@ nmethod::nmethod(
dependencies->copy_to(this);
if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
CodeCache::add_scavenge_root_nmethod(this);
+ Universe::heap()->register_nmethod(this);
}
debug_only(verify_scavenge_root_oops());
@@ -1102,11 +1107,6 @@ void nmethod::fix_oop_relocations(address begin, address end, bool initialize_im
metadata_Relocation* reloc = iter.metadata_reloc();
reloc->fix_metadata_relocation();
}
-
- // There must not be any interfering patches or breakpoints.
- assert(!(iter.type() == relocInfo::breakpoint_type
- && iter.breakpoint_reloc()->active()),
- "no active breakpoint");
}
}
@@ -1326,6 +1326,13 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
methodHandle the_method(method());
No_Safepoint_Verifier nsv;
+ // during patching, depending on the nmethod state we must notify the GC that
+ // code has been unloaded, unregistering it. We cannot do this right while
+ // holding the Patching_lock because we need to use the CodeCache_lock. This
+ // would be prone to deadlocks.
+ // This flag is used to remember whether we need to later lock and unregister.
+ bool nmethod_needs_unregister = false;
+
{
// invalidate osr nmethod before acquiring the patching lock since
// they both acquire leaf locks and we don't want a deadlock.
@@ -1358,6 +1365,13 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
inc_decompile_count();
}
+ // If the state is becoming a zombie, signal to unregister the nmethod with
+ // the heap.
+ // This nmethod may have already been unloaded during a full GC.
+ if ((state == zombie) && !is_unloaded()) {
+ nmethod_needs_unregister = true;
+ }
+
// Change state
_state = state;
@@ -1393,6 +1407,9 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// safepoint can sneak in, otherwise the oops used by the
// dependency logic could have become stale.
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ if (nmethod_needs_unregister) {
+ Universe::heap()->unregister_nmethod(this);
+ }
flush_dependencies(NULL);
}
@@ -1408,6 +1425,9 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// nmethods aren't scanned for GC.
_oops_are_stale = true;
#endif
+ // the Method may be reclaimed by class unloading now that the
+ // nmethod is in zombie state
+ set_method(NULL);
} else {
assert(state == not_entrant, "other cases may need to be handled differently");
}
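
[Editor's note: the nmethod_needs_unregister flag above implements a common deadlock-avoidance shape: decide while holding the Patching_lock, remember the decision in a local, and act later while holding the CodeCache_lock. A hedged sketch with standard mutexes standing in for the VM's locks:]

    #include <mutex>
    #include <cstdio>

    std::mutex patching_lock;    // stand-in for Patching_lock
    std::mutex codecache_lock;   // stand-in for CodeCache_lock

    void make_zombie_sketch(bool already_unloaded) {
      bool needs_unregister = false;
      {
        std::lock_guard<std::mutex> g(patching_lock);
        // Decide here, but never take codecache_lock inside this region:
        // acquiring both locks in this order could deadlock against code
        // that acquires them the other way round.
        needs_unregister = !already_unloaded;
      }
      if (needs_unregister) {
        std::lock_guard<std::mutex> g(codecache_lock);
        printf("unregistered nmethod with the heap\n");
      }
    }

    int main() { make_zombie_sketch(false); return 0; }
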
@@ -1815,6 +1835,19 @@ void nmethod::metadata_do(void f(Metadata*)) {
Metadata* md = r->metadata_value();
f(md);
}
+ } else if (iter.type() == relocInfo::virtual_call_type) {
+ // Check compiledIC holders associated with this nmethod
+ CompiledIC *ic = CompiledIC_at(iter.reloc());
+ if (ic->is_icholder_call()) {
+ CompiledICHolder* cichk = ic->cached_icholder();
+ f(cichk->holder_method());
+ f(cichk->holder_klass());
+ } else {
+ Metadata* ic_oop = ic->cached_metadata();
+ if (ic_oop != NULL) {
+ f(ic_oop);
+ }
+ }
}
}
}
@@ -1825,25 +1858,15 @@ void nmethod::metadata_do(void f(Metadata*)) {
Metadata* md = *p;
f(md);
}
+
// Call function Method*, not embedded in these other places.
if (_method != NULL) f(_method);
}
-
-// This method is called twice during GC -- once while
-// tracing the "active" nmethods on thread stacks during
-// the (strong) marking phase, and then again when walking
-// the code cache contents during the weak roots processing
-// phase. The two uses are distinguished by means of the
-// 'do_strong_roots_only' flag, which is true in the first
-// case. We want to walk the weak roots in the nmethod
-// only in the second case. The weak roots in the nmethod
-// are the oops in the ExceptionCache and the InlineCache
-// oops.
-void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
+void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
// make sure the oops ready to receive visitors
- assert(!is_zombie() && !is_unloaded(),
- "should not call follow on zombie or unloaded nmethod");
+ assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
+ assert(!is_unloaded(), "should not call follow on unloaded nmethod");
// If the method is not entrant or zombie then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
@@ -1983,11 +2006,10 @@ void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map
if (!method()->is_native()) {
SimpleScopeDesc ssd(this, fr.pc());
Bytecode_invoke call(ssd.method(), ssd.bci());
- // compiled invokedynamic call sites have an implicit receiver at
- // resolution time, so make sure it gets GC'ed.
- bool has_receiver = !call.is_invokestatic();
+ bool has_receiver = call.has_receiver();
+ bool has_appendix = call.has_appendix();
Symbol* signature = call.signature();
- fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
+ fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
}
#endif // !SHARK
}
@@ -2623,7 +2645,8 @@ void nmethod::print_relocations() {
relocation_begin()-1+ip[1]);
for (; ip < index_end; ip++)
tty->print_cr(" (%d ?)", ip[0]);
- tty->print_cr(" @" INTPTR_FORMAT ": index_size=%d", ip, *ip++);
+ tty->print_cr(" @" INTPTR_FORMAT ": index_size=%d", ip, *ip);
+ ip++;
tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
}
}
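
[Editor's note: the small print_relocations() change above is a correctness fix. The order in which arguments to a variadic call are evaluated is unspecified, so passing both ip and *ip++ in the same call could print a pointer that does not match the value. Splitting the increment onto its own statement makes the sequencing explicit; a hedged demonstration:]

    #include <cstdio>

    int main() {
      short data[2] = { 5, 6 };
      short* ip = data;

      // Risky form: whether "ip" is evaluated before or after "*ip++" is
      // unspecified, so address and value may disagree:
      //   printf("%p: %d\n", (void*)ip, *ip++);

      // Patched form: read, print, then advance; fully determined.
      printf("%p: %d\n", (void*)ip, (int)*ip);
      ip++;
      return 0;
    }
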
diff --git a/src/share/vm/code/nmethod.hpp b/src/share/vm/code/nmethod.hpp
index 477ceb6dc..4929ea820 100644
--- a/src/share/vm/code/nmethod.hpp
+++ b/src/share/vm/code/nmethod.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -265,7 +265,7 @@ class nmethod : public CodeBlob {
int comp_level);
// helper methods
- void* operator new(size_t size, int nmethod_size);
+ void* operator new(size_t size, int nmethod_size) throw();
const char* reloc_string_for(u_char* begin, u_char* end);
// Returns true if this thread changed the state of the nmethod or
@@ -566,7 +566,7 @@ public:
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
OopClosure* f);
void oops_do(OopClosure* f) { oops_do(f, false); }
- void oops_do(OopClosure* f, bool do_strong_roots_only);
+ void oops_do(OopClosure* f, bool allow_zombie);
bool detect_scavenge_root_oops();
void verify_scavenge_root_oops() PRODUCT_RETURN;
diff --git a/src/share/vm/code/relocInfo.cpp b/src/share/vm/code/relocInfo.cpp
index 89c73d842..68f113c04 100644
--- a/src/share/vm/code/relocInfo.cpp
+++ b/src/share/vm/code/relocInfo.cpp
@@ -341,31 +341,6 @@ void RelocIterator::set_limit(address limit) {
_limit = limit;
}
-
-void PatchingRelocIterator:: prepass() {
- // turn breakpoints off during patching
- _init_state = (*this); // save cursor
- while (next()) {
- if (type() == relocInfo::breakpoint_type) {
- breakpoint_reloc()->set_active(false);
- }
- }
- (RelocIterator&)(*this) = _init_state; // reset cursor for client
-}
-
-
-void PatchingRelocIterator:: postpass() {
- // turn breakpoints back on after patching
- (RelocIterator&)(*this) = _init_state; // reset cursor again
- while (next()) {
- if (type() == relocInfo::breakpoint_type) {
- breakpoint_Relocation* bpt = breakpoint_reloc();
- bpt->set_active(bpt->enabled());
- }
- }
-}
-
-
// All the strange bit-encodings are in here.
// The idea is to encode relocation data which are small integers
// very efficiently (a single extra halfword). Larger chunks of
@@ -707,51 +682,6 @@ void section_word_Relocation::unpack_data() {
_target = address_from_scaled_offset(offset, base);
}
-
-void breakpoint_Relocation::pack_data_to(CodeSection* dest) {
- short* p = (short*) dest->locs_end();
- address point = dest->locs_point();
-
- *p++ = _bits;
-
- assert(_target != NULL, "sanity");
-
- if (internal()) normalize_address(_target, dest);
-
- jint target_bits =
- (jint)( internal() ? scaled_offset (_target, point)
- : runtime_address_to_index(_target) );
- if (settable()) {
- // save space for set_target later
- p = add_jint(p, target_bits);
- } else {
- p = add_var_int(p, target_bits);
- }
-
- for (int i = 0; i < instrlen(); i++) {
- // put placeholder words until bytes can be saved
- p = add_short(p, (short)0x7777);
- }
-
- dest->set_locs_end((relocInfo*) p);
-}
-
-
-void breakpoint_Relocation::unpack_data() {
- _bits = live_bits();
-
- int targetlen = datalen() - 1 - instrlen();
- jint target_bits = 0;
- if (targetlen == 0) target_bits = 0;
- else if (targetlen == 1) target_bits = *(data()+1);
- else if (targetlen == 2) target_bits = relocInfo::jint_from_data(data()+1);
- else { ShouldNotReachHere(); }
-
- _target = internal() ? address_from_scaled_offset(target_bits, addr())
- : index_to_runtime_address (target_bits);
-}
-
-
//// miscellaneous methods
oop* oop_Relocation::oop_addr() {
int n = _oop_index;
@@ -936,81 +866,6 @@ address internal_word_Relocation::target() {
return target;
}
-
-breakpoint_Relocation::breakpoint_Relocation(int kind, address target, bool internal) {
- bool active = false;
- bool enabled = (kind == initialization);
- bool removable = (kind != safepoint);
- bool settable = (target == NULL);
-
- int bits = kind;
- if (enabled) bits |= enabled_state;
- if (internal) bits |= internal_attr;
- if (removable) bits |= removable_attr;
- if (settable) bits |= settable_attr;
-
- _bits = bits | high_bit;
- _target = target;
-
- assert(this->kind() == kind, "kind encoded");
- assert(this->enabled() == enabled, "enabled encoded");
- assert(this->active() == active, "active encoded");
- assert(this->internal() == internal, "internal encoded");
- assert(this->removable() == removable, "removable encoded");
- assert(this->settable() == settable, "settable encoded");
-}
-
-
-address breakpoint_Relocation::target() const {
- return _target;
-}
-
-
-void breakpoint_Relocation::set_target(address x) {
- assert(settable(), "must be settable");
- jint target_bits =
- (jint)(internal() ? scaled_offset (x, addr())
- : runtime_address_to_index(x));
- short* p = &live_bits() + 1;
- p = add_jint(p, target_bits);
- assert(p == instrs(), "new target must fit");
- _target = x;
-}
-
-
-void breakpoint_Relocation::set_enabled(bool b) {
- if (enabled() == b) return;
-
- if (b) {
- set_bits(bits() | enabled_state);
- } else {
- set_active(false); // remove the actual breakpoint insn, if any
- set_bits(bits() & ~enabled_state);
- }
-}
-
-
-void breakpoint_Relocation::set_active(bool b) {
- assert(!b || enabled(), "cannot activate a disabled breakpoint");
-
- if (active() == b) return;
-
- // %%% should probably seize a lock here (might not be the right lock)
- //MutexLockerEx ml_patch(Patching_lock, true);
- //if (active() == b) return; // recheck state after locking
-
- if (b) {
- set_bits(bits() | active_state);
- if (instrlen() == 0)
- fatal("breakpoints in original code must be undoable");
- pd_swap_in_breakpoint (addr(), instrs(), instrlen());
- } else {
- set_bits(bits() & ~active_state);
- pd_swap_out_breakpoint(addr(), instrs(), instrlen());
- }
-}
-
-
//---------------------------------------------------------------------------------
// Non-product code
diff --git a/src/share/vm/code/relocInfo.hpp b/src/share/vm/code/relocInfo.hpp
index 6dcb83e09..dab7d5167 100644
--- a/src/share/vm/code/relocInfo.hpp
+++ b/src/share/vm/code/relocInfo.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,9 +49,6 @@ class NativeMovConstReg;
// RelocIterator
// A StackObj which iterates over the relocations associated with
// a range of code addresses. Can be used to operate a copy of code.
-// PatchingRelocIterator
-// Specialized subtype of RelocIterator which removes breakpoints
-// temporarily during iteration, then restores them.
// BoundRelocation
// An _internal_ type shared by packers and unpackers of relocations.
// It pastes together a RelocationHolder with some pointers into
@@ -204,15 +201,6 @@ class NativeMovConstReg;
// immediate field must not straddle a unit of memory coherence.
// //%note reloc_3
//
-// relocInfo::breakpoint_type -- a conditional breakpoint in the code
-// Value: none
-// Instruction types: any whatsoever
-// Data: [b [T]t i...]
-// The b is a bit-packed word representing the breakpoint's attributes.
-// The t is a target address which the breakpoint calls (when it is enabled).
-// The i... is a place to store one or two instruction words overwritten
-// by a trap, so that the breakpoint may be subsequently removed.
-//
// relocInfo::static_stub_type -- an extra stub for each static_call_type
// Value: none
// Instruction types: a virtual call: { set_oop; jump; }
@@ -271,8 +259,8 @@ class relocInfo VALUE_OBJ_CLASS_SPEC {
section_word_type = 9, // internal, but a cross-section reference
poll_type = 10, // polling instruction for safepoints
poll_return_type = 11, // polling instruction for safepoints at return
- breakpoint_type = 12, // an initialization barrier or safepoint
- metadata_type = 13, // metadata that used to be oops
+ metadata_type = 12, // metadata that used to be oops
+ yet_unused_type_1 = 13, // Still unused
yet_unused_type_2 = 14, // Still unused
data_prefix_tag = 15, // tag for a prefix (carries data arguments)
type_mask = 15 // A mask which selects only the above values
@@ -312,7 +300,6 @@ class relocInfo VALUE_OBJ_CLASS_SPEC {
visitor(internal_word) \
visitor(poll) \
visitor(poll_return) \
- visitor(breakpoint) \
visitor(section_word) \
@@ -457,7 +444,7 @@ class relocInfo VALUE_OBJ_CLASS_SPEC {
public:
enum {
// Conservatively large estimate of maximum length (in shorts)
- // of any relocation record (probably breakpoints are largest).
+ // of any relocation record.
// Extended format is length prefix, data words, and tag/offset suffix.
length_limit = 1 + 1 + (3*BytesPerWord/BytesPerShort) + 1,
have_format = format_width > 0
@@ -574,8 +561,6 @@ class RelocIterator : public StackObj {
void initialize(nmethod* nm, address begin, address limit);
- friend class PatchingRelocIterator;
- // make an uninitialized one, for PatchingRelocIterator:
RelocIterator() { initialize_misc(); }
public:
@@ -695,7 +680,7 @@ class Relocation VALUE_OBJ_CLASS_SPEC {
}
public:
- void* operator new(size_t size, const RelocationHolder& holder) {
+ void* operator new(size_t size, const RelocationHolder& holder) throw() {
if (size > sizeof(holder._relocbuf)) guarantee_size();
assert((void* const *)holder.reloc() == &holder._relocbuf[0], "ptrs must agree");
return holder.reloc();
@@ -782,9 +767,6 @@ class Relocation VALUE_OBJ_CLASS_SPEC {
void pd_verify_data_value (address x, intptr_t off) { pd_set_data_value(x, off, true); }
address pd_call_destination (address orig_addr = NULL);
void pd_set_call_destination (address x);
- void pd_swap_in_breakpoint (address x, short* instrs, int instrlen);
- void pd_swap_out_breakpoint (address x, short* instrs, int instrlen);
- static int pd_breakpoint_size ();
// this extracts the address of an address in the code stream instead of the reloc data
address* pd_address_in_code ();
@@ -1309,87 +1291,6 @@ class poll_return_Relocation : public Relocation {
void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
};
-
-class breakpoint_Relocation : public Relocation {
- relocInfo::relocType type() { return relocInfo::breakpoint_type; }
-
- enum {
- // attributes which affect the interpretation of the data:
- removable_attr = 0x0010, // buffer [i...] allows for undoing the trap
- internal_attr = 0x0020, // the target is an internal addr (local stub)
- settable_attr = 0x0040, // the target is settable
-
- // states which can change over time:
- enabled_state = 0x0100, // breakpoint must be active in running code
- active_state = 0x0200, // breakpoint instruction actually in code
-
- kind_mask = 0x000F, // mask for extracting kind
- high_bit = 0x4000 // extra bit which is always set
- };
-
- public:
- enum {
- // kinds:
- initialization = 1,
- safepoint = 2
- };
-
- // If target is NULL, 32 bits are reserved for a later set_target().
- static RelocationHolder spec(int kind, address target = NULL, bool internal_target = false) {
- RelocationHolder rh = newHolder();
- new(rh) breakpoint_Relocation(kind, target, internal_target);
- return rh;
- }
-
- private:
- // We require every bits value to NOT to fit into relocInfo::datalen_width,
- // because we are going to actually store state in the reloc, and so
- // cannot allow it to be compressed (and hence copied by the iterator).
-
- short _bits; // bit-encoded kind, attrs, & state
- address _target;
-
- breakpoint_Relocation(int kind, address target, bool internal_target);
-
- friend class RelocIterator;
- breakpoint_Relocation() { }
-
- short bits() const { return _bits; }
- short& live_bits() const { return data()[0]; }
- short* instrs() const { return data() + datalen() - instrlen(); }
- int instrlen() const { return removable() ? pd_breakpoint_size() : 0; }
-
- void set_bits(short x) {
- assert(live_bits() == _bits, "must be the only mutator of reloc info");
- live_bits() = _bits = x;
- }
-
- public:
- address target() const;
- void set_target(address x);
-
- int kind() const { return bits() & kind_mask; }
- bool enabled() const { return (bits() & enabled_state) != 0; }
- bool active() const { return (bits() & active_state) != 0; }
- bool internal() const { return (bits() & internal_attr) != 0; }
- bool removable() const { return (bits() & removable_attr) != 0; }
- bool settable() const { return (bits() & settable_attr) != 0; }
-
- void set_enabled(bool b); // to activate, you must also say set_active
- void set_active(bool b); // actually inserts bpt (must be enabled 1st)
-
- // data is packed as 16 bits, followed by the target (1 or 2 words), followed
- // if necessary by empty storage for saving away original instruction bytes.
- void pack_data_to(CodeSection* dest);
- void unpack_data();
-
- // during certain operations, breakpoints must be out of the way:
- void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
- assert(!active(), "cannot perform relocation on enabled breakpoints");
- }
-};
-
-
// We know all the xxx_Relocation classes, so now we can define these:
#define EACH_CASE(name) \
inline name##_Relocation* RelocIterator::name##_reloc() { \
@@ -1408,25 +1309,4 @@ inline RelocIterator::RelocIterator(nmethod* nm, address begin, address limit) {
initialize(nm, begin, limit);
}
-// if you are going to patch code, you should use this subclass of
-// RelocIterator
-class PatchingRelocIterator : public RelocIterator {
- private:
- RelocIterator _init_state;
-
- void prepass(); // deactivates all breakpoints
- void postpass(); // reactivates all enabled breakpoints
-
- // do not copy these puppies; it would have unpredictable side effects
- // these are private and have no bodies defined because they should not be called
- PatchingRelocIterator(const RelocIterator&);
- void operator=(const RelocIterator&);
-
- public:
- PatchingRelocIterator(nmethod* nm, address begin = NULL, address limit = NULL)
- : RelocIterator(nm, begin, limit) { prepass(); }
-
- ~PatchingRelocIterator() { postpass(); }
-};
-
#endif // SHARE_VM_CODE_RELOCINFO_HPP
diff --git a/src/share/vm/code/vtableStubs.cpp b/src/share/vm/code/vtableStubs.cpp
index b08071453..5af91a3a6 100644
--- a/src/share/vm/code/vtableStubs.cpp
+++ b/src/share/vm/code/vtableStubs.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,12 +46,9 @@ address VtableStub::_chunk = NULL;
address VtableStub::_chunk_end = NULL;
VMReg VtableStub::_receiver_location = VMRegImpl::Bad();
-static int num_vtable_chunks = 0;
-
-void* VtableStub::operator new(size_t size, int code_size) {
+void* VtableStub::operator new(size_t size, int code_size) throw() {
assert(size == sizeof(VtableStub), "mismatched size");
- num_vtable_chunks++;
// compute real VtableStub size (rounded to nearest word)
const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
// malloc them in chunks to minimize header overhead
@@ -60,7 +57,7 @@ void* VtableStub::operator new(size_t size, int code_size) {
const int bytes = chunk_factor * real_size + pd_code_alignment();
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
- vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
+ return NULL;
}
_chunk = blob->content_begin();
_chunk_end = _chunk + bytes;
@@ -111,7 +108,7 @@ void VtableStubs::initialize() {
}
-address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, Method* method) {
+address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
assert(vtable_index >= 0, "must be positive");
VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
@@ -121,6 +118,12 @@ address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, Method*
} else {
s = create_itable_stub(vtable_index);
}
+
+ // Creation of vtable or itable can fail if there is not enough free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
enter(is_vtable_stub, vtable_index, s);
if (PrintAdapterHandlers) {
tty->print_cr("Decoding VtableStub %s[%d]@%d",
diff --git a/src/share/vm/code/vtableStubs.hpp b/src/share/vm/code/vtableStubs.hpp
index d6e755a6e..06f2a67a8 100644
--- a/src/share/vm/code/vtableStubs.hpp
+++ b/src/share/vm/code/vtableStubs.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@ class VtableStub {
bool _is_vtable_stub; // True if vtable stub, false if itable stub
/* code follows here */ // The vtableStub code
- void* operator new(size_t size, int code_size);
+ void* operator new(size_t size, int code_size) throw();
VtableStub(bool is_vtable_stub, int index)
: _next(NULL), _is_vtable_stub(is_vtable_stub),
@@ -121,9 +121,11 @@ class VtableStubs : AllStatic {
static VtableStub* lookup (bool is_vtable_stub, int vtable_index);
static void enter (bool is_vtable_stub, int vtable_index, VtableStub* s);
static inline uint hash (bool is_vtable_stub, int vtable_index);
+ static address find_stub (bool is_vtable_stub, int vtable_index);
public:
- static address create_stub(bool is_vtable_stub, int vtable_index, Method* method); // return the entry point of a stub for this call
+ static address find_vtable_stub(int vtable_index) { return find_stub(true, vtable_index); }
+ static address find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
static bool is_entry_point(address pc); // is pc a vtable stub entry point?
static bool contains(address pc); // is pc within any stub?
static VtableStub* stub_containing(address pc); // stub containing pc or NULL
diff --git a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp
index dc97ae771..157b5786e 100644
--- a/src/share/vm/compiler/compileBroker.cpp
+++ b/src/share/vm/compiler/compileBroker.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,6 +43,7 @@
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
+#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#ifdef COMPILER1
@@ -179,9 +180,11 @@ int CompileBroker::_sum_standard_bytes_compiled = 0;
int CompileBroker::_sum_nmethod_size = 0;
int CompileBroker::_sum_nmethod_code_size = 0;
-CompileQueue* CompileBroker::_c2_method_queue = NULL;
-CompileQueue* CompileBroker::_c1_method_queue = NULL;
-CompileTask* CompileBroker::_task_free_list = NULL;
+long CompileBroker::_peak_compilation_time = 0;
+
+CompileQueue* CompileBroker::_c2_method_queue = NULL;
+CompileQueue* CompileBroker::_c1_method_queue = NULL;
+CompileTask* CompileBroker::_task_free_list = NULL;
GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
@@ -1642,42 +1645,37 @@ void CompileBroker::compiler_thread_loop() {
// Set up state required by +LogCompilation.
void CompileBroker::init_compiler_thread_log() {
CompilerThread* thread = CompilerThread::current();
- char fileBuf[4*K];
+ char file_name[4*K];
FILE* fp = NULL;
- char* file = NULL;
intx thread_id = os::current_thread_id();
for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) {
const char* dir = (try_temp_dir ? os::get_temp_directory() : NULL);
if (dir == NULL) {
- jio_snprintf(fileBuf, sizeof(fileBuf), "hs_c" UINTX_FORMAT "_pid%u.log",
+ jio_snprintf(file_name, sizeof(file_name), "hs_c" UINTX_FORMAT "_pid%u.log",
thread_id, os::current_process_id());
} else {
- jio_snprintf(fileBuf, sizeof(fileBuf),
+ jio_snprintf(file_name, sizeof(file_name),
"%s%shs_c" UINTX_FORMAT "_pid%u.log", dir,
os::file_separator(), thread_id, os::current_process_id());
}
- fp = fopen(fileBuf, "at");
- if (fp != NULL) {
- file = NEW_C_HEAP_ARRAY(char, strlen(fileBuf)+1, mtCompiler);
- strcpy(file, fileBuf);
- break;
- }
- }
- if (fp == NULL) {
- warning("Cannot open log file: %s", fileBuf);
- } else {
- if (LogCompilation && Verbose)
- tty->print_cr("Opening compilation log %s", file);
- CompileLog* log = new(ResourceObj::C_HEAP, mtCompiler) CompileLog(file, fp, thread_id);
- thread->init_log(log);
- if (xtty != NULL) {
- ttyLocker ttyl;
+ fp = fopen(file_name, "at");
+ if (fp != NULL) {
+ if (LogCompilation && Verbose) {
+ tty->print_cr("Opening compilation log %s", file_name);
+ }
+ CompileLog* log = new(ResourceObj::C_HEAP, mtCompiler) CompileLog(file_name, fp, thread_id);
+ thread->init_log(log);
- // Record any per thread log files
- xtty->elem("thread_logfile thread='%d' filename='%s'", thread_id, file);
+ if (xtty != NULL) {
+ ttyLocker ttyl;
+ // Record any per thread log files
+ xtty->elem("thread_logfile thread='%d' filename='%s'", thread_id, file_name);
+ }
+ return;
}
}
+ warning("Cannot open log file: %s", file_name);
}
// ------------------------------------------------------------------
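
[Editor's note: the init_compiler_thread_log() rewrite folds success handling into the loop body: each candidate directory is tried in turn, the first fopen that succeeds finishes all setup and returns, and the single warning runs only after every candidate has failed. A hedged standalone analogue; the paths are illustrative, not the VM's naming scheme:]

    #include <cstdio>

    int main() {
      const char* candidates[] = { "/nonexistent-dir/hs_c1.log", "hs_c1.log" };
      char file_name[256];
      for (int i = 0; i < 2; i++) {
        snprintf(file_name, sizeof(file_name), "%s", candidates[i]);
        FILE* fp = fopen(file_name, "at");
        if (fp != NULL) {
          printf("Opening compilation log %s\n", file_name);
          fclose(fp);
          return 0;        // success: set up and leave, as the patch does
        }
      }
      printf("Cannot open log file: %s\n", file_name);  // all attempts failed
      return 1;
    }
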
@@ -1720,7 +1718,7 @@ static void codecache_print(bool detailed)
CodeCache::print_summary(&s, detailed);
}
ttyLocker ttyl;
- tty->print_cr(s.as_string());
+ tty->print(s.as_string());
}
// ------------------------------------------------------------------
@@ -1800,6 +1798,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
ciMethod* target = ci_env.get_method_from_handle(target_handle);
TraceTime t1("compilation", &time);
+ EventCompilation event;
AbstractCompiler *comp = compiler(task_level);
if (comp == NULL) {
@@ -1841,6 +1840,16 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
}
// simulate crash during compilation
assert(task->compile_id() != CICrashAt, "just as planned");
+ if (event.should_commit()) {
+ event.set_method(target->get_Method());
+ event.set_compileID(compile_id);
+ event.set_compileLevel(task->comp_level());
+ event.set_succeded(task->is_success());
+ event.set_isOsr(is_osr);
+ event.set_codeSize((task->code() == NULL) ? 0 : task->code()->total_size());
+ event.set_inlinedBytes(task->num_inlined_bytecodes());
+ event.commit();
+ }
}
pop_jni_handle_block();
@@ -1854,8 +1863,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp
tty->print("%4d ", compile_id); // print compilation number
tty->print("%s ", (is_osr ? "%" : " "));
- int code_size = (task->code() == NULL) ? 0 : task->code()->total_size();
- tty->print_cr("size: %d time: %d inlined: %d bytes", code_size, (int)time.milliseconds(), task->num_inlined_bytecodes());
+ if (task->code() != NULL) {
+ tty->print("size: %d(%d) ", task->code()->total_size(), task->code()->insts_size());
+ }
+ tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes());
}
if (PrintCodeCacheOnCompilation)
@@ -1919,6 +1930,10 @@ void CompileBroker::handle_full_code_cache() {
}
warning("CodeCache is full. Compiler has been disabled.");
warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
+
+ CodeCache::report_codemem_full();
+
+
#ifndef PRODUCT
if (CompileTheWorld || ExitOnFullCodeCache) {
codecache_print(/* detailed= */ true);
@@ -2076,8 +2091,10 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time
// java.lang.management.CompilationMBean
_perf_total_compilation->inc(time.ticks());
+ _t_total_compilation.add(time);
+ _peak_compilation_time = time.milliseconds() > _peak_compilation_time ? time.milliseconds() : _peak_compilation_time;
+
if (CITime) {
- _t_total_compilation.add(time);
if (is_osr) {
_t_osr_compilation.add(time);
_sum_osr_bytes_compiled += method->code_size() + task->num_inlined_bytecodes();
@@ -2175,7 +2192,6 @@ void CompileBroker::print_times() {
tty->print_cr(" nmethod total size : %6d bytes", CompileBroker::_sum_nmethod_size);
}
-
// Debugging output for failure
void CompileBroker::print_last_compile() {
if ( _last_compile_level != CompLevel_none &&
diff --git a/src/share/vm/compiler/compileBroker.hpp b/src/share/vm/compiler/compileBroker.hpp
index 27fe52851..f336497a3 100644
--- a/src/share/vm/compiler/compileBroker.hpp
+++ b/src/share/vm/compiler/compileBroker.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -299,17 +299,17 @@ class CompileBroker: AllStatic {
static elapsedTimer _t_osr_compilation;
static elapsedTimer _t_standard_compilation;
+ static int _total_compile_count;
static int _total_bailout_count;
static int _total_invalidated_count;
- static int _total_compile_count;
static int _total_native_compile_count;
static int _total_osr_compile_count;
static int _total_standard_compile_count;
-
static int _sum_osr_bytes_compiled;
static int _sum_standard_bytes_compiled;
static int _sum_nmethod_size;
static int _sum_nmethod_code_size;
+ static long _peak_compilation_time;
static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
@@ -421,6 +421,19 @@ class CompileBroker: AllStatic {
// compiler name for debugging
static const char* compiler_name(int comp_level);
+
+ static int get_total_compile_count() { return _total_compile_count; }
+ static int get_total_bailout_count() { return _total_bailout_count; }
+ static int get_total_invalidated_count() { return _total_invalidated_count; }
+ static int get_total_native_compile_count() { return _total_native_compile_count; }
+ static int get_total_osr_compile_count() { return _total_osr_compile_count; }
+ static int get_total_standard_compile_count() { return _total_standard_compile_count; }
+ static int get_sum_osr_bytes_compiled() { return _sum_osr_bytes_compiled; }
+ static int get_sum_standard_bytes_compiled() { return _sum_standard_bytes_compiled; }
+ static int get_sum_nmethod_size() { return _sum_nmethod_size;}
+ static int get_sum_nmethod_code_size() { return _sum_nmethod_code_size; }
+ static long get_peak_compilation_time() { return _peak_compilation_time; }
+ static long get_total_compilation_time() { return _t_total_compilation.milliseconds(); }
};
#endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP
diff --git a/src/share/vm/compiler/compileLog.cpp b/src/share/vm/compiler/compileLog.cpp
index 2201c7f9e..2cce602f3 100644
--- a/src/share/vm/compiler/compileLog.cpp
+++ b/src/share/vm/compiler/compileLog.cpp
@@ -34,17 +34,18 @@ CompileLog* CompileLog::_first = NULL;
// ------------------------------------------------------------------
// CompileLog::CompileLog
-CompileLog::CompileLog(const char* file, FILE* fp, intx thread_id)
+CompileLog::CompileLog(const char* file_name, FILE* fp, intx thread_id)
: _context(_context_buffer, sizeof(_context_buffer))
{
- initialize(new(ResourceObj::C_HEAP, mtCompiler) fileStream(fp));
- _file = file;
+ initialize(new(ResourceObj::C_HEAP, mtCompiler) fileStream(fp, true));
_file_end = 0;
_thread_id = thread_id;
_identities_limit = 0;
_identities_capacity = 400;
_identities = NEW_C_HEAP_ARRAY(char, _identities_capacity, mtCompiler);
+ _file = NEW_C_HEAP_ARRAY(char, strlen(file_name)+1, mtCompiler);
+ strcpy((char*)_file, file_name);
// link into the global list
{ MutexLocker locker(CompileTaskAlloc_lock);
@@ -57,6 +58,7 @@ CompileLog::~CompileLog() {
delete _out;
_out = NULL;
FREE_C_HEAP_ARRAY(char, _identities, mtCompiler);
+ FREE_C_HEAP_ARRAY(char, _file, mtCompiler);
}
@@ -188,7 +190,8 @@ void CompileLog::finish_log_on_error(outputStream* file, char* buf, int buflen)
if (called_exit) return;
called_exit = true;
- for (CompileLog* log = _first; log != NULL; log = log->_next) {
+ CompileLog* log = _first;
+ while (log != NULL) {
log->flush();
const char* partial_file = log->file();
int partial_fd = open(partial_file, O_RDONLY);
@@ -267,7 +270,11 @@ void CompileLog::finish_log_on_error(outputStream* file, char* buf, int buflen)
close(partial_fd);
unlink(partial_file);
}
+ CompileLog* next_log = log->_next;
+ delete log;
+ log = next_log;
}
+ _first = NULL;
}
// ------------------------------------------------------------------
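
[Editor's note: finish_log_on_error() switches from a for-loop to an explicit while-loop because each CompileLog is now deleted during traversal. The next pointer must be captured before the delete; a for-loop's increment clause would read log->_next after the node is freed. A hedged sketch of the safe traversal:]

    #include <cstddef>

    struct Node { Node* next; };

    int main() {
      // Build a two-element list.
      Node* second = new Node; second->next = NULL;
      Node* first  = new Node; first->next  = second;

      Node* n = first;
      while (n != NULL) {
        Node* next = n->next;  // capture before freeing the node
        delete n;              // reading n->next after this is use-after-free
        n = next;
      }
      return 0;
    }
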
diff --git a/src/share/vm/compiler/compileLog.hpp b/src/share/vm/compiler/compileLog.hpp
index 1af5e9143..8b740f9f2 100644
--- a/src/share/vm/compiler/compileLog.hpp
+++ b/src/share/vm/compiler/compileLog.hpp
@@ -57,7 +57,7 @@ class CompileLog : public xmlStream {
void va_tag(bool push, const char* format, va_list ap);
public:
- CompileLog(const char* file, FILE* fp, intx thread_id);
+ CompileLog(const char* file_name, FILE* fp, intx thread_id);
~CompileLog();
intx thread_id() { return _thread_id; }
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp
index 01e0e8745..cfba2376e 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp
@@ -51,14 +51,6 @@ AdaptiveFreeList<Chunk>::AdaptiveFreeList() : FreeList<Chunk>(), _hint(0) {
}
template <class Chunk>
-AdaptiveFreeList<Chunk>::AdaptiveFreeList(Chunk* fc) : FreeList<Chunk>(fc), _hint(0) {
- init_statistics();
-#ifndef PRODUCT
- _allocation_stats.set_returned_bytes(size() * HeapWordSize);
-#endif
-}
-
-template <class Chunk>
void AdaptiveFreeList<Chunk>::initialize() {
FreeList<Chunk>::initialize();
set_hint(0);
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp
index 8b56bb11d..7215119ae 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp
@@ -55,7 +55,6 @@ class AdaptiveFreeList : public FreeList<Chunk> {
public:
AdaptiveFreeList();
- AdaptiveFreeList(Chunk* fc);
using FreeList<Chunk>::assert_proper_lock_protection;
#ifdef ASSERT
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp
index 838e35fb5..d60fe8068 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -969,8 +969,8 @@ size_t CMSAdaptiveSizePolicy::promo_increment_aligned_up(size_t cur_promo) {
}
-void CMSAdaptiveSizePolicy::compute_young_generation_free_space(size_t cur_eden,
- size_t max_eden_size)
+void CMSAdaptiveSizePolicy::compute_eden_space_size(size_t cur_eden,
+ size_t max_eden_size)
{
size_t desired_eden_size = cur_eden;
size_t eden_limit = max_eden_size;
@@ -978,7 +978,7 @@ void CMSAdaptiveSizePolicy::compute_young_generation_free_space(size_t cur_eden,
// Printout input
if (PrintGC && PrintAdaptiveSizePolicy) {
gclog_or_tty->print_cr(
- "CMSAdaptiveSizePolicy::compute_young_generation_free_space: "
+ "CMSAdaptiveSizePolicy::compute_eden_space_size: "
"cur_eden " SIZE_FORMAT,
cur_eden);
}
@@ -1024,7 +1024,7 @@ void CMSAdaptiveSizePolicy::compute_young_generation_free_space(size_t cur_eden,
if (PrintGC && PrintAdaptiveSizePolicy) {
gclog_or_tty->print_cr(
- "CMSAdaptiveSizePolicy::compute_young_generation_free_space limits:"
+ "CMSAdaptiveSizePolicy::compute_eden_space_size limits:"
" desired_eden_size: " SIZE_FORMAT
" old_eden_size: " SIZE_FORMAT,
desired_eden_size, cur_eden);
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp
index 6053b4323..00a4f8fd7 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -436,8 +436,8 @@ class CMSAdaptiveSizePolicy : public AdaptiveSizePolicy {
size_t generation_alignment() { return _generation_alignment; }
- virtual void compute_young_generation_free_space(size_t cur_eden,
- size_t max_eden_size);
+ virtual void compute_eden_space_size(size_t cur_eden,
+ size_t max_eden_size);
// Calculates new survivor space size; returns a new tenuring threshold
// value. Stores new survivor size in _survivor_size.
virtual uint compute_survivor_space_size_and_threshold(
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp
index e57d405e5..c2aafb7bc 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@ ConcurrentMarkSweepPolicy::ConcurrentMarkSweepPolicy() {
}
void ConcurrentMarkSweepPolicy::initialize_generations() {
- _generations = new GenerationSpecPtr[number_of_generations()];
+ _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
if (_generations == NULL)
vm_exit_during_initialization("Unable to allocate gen spec");
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
index 1c7a28c6b..f7730287b 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
@@ -122,6 +122,22 @@ class MarkRefsIntoClosure: public CMSOopsInGenClosure {
}
};
+class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
+ private:
+ const MemRegion _span;
+ CMSBitMap* _bitMap;
+ protected:
+ DO_OOP_WORK_DEFN
+ public:
+ Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
+ virtual void do_oop(oop* p);
+ virtual void do_oop(narrowOop* p);
+
+ Prefetch::style prefetch_style() {
+ return Prefetch::do_read;
+ }
+};
+
// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
index cb8b73743..b87efd7bc 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
@@ -153,8 +153,6 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
_indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
"a freelist par lock",
true);
- if (_indexedFreeListParLocks[i] == NULL)
- vm_exit_during_initialization("Could not allocate a par lock");
DEBUG_ONLY(
_indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
)
@@ -285,6 +283,7 @@ void CompactibleFreeListSpace::reset(MemRegion mr) {
_bt.verify_not_unallocated((HeapWord*)fc, fc->size());
_indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
}
+ coalBirth(mr.word_size());
}
_promoInfo.reset();
_smallLinearAllocBlock._ptr = NULL;
@@ -1762,7 +1761,7 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
}
ec->set_size(size);
debug_only(ec->mangleFreed(size));
- if (size < SmallForDictionary) {
+ if (size < SmallForDictionary && ParallelGCThreads != 0) {
lock = _indexedFreeListParLocks[size];
}
MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
@@ -2018,12 +2017,6 @@ oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
-
-void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
- // ugghh... how would one do this efficiently for a non-contiguous space?
- guarantee(false, "NYI");
-}
-
bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
return _smallLinearAllocBlock._word_size == 0;
}
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
index 23c95897c..74c97e8df 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
@@ -396,7 +396,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// iteration support for promotion
void save_marks();
bool no_allocs_since_save_marks();
- void object_iterate_since_last_GC(ObjectClosure* cl);
// iteration support for sweeping
void save_sweep_limit() {
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index 0f92b3602..2fc6edd34 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -37,8 +37,12 @@
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
+#include "memory/allocation.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
@@ -46,6 +50,7 @@
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
+#include "memory/padded.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/tenuredGeneration.hpp"
@@ -60,7 +65,8 @@
// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
-bool CMSCollector::_full_gc_requested = false;
+bool CMSCollector::_full_gc_requested = false;
+GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
@@ -224,7 +230,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
// depends on this property.
debug_only(
FreeChunk* junk = NULL;
- assert(UseCompressedKlassPointers ||
+ assert(UseCompressedClassPointers ||
junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
"Offset of FreeChunk::_prev within FreeChunk must match"
" that of OopDesc::_klass within OopDesc");
@@ -564,6 +570,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_restart_addr(NULL),
_overflow_list(NULL),
_stats(cmsGen),
+ _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
_eden_chunk_array(NULL), // may be set in ctor body
_eden_chunk_capacity(0), // -- ditto --
_eden_chunk_index(0), // -- ditto --
@@ -591,7 +598,10 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_concurrent_cycles_since_last_unload(0),
_roots_scanning_options(0),
_inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
- _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
+ _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
+ _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
+ _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
+ _cms_start_registered(false)
{
if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
ExplicitGCInvokesConcurrent = true;
@@ -692,8 +702,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
// Clip CMSBootstrapOccupancy between 0 and 100.
- _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
- /(double)100;
+ _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
_full_gcs_since_conc_gc = 0;
@@ -725,7 +734,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
// Support for parallelizing survivor space rescan
- if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
+ if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
const size_t max_plab_samples =
((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
@@ -1398,7 +1407,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
OrderAccess::storestore();
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
// Copy gap missed by (aligned) header size calculation below
obj->set_klass_gap(old->klass_gap());
}
@@ -1677,18 +1686,38 @@ void CMSCollector::collect(bool full,
_full_gcs_since_conc_gc++;
}
-void CMSCollector::request_full_gc(unsigned int full_gc_count) {
+void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
unsigned int gc_count = gch->total_full_collections();
if (gc_count == full_gc_count) {
MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
_full_gc_requested = true;
+ _full_gc_cause = cause;
CGC_lock->notify(); // nudge CMS thread
} else {
assert(gc_count > full_gc_count, "Error: causal loop");
}
}
+bool CMSCollector::is_external_interruption() {
+ GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
+ return GCCause::is_user_requested_gc(cause) ||
+ GCCause::is_serviceability_requested_gc(cause);
+}
+
+void CMSCollector::report_concurrent_mode_interruption() {
+ if (is_external_interruption()) {
+ if (PrintGCDetails) {
+ gclog_or_tty->print(" (concurrent mode interrupted)");
+ }
+ } else {
+ if (PrintGCDetails) {
+ gclog_or_tty->print(" (concurrent mode failure)");
+ }
+ _gc_tracer_cm->report_concurrent_mode_failure();
+ }
+}
+
// The foreground and background collectors need to coordinate in order
// to make sure that they do not mutually interfere with CMS collections.
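
The two helpers added in this hunk centralize how an aborted concurrent cycle is classified: a user- or serviceability-requested cause is merely an interruption, while anything else is a genuine concurrent mode failure and is additionally reported to the new CMS tracer. Condensed sketch (PrintGCDetails guards omitted), using only names introduced by this patch:

    if (is_external_interruption()) {   // user/serviceability request
      gclog_or_tty->print(" (concurrent mode interrupted)");
    } else {                            // CMS could not keep up
      gclog_or_tty->print(" (concurrent mode failure)");
      _gc_tracer_cm->report_concurrent_mode_failure();
    }
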
@@ -1846,14 +1875,8 @@ NOT_PRODUCT(
}
)
- if (PrintGCDetails && first_state > Idling) {
- GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
- if (GCCause::is_user_requested_gc(cause) ||
- GCCause::is_serviceability_requested_gc(cause)) {
- gclog_or_tty->print(" (concurrent mode interrupted)");
- } else {
- gclog_or_tty->print(" (concurrent mode failure)");
- }
+ if (first_state > Idling) {
+ report_concurrent_mode_interruption();
}
set_did_compact(should_compact);
@@ -1869,6 +1892,10 @@ NOT_PRODUCT(
// Reference objects are active.
ref_processor()->clean_up_discovered_references();
+ if (first_state > Idling) {
+ save_heap_summary();
+ }
+
do_compaction_work(clear_all_soft_refs);
// Has the GC time limit been exceeded?
@@ -1972,7 +1999,14 @@ void CMSCollector::decide_foreground_collection_type(
// a mark-sweep-compact.
void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
- TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
+
+ STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
+ gc_timer->register_gc_start(os::elapsed_counter());
+
+ SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
+ gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
+
+ GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
"collections passed to foreground collector", _full_gcs_since_conc_gc);
@@ -2063,6 +2097,10 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
size_policy()->msc_collection_end(gch->gc_cause());
}
+ gc_timer->register_gc_end(os::elapsed_counter());
+
+ gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
+
// For a mark-sweep-compact, compute_new_size() will be called
// in the heap's do_collection() method.
}
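
The bracketing introduced here recurs throughout this patch: a stop-the-world collection registers its start time on a GCTimer, reports the start to a tracer, runs, then registers and reports the end so the emitted event carries the timer's phase partitions. A minimal sketch of the bracket, assuming only the GenMarkSweep accessors used above:

    STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
    SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
    gc_timer->register_gc_start(os::elapsed_counter());
    gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
    // ... the mark-sweep-compact work itself ...
    gc_timer->register_gc_end(os::elapsed_counter());
    gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
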
@@ -2094,13 +2132,46 @@ void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
// required.
_collectorState = FinalMarking;
}
- collect_in_foreground(clear_all_soft_refs);
+ collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
// For a mark-sweep, compute_new_size() will be called
// in the heap's do_collection() method.
}
+void CMSCollector::print_eden_and_survivor_chunk_arrays() {
+ DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
+ EdenSpace* eden_space = dng->eden();
+ ContiguousSpace* from_space = dng->from();
+ ContiguousSpace* to_space = dng->to();
+ // Eden
+ if (_eden_chunk_array != NULL) {
+ gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
+ eden_space->bottom(), eden_space->top(),
+ eden_space->end(), eden_space->capacity());
+ gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
+ "_eden_chunk_capacity=" SIZE_FORMAT,
+ _eden_chunk_index, _eden_chunk_capacity);
+ for (size_t i = 0; i < _eden_chunk_index; i++) {
+ gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
+ i, _eden_chunk_array[i]);
+ }
+ }
+ // Survivor
+ if (_survivor_chunk_array != NULL) {
+ gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
+ from_space->bottom(), from_space->top(),
+ from_space->end(), from_space->capacity());
+ gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
+ "_survivor_chunk_capacity=" SIZE_FORMAT,
+ _survivor_chunk_index, _survivor_chunk_capacity);
+ for (size_t i = 0; i < _survivor_chunk_index; i++) {
+ gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
+ i, _survivor_chunk_array[i]);
+ }
+ }
+}
+
void CMSCollector::getFreelistLocks() const {
// Get locks for all free lists in all generations that this
// collector is responsible for
@@ -2154,7 +2225,7 @@ class ReleaseForegroundGC: public StackObj {
// one "collect" method between the background collector and the foreground
// collector, but the if-then-else that would be required made it cleaner to have
// separate methods.
-void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
+void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
assert(Thread::current()->is_ConcurrentGC_thread(),
"A CMS asynchronous collection is only allowed on a CMS thread.");
@@ -2173,6 +2244,7 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
} else {
assert(_collectorState == Idling, "Should be idling before start.");
_collectorState = InitialMarking;
+ register_gc_start(cause);
// Reset the expansion cause, now that we are about to begin
// a new cycle.
clear_expansion_cause();
@@ -2185,6 +2257,7 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
// ensuing concurrent GC cycle.
update_should_unload_classes();
_full_gc_requested = false; // acks all outstanding full gc requests
+ _full_gc_cause = GCCause::_no_gc;
// Signal that we are about to start a collection
gch->increment_total_full_collections(); // ... starting a collection cycle
_collection_count_start = gch->total_full_collections();
@@ -2264,7 +2337,6 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
{
ReleaseForegroundGC x(this);
stats().record_cms_begin();
-
VM_CMS_Initial_Mark initial_mark_op(this);
VMThread::execute(&initial_mark_op);
}
@@ -2344,6 +2416,7 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
CMSTokenSync z(true); // not strictly needed.
if (_collectorState == Resizing) {
compute_new_size();
+ save_heap_summary();
_collectorState = Resetting;
} else {
assert(_collectorState == Idling, "The state should only change"
@@ -2402,7 +2475,39 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
}
}
-void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
+void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
+ if (!_cms_start_registered) {
+ register_gc_start(cause);
+ }
+}
+
+void CMSCollector::register_gc_start(GCCause::Cause cause) {
+ _cms_start_registered = true;
+ _gc_timer_cm->register_gc_start(os::elapsed_counter());
+ _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
+}
+
+void CMSCollector::register_gc_end() {
+ if (_cms_start_registered) {
+ report_heap_summary(GCWhen::AfterGC);
+
+ _gc_timer_cm->register_gc_end(os::elapsed_counter());
+ _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
+ _cms_start_registered = false;
+ }
+}
+
+void CMSCollector::save_heap_summary() {
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+ _last_heap_summary = gch->create_heap_summary();
+ _last_metaspace_summary = gch->create_metaspace_summary();
+}
+
+void CMSCollector::report_heap_summary(GCWhen::Type when) {
+ _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary);
+}
+
+void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
"Foreground collector should be waiting, not executing");
assert(Thread::current()->is_VM_thread(), "A foreground collection"
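
The summary helpers added above decouple capturing a heap summary from reporting it: save_heap_summary() snapshots the heap and metaspace at a safepoint, and report_heap_summary() later forwards that snapshot to _gc_tracer_cm tagged BeforeGC or AfterGC. Usage sketch:

    save_heap_summary();                   // snapshot now, at a safepoint
    // ... possibly more concurrent work ...
    report_heap_summary(GCWhen::AfterGC);  // hand the snapshot to the tracer
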
@@ -2410,8 +2515,8 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
"VM thread should have CMS token");
- NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
- true, gclog_or_tty);)
+ NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
+ true, NULL);)
if (UseAdaptiveSizePolicy) {
size_policy()->ms_collection_begin();
}
@@ -2435,6 +2540,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
}
switch (_collectorState) {
case InitialMarking:
+ register_foreground_gc_start(cause);
init_mark_was_synchronous = true; // fact to be exploited in re-mark
checkpointRootsInitial(false);
assert(_collectorState == Marking, "Collector state should have changed"
@@ -2483,6 +2589,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
Universe::verify("Verify before reset: ");
}
+ save_heap_summary();
reset(false);
assert(_collectorState == Idling, "Collector state should "
"have changed");
@@ -3058,26 +3165,6 @@ oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
void
-ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
-{
- // Not currently implemented; need to do the following. -- ysr.
- // dld -- I think that is used for some sort of allocation profiler. So it
- // really means the objects allocated by the mutator since the last
- // GC. We could potentially implement this cheaply by recording only
- // the direct allocations in a side data structure.
- //
- // I think we probably ought not to be required to support these
- // iterations at any arbitrary point; I think there ought to be some
- // call to enable/disable allocation profiling in a generation/space,
- // and the iterator ought to return the objects allocated in the
- // gen/space since the enable call, or the last iterator call (which
- // will probably be at a GC.) That way, for gens like CM&S that would
- // require some extra data structure to support this, we only pay the
- // cost when it's in use...
- cmsSpace()->object_iterate_since_last_GC(blk);
-}
-
-void
ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
cl->set_generation(this);
younger_refs_in_space_iterate(_cmsSpace, cl);
@@ -3373,7 +3460,9 @@ void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
assert_locked_or_safepoint(Heap_lock);
size_t size = ReservedSpace::page_align_size_down(bytes);
- if (size > 0) {
+ // Only shrink if a compaction was done so that all the free space
+ // in the generation is in a contiguous block at the end.
+ if (size > 0 && did_compact()) {
shrink_by(size);
}
}
@@ -3382,7 +3471,6 @@ bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
assert_locked_or_safepoint(Heap_lock);
bool result = _virtual_space.expand_by(bytes);
if (result) {
- HeapWord* old_end = _cmsSpace->end();
size_t new_word_size =
heap_word_size(_virtual_space.committed_size());
MemRegion mr(_cmsSpace->bottom(), new_word_size);
@@ -3498,6 +3586,31 @@ CMSPhaseAccounting::~CMSPhaseAccounting() {
// CMS work
+// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
+class CMSParMarkTask : public AbstractGangTask {
+ protected:
+ CMSCollector* _collector;
+ int _n_workers;
+ CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
+ AbstractGangTask(name),
+ _collector(collector),
+ _n_workers(n_workers) {}
+ // Work method in support of parallel rescan ... of young gen spaces
+ void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
+ ContiguousSpace* space,
+ HeapWord** chunk_array, size_t chunk_top);
+ void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
+};
+
+// Parallel initial mark task
+class CMSParInitialMarkTask: public CMSParMarkTask {
+ public:
+ CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
+ CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
+ collector, n_workers) {}
+ void work(uint worker_id);
+};
+
// Checkpoint the roots into this generation from outside
// this generation. [Note this initial checkpoint need only
// be approximate -- we'll do a catch up phase subsequently.]
@@ -3506,6 +3619,9 @@ void CMSCollector::checkpointRootsInitial(bool asynch) {
check_correct_thread_executing();
TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
+ save_heap_summary();
+ report_heap_summary(GCWhen::BeforeGC);
+
ReferenceProcessor* rp = ref_processor();
SpecializationStats::clear();
assert(_restart_addr == NULL, "Control point invariant");
@@ -3551,8 +3667,8 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
// CMS collection cycle.
setup_cms_unloading_and_verification_state();
- NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
- PrintGCDetails && Verbose, true, gclog_or_tty);)
+ NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
+ PrintGCDetails && Verbose, true, _gc_timer_cm);)
if (UseAdaptiveSizePolicy) {
size_policy()->checkpoint_roots_initial_begin();
}
@@ -3592,19 +3708,42 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
// the klasses. The claimed marks need to be cleared before marking starts.
ClassLoaderDataGraph::clear_claimed_marks();
- CMKlassClosure klass_closure(&notOlder);
+ if (CMSPrintEdenSurvivorChunks) {
+ print_eden_and_survivor_chunk_arrays();
+ }
+
{
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
- gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
- gch->gen_process_strong_roots(_cmsGen->level(),
- true, // younger gens are roots
- true, // activate StrongRootsScope
- false, // not scavenging
- SharedHeap::ScanningOption(roots_scanning_options()),
- &notOlder,
- true, // walk all of code cache if (so & SO_CodeCache)
- NULL,
- &klass_closure);
+ if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
+ // The parallel version.
+ FlexibleWorkGang* workers = gch->workers();
+ assert(workers != NULL, "Need parallel worker threads.");
+ int n_workers = workers->active_workers();
+ CMSParInitialMarkTask tsk(this, n_workers);
+ gch->set_par_threads(n_workers);
+ initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
+ if (n_workers > 1) {
+ GenCollectedHeap::StrongRootsScope srs(gch);
+ workers->run_task(&tsk);
+ } else {
+ GenCollectedHeap::StrongRootsScope srs(gch);
+ tsk.work(0);
+ }
+ gch->set_par_threads(0);
+ } else {
+ // The serial version.
+ CMKlassClosure klass_closure(&notOlder);
+ gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+ gch->gen_process_strong_roots(_cmsGen->level(),
+ true, // younger gens are roots
+ true, // activate StrongRootsScope
+ false, // not scavenging
+ SharedHeap::ScanningOption(roots_scanning_options()),
+ &notOlder,
+ true, // walk all of code cache if (so & SO_CodeCache)
+ NULL,
+ &klass_closure);
+ }
}
// Clear mod-union table; it will be dirtied in the prologue of
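
The parallel branch above follows the usual HotSpot work-gang shape, which this patch extends from remark to the initial mark: size the gang, carve the young gen into subtasks, then run the task inside a StrongRootsScope, executing it inline when only one worker is active. Condensed sketch:

    FlexibleWorkGang* workers = gch->workers();
    int n_workers = workers->active_workers();
    CMSParInitialMarkTask tsk(this, n_workers);
    gch->set_par_threads(n_workers);
    initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
    {
      GenCollectedHeap::StrongRootsScope srs(gch);
      if (n_workers > 1) {
        workers->run_task(&tsk);  // fan the task out to the gang
      } else {
        tsk.work(0);              // degenerate case: run inline
      }
    }
    gch->set_par_threads(0);
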
@@ -4363,7 +4502,9 @@ void CMSCollector::preclean() {
verify_overflow_empty();
_abort_preclean = false;
if (CMSPrecleaningEnabled) {
- _eden_chunk_index = 0;
+ if (!CMSEdenChunksRecordAlways) {
+ _eden_chunk_index = 0;
+ }
size_t used = get_eden_used();
size_t capacity = get_eden_capacity();
// Don't start sampling unless we will get sufficiently
@@ -4472,7 +4613,9 @@ void CMSCollector::sample_eden() {
if (!_start_sampling) {
return;
}
- if (_eden_chunk_array) {
+ // When CMSEdenChunksRecordAlways is true, the eden chunk array
+ // is populated by the young generation.
+ if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
if (_eden_chunk_index < _eden_chunk_capacity) {
_eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
@@ -4544,8 +4687,10 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
// The code in this method may need further
// tweaking for better performance and some restructuring
// for cleaner interfaces.
+ GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
rp->preclean_discovered_references(
- rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl);
+ rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
+ gc_timer);
}
if (clean_survivor) { // preclean the active survivor space(s)
@@ -4887,8 +5032,8 @@ void CMSCollector::checkpointRootsFinal(bool asynch,
// Temporarily set flag to false, GCH->do_collection will
// expect it to be false and set to true
FlagSetting fl(gch->_is_gc_active, false);
- NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
- PrintGCDetails && Verbose, true, gclog_or_tty);)
+ NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
+ PrintGCDetails && Verbose, true, _gc_timer_cm);)
int level = _cmsGen->level() - 1;
if (level >= 0) {
gch->do_collection(true, // full (i.e. force, see below)
@@ -4917,7 +5062,7 @@ void CMSCollector::checkpointRootsFinal(bool asynch,
void CMSCollector::checkpointRootsFinalWork(bool asynch,
bool clear_all_soft_refs, bool init_mark_was_synchronous) {
- NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
+ NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
assert(haveFreelistLocks(), "must have free list locks");
assert_lock_strong(bitMapLock());
@@ -4954,6 +5099,10 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
// Update the saved marks which may affect the root scans.
gch->save_marks();
+ if (CMSPrintEdenSurvivorChunks) {
+ print_eden_and_survivor_chunk_arrays();
+ }
+
{
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
@@ -4968,11 +5117,11 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
// the most recent young generation GC, minus those cleaned up by the
// concurrent precleaning.
if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
- TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
+ GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
do_remark_parallel();
} else {
- TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
- gclog_or_tty);
+ GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
+ _gc_timer_cm);
do_remark_non_parallel();
}
}
@@ -4985,7 +5134,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
verify_overflow_empty();
{
- NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
+ NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
refProcessingWork(asynch, clear_all_soft_refs);
}
verify_work_stacks_empty();
@@ -5046,6 +5195,8 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
verify_after_remark();
}
+ _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
+
// Change under the freelistLocks.
_collectorState = Sweeping;
// Call isAllClear() under bitMapLock
@@ -5058,10 +5209,53 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
}
}
+void CMSParInitialMarkTask::work(uint worker_id) {
+ elapsedTimer _timer;
+ ResourceMark rm;
+ HandleMark hm;
+
+ // ---------- scan from roots --------------
+ _timer.start();
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+ Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
+ CMKlassClosure klass_closure(&par_mri_cl);
+
+ // ---------- young gen roots --------------
+ {
+ work_on_young_gen_roots(worker_id, &par_mri_cl);
+ _timer.stop();
+ if (PrintCMSStatistics != 0) {
+ gclog_or_tty->print_cr(
+ "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
+ worker_id, _timer.seconds());
+ }
+ }
+
+ // ---------- remaining roots --------------
+ _timer.reset();
+ _timer.start();
+ gch->gen_process_strong_roots(_collector->_cmsGen->level(),
+ false, // yg was scanned above
+ false, // this is parallel code
+ false, // not scavenging
+ SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+ &par_mri_cl,
+ true, // walk all of code cache if (so & SO_CodeCache)
+ NULL,
+ &klass_closure);
+ assert(_collector->should_unload_classes()
+ || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
+ "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
+ _timer.stop();
+ if (PrintCMSStatistics != 0) {
+ gclog_or_tty->print_cr(
+ "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
+ worker_id, _timer.seconds());
+ }
+}
+
// Parallel remark task
-class CMSParRemarkTask: public AbstractGangTask {
- CMSCollector* _collector;
- int _n_workers;
+class CMSParRemarkTask: public CMSParMarkTask {
CompactibleFreeListSpace* _cms_space;
// The per-thread work queues, available here for stealing.
@@ -5075,10 +5269,9 @@ class CMSParRemarkTask: public AbstractGangTask {
CompactibleFreeListSpace* cms_space,
int n_workers, FlexibleWorkGang* workers,
OopTaskQueueSet* task_queues):
- AbstractGangTask("Rescan roots and grey objects in parallel"),
- _collector(collector),
+ CMSParMarkTask("Rescan roots and grey objects in parallel",
+ collector, n_workers),
_cms_space(cms_space),
- _n_workers(n_workers),
_task_queues(task_queues),
_term(n_workers, task_queues) { }
@@ -5092,11 +5285,6 @@ class CMSParRemarkTask: public AbstractGangTask {
void work(uint worker_id);
private:
- // Work method in support of parallel rescan ... of young gen spaces
- void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
- ContiguousSpace* space,
- HeapWord** chunk_array, size_t chunk_top);
-
// ... of dirty cards in old space
void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
Par_MarkRefsIntoAndScanClosure* cl);
@@ -5128,6 +5316,25 @@ class RemarkKlassClosure : public KlassClosure {
}
};
+void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
+ DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
+ EdenSpace* eden_space = dng->eden();
+ ContiguousSpace* from_space = dng->from();
+ ContiguousSpace* to_space = dng->to();
+
+ HeapWord** eca = _collector->_eden_chunk_array;
+ size_t ect = _collector->_eden_chunk_index;
+ HeapWord** sca = _collector->_survivor_chunk_array;
+ size_t sct = _collector->_survivor_chunk_index;
+
+ assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
+ assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
+
+ do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
+ do_young_space_rescan(worker_id, cl, from_space, sca, sct);
+ do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
+}
+
// work_queue(i) is passed to the closure
// Par_MarkRefsIntoAndScanClosure. The "i" parameter
// also is passed to do_dirty_card_rescan_tasks() and to
@@ -5152,23 +5359,7 @@ void CMSParRemarkTask::work(uint worker_id) {
// work first.
// ---------- young gen roots --------------
{
- DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
- EdenSpace* eden_space = dng->eden();
- ContiguousSpace* from_space = dng->from();
- ContiguousSpace* to_space = dng->to();
-
- HeapWord** eca = _collector->_eden_chunk_array;
- size_t ect = _collector->_eden_chunk_index;
- HeapWord** sca = _collector->_survivor_chunk_array;
- size_t sct = _collector->_survivor_chunk_index;
-
- assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
- assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
-
- do_young_space_rescan(worker_id, &par_mrias_cl, to_space, NULL, 0);
- do_young_space_rescan(worker_id, &par_mrias_cl, from_space, sca, sct);
- do_young_space_rescan(worker_id, &par_mrias_cl, eden_space, eca, ect);
-
+ work_on_young_gen_roots(worker_id, &par_mrias_cl);
_timer.stop();
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr(
@@ -5276,8 +5467,8 @@ void CMSParRemarkTask::work(uint worker_id) {
// Note that the "worker_id" parameter is not used.
void
-CMSParRemarkTask::do_young_space_rescan(int i,
- Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
+CMSParMarkTask::do_young_space_rescan(uint worker_id,
+ OopsInGenClosure* cl, ContiguousSpace* space,
HeapWord** chunk_array, size_t chunk_top) {
// Until all tasks completed:
// . claim an unclaimed task
@@ -5289,40 +5480,42 @@ CMSParRemarkTask::do_young_space_rescan(int i,
HandleMark hm;
SequentialSubTasksDone* pst = space->par_seq_tasks();
- assert(pst->valid(), "Uninitialized use?");
uint nth_task = 0;
uint n_tasks = pst->n_tasks();
- HeapWord *start, *end;
- while (!pst->is_task_claimed(/* reference */ nth_task)) {
- // We claimed task # nth_task; compute its boundaries.
- if (chunk_top == 0) { // no samples were taken
- assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
- start = space->bottom();
- end = space->top();
- } else if (nth_task == 0) {
- start = space->bottom();
- end = chunk_array[nth_task];
- } else if (nth_task < (uint)chunk_top) {
- assert(nth_task >= 1, "Control point invariant");
- start = chunk_array[nth_task - 1];
- end = chunk_array[nth_task];
- } else {
- assert(nth_task == (uint)chunk_top, "Control point invariant");
- start = chunk_array[chunk_top - 1];
- end = space->top();
- }
- MemRegion mr(start, end);
- // Verify that mr is in space
- assert(mr.is_empty() || space->used_region().contains(mr),
- "Should be in space");
- // Verify that "start" is an object boundary
- assert(mr.is_empty() || oop(mr.start())->is_oop(),
- "Should be an oop");
- space->par_oop_iterate(mr, cl);
+ if (n_tasks > 0) {
+ assert(pst->valid(), "Uninitialized use?");
+ HeapWord *start, *end;
+ while (!pst->is_task_claimed(/* reference */ nth_task)) {
+ // We claimed task # nth_task; compute its boundaries.
+ if (chunk_top == 0) { // no samples were taken
+ assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
+ start = space->bottom();
+ end = space->top();
+ } else if (nth_task == 0) {
+ start = space->bottom();
+ end = chunk_array[nth_task];
+ } else if (nth_task < (uint)chunk_top) {
+ assert(nth_task >= 1, "Control point invariant");
+ start = chunk_array[nth_task - 1];
+ end = chunk_array[nth_task];
+ } else {
+ assert(nth_task == (uint)chunk_top, "Control point invariant");
+ start = chunk_array[chunk_top - 1];
+ end = space->top();
+ }
+ MemRegion mr(start, end);
+ // Verify that mr is in space
+ assert(mr.is_empty() || space->used_region().contains(mr),
+ "Should be in space");
+ // Verify that "start" is an object boundary
+ assert(mr.is_empty() || oop(mr.start())->is_oop(),
+ "Should be an oop");
+ space->par_oop_iterate(mr, cl);
+ }
+ pst->all_tasks_completed();
}
- pst->all_tasks_completed();
}
void
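
The loop above partitions a young space into rescan tasks using the sampled chunk array: task 0 starts at bottom(), interior task k covers [chunk_array[k-1], chunk_array[k]), the final task runs to top(), and with no samples there is exactly one task for the whole space. A hypothetical helper (not part of the patch) mirroring the boundary computation:

    static void task_bounds(ContiguousSpace* space, HeapWord** chunk_array,
                            size_t chunk_top, uint nth_task,
                            HeapWord*& start, HeapWord*& end) {
      if (chunk_top == 0) {                    // no samples: one whole-space task
        start = space->bottom();
        end   = space->top();
      } else if (nth_task == 0) {              // first task begins at bottom
        start = space->bottom();
        end   = chunk_array[0];
      } else if (nth_task < (uint)chunk_top) { // interior task
        start = chunk_array[nth_task - 1];
        end   = chunk_array[nth_task];
      } else {                                 // last task ends at top
        start = chunk_array[chunk_top - 1];
        end   = space->top();
      }
    }
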
@@ -5472,6 +5665,32 @@ CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
"Else our work is not yet done");
}
+// Record object boundaries in _eden_chunk_array by sampling the eden
+// top in the slow-path eden object allocation code path, when
+// CMSEdenChunksRecordAlways is true. If CMSEdenChunksRecordAlways is
+// false, we instead use the asynchronous sampling in sample_eden(),
+// which runs during part of the preclean phase.
+void CMSCollector::sample_eden_chunk() {
+ if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
+ if (_eden_chunk_lock->try_lock()) {
+ // Record a sample. This is the critical section. The contents
+ // of the _eden_chunk_array have to be non-decreasing in the
+ // address order.
+ _eden_chunk_array[_eden_chunk_index] = *_top_addr;
+ assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
+ "Unexpected state of Eden");
+ if (_eden_chunk_index == 0 ||
+ ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
+ (pointer_delta(_eden_chunk_array[_eden_chunk_index],
+ _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
+ _eden_chunk_index++; // commit sample
+ }
+ _eden_chunk_lock->unlock();
+ }
+ }
+}
+
// Return a thread-local PLAB recording array, as appropriate.
void* CMSCollector::get_data_recorder(int thr_num) {
if (_survivor_plab_array != NULL &&
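
sample_eden_chunk() above keeps _eden_chunk_array strictly increasing and suitably coarse: a freshly recorded eden top is committed only when it advances the previous sample by at least CMSSamplingGrain words. The commit test, condensed with i standing for _eden_chunk_index:

    bool commit = (i == 0) ||
                  ((_eden_chunk_array[i] > _eden_chunk_array[i - 1]) &&
                   (pointer_delta(_eden_chunk_array[i],
                                  _eden_chunk_array[i - 1]) >= CMSSamplingGrain));
    if (commit) {
      _eden_chunk_index++;  // slot i becomes a rescan task boundary
    }
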
@@ -5495,12 +5714,13 @@ void CMSCollector::reset_survivor_plab_arrays() {
// Merge the per-thread plab arrays into the global survivor chunk
// array which will provide the partitioning of the survivor space
-// for CMS rescan.
+// for CMS initial scan and rescan.
void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
int no_of_gc_threads) {
assert(_survivor_plab_array != NULL, "Error");
assert(_survivor_chunk_array != NULL, "Error");
- assert(_collectorState == FinalMarking, "Error");
+ assert(_collectorState == FinalMarking ||
+ (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
for (int j = 0; j < no_of_gc_threads; j++) {
_cursor[j] = 0;
}
@@ -5563,7 +5783,7 @@ void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
}
// Set up the space's par_seq_tasks structure for work claiming
-// for parallel rescan of young gen.
+// for parallel initial scan and rescan of young gen.
// See ParRescanTask where this is currently used.
void
CMSCollector::
@@ -5572,7 +5792,7 @@ initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
// Eden space
- {
+ if (!dng->eden()->is_empty()) {
SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
assert(!pst->valid(), "Clobbering existing data?");
// Each valid entry in [0, _eden_chunk_index) represents a task.
@@ -5699,7 +5919,7 @@ void CMSCollector::do_remark_non_parallel() {
NULL, // space is set further below
&_markBitMap, &_markStack, &mrias_cl);
{
- TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
+ GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
// Iterate over the dirty cards, setting the corresponding bits in the
// mod union table.
{
@@ -5736,7 +5956,7 @@ void CMSCollector::do_remark_non_parallel() {
Universe::verify();
}
{
- TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
+ GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
verify_work_stacks_empty();
@@ -5758,7 +5978,7 @@ void CMSCollector::do_remark_non_parallel() {
}
{
- TraceTime t("visit unhandled CLDs", PrintGCDetails, false, gclog_or_tty);
+ GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
verify_work_stacks_empty();
@@ -5777,7 +5997,7 @@ void CMSCollector::do_remark_non_parallel() {
}
{
- TraceTime t("dirty klass scan", PrintGCDetails, false, gclog_or_tty);
+ GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
verify_work_stacks_empty();
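
The TraceTime to GCTraceTime swaps in these hunks follow one rule: each traced sub-phase now passes _gc_timer_cm so the phase is recorded into the timer's partitions (and hence the GC event) in addition to being printed; sites that pass NULL keep logging-only behavior. Scoped usage sketch:

    {
      // Registers the phase on _gc_timer_cm and, if PrintGCDetails is set,
      // prints its wall time when the scope exits.
      GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
      // ... phase body ...
    }
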
@@ -5979,7 +6199,9 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
_span, &_markBitMap, &_markStack,
&cmsKeepAliveClosure, false /* !preclean */);
{
- TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
+ GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
+
+ ReferenceProcessorStats stats;
if (rp->processing_is_mt()) {
// Set the degree of MT here. If the discovery is done MT, there
// may have been a different number of threads doing the discovery
@@ -5998,16 +6220,20 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
}
rp->set_active_mt_degree(active_workers);
CMSRefProcTaskExecutor task_executor(*this);
- rp->process_discovered_references(&_is_alive_closure,
+ stats = rp->process_discovered_references(&_is_alive_closure,
&cmsKeepAliveClosure,
&cmsDrainMarkingStackClosure,
- &task_executor);
+ &task_executor,
+ _gc_timer_cm);
} else {
- rp->process_discovered_references(&_is_alive_closure,
+ stats = rp->process_discovered_references(&_is_alive_closure,
&cmsKeepAliveClosure,
&cmsDrainMarkingStackClosure,
- NULL);
+ NULL,
+ _gc_timer_cm);
}
+ _gc_tracer_cm->report_gc_reference_stats(stats);
+
}
// This is the point where the entire marking should have completed.
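
The reference-processing change above (mirrored later in the G1 remark path) alters the call shape: process_discovered_references() now takes the phase timer and returns a ReferenceProcessorStats value that is forwarded to the tracer. Sketched with illustrative closure names:

    ReferenceProcessorStats stats =
        rp->process_discovered_references(&is_alive,         // liveness oracle
                                          &keep_alive,       // revives referents
                                          &drain_stack,      // finishes marking
                                          executor_or_null,  // MT executor, or NULL
                                          gc_timer);         // phase timing
    gc_tracer->report_gc_reference_stats(stats);
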
@@ -6015,7 +6241,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
if (should_unload_classes()) {
{
- TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
+ GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
@@ -6028,7 +6254,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
}
{
- TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty);
+ GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
}
@@ -6037,7 +6263,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
// CMS doesn't use the StringTable as hard roots when class unloading is turned off.
// Need to check if we really scanned the StringTable.
if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
- TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
+ GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
// Delete entries for dead interned strings.
StringTable::unlink(&_is_alive_closure);
}
@@ -6382,12 +6608,14 @@ void CMSCollector::reset(bool asynch) {
_cmsGen->rotate_debug_collection_type();
}
)
+
+ register_gc_end();
}
void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
- TraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
+ GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
TraceCollectorStats tcs(counters());
switch (op) {
@@ -6682,6 +6910,28 @@ void MarkRefsIntoClosure::do_oop(oop obj) {
void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
+Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
+ MemRegion span, CMSBitMap* bitMap):
+ _span(span),
+ _bitMap(bitMap)
+{
+ assert(_ref_processor == NULL, "deliberately left NULL");
+ assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
+}
+
+void Par_MarkRefsIntoClosure::do_oop(oop obj) {
+ // if obj points into _span, then mark the corresponding bit in _markBitMap
+ assert(obj->is_oop(), "expected an oop");
+ HeapWord* addr = (HeapWord*)obj;
+ if (_span.contains(addr)) {
+ // this should be made more efficient
+ _bitMap->par_mark(addr);
+ }
+}
+
+void Par_MarkRefsIntoClosure::do_oop(oop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
+void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
+
// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
@@ -8448,9 +8698,10 @@ void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
assert(inFreeRange(), "Should only be called if currently in a free range.");
HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
assert(_sp->used_region().contains(eob - 1),
- err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
+ err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
+ " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
" when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
- _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
+ eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
if (eob >= _limit) {
assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
if (CMSTraceSweeper) {
@@ -9239,7 +9490,6 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
return;
}
}
-
// Transfer some number of overflown objects to usual marking
// stack. Return true if some objects were transferred.
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
@@ -9311,4 +9561,3 @@ TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorSt
ShouldNotReachHere();
}
}
-
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
index e930b5863..2c87671df 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
@@ -25,8 +25,10 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
+#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
@@ -53,6 +55,8 @@
class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
+class CMSTracer;
+class ConcurrentGCTimer;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
@@ -61,6 +65,7 @@ class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
class TenuredGeneration;
+class SerialOldTracer;
// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
@@ -485,10 +490,6 @@ class CMSIsAliveClosure: public BoolObjectClosure {
assert(!span.is_empty(), "Empty span could spell trouble");
}
- void do_object(oop obj) {
- assert(false, "not to be invoked");
- }
-
bool do_object_b(oop obj);
};
@@ -514,6 +515,8 @@ class CMSCollector: public CHeapObj<mtGC> {
friend class ConcurrentMarkSweepThread;
friend class ConcurrentMarkSweepGeneration;
friend class CompactibleFreeListSpace;
+ friend class CMSParMarkTask;
+ friend class CMSParInitialMarkTask;
friend class CMSParRemarkTask;
friend class CMSConcMarkingTask;
friend class CMSRefProcTaskProxy;
@@ -571,8 +574,9 @@ class CMSCollector: public CHeapObj<mtGC> {
bool _completed_initialization;
// In support of ExplicitGCInvokesConcurrent
- static bool _full_gc_requested;
- unsigned int _collection_count_start;
+ static bool _full_gc_requested;
+ static GCCause::Cause _full_gc_cause;
+ unsigned int _collection_count_start;
// Should we unload classes this concurrent cycle?
bool _should_unload_classes;
@@ -613,6 +617,20 @@ class CMSCollector: public CHeapObj<mtGC> {
AdaptivePaddedAverage _inter_sweep_estimate;
AdaptivePaddedAverage _intra_sweep_estimate;
+ CMSTracer* _gc_tracer_cm;
+ ConcurrentGCTimer* _gc_timer_cm;
+
+ bool _cms_start_registered;
+
+ GCHeapSummary _last_heap_summary;
+ MetaspaceSummary _last_metaspace_summary;
+
+ void register_foreground_gc_start(GCCause::Cause cause);
+ void register_gc_start(GCCause::Cause cause);
+ void register_gc_end();
+ void save_heap_summary();
+ void report_heap_summary(GCWhen::Type when);
+
protected:
ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS)
MemRegion _span; // span covering above two
@@ -733,6 +751,7 @@ class CMSCollector: public CHeapObj<mtGC> {
Generation* _young_gen; // the younger gen
HeapWord** _top_addr; // ... Top of Eden
HeapWord** _end_addr; // ... End of Eden
+ Mutex* _eden_chunk_lock;
HeapWord** _eden_chunk_array; // ... Eden partitioning array
size_t _eden_chunk_index; // ... top (exclusive) of array
size_t _eden_chunk_capacity; // ... max entries in array
@@ -831,6 +850,10 @@ class CMSCollector: public CHeapObj<mtGC> {
void do_mark_sweep_work(bool clear_all_soft_refs,
CollectorState first_state, bool should_start_over);
+ // Work methods for reporting concurrent mode interruption or failure
+ bool is_external_interruption();
+ void report_concurrent_mode_interruption();
+
// If the background GC is active, acquire control from the background
// GC and do the collection.
void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
@@ -880,11 +903,11 @@ class CMSCollector: public CHeapObj<mtGC> {
bool clear_all_soft_refs,
size_t size,
bool tlab);
- void collect_in_background(bool clear_all_soft_refs);
- void collect_in_foreground(bool clear_all_soft_refs);
+ void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
+ void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);
// In support of ExplicitGCInvokesConcurrent
- static void request_full_gc(unsigned int full_gc_count);
+ static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
// Should we unload classes in a particular concurrent cycle?
bool should_unload_classes() const {
return _should_unload_classes;
@@ -930,6 +953,7 @@ class CMSCollector: public CHeapObj<mtGC> {
// Support for parallel remark of survivor space
void* get_data_recorder(int thr_num);
+ void sample_eden_chunk();
CMSBitMap* markBitMap() { return &_markBitMap; }
void directAllocated(HeapWord* start, size_t size);
@@ -1007,6 +1031,8 @@ class CMSCollector: public CHeapObj<mtGC> {
// Initialization errors
bool completed_initialization() { return _completed_initialization; }
+
+ void print_eden_and_survivor_chunk_arrays();
};
class CMSExpansionCause : public AllStatic {
@@ -1253,7 +1279,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Iteration support and related enquiries
void save_marks();
bool no_allocs_since_save_marks();
- void object_iterate_since_last_GC(ObjectClosure* cl);
void younger_refs_iterate(OopsInGenClosure* cl);
// Iteration support specific to CMS generations
@@ -1298,6 +1323,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
//Delegate to collector
return collector()->get_data_recorder(thr_num);
}
+ void sample_eden_chunk() {
+ //Delegate to collector
+ return collector()->sample_eden_chunk();
+ }
// Printing
const char* name() const;
@@ -1536,9 +1565,6 @@ class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
_bit_map(bit_map),
_par_scan_closure(cl) { }
- void do_object(oop obj) {
- guarantee(false, "Call do_object_b(oop, MemRegion) instead");
- }
bool do_object_b(oop obj) {
guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
return false;
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp
index 7bc1c1ea9..f8b0ccb90 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp
@@ -140,7 +140,9 @@ void ConcurrentMarkSweepThread::run() {
while (!_should_terminate) {
sleepBeforeNextCycle();
if (_should_terminate) break;
- _collector->collect_in_background(false); // !clear_all_soft_refs
+ GCCause::Cause cause = _collector->_full_gc_requested ?
+ _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
+ _collector->collect_in_background(false, cause);
}
assert(_should_terminate, "just checking");
// Check that the state of any protocol for synchronization
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp
index eacd2a9f6..3e24c2419 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,9 +26,12 @@
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/gcLocker.inline.hpp"
#include "runtime/interfaceSupport.hpp"
+#include "runtime/os.hpp"
#include "utilities/dtrace.hpp"
@@ -60,6 +63,7 @@ void VM_CMS_Operation::release_and_notify_pending_list_lock() {
void VM_CMS_Operation::verify_before_gc() {
if (VerifyBeforeGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+ GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm);
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -71,6 +75,7 @@ void VM_CMS_Operation::verify_before_gc() {
void VM_CMS_Operation::verify_after_gc() {
if (VerifyAfterGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+ GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm);
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -140,6 +145,8 @@ void VM_CMS_Initial_Mark::doit() {
);
#endif /* USDT2 */
+ _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark", os::elapsed_counter());
+
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);
@@ -149,6 +156,9 @@ void VM_CMS_Initial_Mark::doit() {
_collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());
VM_CMS_Operation::verify_after_gc();
+
+ _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter());
+
#ifndef USDT2
HS_DTRACE_PROBE(hs_private, cms__initmark__end);
#else /* USDT2 */
@@ -172,6 +182,8 @@ void VM_CMS_Final_Remark::doit() {
);
#endif /* USDT2 */
+ _collector->_gc_timer_cm->register_gc_pause_start("Final Mark", os::elapsed_counter());
+
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCCauseSetter gccs(gch, GCCause::_cms_final_remark);
@@ -181,6 +193,10 @@ void VM_CMS_Final_Remark::doit() {
_collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());
VM_CMS_Operation::verify_after_gc();
+
+ _collector->save_heap_summary();
+ _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter());
+
#ifndef USDT2
HS_DTRACE_PROBE(hs_private, cms__remark__end);
#else /* USDT2 */
@@ -225,7 +241,7 @@ void VM_GenCollectFullConcurrent::doit() {
// In case CMS thread was in icms_wait(), wake it up.
CMSCollector::start_icms();
// Nudge the CMS thread to start a concurrent collection.
- CMSCollector::request_full_gc(_full_gc_count_before);
+ CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
} else {
assert(_full_gc_count_before < gch->total_full_collections(), "Error");
FullGCCount_lock->notify_all(); // Inform the Java thread its work is done
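
Both VM operations above wrap their safepoint work in pause registration on the collector's concurrent timer, so initial mark and final remark appear as named pauses inside the surrounding concurrent cycle event. The bracket, sketched for the initial-mark case:

    _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark",
                                                      os::elapsed_counter());
    // ... verify_before_gc(), do_CMS_operation(...), verify_after_gc() ...
    _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter());
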
diff --git a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
index 5f049209e..f0b4da8a8 100644
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
@@ -114,6 +114,14 @@ void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
}
}
+void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
+ if (_threads != NULL) {
+ for (int i = 0; i < worker_thread_num(); i++) {
+ tc->do_thread(_threads[i]);
+ }
+ }
+}
+
int ConcurrentG1Refine::thread_num() {
int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
: ParallelGCThreads;
@@ -126,3 +134,7 @@ void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
st->cr();
}
}
+
+ConcurrentG1RefineThread * ConcurrentG1Refine::sampling_thread() const {
+ return _threads[worker_thread_num()];
+}
diff --git a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp
index 46e6622ee..3dc7c6242 100644
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp
@@ -35,6 +35,7 @@ class ConcurrentG1RefineThread;
class G1CollectedHeap;
class G1HotCardCache;
class G1RemSet;
+class DirtyCardQueue;
class ConcurrentG1Refine: public CHeapObj<mtGC> {
ConcurrentG1RefineThread** _threads;
@@ -78,9 +79,15 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
void reinitialize_threads();
- // Iterate over the conc refine threads
+ // Iterate over all concurrent refinement threads
void threads_do(ThreadClosure *tc);
+ // Iterate over all worker refinement threads
+ void worker_threads_do(ThreadClosure * tc);
+
+ // The RS sampling thread
+ ConcurrentG1RefineThread * sampling_thread() const;
+
static int thread_num();
void print_worker_threads_on(outputStream* st) const;
diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index 39e5a2792..77f08f0d3 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -36,6 +36,9 @@
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
@@ -478,9 +481,8 @@ uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
_g1h(g1h),
- _markBitMap1(MinObjAlignment - 1),
- _markBitMap2(MinObjAlignment - 1),
-
+ _markBitMap1(log2_intptr(MinObjAlignment)),
+ _markBitMap2(log2_intptr(MinObjAlignment)),
_parallel_marking_threads(0),
_max_parallel_marking_threads(0),
_sleep_factor(0.0),
@@ -1342,6 +1344,9 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
_remark_times.add((now - start) * 1000.0);
g1p->record_concurrent_mark_remark_end();
+
+ G1CMIsAliveClosure is_alive(g1h);
+ g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}
// Base class of the closures that finalize and verify the
@@ -2129,6 +2134,7 @@ void ConcurrentMark::cleanup() {
}
g1h->verify_region_sets_optional();
+ g1h->trace_heap_after_concurrent_cycle();
}
void ConcurrentMark::completeCleanup() {
@@ -2439,7 +2445,7 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
if (G1Log::finer()) {
gclog_or_tty->put(' ');
}
- TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
+ GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
ReferenceProcessor* rp = g1h->ref_processor_cm();
@@ -2491,10 +2497,13 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
rp->set_active_mt_degree(active_workers);
// Process the weak references.
- rp->process_discovered_references(&g1_is_alive,
- &g1_keep_alive,
- &g1_drain_mark_stack,
- executor);
+ const ReferenceProcessorStats& stats =
+ rp->process_discovered_references(&g1_is_alive,
+ &g1_keep_alive,
+ &g1_drain_mark_stack,
+ executor,
+ g1h->gc_timer_cm());
+ g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
// The do_oop work routines of the keep_alive and drain_marking_stack
// oop closures will set the has_overflown flag if we overflow the
@@ -3227,6 +3236,9 @@ void ConcurrentMark::abort() {
satb_mq_set.set_active_all_threads(
false, /* new active value */
satb_mq_set.is_active() /* expected_active */);
+
+ _g1h->trace_heap_after_concurrent_cycle();
+ _g1h->register_concurrent_cycle_end();
}
static void print_ms_time_info(const char* prefix, const char* name,
@@ -4515,7 +4527,8 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
_total_used_bytes(0), _total_capacity_bytes(0),
_total_prev_live_bytes(0), _total_next_live_bytes(0),
_hum_used_bytes(0), _hum_capacity_bytes(0),
- _hum_prev_live_bytes(0), _hum_next_live_bytes(0) {
+ _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
+ _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
MemRegion g1_committed = g1h->g1_committed();
MemRegion g1_reserved = g1h->g1_reserved();
@@ -4533,23 +4546,29 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
HeapRegion::GrainBytes);
_out->print_cr(G1PPRL_LINE_PREFIX);
_out->print_cr(G1PPRL_LINE_PREFIX
- G1PPRL_TYPE_H_FORMAT
- G1PPRL_ADDR_BASE_H_FORMAT
- G1PPRL_BYTE_H_FORMAT
- G1PPRL_BYTE_H_FORMAT
- G1PPRL_BYTE_H_FORMAT
- G1PPRL_DOUBLE_H_FORMAT,
- "type", "address-range",
- "used", "prev-live", "next-live", "gc-eff");
+ G1PPRL_TYPE_H_FORMAT
+ G1PPRL_ADDR_BASE_H_FORMAT
+ G1PPRL_BYTE_H_FORMAT
+ G1PPRL_BYTE_H_FORMAT
+ G1PPRL_BYTE_H_FORMAT
+ G1PPRL_DOUBLE_H_FORMAT
+ G1PPRL_BYTE_H_FORMAT
+ G1PPRL_BYTE_H_FORMAT,
+ "type", "address-range",
+ "used", "prev-live", "next-live", "gc-eff",
+ "remset", "code-roots");
_out->print_cr(G1PPRL_LINE_PREFIX
- G1PPRL_TYPE_H_FORMAT
- G1PPRL_ADDR_BASE_H_FORMAT
- G1PPRL_BYTE_H_FORMAT
- G1PPRL_BYTE_H_FORMAT
- G1PPRL_BYTE_H_FORMAT
- G1PPRL_DOUBLE_H_FORMAT,
- "", "",
- "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
+ G1PPRL_TYPE_H_FORMAT
+ G1PPRL_ADDR_BASE_H_FORMAT
+ G1PPRL_BYTE_H_FORMAT
+ G1PPRL_BYTE_H_FORMAT
+ G1PPRL_BYTE_H_FORMAT
+ G1PPRL_DOUBLE_H_FORMAT
+ G1PPRL_BYTE_H_FORMAT
+ G1PPRL_BYTE_H_FORMAT,
+ "", "",
+ "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
+ "(bytes)", "(bytes)");
}
// It takes as a parameter a reference to one of the _hum_* fields, it
@@ -4591,6 +4610,9 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
size_t prev_live_bytes = r->live_bytes();
size_t next_live_bytes = r->next_live_bytes();
double gc_eff = r->gc_efficiency();
+ size_t remset_bytes = r->rem_set()->mem_size();
+ size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
+
if (r->used() == 0) {
type = "FREE";
} else if (r->is_survivor()) {
@@ -4624,6 +4646,8 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
_total_capacity_bytes += capacity_bytes;
_total_prev_live_bytes += prev_live_bytes;
_total_next_live_bytes += next_live_bytes;
+ _total_remset_bytes += remset_bytes;
+ _total_strong_code_roots_bytes += strong_code_roots_bytes;
// Print a line for this particular region.
_out->print_cr(G1PPRL_LINE_PREFIX
@@ -4632,14 +4656,19 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
G1PPRL_BYTE_FORMAT
G1PPRL_BYTE_FORMAT
G1PPRL_BYTE_FORMAT
- G1PPRL_DOUBLE_FORMAT,
+ G1PPRL_DOUBLE_FORMAT
+ G1PPRL_BYTE_FORMAT
+ G1PPRL_BYTE_FORMAT,
type, bottom, end,
- used_bytes, prev_live_bytes, next_live_bytes, gc_eff);
+ used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
+ remset_bytes, strong_code_roots_bytes);
return false;
}
G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
+ // add the static memory usage to the remembered set total
+ _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
// Print the footer of the output.
_out->print_cr(G1PPRL_LINE_PREFIX);
_out->print_cr(G1PPRL_LINE_PREFIX
@@ -4647,13 +4676,17 @@ G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
G1PPRL_SUM_MB_FORMAT("capacity")
G1PPRL_SUM_MB_PERC_FORMAT("used")
G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
- G1PPRL_SUM_MB_PERC_FORMAT("next-live"),
+ G1PPRL_SUM_MB_PERC_FORMAT("next-live")
+ G1PPRL_SUM_MB_FORMAT("remset")
+ G1PPRL_SUM_MB_FORMAT("code-roots"),
bytes_to_mb(_total_capacity_bytes),
bytes_to_mb(_total_used_bytes),
perc(_total_used_bytes, _total_capacity_bytes),
bytes_to_mb(_total_prev_live_bytes),
perc(_total_prev_live_bytes, _total_capacity_bytes),
bytes_to_mb(_total_next_live_bytes),
- perc(_total_next_live_bytes, _total_capacity_bytes));
+ perc(_total_next_live_bytes, _total_capacity_bytes),
+ bytes_to_mb(_total_remset_bytes),
+ bytes_to_mb(_total_strong_code_roots_bytes));
_out->cr();
}
diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.hpp b/src/share/vm/gc_implementation/g1/concurrentMark.hpp
index 211a728e2..a01024fcb 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp
@@ -44,9 +44,6 @@ class G1CMIsAliveClosure: public BoolObjectClosure {
public:
G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
- void do_object(oop obj) {
- ShouldNotCallThis();
- }
bool do_object_b(oop obj);
};
@@ -572,8 +569,6 @@ protected:
void clear_has_overflown() { _has_overflown = false; }
bool restart_for_overflow() { return _restart_for_overflow; }
- bool has_aborted() { return _has_aborted; }
-
// Methods to enter the two overflow sync barriers
void enter_first_sync_barrier(uint worker_id);
void enter_second_sync_barrier(uint worker_id);
@@ -824,6 +819,8 @@ public:
// Called to abort the marking cycle after a Full GC takes place.
void abort();
+ bool has_aborted() { return _has_aborted; }
+
// This prints the global/local fingers. It is used for debugging.
NOT_PRODUCT(void print_finger();)
@@ -1257,6 +1254,12 @@ private:
size_t _hum_prev_live_bytes;
size_t _hum_next_live_bytes;
+ // Accumulator for the remembered set size
+ size_t _total_remset_bytes;
+
+ // Accumulator for strong code roots memory size
+ size_t _total_strong_code_roots_bytes;
+
static double perc(size_t val, size_t total) {
if (total == 0) {
return 0.0;
diff --git a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp
index a961fda8f..ee53c3ba6 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp
@@ -93,7 +93,6 @@ void ConcurrentMarkThread::run() {
ResourceMark rm;
HandleMark hm;
double cycle_start = os::elapsedVTime();
- char verbose_str[128];
// We have to ensure that we finish scanning the root regions
// before the next GC takes place. To ensure this we have to
@@ -155,8 +154,7 @@ void ConcurrentMarkThread::run() {
}
CMCheckpointRootsFinalClosure final_cl(_cm);
- sprintf(verbose_str, "GC remark");
- VM_CGC_Operation op(&final_cl, verbose_str, true /* needs_pll */);
+ VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
VMThread::execute(&op);
}
if (cm()->restart_for_overflow()) {
@@ -187,8 +185,7 @@ void ConcurrentMarkThread::run() {
}
CMCleanUp cl_cl(_cm);
- sprintf(verbose_str, "GC cleanup");
- VM_CGC_Operation op(&cl_cl, verbose_str, false /* needs_pll */);
+ VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
VMThread::execute(&op);
} else {
// We don't want to update the marking status if a GC pause
@@ -292,6 +289,7 @@ void ConcurrentMarkThread::run() {
// called System.gc() with +ExplicitGCInvokesConcurrent).
_sts.join();
g1h->increment_old_marking_cycles_completed(true /* concurrent */);
+ g1h->register_concurrent_cycle_end();
_sts.leave();
}
assert(_should_terminate, "just checking");
diff --git a/src/share/vm/gc_implementation/g1/evacuationInfo.hpp b/src/share/vm/gc_implementation/g1/evacuationInfo.hpp
new file mode 100644
index 000000000..97e0ab2f7
--- /dev/null
+++ b/src/share/vm/gc_implementation/g1/evacuationInfo.hpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP
+
+#include "memory/allocation.hpp"
+
+class EvacuationInfo : public StackObj {
+ uint _collectionset_regions;
+ uint _allocation_regions;
+ size_t _collectionset_used_before;
+ size_t _collectionset_used_after;
+ size_t _alloc_regions_used_before;
+ size_t _bytes_copied;
+ uint _regions_freed;
+
+public:
+ EvacuationInfo() : _collectionset_regions(0), _allocation_regions(0), _collectionset_used_before(0),
+ _collectionset_used_after(0), _alloc_regions_used_before(0),
+ _bytes_copied(0), _regions_freed(0) { }
+
+ void set_collectionset_regions(uint collectionset_regions) {
+ _collectionset_regions = collectionset_regions;
+ }
+
+ void set_allocation_regions(uint allocation_regions) {
+ _allocation_regions = allocation_regions;
+ }
+
+ void set_collectionset_used_before(size_t used) {
+ _collectionset_used_before = used;
+ }
+
+ void increment_collectionset_used_after(size_t used) {
+ _collectionset_used_after += used;
+ }
+
+ void set_alloc_regions_used_before(size_t used) {
+ _alloc_regions_used_before = used;
+ }
+
+ void set_bytes_copied(size_t copied) {
+ _bytes_copied = copied;
+ }
+
+ void set_regions_freed(uint freed) {
+ _regions_freed += freed;
+ }
+
+ uint collectionset_regions() { return _collectionset_regions; }
+ uint allocation_regions() { return _allocation_regions; }
+ size_t collectionset_used_before() { return _collectionset_used_before; }
+ size_t collectionset_used_after() { return _collectionset_used_after; }
+ size_t alloc_regions_used_before() { return _alloc_regions_used_before; }
+ size_t bytes_copied() { return _bytes_copied; }
+ uint regions_freed() { return _regions_freed; }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP
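
The new EvacuationInfo value object above is a plain data carrier: the pause stack-allocates one, the collection-set and evacuation phases fill it in, and the tracer consumes it at the end of the pause. A minimal standalone sketch of that flow (the StackObj base and uint typedef are stubbed out here, and the field values are invented for illustration):

#include <cstdio>
#include <cstddef>

typedef unsigned int uint;   // HotSpot typedef, stubbed for a standalone build
struct StackObj {};          // stand-in for HotSpot's stack-allocation base class

// Trimmed copy of the EvacuationInfo interface introduced above.
class EvacuationInfo : public StackObj {
  uint _collectionset_regions;
  size_t _bytes_copied;
public:
  EvacuationInfo() : _collectionset_regions(0), _bytes_copied(0) { }
  void set_collectionset_regions(uint n) { _collectionset_regions = n; }
  void set_bytes_copied(size_t b)        { _bytes_copied = b; }
  uint collectionset_regions()           { return _collectionset_regions; }
  size_t bytes_copied()                  { return _bytes_copied; }
};

int main() {
  EvacuationInfo info;                  // stack-allocated for the duration of a pause
  info.set_collectionset_regions(42);   // invented values; really filled in by the pause phases
  info.set_bytes_copied(3u * 1024 * 1024);
  printf("cset regions: %u, copied: %zu bytes\n",
         info.collectionset_regions(), info.bytes_copied());
  return 0;
}

This mirrors how do_collection_pause_at_safepoint() later in this patch declares an EvacuationInfo, hands it to finalize_cset(), evacuate_collection_set() and free_collection_set(), and finally passes it to report_evacuation_info().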
diff --git a/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp
new file mode 100644
index 000000000..7f5023b42
--- /dev/null
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+
+#ifndef PRODUCT
+void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
+ guarantee(_base != NULL, "Array not initialized");
+ guarantee(index < length(), err_msg("Index out of bounds index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length()));
+}
+
+void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
+ guarantee(_biased_base != NULL, "Array not initialized");
+ guarantee(biased_index >= bias() && biased_index < (bias() + length()),
+ err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
+}
+
+void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
+ guarantee(_biased_base != NULL, "Array not initialized");
+ guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
+ err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
+}
+
+class TestMappedArray : public G1BiasedMappedArray<int> {
+protected:
+ virtual int default_value() const { return 0xBAADBABE; }
+public:
+ static void test_biasedarray() {
+ const size_t REGION_SIZE_IN_WORDS = 512;
+ const size_t NUM_REGIONS = 20;
+ HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero
+
+ TestMappedArray array;
+ array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
+ REGION_SIZE_IN_WORDS * HeapWordSize);
+ // Check address calculation (bounds)
+ assert(array.bottom_address_mapped() == fake_heap,
+ err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped()));
+ assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");
+
+ int* bottom = array.address_mapped_to(fake_heap);
+ assert((void*)bottom == (void*) array.base(), "must be");
+ int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
+ assert((void*)end == (void*)(array.base() + array.length()), "must be");
+ // The entire array should contain default value elements
+ for (int* current = bottom; current < end; current++) {
+ assert(*current == array.default_value(), "must be");
+ }
+
+ // Test setting values in the table
+
+ HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
+ HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);
+
+ // Set/get by address tests: invert some value; first retrieve one
+ int actual_value = array.get_by_index(NUM_REGIONS / 2);
+ array.set_by_index(NUM_REGIONS / 2, ~actual_value);
+ // Get the same value by address, should correspond to the start of the "region"
+ int value = array.get_by_address(region_start_address);
+ assert(value == ~actual_value, "must be");
+ // Get the same value by address, at one HeapWord before the start
+ value = array.get_by_address(region_start_address - 1);
+ assert(value == array.default_value(), "must be");
+ // Get the same value by address, at the end of the "region"
+ value = array.get_by_address(region_end_address);
+ assert(value == ~actual_value, "must be");
+ // Make sure the next value maps to another index
+ value = array.get_by_address(region_end_address + 1);
+ assert(value == array.default_value(), "must be");
+
+ // Reset the value in the array
+ array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);
+
+ // The entire array should have the default value again
+ for (int* current = bottom; current < end; current++) {
+ assert(*current == array.default_value(), "must be");
+ }
+
+ // Set/get by index tests: invert some value
+ idx_t index = NUM_REGIONS / 2;
+ actual_value = array.get_by_index(index);
+ array.set_by_index(index, ~actual_value);
+
+ value = array.get_by_index(index);
+ assert(value == ~actual_value, "must be");
+
+ value = array.get_by_index(index - 1);
+ assert(value == array.default_value(), "must be");
+
+ value = array.get_by_index(index + 1);
+ assert(value == array.default_value(), "must be");
+
+ array.set_by_index(0, 0);
+ value = array.get_by_index(0);
+ assert(value == 0, "must be");
+
+ array.set_by_index(array.length() - 1, 0);
+ value = array.get_by_index(array.length() - 1);
+ assert(value == 0, "must be");
+
+ array.set_by_index(index, 0);
+
+ // The array should have three zeros, and default values otherwise
+ size_t num_zeros = 0;
+ for (int* current = bottom; current < end; current++) {
+ assert(*current == array.default_value() || *current == 0, "must be");
+ if (*current == 0) {
+ num_zeros++;
+ }
+ }
+ assert(num_zeros == 3, "must be");
+ }
+};
+
+void TestG1BiasedArray_test() {
+ TestMappedArray::test_biasedarray();
+}
+
+#endif
diff --git a/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp
new file mode 100644
index 000000000..f80c70b4e
--- /dev/null
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
+
+#include "utilities/debug.hpp"
+#include "memory/allocation.inline.hpp"
+
+// Implements the common base functionality for arrays that contain provisions
+// for accessing their elements using a biased index.
+// The element type is defined by instantiating the template.
+class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
+ friend class VMStructs;
+public:
+ typedef size_t idx_t;
+protected:
+ address _base; // the real base address
+ size_t _length; // the length of the array
+ address _biased_base; // base address biased by "bias" elements
+ size_t _bias; // the bias, i.e. the number of elements _base is offset to the right of _biased_base
+ uint _shift_by; // the number of bits to shift right when mapping an address to an index of the array.
+
+protected:
+
+ G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL),
+ _bias(0), _shift_by(0) { }
+
+ // Allocate a new array, generic version.
+ static address create_new_base_array(size_t length, size_t elem_size) {
+ assert(length > 0, "just checking");
+ assert(elem_size > 0, "just checking");
+ return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
+ }
+
+ // Initialize the members of this class. The biased start address of this array
+ // is the start address minus the bias (in elements) multiplied by the element size.
+ void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
+ assert(base != NULL, "just checking");
+ assert(length > 0, "just checking");
+ assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by));
+ _base = base;
+ _length = length;
+ _biased_base = base - (bias * elem_size);
+ _bias = bias;
+ _shift_by = shift_by;
+ }
+
+ // Allocate and initialize this array to cover the heap addresses in the range
+ // of [bottom, end).
+ void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
+ assert(mapping_granularity_in_bytes > 0, "just checking");
+ assert(is_power_of_2(mapping_granularity_in_bytes),
+ err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
+ assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
+ err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
+ mapping_granularity_in_bytes, bottom));
+ assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
+ err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
+ mapping_granularity_in_bytes, end));
+ size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
+ idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
+ address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
+ initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
+ }
+
+ size_t bias() const { return _bias; }
+ uint shift_by() const { return _shift_by; }
+
+ void verify_index(idx_t index) const PRODUCT_RETURN;
+ void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN;
+ void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN;
+
+public:
+ // Return the length of the array in elements.
+ size_t length() const { return _length; }
+};
+
+// Array that provides biased access and mapping from (valid) addresses in the
+// heap into this array.
+template<class T>
+class G1BiasedMappedArray : public G1BiasedMappedArrayBase {
+public:
+ typedef G1BiasedMappedArrayBase::idx_t idx_t;
+
+ T* base() const { return (T*)G1BiasedMappedArrayBase::_base; }
+ // Return the element of the given array at the given index. Assumes
+ // the index is valid. This is a convenience method that performs
+ // (debug-only) sanity checking on the index.
+ T get_by_index(idx_t index) const {
+ verify_index(index);
+ return this->base()[index];
+ }
+
+ // Set the element of the given array at the given index to the
+ // given value. Assumes the index is valid; performs (debug-only)
+ // sanity checking on the index.
+ void set_by_index(idx_t index, T value) {
+ verify_index(index);
+ this->base()[index] = value;
+ }
+
+ // The raw biased base pointer.
+ T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; }
+
+ // Return the element of the given array that covers the given word in the
+ // heap. Assumes the index is valid.
+ T get_by_address(HeapWord* value) const {
+ idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
+ this->verify_biased_index(biased_index);
+ return biased_base()[biased_index];
+ }
+
+ // Set the value of the array entry that corresponds to the given address.
+ void set_by_address(HeapWord * address, T value) {
+ idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
+ this->verify_biased_index(biased_index);
+ biased_base()[biased_index] = value;
+ }
+
+protected:
+ // Returns the address of the element the given address maps to
+ T* address_mapped_to(HeapWord* address) {
+ idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
+ this->verify_biased_index_inclusive_end(biased_index);
+ return biased_base() + biased_index;
+ }
+
+public:
+ // Return the smallest address (inclusive) in the heap that this array covers.
+ HeapWord* bottom_address_mapped() const {
+ return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by());
+ }
+
+ // Return the highest address (exclusive) in the heap that this array covers.
+ HeapWord* end_address_mapped() const {
+ return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by());
+ }
+
+protected:
+ virtual T default_value() const = 0;
+ // Set all elements of the given array to the given value.
+ void clear() {
+ T value = default_value();
+ for (idx_t i = 0; i < length(); i++) {
+ set_by_index(i, value);
+ }
+ }
+public:
+ G1BiasedMappedArray() {}
+
+ // Allocate and initialize this array to cover the heap addresses in the range
+ // of [bottom, end).
+ void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) {
+ G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity);
+ this->clear();
+ }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
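
The trick in G1BiasedMappedArray is that _biased_base is precomputed as _base minus the bias, so a heap address shifted right by _shift_by indexes the array directly, with no per-access subtraction. A standalone sketch of the arithmetic under assumed toy values (4 KB granularity and a fake aligned heap base; the out-of-bounds pointer arithmetic mirrors what the VM itself does):

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t heap_bottom = 0x100000;   // fake, granularity-aligned heap start
  const size_t    granularity = 4096;       // bytes of heap covered per element
  const unsigned  shift       = 12;         // log2(granularity)

  static int storage[8] = {0};              // the real backing array (base)
  int*   base        = storage;
  size_t bias        = heap_bottom / granularity;
  // Biased pointer: shifted left of base so that addr >> shift indexes it
  // directly. (Out-of-bounds pointer arithmetic, exactly as in the VM.)
  int*   biased_base = base - bias;

  uintptr_t addr = heap_bottom + 5 * granularity + 123;  // somewhere in "region" 5
  biased_base[addr >> shift] = 7;
  assert(storage[5] == 7);
  printf("addr %#lx maps to element %zu\n",
         (unsigned long)addr, (size_t)((addr >> shift) - bias));
  return 0;
}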
diff --git a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp
index a055d4f28..31972bf3c 100644
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp
@@ -33,8 +33,8 @@
void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
if (has_count_table()) {
- check_card_num(from_card_num,
- err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
+ assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
+ err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
assert(from_card_num < to_card_num,
err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
from_card_num, to_card_num));
@@ -101,20 +101,23 @@ void G1CardCounts::resize(size_t heap_capacity) {
ReservedSpace::allocation_align_size_up(_committed_size),
err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));
- // Verify that the committed space for the card counts
- // matches our committed max card num.
+ // Verify that the committed space for the card counts matches our
+ // committed max card num. Note that for some allocation alignments, the
+ // amount of space actually committed for the counts table may span
+ // more cards than the number spanned by the maximum heap.
size_t prev_committed_size = _committed_size;
- size_t prev_committed_card_num = prev_committed_size / sizeof(jbyte);
+ size_t prev_committed_card_num = committed_to_card_num(prev_committed_size);
+
assert(prev_committed_card_num == _committed_max_card_num,
err_msg("Card mismatch: "
"prev: " SIZE_FORMAT ", "
- "committed: "SIZE_FORMAT,
- prev_committed_card_num, _committed_max_card_num));
+ "committed: "SIZE_FORMAT", "
+ "reserved: "SIZE_FORMAT,
+ prev_committed_card_num, _committed_max_card_num, _reserved_max_card_num));
size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
- size_t new_committed_card_num =
- MIN2(_reserved_max_card_num, new_committed_size / sizeof(jbyte));
+ size_t new_committed_card_num = committed_to_card_num(new_committed_size);
if (_committed_max_card_num < new_committed_card_num) {
// we need to expand the backing store for the card counts
@@ -149,12 +152,9 @@ uint G1CardCounts::add_card_count(jbyte* card_ptr) {
if (card_num < _committed_max_card_num) {
count = (uint) _card_counts[card_num];
if (count < G1ConcRSHotCardLimit) {
- _card_counts[card_num] += 1;
+ _card_counts[card_num] =
+ (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
}
- assert(_card_counts[card_num] <= G1ConcRSHotCardLimit,
- err_msg("Refinement count overflow? "
- "new count: "UINT32_FORMAT,
- (uint) _card_counts[card_num]));
}
}
return count;
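
The add_card_count() change above replaces an unconditional increment guarded by an overflow assert with a clamped update, so a card's count saturates at G1ConcRSHotCardLimit instead of ever wrapping the jubyte cell. A minimal sketch of that saturating update, with an invented limit standing in for G1ConcRSHotCardLimit:

#include <algorithm>
#include <cstdio>

typedef unsigned char jubyte;   // a card's count cell, as in the counts table

int main() {
  const unsigned hot_card_limit = 4;   // invented stand-in for G1ConcRSHotCardLimit
  jubyte count = 0;
  for (int refinement = 0; refinement < 8; refinement++) {
    unsigned before = count;           // what add_card_count() would return
    if (before < hot_card_limit) {
      // Clamped increment: the cell can never exceed the limit, so no
      // overflow assert is needed.
      count = (jubyte)std::min<unsigned>(before + 1, hot_card_limit);
    }
    printf("refinement %d: count %u -> %u%s\n", refinement, before,
           (unsigned)count, before >= hot_card_limit ? " (hot)" : "");
  }
  return 0;
}

As in the real routine, the value returned to the caller is the count before the increment, so a card reads as hot once its prior count has reached the limit.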
diff --git a/src/share/vm/gc_implementation/g1/g1CardCounts.hpp b/src/share/vm/gc_implementation/g1/g1CardCounts.hpp
index cef297bd2..129b3b0d2 100644
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.hpp
@@ -72,28 +72,32 @@ class G1CardCounts: public CHeapObj<mtGC> {
return has_reserved_count_table() && _committed_max_card_num > 0;
}
- void check_card_num(size_t card_num, const char* msg) {
- assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
- }
-
size_t ptr_2_card_num(const jbyte* card_ptr) {
assert(card_ptr >= _ct_bot,
- err_msg("Inavalied card pointer: "
+ err_msg("Invalid card pointer: "
"card_ptr: " PTR_FORMAT ", "
"_ct_bot: " PTR_FORMAT,
card_ptr, _ct_bot));
size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
- check_card_num(card_num,
- err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
+ assert(card_num >= 0 && card_num < _committed_max_card_num,
+ err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
return card_num;
}
jbyte* card_num_2_ptr(size_t card_num) {
- check_card_num(card_num,
- err_msg("card num out of range: "SIZE_FORMAT, card_num));
+ assert(card_num >= 0 && card_num < _committed_max_card_num,
+ err_msg("card num out of range: "SIZE_FORMAT, card_num));
return (jbyte*) (_ct_bot + card_num);
}
+ // Helper routine.
+ // Returns the number of cards that can be counted by the given committed
+ // table size, with a maximum of the number of cards spanned by the max
+ // capacity of the heap.
+ size_t committed_to_card_num(size_t committed_size) {
+ return MIN2(_reserved_max_card_num, committed_size / sizeof(jbyte));
+ }
+
// Clear the counts table for the given (exclusive) index range.
void clear_range(size_t from_card_num, size_t to_card_num);
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 49cde4272..0ecfd3ab3 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/bufferingOopClosure.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
@@ -38,10 +39,15 @@
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
+#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
@@ -49,7 +55,6 @@
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
-#include "runtime/aprofiler.hpp"
#include "runtime/vmThread.hpp"
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
@@ -76,7 +81,7 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
// The number of GC workers is passed to heap_region_par_iterate_chunked().
// It does use run_task() which sets _n_workers in the task.
// G1ParTask executes g1_process_strong_roots() ->
-// SharedHeap::process_strong_roots() which calls eventuall to
+// SharedHeap::process_strong_roots() which calls eventually to
// CardTableModRefBS::par_non_clean_card_iterate_work() which uses
// SequentialSubTasksDone. SharedHeap::process_strong_roots() also
// directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
@@ -457,7 +462,7 @@ bool G1CollectedHeap::is_in_partial_collection(const void* p) {
#endif
// Returns true if the reference points to an object that
-// can move in an incremental collecction.
+// can move in an incremental collection.
bool G1CollectedHeap::is_scavengable(const void* p) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1p = g1h->g1_policy();
@@ -548,7 +553,7 @@ G1CollectedHeap::new_region_try_secondary_free_list() {
return res;
}
- // Wait here until we get notifed either when (a) there are no
+ // Wait here until we get notified either when (a) there are no
// more free regions coming or (b) some regions have been moved on
// the secondary_free_list.
SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
@@ -623,7 +628,7 @@ uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
uint first = G1_NULL_HRS_INDEX;
if (num_regions == 1) {
// Only one region to allocate, no need to go through the slower
- // path. The caller will attempt the expasion if this fails, so
+ // path. The caller will attempt the expansion if this fails, so
// let's not try to expand here too.
HeapRegion* hr = new_region(word_size, false /* do_expand */);
if (hr != NULL) {
@@ -688,7 +693,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
// the first region.
HeapWord* new_obj = first_hr->bottom();
// This will be the new end of the first region in the series that
- // should also match the end of the last region in the seriers.
+ // should also match the end of the last region in the series.
HeapWord* new_end = new_obj + word_size_sum;
// This will be the new top of the first region that will reflect
// this allocation.
@@ -863,7 +868,7 @@ G1CollectedHeap::mem_allocate(size_t word_size,
bool* gc_overhead_limit_was_exceeded) {
assert_heap_not_locked_and_not_at_safepoint();
- // Loop until the allocation is satisified, or unsatisfied after GC.
+ // Loop until the allocation is satisfied, or unsatisfied after GC.
for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
unsigned int gc_count_before;
@@ -976,7 +981,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
if (should_try_gc) {
bool succeeded;
- result = do_collection_pause(word_size, gc_count_before, &succeeded);
+ result = do_collection_pause(word_size, gc_count_before, &succeeded,
+ GCCause::_g1_inc_collection_pause);
if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
return result;
@@ -1003,7 +1009,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
(*gclocker_retry_count_ret) += 1;
}
- // We can reach here if we were unsuccessul in scheduling a
+ // We can reach here if we were unsuccessful in scheduling a
// collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either case we should retry the
// allocation attempt in case another thread successfully
@@ -1101,7 +1107,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
// enough space for the allocation to succeed after the pause.
bool succeeded;
- result = do_collection_pause(word_size, gc_count_before, &succeeded);
+ result = do_collection_pause(word_size, gc_count_before, &succeeded,
+ GCCause::_g1_humongous_allocation);
if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
return result;
@@ -1128,7 +1135,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
(*gclocker_retry_count_ret) += 1;
}
- // We can reach here if we were unsuccessul in scheduling a
+ // We can reach here if we were unsuccessful in scheduling a
// collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either case we should retry the
// allocation attempt in case another thread successfully
@@ -1172,20 +1179,27 @@ class PostMCRemSetClearClosure: public HeapRegionClosure {
ModRefBarrierSet* _mr_bs;
public:
PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
- _g1h(g1h), _mr_bs(mr_bs) { }
+ _g1h(g1h), _mr_bs(mr_bs) {}
+
bool doHeapRegion(HeapRegion* r) {
+ HeapRegionRemSet* hrrs = r->rem_set();
+
if (r->continuesHumongous()) {
+ // We'll assert that the strong code root list and RSet are empty
+ assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
+ assert(hrrs->occupied() == 0, "RSet should be empty");
return false;
}
+
_g1h->reset_gc_time_stamps(r);
- HeapRegionRemSet* hrrs = r->rem_set();
- if (hrrs != NULL) hrrs->clear();
+ hrrs->clear();
// You might think here that we could clear just the cards
// corresponding to the used region. But no: if we leave a dirty card
// in a region we might allocate into, then it would prevent that card
// from being enqueued, and cause it to be missed.
// Re: the performance cost: we shouldn't be doing full GC anyway!
_mr_bs->clear(MemRegion(r->bottom(), r->end()));
+
return false;
}
};
@@ -1265,30 +1279,6 @@ void G1CollectedHeap::print_hrs_post_compaction() {
heap_region_iterate(&cl);
}
-double G1CollectedHeap::verify(bool guard, const char* msg) {
- double verify_time_ms = 0.0;
-
- if (guard && total_collections() >= VerifyGCStartAt) {
- double verify_start = os::elapsedTime();
- HandleMark hm; // Discard invalid handles created during verification
- prepare_for_verify();
- Universe::verify(VerifyOption_G1UsePrevMarking, msg);
- verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
- }
-
- return verify_time_ms;
-}
-
-void G1CollectedHeap::verify_before_gc() {
- double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
- g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
-}
-
-void G1CollectedHeap::verify_after_gc() {
- double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
- g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
-}
-
bool G1CollectedHeap::do_collection(bool explicit_gc,
bool clear_all_soft_refs,
size_t word_size) {
@@ -1298,10 +1288,17 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
return false;
}
+ STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
+ gc_timer->register_gc_start(os::elapsed_counter());
+
+ SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
+ gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
+
SvcGCMarker sgcm(SvcGCMarker::FULL);
ResourceMark rm;
print_heap_before_gc();
+ trace_heap_before_gc(gc_tracer);
size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
@@ -1322,7 +1319,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
{
- TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
+ GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
TraceCollectorStats tcs(g1mm()->full_collection_counters());
TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
@@ -1351,7 +1348,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
verify_before_gc();
- pre_full_gc_dump();
+ pre_full_gc_dump(gc_timer);
COMPILER2_PRESENT(DerivedPointerTable::clear());
@@ -1417,14 +1414,12 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
MemoryService::track_memory_usage();
- verify_after_gc();
-
assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
ref_processor_stw()->verify_no_references_recorded();
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph::purge();
- MetaspaceAux::verify_metrics();
+ MetaspaceAux::verify_metrics();
// Note: since we've just done a full GC, concurrent
// marking is no longer active. Therefore we need not
@@ -1435,7 +1430,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
reset_gc_time_stamp();
// Since everything potentially moved, we will clear all remembered
- // sets, and clear all cards. Later we will rebuild remebered
+ // sets, and clear all cards. Later we will rebuild remembered
// sets. We will also reset the GC time stamps of the regions.
clear_rsets_post_compaction();
check_gc_time_stamps();
@@ -1495,6 +1490,9 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
heap_region_iterate(&rebuild_rs);
}
+ // Rebuild the strong code root lists for each region
+ rebuild_strong_code_roots();
+
if (true) { // FIXME
MetaspaceGC::compute_new_size();
}
@@ -1521,6 +1519,8 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
_hrs.verify_optional();
verify_region_sets_optional();
+ verify_after_gc();
+
// Start a new incremental collection set for the next pause
assert(g1_policy()->collection_set() == NULL, "must be");
g1_policy()->start_incremental_cset_building();
@@ -1549,12 +1549,16 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
}
if (G1Log::finer()) {
- g1_policy()->print_detailed_heap_transition();
+ g1_policy()->print_detailed_heap_transition(true /* full */);
}
print_heap_after_gc();
+ trace_heap_after_gc(gc_tracer);
+
+ post_full_gc_dump(gc_timer);
- post_full_gc_dump();
+ gc_timer->register_gc_end(os::elapsed_counter());
+ gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
}
return true;
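
A recurring pattern in this changeset is bracketing each collection with a timer/tracer pair: register_gc_start() and report_gc_start() before the pause work, register_gc_end() and report_gc_end() after it, as in do_collection() above. A simplified standalone model of that bracketing (the types and signatures here are stand-ins, not the HotSpot GCTimer/GCTracer API):

#include <chrono>
#include <cstdio>

// Simplified stand-ins for the GCTimer/GCTracer pair; not the HotSpot API.
struct GCTimer {
  std::chrono::steady_clock::time_point start, end;
  void register_gc_start() { start = std::chrono::steady_clock::now(); }
  void register_gc_end()   { end   = std::chrono::steady_clock::now(); }
};

struct GCTracer {
  void report_gc_start(const char* cause) { printf("GC start: %s\n", cause); }
  void report_gc_end(const GCTimer& t) {
    long long ms = std::chrono::duration_cast<std::chrono::milliseconds>(t.end - t.start).count();
    printf("GC end after %lld ms\n", ms);
  }
};

static void do_collection(GCTimer& timer, GCTracer& tracer) {
  timer.register_gc_start();            // first thing in the collection
  tracer.report_gc_start("Full GC");
  // ... the actual collection work would run here ...
  timer.register_gc_end();              // last thing, after heap printing/dumps
  tracer.report_gc_end(timer);
}

int main() {
  GCTimer timer;
  GCTracer tracer;
  do_collection(timer, tracer);
  return 0;
}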
@@ -1919,7 +1923,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_ref_processor_stw(NULL),
_process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
_bot_shared(NULL),
- _evac_failure_scan_stack(NULL) ,
+ _evac_failure_scan_stack(NULL),
_mark_in_progress(false),
_cg1r(NULL), _summary_bytes_used(0),
_g1mm(NULL),
@@ -1939,12 +1943,18 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_surviving_young_words(NULL),
_old_marking_cycles_started(0),
_old_marking_cycles_completed(0),
+ _concurrent_cycle_started(false),
_in_cset_fast_test(NULL),
_in_cset_fast_test_base(NULL),
_dirty_cards_region_list(NULL),
_worker_cset_start_region(NULL),
- _worker_cset_start_region_time_stamp(NULL) {
- _g1h = this; // To catch bugs.
+ _worker_cset_start_region_time_stamp(NULL),
+ _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
+ _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
+ _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
+ _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
+
+ _g1h = this;
if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
vm_exit_during_initialization("Failed necessary allocation.");
}
@@ -1959,13 +1969,14 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
_worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
+ _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
for (int i = 0; i < n_queues; i++) {
RefToScanQueue* q = new RefToScanQueue();
q->initialize();
_task_queues->register_queue(i, q);
+ ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
}
-
clear_cset_start_regions();
// Initialize the G1EvacuationFailureALot counters and flags.
@@ -1997,10 +2008,12 @@ jint G1CollectedHeap::initialize() {
size_t init_byte_size = collector_policy()->initial_heap_byte_size();
size_t max_byte_size = collector_policy()->max_heap_byte_size();
+ size_t heap_alignment = collector_policy()->max_alignment();
// Ensure that the sizes are properly aligned.
Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
+ Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
_cg1r = new ConcurrentG1Refine(this);
@@ -2017,15 +2030,11 @@ jint G1CollectedHeap::initialize() {
// If this happens then we could end up using a non-optimal
// compressed oops mode.
- // Since max_byte_size is aligned to the size of a heap region (checked
- // above).
- Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
-
ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
- HeapRegion::GrainBytes);
+ heap_alignment);
// It is important to do this in a way such that concurrent readers can't
- // temporarily think somethings in the heap. (I've actually seen this
+ // temporarily think something is in the heap. (I've actually seen this
// happen in asserts: DLD.)
_reserved.set_word_size(0);
_reserved.set_start((HeapWord*)heap_rs.base());
@@ -2060,8 +2069,10 @@ jint G1CollectedHeap::initialize() {
_g1_storage.initialize(g1_rs, 0);
_g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
_hrs.initialize((HeapWord*) _g1_reserved.start(),
- (HeapWord*) _g1_reserved.end(),
- _expansion_regions);
+ (HeapWord*) _g1_reserved.end());
+ assert(_hrs.max_length() == _expansion_regions,
+ err_msg("max length: %u expansion regions: %u",
+ _hrs.max_length(), _expansion_regions));
// Do later initialization work for concurrent refinement.
_cg1r->init();
@@ -2182,6 +2193,10 @@ jint G1CollectedHeap::initialize() {
return JNI_OK;
}
+size_t G1CollectedHeap::conservative_max_heap_alignment() {
+ return HeapRegion::max_region_size();
+}
+
void G1CollectedHeap::ref_processing_init() {
// Reference processing in G1 currently works as follows:
//
@@ -2462,7 +2477,7 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
// We need to clear the "in_progress" flag in the CM thread before
// we wake up any waiters (especially when ExplicitInvokesConcurrent
// is set) so that if a waiter requests another System.gc() it doesn't
- // incorrectly see that a marking cyle is still in progress.
+ // incorrectly see that a marking cycle is still in progress.
if (concurrent) {
_cmThread->clear_in_progress();
}
@@ -2474,6 +2489,49 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
FullGCCount_lock->notify_all();
}
+void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) {
+ _concurrent_cycle_started = true;
+ _gc_timer_cm->register_gc_start(start_time);
+
+ _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
+ trace_heap_before_gc(_gc_tracer_cm);
+}
+
+void G1CollectedHeap::register_concurrent_cycle_end() {
+ if (_concurrent_cycle_started) {
+ if (_cm->has_aborted()) {
+ _gc_tracer_cm->report_concurrent_mode_failure();
+ }
+
+ _gc_timer_cm->register_gc_end(os::elapsed_counter());
+ _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
+
+ _concurrent_cycle_started = false;
+ }
+}
+
+void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
+ if (_concurrent_cycle_started) {
+ trace_heap_after_gc(_gc_tracer_cm);
+ }
+}
+
+G1YCType G1CollectedHeap::yc_type() {
+ bool is_young = g1_policy()->gcs_are_young();
+ bool is_initial_mark = g1_policy()->during_initial_mark_pause();
+ bool is_during_mark = mark_in_progress();
+
+ if (is_initial_mark) {
+ return InitialMark;
+ } else if (is_during_mark) {
+ return DuringMark;
+ } else if (is_young) {
+ return Normal;
+ } else {
+ return Mixed;
+ }
+}
+
void G1CollectedHeap::collect(GCCause::Cause cause) {
assert_heap_not_locked();
@@ -2599,11 +2657,6 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
heap_region_iterate(&blk);
}
-void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
- // FIXME: is this right?
- guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
-}
-
// Calls a SpaceClosure on a HeapRegion.
class SpaceClosureRegionClosure: public HeapRegionClosure {
@@ -2676,13 +2729,13 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
break;
}
- // Noone should have claimed it directly. We can given
+ // No one should have claimed it directly. We can claim it, given
// that we claimed its "starts humongous" region.
assert(chr->claim_value() != claim_value, "sanity");
assert(chr->humongous_start_region() == r, "sanity");
if (chr->claimHeapRegion(claim_value)) {
- // we should always be able to claim it; noone else should
+ // we should always be able to claim it; no one else should
// be trying to claim this region
bool res2 = cl->doHeapRegion(chr);
@@ -2976,7 +3029,7 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
// the min TLAB size.
// Also, this value can be at most the humongous object threshold,
- // since we can't allow tlabs to grow big enough to accomodate
+ // since we can't allow tlabs to grow big enough to accommodate
// humongous objects.
HeapRegion* hr = _mutator_alloc_region.get();
@@ -3049,6 +3102,145 @@ const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
return NULL; // keep some compilers happy
}
+// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
+// pass it as the perm_blk to SharedHeap::process_strong_roots.
+// When process_strong_roots stops calling perm_blk->younger_refs_iterate
+// we can change this closure to extend the simpler OopClosure.
+class VerifyRootsClosure: public OopsInGenClosure {
+private:
+ G1CollectedHeap* _g1h;
+ VerifyOption _vo;
+ bool _failures;
+public:
+ // _vo == UsePrevMarking -> use "prev" marking information,
+ // _vo == UseNextMarking -> use "next" marking information,
+ // _vo == UseMarkWord -> use mark word from object header.
+ VerifyRootsClosure(VerifyOption vo) :
+ _g1h(G1CollectedHeap::heap()),
+ _vo(vo),
+ _failures(false) { }
+
+ bool failures() { return _failures; }
+
+ template <class T> void do_oop_nv(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (_g1h->is_obj_dead_cond(obj, _vo)) {
+ gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
+ "points to dead obj "PTR_FORMAT, p, (void*) obj);
+ if (_vo == VerifyOption_G1UseMarkWord) {
+ gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark()));
+ }
+ obj->print_on(gclog_or_tty);
+ _failures = true;
+ }
+ }
+ }
+
+ void do_oop(oop* p) { do_oop_nv(p); }
+ void do_oop(narrowOop* p) { do_oop_nv(p); }
+};
+
+class G1VerifyCodeRootOopClosure: public OopsInGenClosure {
+ G1CollectedHeap* _g1h;
+ OopClosure* _root_cl;
+ nmethod* _nm;
+ VerifyOption _vo;
+ bool _failures;
+
+ template <class T> void do_oop_work(T* p) {
+ // First verify that this root is live
+ _root_cl->do_oop(p);
+
+ if (!G1VerifyHeapRegionCodeRoots) {
+ // We're not verifying the code roots attached to heap region.
+ return;
+ }
+
+ // Don't check the code roots during marking verification in a full GC
+ if (_vo == VerifyOption_G1UseMarkWord) {
+ return;
+ }
+
+ // Now verify that the current nmethod (which contains p) is
+ // in the code root list of the heap region containing the
+ // object referenced by p.
+
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+
+ // Now fetch the region containing the object
+ HeapRegion* hr = _g1h->heap_region_containing(obj);
+ HeapRegionRemSet* hrrs = hr->rem_set();
+ // Verify that the strong code root list for this region
+ // contains the nmethod
+ if (!hrrs->strong_code_roots_list_contains(_nm)) {
+ gclog_or_tty->print_cr("Code root location "PTR_FORMAT" "
+ "from nmethod "PTR_FORMAT" not in strong "
+ "code roots for region ["PTR_FORMAT","PTR_FORMAT")",
+ p, _nm, hr->bottom(), hr->end());
+ _failures = true;
+ }
+ }
+ }
+
+public:
+ G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
+ _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
+
+ void do_oop(oop* p) { do_oop_work(p); }
+ void do_oop(narrowOop* p) { do_oop_work(p); }
+
+ void set_nmethod(nmethod* nm) { _nm = nm; }
+ bool failures() { return _failures; }
+};
+
+class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
+ G1VerifyCodeRootOopClosure* _oop_cl;
+
+public:
+ G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
+ _oop_cl(oop_cl) {}
+
+ void do_code_blob(CodeBlob* cb) {
+ nmethod* nm = cb->as_nmethod_or_null();
+ if (nm != NULL) {
+ _oop_cl->set_nmethod(nm);
+ nm->oops_do(_oop_cl);
+ }
+ }
+};
+
+class YoungRefCounterClosure : public OopClosure {
+ G1CollectedHeap* _g1h;
+ int _count;
+ public:
+ YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
+ void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } }
+ void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
+ int count() { return _count; }
+ void reset_count() { _count = 0; };
+};
+
+class VerifyKlassClosure: public KlassClosure {
+ YoungRefCounterClosure _young_ref_counter_closure;
+ OopClosure *_oop_closure;
+ public:
+ VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
+ void do_klass(Klass* k) {
+ k->oops_do(_oop_closure);
+
+ _young_ref_counter_closure.reset_count();
+ k->oops_do(&_young_ref_counter_closure);
+ if (_young_ref_counter_closure.count() > 0) {
+ guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
+ }
+ }
+};
+
class VerifyLivenessOopClosure: public OopClosure {
G1CollectedHeap* _g1h;
VerifyOption _vo;
@@ -3182,75 +3374,7 @@ public:
}
};
-class YoungRefCounterClosure : public OopClosure {
- G1CollectedHeap* _g1h;
- int _count;
- public:
- YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
- void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } }
- void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-
- int count() { return _count; }
- void reset_count() { _count = 0; };
-};
-
-class VerifyKlassClosure: public KlassClosure {
- YoungRefCounterClosure _young_ref_counter_closure;
- OopClosure *_oop_closure;
- public:
- VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
- void do_klass(Klass* k) {
- k->oops_do(_oop_closure);
-
- _young_ref_counter_closure.reset_count();
- k->oops_do(&_young_ref_counter_closure);
- if (_young_ref_counter_closure.count() > 0) {
- guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
- }
- }
-};
-
-// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
-// pass it as the perm_blk to SharedHeap::process_strong_roots.
-// When process_strong_roots stop calling perm_blk->younger_refs_iterate
-// we can change this closure to extend the simpler OopClosure.
-class VerifyRootsClosure: public OopsInGenClosure {
-private:
- G1CollectedHeap* _g1h;
- VerifyOption _vo;
- bool _failures;
-public:
- // _vo == UsePrevMarking -> use "prev" marking information,
- // _vo == UseNextMarking -> use "next" marking information,
- // _vo == UseMarkWord -> use mark word from object header.
- VerifyRootsClosure(VerifyOption vo) :
- _g1h(G1CollectedHeap::heap()),
- _vo(vo),
- _failures(false) { }
-
- bool failures() { return _failures; }
-
- template <class T> void do_oop_nv(T* p) {
- T heap_oop = oopDesc::load_heap_oop(p);
- if (!oopDesc::is_null(heap_oop)) {
- oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- if (_g1h->is_obj_dead_cond(obj, _vo)) {
- gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
- "points to dead obj "PTR_FORMAT, p, (void*) obj);
- if (_vo == VerifyOption_G1UseMarkWord) {
- gclog_or_tty->print_cr(" Mark word: "PTR_FORMAT, (void*)(obj->mark()));
- }
- obj->print_on(gclog_or_tty);
- _failures = true;
- }
- }
- }
-
- void do_oop(oop* p) { do_oop_nv(p); }
- void do_oop(narrowOop* p) { do_oop_nv(p); }
-};
-
-// This is the task used for parallel heap verification.
+// This is the task used for parallel verification of the heap regions
class G1ParVerifyTask: public AbstractGangTask {
private:
@@ -3284,20 +3408,15 @@ public:
}
};
-void G1CollectedHeap::verify(bool silent) {
- verify(silent, VerifyOption_G1UsePrevMarking);
-}
-
-void G1CollectedHeap::verify(bool silent,
- VerifyOption vo) {
+void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
if (SafepointSynchronize::is_at_safepoint()) {
- if (!silent) { gclog_or_tty->print("Roots "); }
- VerifyRootsClosure rootsCl(vo);
-
assert(Thread::current()->is_VM_thread(),
"Expected to be executed serially by the VM thread at this point");
- CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
+ if (!silent) { gclog_or_tty->print("Roots "); }
+ VerifyRootsClosure rootsCl(vo);
+ G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
+ G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
VerifyKlassClosure klassCl(this, &rootsCl);
// We apply the relevant closures to all the oops in the
@@ -3316,7 +3435,7 @@ void G1CollectedHeap::verify(bool silent,
&klassCl
);
- bool failures = rootsCl.failures();
+ bool failures = rootsCl.failures() || codeRootsCl.failures();
if (vo != VerifyOption_G1UseMarkWord) {
// If we're verifying during a full GC then the region sets
@@ -3385,6 +3504,34 @@ void G1CollectedHeap::verify(bool silent,
}
}
+void G1CollectedHeap::verify(bool silent) {
+ verify(silent, VerifyOption_G1UsePrevMarking);
+}
+
+double G1CollectedHeap::verify(bool guard, const char* msg) {
+ double verify_time_ms = 0.0;
+
+ if (guard && total_collections() >= VerifyGCStartAt) {
+ double verify_start = os::elapsedTime();
+ HandleMark hm; // Discard invalid handles created during verification
+ prepare_for_verify();
+ Universe::verify(VerifyOption_G1UsePrevMarking, msg);
+ verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
+ }
+
+ return verify_time_ms;
+}
+
+void G1CollectedHeap::verify_before_gc() {
+ double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
+ g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
+}
+
+void G1CollectedHeap::verify_after_gc() {
+ double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
+ g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
+}
+
class PrintRegionClosure: public HeapRegionClosure {
outputStream* _st;
public:
@@ -3532,13 +3679,19 @@ G1CollectedHeap* G1CollectedHeap::heap() {
void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
// always_do_update_barrier = false;
assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
- // Call allocation profiler
- AllocationProfiler::iterate_since_last_gc();
// Fill TLAB's and such
ensure_parsability(true);
}
void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
+
+ if (G1SummarizeRSetStats &&
+ (G1SummarizeRSetStatsPeriod > 0) &&
+ // we are at the end of the GC; the total collections counter has already been incremented.
+ ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
+ g1_rem_set()->print_periodic_summary_info();
+ }
+
// FIXME: what is this about?
// I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
// is set.
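
The periodic remembered-set summary moved into gc_epilogue() has to account for total_collections() having already been incremented by the time the epilogue runs, hence the total_collections() - 1 in the condition above. A small sketch of the off-by-one (the period value is invented):

#include <cstdio>

int main() {
  const unsigned period = 3;   // invented stand-in for G1SummarizeRSetStatsPeriod
  // total_collections() as seen in gc_epilogue(): already incremented for
  // the collection that just finished.
  for (unsigned total_collections = 1; total_collections <= 7; total_collections++) {
    bool fire = ((total_collections - 1) % period == 0);
    printf("after GC #%u: summary %s\n",
           total_collections - 1, fire ? "printed" : "skipped");
  }
  return 0;
}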
@@ -3553,14 +3706,15 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
unsigned int gc_count_before,
- bool* succeeded) {
+ bool* succeeded,
+ GCCause::Cause gc_cause) {
assert_heap_not_locked_and_not_at_safepoint();
g1_policy()->record_stop_world_start();
VM_G1IncCollectionPause op(gc_count_before,
word_size,
false, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(),
- GCCause::_g1_inc_collection_pause);
+ gc_cause);
VMThread::execute(&op);
HeapWord* result = op.result();
@@ -3735,10 +3889,15 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
return false;
}
+ _gc_timer_stw->register_gc_start(os::elapsed_counter());
+
+ _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
+
SvcGCMarker sgcm(SvcGCMarker::MINOR);
ResourceMark rm;
print_heap_before_gc();
+ trace_heap_before_gc(_gc_tracer_stw);
HRSPhaseSetter x(HRSPhaseEvacuation);
verify_region_sets_optional();
@@ -3763,11 +3922,17 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// Inner scope for scope based logging, timers, and stats collection
{
+ EvacuationInfo evacuation_info;
+
if (g1_policy()->during_initial_mark_pause()) {
// We are about to start a marking cycle, so we increment the
// full collection counter.
increment_old_marking_cycles_started();
+ register_concurrent_cycle_start(_gc_timer_stw->gc_start());
}
+
+ _gc_tracer_stw->report_yc_type(yc_type());
+
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
@@ -3789,8 +3954,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
append_secondary_free_list_if_not_empty_with_lock();
}
- assert(check_young_list_well_formed(),
- "young list should be well formed");
+ assert(check_young_list_well_formed(), "young list should be well formed");
+ assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
+ "sanity check");
// Don't dynamically change the number of GC threads this early. A value of
// 0 is used to indicate serial work. When parallel work is done,
@@ -3877,7 +4043,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE
- g1_policy()->finalize_cset(target_pause_time_ms);
+ g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
_cm->note_start_of_gc();
// We should not verify the per-thread SATB buffers given that
@@ -3913,10 +4079,10 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
setup_surviving_young_words();
// Initialize the GC alloc regions.
- init_gc_alloc_regions();
+ init_gc_alloc_regions(evacuation_info);
// Actually do the work...
- evacuate_collection_set();
+ evacuate_collection_set(evacuation_info);
// We do this to mainly verify the per-thread SATB buffers
// (which have been filtered by now) since we didn't verify
@@ -3928,7 +4094,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
true /* verify_thread_buffers */,
true /* verify_fingers */);
- free_collection_set(g1_policy()->collection_set());
+ free_collection_set(g1_policy()->collection_set(), evacuation_info);
g1_policy()->clear_collection_set();
cleanup_surviving_young_words();
@@ -3956,13 +4122,19 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
#endif // YOUNG_LIST_VERBOSE
g1_policy()->record_survivor_regions(_young_list->survivor_length(),
- _young_list->first_survivor_region(),
- _young_list->last_survivor_region());
+ _young_list->first_survivor_region(),
+ _young_list->last_survivor_region());
_young_list->reset_auxilary_lists();
if (evacuation_failed()) {
_summary_bytes_used = recalculate_used();
+ uint n_queues = MAX2((int)ParallelGCThreads, 1);
+ for (uint i = 0; i < n_queues; i++) {
+ if (_evacuation_failed_info_array[i].has_failed()) {
+ _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
+ }
+ }
} else {
// The "used" of the collection set regions has already been subtracted
// when they were freed. Add in the bytes evacuated.
@@ -4005,7 +4177,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
}
}
- // We redo the verificaiton but now wrt to the new CSet which
+ // We redo the verification but now wrt to the new CSet which
// has just got initialized after the previous CSet was freed.
_cm->verify_no_cset_oops(true /* verify_stacks */,
true /* verify_enqueued_buffers */,
@@ -4018,7 +4190,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// investigate this in CR 7178365.
double sample_end_time_sec = os::elapsedTime();
double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
- g1_policy()->record_collection_pause_end(pause_time_ms);
+ g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
MemoryService::track_memory_usage();
@@ -4085,20 +4257,19 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
print_heap_after_gc();
+ trace_heap_after_gc(_gc_tracer_stw);
// We must call G1MonitoringSupport::update_sizes() in the same scoping level
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
// before any GC notifications are raised.
g1mm()->update_sizes();
- }
- if (G1SummarizeRSetStats &&
- (G1SummarizeRSetStatsPeriod > 0) &&
- (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
- g1_rem_set()->print_summary_info();
+ _gc_tracer_stw->report_evacuation_info(&evacuation_info);
+ _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
+ _gc_timer_stw->register_gc_end(os::elapsed_counter());
+ _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
}
-
// It should now be safe to tell the concurrent mark thread to start
// without its logging output interfering with the logging output
// that came from the pause.
@@ -4150,7 +4321,7 @@ void G1CollectedHeap::release_mutator_alloc_region() {
assert(_mutator_alloc_region.get() == NULL, "post-condition");
}
-void G1CollectedHeap::init_gc_alloc_regions() {
+void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
assert_at_safepoint(true /* should_be_vm_thread */);
_survivor_gc_alloc_region.init();
@@ -4165,7 +4336,7 @@ void G1CollectedHeap::init_gc_alloc_regions() {
// a cleanup and it should be on the free list now), or
// d) it's humongous (this means that it was emptied
// during a cleanup and was added to the free list, but
- // has been subseqently used to allocate a humongous
+ // has been subsequently used to allocate a humongous
// object that may be less than the region size).
if (retained_region != NULL &&
!retained_region->in_collection_set() &&
@@ -4182,10 +4353,13 @@ void G1CollectedHeap::init_gc_alloc_regions() {
retained_region->note_start_of_copying(during_im);
_old_gc_alloc_region.set(retained_region);
_hr_printer.reuse(retained_region);
+ evacuation_info.set_alloc_regions_used_before(retained_region->used());
}
}
-void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) {
+void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
+ evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
+ _old_gc_alloc_region.count());
_survivor_gc_alloc_region.release();
// If we have an old GC alloc region to release, we'll save it in
// _retained_old_gc_alloc_region. If we don't
@@ -4268,7 +4442,7 @@ void G1CollectedHeap::drain_evac_failure_scan_stack() {
}
oop
-G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
+G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
oop old) {
assert(obj_in_cs(old),
err_msg("obj: "PTR_FORMAT" should still be in the CSet",
@@ -4277,7 +4451,12 @@ G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
oop forward_ptr = old->forward_to_atomic(old);
if (forward_ptr == NULL) {
// Forward-to-self succeeded.
+ assert(_par_scan_state != NULL, "par scan state");
+ OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
+ uint queue_num = _par_scan_state->queue_num();
+ _evacuation_failed = true;
+ _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
if (_evac_failure_closure != cl) {
MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
assert(!_drain_in_progress,
@@ -4308,8 +4487,6 @@ G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
}
void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
- set_evacuation_failed(true);
-
preserve_mark_if_necessary(old, m);
HeapRegion* r = heap_region_containing(old);
@@ -4559,8 +4736,7 @@ oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has
// installed a forwarding pointer.
- OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
- return _g1->handle_evacuation_failure_par(cl, old);
+ return _g1->handle_evacuation_failure_par(_par_scan_state, old);
}
oop obj = oop(obj_ptr);
@@ -4900,7 +5076,11 @@ public:
G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
- int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+ // Don't scan the scavengable methods in the code cache as part
+ // of strong root scanning. The code roots that point into a
+ // region in the collection set are scanned when we scan the
+ // region's RSet.
+ int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
pss.start_strong_roots();
_g1h->g1_process_strong_roots(/* is scavenging */ true,
@@ -4942,67 +5122,6 @@ public:
// *** Common G1 Evacuation Stuff
-// Closures that support the filtering of CodeBlobs scanned during
-// external root scanning.
-
-// Closure applied to reference fields in code blobs (specifically nmethods)
-// to determine whether an nmethod contains references that point into
-// the collection set. Used as a predicate when walking code roots so
-// that only nmethods that point into the collection set are added to the
-// 'marked' list.
-
-class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
-
- class G1PointsIntoCSOopClosure : public OopClosure {
- G1CollectedHeap* _g1;
- bool _points_into_cs;
- public:
- G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
- _g1(g1), _points_into_cs(false) { }
-
- bool points_into_cs() const { return _points_into_cs; }
-
- template <class T>
- void do_oop_nv(T* p) {
- if (!_points_into_cs) {
- T heap_oop = oopDesc::load_heap_oop(p);
- if (!oopDesc::is_null(heap_oop) &&
- _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
- _points_into_cs = true;
- }
- }
- }
-
- virtual void do_oop(oop* p) { do_oop_nv(p); }
- virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
- };
-
- G1CollectedHeap* _g1;
-
-public:
- G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
- CodeBlobToOopClosure(cl, true), _g1(g1) { }
-
- virtual void do_code_blob(CodeBlob* cb) {
- nmethod* nm = cb->as_nmethod_or_null();
- if (nm != NULL && !(nm->test_oops_do_mark())) {
- G1PointsIntoCSOopClosure predicate_cl(_g1);
- nm->oops_do(&predicate_cl);
-
- if (predicate_cl.points_into_cs()) {
- // At least one of the reference fields or the oop relocations
- // in the nmethod points into the collection set. We have to
- // 'mark' this nmethod.
- // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
- // or MarkingCodeBlobClosure::do_code_blob() change.
- if (!nm->test_set_oops_do_mark()) {
- do_newly_marked_nmethod(nm);
- }
- }
- }
- }
-};
-
// This method is run in a GC worker.
void
@@ -5020,9 +5139,10 @@ g1_process_strong_roots(bool is_scavenging,
BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
- // Walk the code cache w/o buffering, because StarTask cannot handle
- // unaligned oop locations.
- G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
+ assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
+ // Walk the code cache/strong code roots w/o buffering, because StarTask
+ // cannot handle unaligned oop locations.
+ CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
process_strong_roots(false, // no scoping; this is parallel code
is_scavenging, so,
@@ -5067,9 +5187,22 @@ g1_process_strong_roots(bool is_scavenging,
}
g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
+ // If this is an initial mark pause, and we're not scanning
+ // the entire code cache, we need to mark the oops in the
+ // strong code root lists for the regions that are not in
+ // the collection set.
+ // Note all threads participate in this set of root tasks.
+ double mark_strong_code_roots_ms = 0.0;
+ if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
+ double mark_strong_roots_start = os::elapsedTime();
+ mark_strong_code_roots(worker_i);
+ mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
+ }
+ g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
+
// Now scan the complement of the collection set.
if (scan_rs != NULL) {
- g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
+ g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
}
_process_strong_tasks->all_tasks_completed();
}
@@ -5090,7 +5223,6 @@ class G1AlwaysAliveClosure: public BoolObjectClosure {
G1CollectedHeap* _g1;
public:
G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
- void do_object(oop p) { assert(false, "Do not call."); }
bool do_object_b(oop p) {
if (p != NULL) {
return true;
@@ -5165,7 +5297,7 @@ public:
// will be copied, the reference field set to point to the
// new location, and the RSet updated. Otherwise we need to
// use the non-heap or metadata closures directly to copy
- // the refernt object and update the pointer, while avoiding
+ // the referent object and update the pointer, while avoiding
// updating the RSet.
if (_g1h->is_in_g1_reserved(p)) {
@@ -5333,7 +5465,7 @@ public:
}
};
-// Driver routine for parallel reference enqueing.
+// Driver routine for parallel reference enqueueing.
// Creates an instance of the ref enqueueing gang
// task and has the worker threads execute it.
@@ -5462,7 +5594,7 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
// processor would have seen that the reference object had already
// been 'discovered' and would have skipped discovering the reference,
// but would not have treated the reference object as a regular oop.
- // As a reult the copy closure would not have been applied to the
+ // As a result the copy closure would not have been applied to the
// referent object.
//
// We need to explicitly copy these referent objects - the references
@@ -5538,21 +5670,28 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
// Setup the soft refs policy...
rp->setup_policy(false);
+ ReferenceProcessorStats stats;
if (!rp->processing_is_mt()) {
// Serial reference processing...
- rp->process_discovered_references(&is_alive,
- &keep_alive,
- &drain_queue,
- NULL);
+ stats = rp->process_discovered_references(&is_alive,
+ &keep_alive,
+ &drain_queue,
+ NULL,
+ _gc_timer_stw);
} else {
// Parallel reference processing
assert(rp->num_q() == no_of_gc_workers, "sanity");
assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
- rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
+ stats = rp->process_discovered_references(&is_alive,
+ &keep_alive,
+ &drain_queue,
+ &par_task_executor,
+ _gc_timer_stw);
}
+ _gc_tracer_stw->report_gc_reference_stats(stats);
// We have completed copying any necessary live referent objects
// (that were not copied during the actual pause) so we can
// retire any active alloc buffers
@@ -5576,7 +5715,7 @@ void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
// Serial reference processing...
rp->enqueue_discovered_references();
} else {
- // Parallel reference enqueuing
+ // Parallel reference enqueueing
assert(no_of_gc_workers == workers()->active_workers(),
"Need to reset active workers");
@@ -5593,15 +5732,15 @@ void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
// FIXME
// CM's reference processing also cleans up the string and symbol tables.
// Should we do that here also? We could, but it is a serial operation
- // and could signicantly increase the pause time.
+ // and could significantly increase the pause time.
double ref_enq_time = os::elapsedTime() - ref_enq_start;
g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
}
-void G1CollectedHeap::evacuate_collection_set() {
+void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
_expand_heap_after_alloc_failure = true;
- set_evacuation_failed(false);
+ _evacuation_failed = false;
// Should G1EvacuationFailureALot be in effect for this GC?
NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
@@ -5681,16 +5820,13 @@ void G1CollectedHeap::evacuate_collection_set() {
process_discovered_references(n_workers);
// Weak root processing.
- // Note: when JSR 292 is enabled and code blobs can contain
- // non-perm oops then we will need to process the code blobs
- // here too.
{
G1STWIsAliveClosure is_alive(this);
G1KeepAliveClosure keep_alive(this);
JNIHandles::weak_oops_do(&is_alive, &keep_alive);
}
- release_gc_alloc_regions(n_workers);
+ release_gc_alloc_regions(n_workers, evacuation_info);
g1_rem_set()->cleanup_after_oops_into_collection_set_do();
// Reset and re-enable the hot card cache.
@@ -5699,6 +5835,17 @@ void G1CollectedHeap::evacuate_collection_set() {
hot_card_cache->reset_hot_cache();
hot_card_cache->set_use_cache(true);
+ // Migrate the strong code roots attached to each region in
+ // the collection set. Ideally we would like to do this
+ // after we have finished the scanning/evacuation of the
+ // strong code roots for a particular heap region.
+ migrate_strong_code_roots();
+
+ if (g1_policy()->during_initial_mark_pause()) {
+ // Reset the claim values set during marking the strong code roots
+ reset_heap_region_claim_values();
+ }
+
finalize_for_evac_failure();
if (evacuation_failed()) {
@@ -5713,7 +5860,7 @@ void G1CollectedHeap::evacuate_collection_set() {
// Enqueue any remaining references remaining on the STW
// reference processor's discovered lists. We need to do
// this after the card table is cleaned (and verified) as
- // the act of enqueuing entries on to the pending list
+ // the act of enqueueing entries on to the pending list
// will log these updates (and dirty their associated
// cards). We need these updates logged to update any
// RSets.
@@ -5941,7 +6088,7 @@ void G1CollectedHeap::cleanUpCardTable() {
g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
}
-void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
+void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
size_t pre_used = 0;
FreeRegionList local_free_list("Local List for CSet Freeing");
@@ -6027,10 +6174,12 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
cur->set_evacuation_failed(false);
// The region is now considered to be old.
_old_set.add(cur);
+ evacuation_info.increment_collectionset_used_after(cur->used());
}
cur = next;
}
+ evacuation_info.set_regions_freed(local_free_list.length());
policy->record_max_rs_lengths(rs_lengths);
policy->cset_regions_freed();
@@ -6493,3 +6642,208 @@ void G1CollectedHeap::verify_region_sets() {
_humongous_set.verify_end();
_free_list.verify_end();
}
+
+// Optimized nmethod scanning
+
+class RegisterNMethodOopClosure: public OopClosure {
+ G1CollectedHeap* _g1h;
+ nmethod* _nm;
+
+ template <class T> void do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ HeapRegion* hr = _g1h->heap_region_containing(obj);
+ assert(!hr->isHumongous(), "code root in humongous region?");
+
+ // HeapRegion::add_strong_code_root() avoids adding duplicate
+ // entries but having duplicates is OK since we "mark" nmethods
+ // as visited when we scan the strong code root lists during the GC.
+ hr->add_strong_code_root(_nm);
+ assert(hr->rem_set()->strong_code_roots_list_contains(_nm), "add failed?");
+ }
+ }
+
+public:
+ RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
+ _g1h(g1h), _nm(nm) {}
+
+ void do_oop(oop* p) { do_oop_work(p); }
+ void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+class UnregisterNMethodOopClosure: public OopClosure {
+ G1CollectedHeap* _g1h;
+ nmethod* _nm;
+
+ template <class T> void do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ HeapRegion* hr = _g1h->heap_region_containing(obj);
+ assert(!hr->isHumongous(), "code root in humongous region?");
+ hr->remove_strong_code_root(_nm);
+ assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), "remove failed?");
+ }
+ }
+
+public:
+ UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
+ _g1h(g1h), _nm(nm) {}
+
+ void do_oop(oop* p) { do_oop_work(p); }
+ void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+void G1CollectedHeap::register_nmethod(nmethod* nm) {
+ CollectedHeap::register_nmethod(nm);
+
+ guarantee(nm != NULL, "sanity");
+ RegisterNMethodOopClosure reg_cl(this, nm);
+ nm->oops_do(&reg_cl);
+}
+
+void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
+ CollectedHeap::unregister_nmethod(nm);
+
+ guarantee(nm != NULL, "sanity");
+ UnregisterNMethodOopClosure reg_cl(this, nm);
+ nm->oops_do(&reg_cl, true);
+}
+
+class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
+public:
+ bool doHeapRegion(HeapRegion *hr) {
+ assert(!hr->isHumongous(), "humongous region in collection set?");
+ hr->migrate_strong_code_roots();
+ return false;
+ }
+};
+
+void G1CollectedHeap::migrate_strong_code_roots() {
+ MigrateCodeRootsHeapRegionClosure cl;
+ double migrate_start = os::elapsedTime();
+ collection_set_iterate(&cl);
+ double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
+ g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
+}
+
+// Mark all the code roots that point into regions *not* in the
+// collection set.
+//
+// Note we do not want to use a "marking" CodeBlobToOopClosure while
+// walking the code root lists of regions not in the collection
+// set. Suppose we have an nmethod (M) that points to objects in two
+// separate regions - one in the collection set (R1) and one not (R2).
+// Using a "marking" CodeBlobToOopClosure here would result in "marking"
+// nmethod M when walking the code roots for R1. When we come to scan
+// the code roots for R2, we would see that M is already marked and it
+// would be skipped and the objects in R2 that are referenced from M
+// would not be evacuated.
+
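A sketch of the hazard described above (editorial; the helper is
hypothetical, but test_set_oops_do_mark() is the claiming call used by
marking closures, as in the G1FilteredCodeBlobToOopClosure removed earlier
in this diff):

    void visit_with_marking_closure(nmethod* nm, OopClosure* cl) {
      if (!nm->test_set_oops_do_mark()) {  // first visit (via R1) claims M
        nm->oops_do(cl);                   // applies cl to M's oops once
      }
      // A second visit (via R2) finds M already claimed and is skipped,
      // so objects in R2 referenced only from M would never be evacuated.
    }
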
+class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
+
+ class MarkStrongCodeRootOopClosure: public OopClosure {
+ ConcurrentMark* _cm;
+ HeapRegion* _hr;
+ uint _worker_id;
+
+ template <class T> void do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ // Only mark objects in the region (which is assumed
+ // not to be in the collection set).
+ if (_hr->is_in(obj)) {
+ _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
+ }
+ }
+ }
+
+ public:
+ MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
+ _cm(cm), _hr(hr), _worker_id(worker_id) {
+ assert(!_hr->in_collection_set(), "sanity");
+ }
+
+ void do_oop(narrowOop* p) { do_oop_work(p); }
+ void do_oop(oop* p) { do_oop_work(p); }
+ };
+
+ MarkStrongCodeRootOopClosure _oop_cl;
+
+public:
+ MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
+ _oop_cl(cm, hr, worker_id) {}
+
+ void do_code_blob(CodeBlob* cb) {
+ nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
+ if (nm != NULL) {
+ nm->oops_do(&_oop_cl);
+ }
+ }
+};
+
+class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
+ G1CollectedHeap* _g1h;
+ uint _worker_id;
+
+public:
+ MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
+ _g1h(g1h), _worker_id(worker_id) {}
+
+ bool doHeapRegion(HeapRegion *hr) {
+ HeapRegionRemSet* hrrs = hr->rem_set();
+ if (hr->isHumongous()) {
+ // Code roots should never be attached to a humongous region
+ assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
+ return false;
+ }
+
+ if (hr->in_collection_set()) {
+ // Don't mark code roots into regions in the collection set here.
+ // They will be marked when we scan them.
+ return false;
+ }
+
+ MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
+ hr->strong_code_roots_do(&cb_cl);
+ return false;
+ }
+};
+
+void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
+ MarkStrongCodeRootsHRClosure cl(this, worker_id);
+ if (G1CollectedHeap::use_parallel_gc_threads()) {
+ heap_region_par_iterate_chunked(&cl,
+ worker_id,
+ workers()->active_workers(),
+ HeapRegion::ParMarkRootClaimValue);
+ } else {
+ heap_region_iterate(&cl);
+ }
+}
+
+class RebuildStrongCodeRootClosure: public CodeBlobClosure {
+ G1CollectedHeap* _g1h;
+
+public:
+ RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
+ _g1h(g1h) {}
+
+ void do_code_blob(CodeBlob* cb) {
+ nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
+ if (nm == NULL) {
+ return;
+ }
+
+ if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) {
+ _g1h->register_nmethod(nm);
+ }
+ }
+};
+
+void G1CollectedHeap::rebuild_strong_code_roots() {
+ RebuildStrongCodeRootClosure blob_cl(this);
+ CodeCache::blobs_do(&blob_cl);
+}
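
Reading aid (editorial) for the new code-root machinery in this file, as
introduced by the hunks above:

    register_nmethod()/unregister_nmethod() -- keep each region's strong
        code root list in sync as nmethods are created and flushed.
    mark_strong_code_roots(worker_id)       -- initial-mark pauses only:
        grays objects referenced from code roots of regions that are
        *not* in the collection set.
    migrate_strong_code_roots()             -- after evacuation: moves
        code root entries off collection-set regions to the regions the
        nmethods' oops now point into.
    rebuild_strong_code_roots()             -- after a full GC: re-walks
        the code cache and re-registers scavengable nmethods.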
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 32f5a46b4..747b23262 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,12 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
-#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
@@ -44,6 +46,7 @@
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.
+// Forward declarations
class HeapRegion;
class HRRSCleanupTask;
class GenerationSpec;
@@ -61,7 +64,13 @@ class HeapRegionRemSetIterator;
class ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
+class ConcurrentGCTimer;
class GenerationCounters;
+class STWGCTimer;
+class G1NewTracer;
+class G1OldTracer;
+class EvacuationFailedInfo;
+class nmethod;
typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@@ -156,19 +165,6 @@ public:
: G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};
-// The G1 STW is alive closure.
-// An instance is embedded into the G1CH and used as the
-// (optional) _is_alive_non_header closure in the STW
-// reference processor. It is also extensively used during
-// refence processing during STW evacuation pauses.
-class G1STWIsAliveClosure: public BoolObjectClosure {
- G1CollectedHeap* _g1;
-public:
- G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
- void do_object(oop p) { assert(false, "Do not call."); }
- bool do_object_b(oop p);
-};
-
class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
@@ -187,6 +183,18 @@ public:
: G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
};
+// The G1 STW is alive closure.
+// An instance is embedded into the G1CH and used as the
+// (optional) _is_alive_non_header closure in the STW
+// reference processor. It is also extensively used during
+// reference processing during STW evacuation pauses.
+class G1STWIsAliveClosure: public BoolObjectClosure {
+ G1CollectedHeap* _g1;
+public:
+ G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+ bool do_object_b(oop p);
+};
+
class RefineCardTableEntryClosure;
class G1CollectedHeap : public SharedHeap {
@@ -324,10 +332,10 @@ private:
void release_mutator_alloc_region();
// It initializes the GC alloc regions at the start of a GC.
- void init_gc_alloc_regions();
+ void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
// It releases the GC alloc regions at the end of a GC.
- void release_gc_alloc_regions(uint no_of_gc_workers);
+ void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
// It does any cleanup that needs to be done on the GC alloc regions
// before a Full GC.
@@ -390,6 +398,8 @@ private:
// concurrent cycles) we have completed.
volatile unsigned int _old_marking_cycles_completed;
+ bool _concurrent_cycle_started;
+
// This is a non-product method that is helpful for testing. It is
// called at the end of a GC and artificially expands the heap by
// allocating a number of dead regions. This way we can induce very
@@ -594,11 +604,6 @@ protected:
// may not be a humongous - it must fit into a single heap region.
HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
- HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
- HeapRegion* alloc_region,
- bool par,
- size_t word_size);
-
// Ensure that no further allocations can happen in "r", bearing in mind
// that parallel threads might be attempting allocations.
void par_allocate_remaining_space(HeapRegion* r);
@@ -740,6 +745,12 @@ public:
return _old_marking_cycles_completed;
}
+ void register_concurrent_cycle_start(jlong start_time);
+ void register_concurrent_cycle_end();
+ void trace_heap_after_concurrent_cycle();
+
+ G1YCType yc_type();
+
G1HRPrinter* hr_printer() { return &_hr_printer; }
protected:
@@ -765,9 +776,10 @@ protected:
// it has to be read while holding the Heap_lock. Currently, both
// methods that call do_collection_pause() release the Heap_lock
// before the call, so it's easy to read gc_count_before just before.
- HeapWord* do_collection_pause(size_t word_size,
- unsigned int gc_count_before,
- bool* succeeded);
+ HeapWord* do_collection_pause(size_t word_size,
+ unsigned int gc_count_before,
+ bool* succeeded,
+ GCCause::Cause gc_cause);
// The guts of the incremental collection pause, executed by the vm
// thread. It returns false if it is unable to do the collection due
@@ -775,7 +787,7 @@ protected:
bool do_collection_pause_at_safepoint(double target_pause_time_ms);
// Actually do the work of evacuating the collection set.
- void evacuate_collection_set();
+ void evacuate_collection_set(EvacuationInfo& evacuation_info);
// The g1 remembered set of the heap.
G1RemSet* _g1_rem_set;
@@ -800,7 +812,7 @@ protected:
// After a collection pause, make the regions in the CS into free
// regions.
- void free_collection_set(HeapRegion* cs_head);
+ void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
// Abandon the current collection set without recording policy
// statistics or updating free lists.
@@ -869,9 +881,7 @@ protected:
// True iff an evacuation has failed in the current collection.
bool _evacuation_failed;
- // Set the attribute indicating whether evacuation has failed in the
- // current collection.
- void set_evacuation_failed(bool b) { _evacuation_failed = b; }
+ EvacuationFailedInfo* _evacuation_failed_info_array;
// Failed evacuations cause some logical from-space objects to have
// forwarding pointers to themselves. Reset them.
@@ -913,7 +923,7 @@ protected:
void finalize_for_evac_failure();
// An attempt to evacuate "obj" has failed; take necessary steps.
- oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
+ oop handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop obj);
void handle_evacuation_failure_common(oop obj, markOop m);
#ifndef PRODUCT
@@ -945,13 +955,13 @@ protected:
inline bool evacuation_should_fail();
// Reset the G1EvacuationFailureALot counters. Should be called at
- // the end of an evacuation pause in which an evacuation failure ocurred.
+ // the end of an evacuation pause in which an evacuation failure occurred.
inline void reset_evacuation_should_fail();
#endif // !PRODUCT
// ("Weak") Reference processing support.
//
- // G1 has 2 instances of the referece processor class. One
+ // G1 has 2 instances of the reference processor class. One
// (_ref_processor_cm) handles reference object discovery
// and subsequent processing during concurrent marking cycles.
//
@@ -1001,6 +1011,12 @@ protected:
// The (stw) reference processor...
ReferenceProcessor* _ref_processor_stw;
+ STWGCTimer* _gc_timer_stw;
+ ConcurrentGCTimer* _gc_timer_cm;
+
+ G1OldTracer* _gc_tracer_cm;
+ G1NewTracer* _gc_tracer_stw;
+
// During reference object discovery, the _is_alive_non_header
// closure (if non-null) is applied to the referent object to
// determine whether the referent is live. If so then the
@@ -1076,6 +1092,9 @@ public:
// specified by the policy object.
jint initialize();
+ // Return the (conservative) maximum heap alignment for any G1 heap
+ static size_t conservative_max_heap_alignment();
+
// Initialize weak reference processing.
virtual void ref_processing_init();
@@ -1146,9 +1165,12 @@ public:
// The STW reference processor....
ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
- // The Concurent Marking reference processor...
+ // The Concurrent Marking reference processor...
ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
+ ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
+ G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
+
virtual size_t capacity() const;
virtual size_t used() const;
// This should be called when we're not holding the heap lock. The
@@ -1206,7 +1228,7 @@ public:
// verify_region_sets_optional() is planted in the code for
// list verification in non-product builds (and it can be enabled in
- // product builds by definning HEAP_REGION_SET_FORCE_VERIFY to be 1).
+ // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
void verify_region_sets_optional() {
verify_region_sets();
@@ -1272,7 +1294,7 @@ public:
// The same as above but assume that the caller holds the Heap_lock.
void collect_locked(GCCause::Cause cause);
- // True iff a evacuation has failed in the most-recent collection.
+ // True iff an evacuation has failed in the most-recent collection.
bool evacuation_failed() { return _evacuation_failed; }
// It will free a region if it has allocated objects in it that are
@@ -1344,11 +1366,6 @@ public:
object_iterate(cl);
}
- // Iterate over all objects allocated since the last collection, calling
- // "cl.do_object" on each. The heap must have been initialized properly
- // to support this function, or else this call will fail.
- virtual void object_iterate_since_last_GC(ObjectClosure* cl);
-
// Iterate over all spaces in use in the heap, in ascending address order.
virtual void space_iterate(SpaceClosure* cl);
@@ -1538,41 +1555,6 @@ public:
virtual jlong millis_since_last_gc();
- // Perform any cleanup actions necessary before allowing a verification.
- virtual void prepare_for_verify();
-
- // Perform verification.
-
- // vo == UsePrevMarking -> use "prev" marking information,
- // vo == UseNextMarking -> use "next" marking information
- // vo == UseMarkWord -> use the mark word in the object header
- //
- // NOTE: Only the "prev" marking information is guaranteed to be
- // consistent most of the time, so most calls to this should use
- // vo == UsePrevMarking.
- // Currently, there is only one case where this is called with
- // vo == UseNextMarking, which is to verify the "next" marking
- // information at the end of remark.
- // Currently there is only one place where this is called with
- // vo == UseMarkWord, which is to verify the marking during a
- // full GC.
- void verify(bool silent, VerifyOption vo);
-
- // Override; it uses the "prev" marking information
- virtual void verify(bool silent);
- virtual void print_on(outputStream* st) const;
- virtual void print_extended_on(outputStream* st) const;
- virtual void print_on_error(outputStream* st) const;
-
- virtual void print_gc_threads_on(outputStream* st) const;
- virtual void gc_threads_do(ThreadClosure* tc) const;
-
- // Override
- void print_tracing_info() const;
-
- // The following two methods are helpful for debugging RSet issues.
- void print_cset_rsets() PRODUCT_RETURN;
- void print_all_rsets() PRODUCT_RETURN;
// Convenience function to be used in situations where the heap type can be
// asserted to be this type.
@@ -1649,13 +1631,86 @@ public:
else return is_obj_ill(obj, hr);
}
+ bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
+ HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
+ bool is_marked(oop obj, VerifyOption vo);
+ const char* top_at_mark_start_str(VerifyOption vo);
+
+ ConcurrentMark* concurrent_mark() const { return _cm; }
+
+ // Refinement
+
+ ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
+
+ // The dirty cards region list is used to record a subset of regions
+ // whose cards need clearing. The list is populated during the
+ // remembered set scanning and drained during the card table
+ // cleanup. Although the methods are reentrant, population/draining
+ // phases must not overlap. For synchronization purposes the last
+ // element on the list points to itself.
+ HeapRegion* _dirty_cards_region_list;
+ void push_dirty_cards_region(HeapRegion* hr);
+ HeapRegion* pop_dirty_cards_region();
+
+ // Optimized nmethod scanning support routines
+
+ // Register the given nmethod with the G1 heap
+ virtual void register_nmethod(nmethod* nm);
+
+ // Unregister the given nmethod from the G1 heap
+ virtual void unregister_nmethod(nmethod* nm);
+
+ // Migrate the nmethods in the code root lists of the regions
+ // in the collection set to regions in to-space. In the event
+ // of an evacuation failure, nmethods that reference objects
+ // that were not successfully evacuated are not migrated.
+ void migrate_strong_code_roots();
+
+ // During an initial mark pause, mark all the code roots that
+ // point into regions *not* in the collection set.
+ void mark_strong_code_roots(uint worker_id);
+
+ // Rebuild the strong code root lists for each region
+ // after a full GC
+ void rebuild_strong_code_roots();
+
+ // Verification
+
+ // The following is just to alert the verification code
+ // that a full collection has occurred and that the
+ // remembered sets are no longer up to date.
+ bool _full_collection;
+ void set_full_collection() { _full_collection = true;}
+ void clear_full_collection() {_full_collection = false;}
+ bool full_collection() {return _full_collection;}
+
+ // Perform any cleanup actions necessary before allowing a verification.
+ virtual void prepare_for_verify();
+
+ // Perform verification.
+
+ // vo == UsePrevMarking -> use "prev" marking information,
+ // vo == UseNextMarking -> use "next" marking information
+ // vo == UseMarkWord -> use the mark word in the object header
+ //
+ // NOTE: Only the "prev" marking information is guaranteed to be
+ // consistent most of the time, so most calls to this should use
+ // vo == UsePrevMarking.
+ // Currently, there is only one case where this is called with
+ // vo == UseNextMarking, which is to verify the "next" marking
+ // information at the end of remark.
+ // Currently there is only one place where this is called with
+ // vo == UseMarkWord, which is to verify the marking during a
+ // full GC.
+ void verify(bool silent, VerifyOption vo);
+
+ // Override; it uses the "prev" marking information
+ virtual void verify(bool silent);
+
// The methods below are here for convenience and dispatch the
// appropriate method depending on value of the given VerifyOption
- // parameter. The options for that parameter are:
- //
- // vo == UsePrevMarking -> use "prev" marking information,
- // vo == UseNextMarking -> use "next" marking information,
- // vo == UseMarkWord -> use mark word from object header
+ // parameter. The values for that parameter, and their meanings,
+ // are the same as those above.
bool is_obj_dead_cond(const oop obj,
const HeapRegion* hr,
@@ -1680,31 +1735,21 @@ public:
return false; // keep some compilers happy
}
- bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
- HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
- bool is_marked(oop obj, VerifyOption vo);
- const char* top_at_mark_start_str(VerifyOption vo);
+ // Printing
- // The following is just to alert the verification code
- // that a full collection has occurred and that the
- // remembered sets are no longer up to date.
- bool _full_collection;
- void set_full_collection() { _full_collection = true;}
- void clear_full_collection() {_full_collection = false;}
- bool full_collection() {return _full_collection;}
+ virtual void print_on(outputStream* st) const;
+ virtual void print_extended_on(outputStream* st) const;
+ virtual void print_on_error(outputStream* st) const;
- ConcurrentMark* concurrent_mark() const { return _cm; }
- ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
+ virtual void print_gc_threads_on(outputStream* st) const;
+ virtual void gc_threads_do(ThreadClosure* tc) const;
- // The dirty cards region list is used to record a subset of regions
- // whose cards need clearing. The list if populated during the
- // remembered set scanning and drained during the card table
- // cleanup. Although the methods are reentrant, population/draining
- // phases must not overlap. For synchronization purposes the last
- // element on the list points to itself.
- HeapRegion* _dirty_cards_region_list;
- void push_dirty_cards_region(HeapRegion* hr);
- HeapRegion* pop_dirty_cards_region();
+ // Override
+ void print_tracing_info() const;
+
+ // The following two methods are helpful for debugging RSet issues.
+ void print_cset_rsets() PRODUCT_RETURN;
+ void print_all_rsets() PRODUCT_RETURN;
public:
void stop_conc_gc_threads();
@@ -1734,6 +1779,95 @@ public:
ParGCAllocBuffer::retire(end_of_gc, retain);
_retired = true;
}
+
+ bool is_retired() {
+ return _retired;
+ }
+};
+
+class G1ParGCAllocBufferContainer {
+protected:
+ static int const _priority_max = 2;
+ G1ParGCAllocBuffer* _priority_buffer[_priority_max];
+
+public:
+ G1ParGCAllocBufferContainer(size_t gclab_word_size) {
+ for (int pr = 0; pr < _priority_max; ++pr) {
+ _priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
+ }
+ }
+
+ ~G1ParGCAllocBufferContainer() {
+ for (int pr = 0; pr < _priority_max; ++pr) {
+ assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point.");
+ delete _priority_buffer[pr];
+ }
+ }
+
+ HeapWord* allocate(size_t word_sz) {
+ HeapWord* obj;
+ for (int pr = 0; pr < _priority_max; ++pr) {
+ obj = _priority_buffer[pr]->allocate(word_sz);
+ if (obj != NULL) return obj;
+ }
+ return obj;
+ }
+
+ bool contains(void* addr) {
+ for (int pr = 0; pr < _priority_max; ++pr) {
+ if (_priority_buffer[pr]->contains(addr)) return true;
+ }
+ return false;
+ }
+
+ void undo_allocation(HeapWord* obj, size_t word_sz) {
+ bool finish_undo = false; // set once the owning buffer undoes the allocation
+ for (int pr = 0; pr < _priority_max; ++pr) {
+ if (_priority_buffer[pr]->contains(obj)) {
+ _priority_buffer[pr]->undo_allocation(obj, word_sz);
+ finish_undo = true;
+ }
+ }
+ if (!finish_undo) ShouldNotReachHere();
+ }
+
+ size_t words_remaining() {
+ size_t result = 0;
+ for (int pr = 0; pr < _priority_max; ++pr) {
+ result += _priority_buffer[pr]->words_remaining();
+ }
+ return result;
+ }
+
+ size_t words_remaining_in_retired_buffer() {
+ G1ParGCAllocBuffer* retired = _priority_buffer[0];
+ return retired->words_remaining();
+ }
+
+ void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
+ for (int pr = 0; pr < _priority_max; ++pr) {
+ _priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
+ }
+ }
+
+ void update(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
+ G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
+ retired_and_set->retire(end_of_gc, retain);
+ retired_and_set->set_buf(buf);
+ retired_and_set->set_word_size(word_sz);
+ adjust_priority_order();
+ }
+
+private:
+ void adjust_priority_order() {
+ G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
+
+ int last = _priority_max - 1;
+ for (int pr = 0; pr < last; ++pr) {
+ _priority_buffer[pr] = _priority_buffer[pr + 1];
+ }
+ _priority_buffer[last] = retired_and_set;
+ }
};
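
Design note (editorial): slot 0 of _priority_buffer holds the oldest buffer,
which is the next to be retired; allocate() tries it first so a partially
filled buffer is drained before a fresher one, and update() retires it,
installs the new buffer in its place, then rotates it to the back. A
standalone sketch of that rotation, with illustrative types:

    static void rotate_to_back(void* slots[], int n) {
      void* freshly_refilled = slots[0];   // just retired and given a new buf
      for (int i = 0; i < n - 1; ++i) {
        slots[i] = slots[i + 1];           // older buffers gain priority
      }
      slots[n - 1] = freshly_refilled;     // fresh buffer is tried last
    }
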
class G1ParScanThreadState : public StackObj {
@@ -1744,9 +1878,9 @@ protected:
CardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
- G1ParGCAllocBuffer _surviving_alloc_buffer;
- G1ParGCAllocBuffer _tenured_alloc_buffer;
- G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+ G1ParGCAllocBufferContainer _surviving_alloc_buffer;
+ G1ParGCAllocBufferContainer _tenured_alloc_buffer;
+ G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount];
ageTable _age_table;
size_t _alloc_buffer_waste;
@@ -1756,7 +1890,7 @@ protected:
G1ParScanHeapEvacClosure* _evac_cl;
G1ParScanPartialArrayClosure* _partial_scan_cl;
- int _hash_seed;
+ int _hash_seed;
uint _queue_num;
size_t _term_attempts;
@@ -1810,7 +1944,7 @@ public:
RefToScanQueue* refs() { return _refs; }
ageTable* age_table() { return &_age_table; }
- G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
+ G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) {
return _alloc_buffers[purpose];
}
@@ -1840,15 +1974,13 @@ public:
HeapWord* obj = NULL;
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
- G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
- add_to_alloc_buffer_waste(alloc_buf->words_remaining());
- alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+ G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose);
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
if (buf == NULL) return NULL; // Let caller handle allocation failure.
- // Otherwise.
- alloc_buf->set_word_size(gclab_word_size);
- alloc_buf->set_buf(buf);
+
+ add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired_buffer());
+ alloc_buf->update(false /* end_of_gc */, false /* retain */, buf, gclab_word_size);
obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
@@ -1960,7 +2092,6 @@ public:
}
}
-public:
void trim_queue();
};
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index a21290c83..178acd26a 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -124,9 +124,12 @@ G1CollectorPolicy::G1CollectorPolicy() :
_last_young_gc(false),
_last_gc_was_young(false),
- _eden_bytes_before_gc(0),
- _survivor_bytes_before_gc(0),
- _capacity_before_gc(0),
+ _eden_used_bytes_before_gc(0),
+ _survivor_used_bytes_before_gc(0),
+ _heap_used_bytes_before_gc(0),
+ _metaspace_used_bytes_before_gc(0),
+ _eden_capacity_bytes_before_gc(0),
+ _heap_capacity_bytes_before_gc(0),
_eden_cset_region_length(0),
_survivor_cset_region_length(0),
@@ -165,7 +168,15 @@ G1CollectorPolicy::G1CollectorPolicy() :
// Set up the region size and associated fields. Given that the
// policy is created before the heap, we have to set this up here,
// so it's done as soon as possible.
- HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
+
+ // It would have been natural to pass initial_heap_byte_size() and
+ // max_heap_byte_size() to setup_heap_region_size() but those have
+ // not been set up at this point since they should be aligned with
+ // the region size. So, there is a circular dependency here. We base
+ // the region size on the heap size, but the heap size should be
+ // aligned with the region size. To get around this we use the
+ // unaligned values for the heap.
+ HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
HeapRegionRemSet::setup_remset_size();
G1ErgoVerbose::initialize();
@@ -310,7 +321,8 @@ G1CollectorPolicy::G1CollectorPolicy() :
void G1CollectorPolicy::initialize_flags() {
set_min_alignment(HeapRegion::GrainBytes);
size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
- set_max_alignment(MAX2(card_table_alignment, min_alignment()));
+ size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+ set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
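+ // Editorial example (illustrative numbers): with a 1M region size as
+ // min_alignment() and 2M large pages in use, MAX3 yields a 2M maximum
+ // alignment, where the previous MAX2 of the card table constraint and
+ // the region size could come out smaller than the page size.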
if (SurvivorRatio < 1) {
vm_exit_during_initialization("Invalid survivor ratio specified");
}
@@ -746,7 +758,7 @@ G1CollectorPolicy::verify_young_ages(HeapRegion* head,
void G1CollectorPolicy::record_full_collection_start() {
_full_collection_start_sec = os::elapsedTime();
- record_heap_size_info_at_start();
+ record_heap_size_info_at_start(true /* full */);
// Release the future to-space so that it is available for compaction into.
_g1->set_full_collection();
}
@@ -803,7 +815,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
_trace_gen0_time_data.record_start_collection(s_w_t_ms);
_stop_world_start = 0.0;
- record_heap_size_info_at_start();
+ record_heap_size_info_at_start(false /* full */);
phase_times()->record_cur_collection_start_sec(start_time_sec);
_pending_cards = _g1->pending_card_num();
@@ -870,7 +882,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
size_t alloc_byte_size = alloc_word_size * HeapWordSize;
if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
- if (gcs_are_young()) {
+ if (gcs_are_young() && !_last_young_gc) {
ergo_verbose5(ErgoConcCycles,
"request concurrent cycle initiation",
ergo_format_reason("occupancy higher than threshold")
@@ -906,7 +918,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001
-void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
+void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
double end_time_sec = os::elapsedTime();
assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
"otherwise, the subtraction below does not make sense");
@@ -928,7 +940,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
last_pause_included_initial_mark = during_initial_mark_pause();
if (last_pause_included_initial_mark) {
record_concurrent_mark_init_end(0.0);
- } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
+ } else if (need_to_start_conc_mark("end of GC")) {
// Note: this might have already been set, if during the last
// pause we decided to start a cycle but at the beginning of
// this pause we decided to postpone it. That's OK.
@@ -938,13 +950,8 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
_mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
end_time_sec, false);
- size_t freed_bytes =
- _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
- size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
-
- double survival_fraction =
- (double)surviving_bytes/
- (double)_collection_set_bytes_used_before;
+ evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
+ evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
if (update_stats) {
_trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
@@ -998,6 +1005,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
}
}
}
+
bool new_in_marking_window = _in_marking_window;
bool new_in_marking_window_im = false;
if (during_initial_mark_pause()) {
@@ -1083,8 +1091,10 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
}
_rs_length_diff_seq->add((double) rs_length_diff);
- size_t copied_bytes = surviving_bytes;
+ size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
+ size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
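+ // Worked example (editorial, illustrative figures): if the collection
+ // set held 100M before the pause and overall heap usage dropped by 80M,
+ // the remaining 20M must have been copied out into to-space.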
double cost_per_byte_ms = 0.0;
+
if (copied_bytes > 0) {
cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
if (_in_marking_window) {
@@ -1148,51 +1158,61 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
byte_size_in_proper_unit((double)(bytes)), \
proper_unit_for_byte_size((bytes))
-void G1CollectorPolicy::record_heap_size_info_at_start() {
+void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
YoungList* young_list = _g1->young_list();
- _eden_bytes_before_gc = young_list->eden_used_bytes();
- _survivor_bytes_before_gc = young_list->survivor_used_bytes();
- _capacity_before_gc = _g1->capacity();
-
- _cur_collection_pause_used_at_start_bytes = _g1->used();
+ _eden_used_bytes_before_gc = young_list->eden_used_bytes();
+ _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
+ _heap_capacity_bytes_before_gc = _g1->capacity();
+ _heap_used_bytes_before_gc = _g1->used();
_cur_collection_pause_used_regions_at_start = _g1->used_regions();
- size_t eden_capacity_before_gc =
- (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_bytes_before_gc;
+ _eden_capacity_bytes_before_gc =
+ (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
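+ // Editorial example with illustrative figures: a target young length of
+ // 100 regions of 1M each, minus 10M of survivor occupancy, gives a 90M
+ // eden capacity before the GC.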
- _prev_eden_capacity = eden_capacity_before_gc;
+ if (full) {
+ _metaspace_used_bytes_before_gc = MetaspaceAux::allocated_used_bytes();
+ }
}
void G1CollectorPolicy::print_heap_transition() {
_g1->print_size_transition(gclog_or_tty,
- _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
-}
-
-void G1CollectorPolicy::print_detailed_heap_transition() {
- YoungList* young_list = _g1->young_list();
- size_t eden_bytes = young_list->eden_used_bytes();
- size_t survivor_bytes = young_list->survivor_used_bytes();
- size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
- size_t used = _g1->used();
- size_t capacity = _g1->capacity();
- size_t eden_capacity =
- (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
-
- gclog_or_tty->print_cr(
- " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
- "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
- "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
- EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
- EXT_SIZE_PARAMS(_eden_bytes_before_gc),
- EXT_SIZE_PARAMS(_prev_eden_capacity),
- EXT_SIZE_PARAMS(eden_bytes),
- EXT_SIZE_PARAMS(eden_capacity),
- EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
- EXT_SIZE_PARAMS(survivor_bytes),
- EXT_SIZE_PARAMS(used_before_gc),
- EXT_SIZE_PARAMS(_capacity_before_gc),
- EXT_SIZE_PARAMS(used),
- EXT_SIZE_PARAMS(capacity));
+ _heap_used_bytes_before_gc,
+ _g1->used(),
+ _g1->capacity());
+}
+
+void G1CollectorPolicy::print_detailed_heap_transition(bool full) {
+ YoungList* young_list = _g1->young_list();
+
+ size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
+ size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
+ size_t heap_used_bytes_after_gc = _g1->used();
+
+ size_t heap_capacity_bytes_after_gc = _g1->capacity();
+ size_t eden_capacity_bytes_after_gc =
+ (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
+
+ gclog_or_tty->print(
+ " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
+ "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
+ "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
+ EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
+ EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
+ EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
+ EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
+ EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
+ EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
+ EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
+ EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
+ EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
+ EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
+ EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));
+
+ if (full) {
+ MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
+ }
+
+ gclog_or_tty->cr();
}
void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
@@ -1888,7 +1908,7 @@ uint G1CollectorPolicy::calc_max_old_cset_length() {
}
-void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
+void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
double young_start_time_sec = os::elapsedTime();
YoungList* young_list = _g1->young_list();
@@ -2094,6 +2114,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
double non_young_end_time_sec = os::elapsedTime();
phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
+ evacuation_info.set_collectionset_regions(cset_region_length());
}
void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
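
Reading aid (editorial): the EvacuationInfo record threaded through these
hunks is filled in at the following points of a pause, per the diffs above,
and finally reported via _gc_tracer_stw->report_evacuation_info():

    G1CollectorPolicy::finalize_cset()          -> set_collectionset_regions()
    G1CollectedHeap::init_gc_alloc_regions()    -> set_alloc_regions_used_before()
    G1CollectedHeap::release_gc_alloc_regions() -> set_allocation_regions()
    G1CollectedHeap::free_collection_set()      -> increment_collectionset_used_after(),
                                                   set_regions_freed()
    G1CollectorPolicy::record_collection_pause_end()
                                                -> set_collectionset_used_before(),
                                                   set_bytes_copied()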
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
index 088678503..a497f2fa3 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
@@ -175,7 +175,6 @@ private:
CollectionSetChooser* _collectionSetChooser;
double _full_collection_start_sec;
- size_t _cur_collection_pause_used_at_start_bytes;
uint _cur_collection_pause_used_regions_at_start;
// These exclude marking times.
@@ -194,7 +193,6 @@ private:
uint _young_list_target_length;
uint _young_list_fixed_length;
- size_t _prev_eden_capacity; // used for logging
// The max number of regions we can extend the eden by while the GC
// locker is active. This should be >= _young_list_target_length;
@@ -673,7 +671,7 @@ public:
// Record the start and end of an evacuation pause.
void record_collection_pause_start(double start_time_sec);
- void record_collection_pause_end(double pause_time_ms);
+ void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
// Record the start and end of a full collection.
void record_full_collection_start();
@@ -693,11 +691,11 @@ public:
// Records the information about the heap size for reporting in
// print_detailed_heap_transition
- void record_heap_size_info_at_start();
+ void record_heap_size_info_at_start(bool full);
// Print heap sizing transition (with less and more detail).
void print_heap_transition();
- void print_detailed_heap_transition();
+ void print_detailed_heap_transition(bool full = false);
void record_stop_world_start();
void record_concurrent_pause();
@@ -722,7 +720,7 @@ public:
// Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods.
- void finalize_cset(double target_pause_time_ms);
+ void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
// The head of the list (via "next_in_collection_set()") representing the
// current collection set.
@@ -861,9 +859,16 @@ private:
uint _max_survivor_regions;
// For reporting purposes.
- size_t _eden_bytes_before_gc;
- size_t _survivor_bytes_before_gc;
- size_t _capacity_before_gc;
+ // The value of _heap_used_bytes_before_gc is also used to calculate
+ // the cost of copying.
+
+ size_t _eden_used_bytes_before_gc; // Eden occupancy before GC
+ size_t _survivor_used_bytes_before_gc; // Survivor occupancy before GC
+ size_t _heap_used_bytes_before_gc; // Heap occupancy before GC
+ size_t _metaspace_used_bytes_before_gc; // Metaspace occupancy before GC
+
+ size_t _eden_capacity_bytes_before_gc; // Eden capacity before GC
+ size_t _heap_capacity_bytes_before_gc; // Heap capacity before GC
// The amount of survivor regions after a collection.
uint _recorded_survivor_regions;
@@ -874,6 +879,7 @@ private:
ageTable _survivors_age_table;
public:
+ uint tenuring_threshold() const { return _tenuring_threshold; }
inline GCAllocPurpose
evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
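The renamed *_before_gc fields above are snapshotted in record_heap_size_info_at_start() and diffed against post-pause values when the transition line is printed. A toy record-then-print analogue (our own names, not the policy's API):

    #include <cstdio>
    #include <cstddef>

    struct HeapSnapshot {
      size_t used_before;

      void record_at_start(size_t used) { used_before = used; }

      void print_transition(size_t used_after) const {
        printf("Heap: %zuK->%zuK\n", used_before / 1024, used_after / 1024);
      }
    };

    int main() {
      HeapSnapshot s;
      s.record_at_start(512 * 1024);   // at pause start
      // ... the pause reclaims space ...
      s.print_transition(128 * 1024);  // at pause end
      return 0;
    }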
diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
index f6d3b6f4b..0eda4b35d 100644
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -161,6 +161,8 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_last_update_rs_times_ms(_max_gc_threads, "%.1lf"),
_last_update_rs_processed_buffers(_max_gc_threads, "%d"),
_last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
+ _last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
+ _last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"),
_last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
_last_termination_times_ms(_max_gc_threads, "%.1lf"),
_last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
@@ -182,6 +184,8 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
_last_update_rs_times_ms.reset();
_last_update_rs_processed_buffers.reset();
_last_scan_rs_times_ms.reset();
+ _last_strong_code_root_scan_times_ms.reset();
+ _last_strong_code_root_mark_times_ms.reset();
_last_obj_copy_times_ms.reset();
_last_termination_times_ms.reset();
_last_termination_attempts.reset();
@@ -197,6 +201,8 @@ void G1GCPhaseTimes::note_gc_end() {
_last_update_rs_times_ms.verify();
_last_update_rs_processed_buffers.verify();
_last_scan_rs_times_ms.verify();
+ _last_strong_code_root_scan_times_ms.verify();
+ _last_strong_code_root_mark_times_ms.verify();
_last_obj_copy_times_ms.verify();
_last_termination_times_ms.verify();
_last_termination_attempts.verify();
@@ -210,6 +216,8 @@ void G1GCPhaseTimes::note_gc_end() {
_last_satb_filtering_times_ms.get(i) +
_last_update_rs_times_ms.get(i) +
_last_scan_rs_times_ms.get(i) +
+ _last_strong_code_root_scan_times_ms.get(i) +
+ _last_strong_code_root_mark_times_ms.get(i) +
_last_obj_copy_times_ms.get(i) +
_last_termination_times_ms.get(i);
@@ -239,6 +247,9 @@ double G1GCPhaseTimes::accounted_time_ms() {
// Now subtract the time taken to fix up roots in generated code
misc_time_ms += _cur_collection_code_root_fixup_time_ms;
+ // Strong code root migration time
+ misc_time_ms += _cur_strong_code_root_migration_time_ms;
+
// Subtract the time taken to clean the card table from the
// current value of "other time"
misc_time_ms += _cur_clear_ct_time_ms;
@@ -257,9 +268,13 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
if (_last_satb_filtering_times_ms.sum() > 0.0) {
_last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
}
+ if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
+ _last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)");
+ }
_last_update_rs_times_ms.print(2, "Update RS (ms)");
_last_update_rs_processed_buffers.print(3, "Processed Buffers");
_last_scan_rs_times_ms.print(2, "Scan RS (ms)");
+ _last_strong_code_root_scan_times_ms.print(2, "Code Root Scanning (ms)");
_last_obj_copy_times_ms.print(2, "Object Copy (ms)");
_last_termination_times_ms.print(2, "Termination (ms)");
if (G1Log::finest()) {
@@ -273,12 +288,17 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
if (_last_satb_filtering_times_ms.sum() > 0.0) {
_last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
}
+ if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
+ _last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)");
+ }
_last_update_rs_times_ms.print(1, "Update RS (ms)");
_last_update_rs_processed_buffers.print(2, "Processed Buffers");
_last_scan_rs_times_ms.print(1, "Scan RS (ms)");
+ _last_strong_code_root_scan_times_ms.print(1, "Code Root Scanning (ms)");
_last_obj_copy_times_ms.print(1, "Object Copy (ms)");
}
print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
+ print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms);
print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
print_stats(1, "Other", misc_time_ms);
diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
index 2fa5300b1..b2de97dc4 100644
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -38,7 +38,7 @@ class WorkerDataArray : public CHeapObj<mtGC> {
NOT_PRODUCT(static const T _uninitialized;)
// We are caching the sum and average to only have to calculate them once.
- // This is not done in an MT-safe way. It is intetened to allow single
+ // This is not done in an MT-safe way. It is intended to allow single
// threaded code to call sum() and average() multiple times in any order
// without having to worry about the cost.
bool _has_new_data;
@@ -119,6 +119,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
WorkerDataArray<double> _last_update_rs_times_ms;
WorkerDataArray<int> _last_update_rs_processed_buffers;
WorkerDataArray<double> _last_scan_rs_times_ms;
+ WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
+ WorkerDataArray<double> _last_strong_code_root_mark_times_ms;
WorkerDataArray<double> _last_obj_copy_times_ms;
WorkerDataArray<double> _last_termination_times_ms;
WorkerDataArray<size_t> _last_termination_attempts;
@@ -128,6 +130,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _cur_collection_par_time_ms;
double _cur_collection_code_root_fixup_time_ms;
+ double _cur_strong_code_root_migration_time_ms;
double _cur_clear_ct_time_ms;
double _cur_ref_proc_time_ms;
@@ -179,6 +182,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_last_scan_rs_times_ms.set(worker_i, ms);
}
+ void record_strong_code_root_scan_time(uint worker_i, double ms) {
+ _last_strong_code_root_scan_times_ms.set(worker_i, ms);
+ }
+
+ void record_strong_code_root_mark_time(uint worker_i, double ms) {
+ _last_strong_code_root_mark_times_ms.set(worker_i, ms);
+ }
+
void record_obj_copy_time(uint worker_i, double ms) {
_last_obj_copy_times_ms.set(worker_i, ms);
}
@@ -208,6 +219,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_cur_collection_code_root_fixup_time_ms = ms;
}
+ void record_strong_code_root_migration_time(double ms) {
+ _cur_strong_code_root_migration_time_ms = ms;
+ }
+
void record_ref_proc_time(double ms) {
_cur_ref_proc_time_ms = ms;
}
@@ -294,6 +309,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
return _last_scan_rs_times_ms.average();
}
+ double average_last_strong_code_root_scan_time(){
+ return _last_strong_code_root_scan_times_ms.average();
+ }
+
+ double average_last_strong_code_root_mark_time(){
+ return _last_strong_code_root_mark_times_ms.average();
+ }
+
double average_last_obj_copy_time() {
return _last_obj_copy_times_ms.average();
}
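The comment corrected in this hunk describes a single-threaded cache for sum() and average(). A self-contained analogue of that cache-and-invalidate pattern (a toy class of ours, not WorkerDataArray itself):

    #include <cstdio>
    #include <vector>

    class CachedStats {
      std::vector<double> _data;
      bool _has_new_data;
      double _sum;

      void recompute() {
        _sum = 0.0;
        for (size_t i = 0; i < _data.size(); i++) _sum += _data[i];
        _has_new_data = false;
      }

    public:
      CachedStats() : _has_new_data(true), _sum(0.0) {}

      void set(size_t i, double v) {
        if (i >= _data.size()) _data.resize(i + 1, 0.0);
        _data[i] = v;
        _has_new_data = true;                 // invalidate the cached sum
      }
      double sum()     { if (_has_new_data) recompute(); return _sum; }
      double average() { return _data.empty() ? 0.0 : sum() / _data.size(); }
    };

    int main() {
      CachedStats s;
      s.set(0, 1.5);
      s.set(1, 2.5);
      printf("sum=%.1f avg=%.2f\n", s.sum(), s.average());
      return 0;
    }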
diff --git a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
index d87a6cca1..74aabc129 100644
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,10 @@
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/gcLocker.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/modRefBarrierSet.hpp"
@@ -39,7 +43,6 @@
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
-#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
@@ -119,7 +122,7 @@ void G1MarkSweep::allocate_stacks() {
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- TraceTime tm("phase 1", G1Log::fine() && Verbose, true, gclog_or_tty);
+ GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());
GenMarkSweep::trace(" 1");
SharedHeap* sh = SharedHeap::heap();
@@ -139,10 +142,13 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");
rp->setup_policy(clear_all_softrefs);
- rp->process_discovered_references(&GenMarkSweep::is_alive,
- &GenMarkSweep::keep_alive,
- &GenMarkSweep::follow_stack_closure,
- NULL);
+ const ReferenceProcessorStats& stats =
+ rp->process_discovered_references(&GenMarkSweep::is_alive,
+ &GenMarkSweep::keep_alive,
+ &GenMarkSweep::follow_stack_closure,
+ NULL,
+ gc_timer());
+ gc_tracer()->report_gc_reference_stats(stats);
// This is the point where the entire marking should have completed.
@@ -185,6 +191,8 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
gclog_or_tty->print_cr("]");
}
}
+
+ gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
}
class G1PrepareCompactClosure: public HeapRegionClosure {
@@ -257,7 +265,7 @@ void G1MarkSweep::mark_sweep_phase2() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- TraceTime tm("phase 2", G1Log::fine() && Verbose, true, gclog_or_tty);
+ GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer());
GenMarkSweep::trace("2");
// find the first region
@@ -294,7 +302,7 @@ void G1MarkSweep::mark_sweep_phase3() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// Adjust the pointers to reflect the new locations
- TraceTime tm("phase 3", G1Log::fine() && Verbose, true, gclog_or_tty);
+ GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer());
GenMarkSweep::trace("3");
SharedHeap* sh = SharedHeap::heap();
@@ -353,7 +361,7 @@ void G1MarkSweep::mark_sweep_phase4() {
// to use a higher index (saved from phase2) when verifying perm_gen.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- TraceTime tm("phase 4", G1Log::fine() && Verbose, true, gclog_or_tty);
+ GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer());
GenMarkSweep::trace("4");
G1SpaceCompactClosure blk;
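GCTraceTime keeps TraceTime's RAII shape: the phase is timed from construction to scope exit, and additionally reports into the GC timer passed to it. A standalone analogue of just the scoped-timer idiom (our own class, C++11 chrono instead of os::elapsedTime()):

    #include <chrono>
    #include <cstdio>

    class ScopedPhaseTimer {
      const char* _name;
      std::chrono::steady_clock::time_point _start;
    public:
      explicit ScopedPhaseTimer(const char* name)
          : _name(name), _start(std::chrono::steady_clock::now()) {}
      ~ScopedPhaseTimer() {
        double ms = std::chrono::duration<double, std::milli>(
                        std::chrono::steady_clock::now() - _start).count();
        printf("[%s, %.3f ms]\n", _name, ms);   // emitted when the phase ends
      }
    };

    void mark_sweep_phase1() {
      ScopedPhaseTimer tm("phase 1");
      // ... marking work runs inside the timed scope ...
    }

    int main() { mark_sweep_phase1(); return 0; }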
diff --git a/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp b/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp
index c49bc1939..f1b9d8356 100644
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,9 @@ class G1MarkSweep : AllStatic {
static void invoke_at_safepoint(ReferenceProcessor* rp,
bool clear_all_softrefs);
+ static STWGCTimer* gc_timer() { return GenMarkSweep::_gc_timer; }
+ static SerialOldTracer* gc_tracer() { return GenMarkSweep::_gc_tracer; }
+
private:
// Mark live objects
diff --git a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp
index 16839dcb2..01bb43bef 100644
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp
@@ -262,6 +262,7 @@ void G1MonitoringSupport::update_sizes() {
old_collection_counters()->update_all();
young_collection_counters()->update_all();
MetaspaceCounters::update_performance_counters();
+ CompressedClassSpaceCounters::update_performance_counters();
}
}
diff --git a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp
index 4e1761e26..03b7300ae 100644
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -224,6 +224,7 @@ class G1MonitoringSupport : public CHeapObj<mtGC> {
// Monitoring support used by
// MemoryService
// jstat counters
+ // Tracing
size_t overall_reserved() { return _overall_reserved; }
size_t overall_committed() { return _overall_committed; }
diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
index d527a3fc6..5a1b92d34 100644
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
@@ -34,6 +34,7 @@
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/intHisto.hpp"
@@ -73,7 +74,8 @@ G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
_ct_bs(ct_bs), _g1p(_g1->g1_policy()),
_cg1r(g1->concurrent_g1_refine()),
_cset_rs_update_cl(NULL),
- _cards_scanned(NULL), _total_cards_scanned(0)
+ _cards_scanned(NULL), _total_cards_scanned(0),
+ _prev_period_summary()
{
_seq_task = new SubTasksDone(NumSeqTasks);
guarantee(n_workers() > 0, "There should be some workers");
@@ -81,6 +83,7 @@ G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
for (uint i = 0; i < n_workers(); i++) {
_cset_rs_update_cl[i] = NULL;
}
+ _prev_period_summary.initialize(this, n_workers());
}
G1RemSet::~G1RemSet() {
@@ -101,15 +104,25 @@ void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
class ScanRSClosure : public HeapRegionClosure {
size_t _cards_done, _cards;
G1CollectedHeap* _g1h;
+
OopsInHeapRegionClosure* _oc;
+ CodeBlobToOopClosure* _code_root_cl;
+
G1BlockOffsetSharedArray* _bot_shared;
CardTableModRefBS *_ct_bs;
- int _worker_i;
- int _block_size;
- bool _try_claimed;
+
+ double _strong_code_root_scan_time_sec;
+ int _worker_i;
+ int _block_size;
+ bool _try_claimed;
+
public:
- ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
+ ScanRSClosure(OopsInHeapRegionClosure* oc,
+ CodeBlobToOopClosure* code_root_cl,
+ int worker_i) :
_oc(oc),
+ _code_root_cl(code_root_cl),
+ _strong_code_root_scan_time_sec(0.0),
_cards(0),
_cards_done(0),
_worker_i(worker_i),
@@ -157,6 +170,12 @@ public:
card_start, card_start + G1BlockOffsetSharedArray::N_words);
}
+ void scan_strong_code_roots(HeapRegion* r) {
+ double scan_start = os::elapsedTime();
+ r->strong_code_roots_do(_code_root_cl);
+ _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
+ }
+
bool doHeapRegion(HeapRegion* r) {
assert(r->in_collection_set(), "should only be called on elements of CS.");
HeapRegionRemSet* hrrs = r->rem_set();
@@ -170,6 +189,7 @@ public:
// _try_claimed || r->claim_iter()
// is true: either we're supposed to work on claimed-but-not-complete
// regions, or we successfully claimed the region.
+
HeapRegionRemSetIterator iter(hrrs);
size_t card_index;
@@ -202,30 +222,43 @@ public:
}
}
if (!_try_claimed) {
+ // Scan the strong code root list attached to the current region
+ scan_strong_code_roots(r);
+
hrrs->set_iter_complete();
}
return false;
}
+
+ double strong_code_root_scan_time_sec() {
+ return _strong_code_root_scan_time_sec;
+ }
+
size_t cards_done() { return _cards_done;}
size_t cards_looked_up() { return _cards;}
};
-void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
+void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
+ CodeBlobToOopClosure* code_root_cl,
+ int worker_i) {
double rs_time_start = os::elapsedTime();
HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
- ScanRSClosure scanRScl(oc, worker_i);
+ ScanRSClosure scanRScl(oc, code_root_cl, worker_i);
_g1->collection_set_iterate_from(startRegion, &scanRScl);
scanRScl.set_try_claimed();
_g1->collection_set_iterate_from(startRegion, &scanRScl);
- double scan_rs_time_sec = os::elapsedTime() - rs_time_start;
+ double scan_rs_time_sec = (os::elapsedTime() - rs_time_start)
+ - scanRScl.strong_code_root_scan_time_sec();
- assert( _cards_scanned != NULL, "invariant" );
+ assert(_cards_scanned != NULL, "invariant");
_cards_scanned[worker_i] = scanRScl.cards_done();
_g1p->phase_times()->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
+ _g1p->phase_times()->record_strong_code_root_scan_time(worker_i,
+ scanRScl.strong_code_root_scan_time_sec() * 1000.0);
}
// Closure used for updating RSets and recording references that
@@ -285,7 +318,8 @@ void G1RemSet::cleanupHRRS() {
}
void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
- int worker_i) {
+ CodeBlobToOopClosure* code_root_cl,
+ int worker_i) {
#if CARD_REPEAT_HISTO
ct_freq_update_histo_and_reset();
#endif
@@ -325,7 +359,7 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
_g1p->phase_times()->record_update_rs_time(worker_i, 0.0);
}
if (G1UseParallelRSetScanning || (worker_i == 0)) {
- scanRS(oc, worker_i);
+ scanRS(oc, code_root_cl, worker_i);
} else {
_g1p->phase_times()->record_scan_rs_time(worker_i, 0.0);
}
@@ -697,47 +731,29 @@ bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
return has_refs_into_cset;
}
-class HRRSStatsIter: public HeapRegionClosure {
- size_t _occupied;
- size_t _total_mem_sz;
- size_t _max_mem_sz;
- HeapRegion* _max_mem_sz_region;
-public:
- HRRSStatsIter() :
- _occupied(0),
- _total_mem_sz(0),
- _max_mem_sz(0),
- _max_mem_sz_region(NULL)
- {}
+void G1RemSet::print_periodic_summary_info() {
+ G1RemSetSummary current;
+ current.initialize(this, n_workers());
- bool doHeapRegion(HeapRegion* r) {
- if (r->continuesHumongous()) return false;
- size_t mem_sz = r->rem_set()->mem_size();
- if (mem_sz > _max_mem_sz) {
- _max_mem_sz = mem_sz;
- _max_mem_sz_region = r;
- }
- _total_mem_sz += mem_sz;
- size_t occ = r->rem_set()->occupied();
- _occupied += occ;
- return false;
- }
- size_t total_mem_sz() { return _total_mem_sz; }
- size_t max_mem_sz() { return _max_mem_sz; }
- size_t occupied() { return _occupied; }
- HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
-};
+ _prev_period_summary.subtract_from(&current);
+ print_summary_info(&_prev_period_summary);
-class PrintRSThreadVTimeClosure : public ThreadClosure {
-public:
- virtual void do_thread(Thread *t) {
- ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
- gclog_or_tty->print(" %5.2f", crt->vtime_accum());
- }
-};
+ _prev_period_summary.set(&current);
+}
void G1RemSet::print_summary_info() {
- G1CollectedHeap* g1 = G1CollectedHeap::heap();
+ G1RemSetSummary current;
+ current.initialize(this, n_workers());
+
+ print_summary_info(&current, " Cumulative RS summary");
+}
+
+void G1RemSet::print_summary_info(G1RemSetSummary * summary, const char * header) {
+ assert(summary != NULL, "just checking");
+
+ if (header != NULL) {
+ gclog_or_tty->print_cr("%s", header);
+ }
#if CARD_REPEAT_HISTO
gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
@@ -745,52 +761,13 @@ void G1RemSet::print_summary_info() {
card_repeat_count.print_on(gclog_or_tty);
#endif
- gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
- _conc_refine_cards);
- DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
- jint tot_processed_buffers =
- dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
- gclog_or_tty->print_cr(" Of %d completed buffers:", tot_processed_buffers);
- gclog_or_tty->print_cr(" %8d (%5.1f%%) by conc RS threads.",
- dcqs.processed_buffers_rs_thread(),
- 100.0*(float)dcqs.processed_buffers_rs_thread()/
- (float)tot_processed_buffers);
- gclog_or_tty->print_cr(" %8d (%5.1f%%) by mutator threads.",
- dcqs.processed_buffers_mut(),
- 100.0*(float)dcqs.processed_buffers_mut()/
- (float)tot_processed_buffers);
- gclog_or_tty->print_cr(" Conc RS threads times(s)");
- PrintRSThreadVTimeClosure p;
- gclog_or_tty->print(" ");
- g1->concurrent_g1_refine()->threads_do(&p);
- gclog_or_tty->print_cr("");
-
- HRRSStatsIter blk;
- g1->heap_region_iterate(&blk);
- gclog_or_tty->print_cr(" Total heap region rem set sizes = "SIZE_FORMAT"K."
- " Max = "SIZE_FORMAT"K.",
- blk.total_mem_sz()/K, blk.max_mem_sz()/K);
- gclog_or_tty->print_cr(" Static structures = "SIZE_FORMAT"K,"
- " free_lists = "SIZE_FORMAT"K.",
- HeapRegionRemSet::static_mem_size() / K,
- HeapRegionRemSet::fl_mem_size() / K);
- gclog_or_tty->print_cr(" "SIZE_FORMAT" occupied cards represented.",
- blk.occupied());
- HeapRegion* max_mem_sz_region = blk.max_mem_sz_region();
- HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set();
- gclog_or_tty->print_cr(" Max size region = "HR_FORMAT", "
- "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
- HR_FORMAT_PARAMS(max_mem_sz_region),
- (rem_set->mem_size() + K - 1)/K,
- (rem_set->occupied() + K - 1)/K);
- gclog_or_tty->print_cr(" Did %d coarsenings.",
- HeapRegionRemSet::n_coarsenings());
+ summary->print_on(gclog_or_tty);
}
void G1RemSet::prepare_for_verify() {
if (G1HRRSFlushLogBuffersOnVerify &&
(VerifyBeforeGC || VerifyAfterGC)
- && !_g1->full_collection()) {
+ && (!_g1->full_collection() || G1VerifyRSetsDuringFullGC)) {
cleanupHRRS();
_g1->set_refine_cte_cl_concurrency(false);
if (SafepointSynchronize::is_at_safepoint()) {
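scanRS above times strong-code-root scanning inside the closure and subtracts it from the overall iteration time, so "Scan RS" and "Code Root Scanning" are reported as disjoint phases. The arithmetic as a standalone sketch (timings hypothetical):

    #include <cstdio>

    int main() {
      double rs_time_start    = 0.000;   // pretend os::elapsedTime() values
      double rs_time_end      = 0.012;   // 12 ms for both iterations
      double code_root_scan_s = 0.003;   // accumulated inside ScanRSClosure

      double scan_rs_s = (rs_time_end - rs_time_start) - code_root_scan_s;
      printf("Scan RS (ms): %.1f\n", scan_rs_s * 1000.0);
      printf("Code Root Scanning (ms): %.1f\n", code_root_scan_s * 1000.0);
      return 0;
    }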
diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.hpp b/src/share/vm/gc_implementation/g1/g1RemSet.hpp
index 7444ae819..513945609 100644
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp
@@ -25,6 +25,8 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
+#include "gc_implementation/g1/g1RemSetSummary.hpp"
+
// A G1RemSet provides ways of iterating over pointers into a selected
// collection set.
@@ -37,9 +39,11 @@ class ConcurrentG1Refine;
// so that they can be used to update the individual region remsets.
class G1RemSet: public CHeapObj<mtGC> {
+private:
+ G1RemSetSummary _prev_period_summary;
protected:
G1CollectedHeap* _g1;
- unsigned _conc_refine_cards;
+ size_t _conc_refine_cards;
uint n_workers();
protected:
@@ -66,6 +70,8 @@ protected:
// references into the collection set.
OopsInHeapRegionClosure** _cset_rs_update_cl;
+ // Print the given summary info
+ virtual void print_summary_info(G1RemSetSummary * summary, const char * header = NULL);
public:
// This is called to reset dual hash tables after the gc pause
// is finished and the initial hash table is no longer being
@@ -75,14 +81,23 @@ public:
G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
~G1RemSet();
- // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
- // outside the CS (having invoked "blk->set_region" to set the "from"
- // region correctly beforehand.) The "worker_i" param is for the
- // parallel case where the number of the worker thread calling this
- // function can be helpful in partitioning the work to be done. It
- // should be the same as the "i" passed to the calling thread's
- // work(i) function. In the sequential case this param will be ingored.
- void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, int worker_i);
+ // Invoke "blk->do_oop" on all pointers into the collection set
+ // from objects in regions outside the collection set (having
+ // invoked "blk->set_region" to set the "from" region correctly
+ // beforehand.)
+ //
+ // Invoke code_root_cl->do_code_blob on the unmarked nmethods
+ // on the strong code roots list for each region in the
+ // collection set.
+ //
+ // The "worker_i" param is for the parallel case where the id
+ // of the worker thread calling this function can be helpful in
+ // partitioning the work to be done. It should be the same as
+ // the "i" passed to the calling thread's work(i) function.
+ // In the sequential case this param will be ignored.
+ void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
+ CodeBlobToOopClosure* code_root_cl,
+ int worker_i);
// Prepare for and cleanup after an oops_into_collection_set_do
// call. Must call each of these once before and after (in sequential
@@ -92,7 +107,10 @@ public:
void prepare_for_oops_into_collection_set_do();
void cleanup_after_oops_into_collection_set_do();
- void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
+ void scanRS(OopsInHeapRegionClosure* oc,
+ CodeBlobToOopClosure* code_root_cl,
+ int worker_i);
+
void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
CardTableModRefBS* ct_bs() { return _ct_bs; }
@@ -123,11 +141,18 @@ public:
int worker_i,
bool check_for_refs_into_cset);
- // Print any relevant summary info.
+ // Print accumulated summary info from the start of the VM.
virtual void print_summary_info();
+ // Print accumulated summary info from the last time called.
+ virtual void print_periodic_summary_info();
+
// Prepare remembered set for verification.
virtual void prepare_for_verify();
+
+ size_t conc_refine_cards() const {
+ return _conc_refine_cards;
+ }
};
class CountNonCleanMemRegionClosure: public MemRegionClosure {
diff --git a/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp
new file mode 100644
index 000000000..4a6b37654
--- /dev/null
+++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/concurrentG1Refine.hpp"
+#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1RemSet.inline.hpp"
+#include "gc_implementation/g1/g1RemSetSummary.hpp"
+#include "gc_implementation/g1/heapRegionRemSet.hpp"
+#include "runtime/thread.inline.hpp"
+
+class GetRSThreadVTimeClosure : public ThreadClosure {
+private:
+ G1RemSetSummary* _summary;
+ uint _counter;
+
+public:
+ GetRSThreadVTimeClosure(G1RemSetSummary * summary) : ThreadClosure(), _summary(summary), _counter(0) {
+ assert(_summary != NULL, "just checking");
+ }
+
+ virtual void do_thread(Thread* t) {
+ ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
+ _summary->set_rs_thread_vtime(_counter, crt->vtime_accum());
+ _counter++;
+ }
+};
+
+void G1RemSetSummary::update() {
+ _num_refined_cards = remset()->conc_refine_cards();
+ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+ _num_processed_buf_mutator = dcqs.processed_buffers_mut();
+ _num_processed_buf_rs_threads = dcqs.processed_buffers_rs_thread();
+
+ _num_coarsenings = HeapRegionRemSet::n_coarsenings();
+
+ ConcurrentG1Refine * cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
+ if (_rs_threads_vtimes != NULL) {
+ GetRSThreadVTimeClosure p(this);
+ cg1r->worker_threads_do(&p);
+ }
+ set_sampling_thread_vtime(cg1r->sampling_thread()->vtime_accum());
+}
+
+void G1RemSetSummary::set_rs_thread_vtime(uint thread, double value) {
+ assert(_rs_threads_vtimes != NULL, "just checking");
+ assert(thread < _num_vtimes, "just checking");
+ _rs_threads_vtimes[thread] = value;
+}
+
+double G1RemSetSummary::rs_thread_vtime(uint thread) const {
+ assert(_rs_threads_vtimes != NULL, "just checking");
+ assert(thread < _num_vtimes, "just checking");
+ return _rs_threads_vtimes[thread];
+}
+
+void G1RemSetSummary::initialize(G1RemSet* remset, uint num_workers) {
+ assert(_rs_threads_vtimes == NULL, "just checking");
+ assert(remset != NULL, "just checking");
+
+ _remset = remset;
+ _num_vtimes = num_workers;
+ _rs_threads_vtimes = NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC);
+ memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes);
+
+ update();
+}
+
+void G1RemSetSummary::set(G1RemSetSummary* other) {
+ assert(other != NULL, "just checking");
+ assert(remset() == other->remset(), "just checking");
+ assert(_num_vtimes == other->_num_vtimes, "just checking");
+
+ _num_refined_cards = other->num_concurrent_refined_cards();
+
+ _num_processed_buf_mutator = other->num_processed_buf_mutator();
+ _num_processed_buf_rs_threads = other->num_processed_buf_rs_threads();
+
+ _num_coarsenings = other->_num_coarsenings;
+
+ memcpy(_rs_threads_vtimes, other->_rs_threads_vtimes, sizeof(double) * _num_vtimes);
+
+ set_sampling_thread_vtime(other->sampling_thread_vtime());
+}
+
+void G1RemSetSummary::subtract_from(G1RemSetSummary* other) {
+ assert(other != NULL, "just checking");
+ assert(remset() == other->remset(), "just checking");
+ assert(_num_vtimes == other->_num_vtimes, "just checking");
+
+ _num_refined_cards = other->num_concurrent_refined_cards() - _num_refined_cards;
+
+ _num_processed_buf_mutator = other->num_processed_buf_mutator() - _num_processed_buf_mutator;
+ _num_processed_buf_rs_threads = other->num_processed_buf_rs_threads() - _num_processed_buf_rs_threads;
+
+ _num_coarsenings = other->num_coarsenings() - _num_coarsenings;
+
+ for (uint i = 0; i < _num_vtimes; i++) {
+ set_rs_thread_vtime(i, other->rs_thread_vtime(i) - rs_thread_vtime(i));
+ }
+
+ _sampling_thread_vtime = other->sampling_thread_vtime() - _sampling_thread_vtime;
+}
+
+class HRRSStatsIter: public HeapRegionClosure {
+ size_t _occupied;
+
+ size_t _total_rs_mem_sz;
+ size_t _max_rs_mem_sz;
+ HeapRegion* _max_rs_mem_sz_region;
+
+ size_t _total_code_root_mem_sz;
+ size_t _max_code_root_mem_sz;
+ HeapRegion* _max_code_root_mem_sz_region;
+public:
+ HRRSStatsIter() :
+ _occupied(0),
+ _total_rs_mem_sz(0),
+ _max_rs_mem_sz(0),
+ _max_rs_mem_sz_region(NULL),
+ _total_code_root_mem_sz(0),
+ _max_code_root_mem_sz(0),
+ _max_code_root_mem_sz_region(NULL)
+ {}
+
+ bool doHeapRegion(HeapRegion* r) {
+ HeapRegionRemSet* hrrs = r->rem_set();
+
+ // HeapRegionRemSet::mem_size() includes the
+ // size of the strong code roots
+ size_t rs_mem_sz = hrrs->mem_size();
+ if (rs_mem_sz > _max_rs_mem_sz) {
+ _max_rs_mem_sz = rs_mem_sz;
+ _max_rs_mem_sz_region = r;
+ }
+ _total_rs_mem_sz += rs_mem_sz;
+
+ size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
+ if (code_root_mem_sz > _max_code_root_mem_sz) {
+ _max_code_root_mem_sz = code_root_mem_sz;
+ _max_code_root_mem_sz_region = r;
+ }
+ _total_code_root_mem_sz += code_root_mem_sz;
+
+ size_t occ = hrrs->occupied();
+ _occupied += occ;
+ return false;
+ }
+ size_t total_rs_mem_sz() { return _total_rs_mem_sz; }
+ size_t max_rs_mem_sz() { return _max_rs_mem_sz; }
+ HeapRegion* max_rs_mem_sz_region() { return _max_rs_mem_sz_region; }
+ size_t total_code_root_mem_sz() { return _total_code_root_mem_sz; }
+ size_t max_code_root_mem_sz() { return _max_code_root_mem_sz; }
+ HeapRegion* max_code_root_mem_sz_region() { return _max_code_root_mem_sz_region; }
+ size_t occupied() { return _occupied; }
+};
+
+double calc_percentage(size_t numerator, size_t denominator) {
+ if (denominator != 0) {
+ return (double)numerator / denominator * 100.0;
+ } else {
+ return 0.0;
+ }
+}
+
+void G1RemSetSummary::print_on(outputStream* out) {
+ out->print_cr("\n Concurrent RS processed "SIZE_FORMAT" cards",
+ num_concurrent_refined_cards());
+ out->print_cr(" Of %d completed buffers:", num_processed_buf_total());
+ out->print_cr(" %8d (%5.1f%%) by concurrent RS threads.",
+ num_processed_buf_rs_threads(),
+ calc_percentage(num_processed_buf_rs_threads(), num_processed_buf_total()));
+ out->print_cr(" %8d (%5.1f%%) by mutator threads.",
+ num_processed_buf_mutator(),
+ calc_percentage(num_processed_buf_mutator(), num_processed_buf_total()));
+ out->print_cr(" Concurrent RS threads times (s)");
+ out->print(" ");
+ for (uint i = 0; i < _num_vtimes; i++) {
+ out->print(" %5.2f", rs_thread_vtime(i));
+ }
+ out->cr();
+ out->print_cr(" Concurrent sampling threads times (s)");
+ out->print_cr(" %5.2f", sampling_thread_vtime());
+
+ HRRSStatsIter blk;
+ G1CollectedHeap::heap()->heap_region_iterate(&blk);
+ // RemSet stats
+ out->print_cr(" Total heap region rem set sizes = "SIZE_FORMAT"K."
+ " Max = "SIZE_FORMAT"K.",
+ blk.total_rs_mem_sz()/K, blk.max_rs_mem_sz()/K);
+ out->print_cr(" Static structures = "SIZE_FORMAT"K,"
+ " free_lists = "SIZE_FORMAT"K.",
+ HeapRegionRemSet::static_mem_size() / K,
+ HeapRegionRemSet::fl_mem_size() / K);
+ out->print_cr(" "SIZE_FORMAT" occupied cards represented.",
+ blk.occupied());
+ HeapRegion* max_rs_mem_sz_region = blk.max_rs_mem_sz_region();
+ HeapRegionRemSet* max_rs_rem_set = max_rs_mem_sz_region->rem_set();
+ out->print_cr(" Max size region = "HR_FORMAT", "
+ "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
+ HR_FORMAT_PARAMS(max_rs_mem_sz_region),
+ (max_rs_rem_set->mem_size() + K - 1)/K,
+ (max_rs_rem_set->occupied() + K - 1)/K);
+ out->print_cr(" Did %d coarsenings.", num_coarsenings());
+ // Strong code root stats
+ out->print_cr(" Total heap region code-root set sizes = "SIZE_FORMAT"K."
+ " Max = "SIZE_FORMAT"K.",
+ blk.total_code_root_mem_sz()/K, blk.max_code_root_mem_sz()/K);
+ HeapRegion* max_code_root_mem_sz_region = blk.max_code_root_mem_sz_region();
+ HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region->rem_set();
+ out->print_cr(" Max size region = "HR_FORMAT", "
+ "size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
+ HR_FORMAT_PARAMS(max_code_root_mem_sz_region),
+ (max_code_root_rem_set->strong_code_roots_mem_size() + K - 1)/K,
+ (max_code_root_rem_set->strong_code_roots_list_length()));
+}
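HRRSStatsIter folds per-region sizes into running totals plus a running max, and print_on() then reports both. The same fold over plain numbers (hypothetical sizes, our own names):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<size_t> region_rs_kb;   // per-region rem set sizes, in K
      region_rs_kb.push_back(12);
      region_rs_kb.push_back(80);
      region_rs_kb.push_back(33);

      size_t total = 0, max_sz = 0;
      size_t max_idx = 0;
      for (size_t i = 0; i < region_rs_kb.size(); i++) {
        total += region_rs_kb[i];
        if (region_rs_kb[i] > max_sz) { max_sz = region_rs_kb[i]; max_idx = i; }
      }
      printf("Total heap region rem set sizes = %zuK. Max = %zuK (region %zu).\n",
             total, max_sz, max_idx);
      return 0;
    }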
diff --git a/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp b/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp
new file mode 100644
index 000000000..7f5f37763
--- /dev/null
+++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSETSUMMARY_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSETSUMMARY_HPP
+
+#include "utilities/ostream.hpp"
+
+class G1RemSet;
+
+// A G1RemSetSummary manages statistical information about the G1RemSet
+
+class G1RemSetSummary VALUE_OBJ_CLASS_SPEC {
+private:
+ friend class GetRSThreadVTimeClosure;
+
+ G1RemSet* _remset;
+
+ G1RemSet* remset() const {
+ return _remset;
+ }
+
+ size_t _num_refined_cards;
+ size_t _num_processed_buf_mutator;
+ size_t _num_processed_buf_rs_threads;
+
+ size_t _num_coarsenings;
+
+ double* _rs_threads_vtimes;
+ size_t _num_vtimes;
+
+ double _sampling_thread_vtime;
+
+ void set_rs_thread_vtime(uint thread, double value);
+ void set_sampling_thread_vtime(double value) {
+ _sampling_thread_vtime = value;
+ }
+
+ void free_and_null() {
+ if (_rs_threads_vtimes) {
+ FREE_C_HEAP_ARRAY(double, _rs_threads_vtimes, mtGC);
+ _rs_threads_vtimes = NULL;
+ _num_vtimes = 0;
+ }
+ }
+
+ // update this summary with current data from various places
+ void update();
+
+public:
+ G1RemSetSummary() : _remset(NULL), _num_refined_cards(0),
+ _num_processed_buf_mutator(0), _num_processed_buf_rs_threads(0), _num_coarsenings(0),
+ _rs_threads_vtimes(NULL), _num_vtimes(0), _sampling_thread_vtime(0.0) {
+ }
+
+ ~G1RemSetSummary() {
+ free_and_null();
+ }
+
+ // set the counters in this summary to the values of the others
+ void set(G1RemSetSummary* other);
+ // subtract all counters from the other summary, and set them in the current
+ void subtract_from(G1RemSetSummary* other);
+
+ // initialize and get the first sampling
+ void initialize(G1RemSet* remset, uint num_workers);
+
+ void print_on(outputStream* out);
+
+ double rs_thread_vtime(uint thread) const;
+
+ double sampling_thread_vtime() const {
+ return _sampling_thread_vtime;
+ }
+
+ size_t num_concurrent_refined_cards() const {
+ return _num_refined_cards;
+ }
+
+ size_t num_processed_buf_mutator() const {
+ return _num_processed_buf_mutator;
+ }
+
+ size_t num_processed_buf_rs_threads() const {
+ return _num_processed_buf_rs_threads;
+ }
+
+ size_t num_processed_buf_total() const {
+ return num_processed_buf_mutator() + num_processed_buf_rs_threads();
+ }
+
+ size_t num_coarsenings() const {
+ return _num_coarsenings;
+ }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSETSUMMARY_HPP
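How a summary is driven over a reporting period, sketched with a one-counter stand-in rather than the real G1RemSet wiring: initialize() takes the first sample, subtract_from() turns the saved snapshot into a delta against a newer one, and set() re-baselines for the next period.

    #include <cstdio>
    #include <cstddef>

    struct ToySummary {
      size_t cards;

      void initialize(size_t v) { cards = v; }           // first sample
      void set(const ToySummary* other) { cards = other->cards; }
      void subtract_from(const ToySummary* other) {      // this = other - this
        cards = other->cards - cards;
      }
    };

    int main() {
      ToySummary prev;    prev.initialize(100);    // at G1RemSet construction
      ToySummary current; current.initialize(250); // at the periodic report

      prev.subtract_from(&current);                // prev now holds the delta
      printf("refined this period: %zu\n", prev.cards);  // 150
      prev.set(&current);                          // baseline for next period
      return 0;
    }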
diff --git a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
index 9cee1eb1b..218be0c0e 100644
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp
@@ -47,7 +47,7 @@ void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
JavaThread* jt = (JavaThread*)thr;
jt->satb_mark_queue().enqueue(pre_val);
} else {
- MutexLocker x(Shared_SATB_Q_lock);
+ MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
}
}
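The switch to MutexLockerEx with Mutex::_no_safepoint_check_flag acquires the shared SATB queue lock without the usual safepoint check, since a non-Java thread must not block for a safepoint here. A toy RAII analogue of a flagged acquire (std::mutex stands in; the real safepoint semantics are HotSpot-internal):

    #include <mutex>
    #include <cstdio>

    class FlaggedLocker {
      std::mutex& _m;
    public:
      FlaggedLocker(std::mutex& m, bool skip_safepoint_check) : _m(m) {
        // In HotSpot the flag changes how the lock is acquired; here we can
        // only record the intent before taking the lock.
        if (skip_safepoint_check) printf("acquiring without safepoint check\n");
        _m.lock();
      }
      ~FlaggedLocker() { _m.unlock(); }
    };

    std::mutex shared_satb_q_lock;

    int main() {
      FlaggedLocker x(shared_satb_q_lock, /*skip_safepoint_check=*/true);
      // ... enqueue pre_val on the shared queue ...
      return 0;
    }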
diff --git a/src/share/vm/gc_implementation/g1/g1YCTypes.hpp b/src/share/vm/gc_implementation/g1/g1YCTypes.hpp
new file mode 100644
index 000000000..7d2216059
--- /dev/null
+++ b/src/share/vm/gc_implementation/g1/g1YCTypes.hpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
+
+#include "utilities/debug.hpp"
+
+enum G1YCType {
+ Normal,
+ InitialMark,
+ DuringMark,
+ Mixed,
+ G1YCTypeEndSentinel
+};
+
+class G1YCTypeHelper {
+ public:
+ static const char* to_string(G1YCType type) {
+ switch(type) {
+ case Normal: return "Normal";
+ case InitialMark: return "Initial Mark";
+ case DuringMark: return "During Mark";
+ case Mixed: return "Mixed";
+ default: ShouldNotReachHere(); return NULL;
+ }
+ }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
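A usage sketch for the enum and helper above, standalone (the HotSpot-specific ShouldNotReachHere() is replaced by a plain default so this compiles outside the VM):

    #include <cstdio>

    enum G1YCType { Normal, InitialMark, DuringMark, Mixed, G1YCTypeEndSentinel };

    static const char* to_string(G1YCType type) {
      switch (type) {
        case Normal:      return "Normal";
        case InitialMark: return "Initial Mark";
        case DuringMark:  return "During Mark";
        case Mixed:       return "Mixed";
        default:          return "Unknown";
      }
    }

    int main() {
      printf("%s\n", to_string(Mixed));   // prints "Mixed"
      return 0;
    }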
diff --git a/src/share/vm/gc_implementation/g1/g1_globals.hpp b/src/share/vm/gc_implementation/g1/g1_globals.hpp
index 7e62b70cd..c7d8049fe 100644
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -96,11 +96,6 @@
"the buffer will be enqueued for processing. A value of 0 " \
"specifies that mutator threads should not do such filtering.") \
\
- develop(intx, G1ExtraRegionSurvRate, 33, \
- "If the young survival rate is S, and there's room left in " \
- "to-space, we will allow regions whose survival rate is up to " \
- "S + (1 - S)*X, where X is this parameter (as a fraction.)") \
- \
develop(bool, G1SATBPrintStubs, false, \
"If true, print generated stubs for the SATB barrier") \
\
@@ -110,9 +105,6 @@
develop(bool, G1RSBarrierRegionFilter, true, \
"If true, generate region filtering code in RS barrier") \
\
- develop(bool, G1RSBarrierNullFilter, true, \
- "If true, generate null-pointer filtering code in RS barrier") \
- \
develop(bool, G1DeferredRSUpdate, true, \
"If true, use deferred RS updates") \
\
@@ -120,9 +112,6 @@
"If true, verify that no dirty cards remain after RS log " \
"processing.") \
\
- develop(bool, G1RSCountHisto, false, \
- "If true, print a histogram of RS occupancies after each pause") \
- \
diagnostic(bool, G1PrintRegionLivenessInfo, false, \
"Prints the liveness information for all regions in the heap " \
"at the end of a marking cycle.") \
@@ -169,9 +158,6 @@
product(uintx, G1ConcRSHotCardLimit, 4, \
"The threshold that defines (>=) a hot card.") \
\
- develop(bool, G1PrintOopAppls, false, \
- "When true, print applications of closures to external locs.") \
- \
develop(intx, G1RSetRegionEntriesBase, 256, \
"Max number of regions in a fine-grain table per MB.") \
\
@@ -329,7 +315,14 @@
\
develop(bool, G1EvacuationFailureALotDuringMixedGC, true, \
"Force use of evacuation failure handling during mixed " \
- "evacuation pauses")
+ "evacuation pauses") \
+ \
+ diagnostic(bool, G1VerifyRSetsDuringFullGC, false, \
+ "If true, perform verification of each heap region's " \
+ "remembered set when verifying the heap during a full GC.") \
+ \
+ diagnostic(bool, G1VerifyHeapRegionCodeRoots, false, \
+ "Verify the code root lists attached to each heap region.")
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
diff --git a/src/share/vm/gc_implementation/g1/heapRegion.cpp b/src/share/vm/gc_implementation/g1/heapRegion.cpp
index f33e0456e..726acbfcd 100644
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
@@ -50,144 +51,6 @@ FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
OopClosure* oc) :
_r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
-class VerifyLiveClosure: public OopClosure {
-private:
- G1CollectedHeap* _g1h;
- CardTableModRefBS* _bs;
- oop _containing_obj;
- bool _failures;
- int _n_failures;
- VerifyOption _vo;
-public:
- // _vo == UsePrevMarking -> use "prev" marking information,
- // _vo == UseNextMarking -> use "next" marking information,
- // _vo == UseMarkWord -> use mark word from object header.
- VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
- _g1h(g1h), _bs(NULL), _containing_obj(NULL),
- _failures(false), _n_failures(0), _vo(vo)
- {
- BarrierSet* bs = _g1h->barrier_set();
- if (bs->is_a(BarrierSet::CardTableModRef))
- _bs = (CardTableModRefBS*)bs;
- }
-
- void set_containing_obj(oop obj) {
- _containing_obj = obj;
- }
-
- bool failures() { return _failures; }
- int n_failures() { return _n_failures; }
-
- virtual void do_oop(narrowOop* p) { do_oop_work(p); }
- virtual void do_oop( oop* p) { do_oop_work(p); }
-
- void print_object(outputStream* out, oop obj) {
-#ifdef PRODUCT
- Klass* k = obj->klass();
- const char* class_name = InstanceKlass::cast(k)->external_name();
- out->print_cr("class name %s", class_name);
-#else // PRODUCT
- obj->print_on(out);
-#endif // PRODUCT
- }
-
- template <class T>
- void do_oop_work(T* p) {
- assert(_containing_obj != NULL, "Precondition");
- assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
- "Precondition");
- T heap_oop = oopDesc::load_heap_oop(p);
- if (!oopDesc::is_null(heap_oop)) {
- oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- bool failed = false;
- if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
- MutexLockerEx x(ParGCRareEvent_lock,
- Mutex::_no_safepoint_check_flag);
-
- if (!_failures) {
- gclog_or_tty->print_cr("");
- gclog_or_tty->print_cr("----------");
- }
- if (!_g1h->is_in_closed_subset(obj)) {
- HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
- gclog_or_tty->print_cr("Field "PTR_FORMAT
- " of live obj "PTR_FORMAT" in region "
- "["PTR_FORMAT", "PTR_FORMAT")",
- p, (void*) _containing_obj,
- from->bottom(), from->end());
- print_object(gclog_or_tty, _containing_obj);
- gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
- (void*) obj);
- } else {
- HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
- HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
- gclog_or_tty->print_cr("Field "PTR_FORMAT
- " of live obj "PTR_FORMAT" in region "
- "["PTR_FORMAT", "PTR_FORMAT")",
- p, (void*) _containing_obj,
- from->bottom(), from->end());
- print_object(gclog_or_tty, _containing_obj);
- gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
- "["PTR_FORMAT", "PTR_FORMAT")",
- (void*) obj, to->bottom(), to->end());
- print_object(gclog_or_tty, obj);
- }
- gclog_or_tty->print_cr("----------");
- gclog_or_tty->flush();
- _failures = true;
- failed = true;
- _n_failures++;
- }
-
- if (!_g1h->full_collection()) {
- HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
- HeapRegion* to = _g1h->heap_region_containing(obj);
- if (from != NULL && to != NULL &&
- from != to &&
- !to->isHumongous()) {
- jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
- jbyte cv_field = *_bs->byte_for_const(p);
- const jbyte dirty = CardTableModRefBS::dirty_card_val();
-
- bool is_bad = !(from->is_young()
- || to->rem_set()->contains_reference(p)
- || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
- (_containing_obj->is_objArray() ?
- cv_field == dirty
- : cv_obj == dirty || cv_field == dirty));
- if (is_bad) {
- MutexLockerEx x(ParGCRareEvent_lock,
- Mutex::_no_safepoint_check_flag);
-
- if (!_failures) {
- gclog_or_tty->print_cr("");
- gclog_or_tty->print_cr("----------");
- }
- gclog_or_tty->print_cr("Missing rem set entry:");
- gclog_or_tty->print_cr("Field "PTR_FORMAT" "
- "of obj "PTR_FORMAT", "
- "in region "HR_FORMAT,
- p, (void*) _containing_obj,
- HR_FORMAT_PARAMS(from));
- _containing_obj->print_on(gclog_or_tty);
- gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
- "in region "HR_FORMAT,
- (void*) obj,
- HR_FORMAT_PARAMS(to));
- obj->print_on(gclog_or_tty);
- gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
- cv_obj, cv_field);
- gclog_or_tty->print_cr("----------");
- gclog_or_tty->flush();
- _failures = true;
- if (!failed) _n_failures++;
- }
- }
- }
- }
- }
-};
-
template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
HeapRegion* hr,
@@ -286,18 +149,15 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER 2048
-void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
- // region_size in bytes
+size_t HeapRegion::max_region_size() {
+ return (size_t)MAX_REGION_SIZE;
+}
+
+void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
uintx region_size = G1HeapRegionSize;
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
- // We base the automatic calculation on the min heap size. This
- // can be problematic if the spread between min and max is quite
- // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
- // the max size, the region size might be way too large for the
- // min size. Either way, some users might have to set the region
- // size manually for some -Xms / -Xmx combos.
-
- region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
+ size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
+ region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
(uintx) MIN_REGION_SIZE);
}
@@ -314,6 +174,11 @@ void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
region_size = MAX_REGION_SIZE;
}
+ if (region_size != G1HeapRegionSize) {
+ // Update the flag to make sure that PrintFlagsFinal logs the correct value
+ FLAG_SET_ERGO(uintx, G1HeapRegionSize, region_size);
+ }
+
// And recalculate the log.
region_size_log = log2_long((jlong) region_size);
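The new ergonomic sizes regions from the average of the initial and max heap size, clamps to the [MIN, MAX] region-size bounds, then rounds to a power of two via the log2 recalculation. A standalone sketch of that arithmetic (constants assumed; 64-bit size_t):

    #include <cstdio>
    #include <cstddef>

    int main() {
      size_t initial_heap   = 128UL * 1024 * 1024;         // -Xms128m
      size_t max_heap       = 32UL * 1024 * 1024 * 1024;   // -Xmx32g
      size_t target_regions = 2048;                        // TARGET_REGION_NUMBER
      size_t min_region     = 1UL * 1024 * 1024;           // assumed MIN_REGION_SIZE

      size_t average     = (initial_heap + max_heap) / 2;
      size_t region_size = average / target_regions;
      if (region_size < min_region) region_size = min_region;

      // Round down to a power of two, as the log2 recalculation does.
      size_t pow2 = min_region;
      while (pow2 * 2 <= region_size) pow2 *= 2;

      printf("region size: %zuM\n", pow2 / (1024 * 1024));  // 8M for this combo
      return 0;
    }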
@@ -363,7 +228,7 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
if (!par) {
// If this is parallel, this will be done later.
HeapRegionRemSet* hrrs = rem_set();
- if (hrrs != NULL) hrrs->clear();
+ hrrs->clear();
_claimed = InitialClaimValue;
}
zero_marked_bytes();
@@ -500,6 +365,7 @@ HeapRegion::HeapRegion(uint hrs_index,
_rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
_predicted_bytes_to_copy(0)
{
+ _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
_orig_end = mr.end();
// Note that initialize() will set the start of the unmarked area of the
// region.
@@ -507,8 +373,6 @@ HeapRegion::HeapRegion(uint hrs_index,
set_top(bottom());
set_saved_mark();
- _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
-
assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}
@@ -728,6 +592,160 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
return NULL;
}
+// Code roots support
+
+void HeapRegion::add_strong_code_root(nmethod* nm) {
+ HeapRegionRemSet* hrrs = rem_set();
+ hrrs->add_strong_code_root(nm);
+}
+
+void HeapRegion::remove_strong_code_root(nmethod* nm) {
+ HeapRegionRemSet* hrrs = rem_set();
+ hrrs->remove_strong_code_root(nm);
+}
+
+void HeapRegion::migrate_strong_code_roots() {
+ assert(in_collection_set(), "only collection set regions");
+ assert(!isHumongous(), "not humongous regions");
+
+ HeapRegionRemSet* hrrs = rem_set();
+ hrrs->migrate_strong_code_roots();
+}
+
+void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
+ HeapRegionRemSet* hrrs = rem_set();
+ hrrs->strong_code_roots_do(blk);
+}
+
+class VerifyStrongCodeRootOopClosure: public OopClosure {
+ const HeapRegion* _hr;
+ nmethod* _nm;
+ bool _failures;
+ bool _has_oops_in_region;
+
+ template <class T> void do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+
+ // Note: not all the oops embedded in the nmethod are in the
+ // current region. We only look at those which are.
+ if (_hr->is_in(obj)) {
+ // Object is in the region. Check that it's below top
+ if (_hr->top() <= (HeapWord*)obj) {
+ // Object is above top
+ gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
+ "["PTR_FORMAT", "PTR_FORMAT") is above "
+ "top "PTR_FORMAT,
+ obj, _hr->bottom(), _hr->end(), _hr->top());
+ _failures = true;
+ return;
+ }
+ // Nmethod has at least one oop in the current region
+ _has_oops_in_region = true;
+ }
+ }
+ }
+
+public:
+ VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
+ _hr(hr), _failures(false), _has_oops_in_region(false) {}
+
+ void do_oop(narrowOop* p) { do_oop_work(p); }
+ void do_oop(oop* p) { do_oop_work(p); }
+
+ bool failures() { return _failures; }
+ bool has_oops_in_region() { return _has_oops_in_region; }
+};
+
+class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
+ const HeapRegion* _hr;
+ bool _failures;
+public:
+ VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
+ _hr(hr), _failures(false) {}
+
+ void do_code_blob(CodeBlob* cb) {
+ nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
+ if (nm != NULL) {
+ // Verify that the nmethod is live
+ if (!nm->is_alive()) {
+ gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
+ PTR_FORMAT" in its strong code roots",
+ _hr->bottom(), _hr->end(), nm);
+ _failures = true;
+ } else {
+ VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
+ nm->oops_do(&oop_cl);
+ if (!oop_cl.has_oops_in_region()) {
+ gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
+ PTR_FORMAT" in its strong code roots "
+ "with no pointers into region",
+ _hr->bottom(), _hr->end(), nm);
+ _failures = true;
+ } else if (oop_cl.failures()) {
+ gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
+ "failures for nmethod "PTR_FORMAT,
+ _hr->bottom(), _hr->end(), nm);
+ _failures = true;
+ }
+ }
+ }
+ }
+
+ bool failures() { return _failures; }
+};
+
+void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
+ if (!G1VerifyHeapRegionCodeRoots) {
+ // We're not verifying code roots.
+ return;
+ }
+ if (vo == VerifyOption_G1UseMarkWord) {
+ // Marking verification during a full GC is performed after class
+ // unloading, code cache unloading, etc., so the strong code roots
+ // attached to each heap region are in an inconsistent state. They won't
+ // be consistent until the strong code roots are rebuilt after the
+ // actual GC. Skip verifying the strong code roots at this point
+ // in time.
+ assert(VerifyDuringGC, "only way to get here");
+ return;
+ }
+
+ HeapRegionRemSet* hrrs = rem_set();
+ int strong_code_roots_length = hrrs->strong_code_roots_list_length();
+
+ // if this region is empty then there should be no entries
+ // on its strong code root list
+ if (is_empty()) {
+ if (strong_code_roots_length > 0) {
+ gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
+ "but has "INT32_FORMAT" code root entries",
+ bottom(), end(), strong_code_roots_length);
+ *failures = true;
+ }
+ return;
+ }
+
+ // An H-region should have an empty strong code root list
+ if (isHumongous()) {
+ if (strong_code_roots_length > 0) {
+ gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
+ "but has "INT32_FORMAT" code root entries",
+ bottom(), end(), strong_code_roots_length);
+ *failures = true;
+ }
+ return;
+ }
+
+ VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
+ strong_code_roots_do(&cb_cl);
+
+ if (cb_cl.failures()) {
+ *failures = true;
+ }
+}
+
void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
if (isHumongous()) {
@@ -756,10 +774,143 @@ void HeapRegion::print_on(outputStream* st) const {
G1OffsetTableContigSpace::print_on(st);
}
-void HeapRegion::verify() const {
- bool dummy = false;
- verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
-}
+class VerifyLiveClosure: public OopClosure {
+private:
+ G1CollectedHeap* _g1h;
+ CardTableModRefBS* _bs;
+ oop _containing_obj;
+ bool _failures;
+ int _n_failures;
+ VerifyOption _vo;
+public:
+ // _vo == UsePrevMarking -> use "prev" marking information,
+ // _vo == UseNextMarking -> use "next" marking information,
+ // _vo == UseMarkWord -> use mark word from object header.
+ VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
+ _g1h(g1h), _bs(NULL), _containing_obj(NULL),
+ _failures(false), _n_failures(0), _vo(vo)
+ {
+ BarrierSet* bs = _g1h->barrier_set();
+ if (bs->is_a(BarrierSet::CardTableModRef))
+ _bs = (CardTableModRefBS*)bs;
+ }
+
+ void set_containing_obj(oop obj) {
+ _containing_obj = obj;
+ }
+
+ bool failures() { return _failures; }
+ int n_failures() { return _n_failures; }
+
+ virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+ virtual void do_oop( oop* p) { do_oop_work(p); }
+
+ void print_object(outputStream* out, oop obj) {
+#ifdef PRODUCT
+ Klass* k = obj->klass();
+ const char* class_name = InstanceKlass::cast(k)->external_name();
+ out->print_cr("class name %s", class_name);
+#else // PRODUCT
+ obj->print_on(out);
+#endif // PRODUCT
+ }
+
+ template <class T>
+ void do_oop_work(T* p) {
+ assert(_containing_obj != NULL, "Precondition");
+ assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
+ "Precondition");
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ bool failed = false;
+ if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
+ MutexLockerEx x(ParGCRareEvent_lock,
+ Mutex::_no_safepoint_check_flag);
+
+ if (!_failures) {
+ gclog_or_tty->print_cr("");
+ gclog_or_tty->print_cr("----------");
+ }
+ if (!_g1h->is_in_closed_subset(obj)) {
+ HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+ gclog_or_tty->print_cr("Field "PTR_FORMAT
+ " of live obj "PTR_FORMAT" in region "
+ "["PTR_FORMAT", "PTR_FORMAT")",
+ p, (void*) _containing_obj,
+ from->bottom(), from->end());
+ print_object(gclog_or_tty, _containing_obj);
+ gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
+ (void*) obj);
+ } else {
+ HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+ HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
+ gclog_or_tty->print_cr("Field "PTR_FORMAT
+ " of live obj "PTR_FORMAT" in region "
+ "["PTR_FORMAT", "PTR_FORMAT")",
+ p, (void*) _containing_obj,
+ from->bottom(), from->end());
+ print_object(gclog_or_tty, _containing_obj);
+ gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
+ "["PTR_FORMAT", "PTR_FORMAT")",
+ (void*) obj, to->bottom(), to->end());
+ print_object(gclog_or_tty, obj);
+ }
+ gclog_or_tty->print_cr("----------");
+ gclog_or_tty->flush();
+ _failures = true;
+ failed = true;
+ _n_failures++;
+ }
+
+ if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
+ HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+ HeapRegion* to = _g1h->heap_region_containing(obj);
+ if (from != NULL && to != NULL &&
+ from != to &&
+ !to->isHumongous()) {
+ jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
+ jbyte cv_field = *_bs->byte_for_const(p);
+ const jbyte dirty = CardTableModRefBS::dirty_card_val();
+
+ bool is_bad = !(from->is_young()
+ || to->rem_set()->contains_reference(p)
+ || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
+ (_containing_obj->is_objArray() ?
+ cv_field == dirty
+ : cv_obj == dirty || cv_field == dirty));
+ if (is_bad) {
+ MutexLockerEx x(ParGCRareEvent_lock,
+ Mutex::_no_safepoint_check_flag);
+
+ if (!_failures) {
+ gclog_or_tty->print_cr("");
+ gclog_or_tty->print_cr("----------");
+ }
+ gclog_or_tty->print_cr("Missing rem set entry:");
+ gclog_or_tty->print_cr("Field "PTR_FORMAT" "
+ "of obj "PTR_FORMAT", "
+ "in region "HR_FORMAT,
+ p, (void*) _containing_obj,
+ HR_FORMAT_PARAMS(from));
+ _containing_obj->print_on(gclog_or_tty);
+ gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
+ "in region "HR_FORMAT,
+ (void*) obj,
+ HR_FORMAT_PARAMS(to));
+ obj->print_on(gclog_or_tty);
+ gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
+ cv_obj, cv_field);
+ gclog_or_tty->print_cr("----------");
+ gclog_or_tty->flush();
+ _failures = true;
+ if (!failed) _n_failures++;
+ }
+ }
+ }
+ }
+ }
+};
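
The is_bad expression above packs the whole "is a missing remembered set entry acceptable?" rule into a single condition. Restated as a standalone predicate for readability; names are illustrative only, and the objArray special case (where only the field's card is consulted, not the object head's) is folded into the card_dirty flag:

    // A cross-region reference may be absent from the target region's RSet if:
    //  * the source region is young (young regions need no incoming entries),
    //  * the RSet already records the reference, or
    //  * log buffers were not flushed before verification and the relevant
    //    card is still dirty, i.e. the entry is merely pending.
    bool rset_entry_acceptably_missing(bool from_is_young,
                                       bool rset_contains_ref,
                                       bool buffers_flushed,
                                       bool card_dirty) {
      return from_is_young
          || rset_contains_ref
          || (!buffers_flushed && card_dirty);
    }
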
// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.
@@ -798,7 +949,7 @@ void HeapRegion::verify(VerifyOption vo,
if (!g1->is_obj_dead_cond(obj, this, vo)) {
if (obj->is_oop()) {
Klass* klass = obj->klass();
- if (!klass->is_metadata()) {
+ if (!klass->is_metaspace_object()) {
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
"not metadata", klass, obj);
*failures = true;
@@ -899,6 +1050,13 @@ void HeapRegion::verify(VerifyOption vo,
*failures = true;
return;
}
+
+ verify_strong_code_roots(vo, failures);
+}
+
+void HeapRegion::verify() const {
+ bool dummy = false;
+ verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}
// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
diff --git a/src/share/vm/gc_implementation/g1/heapRegion.hpp b/src/share/vm/gc_implementation/g1/heapRegion.hpp
index 7c79195d3..ad2b06497 100644
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,6 +52,7 @@ class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;
class HeapRegionSetBase;
+class nmethod;
#define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) \
@@ -354,13 +355,15 @@ class HeapRegion: public G1OffsetTableContigSpace {
~((1 << (size_t) LogOfHRGrainBytes) - 1);
}
+ static size_t max_region_size();
+
// It sets up the heap region size (GrainBytes / GrainWords), as
// well as other related fields that are based on the heap region
// size (LogOfHRGrainBytes / LogOfHRGrainWords /
// CardsPerRegion). All those fields are considered constant
// throughout the JVM's execution, therefore they should only be set
// up once during initialization time.
- static void setup_heap_region_size(uintx min_heap_size);
+ static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
enum ClaimValues {
InitialClaimValue = 0,
@@ -371,7 +374,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
RebuildRSClaimValue = 5,
ParEvacFailureClaimValue = 6,
AggregateCountClaimValue = 7,
- VerifyCountClaimValue = 8
+ VerifyCountClaimValue = 8,
+ ParMarkRootClaimValue = 9
};
inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
@@ -796,6 +800,25 @@ class HeapRegion: public G1OffsetTableContigSpace {
virtual void reset_after_compaction();
+ // Routines for managing a list of code roots (attached to this
+ // region's RSet) that point into this heap region.
+ void add_strong_code_root(nmethod* nm);
+ void remove_strong_code_root(nmethod* nm);
+
+ // During a collection, migrate the successfully evacuated
+ // strong code roots that pointed into this region to the
+ // new regions that they now point into. Unsuccessfully
+ // evacuated code roots are not migrated.
+ void migrate_strong_code_roots();
+
+ // Applies blk->do_code_blob() to each of the entries in
+ // the strong code roots list for this region
+ void strong_code_roots_do(CodeBlobClosure* blk) const;
+
+ // Verify that the entries on the strong code root list for this
+ // region are live and include at least one pointer into this region.
+ void verify_strong_code_roots(VerifyOption vo, bool* failures) const;
+
void print() const;
void print_on(outputStream* st) const;
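
A hedged usage sketch of the new per-region API, assuming the HotSpot headers are available: counting the code roots attached to a region with a CodeBlobClosure, the same interface the verification closure earlier in this patch implements. The counting closure itself is hypothetical; only strong_code_roots_do and do_code_blob come from this changeset.

    class CountingCodeBlobClosure : public CodeBlobClosure {
      int _count;
     public:
      CountingCodeBlobClosure() : _count(0) {}
      void do_code_blob(CodeBlob* cb) { _count++; }
      int count() const { return _count; }
    };

    // Given a HeapRegion* hr:
    //   CountingCodeBlobClosure cl;
    //   hr->strong_code_roots_do(&cl);
    //   // cl.count() now equals hr->rem_set()->strong_code_roots_list_length()
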
diff --git a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
index 1e005a26e..69eaa53c3 100644
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
#include "oops/oop.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "utilities/growableArray.hpp"
class PerRegionTable: public CHeapObj<mtGC> {
friend class OtherRegionsTable;
@@ -242,11 +243,13 @@ public:
PerRegionTable* cur = _free_list;
size_t res = 0;
while (cur != NULL) {
- res += sizeof(PerRegionTable);
+ res += cur->mem_size();
cur = cur->next();
}
return res;
}
+
+ static void test_fl_mem_size();
};
PerRegionTable* PerRegionTable::_free_list = NULL;
@@ -282,7 +285,8 @@ OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
_fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
}
- _fine_grain_regions = new PerRegionTablePtr[_max_fine_entries];
+ _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
+ mtGC, 0, AllocFailStrategy::RETURN_NULL);
if (_fine_grain_regions == NULL) {
vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
@@ -706,10 +710,11 @@ size_t OtherRegionsTable::mem_size() const {
// Cast away const in this case.
MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
size_t sum = 0;
- PerRegionTable * cur = _first_all_fine_prts;
- while (cur != NULL) {
- sum += cur->mem_size();
- cur = cur->next();
+ // all PRTs are of the same size so it is sufficient to query only one of them.
+ if (_first_all_fine_prts != NULL) {
+ assert(_last_all_fine_prts != NULL &&
+ _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
+ sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
}
sum += (sizeof(PerRegionTable*) * _max_fine_entries);
sum += (_coarse_map.size_in_words() * HeapWordSize);
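
Both hunks above rest on the same observation: every PerRegionTable occupies the same number of bytes, so fl_mem_size() should ask a representative element (sizeof(PerRegionTable) undercounts, since mem_size() also covers the table's bitmap storage), and the fine-PRT total can be one element size times the element count instead of an O(n) list walk. A minimal sketch of that reduction, with a stand-in node type:

    #include <cstddef>

    struct Node {
      Node* next;
      size_t mem_size() const { return 128; }  // same for every node, by assumption
    };

    size_t list_mem_size_walk(Node* head) {       // the old O(n) style
      size_t sum = 0;
      for (Node* cur = head; cur != NULL; cur = cur->next) {
        sum += cur->mem_size();
      }
      return sum;
    }

    size_t list_mem_size_const(Node* head, size_t n) {  // the new O(1) style
      return (head != NULL) ? head->mem_size() * n : 0;
    }
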
@@ -845,7 +850,7 @@ int HeapRegionRemSet::num_par_rem_sets() {
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegion* hr)
- : _bosa(bosa), _other_regions(hr) {
+ : _bosa(bosa), _strong_code_roots_list(NULL), _other_regions(hr) {
reset_for_par_iteration();
}
@@ -904,6 +909,12 @@ void HeapRegionRemSet::cleanup() {
}
void HeapRegionRemSet::clear() {
+ if (_strong_code_roots_list != NULL) {
+ delete _strong_code_roots_list;
+ }
+ _strong_code_roots_list = new (ResourceObj::C_HEAP, mtGC)
+ GrowableArray<nmethod*>(10, 0, NULL, true);
+
_other_regions.clear();
assert(occupied() == 0, "Should be clear.");
reset_for_par_iteration();
@@ -921,6 +932,121 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
_other_regions.scrub(ctbs, region_bm, card_bm);
}
+
+// Code roots support
+
+void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
+ assert(nm != NULL, "sanity");
+ // Search the list from the end, since a duplicate entry,
+ // if any, was most likely added recently
+ if (_strong_code_roots_list->find_from_end(nm) < 0) {
+ // Code blob isn't already in the list
+ _strong_code_roots_list->push(nm);
+ }
+}
+
+void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
+ assert(nm != NULL, "sanity");
+ int idx = _strong_code_roots_list->find(nm);
+ if (idx >= 0) {
+ _strong_code_roots_list->remove_at(idx);
+ }
+ // Check that there were no duplicates
+ guarantee(_strong_code_roots_list->find(nm) < 0, "duplicate entry found");
+}
+
+class NMethodMigrationOopClosure : public OopClosure {
+ G1CollectedHeap* _g1h;
+ HeapRegion* _from;
+ nmethod* _nm;
+
+ uint _num_self_forwarded;
+
+ template <class T> void do_oop_work(T* p) {
+ T heap_oop = oopDesc::load_heap_oop(p);
+ if (!oopDesc::is_null(heap_oop)) {
+ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+ if (_from->is_in(obj)) {
+ // Reference still points into the source region.
+ // Since roots are immediately evacuated, this means that
+ // we must have self-forwarded the object
+ assert(obj->is_forwarded(),
+ err_msg("code roots should be immediately evacuated. "
+ "Ref: "PTR_FORMAT", "
+ "Obj: "PTR_FORMAT", "
+ "Region: "HR_FORMAT,
+ p, (void*) obj, HR_FORMAT_PARAMS(_from)));
+ assert(obj->forwardee() == obj,
+ err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));
+
+ // The object has been self forwarded.
+ // Note: if we're in an initial mark pause, there is
+ // no need to explicitly mark the object. It will be marked
+ // by the regular evacuation failure handling code.
+ _num_self_forwarded++;
+ } else {
+ // The reference points into a promotion or to-space region
+ HeapRegion* to = _g1h->heap_region_containing(obj);
+ to->rem_set()->add_strong_code_root(_nm);
+ }
+ }
+ }
+
+public:
+ NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
+ _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
+
+ void do_oop(narrowOop* p) { do_oop_work(p); }
+ void do_oop(oop* p) { do_oop_work(p); }
+
+ uint retain() { return _num_self_forwarded > 0; }
+};
+
+void HeapRegionRemSet::migrate_strong_code_roots() {
+ assert(hr()->in_collection_set(), "only collection set regions");
+ assert(!hr()->isHumongous(), "not humongous regions");
+
+ ResourceMark rm;
+
+ // List of code blobs to retain for this region
+ GrowableArray<nmethod*> to_be_retained(10);
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+ while (_strong_code_roots_list->is_nonempty()) {
+ nmethod *nm = _strong_code_roots_list->pop();
+ if (nm != NULL) {
+ NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
+ nm->oops_do(&oop_cl);
+ if (oop_cl.retain()) {
+ to_be_retained.push(nm);
+ }
+ }
+ }
+
+ // Now push any code roots we need to retain
+ assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
+ "Retained nmethod list must be empty or "
+ "evacuation of this region failed");
+
+ while (to_be_retained.is_nonempty()) {
+ nmethod* nm = to_be_retained.pop();
+ assert(nm != NULL, "sanity");
+ add_strong_code_root(nm);
+ }
+}
+
+void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
+ for (int i = 0; i < _strong_code_roots_list->length(); i += 1) {
+ nmethod* nm = _strong_code_roots_list->at(i);
+ blk->do_code_blob(nm);
+ }
+}
+
+size_t HeapRegionRemSet::strong_code_roots_mem_size() {
+ return sizeof(GrowableArray<nmethod*>) +
+ _strong_code_roots_list->max_length() * sizeof(nmethod*);
+}
+
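migrate_strong_code_roots() operates on one region at a time; the call site that drives it over the collection set is not part of the hunks shown here. A purely hypothetical driver, assuming the usual HeapRegionClosure iteration protocol and that it is applied only to collection-set regions:

    // Hypothetical driver -- not the actual call site in this changeset.
    class MigrateCodeRootsClosure : public HeapRegionClosure {
     public:
      bool doHeapRegion(HeapRegion* r) {
        // Humongous regions keep an empty strong code roots list.
        if (!r->isHumongous()) {
          r->migrate_strong_code_roots();
        }
        return false;  // false == continue iterating
      }
    };
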
//-------------------- Iteration --------------------
HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
@@ -1147,6 +1273,19 @@ HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
}
#ifndef PRODUCT
+void PerRegionTable::test_fl_mem_size() {
+ PerRegionTable* dummy = alloc(NULL);
+ free(dummy);
+ guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
+ // try to reset the state
+ _free_list = NULL;
+ delete dummy;
+}
+
+void HeapRegionRemSet::test_prt() {
+ PerRegionTable::test_fl_mem_size();
+}
+
void HeapRegionRemSet::test() {
os::sleep(Thread::current(), (jlong)5000, false);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
diff --git a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
index 2e165074e..e40f6195a 100644
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
@@ -37,6 +37,7 @@ class HeapRegion;
class HeapRegionRemSetIterator;
class PerRegionTable;
class SparsePRT;
+class nmethod;
// Essentially a wrapper around SparsePRTCleanupTask. See
// sparsePRT.hpp for more details.
@@ -191,6 +192,10 @@ private:
G1BlockOffsetSharedArray* _bosa;
G1BlockOffsetSharedArray* bosa() const { return _bosa; }
+ // A list of code blobs (nmethods) whose code contains pointers into
+ // the region that owns this RSet.
+ GrowableArray<nmethod*>* _strong_code_roots_list;
+
OtherRegionsTable _other_regions;
enum ParIterState { Unclaimed, Claimed, Complete };
@@ -282,11 +287,13 @@ public:
}
// The actual # of bytes this hr_remset takes up.
+ // Note that this also includes the strong code root set.
size_t mem_size() {
return _other_regions.mem_size()
// This correction is necessary because _other_regions.mem_size()
// already accounts for the embedded OtherRegionsTable.
- + sizeof(this) - sizeof(OtherRegionsTable);
+ + (sizeof(this) - sizeof(OtherRegionsTable))
+ + strong_code_roots_mem_size();
}
// Returns the memory occupancy of all static data structures associated
@@ -304,6 +311,37 @@ public:
bool contains_reference(OopOrNarrowOopStar from) const {
return _other_regions.contains_reference(from);
}
+
+ // Routines for managing the list of code roots that point into
+ // the heap region that owns this RSet.
+ void add_strong_code_root(nmethod* nm);
+ void remove_strong_code_root(nmethod* nm);
+
+ // During a collection, migrate the successfully evacuated strong
+ // code roots that pointed into the region that owns this RSet
+ // to the RSets of the new regions that they now point into.
+ // Unsuccessfully evacuated code roots are not migrated.
+ void migrate_strong_code_roots();
+
+ // Applies blk->do_code_blob() to each of the entries in
+ // the strong code roots list
+ void strong_code_roots_do(CodeBlobClosure* blk) const;
+
+ // Returns the number of elements in the strong code roots list
+ int strong_code_roots_list_length() {
+ return _strong_code_roots_list->length();
+ }
+
+ // Returns true if the strong code roots list contains the given
+ // nmethod.
+ bool strong_code_roots_list_contains(nmethod* nm) {
+ return _strong_code_roots_list->contains(nm);
+ }
+
+ // Returns the amount of memory, in bytes, currently
+ // consumed by the strong code roots.
+ size_t strong_code_roots_mem_size();
+
void print() const;
// Called during a stop-world phase to perform any deferred cleanups.
@@ -338,6 +376,7 @@ public:
// Run unit tests.
#ifndef PRODUCT
+ static void test_prt();
static void test();
#endif
};
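
The collector side that populates these lists is not in the hunks shown here; presumably an nmethod gets registered by walking its embedded oops and attaching it to every region one of them points into, mirroring the oops_do/OopClosure pattern used by the migration closure above. A hypothetical sketch of that registration path, assuming the HotSpot headers:

    // Hypothetical registration path -- illustrative only.
    class RegisterNMethodOopClosure : public OopClosure {
      G1CollectedHeap* _g1h;
      nmethod* _nm;

      template <class T> void do_oop_work(T* p) {
        T heap_oop = oopDesc::load_heap_oop(p);
        if (!oopDesc::is_null(heap_oop)) {
          oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
          // Attach the nmethod to the region the oop points into.
          _g1h->heap_region_containing(obj)->add_strong_code_root(_nm);
        }
      }

     public:
      RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
        _g1h(g1h), _nm(nm) {}
      void do_oop(oop* p)       { do_oop_work(p); }
      void do_oop(narrowOop* p) { do_oop_work(p); }
    };

    // A register_nmethod(nm) entry point would then do roughly:
    //   RegisterNMethodOopClosure cl(this, nm);
    //   nm->oops_do(&cl);
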
diff --git a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp
index dade3dfdf..eaa8e10f3 100644
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp
@@ -71,27 +71,16 @@ uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
// Public
-void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
- uint max_length) {
+void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
"bottom should be heap region aligned");
assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
"end should be heap region aligned");
- _length = 0;
- _heap_bottom = bottom;
- _heap_end = end;
- _region_shift = HeapRegion::LogOfHRGrainBytes;
_next_search_index = 0;
_allocated_length = 0;
- _max_length = max_length;
- _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC);
- memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
- _regions_biased = _regions - ((uintx) bottom >> _region_shift);
-
- assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
- "bottom should be included in the region with index 0");
+ _regions.initialize(bottom, end, HeapRegion::GrainBytes);
}
MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
@@ -101,15 +90,15 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapWord* next_bottom = old_end;
- assert(_heap_bottom <= next_bottom, "invariant");
+ assert(heap_bottom() <= next_bottom, "invariant");
while (next_bottom < new_end) {
- assert(next_bottom < _heap_end, "invariant");
+ assert(next_bottom < heap_end(), "invariant");
uint index = length();
- assert(index < _max_length, "otherwise we cannot expand further");
+ assert(index < max_length(), "otherwise we cannot expand further");
if (index == 0) {
// We have not allocated any regions so far
- assert(next_bottom == _heap_bottom, "invariant");
+ assert(next_bottom == heap_bottom(), "invariant");
} else {
// next_bottom should match the end of the last/previous region
assert(next_bottom == at(index - 1)->end(), "invariant");
@@ -122,8 +111,8 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
// allocation failed, we bail out and return what we have done so far
return MemRegion(old_end, next_bottom);
}
- assert(_regions[index] == NULL, "invariant");
- _regions[index] = new_hr;
+ assert(_regions.get_by_index(index) == NULL, "invariant");
+ _regions.set_by_index(index, new_hr);
increment_allocated_length();
}
// Have to increment the length first, otherwise we will get an
@@ -228,26 +217,26 @@ uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
#ifndef PRODUCT
void HeapRegionSeq::verify_optional() {
- guarantee(_length <= _allocated_length,
+ guarantee(length() <= _allocated_length,
err_msg("invariant: _length: %u _allocated_length: %u",
- _length, _allocated_length));
- guarantee(_allocated_length <= _max_length,
+ length(), _allocated_length));
+ guarantee(_allocated_length <= max_length(),
err_msg("invariant: _allocated_length: %u _max_length: %u",
- _allocated_length, _max_length));
- guarantee(_next_search_index <= _length,
+ _allocated_length, max_length()));
+ guarantee(_next_search_index <= length(),
err_msg("invariant: _next_search_index: %u _length: %u",
- _next_search_index, _length));
+ _next_search_index, length()));
- HeapWord* prev_end = _heap_bottom;
+ HeapWord* prev_end = heap_bottom();
for (uint i = 0; i < _allocated_length; i += 1) {
- HeapRegion* hr = _regions[i];
+ HeapRegion* hr = _regions.get_by_index(i);
guarantee(hr != NULL, err_msg("invariant: i: %u", i));
guarantee(hr->bottom() == prev_end,
err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
i, HR_FORMAT_PARAMS(hr), prev_end));
guarantee(hr->hrs_index() == i,
err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
- if (i < _length) {
+ if (i < length()) {
// Asserts will fire if i is >= _length
HeapWord* addr = hr->bottom();
guarantee(addr_to_region(addr) == hr, "sanity");
@@ -265,8 +254,8 @@ void HeapRegionSeq::verify_optional() {
prev_end = hr->end();
}
}
- for (uint i = _allocated_length; i < _max_length; i += 1) {
- guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
+ for (uint i = _allocated_length; i < max_length(); i += 1) {
+ guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
}
}
#endif // PRODUCT
diff --git a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp
index b7a58f76a..b0c3eb48a 100644
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp
@@ -25,10 +25,17 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+
class HeapRegion;
class HeapRegionClosure;
class FreeRegionList;
+class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
+ protected:
+ virtual HeapRegion* default_value() const { return NULL; }
+};
+
// This class keeps track of the region metadata (i.e., HeapRegion
// instances). They are kept in the _regions array in address
// order. A region's index in the array corresponds to its index in
@@ -44,35 +51,21 @@ class FreeRegionList;
//
// We keep track of three lengths:
//
-// * _length (returned by length()) is the number of currently
+// * _committed_length (returned by length()) is the number of currently
// committed regions.
// * _allocated_length (not exposed outside this class) is the
// number of regions for which we have HeapRegions.
-// * _max_length (returned by max_length()) is the maximum number of
-// regions the heap can have.
+// * max_length() returns the maximum number of regions the heap can have.
//
-// and maintain that: _length <= _allocated_length <= _max_length
+// and maintain that: _committed_length <= _allocated_length <= max_length()
class HeapRegionSeq: public CHeapObj<mtGC> {
friend class VMStructs;
- // The array that holds the HeapRegions.
- HeapRegion** _regions;
-
- // Version of _regions biased to address 0
- HeapRegion** _regions_biased;
+ G1HeapRegionTable _regions;
// The number of regions committed in the heap.
- uint _length;
-
- // The address of the first reserved word in the heap.
- HeapWord* _heap_bottom;
-
- // The address of the last reserved word in the heap - 1.
- HeapWord* _heap_end;
-
- // The log of the region byte size.
- uint _region_shift;
+ uint _committed_length;
// A hint for which index to start searching from for humongous
// allocations.
@@ -81,37 +74,33 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
// The number of regions for which we have allocated HeapRegions for.
uint _allocated_length;
- // The maximum number of regions in the heap.
- uint _max_length;
-
// Find a contiguous set of empty regions of length num, starting
// from the given index.
uint find_contiguous_from(uint from, uint num);
- // Map a heap address to a biased region index. Assume that the
- // address is valid.
- inline uintx addr_to_index_biased(HeapWord* addr) const;
-
void increment_allocated_length() {
- assert(_allocated_length < _max_length, "pre-condition");
+ assert(_allocated_length < max_length(), "pre-condition");
_allocated_length++;
}
void increment_length() {
- assert(_length < _max_length, "pre-condition");
- _length++;
+ assert(length() < max_length(), "pre-condition");
+ _committed_length++;
}
void decrement_length() {
- assert(_length > 0, "pre-condition");
- _length--;
+ assert(length() > 0, "pre-condition");
+ _committed_length--;
}
+ HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
+ HeapWord* heap_end() const { return _regions.end_address_mapped(); }
+
public:
// Empty constructor, we'll initialize it with the initialize() method.
- HeapRegionSeq() { }
+ HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
- void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
+ void initialize(HeapWord* bottom, HeapWord* end);
// Return the HeapRegion at the given index. Assume that the index
// is valid.
@@ -126,10 +115,10 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
// Return the number of regions that have been committed in the heap.
- uint length() const { return _length; }
+ uint length() const { return _committed_length; }
// Return the maximum number of regions in the heap.
- uint max_length() const { return _max_length; }
+ uint max_length() const { return (uint)_regions.length(); }
// Expand the sequence to reflect that the heap has grown from
// old_end to new_end. Either create new HeapRegions, or re-use
diff --git a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp
index e840287ed..96588dea0 100644
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp
@@ -28,28 +28,16 @@
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
-inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
- assert(_heap_bottom <= addr && addr < _heap_end,
- err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
- addr, _heap_bottom, _heap_end));
- uintx index = (uintx) addr >> _region_shift;
- return index;
-}
-
inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
- assert(_heap_bottom <= addr && addr < _heap_end,
- err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
- addr, _heap_bottom, _heap_end));
- uintx index_biased = addr_to_index_biased(addr);
- HeapRegion* hr = _regions_biased[index_biased];
+ HeapRegion* hr = _regions.get_by_address(addr);
assert(hr != NULL, "invariant");
return hr;
}
inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
- if (addr != NULL && addr < _heap_end) {
- assert(addr >= _heap_bottom,
- err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
+ if (addr != NULL && addr < heap_end()) {
+ assert(addr >= heap_bottom(),
+ err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
return addr_to_region_unsafe(addr);
}
return NULL;
@@ -57,7 +45,7 @@ inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
inline HeapRegion* HeapRegionSeq::at(uint index) const {
assert(index < length(), "pre-condition");
- HeapRegion* hr = _regions[index];
+ HeapRegion* hr = _regions.get_by_index(index);
assert(hr != NULL, "sanity");
assert(hr->hrs_index() == index, "sanity");
return hr;
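
The refactoring above replaces HeapRegionSeq's hand-rolled biased lookup (_regions_biased plus addr_to_index_biased) with G1BiasedMappedArray. The trick is unchanged: pre-bias the array base by (bottom >> shift) once, so an address-to-region lookup is a shift and an indexed load, with no subtraction of the heap bottom on every lookup. A hedged sketch, with field names following the VMStructs entries added later in this patch:

    #include <cstdint>
    #include <cstddef>

    struct BiasedRegionTable {
      void**   _base;         // allocated array, one slot per region
      size_t   _length;       // number of slots
      void**   _biased_base;  // == _base - _bias, indexable directly by address
      size_t   _bias;         // (uintptr_t)bottom >> _shift_by
      unsigned _shift_by;     // log2 of the region size in bytes

      void* get_by_address(const void* addr) const {
        // One shift, one load.
        return _biased_base[(uintptr_t)addr >> _shift_by];
      }
    };
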
diff --git a/src/share/vm/gc_implementation/g1/ptrQueue.hpp b/src/share/vm/gc_implementation/g1/ptrQueue.hpp
index 1f6d9b213..958317166 100644
--- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp
@@ -38,6 +38,7 @@
class PtrQueueSet;
class PtrQueue VALUE_OBJ_CLASS_SPEC {
+ friend class VMStructs;
protected:
// The ptr queue set to which this queue belongs.
diff --git a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp
index 5507dee5f..9268eb78e 100644
--- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp
+++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp
@@ -31,10 +31,17 @@
#define VM_STRUCTS_G1(nonstatic_field, static_field) \
\
- static_field(HeapRegion, GrainBytes, size_t) \
+ static_field(HeapRegion, GrainBytes, size_t) \
+ static_field(HeapRegion, LogOfHRGrainBytes, int) \
\
- nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \
- nonstatic_field(HeapRegionSeq, _length, uint) \
+ nonstatic_field(G1HeapRegionTable, _base, address) \
+ nonstatic_field(G1HeapRegionTable, _length, size_t) \
+ nonstatic_field(G1HeapRegionTable, _biased_base, address) \
+ nonstatic_field(G1HeapRegionTable, _bias, size_t) \
+ nonstatic_field(G1HeapRegionTable, _shift_by, uint) \
+ \
+ nonstatic_field(HeapRegionSeq, _regions, G1HeapRegionTable) \
+ nonstatic_field(HeapRegionSeq, _committed_length, uint) \
\
nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \
@@ -57,6 +64,8 @@
#define VM_TYPES_G1(declare_type, declare_toplevel_type) \
\
+ declare_toplevel_type(G1HeapRegionTable) \
+ \
declare_type(G1CollectedHeap, SharedHeap) \
\
declare_type(HeapRegion, ContiguousSpace) \
diff --git a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
index cf7488ffe..9f298da38 100644
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,8 @@
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "runtime/interfaceSupport.hpp"
@@ -68,9 +70,6 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
guarantee(target_pause_time_ms > 0.0,
err_msg("target_pause_time_ms = %1.6lf should be positive",
target_pause_time_ms));
- guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
- "we can only request an allocation if the GC cause is for "
- "an incremental GC pause");
_gc_cause = gc_cause;
}
@@ -227,7 +226,7 @@ void VM_CGC_Operation::release_and_notify_pending_list_lock() {
void VM_CGC_Operation::doit() {
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
- TraceTime t(_printGCMessage, G1Log::fine(), true, gclog_or_tty);
+ GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm());
SharedHeap* sh = SharedHeap::heap();
// This could go away if CollectedHeap gave access to _gc_is_active...
if (sh != NULL) {
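
GCTraceTime (gcTraceTime.hpp) replaces TraceTime here so that, besides printing, the phase is reported to a GCTimer; note that the ParNew hunk further down passes NULL for the timer, so the timer argument must be optional. A simplified stand-in showing the scoped (RAII) shape such a tracer takes; both types below are hypothetical, not the HotSpot implementation:

    #include <cstdio>

    struct GCTimerStub {                       // hypothetical stand-in
      void note_start(const char* title) { (void)title; }
      void note_end() {}
    };

    class ScopedGCTrace {
      const char* _title;
      bool _doit;
      GCTimerStub* _timer;
     public:
      ScopedGCTrace(const char* title, bool doit, GCTimerStub* timer)
          : _title(title), _doit(doit), _timer(timer) {
        if (_timer != NULL) _timer->note_start(_title);    // timer is optional
        if (_doit) std::printf("[%s start]\n", _title);
      }
      ~ScopedGCTrace() {
        if (_timer != NULL) _timer->note_end();
        if (_doit) std::printf("[%s end]\n", _title);
      }
    };
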
diff --git a/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp b/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp
index a18933356..2925b9d43 100644
--- a/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp
+++ b/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -585,8 +585,7 @@ void ASParNewGeneration::compute_new_size() {
size_policy->avg_young_live()->sample(used());
size_policy->avg_eden_live()->sample(eden()->used());
- size_policy->compute_young_generation_free_space(eden()->capacity(),
- max_gen_size());
+ size_policy->compute_eden_space_size(eden()->capacity(), max_gen_size());
resize(size_policy->calculated_eden_size_in_bytes(),
size_policy->calculated_survivor_size_in_bytes());
diff --git a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
index e868d8709..a1eb3130b 100644
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,11 @@
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
@@ -75,7 +80,6 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
work_queue_set_, &term_),
_is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
_keep_alive_closure(&_scan_weak_ref_closure),
- _promotion_failure_size(0),
_strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
@@ -279,13 +283,10 @@ void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
}
}
-void ParScanThreadState::print_and_clear_promotion_failure_size() {
- if (_promotion_failure_size != 0) {
- if (PrintPromotionFailure) {
- gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
- _thread_num, _promotion_failure_size);
- }
- _promotion_failure_size = 0;
+void ParScanThreadState::print_promotion_failure_size() {
+ if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
+ gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
+ _thread_num, _promotion_failed_info.first_size());
}
}
@@ -305,6 +306,7 @@ public:
inline ParScanThreadState& thread_state(int i);
+ void trace_promotion_failed(YoungGCTracer& gc_tracer);
void reset(int active_workers, bool promotion_failed);
void flush();
@@ -353,13 +355,21 @@ inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
return ((ParScanThreadState*)_data)[i];
}
+void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
+ for (int i = 0; i < length(); ++i) {
+ if (thread_state(i).promotion_failed()) {
+ gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
+ thread_state(i).promotion_failed_info().reset();
+ }
+ }
+}
void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
_term.reset_for_reuse(active_threads);
if (promotion_failed) {
for (int i = 0; i < length(); ++i) {
- thread_state(i).print_and_clear_promotion_failure_size();
+ thread_state(i).print_promotion_failure_size();
}
}
}
@@ -583,14 +593,6 @@ void ParNewGenTask::set_for_termination(int active_workers) {
gch->set_n_termination(active_workers);
}
-// The "i" passed to this method is the part of the work for
-// this thread. It is not the worker ID. The "i" is derived
-// from _started_workers which is incremented in internal_note_start()
-// called in GangWorker loop() and which is called under the
-// which is called under the protection of the gang monitor and is
-// called after a task is started. So "i" is based on
-// first-come-first-served.
-
void ParNewGenTask::work(uint worker_id) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
// Since this is being done in a separate thread, need new resource
@@ -876,16 +878,45 @@ void EvacuateFollowersClosureGeneral::do_void() {
}
+// A Generation that does parallel young-gen collection.
+
bool ParNewGeneration::_avoid_promotion_undo = false;
-// A Generation that does parallel young-gen collection.
+void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
+ assert(_promo_failure_scan_stack.is_empty(), "post condition");
+ _promo_failure_scan_stack.clear(true); // Clear cached segments.
+
+ remove_forwarding_pointers();
+ if (PrintGCDetails) {
+ gclog_or_tty->print(" (promotion failed)");
+ }
+ // All the spaces are in play for mark-sweep.
+ swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
+ from()->set_next_compaction_space(to());
+ gch->set_incremental_collection_failed();
+ // Inform the next generation that a promotion failure occurred.
+ _next_gen->promotion_failure_occurred();
+
+ // Trace promotion failure in the parallel GC threads
+ thread_state_set.trace_promotion_failed(gc_tracer);
+ // Single threaded code may have reported promotion failure to the global state
+ if (_promotion_failed_info.has_failed()) {
+ gc_tracer.report_promotion_failed(_promotion_failed_info);
+ }
+ // Reset the PromotionFailureALot counters.
+ NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
+}
void ParNewGeneration::collect(bool full,
bool clear_all_soft_refs,
size_t size,
bool is_tlab) {
assert(full || size > 0, "otherwise we don't want to collect");
+
GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+ _gc_timer->register_gc_start(os::elapsed_counter());
+
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
"not a CMS generational heap");
AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
@@ -896,17 +927,15 @@ void ParNewGeneration::collect(bool full,
workers->active_workers(),
Threads::number_of_non_daemon_threads());
workers->set_active_workers(active_workers);
- _next_gen = gch->next_gen(this);
- assert(_next_gen != NULL,
- "This must be the youngest gen, and not the only gen");
assert(gch->n_gens() == 2,
"Par collection currently only works with single older gen.");
+ _next_gen = gch->next_gen(this);
// Do we have to avoid promotion_undo?
if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
set_avoid_promotion_undo(true);
}
- // If the next generation is too full to accomodate worst-case promotion
+ // If the next generation is too full to accommodate worst-case promotion
// from this generation, pass on collection; let the next generation
// do it.
if (!collection_attempt_is_safe()) {
@@ -915,6 +944,10 @@ void ParNewGeneration::collect(bool full,
}
assert(to()->is_empty(), "Else not collection_attempt_is_safe");
+ ParNewTracer gc_tracer;
+ gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
+ gch->trace_heap_before_gc(&gc_tracer);
+
init_assuming_no_promotion_failure();
if (UseAdaptiveSizePolicy) {
@@ -922,7 +955,7 @@ void ParNewGeneration::collect(bool full,
size_policy->minor_collection_begin();
}
- TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
+ GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
// Capture heap used before collection (for printing).
size_t gch_prev_used = gch->used();
@@ -975,17 +1008,21 @@ void ParNewGeneration::collect(bool full,
rp->setup_policy(clear_all_soft_refs);
// Can the mt_degree be set later (at run_task() time would be best)?
rp->set_active_mt_degree(active_workers);
+ ReferenceProcessorStats stats;
if (rp->processing_is_mt()) {
ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
- rp->process_discovered_references(&is_alive, &keep_alive,
- &evacuate_followers, &task_executor);
+ stats = rp->process_discovered_references(&is_alive, &keep_alive,
+ &evacuate_followers, &task_executor,
+ _gc_timer);
} else {
thread_state_set.flush();
gch->set_par_threads(0); // 0 ==> non-parallel.
gch->save_marks();
- rp->process_discovered_references(&is_alive, &keep_alive,
- &evacuate_followers, NULL);
+ stats = rp->process_discovered_references(&is_alive, &keep_alive,
+ &evacuate_followers, NULL,
+ _gc_timer);
}
+ gc_tracer.report_gc_reference_stats(stats);
if (!promotion_failed()) {
// Swap the survivor spaces.
eden()->clear(SpaceDecorator::Mangle);
@@ -1010,22 +1047,7 @@ void ParNewGeneration::collect(bool full,
adjust_desired_tenuring_threshold();
} else {
- assert(_promo_failure_scan_stack.is_empty(), "post condition");
- _promo_failure_scan_stack.clear(true); // Clear cached segments.
-
- remove_forwarding_pointers();
- if (PrintGCDetails) {
- gclog_or_tty->print(" (promotion failed)");
- }
- // All the spaces are in play for mark-sweep.
- swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
- from()->set_next_compaction_space(to());
- gch->set_incremental_collection_failed();
- // Inform the next generation that a promotion failure occurred.
- _next_gen->promotion_failure_occurred();
-
- // Reset the PromotionFailureALot counters.
- NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
+ handle_promotion_failed(gch, thread_state_set, gc_tracer);
}
// set new iteration safe limit for the survivor spaces
from()->set_concurrent_iteration_safe_limit(from()->top());
@@ -1065,6 +1087,13 @@ void ParNewGeneration::collect(bool full,
rp->enqueue_discovered_references(NULL);
}
rp->verify_no_references_recorded();
+
+ gch->trace_heap_after_gc(&gc_tracer);
+ gc_tracer.report_tenuring_threshold(tenuring_threshold());
+
+ _gc_timer->register_gc_end(os::elapsed_counter());
+
+ gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}
static int sum;
@@ -1174,8 +1203,7 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
new_obj = old;
preserve_mark_if_necessary(old, m);
- // Log the size of the maiden promotion failure
- par_scan_state->log_promotion_failure(sz);
+ par_scan_state->register_promotion_failure(sz);
}
old->forward_to(new_obj);
@@ -1300,8 +1328,7 @@ oop ParNewGeneration::copy_to_survivor_space_with_undo(
failed_to_promote = true;
preserve_mark_if_necessary(old, m);
- // Log the size of the maiden promotion failure
- par_scan_state->log_promotion_failure(sz);
+ par_scan_state->register_promotion_failure(sz);
}
} else {
// Is in to-space; do copying ourselves.
@@ -1599,8 +1626,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
}
#undef BUSY
-void ParNewGeneration::ref_processor_init()
-{
+void ParNewGeneration::ref_processor_init() {
if (_ref_processor == NULL) {
// Allocate and initialize a reference processor
_ref_processor =
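
Taken together, the collect() hunks above bracket the pause with the generation's GCTimer and route everything of interest through the new ParNewTracer. A schematic of the event ordering, assuming the HotSpot headers; this is not the actual ParNew code, and error paths are elided:

    // Schematic only: the ordering of timer/tracer events per collection.
    void collect_schematic(GCTimer* timer, ParNewTracer& tracer,
                           GenCollectedHeap* gch) {
      timer->register_gc_start(os::elapsed_counter());
      tracer.report_gc_start(gch->gc_cause(), timer->gc_start());
      gch->trace_heap_before_gc(&tracer);

      // ... evacuate; process references ...
      // tracer.report_gc_reference_stats(stats);
      // on failure: tracer.report_promotion_failed(...)

      gch->trace_heap_after_gc(&tracer);
      // tracer.report_tenuring_threshold(tenuring_threshold());
      timer->register_gc_end(os::elapsed_counter());
      tracer.report_gc_end(timer->gc_end(), timer->time_partitions());
    }
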
diff --git a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
index 487552bfb..6d3b25d95 100644
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,11 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
+#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/defNewGeneration.hpp"
+#include "memory/padded.hpp"
#include "utilities/taskqueue.hpp"
class ChunkArray;
@@ -105,7 +108,7 @@ class ParScanThreadState {
#endif // TASKQUEUE_STATS
// Stats for promotion failure
- size_t _promotion_failure_size;
+ PromotionFailedInfo _promotion_failed_info;
// Timing numbers.
double _start;
@@ -180,13 +183,16 @@ class ParScanThreadState {
void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
// Promotion failure stats
- size_t promotion_failure_size() { return promotion_failure_size(); }
- void log_promotion_failure(size_t sz) {
- if (_promotion_failure_size == 0) {
- _promotion_failure_size = sz;
- }
+ void register_promotion_failure(size_t sz) {
+ _promotion_failed_info.register_copy_failure(sz);
+ }
+ PromotionFailedInfo& promotion_failed_info() {
+ return _promotion_failed_info;
}
- void print_and_clear_promotion_failure_size();
+ bool promotion_failed() {
+ return _promotion_failed_info.has_failed();
+ }
+ void print_promotion_failure_size();
#if TASKQUEUE_STATS
TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; }
@@ -337,6 +343,8 @@ class ParNewGeneration: public DefNewGeneration {
// word being overwritten with a self-forwarding-pointer.
void preserve_mark_if_necessary(oop obj, markOop m);
+ void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer);
+
protected:
bool _survivor_overflow;
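
The per-thread size_t is replaced by a PromotionFailedInfo (copyFailedInfo.hpp), which must record at least the maiden failure size and whether any failure occurred to support the calls used above. A hedged sketch of the minimum such a class needs; the real one may carry more (e.g. counts and totals):

    #include <cstddef>

    class PromotionFailedInfoSketch {
      size_t _first_size;   // size of the first (maiden) failed copy
      size_t _count;        // how many copies failed
     public:
      PromotionFailedInfoSketch() : _first_size(0), _count(0) {}
      void register_copy_failure(size_t sz) {
        if (_count == 0) _first_size = sz;   // keep only the maiden size
        _count++;
      }
      bool has_failed() const { return _count > 0; }
      size_t first_size() const { return _first_size; }
      void reset() { _first_size = 0; _count = 0; }
    };
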
diff --git a/src/share/vm/gc_implementation/parNew/parOopClosures.hpp b/src/share/vm/gc_implementation/parNew/parOopClosures.hpp
index 0d9d7761c..00b865f2a 100644
--- a/src/share/vm/gc_implementation/parNew/parOopClosures.hpp
+++ b/src/share/vm/gc_implementation/parNew/parOopClosures.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PAROOPCLOSURES_HPP
#include "memory/genOopClosures.hpp"
+#include "memory/padded.hpp"
// Closures for ParNewGeneration
diff --git a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp
index 0c12e98be..39f5835ff 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp
@@ -70,6 +70,17 @@ ASPSOldGen::ASPSOldGen(PSVirtualSpace* vs,
_virtual_space = vs;
}
+void ASPSOldGen::initialize_work(const char* perf_data_name, int level) {
+
+ PSOldGen::initialize_work(perf_data_name, level);
+
+ // The old gen can grow to gen_size_limit(). _reserved reflects only
+ // the current maximum that can be committed.
+ assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
+
+ initialize_performance_counters(perf_data_name, level);
+}
+
void ASPSOldGen::reset_after_change() {
_reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
(HeapWord*)virtual_space()->high_boundary());
diff --git a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp
index 3de4f9793..eaa1f2b55 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp
@@ -50,6 +50,8 @@ class ASPSOldGen : public PSOldGen {
size_t max_gen_size() { return _reserved.byte_size(); }
void set_gen_size_limit(size_t v) { _gen_size_limit = v; }
+ virtual void initialize_work(const char* perf_data_name, int level);
+
// After a shrink or expand reset the generation
void reset_after_change();
diff --git a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
index b2134f10e..82ea39b52 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -565,11 +565,9 @@ bool CardTableExtension::resize_commit_uncommit(int changed_region,
if(new_start_aligned < new_end_for_commit) {
MemRegion new_committed =
MemRegion(new_start_aligned, new_end_for_commit);
- if (!os::commit_memory((char*)new_committed.start(),
- new_committed.byte_size())) {
- vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
- "card table expansion");
- }
+ os::commit_memory_or_exit((char*)new_committed.start(),
+ new_committed.byte_size(), !ExecMem,
+ "card table expansion");
}
result = true;
} else if (new_start_aligned > cur_committed.start()) {
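
os::commit_memory_or_exit folds the commit-plus-fatal-error pattern removed above into a single call (the !ExecMem argument marks the memory as non-executable). A hedged sketch of what such a helper reduces to, with the platform commit call and VM error reporting stubbed out:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Assumed primitive; stands in for the platform commit call.
    static bool commit_memory(char* addr, size_t bytes, bool exec) {
      (void)addr; (void)bytes; (void)exec;
      return true;  // stub for illustration
    }

    static void commit_memory_or_exit_sketch(char* addr, size_t bytes,
                                             bool exec, const char* mesg) {
      if (!commit_memory(addr, bytes, exec)) {
        // Stands in for vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg).
        std::fprintf(stderr, "Out of memory committing %zu bytes: %s\n",
                     bytes, mesg);
        std::exit(1);
      }
    }
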
diff --git a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp
index b4b8c1ae9..e5637687e 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -68,9 +68,6 @@ class GenerationSizer : public TwoGenerationCollectorPolicy {
size_t min_old_gen_size() { return _min_gen1_size; }
size_t old_gen_size() { return _initial_gen1_size; }
size_t max_old_gen_size() { return _max_gen1_size; }
-
- size_t metaspace_size() { return MetaspaceSize; }
- size_t max_metaspace_size() { return MaxMetaspaceSize; }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GENERATIONSIZER_HPP
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
index 3d4cbedd8..0db70cdd7 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
-#include "gc_implementation/parallelScavenge/parMarkBitMap.inline.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
@@ -55,18 +54,18 @@ ParMarkBitMap::initialize(MemRegion covered_region)
const size_t raw_bytes = words * sizeof(idx_t);
const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
const size_t granularity = os::vm_allocation_granularity();
- const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
+ _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
- ReservedSpace rs(bytes, rs_align, rs_align > 0);
+ ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
os::trace_page_sizes("par bitmap", raw_bytes, raw_bytes, page_sz,
rs.base(), rs.size());
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
_virtual_space = new PSVirtualSpace(rs, page_sz);
- if (_virtual_space != NULL && _virtual_space->expand_by(bytes)) {
+ if (_virtual_space != NULL && _virtual_space->expand_by(_reserved_byte_size)) {
_region_start = covered_region.start();
_region_size = covered_region.word_size();
idx_t* map = (idx_t*)_virtual_space->reserved_low_addr();
@@ -108,31 +107,6 @@ ParMarkBitMap::mark_obj(HeapWord* addr, size_t size)
return false;
}
-size_t
-ParMarkBitMap::live_words_in_range(HeapWord* beg_addr, HeapWord* end_addr) const
-{
- assert(beg_addr <= end_addr, "bad range");
-
- idx_t live_bits = 0;
-
- // The bitmap routines require the right boundary to be word-aligned.
- const idx_t end_bit = addr_to_bit(end_addr);
- const idx_t range_end = BitMap::word_align_up(end_bit);
-
- idx_t beg_bit = find_obj_beg(addr_to_bit(beg_addr), range_end);
- while (beg_bit < end_bit) {
- idx_t tmp_end = find_obj_end(beg_bit, range_end);
- if (tmp_end < end_bit) {
- live_bits += tmp_end - beg_bit + 1;
- beg_bit = find_obj_beg(tmp_end + 1, range_end);
- } else {
- live_bits += end_bit - beg_bit; // No + 1 here; end_bit is not counted.
- return bits_to_words(live_bits);
- }
- }
- return bits_to_words(live_bits);
-}
-
size_t ParMarkBitMap::live_words_in_range(HeapWord* beg_addr, oop end_obj) const
{
assert(beg_addr <= (HeapWord*)end_obj, "bad range");
@@ -244,13 +218,6 @@ ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
return complete;
}
-#ifndef PRODUCT
-void ParMarkBitMap::reset_counters()
-{
- _cas_tries = _cas_retries = _cas_by_another = 0;
-}
-#endif // #ifndef PRODUCT
-
#ifdef ASSERT
void ParMarkBitMap::verify_clear() const
{
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp
index fdc0febfa..32f7eed82 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,11 @@
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARMARKBITMAP_HPP
#include "memory/memRegion.hpp"
-#include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
-#include "utilities/bitMap.inline.hpp"
+#include "oops/oop.hpp"
+#include "utilities/bitMap.hpp"
-class oopDesc;
class ParMarkBitMapClosure;
+class PSVirtualSpace;
class ParMarkBitMap: public CHeapObj<mtGC>
{
@@ -41,13 +41,11 @@ public:
enum IterationStatus { incomplete, complete, full, would_overflow };
inline ParMarkBitMap();
- inline ParMarkBitMap(MemRegion covered_region);
bool initialize(MemRegion covered_region);
// Atomically mark an object as live.
bool mark_obj(HeapWord* addr, size_t size);
inline bool mark_obj(oop obj, int size);
- inline bool mark_obj(oop obj);
// Return whether the specified begin or end bit is set.
inline bool is_obj_beg(idx_t bit) const;
@@ -77,11 +75,6 @@ public:
// Return the size in words of the object (a search is done for the end bit).
inline size_t obj_size(idx_t beg_bit) const;
inline size_t obj_size(HeapWord* addr) const;
- inline size_t obj_size(oop obj) const;
-
- // Synonyms for the above.
- size_t obj_size_in_words(oop obj) const { return obj_size((HeapWord*)obj); }
- size_t obj_size_in_words(HeapWord* addr) const { return obj_size(addr); }
// Apply live_closure to each live object that lies completely within the
// range [live_range_beg, live_range_end). This is used to iterate over the
@@ -124,15 +117,12 @@ public:
HeapWord* range_end,
HeapWord* dead_range_end) const;
- // Return the number of live words in the range [beg_addr, end_addr) due to
+ // Return the number of live words in the range [beg_addr, end_obj) due to
// objects that start in the range. If a live object extends onto the range,
// the caller must detect and account for any live words due to that object.
// If a live object extends beyond the end of the range, only the words within
- // the range are included in the result.
- size_t live_words_in_range(HeapWord* beg_addr, HeapWord* end_addr) const;
-
- // Same as the above, except the end of the range must be a live object, which
- // is the case when updating pointers. This allows a branch to be removed
+ // the range are included in the result. The end of the range must be a live object,
+ // which is the case when updating pointers. This allows a branch to be removed
// from inside the loop.
size_t live_words_in_range(HeapWord* beg_addr, oop end_obj) const;
@@ -141,6 +131,8 @@ public:
inline size_t region_size() const;
inline size_t size() const;
+ size_t reserved_byte_size() const { return _reserved_byte_size; }
+
// Convert a heap address to/from a bit index.
inline idx_t addr_to_bit(HeapWord* addr) const;
inline HeapWord* bit_to_addr(idx_t bit) const;
@@ -156,22 +148,11 @@ public:
// Clear a range of bits or the entire bitmap (both begin and end bits are
// cleared).
inline void clear_range(idx_t beg, idx_t end);
- inline void clear() { clear_range(0, size()); }
// Return the number of bits required to represent the specified number of
// HeapWords, or the specified region.
static inline idx_t bits_required(size_t words);
static inline idx_t bits_required(MemRegion covered_region);
- static inline idx_t words_required(MemRegion covered_region);
-
-#ifndef PRODUCT
- // CAS statistics.
- size_t cas_tries() { return _cas_tries; }
- size_t cas_retries() { return _cas_retries; }
- size_t cas_by_another() { return _cas_by_another; }
-
- void reset_counters();
-#endif // #ifndef PRODUCT
void print_on_error(outputStream* st) const {
st->print_cr("Marking Bits: (ParMarkBitMap*) " PTR_FORMAT, this);
@@ -197,28 +178,12 @@ private:
BitMap _beg_bits;
BitMap _end_bits;
PSVirtualSpace* _virtual_space;
-
-#ifndef PRODUCT
- size_t _cas_tries;
- size_t _cas_retries;
- size_t _cas_by_another;
-#endif // #ifndef PRODUCT
+ size_t _reserved_byte_size;
};
inline ParMarkBitMap::ParMarkBitMap():
- _beg_bits(),
- _end_bits()
-{
- _region_start = 0;
- _virtual_space = 0;
-}
-
-inline ParMarkBitMap::ParMarkBitMap(MemRegion covered_region):
- _beg_bits(),
- _end_bits()
-{
- initialize(covered_region);
-}
+ _beg_bits(), _end_bits(), _region_start(NULL), _region_size(0), _virtual_space(NULL), _reserved_byte_size(0)
+{ }
inline void ParMarkBitMap::clear_range(idx_t beg, idx_t end)
{
@@ -240,12 +205,6 @@ ParMarkBitMap::bits_required(MemRegion covered_region)
return bits_required(covered_region.word_size());
}
-inline ParMarkBitMap::idx_t
-ParMarkBitMap::words_required(MemRegion covered_region)
-{
- return bits_required(covered_region) / BitsPerWord;
-}
-
inline HeapWord*
ParMarkBitMap::region_start() const
{
@@ -350,11 +309,6 @@ inline size_t ParMarkBitMap::obj_size(HeapWord* addr) const
return obj_size(addr_to_bit(addr));
}
-inline size_t ParMarkBitMap::obj_size(oop obj) const
-{
- return obj_size((HeapWord*)obj);
-}
-
inline ParMarkBitMap::IterationStatus
ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
HeapWord* range_beg,
@@ -435,8 +389,10 @@ inline void ParMarkBitMap::verify_bit(idx_t bit) const {
inline void ParMarkBitMap::verify_addr(HeapWord* addr) const {
// Allow one past the last valid address; useful for loop bounds.
- assert(addr >= region_start(), "addr too small");
- assert(addr <= region_start() + region_size(), "addr too big");
+ assert(addr >= region_start(),
+ err_msg("addr too small, addr: " PTR_FORMAT " region start: " PTR_FORMAT, addr, region_start()));
+ assert(addr <= region_end(),
+ err_msg("addr too big, addr: " PTR_FORMAT " region end: " PTR_FORMAT, addr, region_end()));
}
#endif // #ifdef ASSERT
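Note on the encoding the removed live_words_in_range(beg_addr, end_addr) walked: each live object sets one begin bit and one end bit, and live size is the sum of the inclusive begin-to-end spans. A minimal standalone sketch of that loop, assuming plain vector-backed bit queries rather than HotSpot's BitMap (the retained oop-terminated variant drops the overhang branch because its range is known to end at a live object):

#include <cstddef>
#include <vector>

typedef size_t idx_t;

// Hypothetical stand-in for the BitMap find_obj_beg/find_obj_end searches.
static idx_t find_set(const std::vector<bool>& bits, idx_t from, idx_t limit) {
  while (from < limit && !bits[from]) ++from;
  return from;
}

// Count live bits in [beg, end); an object overhanging the range contributes
// only the bits inside the range (its end bit is not counted), as above.
static size_t live_bits_in_range(const std::vector<bool>& beg_bits,
                                 const std::vector<bool>& end_bits,
                                 idx_t beg, idx_t end) {
  size_t live = 0;
  idx_t b = find_set(beg_bits, beg, end);
  while (b < end) {
    idx_t e = find_set(end_bits, b, end);
    if (e >= end) {
      live += end - b;        // overhanging object: count in-range bits only
      break;
    }
    live += e - b + 1;        // whole object, begin and end bits inclusive
    b = find_set(beg_bits, e + 1, end);
  }
  return live;                // the real code converts via bits_to_words()
}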
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
index 9ea2fa856..e5d5229d3 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,8 @@
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
@@ -214,6 +216,7 @@ void ParallelScavengeHeap::update_counters() {
young_gen()->update_counters();
old_gen()->update_counters();
MetaspaceCounters::update_performance_counters();
+ CompressedClassSpaceCounters::update_performance_counters();
}
size_t ParallelScavengeHeap::capacity() const {
@@ -642,6 +645,29 @@ void ParallelScavengeHeap::prepare_for_verify() {
ensure_parsability(false); // no need to retire TLABs for verification
}
+PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
+ PSOldGen* old = old_gen();
+ HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
+ VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
+ SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());
+
+ PSYoungGen* young = young_gen();
+ VirtualSpaceSummary young_summary(young->reserved().start(),
+ (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());
+
+ MutableSpace* eden = young_gen()->eden_space();
+ SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());
+
+ MutableSpace* from = young_gen()->from_space();
+ SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());
+
+ MutableSpace* to = young_gen()->to_space();
+ SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());
+
+ VirtualSpaceSummary heap_summary = create_heap_space_summary();
+ return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
+}
+
void ParallelScavengeHeap::print_on(outputStream* st) const {
young_gen()->print_on(st);
old_gen()->print_on(st);
@@ -706,6 +732,12 @@ void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
}
}
+void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
+ const PSHeapSummary& heap_summary = create_ps_heap_summary();
+ const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
+ gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
+}
+
ParallelScavengeHeap* ParallelScavengeHeap::heap() {
assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
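The summary objects assembled above are plain value types captured while the world is stopped, which lets the tracer report them after the underlying spaces have been resized. A reduced sketch of that pattern with hypothetical names (not the HotSpot VirtualSpaceSummary/SpaceSummary types):

struct SpaceSnapshot {
  const void* start;          // bottom of the space
  const void* committed_end;  // committed high-water mark at capture time
  size_t      used_bytes;     // bytes in use at capture time
};

// Returned by value: the snapshot remains valid even after the GC moves or
// resizes the space it describes.
static SpaceSnapshot capture(const void* start, const void* committed_end,
                             size_t used_bytes) {
  SpaceSnapshot s = { start, committed_end, used_bytes };
  return s;
}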
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
index c0933fe16..4e458efa9 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,14 +30,18 @@
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "utilities/ostream.hpp"
class AdjoiningGenerations;
+class CollectorPolicy;
+class GCHeapSummary;
class GCTaskManager;
-class PSAdaptiveSizePolicy;
class GenerationSizer;
class CollectorPolicy;
+class PSAdaptiveSizePolicy;
+class PSHeapSummary;
class ParallelScavengeHeap : public CollectedHeap {
friend class VMStructs;
@@ -65,6 +69,8 @@ class ParallelScavengeHeap : public CollectedHeap {
static GCTaskManager* _gc_task_manager; // The task manager.
+ void trace_heap(GCWhen::Type when, GCTracer* tracer);
+
protected:
static inline size_t total_invocations();
HeapWord* allocate_new_tlab(size_t size);
@@ -80,6 +86,11 @@ class ParallelScavengeHeap : public CollectedHeap {
set_alignment(_old_gen_alignment, intra_heap_alignment());
}
+ // Return the (conservative) maximum heap alignment
+ static size_t conservative_max_heap_alignment() {
+ return intra_heap_alignment();
+ }
+
// For use by VM operations
enum CollectionType {
Scavenge,
@@ -116,7 +127,7 @@ class ParallelScavengeHeap : public CollectedHeap {
// The alignment used for eden and survivors within the young gen
// and for boundary between young gen and old gen.
- size_t intra_heap_alignment() const { return 64 * K; }
+ static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
size_t capacity() const;
size_t used() const;
@@ -219,6 +230,7 @@ class ParallelScavengeHeap : public CollectedHeap {
jlong millis_since_last_gc();
void prepare_for_verify();
+ PSHeapSummary create_ps_heap_summary();
virtual void print_on(outputStream* st) const;
virtual void print_on_error(outputStream* st) const;
virtual void print_gc_threads_on(outputStream* st) const;
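Besides becoming static (so conservative_max_heap_alignment() can call it without an instance), intra_heap_alignment() now scales its constant by HeapWordSize, which reads as a words-to-bytes correction. A worked example under the assumption of a 64-bit VM (HeapWordSize == 8, K == 1024):

// Old expression: 64 * K                = 65,536   (64K)
// New expression: 64 * K * HeapWordSize = 524,288  (512K bytes, assuming
//                                                   8-byte heap words)
static const size_t K = 1024;
static const size_t kHeapWordSize = 8;   // assumption: 64-bit heap words
static size_t intra_heap_alignment() { return 64 * K * kHeapWordSize; }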
diff --git a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
index 3834722fa..14be13a26 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,8 @@
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/pcTasks.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.inline.hpp"
@@ -48,8 +50,8 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
ResourceMark rm;
- NOT_PRODUCT(TraceTime tm("ThreadRootsMarkingTask",
- PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+ NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask",
+ PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
@@ -77,8 +79,8 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");
- NOT_PRODUCT(TraceTime tm("MarkFromRootsTask",
- PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+ NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
+ PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@@ -120,6 +122,9 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
case system_dictionary:
SystemDictionary::always_strong_oops_do(&mark_and_push_closure);
+ break;
+
+ case class_loader_data:
ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, &follow_klass_closure, true);
break;
@@ -145,8 +150,8 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
assert(Universe::heap()->is_gc_active(), "called outside gc");
- NOT_PRODUCT(TraceTime tm("RefProcTask",
- PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+ NOT_PRODUCT(GCTraceTime tm("RefProcTask",
+ PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@@ -201,8 +206,8 @@ StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");
- NOT_PRODUCT(TraceTime tm("StealMarkingTask",
- PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+ NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
+ PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
@@ -213,7 +218,7 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
int random_seed = 17;
do {
while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
- ObjArrayKlass* const k = (ObjArrayKlass*)task.obj()->klass();
+ ObjArrayKlass* k = (ObjArrayKlass*)task.obj()->klass();
k->oop_follow_contents(cm, task.obj(), task.index());
cm->follow_marking_stacks();
}
@@ -234,8 +239,8 @@ StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");
- NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask",
- PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+ NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
+ PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
@@ -301,8 +306,8 @@ UpdateDensePrefixTask::UpdateDensePrefixTask(
void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
- NOT_PRODUCT(TraceTime tm("UpdateDensePrefixTask",
- PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+ NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask",
+ PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
@@ -316,8 +321,8 @@ void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");
- NOT_PRODUCT(TraceTime tm("DrainStacksCompactionTask",
- PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+ NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
+ PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
diff --git a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
index c79bc7c3a..f966ccf23 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
@@ -98,7 +98,8 @@ class MarkFromRootsTask : public GCTask {
management = 6,
jvmti = 7,
system_dictionary = 8,
- code_cache = 9
+ class_loader_data = 9,
+ code_cache = 10
};
private:
RootType _root_type;
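The new class_loader_data enumerator (and the renumbered code_cache) pairs with the pcTasks.cpp hunk above: previously both walks ran under the single system_dictionary case, and the added break plus new case split them, presumably so class-loader-data roots can be claimed as their own marking task. Abbreviated sketch of the resulting dispatch:

switch (_root_type) {
  case system_dictionary:
    SystemDictionary::always_strong_oops_do(&mark_and_push_closure);
    break;                    // no longer flows into the class loader walk
  case class_loader_data:
    ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure,
                                                &follow_klass_closure, true);
    break;
  default:
    break;                    // other root types handled by their own cases
}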
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp
index 3b24ed886..7fba4f358 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -194,26 +194,38 @@ void PSAdaptiveSizePolicy::clear_generation_free_space_flags() {
// If this is not a full GC, only test and modify the young generation.
-void PSAdaptiveSizePolicy::compute_generation_free_space(
+void PSAdaptiveSizePolicy::compute_generations_free_space(
size_t young_live,
size_t eden_live,
size_t old_live,
size_t cur_eden,
size_t max_old_gen_size,
size_t max_eden_size,
- bool is_full_gc,
- GCCause::Cause gc_cause,
- CollectorPolicy* collector_policy) {
+ bool is_full_gc) {
+ compute_eden_space_size(young_live,
+ eden_live,
+ cur_eden,
+ max_eden_size,
+ is_full_gc);
+
+ compute_old_gen_free_space(old_live,
+ cur_eden,
+ max_old_gen_size,
+ is_full_gc);
+}
+
+void PSAdaptiveSizePolicy::compute_eden_space_size(
+ size_t young_live,
+ size_t eden_live,
+ size_t cur_eden,
+ size_t max_eden_size,
+ bool is_full_gc) {
// Update statistics
// Time statistics are updated as we go, update footprint stats here
_avg_base_footprint->sample(BaseFootPrintEstimate);
avg_young_live()->sample(young_live);
avg_eden_live()->sample(eden_live);
- if (is_full_gc) {
- // old_live is only accurate after a full gc
- avg_old_live()->sample(old_live);
- }
// This code used to return if the policy was not ready, i.e.,
// policy_is_ready() returning false. The intent was that
@@ -242,22 +254,11 @@ void PSAdaptiveSizePolicy::compute_generation_free_space(
// some case but is dangerous.
size_t desired_eden_size = cur_eden;
-#ifdef ASSERT
- size_t original_promo_size = desired_promo_size;
- size_t original_eden_size = desired_eden_size;
-#endif
-
// Cache some values. There's a bit of work getting these, so
// we might save a little time.
const double major_cost = major_gc_cost();
const double minor_cost = minor_gc_cost();
- // Used for diagnostics
- clear_generation_free_space_flags();
-
- // Limits on our growth
- size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
-
// This method sets the desired eden size. That plus the
// desired survivor space sizes sets the desired young generation
// size. This methods does not know what the desired survivor
@@ -268,10 +269,6 @@ void PSAdaptiveSizePolicy::compute_generation_free_space(
// subtracted out.
size_t eden_limit = max_eden_size;
- // But don't force a promo size below the current promo size. Otherwise,
- // the promo size will shrink for no good reason.
- promo_limit = MAX2(promo_limit, _promo_size);
-
const double gc_cost_limit = GCTimeLimit/100.0;
// Which way should we go?
@@ -286,7 +283,7 @@ void PSAdaptiveSizePolicy::compute_generation_free_space(
// adjust down the total heap size. Adjust down the larger of the
// generations.
- // Add some checks for a threshhold for a change. For example,
+ // Add some checks for a threshold for a change. For example,
// a change less than the necessary alignment is probably not worth
// attempting.
@@ -298,11 +295,11 @@ void PSAdaptiveSizePolicy::compute_generation_free_space(
//
// Make changes only to affect one of the pauses (the larger)
// at a time.
- adjust_for_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);
+ adjust_eden_for_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);
} else if (_avg_minor_pause->padded_average() > gc_minor_pause_goal_sec()) {
// Adjust only for the minor pause time goal
- adjust_for_minor_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);
+ adjust_eden_for_minor_pause_time(is_full_gc, &desired_eden_size);
} else if(adjusted_mutator_cost() < _throughput_goal) {
// This branch used to require that mutator_cost() > 0.0 in 1.4.2.
@@ -316,7 +313,7 @@ void PSAdaptiveSizePolicy::compute_generation_free_space(
assert(major_cost >= 0.0, "major cost is < 0.0");
assert(minor_cost >= 0.0, "minor cost is < 0.0");
// Try to reduce the GC times.
- adjust_for_throughput(is_full_gc, &desired_promo_size, &desired_eden_size);
+ adjust_eden_for_throughput(is_full_gc, &desired_eden_size);
} else {
@@ -328,35 +325,16 @@ void PSAdaptiveSizePolicy::compute_generation_free_space(
avg_major_gc_cost()->average() >= 0.0 &&
avg_minor_gc_cost()->average() >= 0.0) {
size_t desired_sum = desired_eden_size + desired_promo_size;
- desired_eden_size = adjust_eden_for_footprint(desired_eden_size,
- desired_sum);
- if (is_full_gc) {
- set_decide_at_full_gc(decide_at_full_gc_true);
- desired_promo_size = adjust_promo_for_footprint(desired_promo_size,
- desired_sum);
- }
+ desired_eden_size = adjust_eden_for_footprint(desired_eden_size, desired_sum);
}
}
// Note we make the same tests as in the code block below; the code
// seems a little easier to read with the printing in another block.
if (PrintAdaptiveSizePolicy) {
- if (desired_promo_size > promo_limit) {
- // "free_in_old_gen" was the original value for used for promo_limit
- size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
- gclog_or_tty->print_cr(
- "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
- " desired_promo_size: " SIZE_FORMAT
- " promo_limit: " SIZE_FORMAT
- " free_in_old_gen: " SIZE_FORMAT
- " max_old_gen_size: " SIZE_FORMAT
- " avg_old_live: " SIZE_FORMAT,
- desired_promo_size, promo_limit, free_in_old_gen,
- max_old_gen_size, (size_t) avg_old_live()->average());
- }
if (desired_eden_size > eden_limit) {
gclog_or_tty->print_cr(
- "AdaptiveSizePolicy::compute_generation_free_space limits:"
+ "PSAdaptiveSizePolicy::compute_eden_space_size limits:"
" desired_eden_size: " SIZE_FORMAT
" old_eden_size: " SIZE_FORMAT
" eden_limit: " SIZE_FORMAT
@@ -368,7 +346,7 @@ void PSAdaptiveSizePolicy::compute_generation_free_space(
}
if (gc_cost() > gc_cost_limit) {
gclog_or_tty->print_cr(
- "AdaptiveSizePolicy::compute_generation_free_space: gc time limit"
+ "PSAdaptiveSizePolicy::compute_eden_space_size: gc time limit"
" gc_cost: %f "
" GCTimeLimit: %d",
gc_cost(), GCTimeLimit);
@@ -379,27 +357,8 @@ void PSAdaptiveSizePolicy::compute_generation_free_space(
const size_t alignment = _intra_generation_alignment;
desired_eden_size = align_size_up(desired_eden_size, alignment);
desired_eden_size = MAX2(desired_eden_size, alignment);
- desired_promo_size = align_size_up(desired_promo_size, alignment);
- desired_promo_size = MAX2(desired_promo_size, alignment);
eden_limit = align_size_down(eden_limit, alignment);
- promo_limit = align_size_down(promo_limit, alignment);
-
- // Is too much time being spent in GC?
- // Is the heap trying to grow beyond it's limits?
-
- const size_t free_in_old_gen =
- (size_t)(max_old_gen_size - avg_old_live()->average());
- if (desired_promo_size > free_in_old_gen && desired_eden_size > eden_limit) {
- check_gc_overhead_limit(young_live,
- eden_live,
- max_old_gen_size,
- max_eden_size,
- is_full_gc,
- gc_cause,
- collector_policy);
- }
-
// And one last limit check, now that we've aligned things.
if (desired_eden_size > eden_limit) {
@@ -410,13 +369,11 @@ void PSAdaptiveSizePolicy::compute_generation_free_space(
// into the old gen.
desired_eden_size = MAX2(eden_limit, cur_eden);
}
- desired_promo_size = MIN2(desired_promo_size, promo_limit);
-
if (PrintAdaptiveSizePolicy) {
// Timing stats
gclog_or_tty->print(
- "PSAdaptiveSizePolicy::compute_generation_free_space: costs"
+ "PSAdaptiveSizePolicy::compute_eden_space_size: costs"
" minor_time: %f"
" major_cost: %f"
" mutator_cost: %f"
@@ -453,20 +410,215 @@ void PSAdaptiveSizePolicy::compute_generation_free_space(
}
// And finally, our old and new sizes.
- gclog_or_tty->print(" old_promo_size: " SIZE_FORMAT
- " old_eden_size: " SIZE_FORMAT
- " desired_promo_size: " SIZE_FORMAT
+ gclog_or_tty->print(" old_eden_size: " SIZE_FORMAT
" desired_eden_size: " SIZE_FORMAT,
- _promo_size, _eden_size,
- desired_promo_size, desired_eden_size);
+ _eden_size, desired_eden_size);
gclog_or_tty->cr();
}
- decay_supplemental_growth(is_full_gc);
+ set_eden_size(desired_eden_size);
+}
+
+void PSAdaptiveSizePolicy::compute_old_gen_free_space(
+ size_t old_live,
+ size_t cur_eden,
+ size_t max_old_gen_size,
+ bool is_full_gc) {
+
+ // Update statistics
+ // Time statistics are updated as we go, update footprint stats here
+ if (is_full_gc) {
+ // old_live is only accurate after a full gc
+ avg_old_live()->sample(old_live);
+ }
+
+ // This code used to return if the policy was not ready, i.e.,
+ // policy_is_ready() returning false. The intent was that
+ // decisions below needed major collection times and so could
+ // not be made before two major collections. A consequence was that
+ // adjustments to the young generation were not done until after
+ // two major collections even if the minor collections times
+ // exceeded the requested goals. Now let the young generation
+ // adjust for the minor collection times. Major collection times
+ // will be zero for the first collection and will naturally be
+ // ignored. Tenured generation adjustments are only made at the
+ // full collections so until the second major collection has
+ // been reached, no tenured generation adjustments will be made.
+
+ // Until we know better, desired promotion size uses the last calculation
+ size_t desired_promo_size = _promo_size;
+
+ // Start eden at the current value. The desired value that is stored
+ // in _eden_size is not bounded by constraints of the heap and can
+ // run away.
+ //
+ // As expected, setting desired_eden_size to the current
+ // value of desired_eden_size as a starting point
+ // caused desired_eden_size to grow way too large and caused
+ // an overflow downstream. It may have improved performance in
+ // some cases but is dangerous.
+ size_t desired_eden_size = cur_eden;
+
+ // Cache some values. There's a bit of work getting these, so
+ // we might save a little time.
+ const double major_cost = major_gc_cost();
+ const double minor_cost = minor_gc_cost();
+
+ // Limits on our growth
+ size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
+
+ // But don't force a promo size below the current promo size. Otherwise,
+ // the promo size will shrink for no good reason.
+ promo_limit = MAX2(promo_limit, _promo_size);
+
+ const double gc_cost_limit = GCTimeLimit/100.0;
+
+ // Which way should we go?
+ // if pause requirement is not met
+ // adjust size of any generation with average pause exceeding
+ // the pause limit. Adjust one pause at a time (the larger)
+ // and only make adjustments for the major pause at full collections.
+ // else if throughput requirement not met
+ // adjust the size of the generation with larger gc time. Only
+ // adjust one generation at a time.
+ // else
+ // adjust down the total heap size. Adjust down the larger of the
+ // generations.
+
+ // Add some checks for a threshold for a change. For example,
+ // a change less than the necessary alignment is probably not worth
+ // attempting.
+
+ if ((_avg_minor_pause->padded_average() > gc_pause_goal_sec()) ||
+ (_avg_major_pause->padded_average() > gc_pause_goal_sec())) {
+ //
+ // Check pauses
+ //
+ // Make changes only to affect one of the pauses (the larger)
+ // at a time.
+ if (is_full_gc) {
+ set_decide_at_full_gc(decide_at_full_gc_true);
+ adjust_promo_for_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);
+ }
+ } else if (_avg_minor_pause->padded_average() > gc_minor_pause_goal_sec()) {
+ // Adjust only for the minor pause time goal
+ adjust_promo_for_minor_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);
+ } else if(adjusted_mutator_cost() < _throughput_goal) {
+ // This branch used to require that mutator_cost() > 0.0 in 1.4.2.
+ // This sometimes resulted in skipping to the minimize footprint
+ // code. Change this to try and reduce GC time if mutator time is
+ // negative for whatever reason. Or for future consideration,
+ // bail out of the code if mutator time is negative.
+ //
+ // Throughput
+ //
+ assert(major_cost >= 0.0, "major cost is < 0.0");
+ assert(minor_cost >= 0.0, "minor cost is < 0.0");
+ // Try to reduce the GC times.
+ if (is_full_gc) {
+ set_decide_at_full_gc(decide_at_full_gc_true);
+ adjust_promo_for_throughput(is_full_gc, &desired_promo_size);
+ }
+ } else {
+
+ // Be conservative about reducing the footprint.
+ // Do a minimum number of major collections first.
+ // Have reasonable averages for major and minor collections costs.
+ if (UseAdaptiveSizePolicyFootprintGoal &&
+ young_gen_policy_is_ready() &&
+ avg_major_gc_cost()->average() >= 0.0 &&
+ avg_minor_gc_cost()->average() >= 0.0) {
+ if (is_full_gc) {
+ set_decide_at_full_gc(decide_at_full_gc_true);
+ size_t desired_sum = desired_eden_size + desired_promo_size;
+ desired_promo_size = adjust_promo_for_footprint(desired_promo_size, desired_sum);
+ }
+ }
+ }
+
+ // Note we make the same tests as in the code block below; the code
+ // seems a little easier to read with the printing in another block.
+ if (PrintAdaptiveSizePolicy) {
+ if (desired_promo_size > promo_limit) {
+ // "free_in_old_gen" was the original value for used for promo_limit
+ size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
+ gclog_or_tty->print_cr(
+ "PSAdaptiveSizePolicy::compute_old_gen_free_space limits:"
+ " desired_promo_size: " SIZE_FORMAT
+ " promo_limit: " SIZE_FORMAT
+ " free_in_old_gen: " SIZE_FORMAT
+ " max_old_gen_size: " SIZE_FORMAT
+ " avg_old_live: " SIZE_FORMAT,
+ desired_promo_size, promo_limit, free_in_old_gen,
+ max_old_gen_size, (size_t) avg_old_live()->average());
+ }
+ if (gc_cost() > gc_cost_limit) {
+ gclog_or_tty->print_cr(
+ "PSAdaptiveSizePolicy::compute_old_gen_free_space: gc time limit"
+ " gc_cost: %f "
+ " GCTimeLimit: %d",
+ gc_cost(), GCTimeLimit);
+ }
+ }
+
+ // Align everything and make a final limit check
+ const size_t alignment = _intra_generation_alignment;
+ desired_promo_size = align_size_up(desired_promo_size, alignment);
+ desired_promo_size = MAX2(desired_promo_size, alignment);
+
+ promo_limit = align_size_down(promo_limit, alignment);
+
+ // And one last limit check, now that we've aligned things.
+ desired_promo_size = MIN2(desired_promo_size, promo_limit);
+
+ if (PrintAdaptiveSizePolicy) {
+ // Timing stats
+ gclog_or_tty->print(
+ "PSAdaptiveSizePolicy::compute_old_gen_free_space: costs"
+ " minor_time: %f"
+ " major_cost: %f"
+ " mutator_cost: %f"
+ " throughput_goal: %f",
+ minor_gc_cost(), major_gc_cost(), mutator_cost(),
+ _throughput_goal);
+
+ // We give more details if Verbose is set
+ if (Verbose) {
+ gclog_or_tty->print( " minor_pause: %f"
+ " major_pause: %f"
+ " minor_interval: %f"
+ " major_interval: %f"
+ " pause_goal: %f",
+ _avg_minor_pause->padded_average(),
+ _avg_major_pause->padded_average(),
+ _avg_minor_interval->average(),
+ _avg_major_interval->average(),
+ gc_pause_goal_sec());
+ }
+
+ // Footprint stats
+ gclog_or_tty->print( " live_space: " SIZE_FORMAT
+ " free_space: " SIZE_FORMAT,
+ live_space(), free_space());
+ // More detail
+ if (Verbose) {
+ gclog_or_tty->print( " base_footprint: " SIZE_FORMAT
+ " avg_young_live: " SIZE_FORMAT
+ " avg_old_live: " SIZE_FORMAT,
+ (size_t)_avg_base_footprint->average(),
+ (size_t)avg_young_live()->average(),
+ (size_t)avg_old_live()->average());
+ }
+
+ // And finally, our old and new sizes.
+ gclog_or_tty->print(" old_promo_size: " SIZE_FORMAT
+ " desired_promo_size: " SIZE_FORMAT,
+ _promo_size, desired_promo_size);
+ gclog_or_tty->cr();
+ }
set_promo_size(desired_promo_size);
- set_eden_size(desired_eden_size);
-};
+}
void PSAdaptiveSizePolicy::decay_supplemental_growth(bool is_full_gc) {
// Decay the supplemental increment? Decay the supplement growth
@@ -490,42 +642,24 @@ void PSAdaptiveSizePolicy::decay_supplemental_growth(bool is_full_gc) {
}
}
-void PSAdaptiveSizePolicy::adjust_for_minor_pause_time(bool is_full_gc,
+void PSAdaptiveSizePolicy::adjust_promo_for_minor_pause_time(bool is_full_gc,
size_t* desired_promo_size_ptr, size_t* desired_eden_size_ptr) {
- // Adjust the young generation size to reduce pause time of
- // of collections.
- //
- // The AdaptiveSizePolicyInitializingSteps test is not used
- // here. It has not seemed to be needed but perhaps should
- // be added for consistency.
- if (minor_pause_young_estimator()->decrement_will_decrease()) {
- // reduce eden size
- set_change_young_gen_for_min_pauses(
- decrease_young_gen_for_min_pauses_true);
- *desired_eden_size_ptr = *desired_eden_size_ptr -
- eden_decrement_aligned_down(*desired_eden_size_ptr);
- } else {
- // EXPERIMENTAL ADJUSTMENT
- // Only record that the estimator indicated such an action.
- // *desired_eden_size_ptr = *desired_eden_size_ptr + eden_heap_delta;
- set_change_young_gen_for_min_pauses(
- increase_young_gen_for_min_pauses_true);
- }
if (PSAdjustTenuredGenForMinorPause) {
+ if (is_full_gc) {
+ set_decide_at_full_gc(decide_at_full_gc_true);
+ }
// If the desired eden size is as small as it will get,
// try to adjust the old gen size.
if (*desired_eden_size_ptr <= _intra_generation_alignment) {
// Vary the old gen size to reduce the young gen pause. This
// may not be a good idea. This is just a test.
if (minor_pause_old_estimator()->decrement_will_decrease()) {
- set_change_old_gen_for_min_pauses(
- decrease_old_gen_for_min_pauses_true);
+ set_change_old_gen_for_min_pauses(decrease_old_gen_for_min_pauses_true);
*desired_promo_size_ptr =
_promo_size - promo_decrement_aligned_down(*desired_promo_size_ptr);
} else {
- set_change_old_gen_for_min_pauses(
- increase_old_gen_for_min_pauses_true);
+ set_change_old_gen_for_min_pauses(increase_old_gen_for_min_pauses_true);
size_t promo_heap_delta =
promo_increment_with_supplement_aligned_up(*desired_promo_size_ptr);
if ((*desired_promo_size_ptr + promo_heap_delta) >
@@ -538,23 +672,41 @@ void PSAdaptiveSizePolicy::adjust_for_minor_pause_time(bool is_full_gc,
}
}
-void PSAdaptiveSizePolicy::adjust_for_pause_time(bool is_full_gc,
+void PSAdaptiveSizePolicy::adjust_eden_for_minor_pause_time(bool is_full_gc,
+ size_t* desired_eden_size_ptr) {
+
+ // Adjust the young generation size to reduce the pause time
+ // of collections.
+ //
+ // The AdaptiveSizePolicyInitializingSteps test is not used
+ // here. It has not seemed to be needed but perhaps should
+ // be added for consistency.
+ if (minor_pause_young_estimator()->decrement_will_decrease()) {
+ // reduce eden size
+ set_change_young_gen_for_min_pauses(
+ decrease_young_gen_for_min_pauses_true);
+ *desired_eden_size_ptr = *desired_eden_size_ptr -
+ eden_decrement_aligned_down(*desired_eden_size_ptr);
+ } else {
+ // EXPERIMENTAL ADJUSTMENT
+ // Only record that the estimator indicated such an action.
+ // *desired_eden_size_ptr = *desired_eden_size_ptr + eden_heap_delta;
+ set_change_young_gen_for_min_pauses(
+ increase_young_gen_for_min_pauses_true);
+ }
+}
+
+void PSAdaptiveSizePolicy::adjust_promo_for_pause_time(bool is_full_gc,
size_t* desired_promo_size_ptr,
size_t* desired_eden_size_ptr) {
size_t promo_heap_delta = 0;
- size_t eden_heap_delta = 0;
- // Add some checks for a threshhold for a change. For example,
+ // Add some checks for a threshold for a change. For example,
// a change less than the required alignment is probably not worth
// attempting.
- if (is_full_gc) {
- set_decide_at_full_gc(decide_at_full_gc_true);
- }
if (_avg_minor_pause->padded_average() > _avg_major_pause->padded_average()) {
- adjust_for_minor_pause_time(is_full_gc,
- desired_promo_size_ptr,
- desired_eden_size_ptr);
+ adjust_promo_for_minor_pause_time(is_full_gc, desired_promo_size_ptr, desired_eden_size_ptr);
// major pause adjustments
} else if (is_full_gc) {
// Adjust for the major pause time only at full gc's because the
@@ -573,6 +725,33 @@ void PSAdaptiveSizePolicy::adjust_for_pause_time(bool is_full_gc,
// promo_increment_aligned_up(*desired_promo_size_ptr);
set_change_old_gen_for_maj_pauses(increase_old_gen_for_maj_pauses_true);
}
+ }
+
+ if (PrintAdaptiveSizePolicy && Verbose) {
+ gclog_or_tty->print_cr(
+ "PSAdaptiveSizePolicy::adjust_promo_for_pause_time "
+ "adjusting gen sizes for major pause (avg %f goal %f). "
+ "desired_promo_size " SIZE_FORMAT " promo delta " SIZE_FORMAT,
+ _avg_major_pause->average(), gc_pause_goal_sec(),
+ *desired_promo_size_ptr, promo_heap_delta);
+ }
+}
+
+void PSAdaptiveSizePolicy::adjust_eden_for_pause_time(bool is_full_gc,
+ size_t* desired_promo_size_ptr,
+ size_t* desired_eden_size_ptr) {
+
+ size_t eden_heap_delta = 0;
+ // Add some checks for a threshold for a change. For example,
+ // a change less than the required alignment is probably not worth
+ // attempting.
+ if (_avg_minor_pause->padded_average() > _avg_major_pause->padded_average()) {
+ adjust_eden_for_minor_pause_time(is_full_gc,
+ desired_eden_size_ptr);
+ // major pause adjustments
+ } else if (is_full_gc) {
+ // Adjust for the major pause time only at full gc's because the
+ // effects of a change can only be seen at full gc's.
if (PSAdjustYoungGenForMajorPause) {
// If the promo size is at the minimum (i.e., the old gen
// size will not actually decrease), consider changing the
@@ -607,43 +786,35 @@ void PSAdaptiveSizePolicy::adjust_for_pause_time(bool is_full_gc,
if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(
- "AdaptiveSizePolicy::compute_generation_free_space "
+ "PSAdaptiveSizePolicy::adjust_eden_for_pause_time "
"adjusting gen sizes for major pause (avg %f goal %f). "
- "desired_promo_size " SIZE_FORMAT "desired_eden_size "
- SIZE_FORMAT
- " promo delta " SIZE_FORMAT " eden delta " SIZE_FORMAT,
+ "desired_eden_size " SIZE_FORMAT " eden delta " SIZE_FORMAT,
_avg_major_pause->average(), gc_pause_goal_sec(),
- *desired_promo_size_ptr, *desired_eden_size_ptr,
- promo_heap_delta, eden_heap_delta);
+ *desired_eden_size_ptr, eden_heap_delta);
}
}
-void PSAdaptiveSizePolicy::adjust_for_throughput(bool is_full_gc,
- size_t* desired_promo_size_ptr,
- size_t* desired_eden_size_ptr) {
+void PSAdaptiveSizePolicy::adjust_promo_for_throughput(bool is_full_gc,
+ size_t* desired_promo_size_ptr) {
- // Add some checks for a threshhold for a change. For example,
+ // Add some checks for a threshold for a change. For example,
// a change less than the required alignment is probably not worth
// attempting.
- if (is_full_gc) {
- set_decide_at_full_gc(decide_at_full_gc_true);
- }
if ((gc_cost() + mutator_cost()) == 0.0) {
return;
}
if (PrintAdaptiveSizePolicy && Verbose) {
- gclog_or_tty->print("\nPSAdaptiveSizePolicy::adjust_for_throughput("
- "is_full: %d, promo: " SIZE_FORMAT ", cur_eden: " SIZE_FORMAT "): ",
- is_full_gc, *desired_promo_size_ptr, *desired_eden_size_ptr);
+ gclog_or_tty->print("\nPSAdaptiveSizePolicy::adjust_promo_for_throughput("
+ "is_full: %d, promo: " SIZE_FORMAT "): ",
+ is_full_gc, *desired_promo_size_ptr);
gclog_or_tty->print_cr("mutator_cost %f major_gc_cost %f "
"minor_gc_cost %f", mutator_cost(), major_gc_cost(), minor_gc_cost());
}
// Tenured generation
if (is_full_gc) {
-
// Calculate the change to use for the tenured gen.
size_t scaled_promo_heap_delta = 0;
// Can the increment to the generation be scaled?
@@ -720,6 +891,26 @@ void PSAdaptiveSizePolicy::adjust_for_throughput(bool is_full_gc,
*desired_promo_size_ptr, scaled_promo_heap_delta);
}
}
+}
+
+void PSAdaptiveSizePolicy::adjust_eden_for_throughput(bool is_full_gc,
+ size_t* desired_eden_size_ptr) {
+
+ // Add some checks for a threshold for a change. For example,
+ // a change less than the required alignment is probably not worth
+ // attempting.
+
+ if ((gc_cost() + mutator_cost()) == 0.0) {
+ return;
+ }
+
+ if (PrintAdaptiveSizePolicy && Verbose) {
+ gclog_or_tty->print("\nPSAdaptiveSizePolicy::adjust_eden_for_throughput("
+ "is_full: %d, cur_eden: " SIZE_FORMAT "): ",
+ is_full_gc, *desired_eden_size_ptr);
+ gclog_or_tty->print_cr("mutator_cost %f major_gc_cost %f "
+ "minor_gc_cost %f", mutator_cost(), major_gc_cost(), minor_gc_cost());
+ }
// Young generation
size_t scaled_eden_heap_delta = 0;
@@ -810,7 +1001,7 @@ size_t PSAdaptiveSizePolicy::adjust_promo_for_footprint(
if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(
- "AdaptiveSizePolicy::compute_generation_free_space "
+ "AdaptiveSizePolicy::adjust_promo_for_footprint "
"adjusting tenured gen for footprint. "
"starting promo size " SIZE_FORMAT
" reduced promo size " SIZE_FORMAT,
@@ -834,7 +1025,7 @@ size_t PSAdaptiveSizePolicy::adjust_eden_for_footprint(
if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(
- "AdaptiveSizePolicy::compute_generation_free_space "
+ "AdaptiveSizePolicy::adjust_eden_for_footprint "
"adjusting eden for footprint. "
" starting eden size " SIZE_FORMAT
" reduced eden size " SIZE_FORMAT
@@ -1059,14 +1250,13 @@ uint PSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
avg_promoted()->deviation());
}
- gclog_or_tty->print( " avg_promoted_padded_avg: %f"
+ gclog_or_tty->print_cr( " avg_promoted_padded_avg: %f"
" avg_pretenured_padded_avg: %f"
" tenuring_thresh: %d"
" target_size: " SIZE_FORMAT,
avg_promoted()->padded_average(),
_avg_pretenured->padded_average(),
tenuring_threshold, target_size);
- tty->cr();
}
set_survivor_size(target_size);
@@ -1088,8 +1278,8 @@ void PSAdaptiveSizePolicy::update_averages(bool is_survivor_overflow,
avg_promoted()->sample(promoted + _avg_pretenured->padded_average());
if (PrintAdaptiveSizePolicy) {
- gclog_or_tty->print(
- "AdaptiveSizePolicy::compute_survivor_space_size_and_thresh:"
+ gclog_or_tty->print_cr(
+ "AdaptiveSizePolicy::update_averages:"
" survived: " SIZE_FORMAT
" promoted: " SIZE_FORMAT
" overflow: %s",
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp
index 030cb9da1..b0cb8c7ef 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -136,18 +136,24 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
double gc_minor_pause_goal_sec() const { return _gc_minor_pause_goal_sec; }
// Change the young generation size to achieve a minor GC pause time goal
- void adjust_for_minor_pause_time(bool is_full_gc,
+ void adjust_promo_for_minor_pause_time(bool is_full_gc,
size_t* desired_promo_size_ptr,
size_t* desired_eden_size_ptr);
+ void adjust_eden_for_minor_pause_time(bool is_full_gc,
+ size_t* desired_eden_size_ptr);
// Change the generation sizes to achieve a GC pause time goal
// Returned sizes are not necessarily aligned.
- void adjust_for_pause_time(bool is_full_gc,
+ void adjust_promo_for_pause_time(bool is_full_gc,
+ size_t* desired_promo_size_ptr,
+ size_t* desired_eden_size_ptr);
+ void adjust_eden_for_pause_time(bool is_full_gc,
size_t* desired_promo_size_ptr,
size_t* desired_eden_size_ptr);
// Change the generation sizes to achieve an application throughput goal
// Returned sizes are not necessarily aligned.
- void adjust_for_throughput(bool is_full_gc,
- size_t* desired_promo_size_ptr,
+ void adjust_promo_for_throughput(bool is_full_gc,
+ size_t* desired_promo_size_ptr);
+ void adjust_eden_for_throughput(bool is_full_gc,
size_t* desired_eden_size_ptr);
// Change the generation sizes to achieve minimum footprint
// Returned sizes are not aligned.
@@ -168,9 +174,6 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
size_t promo_decrement_aligned_down(size_t cur_promo);
size_t promo_increment_with_supplement_aligned_up(size_t cur_promo);
- // Decay the supplemental growth additive.
- void decay_supplemental_growth(bool is_full_gc);
-
// Returns a change that has been scaled down. Result
// is not aligned. (If useful, move to some shared
// location.)
@@ -336,20 +339,29 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
// perform a Full GC?
bool should_full_GC(size_t live_in_old_gen);
- // Calculates optimial free space sizes for both the old and young
+ // Calculates optimal (free) space sizes for both the young and old
// generations. Stores results in _eden_size and _promo_size.
// Takes current used space in all generations as input, as well
// as an indication if a full gc has just been performed, for use
// in deciding if an OOM error should be thrown.
- void compute_generation_free_space(size_t young_live,
- size_t eden_live,
- size_t old_live,
- size_t cur_eden, // current eden in bytes
- size_t max_old_gen_size,
- size_t max_eden_size,
- bool is_full_gc,
- GCCause::Cause gc_cause,
- CollectorPolicy* collector_policy);
+ void compute_generations_free_space(size_t young_live,
+ size_t eden_live,
+ size_t old_live,
+ size_t cur_eden, // current eden in bytes
+ size_t max_old_gen_size,
+ size_t max_eden_size,
+ bool is_full_gc);
+
+ void compute_eden_space_size(size_t young_live,
+ size_t eden_live,
+ size_t cur_eden, // current eden in bytes
+ size_t max_eden_size,
+ bool is_full_gc);
+
+ void compute_old_gen_free_space(size_t old_live,
+ size_t cur_eden, // current eden in bytes
+ size_t max_old_gen_size,
+ bool is_full_gc);
// Calculates new survivor space size; returns a new tenuring threshold
// value. Stores new survivor size in _survivor_size.
@@ -390,6 +402,9 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
// Printing support
virtual bool print_adaptive_size_policy_on(outputStream* st) const;
+
+ // Decay the supplemental growth additive.
+ void decay_supplemental_growth(bool is_full_gc);
};
#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSADAPTIVESIZEPOLICY_HPP
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
index 62c578f33..415d62947 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -187,11 +187,8 @@ void ParCompactionManager::follow_marking_stacks() {
// Process ObjArrays one at a time to avoid marking stack bloat.
ObjArrayTask task;
- if (_objarray_stack.pop_overflow(task)) {
- ObjArrayKlass* const k = (ObjArrayKlass*)task.obj()->klass();
- k->oop_follow_contents(this, task.obj(), task.index());
- } else if (_objarray_stack.pop_local(task)) {
- ObjArrayKlass* const k = (ObjArrayKlass*)task.obj()->klass();
+ if (_objarray_stack.pop_overflow(task) || _objarray_stack.pop_local(task)) {
+ ObjArrayKlass* k = (ObjArrayKlass*)task.obj()->klass();
k->oop_follow_contents(this, task.obj(), task.index());
}
} while (!marking_stacks_empty());
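The rewritten pop relies on short-circuit evaluation: pop_local runs only when pop_overflow fails, and whichever call succeeds has already written task. A self-contained illustration of the idiom using trivial deque-backed queues (not the HotSpot task queues):

#include <deque>

static bool pop_from(std::deque<int>& q, int& out) {
  if (q.empty()) return false;
  out = q.front();
  q.pop_front();
  return true;
}

static void drain_one(std::deque<int>& overflow_q, std::deque<int>& local_q) {
  int task;
  if (pop_from(overflow_q, task) || pop_from(local_q, task)) {
    (void)task;   // placeholder for the oop_follow_contents processing step
  }
}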
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp
index c68397056..1db224431 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -119,7 +119,7 @@ class PSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
ps_size_policy()->change_old_gen_for_min_pauses());
}
- // compute_generation_free_space() statistics
+ // compute_generations_free_space() statistics
inline void update_avg_major_pause() {
_avg_major_pause->set_value(
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
index adbaee43f..dcdc21806 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,10 @@
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
@@ -92,8 +96,8 @@ void PSMarkSweep::invoke(bool maximum_heap_compaction) {
const bool clear_all_soft_refs =
heap->collector_policy()->should_clear_all_soft_refs();
- int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount;
- IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
+ uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
+ UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}
@@ -108,8 +112,12 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
}
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
- GCCause::Cause gc_cause = heap->gc_cause();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+ GCCause::Cause gc_cause = heap->gc_cause();
+
+ _gc_timer->register_gc_start(os::elapsed_counter());
+ _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
+
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
// The scope of casr should end after code that can change
@@ -131,6 +139,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
heap->print_heap_before_gc();
+ heap->trace_heap_before_gc(_gc_tracer);
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
@@ -147,7 +156,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
old_gen->verify_object_start_array();
}
- heap->pre_full_gc_dump();
+ heap->pre_full_gc_dump(_gc_timer);
// Filled in below to track the state of the young gen after the collection.
bool eden_empty;
@@ -159,7 +168,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
- TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
+ GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
@@ -277,18 +286,36 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
young_gen->from_space()->capacity_in_bytes() +
young_gen->to_space()->capacity_in_bytes(),
"Sizes of space in young gen are out-of-bounds");
+
+ size_t young_live = young_gen->used_in_bytes();
+ size_t eden_live = young_gen->eden_space()->used_in_bytes();
+ size_t old_live = old_gen->used_in_bytes();
+ size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
+ size_t max_old_gen_size = old_gen->max_gen_size();
size_t max_eden_size = young_gen->max_size() -
young_gen->from_space()->capacity_in_bytes() -
young_gen->to_space()->capacity_in_bytes();
- size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
- young_gen->eden_space()->used_in_bytes(),
- old_gen->used_in_bytes(),
- young_gen->eden_space()->capacity_in_bytes(),
- old_gen->max_gen_size(),
- max_eden_size,
- true /* full gc*/,
- gc_cause,
- heap->collector_policy());
+
+ // Used for diagnostics
+ size_policy->clear_generation_free_space_flags();
+
+ size_policy->compute_generations_free_space(young_live,
+ eden_live,
+ old_live,
+ cur_eden,
+ max_old_gen_size,
+ max_eden_size,
+ true /* full gc*/);
+
+ size_policy->check_gc_overhead_limit(young_live,
+ eden_live,
+ max_old_gen_size,
+ max_eden_size,
+ true /* full gc*/,
+ gc_cause,
+ heap->collector_policy());
+
+ size_policy->decay_supplemental_growth(true /* full gc*/);
heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
@@ -356,13 +383,18 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
heap->print_heap_after_gc();
+ heap->trace_heap_after_gc(_gc_tracer);
- heap->post_full_gc_dump();
+ heap->post_full_gc_dump(_gc_timer);
#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
#endif
+ _gc_timer->register_gc_end(os::elapsed_counter());
+
+ _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
+
return true;
}
@@ -480,7 +512,7 @@ void PSMarkSweep::deallocate_stacks() {
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
+ GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
trace(" 1");
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -513,8 +545,10 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Process reference objects found during marking
{
ref_processor()->setup_policy(clear_all_softrefs);
- ref_processor()->process_discovered_references(
- is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
+ const ReferenceProcessorStats& stats =
+ ref_processor()->process_discovered_references(
+ is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
+ gc_tracer()->report_gc_reference_stats(stats);
}
// This is the point where the entire marking should have completed.
@@ -534,11 +568,12 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
+ _gc_tracer->report_object_count_after_gc(is_alive_closure());
}
void PSMarkSweep::mark_sweep_phase2() {
- TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
+ GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
trace("2");
// Now all live objects are marked, compute the new object addresses.
@@ -562,14 +597,13 @@ void PSMarkSweep::mark_sweep_phase2() {
// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
- void do_object(oop p) { ShouldNotReachHere(); }
bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;
void PSMarkSweep::mark_sweep_phase3() {
// Adjust the pointers to reflect the new locations
- TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
+ GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
trace("3");
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -612,7 +646,7 @@ void PSMarkSweep::mark_sweep_phase3() {
void PSMarkSweep::mark_sweep_phase4() {
EventMark m("4 compact heap");
- TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
+ GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
trace("4");
// All pointers are now adjusted, move objects accordingly
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
index 9844d1afd..820696af0 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
@@ -88,8 +88,7 @@ void PSMarkSweepDecorator::precompact() {
* by the MarkSweepAlwaysCompactCount parameter. This is a significant
* performance improvement!
*/
- bool skip_dead = (MarkSweepAlwaysCompactCount < 1)
- || ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
+ bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
size_t allowed_deadspace = 0;
if (skip_dead) {
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
index ed784f30d..cf8f574dd 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
@@ -110,7 +110,7 @@ class PSOldGen : public CHeapObj<mtGC> {
virtual void initialize(ReservedSpace rs, size_t alignment,
const char* perf_data_name, int level);
void initialize_virtual_space(ReservedSpace rs, size_t alignment);
- void initialize_work(const char* perf_data_name, int level);
+ virtual void initialize_work(const char* perf_data_name, int level);
virtual void initialize_performance_counters(const char* perf_data_name, int level);
MemRegion reserved() const { return _reserved; }
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index b03107a97..27a42de95 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,10 @@
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
@@ -59,13 +63,25 @@
#include <math.h>
// All sizes are in HeapWords.
-const size_t ParallelCompactData::Log2RegionSize = 9; // 512 words
+const size_t ParallelCompactData::Log2RegionSize = 16; // 64K words
const size_t ParallelCompactData::RegionSize = (size_t)1 << Log2RegionSize;
const size_t ParallelCompactData::RegionSizeBytes =
RegionSize << LogHeapWordSize;
const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
-const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask;
+const size_t ParallelCompactData::RegionAddrMask = ~RegionAddrOffsetMask;
+
+const size_t ParallelCompactData::Log2BlockSize = 7; // 128 words
+const size_t ParallelCompactData::BlockSize = (size_t)1 << Log2BlockSize;
+const size_t ParallelCompactData::BlockSizeBytes =
+ BlockSize << LogHeapWordSize;
+const size_t ParallelCompactData::BlockSizeOffsetMask = BlockSize - 1;
+const size_t ParallelCompactData::BlockAddrOffsetMask = BlockSizeBytes - 1;
+const size_t ParallelCompactData::BlockAddrMask = ~BlockAddrOffsetMask;
+
+const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
+const size_t ParallelCompactData::Log2BlocksPerRegion =
+ Log2RegionSize - Log2BlockSize;
const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_shift = 27;
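The hunk above grows the region size from 512 words to 64K words and layers a finer-grained block table on top. A minimal standalone sketch checking the derived sizes, assuming 8-byte HeapWords (LogHeapWordSize == 3); this is not HotSpot code:

    #include <cstddef>

    const size_t LogHeapWordSize = 3;                   // assumption: 64-bit words
    const size_t Log2RegionSize  = 16;                  // 64K words
    const size_t RegionSize      = size_t(1) << Log2RegionSize;
    const size_t Log2BlockSize   = 7;                   // 128 words
    const size_t BlockSize       = size_t(1) << Log2BlockSize;

    static_assert((RegionSize << LogHeapWordSize) == 512 * 1024, "region = 512KB");
    static_assert((BlockSize << LogHeapWordSize) == 1024, "block = 1KB");
    static_assert(RegionSize / BlockSize == 512, "512 blocks per region");
    static_assert((RegionSize & (BlockSize - 1)) == 0, "blocks tile a region exactly");

    int main() { return 0; }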
@@ -356,8 +372,13 @@ ParallelCompactData::ParallelCompactData()
_region_start = 0;
_region_vspace = 0;
+ _reserved_byte_size = 0;
_region_data = 0;
_region_count = 0;
+
+ _block_vspace = 0;
+ _block_data = 0;
+ _block_count = 0;
}
bool ParallelCompactData::initialize(MemRegion covered_region)
@@ -371,8 +392,7 @@ bool ParallelCompactData::initialize(MemRegion covered_region)
assert((region_size & RegionSizeOffsetMask) == 0,
"region size not a multiple of RegionSize");
- bool result = initialize_region_data(region_size);
-
+ bool result = initialize_region_data(region_size) && initialize_block_data();
return result;
}
@@ -382,11 +402,11 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
const size_t raw_bytes = count * element_size;
const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
const size_t granularity = os::vm_allocation_granularity();
- const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
+ _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
- ReservedSpace rs(bytes, rs_align, rs_align > 0);
+ ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
rs.size());
@@ -394,7 +414,7 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
if (vspace != 0) {
- if (vspace->expand_by(bytes)) {
+ if (vspace->expand_by(_reserved_byte_size)) {
return vspace;
}
delete vspace;
@@ -417,17 +437,36 @@ bool ParallelCompactData::initialize_region_data(size_t region_size)
return false;
}
+bool ParallelCompactData::initialize_block_data()
+{
+ assert(_region_count != 0, "region data must be initialized first");
+ const size_t count = _region_count << Log2BlocksPerRegion;
+ _block_vspace = create_vspace(count, sizeof(BlockData));
+ if (_block_vspace != 0) {
+ _block_data = (BlockData*)_block_vspace->reserved_low_addr();
+ _block_count = count;
+ return true;
+ }
+ return false;
+}
+
void ParallelCompactData::clear()
{
memset(_region_data, 0, _region_vspace->committed_size());
+ memset(_block_data, 0, _block_vspace->committed_size());
}
void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
assert(beg_region <= _region_count, "beg_region out of range");
assert(end_region <= _region_count, "end_region out of range");
+ assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");
const size_t region_cnt = end_region - beg_region;
memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
+
+ const size_t beg_block = beg_region * BlocksPerRegion;
+ const size_t block_cnt = region_cnt * BlocksPerRegion;
+ memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
}
HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
@@ -706,49 +745,48 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,
HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
assert(addr != NULL, "Should detect NULL oop earlier");
- assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
-#ifdef ASSERT
- if (PSParallelCompact::mark_bitmap()->is_unmarked(addr)) {
- gclog_or_tty->print_cr("calc_new_pointer:: addr " PTR_FORMAT, addr);
- }
-#endif
- assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
+ assert(PSParallelCompact::gc_heap()->is_in(addr), "not in heap");
+ assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
// Region covering the object.
- size_t region_index = addr_to_region_idx(addr);
- const RegionData* const region_ptr = region(region_index);
- HeapWord* const region_addr = region_align_down(addr);
-
- assert(addr < region_addr + RegionSize, "Region does not cover object");
- assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");
-
+ RegionData* const region_ptr = addr_to_region_ptr(addr);
HeapWord* result = region_ptr->destination();
- // If all the data in the region is live, then the new location of the object
- // can be calculated from the destination of the region plus the offset of the
- // object in the region.
+ // If the entire Region is live, the new location is region->destination + the
+ // offset of the object within the Region.
+
+ // Run some performance tests to determine if this special case pays off. It
+ // is worth it for pointers into the dense prefix. If the optimization to
+ // avoid pointer updates in regions that only point to the dense prefix is
+ // ever implemented, this should be revisited.
if (region_ptr->data_size() == RegionSize) {
- result += pointer_delta(addr, region_addr);
- DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
+ result += region_offset(addr);
return result;
}
- // The new location of the object is
- // region destination +
- // size of the partial object extending onto the region +
- // sizes of the live objects in the Region that are to the left of addr
- const size_t partial_obj_size = region_ptr->partial_obj_size();
- HeapWord* const search_start = region_addr + partial_obj_size;
+ // Otherwise, the new location is region->destination + block offset + the
+ // number of live words in the Block that are (a) to the left of addr and (b)
+ // due to objects that start in the Block.
- const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
- size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
+ // Fill in the block table if necessary. This is unsynchronized, so multiple
+ // threads may fill the block table for a region (harmless, since it is
+ // idempotent).
+ if (!region_ptr->blocks_filled()) {
+ PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
+ region_ptr->set_blocks_filled();
+ }
+
+ HeapWord* const search_start = block_align_down(addr);
+ const size_t block_offset = addr_to_block_ptr(addr)->offset();
- result += partial_obj_size + live_to_left;
- DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
+ const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
+ const size_t live = bitmap->live_words_in_range(search_start, oop(addr));
+ result += block_offset + live;
+ DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
return result;
}
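A worked example of the two paths in the rewritten calc_new_pointer(), as a standalone sketch with hypothetical numbers (word indices stand in for HeapWord*; the block offset and live-word counts are assumed values, not read from a real bitmap):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t destination = 100000;  // region_ptr->destination(), assumed
      const size_t region_off  = 300;     // object's word offset in its region

      // Fast path: region_ptr->data_size() == RegionSize, every word is live.
      size_t fast = destination + region_off;                     // 100300
      assert(fast == 100300);

      // Slow path: word 300 lies in block 2 (300 / 128), which starts at word
      // 256. The block table supplies live words left of block 2; the bitmap
      // supplies live words between word 256 and the object.
      const size_t block_offset  = 180;   // addr_to_block_ptr(addr)->offset(), assumed
      const size_t live_in_block = 20;    // live_words_in_range(256, addr), assumed
      size_t slow = destination + block_offset + live_in_block;   // 100200
      assert(slow == 100200);
      return 0;
    }

Per the BlockData comment in the header hunk below, the payoff is that the bitmap search is bounded to one 128-word block instead of a whole region.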
-#ifdef ASSERT
+#ifdef ASSERT
void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
{
const size_t* const beg = (const size_t*)vspace->committed_low_addr();
@@ -761,16 +799,12 @@ void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
void ParallelCompactData::verify_clear()
{
verify_clear(_region_vspace);
+ verify_clear(_block_vspace);
}
#endif // #ifdef ASSERT
-#ifdef NOT_PRODUCT
-ParallelCompactData::RegionData* debug_region(size_t region_index) {
- ParallelCompactData& sd = PSParallelCompact::summary_data();
- return sd.region(region_index);
-}
-#endif
-
+STWGCTimer PSParallelCompact::_gc_timer;
+ParallelOldTracer PSParallelCompact::_gc_tracer;
elapsedTimer PSParallelCompact::_accumulated_time;
unsigned int PSParallelCompact::_total_invocations = 0;
unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
@@ -781,7 +815,6 @@ ParallelCompactData PSParallelCompact::_summary_data;
PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
-void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
@@ -841,14 +874,18 @@ bool PSParallelCompact::initialize() {
initialize_dead_wood_limiter();
if (!_mark_bitmap.initialize(mr)) {
- vm_shutdown_during_initialization("Unable to allocate bit map for "
- "parallel garbage collection for the requested heap size.");
+ vm_shutdown_during_initialization(
+ err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
+ "garbage collection for the requested " SIZE_FORMAT "KB heap.",
+ _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
return false;
}
if (!_summary_data.initialize(mr)) {
- vm_shutdown_during_initialization("Unable to allocate tables for "
- "parallel garbage collection for the requested heap size.");
+ vm_shutdown_during_initialization(
+ err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel "
+ "garbage collection for the requested " SIZE_FORMAT "KB heap.",
+ _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
return false;
}
@@ -941,14 +978,13 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
// at each young gen gc. Do the update unconditionally (even though a
// promotion failure does not swap spaces) because an unknown number of minor
// collections will have swapped the spaces an unknown number of times.
- TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
ParallelScavengeHeap* heap = gc_heap();
_space_info[from_space_id].set_space(heap->young_gen()->from_space());
_space_info[to_space_id].set_space(heap->young_gen()->to_space());
pre_gc_values->fill(heap);
- NOT_PRODUCT(_mark_bitmap.reset_counters());
DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
@@ -959,6 +995,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
_total_invocations++;
heap->print_heap_before_gc();
+ heap->trace_heap_before_gc(&_gc_tracer);
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
@@ -984,7 +1021,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
void PSParallelCompact::post_compact()
{
- TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
// Clear the marking bitmap, summary data and split info.
@@ -1810,7 +1847,7 @@ void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
void PSParallelCompact::summary_phase(ParCompactionManager* cm,
bool maximum_compaction)
{
- TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
// trace("2");
#ifdef ASSERT
@@ -1958,11 +1995,6 @@ void PSParallelCompact::invoke(bool maximum_heap_compaction) {
maximum_heap_compaction);
}
-bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
- size_t addr_region_index = addr_to_region_idx(addr);
- return region_index == addr_region_index;
-}
-
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
@@ -1973,11 +2005,15 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
return false;
}
+ ParallelScavengeHeap* heap = gc_heap();
+
+ _gc_timer.register_gc_start(os::elapsed_counter());
+ _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
+
TimeStamp marking_start;
TimeStamp compaction_start;
TimeStamp collection_exit;
- ParallelScavengeHeap* heap = gc_heap();
GCCause::Cause gc_cause = heap->gc_cause();
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
@@ -1993,7 +2029,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
heap->record_gen_tops_before_GC();
}
- heap->pre_full_gc_dump();
+ heap->pre_full_gc_dump(&_gc_timer);
_print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
@@ -2020,7 +2056,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
- TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
+ GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);
@@ -2040,16 +2076,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
bool marked_for_unloading = false;
marking_start.update();
- marking_phase(vmthread_cm, maximum_heap_compaction);
-
-#ifndef PRODUCT
- if (TraceParallelOldGCMarkingPhase) {
- gclog_or_tty->print_cr("marking_phase: cas_tries %d cas_retries %d "
- "cas_by_another %d",
- mark_bitmap()->cas_tries(), mark_bitmap()->cas_retries(),
- mark_bitmap()->cas_by_another());
- }
-#endif // #ifndef PRODUCT
+ marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
bool max_on_system_gc = UseMaximumCompactionOnSystemGC
&& gc_cause == GCCause::_java_lang_system_gc;
@@ -2094,19 +2121,36 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
young_gen->from_space()->capacity_in_bytes() +
young_gen->to_space()->capacity_in_bytes(),
"Sizes of space in young gen are out-of-bounds");
+
+ size_t young_live = young_gen->used_in_bytes();
+ size_t eden_live = young_gen->eden_space()->used_in_bytes();
+ size_t old_live = old_gen->used_in_bytes();
+ size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
+ size_t max_old_gen_size = old_gen->max_gen_size();
size_t max_eden_size = young_gen->max_size() -
young_gen->from_space()->capacity_in_bytes() -
young_gen->to_space()->capacity_in_bytes();
- size_policy->compute_generation_free_space(
- young_gen->used_in_bytes(),
- young_gen->eden_space()->used_in_bytes(),
- old_gen->used_in_bytes(),
- young_gen->eden_space()->capacity_in_bytes(),
- old_gen->max_gen_size(),
- max_eden_size,
- true /* full gc*/,
- gc_cause,
- heap->collector_policy());
+
+ // Used for diagnostics
+ size_policy->clear_generation_free_space_flags();
+
+ size_policy->compute_generations_free_space(young_live,
+ eden_live,
+ old_live,
+ cur_eden,
+ max_old_gen_size,
+ max_eden_size,
+ true /* full gc*/);
+
+ size_policy->check_gc_overhead_limit(young_live,
+ eden_live,
+ max_old_gen_size,
+ max_eden_size,
+ true /* full gc*/,
+ gc_cause,
+ heap->collector_policy());
+
+ size_policy->decay_supplemental_growth(true /* full gc*/);
heap->resize_old_gen(
size_policy->calculated_old_free_size_in_bytes());
@@ -2185,6 +2229,8 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
collection_exit.update();
heap->print_heap_after_gc();
+ heap->trace_heap_after_gc(&_gc_tracer);
+
if (PrintGCTaskTimeStamps) {
gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
INT64_FORMAT,
@@ -2193,12 +2239,17 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
gc_task_manager()->print_task_time_stamps();
}
- heap->post_full_gc_dump();
+ heap->post_full_gc_dump(&_gc_timer);
#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
#endif
+ _gc_timer.register_gc_end(os::elapsed_counter());
+
+ _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
+ _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
+
return true;
}
@@ -2297,9 +2348,10 @@ GCTaskManager* const PSParallelCompact::gc_task_manager() {
}
void PSParallelCompact::marking_phase(ParCompactionManager* cm,
- bool maximum_heap_compaction) {
+ bool maximum_heap_compaction,
+ ParallelOldTracer *gc_tracer) {
// Recursively traverse all live objects and mark them
- TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
ParallelScavengeHeap* heap = gc_heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
@@ -2314,7 +2366,8 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
ClassLoaderDataGraph::clear_claimed_marks();
{
- TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
+
ParallelScavengeHeap::ParStrongRootsScope psrs;
GCTaskQueue* q = GCTaskQueue::create();
@@ -2327,6 +2380,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
+ q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
@@ -2341,19 +2395,24 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
// Process reference objects found during marking
{
- TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
+
+ ReferenceProcessorStats stats;
if (ref_processor()->processing_is_mt()) {
RefProcTaskExecutor task_executor;
- ref_processor()->process_discovered_references(
+ stats = ref_processor()->process_discovered_references(
is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
- &task_executor);
+ &task_executor, &_gc_timer);
} else {
- ref_processor()->process_discovered_references(
- is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL);
+ stats = ref_processor()->process_discovered_references(
+ is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
+ &_gc_timer);
}
+
+ gc_tracer->report_gc_reference_stats(stats);
}
- TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
// This is the point where the entire marking should have completed.
assert(cm->marking_stacks_empty(), "Marking should have completed");
@@ -2372,6 +2431,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
+ _gc_tracer.report_object_count_after_gc(is_alive_closure());
}
void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
@@ -2406,14 +2466,13 @@ void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm,
// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
- void do_object(oop p) { ShouldNotReachHere(); }
bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;
void PSParallelCompact::adjust_roots() {
// Adjust the pointers to reflect the new locations
- TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
// Need new claim bits when tracing through and adjusting pointers.
ClassLoaderDataGraph::clear_claimed_marks();
@@ -2449,7 +2508,7 @@ void PSParallelCompact::adjust_roots() {
void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
uint parallel_gc_threads)
{
- TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
// Find the threads that are active
unsigned int which = 0;
@@ -2523,7 +2582,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
uint parallel_gc_threads) {
- TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
ParallelCompactData& sd = PSParallelCompact::summary_data();
@@ -2605,7 +2664,7 @@ void PSParallelCompact::enqueue_region_stealing_tasks(
GCTaskQueue* q,
ParallelTaskTerminator* terminator_ptr,
uint parallel_gc_threads) {
- TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
// Once a thread has drained its stack, it should try to steal regions from
// other threads.
@@ -2616,9 +2675,44 @@ void PSParallelCompact::enqueue_region_stealing_tasks(
}
}
+#ifdef ASSERT
+// Write a histogram of the number of times the block table was filled for a
+// region.
+void PSParallelCompact::write_block_fill_histogram(outputStream* const out)
+{
+ if (!TraceParallelOldGCCompactionPhase) return;
+
+ typedef ParallelCompactData::RegionData rd_t;
+ ParallelCompactData& sd = summary_data();
+
+ for (unsigned int id = old_space_id; id < last_space_id; ++id) {
+ MutableSpace* const spc = _space_info[id].space();
+ if (spc->bottom() != spc->top()) {
+ const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
+ HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
+ const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
+
+ size_t histo[5] = { 0, 0, 0, 0, 0 };
+ const size_t histo_len = sizeof(histo) / sizeof(size_t);
+ const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
+
+ for (const rd_t* cur = beg; cur < end; ++cur) {
+ ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
+ }
+ out->print("%u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
+ for (size_t i = 0; i < histo_len; ++i) {
+ out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
+ histo[i], 100.0 * histo[i] / region_cnt);
+ }
+ out->cr();
+ }
+ }
+}
+#endif // #ifdef ASSERT
+
void PSParallelCompact::compact() {
// trace("5");
- TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
@@ -2635,7 +2729,7 @@ void PSParallelCompact::compact() {
enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
{
- TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
gc_task_manager()->execute_and_wait(q);
@@ -2649,12 +2743,14 @@ void PSParallelCompact::compact() {
{
// Update the deferred objects, if any. Any compaction manager can be used.
- TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
+ GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
ParCompactionManager* cm = ParCompactionManager::manager_array(0);
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
update_deferred_objects(cm, SpaceId(id));
}
}
+
+ DEBUG_ONLY(write_block_fill_histogram(gclog_or_tty));
}
#ifdef ASSERT
@@ -3119,6 +3215,57 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
} while (true);
}
+void PSParallelCompact::fill_blocks(size_t region_idx)
+{
+ // Fill in the block table elements for the specified region. Each block
+ // table element holds the number of live words in the region that are to the
+ // left of the first object that starts in the block. Thus only blocks in
+ // which an object starts need to be filled.
+ //
+ // The algorithm scans the section of the bitmap that corresponds to the
+ // region, keeping a running total of the live words. When an object start is
+ // found, if it's the first to start in the block that contains it, the
+ // current total is written to the block table element.
+ const size_t Log2BlockSize = ParallelCompactData::Log2BlockSize;
+ const size_t Log2RegionSize = ParallelCompactData::Log2RegionSize;
+ const size_t RegionSize = ParallelCompactData::RegionSize;
+
+ ParallelCompactData& sd = summary_data();
+ const size_t partial_obj_size = sd.region(region_idx)->partial_obj_size();
+ if (partial_obj_size >= RegionSize) {
+ return; // No objects start in this region.
+ }
+
+ // Ensure the first loop iteration decides that the block has changed.
+ size_t cur_block = sd.block_count();
+
+ const ParMarkBitMap* const bitmap = mark_bitmap();
+
+ const size_t Log2BitsPerBlock = Log2BlockSize - LogMinObjAlignment;
+ assert((size_t)1 << Log2BitsPerBlock ==
+ bitmap->words_to_bits(ParallelCompactData::BlockSize), "sanity");
+
+ size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
+ const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
+ size_t live_bits = bitmap->words_to_bits(partial_obj_size);
+ beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
+ while (beg_bit < range_end) {
+ const size_t new_block = beg_bit >> Log2BitsPerBlock;
+ if (new_block != cur_block) {
+ cur_block = new_block;
+ sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
+ }
+
+ const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
+ if (end_bit < range_end - 1) {
+ live_bits += end_bit - beg_bit + 1;
+ beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
+ } else {
+ return;
+ }
+ }
+}
+
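A toy model of the fill_blocks() scan above (standalone; a 4-word block size instead of 128, object starts and sizes given directly rather than read from a mark bitmap, and the partial object carried in from the previous region ignored):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      const size_t BlockSize = 4;                 // toy value, not 128
      const size_t starts[]  = {1, 6, 9};         // word offsets of object starts
      const size_t sizes[]   = {3, 2, 5};         // object sizes in words
      std::vector<size_t> offset(4, 0);           // per-block table entry
      std::vector<bool>   filled(4, false);

      size_t live = 0;                            // running live-word total
      for (int i = 0; i < 3; ++i) {
        const size_t blk = starts[i] / BlockSize;
        if (!filled[blk]) {                       // first start in this block
          filled[blk] = true;
          offset[blk] = live;
        }
        live += sizes[i];
      }
      // offset: block 0 -> 0, block 1 -> 3, block 2 -> 5; block 3 is never
      // written because no object starts in it, matching the comment above.
      for (size_t b = 0; b < 4; ++b)
        std::printf("block %zu: %zu live words to the left\n", b, offset[b]);
      return 0;
    }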
void
PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
const MutableSpace* sp = space(space_id);
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
index 6ced655c2..2ba4eb45f 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,8 @@ class GCTaskQueue;
class PreGCValues;
class MoveAndUpdateClosure;
class RefProcTaskExecutor;
+class ParallelOldTracer;
+class STWGCTimer;
// The SplitInfo class holds the information needed to 'split' a source region
// so that the live data can be copied to two destination *spaces*. Normally,
@@ -220,6 +222,17 @@ public:
// Mask for the bits in a pointer to get the address of the start of a region.
static const size_t RegionAddrMask;
+ static const size_t Log2BlockSize;
+ static const size_t BlockSize;
+ static const size_t BlockSizeBytes;
+
+ static const size_t BlockSizeOffsetMask;
+ static const size_t BlockAddrOffsetMask;
+ static const size_t BlockAddrMask;
+
+ static const size_t BlocksPerRegion;
+ static const size_t Log2BlocksPerRegion;
+
class RegionData
{
public:
@@ -272,6 +285,12 @@ public:
inline uint destination_count() const;
inline uint destination_count_raw() const;
+ // Whether the block table for this region has been filled.
+ inline bool blocks_filled() const;
+
+ // Number of times the block table was filled.
+ DEBUG_ONLY(inline size_t blocks_filled_count() const;)
+
// The location of the java heap data that corresponds to this region.
inline HeapWord* data_location() const;
@@ -296,6 +315,7 @@ public:
void set_partial_obj_size(size_t words) {
_partial_obj_size = (region_sz_t) words;
}
+ inline void set_blocks_filled();
inline void set_destination_count(uint count);
inline void set_live_obj_size(size_t words);
@@ -328,7 +348,11 @@ public:
HeapWord* _partial_obj_addr;
region_sz_t _partial_obj_size;
region_sz_t volatile _dc_and_los;
+ bool _blocks_filled;
+
#ifdef ASSERT
+ size_t _blocks_filled_count; // Number of block table fills.
+
// These enable optimizations that are only partially implemented. Use
// debug builds to prevent the code fragments from breaking.
HeapWord* _data_location;
@@ -337,23 +361,40 @@ public:
#ifdef ASSERT
public:
- uint _pushed; // 0 until region is pushed onto a worker's stack
+ uint _pushed; // 0 until region is pushed onto a stack
private:
#endif
};
+ // "Blocks" allow shorter sections of the bitmap to be searched. Each Block
+ // holds an offset, which is the amount of live data in the Region to the left
+ // of the first live object that starts in the Block.
+ class BlockData
+ {
+ public:
+ typedef unsigned short int blk_ofs_t;
+
+ blk_ofs_t offset() const { return _offset; }
+ void set_offset(size_t val) { _offset = (blk_ofs_t)val; }
+
+ private:
+ blk_ofs_t _offset;
+ };
+
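Why a 16-bit blk_ofs_t appears to suffice: the stored offset counts live words to the left within one 64K-word region, so it never exceeds RegionSize - 1 = 65535, and the fully-live case (which would need 65536) takes the data_size() == RegionSize fast path in calc_new_pointer() without consulting the block table. As a standalone check (my inference from the diff, not an upstream assertion):

    #include <climits>

    static_assert(USHRT_MAX >= (1u << 16) - 1,
                  "unsigned short covers offsets within a 64K-word region");

    int main() { return 0; }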
public:
ParallelCompactData();
bool initialize(MemRegion covered_region);
size_t region_count() const { return _region_count; }
+ size_t reserved_byte_size() const { return _reserved_byte_size; }
// Convert region indices to/from RegionData pointers.
inline RegionData* region(size_t region_idx) const;
inline size_t region(const RegionData* const region_ptr) const;
- // Returns true if the given address is contained within the region
- bool region_contains(size_t region_index, HeapWord* addr);
+ size_t block_count() const { return _block_count; }
+ inline BlockData* block(size_t block_idx) const;
+ inline size_t block(const BlockData* block_ptr) const;
void add_obj(HeapWord* addr, size_t len);
void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
@@ -393,11 +434,24 @@ public:
inline HeapWord* region_align_up(HeapWord* addr) const;
inline bool is_region_aligned(HeapWord* addr) const;
+ // Analogous to region_offset() for blocks.
+ size_t block_offset(const HeapWord* addr) const;
+ size_t addr_to_block_idx(const HeapWord* addr) const;
+ size_t addr_to_block_idx(const oop obj) const {
+ return addr_to_block_idx((HeapWord*) obj);
+ }
+ inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
+ inline HeapWord* block_to_addr(size_t block) const;
+ inline size_t region_to_block_idx(size_t region) const;
+
+ inline HeapWord* block_align_down(HeapWord* addr) const;
+ inline HeapWord* block_align_up(HeapWord* addr) const;
+ inline bool is_block_aligned(HeapWord* addr) const;
+
// Return the address one past the end of the partial object.
HeapWord* partial_obj_end(size_t region_idx) const;
- // Return the new location of the object p after the
- // the compaction.
+ // Return the location of the object after compaction.
HeapWord* calc_new_pointer(HeapWord* addr);
HeapWord* calc_new_pointer(oop p) {
@@ -410,6 +464,7 @@ public:
#endif // #ifdef ASSERT
private:
+ bool initialize_block_data();
bool initialize_region_data(size_t region_size);
PSVirtualSpace* create_vspace(size_t count, size_t element_size);
@@ -420,8 +475,13 @@ private:
#endif // #ifdef ASSERT
PSVirtualSpace* _region_vspace;
+ size_t _reserved_byte_size;
RegionData* _region_data;
size_t _region_count;
+
+ PSVirtualSpace* _block_vspace;
+ BlockData* _block_data;
+ size_t _block_count;
};
inline uint
@@ -436,6 +496,28 @@ ParallelCompactData::RegionData::destination_count() const
return destination_count_raw() >> dc_shift;
}
+inline bool
+ParallelCompactData::RegionData::blocks_filled() const
+{
+ return _blocks_filled;
+}
+
+#ifdef ASSERT
+inline size_t
+ParallelCompactData::RegionData::blocks_filled_count() const
+{
+ return _blocks_filled_count;
+}
+#endif // #ifdef ASSERT
+
+inline void
+ParallelCompactData::RegionData::set_blocks_filled()
+{
+ _blocks_filled = true;
+ // Debug builds count the number of times the table was filled.
+ DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count));
+}
+
inline void
ParallelCompactData::RegionData::set_destination_count(uint count)
{
@@ -530,6 +612,12 @@ ParallelCompactData::region(const RegionData* const region_ptr) const
return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
}
+inline ParallelCompactData::BlockData*
+ParallelCompactData::block(size_t n) const {
+ assert(n < block_count(), "bad arg");
+ return _block_data + n;
+}
+
inline size_t
ParallelCompactData::region_offset(const HeapWord* addr) const
{
@@ -596,6 +684,63 @@ ParallelCompactData::is_region_aligned(HeapWord* addr) const
return region_offset(addr) == 0;
}
+inline size_t
+ParallelCompactData::block_offset(const HeapWord* addr) const
+{
+ assert(addr >= _region_start, "bad addr");
+ assert(addr <= _region_end, "bad addr");
+ return (size_t(addr) & BlockAddrOffsetMask) >> LogHeapWordSize;
+}
+
+inline size_t
+ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const
+{
+ assert(addr >= _region_start, "bad addr");
+ assert(addr <= _region_end, "bad addr");
+ return pointer_delta(addr, _region_start) >> Log2BlockSize;
+}
+
+inline ParallelCompactData::BlockData*
+ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const
+{
+ return block(addr_to_block_idx(addr));
+}
+
+inline HeapWord*
+ParallelCompactData::block_to_addr(size_t block) const
+{
+ assert(block < _block_count, "block out of range");
+ return _region_start + (block << Log2BlockSize);
+}
+
+inline size_t
+ParallelCompactData::region_to_block_idx(size_t region) const
+{
+ return region << Log2BlocksPerRegion;
+}
+
+inline HeapWord*
+ParallelCompactData::block_align_down(HeapWord* addr) const
+{
+ assert(addr >= _region_start, "bad addr");
+ assert(addr < _region_end + RegionSize, "bad addr");
+ return (HeapWord*)(size_t(addr) & BlockAddrMask);
+}
+
+inline HeapWord*
+ParallelCompactData::block_align_up(HeapWord* addr) const
+{
+ assert(addr >= _region_start, "bad addr");
+ assert(addr <= _region_end, "bad addr");
+ return block_align_down(addr + BlockSizeOffsetMask);
+}
+
+inline bool
+ParallelCompactData::is_block_aligned(HeapWord* addr) const
+{
+ return block_offset(addr) == 0;
+}
+
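The block helpers above are pure mask arithmetic. A toy check with plain integers standing in for HeapWord* (assumes 8-byte words; the base address 0x10000 is arbitrary):

    #include <cstdint>

    int main() {
      const uintptr_t BlockSizeBytes      = 128u << 3;           // 1KB blocks
      const uintptr_t BlockAddrOffsetMask = BlockSizeBytes - 1;  // 0x3ff
      const uintptr_t BlockAddrMask       = ~BlockAddrOffsetMask;

      const uintptr_t region_start = 0x10000;
      const uintptr_t addr = region_start + 300 * 8;             // word 300
      const uintptr_t down = addr & BlockAddrMask;               // block_align_down
      const uintptr_t off  = (addr & BlockAddrOffsetMask) >> 3;  // block_offset

      // Word 300 sits in block 2 (words 256..383): down == 0x10800, off == 44.
      return (down == 0x10800 && off == 44) ? 0 : 1;
    }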
// Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the
// do_addr() method.
//
@@ -773,6 +918,7 @@ class PSParallelCompact : AllStatic {
// Convenient access to type names.
typedef ParMarkBitMap::idx_t idx_t;
typedef ParallelCompactData::RegionData RegionData;
+ typedef ParallelCompactData::BlockData BlockData;
typedef enum {
old_space_id, eden_space_id,
@@ -784,7 +930,6 @@ class PSParallelCompact : AllStatic {
//
class IsAliveClosure: public BoolObjectClosure {
public:
- virtual void do_object(oop p);
virtual bool do_object_b(oop p);
};
@@ -829,6 +974,8 @@ class PSParallelCompact : AllStatic {
friend class RefProcTaskProxy;
private:
+ static STWGCTimer _gc_timer;
+ static ParallelOldTracer _gc_tracer;
static elapsedTimer _accumulated_time;
static unsigned int _total_invocations;
static unsigned int _maximum_compaction_gc_num;
@@ -872,7 +1019,8 @@ class PSParallelCompact : AllStatic {
// Mark live objects
static void marking_phase(ParCompactionManager* cm,
- bool maximum_heap_compaction);
+ bool maximum_heap_compaction,
+ ParallelOldTracer *gc_tracer);
template <class T>
static inline void follow_root(ParCompactionManager* cm, T* p);
@@ -961,6 +1109,8 @@ class PSParallelCompact : AllStatic {
// Adjust addresses in roots. Does not adjust addresses in heap.
static void adjust_roots();
+ DEBUG_ONLY(static void write_block_fill_histogram(outputStream* const out);)
+
// Move objects to new locations.
static void compact_perm(ParCompactionManager* cm);
static void compact();
@@ -1127,6 +1277,9 @@ class PSParallelCompact : AllStatic {
fill_region(cm, region);
}
+ // Fill in the block table for the specified region.
+ static void fill_blocks(size_t region_idx);
+
// Update the deferred objects in the space.
static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
@@ -1136,6 +1289,8 @@ class PSParallelCompact : AllStatic {
// Reference Processing
static ReferenceProcessor* const ref_processor() { return _ref_processor; }
+ static STWGCTimer* gc_timer() { return &_gc_timer; }
+
// Return the SpaceId for the given address.
static SpaceId space_id(HeapWord* addr);
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
index ee54e55bf..dd3933b00 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,15 +27,18 @@
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/memRegion.hpp"
+#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
-PSPromotionManager** PSPromotionManager::_manager_array = NULL;
-OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
-PSOldGen* PSPromotionManager::_old_gen = NULL;
-MutableSpace* PSPromotionManager::_young_space = NULL;
+PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
+OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
+PSOldGen* PSPromotionManager::_old_gen = NULL;
+MutableSpace* PSPromotionManager::_young_space = NULL;
void PSPromotionManager::initialize() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -44,35 +47,32 @@ void PSPromotionManager::initialize() {
_old_gen = heap->old_gen();
_young_space = heap->young_gen()->to_space();
+ // To prevent false sharing, we pad the PSPromotionManagers
+ // and make sure that the first instance starts at a cache line.
assert(_manager_array == NULL, "Attempt to initialize twice");
- _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1, mtGC);
+ _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(ParallelGCThreads + 1);
guarantee(_manager_array != NULL, "Could not initialize promotion manager");
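The comment above explains the switch from per-thread heap-allocated managers to one padded flat array. A minimal sketch of the idea (not HotSpot's PaddedArray; the 64-byte cache line is an assumption, and over-aligned operator new[] requires C++17):

    #include <cstddef>

    const std::size_t CACHE_LINE_SIZE = 64;   // assumption

    // Each element is rounded up to a cache-line multiple, so two GC worker
    // threads updating neighboring managers never write the same line.
    template <typename T>
    struct alignas(CACHE_LINE_SIZE) Padded {
      T data;
    };

    struct Manager { long pushes; long steals; };

    int main(int argc, char**) {
      const std::size_t n = 4;                      // e.g. ParallelGCThreads + 1
      Padded<Manager>* a = new Padded<Manager>[n];  // first element line-aligned
      a[argc % n].data.pushes++;                    // touches exactly one line
      delete[] a;
      return 0;
    }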
_stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
- guarantee(_stack_array_depth != NULL, "Cound not initialize promotion manager");
+ guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");
// Create and register the PSPromotionManager(s) for the worker threads.
for(uint i=0; i<ParallelGCThreads; i++) {
- _manager_array[i] = new PSPromotionManager();
- guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
- stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
+ stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
}
-
// The VMThread gets its own PSPromotionManager, which is not available
// for work stealing.
- _manager_array[ParallelGCThreads] = new PSPromotionManager();
- guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
}
PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
assert(_manager_array != NULL, "Sanity");
- return _manager_array[index];
+ return &_manager_array[index];
}
PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
assert(_manager_array != NULL, "Sanity");
- return _manager_array[ParallelGCThreads];
+ return &_manager_array[ParallelGCThreads];
}
void PSPromotionManager::pre_scavenge() {
@@ -86,13 +86,20 @@ void PSPromotionManager::pre_scavenge() {
}
}
-void PSPromotionManager::post_scavenge() {
+bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
+ bool promotion_failure_occurred = false;
+
TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats());
for (uint i = 0; i < ParallelGCThreads + 1; i++) {
PSPromotionManager* manager = manager_array(i);
assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
+ if (manager->_promotion_failed_info.has_failed()) {
+ gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
+ promotion_failure_occurred = true;
+ }
manager->flush_labs();
}
+ return promotion_failure_occurred;
}
#if TASKQUEUE_STATS
@@ -187,6 +194,8 @@ void PSPromotionManager::reset() {
_old_lab.initialize(MemRegion(lab_base, (size_t)0));
_old_gen_is_full = false;
+ _promotion_failed_info.reset();
+
TASKQUEUE_STATS_ONLY(reset_stats());
}
@@ -305,6 +314,8 @@ oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
// We won any races, we "own" this object.
assert(obj == obj->forwardee(), "Sanity");
+ _promotion_failed_info.register_copy_failure(obj->size());
+
obj->push_contents(this);
// Save the mark if needed
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
index 0e429edc6..6707ade2d 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,14 +26,18 @@
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
#include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/allocation.hpp"
+#include "memory/padded.hpp"
+#include "utilities/globalDefinitions.hpp"
#include "utilities/taskqueue.hpp"
//
// psPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread local data only.
//
-// NOTE! Be carefull when allocating the stacks on cheap. If you are going
+// NOTE! Be careful when allocating the stacks on cheap. If you are going
// to use a promotion manager in more than one thread, the stacks MUST be
// on cheap. This can lead to memory leaks, though, as they are not auto
// deallocated.
@@ -49,14 +53,14 @@ class MutableSpace;
class PSOldGen;
class ParCompactionManager;
-class PSPromotionManager : public CHeapObj<mtGC> {
+class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
friend class PSScavenge;
friend class PSRefProcTaskExecutor;
private:
- static PSPromotionManager** _manager_array;
- static OopStarTaskQueueSet* _stack_array_depth;
- static PSOldGen* _old_gen;
- static MutableSpace* _young_space;
+ static PaddedEnd<PSPromotionManager>* _manager_array;
+ static OopStarTaskQueueSet* _stack_array_depth;
+ static PSOldGen* _old_gen;
+ static MutableSpace* _young_space;
#if TASKQUEUE_STATS
size_t _masked_pushes;
@@ -85,6 +89,8 @@ class PSPromotionManager : public CHeapObj<mtGC> {
uint _array_chunk_size;
uint _min_array_size_for_chunking;
+ PromotionFailedInfo _promotion_failed_info;
+
// Accessors
static PSOldGen* old_gen() { return _old_gen; }
static MutableSpace* young_space() { return _young_space; }
@@ -149,7 +155,7 @@ class PSPromotionManager : public CHeapObj<mtGC> {
static void initialize();
static void pre_scavenge();
- static void post_scavenge();
+ static bool post_scavenge(YoungGCTracer& gc_tracer);
static PSPromotionManager* gc_thread_promotion_manager(int index);
static PSPromotionManager* vm_thread_promotion_manager();
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
index 8822a481b..34c935408 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
assert(_manager_array != NULL, "access of NULL manager_array");
assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
- return _manager_array[index];
+ return &_manager_array[index];
}
template <class T>
@@ -42,7 +42,7 @@ inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
if (o->is_forwarded()) {
o = o->forwardee();
// Card mark
- if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
+ if (PSScavenge::is_obj_in_young(o)) {
PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
}
oopDesc::encode_store_heap_oop_not_null(p, o);
@@ -152,7 +152,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
// This is the promotion failed test, and code handling.
// The code belongs here for two reasons. It is slightly
- // different thatn the code below, and cannot share the
+ // different than the code below, and cannot share the
// CAS testing code. Keeping the code here also minimizes
// the impact on the common case fast path code.
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
index 4bc7870b9..60c0267bb 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
@@ -34,6 +34,10 @@
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
@@ -61,20 +65,19 @@ CardTableExtension* PSScavenge::_card_table = NULL;
bool PSScavenge::_survivor_overflow = false;
uint PSScavenge::_tenuring_threshold = 0;
HeapWord* PSScavenge::_young_generation_boundary = NULL;
+uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer PSScavenge::_accumulated_time;
+STWGCTimer PSScavenge::_gc_timer;
+ParallelScavengeTracer PSScavenge::_gc_tracer;
Stack<markOop, mtGC> PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC> PSScavenge::_preserved_oop_stack;
CollectorCounters* PSScavenge::_counters = NULL;
-bool PSScavenge::_promotion_failed = false;
// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
- void do_object(oop p) {
- assert(false, "Do not call.");
- }
bool do_object_b(oop p) {
- return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
+ return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
}
};
@@ -261,6 +264,8 @@ bool PSScavenge::invoke_no_policy() {
assert(_preserved_mark_stack.is_empty(), "should be empty");
assert(_preserved_oop_stack.is_empty(), "should be empty");
+ _gc_timer.register_gc_start(os::elapsed_counter());
+
TimeStamp scavenge_entry;
TimeStamp scavenge_midpoint;
TimeStamp scavenge_exit;
@@ -280,11 +285,14 @@ bool PSScavenge::invoke_no_policy() {
return false;
}
+ _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
+
bool promotion_failure_occurred = false;
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
+
heap->increment_total_collections();
AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
@@ -301,12 +309,12 @@ bool PSScavenge::invoke_no_policy() {
}
heap->print_heap_before_gc();
+ heap->trace_heap_before_gc(&_gc_tracer);
assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
size_t prev_used = heap->used();
- assert(promotion_failed() == false, "Sanity");
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
@@ -323,7 +331,7 @@ bool PSScavenge::invoke_no_policy() {
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
- TraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
+ GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);
@@ -389,7 +397,7 @@ bool PSScavenge::invoke_no_policy() {
// We'll use the promotion manager again later.
PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
{
- // TraceTime("Roots");
+ GCTraceTime tm("Scavenge", false, false, &_gc_timer);
ParallelScavengeHeap::ParStrongRootsScope psrs;
GCTaskQueue* q = GCTaskQueue::create();
@@ -411,6 +419,7 @@ bool PSScavenge::invoke_no_policy() {
q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
+ q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
@@ -430,38 +439,41 @@ bool PSScavenge::invoke_no_policy() {
// Process reference objects discovered during scavenge
{
+ GCTraceTime tm("References", false, false, &_gc_timer);
+
reference_processor()->setup_policy(false); // not always_clear
reference_processor()->set_active_mt_degree(active_workers);
PSKeepAliveClosure keep_alive(promotion_manager);
PSEvacuateFollowersClosure evac_followers(promotion_manager);
+ ReferenceProcessorStats stats;
if (reference_processor()->processing_is_mt()) {
PSRefProcTaskExecutor task_executor;
- reference_processor()->process_discovered_references(
- &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
+ stats = reference_processor()->process_discovered_references(
+ &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
+ &_gc_timer);
} else {
- reference_processor()->process_discovered_references(
- &_is_alive_closure, &keep_alive, &evac_followers, NULL);
+ stats = reference_processor()->process_discovered_references(
+ &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
}
- }
- // Enqueue reference objects discovered during scavenge.
- if (reference_processor()->processing_is_mt()) {
- PSRefProcTaskExecutor task_executor;
- reference_processor()->enqueue_discovered_references(&task_executor);
- } else {
- reference_processor()->enqueue_discovered_references(NULL);
+ _gc_tracer.report_gc_reference_stats(stats);
+
+ // Enqueue reference objects discovered during scavenge.
+ if (reference_processor()->processing_is_mt()) {
+ PSRefProcTaskExecutor task_executor;
+ reference_processor()->enqueue_discovered_references(&task_executor);
+ } else {
+ reference_processor()->enqueue_discovered_references(NULL);
+ }
}
- // Unlink any dead interned Strings
- StringTable::unlink(&_is_alive_closure);
- // Process the remaining live ones
- PSScavengeRootsClosure root_closure(promotion_manager);
- StringTable::oops_do(&root_closure);
+ GCTraceTime tm("StringTable", false, false, &_gc_timer);
+ // Unlink any dead interned Strings and process the remaining live ones.
+ PSScavengeRootsClosure root_closure(promotion_manager);
+ StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
// Finally, flush the promotion_manager's labs, and deallocate its stacks.
- PSPromotionManager::post_scavenge();
-
- promotion_failure_occurred = promotion_failed();
+ promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
if (promotion_failure_occurred) {
clean_up_failed_promotion();
if (PrintGC) {
@@ -476,8 +488,6 @@ bool PSScavenge::invoke_no_policy() {
if (!promotion_failure_occurred) {
// Swap the survivor spaces.
-
-
young_gen->eden_space()->clear(SpaceDecorator::Mangle);
young_gen->from_space()->clear(SpaceDecorator::Mangle);
young_gen->swap_spaces();
@@ -552,19 +562,33 @@ bool PSScavenge::invoke_no_policy() {
young_gen->from_space()->capacity_in_bytes() +
young_gen->to_space()->capacity_in_bytes(),
"Sizes of space in young gen are out-of-bounds");
+
+ size_t young_live = young_gen->used_in_bytes();
+ size_t eden_live = young_gen->eden_space()->used_in_bytes();
+ size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
+ size_t max_old_gen_size = old_gen->max_gen_size();
size_t max_eden_size = young_gen->max_size() -
young_gen->from_space()->capacity_in_bytes() -
young_gen->to_space()->capacity_in_bytes();
- size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
- young_gen->eden_space()->used_in_bytes(),
- old_gen->used_in_bytes(),
- young_gen->eden_space()->capacity_in_bytes(),
- old_gen->max_gen_size(),
- max_eden_size,
- false /* full gc*/,
- gc_cause,
- heap->collector_policy());
+ // Used for diagnostics
+ size_policy->clear_generation_free_space_flags();
+
+ size_policy->compute_eden_space_size(young_live,
+ eden_live,
+ cur_eden,
+ max_eden_size,
+ false /* not full gc*/);
+
+ size_policy->check_gc_overhead_limit(young_live,
+ eden_live,
+ max_old_gen_size,
+ max_eden_size,
+ false /* not full gc*/,
+ gc_cause,
+ heap->collector_policy());
+
+ size_policy->decay_supplemental_growth(false /* not full gc*/);
}
// Resize the young generation at every collection
// even if new sizes have not been calculated. This is
@@ -601,7 +625,11 @@ bool PSScavenge::invoke_no_policy() {
NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
- CodeCache::prune_scavenge_root_nmethods();
+ {
+ GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);
+
+ CodeCache::prune_scavenge_root_nmethods();
+ }
// Re-verify object start arrays
if (VerifyObjectStartArray &&
@@ -641,6 +669,8 @@ bool PSScavenge::invoke_no_policy() {
}
heap->print_heap_after_gc();
+ heap->trace_heap_after_gc(&_gc_tracer);
+ _gc_tracer.report_tenuring_threshold(tenuring_threshold());
if (ZapUnusedHeapArea) {
young_gen->eden_space()->check_mangled_unused_area_complete();
@@ -661,6 +691,11 @@ bool PSScavenge::invoke_no_policy() {
ParallelTaskTerminator::print_termination_counts();
#endif
+
+ _gc_timer.register_gc_end(os::elapsed_counter());
+
+ _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
+
return !promotion_failure_occurred;
}
@@ -670,7 +705,6 @@ bool PSScavenge::invoke_no_policy() {
void PSScavenge::clean_up_failed_promotion() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
- assert(promotion_failed(), "Sanity");
PSYoungGen* young_gen = heap->young_gen();
@@ -695,7 +729,6 @@ void PSScavenge::clean_up_failed_promotion() {
// Clear the preserved mark and oop stack caches.
_preserved_mark_stack.clear(true);
_preserved_oop_stack.clear(true);
- _promotion_failed = false;
}
// Reset the PromotionFailureALot counters.
@@ -706,11 +739,10 @@ void PSScavenge::clean_up_failed_promotion() {
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
-// it is not neccessary to preserve most markOops.
+// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
- _promotion_failed = true;
if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
- // Should use per-worker private stakcs hetre rather than
+ // Should use per-worker private stacks here rather than
// locking a common pair of stacks.
ThreadCritical tc;
_preserved_oop_stack.push(obj);
@@ -805,7 +837,7 @@ void PSScavenge::initialize() {
// Set boundary between young_gen and old_gen
assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
"old above young");
- _young_generation_boundary = young_gen->eden_space()->bottom();
+ set_young_generation_boundary(young_gen->eden_space()->bottom());
// Initialize ref handling object for scavenging.
MemRegion mr = young_gen->reserved();
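
Editor's note: the GCTraceTime instances introduced above follow a scoped (RAII) pattern — the constructor opens a named phase on the GC timer and the destructor closes it, so nested blocks become nested phases in the resulting TimePartitions. A minimal sketch of that pattern, assuming only the GCTimer interface added later in this change (ScopedGCPhase is a hypothetical name; the real GCTraceTime also drives GC log output):

    // Hedged sketch, not the actual GCTraceTime implementation.
    class ScopedGCPhase {
      GCTimer* _timer;
     public:
      ScopedGCPhase(const char* name, GCTimer* timer) : _timer(timer) {
        // Nesting level is derived from the timer's active-phase stack.
        _timer->register_gc_phase_start(name, os::elapsed_counter());
      }
      ~ScopedGCPhase() {
        _timer->register_gc_phase_end(os::elapsed_counter());
      }
    };
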
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
index 43e59ac9c..896b705c2 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "utilities/stack.hpp"
@@ -37,8 +38,10 @@ class GCTaskQueue;
class OopStack;
class ReferenceProcessor;
class ParallelScavengeHeap;
+class ParallelScavengeTracer;
class PSIsAliveClosure;
class PSRefProcTaskExecutor;
+class STWGCTimer;
class PSScavenge: AllStatic {
friend class PSIsAliveClosure;
@@ -62,19 +65,23 @@ class PSScavenge: AllStatic {
protected:
// Flags/counters
- static ReferenceProcessor* _ref_processor; // Reference processor for scavenging.
- static PSIsAliveClosure _is_alive_closure; // Closure used for reference processing
- static CardTableExtension* _card_table; // We cache the card table for fast access.
- static bool _survivor_overflow; // Overflow this collection
- static uint _tenuring_threshold; // tenuring threshold for next scavenge
- static elapsedTimer _accumulated_time; // total time spent on scavenge
- static HeapWord* _young_generation_boundary; // The lowest address possible for the young_gen.
- // This is used to decide if an oop should be scavenged,
- // cards should be marked, etc.
+ static ReferenceProcessor* _ref_processor; // Reference processor for scavenging.
+ static PSIsAliveClosure _is_alive_closure; // Closure used for reference processing
+ static CardTableExtension* _card_table; // We cache the card table for fast access.
+ static bool _survivor_overflow; // Overflow this collection
+ static uint _tenuring_threshold; // tenuring threshold for next scavenge
+ static elapsedTimer _accumulated_time; // total time spent on scavenge
+ static STWGCTimer _gc_timer; // GC time bookkeeper
+ static ParallelScavengeTracer _gc_tracer; // GC tracing
+ // The lowest address possible for the young_gen.
+ // This is used to decide if an oop should be scavenged,
+ // cards should be marked, etc.
+ static HeapWord* _young_generation_boundary;
+ // Used to optimize compressed oops young gen boundary checking.
+ static uintptr_t _young_generation_boundary_compressed;
static Stack<markOop, mtGC> _preserved_mark_stack; // List of marks to be restored after failed promotion
static Stack<oop, mtGC> _preserved_oop_stack; // List of oops that need their mark restored.
- static CollectorCounters* _counters; // collector performance counters
- static bool _promotion_failed;
+ static CollectorCounters* _counters; // collector performance counters
static void clean_up_failed_promotion();
@@ -90,7 +97,6 @@ class PSScavenge: AllStatic {
// Accessors
static uint tenuring_threshold() { return _tenuring_threshold; }
static elapsedTimer* accumulated_time() { return &_accumulated_time; }
- static bool promotion_failed() { return _promotion_failed; }
static int consecutive_skipped_scavenges()
{ return _consecutive_skipped_scavenges; }
@@ -112,6 +118,9 @@ class PSScavenge: AllStatic {
// boundary moves, _young_generation_boundary must be reset
static void set_young_generation_boundary(HeapWord* v) {
_young_generation_boundary = v;
+ if (UseCompressedOops) {
+ _young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
+ }
}
// Called by parallelScavengeHeap to init the tenuring threshold
@@ -140,11 +149,19 @@ class PSScavenge: AllStatic {
static void copy_and_push_safe_barrier_from_klass(PSPromotionManager* pm, oop* p);
// Is an object in the young generation
- // This assumes that the HeapWord argument is in the heap,
+ // This assumes that 'o' is in the heap,
// so it only checks one side of the complete predicate.
+
+ inline static bool is_obj_in_young(oop o) {
+ return (HeapWord*)o >= _young_generation_boundary;
+ }
+
+ inline static bool is_obj_in_young(narrowOop o) {
+ return (uintptr_t)o >= _young_generation_boundary_compressed;
+ }
+
inline static bool is_obj_in_young(HeapWord* o) {
- const bool result = (o >= _young_generation_boundary);
- return result;
+ return o >= _young_generation_boundary;
}
};
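
Editor's note: caching an encoded copy of the boundary is what makes the new narrowOop overload cheap. With the usual compressed-oops encoding, narrow = (addr - heap_base) >> shift, the mapping is monotonic in addr, so the ordering test survives encoding. A hedged standalone sketch of the reasoning, not HotSpot code:

    // encode() is monotonic, therefore for any in-heap address:
    //   addr >= boundary  <==>  encode(addr) >= encode(boundary)
    // so is_obj_in_young(narrowOop) can compare raw compressed values
    // against the cached _young_generation_boundary_compressed, with no
    // decode on the hot path.
    inline bool in_young_compressed(uint32_t narrow_obj, uint32_t narrow_boundary) {
      return narrow_obj >= narrow_boundary;
    }
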
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
index de015e8eb..e67dedf98 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
@@ -39,9 +39,7 @@ inline void PSScavenge::save_to_space_top_before_gc() {
template <class T> inline bool PSScavenge::should_scavenge(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
- if (oopDesc::is_null(heap_oop)) return false;
- oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
- return PSScavenge::is_obj_in_young((HeapWord*)obj);
+ return PSScavenge::is_obj_in_young(heap_oop);
}
template <class T>
@@ -94,7 +92,7 @@ inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
// or from metadata.
if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
Universe::heap()->is_in_reserved(p)) {
- if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) {
+ if (PSScavenge::is_obj_in_young(new_obj)) {
card_table()->inline_write_ref_field_gc(p, new_obj);
}
}
@@ -147,7 +145,7 @@ class PSScavengeFromKlassClosure: public OopClosure {
}
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
- if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) {
+ if (PSScavenge::is_obj_in_young(new_obj)) {
do_klass_barrier();
}
}
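
Editor's note: dropping the explicit null check in should_scavenge is sound because a null reference is numerically zero in both representations, while the young-gen boundary is always non-zero, so the boundary comparison alone already rejects null. A hedged standalone sketch:

    // T is a raw uintptr_t oop value or a uint32_t narrowOop value.
    template <class T>
    bool should_scavenge_value(T value, T young_boundary) {
      // null encodes to 0 and young_boundary > 0, so
      // "0 >= young_boundary" is false without a separate null test.
      return value >= young_boundary;
    }
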
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
index ccad48f35..00e466a3e 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
@@ -79,15 +79,16 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
break;
case system_dictionary:
- {
SystemDictionary::oops_do(&roots_closure);
-
- // Move this to another root_type?
- PSScavengeKlassClosure klass_closure(pm);
- ClassLoaderDataGraph::oops_do(&roots_closure, &klass_closure, false);
- }
break;
+ case class_loader_data:
+ {
+ PSScavengeKlassClosure klass_closure(pm);
+ ClassLoaderDataGraph::oops_do(&roots_closure, &klass_closure, false);
+ }
+ break;
+
case management:
Management::oops_do(&roots_closure);
break;
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp b/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp
index 7769fddf7..7ae1f8a0c 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp
@@ -59,9 +59,10 @@ class ScavengeRootsTask : public GCTask {
object_synchronizer = 4,
flat_profiler = 5,
system_dictionary = 6,
- management = 7,
- jvmti = 8,
- code_cache = 9
+ class_loader_data = 7,
+ management = 8,
+ jvmti = 9,
+ code_cache = 10
};
private:
RootType _root_type;
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp b/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp
index e19476707..999952730 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -101,7 +101,8 @@ bool PSVirtualSpace::expand_by(size_t bytes) {
}
char* const base_addr = committed_high_addr();
- bool result = special() || os::commit_memory(base_addr, bytes, alignment());
+ bool result = special() ||
+ os::commit_memory(base_addr, bytes, alignment(), !ExecMem);
if (result) {
_committed_high_addr += bytes;
}
@@ -154,7 +155,7 @@ PSVirtualSpace::expand_into(PSVirtualSpace* other_space, size_t bytes) {
if (tmp_bytes > 0) {
char* const commit_base = committed_high_addr();
if (other_space->special() ||
- os::commit_memory(commit_base, tmp_bytes, alignment())) {
+ os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) {
// Reduce the reserved region in the other space.
other_space->set_reserved(other_space->reserved_low_addr() + tmp_bytes,
other_space->reserved_high_addr(),
@@ -269,7 +270,8 @@ bool PSVirtualSpaceHighToLow::expand_by(size_t bytes) {
}
char* const base_addr = committed_low_addr() - bytes;
- bool result = special() || os::commit_memory(base_addr, bytes, alignment());
+ bool result = special() ||
+ os::commit_memory(base_addr, bytes, alignment(), !ExecMem);
if (result) {
_committed_low_addr -= bytes;
}
@@ -322,7 +324,7 @@ size_t PSVirtualSpaceHighToLow::expand_into(PSVirtualSpace* other_space,
if (tmp_bytes > 0) {
char* const commit_base = committed_low_addr() - tmp_bytes;
if (other_space->special() ||
- os::commit_memory(commit_base, tmp_bytes, alignment())) {
+ os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) {
// Reduce the reserved region in the other space.
other_space->set_reserved(other_space->reserved_low_addr(),
other_space->reserved_high_addr() - tmp_bytes,
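
Editor's note: the new fourth argument threads an "executable" bit down to os::commit_memory; these virtual-space commits pass !ExecMem so GC data areas are never mapped executable (a W^X hardening step — in this codebase typically only code-cache commits pass ExecMem). A hedged POSIX-level analogue of what such a flag controls, not the HotSpot implementation:

    #include <sys/mman.h>
    // Hedged sketch: commit previously reserved anonymous memory,
    // adding PROT_EXEC only when the caller asks for executable memory.
    static bool commit_region(char* addr, size_t bytes, bool exec) {
      int prot = PROT_READ | PROT_WRITE | (exec ? PROT_EXEC : 0);
      void* res = mmap(addr, bytes, prot,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
      return res != MAP_FAILED;
    }
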
diff --git a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp
index 90093a2cb..3e7c716d4 100644
--- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp
+++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -467,7 +467,7 @@ void AdaptiveSizePolicy::check_gc_overhead_limit(
(free_in_old_gen < (size_t) mem_free_old_limit &&
free_in_eden < (size_t) mem_free_eden_limit))) {
gclog_or_tty->print_cr(
- "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
+ "PSAdaptiveSizePolicy::check_gc_overhead_limit:"
" promo_limit: " SIZE_FORMAT
" max_eden_size: " SIZE_FORMAT
" total_free_limit: " SIZE_FORMAT
diff --git a/src/share/vm/gc_implementation/shared/allocationStats.hpp b/src/share/vm/gc_implementation/shared/allocationStats.hpp
index cf7cd3ae0..0fb6d7fa2 100644
--- a/src/share/vm/gc_implementation/shared/allocationStats.hpp
+++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,9 @@
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_ALLOCATIONSTATS_HPP
#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
-#endif // INCLUDE_ALL_GCS
+#include "gc_implementation/shared/gcUtil.hpp"
class AllocationStats VALUE_OBJ_CLASS_SPEC {
// A duration threshold (in ms) used to filter
diff --git a/src/share/vm/gc_implementation/shared/copyFailedInfo.hpp b/src/share/vm/gc_implementation/shared/copyFailedInfo.hpp
new file mode 100644
index 000000000..2f30f5e8f
--- /dev/null
+++ b/src/share/vm/gc_implementation/shared/copyFailedInfo.hpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP
+
+#include "runtime/thread.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class CopyFailedInfo : public CHeapObj<mtGC> {
+ size_t _first_size;
+ size_t _smallest_size;
+ size_t _total_size;
+ uint _count;
+
+ public:
+ CopyFailedInfo() : _first_size(0), _smallest_size(0), _total_size(0), _count(0) {}
+
+ virtual void register_copy_failure(size_t size) {
+ if (_first_size == 0) {
+ _first_size = size;
+ _smallest_size = size;
+ } else if (size < _smallest_size) {
+ _smallest_size = size;
+ }
+ _total_size += size;
+ _count++;
+ }
+
+ virtual void reset() {
+ _first_size = 0;
+ _smallest_size = 0;
+ _total_size = 0;
+ _count = 0;
+ }
+
+ bool has_failed() const { return _count != 0; }
+ size_t first_size() const { return _first_size; }
+ size_t smallest_size() const { return _smallest_size; }
+ size_t total_size() const { return _total_size; }
+ uint failed_count() const { return _count; }
+};
+
+class PromotionFailedInfo : public CopyFailedInfo {
+ OSThread* _thread;
+
+ public:
+ PromotionFailedInfo() : CopyFailedInfo(), _thread(NULL) {}
+
+ void register_copy_failure(size_t size) {
+ CopyFailedInfo::register_copy_failure(size);
+ if (_thread == NULL) {
+ _thread = Thread::current()->osthread();
+ } else {
+ assert(_thread == Thread::current()->osthread(), "The PromotionFailedInfo should be thread local.");
+ }
+ }
+
+ void reset() {
+ CopyFailedInfo::reset();
+ _thread = NULL;
+ }
+
+ OSThread* thread() const { return _thread; }
+};
+
+class EvacuationFailedInfo : public CopyFailedInfo {};
+
+#endif /* SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP */
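
Editor's note: a short usage sketch of the aggregation semantics above — the first failed size is latched once, the smallest is tracked across all failures, and total/count accumulate (hypothetical byte sizes):

    CopyFailedInfo info;
    info.register_copy_failure(64);   // first: first_size=64, smallest=64
    info.register_copy_failure(16);   // smaller: smallest becomes 16
    info.register_copy_failure(128);  // larger: only total and count change
    // Now: has_failed() is true, first_size() == 64, smallest_size() == 16,
    // total_size() == 208, failed_count() == 3.
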
diff --git a/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp b/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp
new file mode 100644
index 000000000..4e79b8f93
--- /dev/null
+++ b/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP
+
+#include "memory/allocation.hpp"
+
+class VirtualSpaceSummary : public StackObj {
+ HeapWord* _start;
+ HeapWord* _committed_end;
+ HeapWord* _reserved_end;
+public:
+ VirtualSpaceSummary() :
+ _start(NULL), _committed_end(NULL), _reserved_end(NULL) { }
+ VirtualSpaceSummary(HeapWord* start, HeapWord* committed_end, HeapWord* reserved_end) :
+ _start(start), _committed_end(committed_end), _reserved_end(reserved_end) { }
+
+ HeapWord* start() const { return _start; }
+ HeapWord* committed_end() const { return _committed_end; }
+ HeapWord* reserved_end() const { return _reserved_end; }
+ size_t committed_size() const { return (uintptr_t)_committed_end - (uintptr_t)_start; }
+ size_t reserved_size() const { return (uintptr_t)_reserved_end - (uintptr_t)_start; }
+};
+
+class SpaceSummary : public StackObj {
+ HeapWord* _start;
+ HeapWord* _end;
+ size_t _used;
+public:
+ SpaceSummary() :
+ _start(NULL), _end(NULL), _used(0) { }
+ SpaceSummary(HeapWord* start, HeapWord* end, size_t used) :
+ _start(start), _end(end), _used(used) { }
+
+ HeapWord* start() const { return _start; }
+ HeapWord* end() const { return _end; }
+ size_t used() const { return _used; }
+ size_t size() const { return (uintptr_t)_end - (uintptr_t)_start; }
+};
+
+class MetaspaceSizes : public StackObj {
+ size_t _capacity;
+ size_t _used;
+ size_t _reserved;
+
+ public:
+ MetaspaceSizes() : _capacity(0), _used(0), _reserved(0) {}
+ MetaspaceSizes(size_t capacity, size_t used, size_t reserved) :
+ _capacity(capacity), _used(used), _reserved(reserved) {}
+
+ size_t capacity() const { return _capacity; }
+ size_t used() const { return _used; }
+ size_t reserved() const { return _reserved; }
+};
+
+class GCHeapSummary;
+class PSHeapSummary;
+
+class GCHeapSummaryVisitor {
+ public:
+ virtual void visit(const GCHeapSummary* heap_summary) const = 0;
+ virtual void visit(const PSHeapSummary* heap_summary) const {}
+};
+
+class GCHeapSummary : public StackObj {
+ VirtualSpaceSummary _heap;
+ size_t _used;
+
+ public:
+ GCHeapSummary() :
+ _heap(), _used(0) { }
+ GCHeapSummary(VirtualSpaceSummary& heap_space, size_t used) :
+ _heap(heap_space), _used(used) { }
+
+ const VirtualSpaceSummary& heap() const { return _heap; }
+ size_t used() const { return _used; }
+
+ virtual void accept(GCHeapSummaryVisitor* visitor) const {
+ visitor->visit(this);
+ }
+};
+
+class PSHeapSummary : public GCHeapSummary {
+ VirtualSpaceSummary _old;
+ SpaceSummary _old_space;
+ VirtualSpaceSummary _young;
+ SpaceSummary _eden;
+ SpaceSummary _from;
+ SpaceSummary _to;
+ public:
+ PSHeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, VirtualSpaceSummary old, SpaceSummary old_space, VirtualSpaceSummary young, SpaceSummary eden, SpaceSummary from, SpaceSummary to) :
+ GCHeapSummary(heap_space, heap_used), _old(old), _old_space(old_space), _young(young), _eden(eden), _from(from), _to(to) { }
+ const VirtualSpaceSummary& old() const { return _old; }
+ const SpaceSummary& old_space() const { return _old_space; }
+ const VirtualSpaceSummary& young() const { return _young; }
+ const SpaceSummary& eden() const { return _eden; }
+ const SpaceSummary& from() const { return _from; }
+ const SpaceSummary& to() const { return _to; }
+
+ virtual void accept(GCHeapSummaryVisitor* visitor) const {
+ visitor->visit(this);
+ }
+};
+
+class MetaspaceSummary : public StackObj {
+ MetaspaceSizes _meta_space;
+ MetaspaceSizes _data_space;
+ MetaspaceSizes _class_space;
+
+ public:
+ MetaspaceSummary() : _meta_space(), _data_space(), _class_space() {}
+ MetaspaceSummary(const MetaspaceSizes& meta_space, const MetaspaceSizes& data_space, const MetaspaceSizes& class_space) :
+ _meta_space(meta_space), _data_space(data_space), _class_space(class_space) { }
+
+ const MetaspaceSizes& meta_space() const { return _meta_space; }
+ const MetaspaceSizes& data_space() const { return _data_space; }
+ const MetaspaceSizes& class_space() const { return _class_space; }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP
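
Editor's note: the two-overload visitor gives cheap double dispatch — each summary's virtual accept() calls visit(this), and overload resolution on the static type of this picks the PSHeapSummary overload inside PSHeapSummary::accept, while plain summaries hit the base overload. A hedged sketch of a concrete visitor over the classes above (hypothetical printer):

    class PrintSummaryVisitor : public GCHeapSummaryVisitor {
     public:
      virtual void visit(const GCHeapSummary* s) const {
        tty->print_cr("heap used: " SIZE_FORMAT, s->used());
      }
      virtual void visit(const PSHeapSummary* s) const {
        tty->print_cr("eden used: " SIZE_FORMAT, s->eden().used());
      }
    };
    // summary.accept(&visitor) routes to the overload matching the
    // summary's dynamic type.
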
diff --git a/src/share/vm/gc_implementation/shared/gcTimer.cpp b/src/share/vm/gc_implementation/shared/gcTimer.cpp
new file mode 100644
index 000000000..6d011700c
--- /dev/null
+++ b/src/share/vm/gc_implementation/shared/gcTimer.cpp
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "utilities/growableArray.hpp"
+
+void GCTimer::register_gc_start(jlong time) {
+ _time_partitions.clear();
+ _gc_start = time;
+}
+
+void GCTimer::register_gc_end(jlong time) {
+ assert(!_time_partitions.has_active_phases(),
+ "We should have ended all started phases, before ending the GC");
+
+ _gc_end = time;
+}
+
+void GCTimer::register_gc_pause_start(const char* name, jlong time) {
+ _time_partitions.report_gc_phase_start(name, time);
+}
+
+void GCTimer::register_gc_pause_end(jlong time) {
+ _time_partitions.report_gc_phase_end(time);
+}
+
+void GCTimer::register_gc_phase_start(const char* name, jlong time) {
+ _time_partitions.report_gc_phase_start(name, time);
+}
+
+void GCTimer::register_gc_phase_end(jlong time) {
+ _time_partitions.report_gc_phase_end(time);
+}
+
+
+void STWGCTimer::register_gc_start(jlong time) {
+ GCTimer::register_gc_start(time);
+ register_gc_pause_start("GC Pause", time);
+}
+
+void STWGCTimer::register_gc_end(jlong time) {
+ register_gc_pause_end(time);
+ GCTimer::register_gc_end(time);
+}
+
+void ConcurrentGCTimer::register_gc_pause_start(const char* name, jlong time) {
+ GCTimer::register_gc_pause_start(name, time);
+}
+
+void ConcurrentGCTimer::register_gc_pause_end(jlong time) {
+ GCTimer::register_gc_pause_end(time);
+}
+
+void PhasesStack::clear() {
+ _next_phase_level = 0;
+}
+
+void PhasesStack::push(int phase_index) {
+ assert(_next_phase_level < PHASE_LEVELS, "Overflow");
+
+ _phase_indices[_next_phase_level] = phase_index;
+
+ _next_phase_level++;
+}
+
+int PhasesStack::pop() {
+ assert(_next_phase_level > 0, "Underflow");
+
+ _next_phase_level--;
+
+ return _phase_indices[_next_phase_level];
+}
+
+int PhasesStack::count() const {
+ return _next_phase_level;
+}
+
+
+TimePartitions::TimePartitions() {
+ _phases = new (ResourceObj::C_HEAP, mtGC) GrowableArray<PausePhase>(INITIAL_CAPACITY, true, mtGC);
+ clear();
+}
+
+TimePartitions::~TimePartitions() {
+ delete _phases;
+ _phases = NULL;
+}
+
+void TimePartitions::clear() {
+ _phases->clear();
+ _active_phases.clear();
+ _sum_of_pauses = 0;
+ _longest_pause = 0;
+}
+
+void TimePartitions::report_gc_phase_start(const char* name, jlong time) {
+ assert(_phases->length() <= 1000, "Too many recorded phases?");
+
+ int level = _active_phases.count();
+
+ PausePhase phase;
+ phase.set_level(level);
+ phase.set_name(name);
+ phase.set_start(time);
+
+ int index = _phases->append(phase);
+
+ _active_phases.push(index);
+}
+
+void TimePartitions::update_statistics(GCPhase* phase) {
+ // FIXME: This should only be done for pause phases
+ if (phase->level() == 0) {
+ jlong pause = phase->end() - phase->start();
+ _sum_of_pauses += pause;
+ _longest_pause = MAX2(pause, _longest_pause);
+ }
+}
+
+void TimePartitions::report_gc_phase_end(jlong time) {
+ int phase_index = _active_phases.pop();
+ GCPhase* phase = _phases->adr_at(phase_index);
+ phase->set_end(time);
+ update_statistics(phase);
+}
+
+int TimePartitions::num_phases() const {
+ return _phases->length();
+}
+
+GCPhase* TimePartitions::phase_at(int index) const {
+ assert(index >= 0, "Out of bounds");
+ assert(index < _phases->length(), "Out of bounds");
+
+ return _phases->adr_at(index);
+}
+
+jlong TimePartitions::sum_of_pauses() {
+ return _sum_of_pauses;
+}
+
+jlong TimePartitions::longest_pause() {
+ return _longest_pause;
+}
+
+bool TimePartitions::has_active_phases() {
+ return _active_phases.count() > 0;
+}
+
+bool TimePartitionPhasesIterator::has_next() {
+ return _next < _time_partitions->num_phases();
+}
+
+GCPhase* TimePartitionPhasesIterator::next() {
+ assert(has_next(), "Must have phases left");
+ return _time_partitions->phase_at(_next++);
+}
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TimePartitionPhasesIteratorTest {
+ public:
+ static void all() {
+ one_pause();
+ two_pauses();
+ one_sub_pause_phase();
+ many_sub_pause_phases();
+ many_sub_pause_phases2();
+ max_nested_pause_phases();
+ }
+
+ static void validate_pause_phase(GCPhase* phase, int level, const char* name, jlong start, jlong end) {
+ assert(phase->level() == level, "Incorrect level");
+ assert(strcmp(phase->name(), name) == 0, "Incorrect name");
+ assert(phase->start() == start, "Incorrect start");
+ assert(phase->end() == end, "Incorrect end");
+ }
+
+ static void one_pause() {
+ TimePartitions time_partitions;
+ time_partitions.report_gc_phase_start("PausePhase", 2);
+ time_partitions.report_gc_phase_end(8);
+
+ TimePartitionPhasesIterator iter(&time_partitions);
+
+ validate_pause_phase(iter.next(), 0, "PausePhase", 2, 8);
+ assert(time_partitions.sum_of_pauses() == 8-2, "Incorrect");
+ assert(time_partitions.longest_pause() == 8-2, "Incorrect");
+
+ assert(!iter.has_next(), "Too many elements");
+ }
+
+ static void two_pauses() {
+ TimePartitions time_partitions;
+ time_partitions.report_gc_phase_start("PausePhase1", 2);
+ time_partitions.report_gc_phase_end(3);
+ time_partitions.report_gc_phase_start("PausePhase2", 4);
+ time_partitions.report_gc_phase_end(6);
+
+ TimePartitionPhasesIterator iter(&time_partitions);
+
+ validate_pause_phase(iter.next(), 0, "PausePhase1", 2, 3);
+ validate_pause_phase(iter.next(), 0, "PausePhase2", 4, 6);
+
+ assert(time_partitions.sum_of_pauses() == 3, "Incorrect");
+ assert(time_partitions.longest_pause() == 2, "Incorrect");
+
+ assert(!iter.has_next(), "Too many elements");
+ }
+
+ static void one_sub_pause_phase() {
+ TimePartitions time_partitions;
+ time_partitions.report_gc_phase_start("PausePhase", 2);
+ time_partitions.report_gc_phase_start("SubPhase", 3);
+ time_partitions.report_gc_phase_end(4);
+ time_partitions.report_gc_phase_end(5);
+
+ TimePartitionPhasesIterator iter(&time_partitions);
+
+ validate_pause_phase(iter.next(), 0, "PausePhase", 2, 5);
+ validate_pause_phase(iter.next(), 1, "SubPhase", 3, 4);
+
+ assert(time_partitions.sum_of_pauses() == 3, "Incorrect");
+ assert(time_partitions.longest_pause() == 3, "Incorrect");
+
+ assert(!iter.has_next(), "Too many elements");
+ }
+
+ static void max_nested_pause_phases() {
+ TimePartitions time_partitions;
+ time_partitions.report_gc_phase_start("PausePhase", 2);
+ time_partitions.report_gc_phase_start("SubPhase1", 3);
+ time_partitions.report_gc_phase_start("SubPhase2", 4);
+ time_partitions.report_gc_phase_start("SubPhase3", 5);
+ time_partitions.report_gc_phase_end(6);
+ time_partitions.report_gc_phase_end(7);
+ time_partitions.report_gc_phase_end(8);
+ time_partitions.report_gc_phase_end(9);
+
+ TimePartitionPhasesIterator iter(&time_partitions);
+
+ validate_pause_phase(iter.next(), 0, "PausePhase", 2, 9);
+ validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 8);
+ validate_pause_phase(iter.next(), 2, "SubPhase2", 4, 7);
+ validate_pause_phase(iter.next(), 3, "SubPhase3", 5, 6);
+
+ assert(time_partitions.sum_of_pauses() == 7, "Incorrect");
+ assert(time_partitions.longest_pause() == 7, "Incorrect");
+
+ assert(!iter.has_next(), "Too many elements");
+ }
+
+ static void many_sub_pause_phases() {
+ TimePartitions time_partitions;
+ time_partitions.report_gc_phase_start("PausePhase", 2);
+
+ time_partitions.report_gc_phase_start("SubPhase1", 3);
+ time_partitions.report_gc_phase_end(4);
+ time_partitions.report_gc_phase_start("SubPhase2", 5);
+ time_partitions.report_gc_phase_end(6);
+ time_partitions.report_gc_phase_start("SubPhase3", 7);
+ time_partitions.report_gc_phase_end(8);
+ time_partitions.report_gc_phase_start("SubPhase4", 9);
+ time_partitions.report_gc_phase_end(10);
+
+ time_partitions.report_gc_phase_end(11);
+
+ TimePartitionPhasesIterator iter(&time_partitions);
+
+ validate_pause_phase(iter.next(), 0, "PausePhase", 2, 11);
+ validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 4);
+ validate_pause_phase(iter.next(), 1, "SubPhase2", 5, 6);
+ validate_pause_phase(iter.next(), 1, "SubPhase3", 7, 8);
+ validate_pause_phase(iter.next(), 1, "SubPhase4", 9, 10);
+
+ assert(time_partitions.sum_of_pauses() == 9, "Incorrect");
+ assert(time_partitions.longest_pause() == 9, "Incorrect");
+
+ assert(!iter.has_next(), "Too many elements");
+ }
+
+ static void many_sub_pause_phases2() {
+ TimePartitions time_partitions;
+ time_partitions.report_gc_phase_start("PausePhase", 2);
+
+ time_partitions.report_gc_phase_start("SubPhase1", 3);
+ time_partitions.report_gc_phase_start("SubPhase11", 4);
+ time_partitions.report_gc_phase_end(5);
+ time_partitions.report_gc_phase_start("SubPhase12", 6);
+ time_partitions.report_gc_phase_end(7);
+ time_partitions.report_gc_phase_end(8);
+ time_partitions.report_gc_phase_start("SubPhase2", 9);
+ time_partitions.report_gc_phase_start("SubPhase21", 10);
+ time_partitions.report_gc_phase_end(11);
+ time_partitions.report_gc_phase_start("SubPhase22", 12);
+ time_partitions.report_gc_phase_end(13);
+ time_partitions.report_gc_phase_end(14);
+ time_partitions.report_gc_phase_start("SubPhase3", 15);
+ time_partitions.report_gc_phase_end(16);
+
+ time_partitions.report_gc_phase_end(17);
+
+ TimePartitionPhasesIterator iter(&time_partitions);
+
+ validate_pause_phase(iter.next(), 0, "PausePhase", 2, 17);
+ validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 8);
+ validate_pause_phase(iter.next(), 2, "SubPhase11", 4, 5);
+ validate_pause_phase(iter.next(), 2, "SubPhase12", 6, 7);
+ validate_pause_phase(iter.next(), 1, "SubPhase2", 9, 14);
+ validate_pause_phase(iter.next(), 2, "SubPhase21", 10, 11);
+ validate_pause_phase(iter.next(), 2, "SubPhase22", 12, 13);
+ validate_pause_phase(iter.next(), 1, "SubPhase3", 15, 16);
+
+ assert(time_partitions.sum_of_pauses() == 15, "Incorrect");
+ assert(time_partitions.longest_pause() == 15, "Incorrect");
+
+ assert(!iter.has_next(), "Too many elements");
+ }
+};
+
+class GCTimerTest {
+public:
+ static void all() {
+ gc_start();
+ gc_end();
+ }
+
+ static void gc_start() {
+ GCTimer gc_timer;
+ gc_timer.register_gc_start(1);
+
+ assert(gc_timer.gc_start() == 1, "Incorrect");
+ }
+
+ static void gc_end() {
+ GCTimer gc_timer;
+ gc_timer.register_gc_start(1);
+ gc_timer.register_gc_end(2);
+
+ assert(gc_timer.gc_end() == 2, "Incorrect");
+ }
+};
+
+void GCTimerAllTest::all() {
+ GCTimerTest::all();
+ TimePartitionPhasesIteratorTest::all();
+}
+
+#endif
diff --git a/src/share/vm/gc_implementation/shared/gcTimer.hpp b/src/share/vm/gc_implementation/shared/gcTimer.hpp
new file mode 100644
index 000000000..b29e7c544
--- /dev/null
+++ b/src/share/vm/gc_implementation/shared/gcTimer.hpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP
+
+#include "memory/allocation.hpp"
+#include "prims/jni_md.h"
+#include "utilities/macros.hpp"
+
+class ConcurrentPhase;
+class GCPhase;
+class PausePhase;
+
+template <class E> class GrowableArray;
+
+class PhaseVisitor {
+ public:
+ virtual void visit(GCPhase* phase) = 0;
+ virtual void visit(PausePhase* phase) { visit((GCPhase*)phase); }
+ virtual void visit(ConcurrentPhase* phase) { visit((GCPhase*)phase); }
+};
+
+class GCPhase {
+ const char* _name;
+ int _level;
+ jlong _start;
+ jlong _end;
+
+ public:
+ void set_name(const char* name) { _name = name; }
+ const char* name() { return _name; }
+
+ int level() { return _level; }
+ void set_level(int level) { _level = level; }
+
+ jlong start() { return _start; }
+ void set_start(jlong time) { _start = time; }
+
+ jlong end() { return _end; }
+ void set_end(jlong time) { _end = time; }
+
+ virtual void accept(PhaseVisitor* visitor) = 0;
+};
+
+class PausePhase : public GCPhase {
+ public:
+ void accept(PhaseVisitor* visitor) {
+ visitor->visit(this);
+ }
+};
+
+class ConcurrentPhase : public GCPhase {
+ void accept(PhaseVisitor* visitor) {
+ visitor->visit(this);
+ }
+};
+
+class PhasesStack {
+ public:
+ // FIXME: Temporarily set to 5 (used to be 4), since reference processing needs it.
+ static const int PHASE_LEVELS = 5;
+
+ private:
+ int _phase_indices[PHASE_LEVELS];
+ int _next_phase_level;
+
+ public:
+ PhasesStack() { clear(); }
+ void clear();
+
+ void push(int phase_index);
+ int pop();
+ int count() const;
+};
+
+class TimePartitions {
+ static const int INITIAL_CAPACITY = 10;
+
+ // Currently we only support pause phases.
+ GrowableArray<PausePhase>* _phases;
+ PhasesStack _active_phases;
+
+ jlong _sum_of_pauses;
+ jlong _longest_pause;
+
+ public:
+ TimePartitions();
+ ~TimePartitions();
+ void clear();
+
+ void report_gc_phase_start(const char* name, jlong time);
+ void report_gc_phase_end(jlong time);
+
+ int num_phases() const;
+ GCPhase* phase_at(int index) const;
+
+ jlong sum_of_pauses();
+ jlong longest_pause();
+
+ bool has_active_phases();
+ private:
+ void update_statistics(GCPhase* phase);
+};
+
+class PhasesIterator {
+ public:
+ virtual bool has_next() = 0;
+ virtual GCPhase* next() = 0;
+};
+
+class GCTimer : public ResourceObj {
+ NOT_PRODUCT(friend class GCTimerTest;)
+ protected:
+ jlong _gc_start;
+ jlong _gc_end;
+ TimePartitions _time_partitions;
+
+ public:
+ virtual void register_gc_start(jlong time);
+ virtual void register_gc_end(jlong time);
+
+ void register_gc_phase_start(const char* name, jlong time);
+ void register_gc_phase_end(jlong time);
+
+ jlong gc_start() { return _gc_start; }
+ jlong gc_end() { return _gc_end; }
+
+ TimePartitions* time_partitions() { return &_time_partitions; }
+
+ long longest_pause();
+ long sum_of_pauses();
+
+ protected:
+ void register_gc_pause_start(const char* name, jlong time);
+ void register_gc_pause_end(jlong time);
+};
+
+class STWGCTimer : public GCTimer {
+ public:
+ virtual void register_gc_start(jlong time);
+ virtual void register_gc_end(jlong time);
+};
+
+class ConcurrentGCTimer : public GCTimer {
+ public:
+ void register_gc_pause_start(const char* name, jlong time);
+ void register_gc_pause_end(jlong time);
+};
+
+class TimePartitionPhasesIterator {
+ TimePartitions* _time_partitions;
+ int _next;
+
+ public:
+ TimePartitionPhasesIterator(TimePartitions* time_partitions) : _time_partitions(time_partitions), _next(0) { }
+
+ virtual bool has_next();
+ virtual GCPhase* next();
+};
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class GCTimerAllTest {
+ public:
+ static void all();
+};
+
+#endif
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP
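
Editor's note: phases are stored in start order with their nesting level taken from the active-phase stack depth, so iterating TimePartitions yields a preorder walk of the phase tree. A hedged sketch of driving an STWGCTimer directly (arbitrary tick values):

    STWGCTimer timer;
    timer.register_gc_start(0);                    // also opens the "GC Pause" pause
    timer.register_gc_phase_start("Scavenge", 1);  // level-1 sub-phase
    timer.register_gc_phase_end(7);
    timer.register_gc_end(9);                      // closes the pause
    // time_partitions() now holds "GC Pause" [0,9] at level 0 followed by
    // "Scavenge" [1,7] at level 1; sum_of_pauses() and longest_pause() are 9.
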
diff --git a/src/share/vm/gc_implementation/shared/gcTrace.cpp b/src/share/vm/gc_implementation/shared/gcTrace.cpp
new file mode 100644
index 000000000..555e2ed9c
--- /dev/null
+++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/objectCountEventSender.hpp"
+#include "memory/heapInspection.hpp"
+#include "memory/referenceProcessorStats.hpp"
+#include "runtime/os.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/evacuationInfo.hpp"
+#endif
+
+#define assert_unset_gc_id() assert(_shared_gc_info.id() == SharedGCInfo::UNSET_GCID, "GC already started?")
+#define assert_set_gc_id() assert(_shared_gc_info.id() != SharedGCInfo::UNSET_GCID, "GC not started?")
+
+static GCId GCTracer_next_gc_id = 0;
+static GCId create_new_gc_id() {
+ return GCTracer_next_gc_id++;
+}
+
+void GCTracer::report_gc_start_impl(GCCause::Cause cause, jlong timestamp) {
+ assert_unset_gc_id();
+
+ GCId gc_id = create_new_gc_id();
+ _shared_gc_info.set_id(gc_id);
+ _shared_gc_info.set_cause(cause);
+ _shared_gc_info.set_start_timestamp(timestamp);
+}
+
+void GCTracer::report_gc_start(GCCause::Cause cause, jlong timestamp) {
+ assert_unset_gc_id();
+
+ report_gc_start_impl(cause, timestamp);
+}
+
+bool GCTracer::has_reported_gc_start() const {
+ return _shared_gc_info.id() != SharedGCInfo::UNSET_GCID;
+}
+
+void GCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+ assert_set_gc_id();
+
+ _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses());
+ _shared_gc_info.set_longest_pause(time_partitions->longest_pause());
+ _shared_gc_info.set_end_timestamp(timestamp);
+
+ send_phase_events(time_partitions);
+ send_garbage_collection_event();
+}
+
+void GCTracer::report_gc_end(jlong timestamp, TimePartitions* time_partitions) {
+ assert_set_gc_id();
+
+ report_gc_end_impl(timestamp, time_partitions);
+
+ _shared_gc_info.set_id(SharedGCInfo::UNSET_GCID);
+}
+
+void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const {
+ assert_set_gc_id();
+
+ send_reference_stats_event(REF_SOFT, rps.soft_count());
+ send_reference_stats_event(REF_WEAK, rps.weak_count());
+ send_reference_stats_event(REF_FINAL, rps.final_count());
+ send_reference_stats_event(REF_PHANTOM, rps.phantom_count());
+}
+
+#if INCLUDE_SERVICES
+class ObjectCountEventSenderClosure : public KlassInfoClosure {
+ const GCId _gc_id;
+ const double _size_threshold_percentage;
+ const size_t _total_size_in_words;
+ const jlong _timestamp;
+
+ public:
+ ObjectCountEventSenderClosure(GCId gc_id, size_t total_size_in_words, jlong timestamp) :
+ _gc_id(gc_id),
+ _size_threshold_percentage(ObjectCountCutOffPercent / 100),
+ _total_size_in_words(total_size_in_words),
+ _timestamp(timestamp)
+ {}
+
+ virtual void do_cinfo(KlassInfoEntry* entry) {
+ if (should_send_event(entry)) {
+ ObjectCountEventSender::send(entry, _gc_id, _timestamp);
+ }
+ }
+
+ private:
+ bool should_send_event(const KlassInfoEntry* entry) const {
+ double percentage_of_heap = ((double) entry->words()) / _total_size_in_words;
+ return percentage_of_heap >= _size_threshold_percentage;
+ }
+};
+
+void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
+ assert_set_gc_id();
+ assert(is_alive_cl != NULL, "Must supply function to check liveness");
+
+ if (ObjectCountEventSender::should_send_event()) {
+ ResourceMark rm;
+
+ KlassInfoTable cit(false);
+ if (!cit.allocation_failed()) {
+ HeapInspection hi(false, false, false, NULL);
+ hi.populate_table(&cit, is_alive_cl);
+
+ jlong timestamp = os::elapsed_counter();
+ ObjectCountEventSenderClosure event_sender(_shared_gc_info.id(), cit.size_of_instances_in_words(), timestamp);
+ cit.iterate(&event_sender);
+ }
+ }
+}
+#endif // INCLUDE_SERVICES
+
+void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const MetaspaceSummary& meta_space_summary) const {
+ assert_set_gc_id();
+
+ send_gc_heap_summary_event(when, heap_summary);
+ send_meta_space_summary_event(when, meta_space_summary);
+}
+
+void YoungGCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+ assert_set_gc_id();
+ assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported");
+
+ GCTracer::report_gc_end_impl(timestamp, time_partitions);
+ send_young_gc_event();
+
+ _tenuring_threshold = UNSET_TENURING_THRESHOLD;
+}
+
+void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) {
+ assert_set_gc_id();
+
+ send_promotion_failed_event(pf_info);
+}
+
+void YoungGCTracer::report_tenuring_threshold(const uint tenuring_threshold) {
+ _tenuring_threshold = tenuring_threshold;
+}
+
+void OldGCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+ assert_set_gc_id();
+
+ GCTracer::report_gc_end_impl(timestamp, time_partitions);
+ send_old_gc_event();
+}
+
+void ParallelOldTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+ assert_set_gc_id();
+
+ OldGCTracer::report_gc_end_impl(timestamp, time_partitions);
+ send_parallel_old_event();
+}
+
+void ParallelOldTracer::report_dense_prefix(void* dense_prefix) {
+ assert_set_gc_id();
+
+ _parallel_old_gc_info.report_dense_prefix(dense_prefix);
+}
+
+void OldGCTracer::report_concurrent_mode_failure() {
+ assert_set_gc_id();
+
+ send_concurrent_mode_failure_event();
+}
+
+#if INCLUDE_ALL_GCS
+void G1NewTracer::report_yc_type(G1YCType type) {
+ assert_set_gc_id();
+
+ _g1_young_gc_info.set_type(type);
+}
+
+void G1NewTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+ assert_set_gc_id();
+
+ YoungGCTracer::report_gc_end_impl(timestamp, time_partitions);
+ send_g1_young_gc_event();
+}
+
+void G1NewTracer::report_evacuation_info(EvacuationInfo* info) {
+ assert_set_gc_id();
+
+ send_evacuation_info_event(info);
+}
+
+void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) {
+ assert_set_gc_id();
+
+ send_evacuation_failed_event(ef_info);
+ ef_info.reset();
+}
+#endif
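
Editor's note: the per-class filter in ObjectCountEventSenderClosure is a simple share-of-heap cutoff — ObjectCountCutOffPercent is divided by 100 once, and a class's live words are compared against that fraction of the total. A worked example with hypothetical numbers:

    // Suppose ObjectCountCutOffPercent were 0.5 (percent).
    double threshold = 0.5 / 100;      // 0.005, i.e. 0.5% of the live heap
    size_t total_words = 1000000;
    size_t class_words = 6000;         // this class's live instances
    bool send = ((double)class_words) / total_words >= threshold;
    // 0.006 >= 0.005, so an ObjectCount event would be sent for this class.
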
diff --git a/src/share/vm/gc_implementation/shared/gcTrace.hpp b/src/share/vm/gc_implementation/shared/gcTrace.hpp
new file mode 100644
index 000000000..c157d86e7
--- /dev/null
+++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP
+
+#include "gc_interface/gcCause.hpp"
+#include "gc_interface/gcName.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
+#include "memory/allocation.hpp"
+#include "memory/referenceType.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1YCTypes.hpp"
+#endif
+#include "utilities/macros.hpp"
+
+typedef uint GCId;
+
+class EvacuationInfo;
+class GCHeapSummary;
+class MetaspaceSummary;
+class PSHeapSummary;
+class ReferenceProcessorStats;
+class TimePartitions;
+class BoolObjectClosure;
+
+class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
+ static const jlong UNSET_TIMESTAMP = -1;
+
+ public:
+ static const GCId UNSET_GCID = (GCId)-1;
+
+ private:
+ GCId _id;
+ GCName _name;
+ GCCause::Cause _cause;
+ jlong _start_timestamp;
+ jlong _end_timestamp;
+ jlong _sum_of_pauses;
+ jlong _longest_pause;
+
+ public:
+ SharedGCInfo(GCName name) : _id(UNSET_GCID), _name(name), _cause(GCCause::_last_gc_cause),
+ _start_timestamp(UNSET_TIMESTAMP), _end_timestamp(UNSET_TIMESTAMP), _sum_of_pauses(0), _longest_pause(0) {}
+
+ void set_id(GCId id) { _id = id; }
+ GCId id() const { return _id; }
+
+ void set_start_timestamp(jlong timestamp) { _start_timestamp = timestamp; }
+ jlong start_timestamp() const { return _start_timestamp; }
+
+ void set_end_timestamp(jlong timestamp) { _end_timestamp = timestamp; }
+ jlong end_timestamp() const { return _end_timestamp; }
+
+ void set_name(GCName name) { _name = name; }
+ GCName name() const { return _name; }
+
+ void set_cause(GCCause::Cause cause) { _cause = cause; }
+ GCCause::Cause cause() const { return _cause; }
+
+ void set_sum_of_pauses(jlong duration) { _sum_of_pauses = duration; }
+ jlong sum_of_pauses() const { return _sum_of_pauses; }
+
+ void set_longest_pause(jlong duration) { _longest_pause = duration; }
+ jlong longest_pause() const { return _longest_pause; }
+};
+
+class ParallelOldGCInfo VALUE_OBJ_CLASS_SPEC {
+ void* _dense_prefix;
+ public:
+ ParallelOldGCInfo() : _dense_prefix(NULL) {}
+ void report_dense_prefix(void* addr) {
+ _dense_prefix = addr;
+ }
+ void* dense_prefix() const { return _dense_prefix; }
+};
+
+#if INCLUDE_ALL_GCS
+
+class G1YoungGCInfo VALUE_OBJ_CLASS_SPEC {
+ G1YCType _type;
+ public:
+ G1YoungGCInfo() : _type(G1YCTypeEndSentinel) {}
+ void set_type(G1YCType type) {
+ _type = type;
+ }
+ G1YCType type() const { return _type; }
+};
+
+#endif // INCLUDE_ALL_GCS
+
+class GCTracer : public ResourceObj {
+ protected:
+ SharedGCInfo _shared_gc_info;
+
+ public:
+ void report_gc_start(GCCause::Cause cause, jlong timestamp);
+ void report_gc_end(jlong timestamp, TimePartitions* time_partitions);
+ void report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const MetaspaceSummary& meta_space_summary) const;
+ void report_gc_reference_stats(const ReferenceProcessorStats& rp) const;
+ void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN;
+ bool has_reported_gc_start() const;
+
+ protected:
+ GCTracer(GCName name) : _shared_gc_info(name) {}
+ virtual void report_gc_start_impl(GCCause::Cause cause, jlong timestamp);
+ virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+
+ private:
+ void send_garbage_collection_event() const;
+ void send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const;
+ void send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const;
+ void send_reference_stats_event(ReferenceType type, size_t count) const;
+ void send_phase_events(TimePartitions* time_partitions) const;
+};
+
+class YoungGCTracer : public GCTracer {
+ static const uint UNSET_TENURING_THRESHOLD = (uint) -1;
+
+ uint _tenuring_threshold;
+
+ protected:
+ YoungGCTracer(GCName name) : GCTracer(name), _tenuring_threshold(UNSET_TENURING_THRESHOLD) {}
+ virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+
+ public:
+ void report_promotion_failed(const PromotionFailedInfo& pf_info);
+ void report_tenuring_threshold(const uint tenuring_threshold);
+
+ private:
+ void send_young_gc_event() const;
+ void send_promotion_failed_event(const PromotionFailedInfo& pf_info) const;
+};
+
+class OldGCTracer : public GCTracer {
+ protected:
+ OldGCTracer(GCName name) : GCTracer(name) {}
+ virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+
+ public:
+ void report_concurrent_mode_failure();
+
+ private:
+ void send_old_gc_event() const;
+ void send_concurrent_mode_failure_event();
+};
+
+class ParallelOldTracer : public OldGCTracer {
+ ParallelOldGCInfo _parallel_old_gc_info;
+
+ public:
+ ParallelOldTracer() : OldGCTracer(ParallelOld) {}
+ void report_dense_prefix(void* dense_prefix);
+
+ protected:
+ void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+
+ private:
+ void send_parallel_old_event() const;
+};
+
+class SerialOldTracer : public OldGCTracer {
+ public:
+ SerialOldTracer() : OldGCTracer(SerialOld) {}
+};
+
+class ParallelScavengeTracer : public YoungGCTracer {
+ public:
+ ParallelScavengeTracer() : YoungGCTracer(ParallelScavenge) {}
+};
+
+class DefNewTracer : public YoungGCTracer {
+ public:
+ DefNewTracer() : YoungGCTracer(DefNew) {}
+};
+
+class ParNewTracer : public YoungGCTracer {
+ public:
+ ParNewTracer() : YoungGCTracer(ParNew) {}
+};
+
+#if INCLUDE_ALL_GCS
+class G1NewTracer : public YoungGCTracer {
+ G1YoungGCInfo _g1_young_gc_info;
+
+ public:
+ G1NewTracer() : YoungGCTracer(G1New) {}
+
+ void report_yc_type(G1YCType type);
+ void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+ void report_evacuation_info(EvacuationInfo* info);
+ void report_evacuation_failed(EvacuationFailedInfo& ef_info);
+
+ private:
+ void send_g1_young_gc_event();
+ void send_evacuation_info_event(EvacuationInfo* info);
+ void send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const;
+};
+#endif
+
+class CMSTracer : public OldGCTracer {
+ public:
+ CMSTracer() : OldGCTracer(ConcurrentMarkSweep) {}
+};
+
+class G1OldTracer : public OldGCTracer {
+ public:
+ G1OldTracer() : OldGCTracer(G1Old) {}
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP
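
Editor's note: the tracers enforce a strict lifecycle — report_gc_start allocates a fresh GCId, every intermediate report_* call asserts an id is set, and report_gc_end clears it back to UNSET_GCID; YoungGCTracer additionally asserts that a tenuring threshold was reported before the end event. A hedged sketch of one young collection's reporting sequence (start_ticks, end_ticks, threshold, stats, and timer are placeholders for values gathered during the collection):

    ParallelScavengeTracer tracer;
    tracer.report_gc_start(GCCause::_allocation_failure, start_ticks);
    // ... scavenge work ...
    tracer.report_tenuring_threshold(threshold);   // must precede report_gc_end
    tracer.report_gc_reference_stats(stats);       // ReferenceProcessorStats
    tracer.report_gc_end(end_ticks, timer.time_partitions());
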
diff --git a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp
new file mode 100644
index 000000000..f08b7d1c5
--- /dev/null
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
+#include "runtime/os.hpp"
+#include "trace/tracing.hpp"
+#include "trace/traceBackend.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/evacuationInfo.hpp"
+#include "gc_implementation/g1/g1YCTypes.hpp"
+#endif
+
+// All GC dependencies on the trace framework are contained within this file.
+
+typedef uintptr_t TraceAddress;
+
+void GCTracer::send_garbage_collection_event() const {
+ EventGCGarbageCollection event(UNTIMED);
+ if (event.should_commit()) {
+ event.set_gcId(_shared_gc_info.id());
+ event.set_name(_shared_gc_info.name());
+ event.set_cause((u2) _shared_gc_info.cause());
+ event.set_sumOfPauses(_shared_gc_info.sum_of_pauses());
+ event.set_longestPause(_shared_gc_info.longest_pause());
+ event.set_starttime(_shared_gc_info.start_timestamp());
+ event.set_endtime(_shared_gc_info.end_timestamp());
+ event.commit();
+ }
+}
+
+void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
+ EventGCReferenceStatistics e(UNTIMED);
+ if (e.should_commit()) {
+ e.set_gcId(_shared_gc_info.id());
+ e.set_type((u1)type);
+ e.set_count(count);
+ e.set_endtime(os::elapsed_counter());
+ e.commit();
+ }
+}
+
+void ParallelOldTracer::send_parallel_old_event() const {
+ EventGCParallelOld e(UNTIMED);
+ if (e.should_commit()) {
+ e.set_gcId(_shared_gc_info.id());
+ e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
+ e.set_starttime(_shared_gc_info.start_timestamp());
+ e.set_endtime(_shared_gc_info.end_timestamp());
+ e.commit();
+ }
+}
+
+void YoungGCTracer::send_young_gc_event() const {
+ EventGCYoungGarbageCollection e(UNTIMED);
+ if (e.should_commit()) {
+ e.set_gcId(_shared_gc_info.id());
+ e.set_tenuringThreshold(_tenuring_threshold);
+ e.set_starttime(_shared_gc_info.start_timestamp());
+ e.set_endtime(_shared_gc_info.end_timestamp());
+ e.commit();
+ }
+}
+
+void OldGCTracer::send_old_gc_event() const {
+ EventGCOldGarbageCollection e(UNTIMED);
+ if (e.should_commit()) {
+ e.set_gcId(_shared_gc_info.id());
+ e.set_starttime(_shared_gc_info.start_timestamp());
+ e.set_endtime(_shared_gc_info.end_timestamp());
+ e.commit();
+ }
+}
+
+static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) {
+ TraceStructCopyFailed failed_info;
+ failed_info.set_objectCount(cf_info.failed_count());
+ failed_info.set_firstSize(cf_info.first_size());
+ failed_info.set_smallestSize(cf_info.smallest_size());
+ failed_info.set_totalSize(cf_info.total_size());
+ return failed_info;
+}
+
+void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
+ EventPromotionFailed e(UNTIMED);
+ if (e.should_commit()) {
+ e.set_gcId(_shared_gc_info.id());
+ e.set_data(to_trace_struct(pf_info));
+ e.set_thread(pf_info.thread()->thread_id());
+ e.set_endtime(os::elapsed_counter());
+ e.commit();
+ }
+}
+
+// Common to CMS and G1
+void OldGCTracer::send_concurrent_mode_failure_event() {
+ EventConcurrentModeFailure e(UNTIMED);
+ if (e.should_commit()) {
+ e.set_gcId(_shared_gc_info.id());
+ e.set_endtime(os::elapsed_counter());
+ e.commit();
+ }
+}
+
+#if INCLUDE_ALL_GCS
+void G1NewTracer::send_g1_young_gc_event() {
+ EventGCG1GarbageCollection e(UNTIMED);
+ if (e.should_commit()) {
+ e.set_gcId(_shared_gc_info.id());
+ e.set_type(_g1_young_gc_info.type());
+ e.set_starttime(_shared_gc_info.start_timestamp());
+ e.set_endtime(_shared_gc_info.end_timestamp());
+ e.commit();
+ }
+}
+
+void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
+ EventEvacuationInfo e(UNTIMED);
+ if (e.should_commit()) {
+ e.set_gcId(_shared_gc_info.id());
+ e.set_cSetRegions(info->collectionset_regions());
+ e.set_cSetUsedBefore(info->collectionset_used_before());
+ e.set_cSetUsedAfter(info->collectionset_used_after());
+ e.set_allocationRegions(info->allocation_regions());
+ e.set_allocRegionsUsedBefore(info->alloc_regions_used_before());
+ e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
+ e.set_bytesCopied(info->bytes_copied());
+ e.set_regionsFreed(info->regions_freed());
+ e.set_endtime(os::elapsed_counter());
+ e.commit();
+ }
+}
+
+void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
+ EventEvacuationFailed e(UNTIMED);
+ if (e.should_commit()) {
+ e.set_gcId(_shared_gc_info.id());
+ e.set_data(to_trace_struct(ef_info));
+ e.set_endtime(os::elapsed_counter());
+ e.commit();
+ }
+}
+#endif
+
+static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) {
+ TraceStructVirtualSpace space;
+ space.set_start((TraceAddress)summary.start());
+ space.set_committedEnd((TraceAddress)summary.committed_end());
+ space.set_committedSize(summary.committed_size());
+ space.set_reservedEnd((TraceAddress)summary.reserved_end());
+ space.set_reservedSize(summary.reserved_size());
+ return space;
+}
+
+static TraceStructObjectSpace to_trace_struct(const SpaceSummary& summary) {
+ TraceStructObjectSpace space;
+ space.set_start((TraceAddress)summary.start());
+ space.set_end((TraceAddress)summary.end());
+ space.set_used(summary.used());
+ space.set_size(summary.size());
+ return space;
+}
+
+class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
+ GCId _id;
+ GCWhen::Type _when;
+ public:
+ GCHeapSummaryEventSender(GCId id, GCWhen::Type when) : _id(id), _when(when) {}
+
+ void visit(const GCHeapSummary* heap_summary) const {
+ const VirtualSpaceSummary& heap_space = heap_summary->heap();
+
+ EventGCHeapSummary e(UNTIMED);
+ if (e.should_commit()) {
+ e.set_gcId(_id);
+ e.set_when((u1)_when);
+ e.set_heapSpace(to_trace_struct(heap_space));
+ e.set_heapUsed(heap_summary->used());
+ e.set_endtime(os::elapsed_counter());
+ e.commit();
+ }
+ }
+
+ void visit(const PSHeapSummary* ps_heap_summary) const {
+ visit((GCHeapSummary*)ps_heap_summary);
+
+ const VirtualSpaceSummary& old_summary = ps_heap_summary->old();
+ const SpaceSummary& old_space = ps_heap_summary->old_space();
+ const VirtualSpaceSummary& young_summary = ps_heap_summary->young();
+ const SpaceSummary& eden_space = ps_heap_summary->eden();
+ const SpaceSummary& from_space = ps_heap_summary->from();
+ const SpaceSummary& to_space = ps_heap_summary->to();
+
+ EventPSHeapSummary e(UNTIMED);
+ if (e.should_commit()) {
+ e.set_gcId(_id);
+ e.set_when((u1)_when);
+
+ e.set_oldSpace(to_trace_struct(old_summary));
+ e.set_oldObjectSpace(to_trace_struct(old_space));
+ e.set_youngSpace(to_trace_struct(young_summary));
+ e.set_edenSpace(to_trace_struct(eden_space));
+ e.set_fromSpace(to_trace_struct(from_space));
+ e.set_toSpace(to_trace_struct(to_space));
+ e.set_endtime(os::elapsed_counter());
+ e.commit();
+ }
+ }
+};
+
+void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
+ GCHeapSummaryEventSender visitor(_shared_gc_info.id(), when);
+ heap_summary.accept(&visitor);
+}
+
+static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
+ TraceStructMetaspaceSizes meta_sizes;
+
+ meta_sizes.set_capacity(sizes.capacity());
+ meta_sizes.set_used(sizes.used());
+ meta_sizes.set_reserved(sizes.reserved());
+
+ return meta_sizes;
+}
+
+void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
+ EventMetaspaceSummary e(UNTIMED);
+ if (e.should_commit()) {
+ e.set_gcId(_shared_gc_info.id());
+ e.set_when((u1) when);
+ e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
+ e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
+ e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));
+ e.set_endtime(os::elapsed_counter());
+ e.commit();
+ }
+}
+
+class PhaseSender : public PhaseVisitor {
+ GCId _gc_id;
+ public:
+ PhaseSender(GCId gc_id) : _gc_id(gc_id) {}
+
+ template<typename T>
+ void send_phase(PausePhase* pause) {
+ T event(UNTIMED);
+ if (event.should_commit()) {
+ event.set_gcId(_gc_id);
+ event.set_name(pause->name());
+ event.set_starttime(pause->start());
+ event.set_endtime(pause->end());
+ event.commit();
+ }
+ }
+
+ void visit(GCPhase* pause) { ShouldNotReachHere(); }
+ void visit(ConcurrentPhase* pause) { Unimplemented(); }
+ void visit(PausePhase* pause) {
+ assert(PhasesStack::PHASE_LEVELS == 5, "Need more event types");
+
+ switch (pause->level()) {
+ case 0: send_phase<EventGCPhasePause>(pause); break;
+ case 1: send_phase<EventGCPhasePauseLevel1>(pause); break;
+ case 2: send_phase<EventGCPhasePauseLevel2>(pause); break;
+ case 3: send_phase<EventGCPhasePauseLevel3>(pause); break;
+ default: /* Ignore sending this phase */ break;
+ }
+ }
+};
+
+void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
+ PhaseSender phase_reporter(_shared_gc_info.id());
+
+ TimePartitionPhasesIterator iter(time_partitions);
+ while (iter.has_next()) {
+ GCPhase* phase = iter.next();
+ phase->accept(&phase_reporter);
+ }
+}
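Every send_* function above follows the same cheap-guard shape: construct the event UNTIMED, test should_commit() before touching any fields, and only fill and commit when tracing is enabled. Below is a minimal standalone sketch of that pattern; StubEvent is a hypothetical stand-in for a generated Event* class, not a VM type.

  #include <cstdint>
  #include <cstdio>

  struct StubEvent {
    uint64_t gc_id;
    bool     enabled;   // in the VM, the trace backend answers this query
    explicit StubEvent(bool on) : gc_id(0), enabled(on) {}
    bool should_commit() const { return enabled; }
    void set_gcId(uint64_t id) { gc_id = id; }
    void commit() { std::printf("committed, gcId=%llu\n", (unsigned long long) gc_id); }
  };

  static void send_stub_event(uint64_t current_gc_id, bool tracing_on) {
    StubEvent e(tracing_on);
    if (e.should_commit()) {       // cheap check first ...
      e.set_gcId(current_gc_id);   // ... field serialization only when enabled
      e.commit();
    }
  }

  int main() {
    send_stub_event(1, true);      // prints one line
    send_stub_event(2, false);     // no-op
    return 0;
  }

The payoff is that a disabled event costs little more than the should_commit() test.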
diff --git a/src/share/vm/gc_implementation/shared/gcTraceTime.cpp b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp
new file mode 100644
index 000000000..1c137047c
--- /dev/null
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/timer.hpp"
+#include "utilities/ostream.hpp"
+
+
+GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) :
+ _title(title), _doit(doit), _print_cr(print_cr), _timer(timer) {
+ if (_doit || _timer != NULL) {
+ _start_counter = os::elapsed_counter();
+ }
+
+ if (_timer != NULL) {
+ assert(SafepointSynchronize::is_at_safepoint(), "Tracing currently only supported at safepoints");
+ assert(Thread::current()->is_VM_thread(), "Tracing currently only supported from the VM thread");
+
+ _timer->register_gc_phase_start(title, _start_counter);
+ }
+
+ if (_doit) {
+ if (PrintGCTimeStamps) {
+ gclog_or_tty->stamp();
+ gclog_or_tty->print(": ");
+ }
+ gclog_or_tty->print("[%s", title);
+ gclog_or_tty->flush();
+ }
+}
+
+GCTraceTime::~GCTraceTime() {
+ jlong stop_counter = 0;
+
+ if (_doit || _timer != NULL) {
+ stop_counter = os::elapsed_counter();
+ }
+
+ if (_timer != NULL) {
+ _timer->register_gc_phase_end(stop_counter);
+ }
+
+ if (_doit) {
+ double seconds = TimeHelper::counter_to_seconds(stop_counter - _start_counter);
+ if (_print_cr) {
+ gclog_or_tty->print_cr(", %3.7f secs]", seconds);
+ } else {
+ gclog_or_tty->print(", %3.7f secs]", seconds);
+ }
+ gclog_or_tty->flush();
+ }
+}
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.inline.hpp b/src/share/vm/gc_implementation/shared/gcTraceTime.hpp
index e7f1dfa54..5d92b4d33 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.inline.hpp
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,15 +22,23 @@
*
*/
-#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARMARKBITMAP_INLINE_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARMARKBITMAP_INLINE_HPP
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
-#include "oops/oop.hpp"
+#include "prims/jni_md.h"
-inline bool
-ParMarkBitMap::mark_obj(oop obj)
-{
- return mark_obj(obj, obj->size());
-}
+class GCTimer;
-#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARMARKBITMAP_INLINE_HPP
+class GCTraceTime {
+ const char* _title;
+ bool _doit;
+ bool _print_cr;
+ GCTimer* _timer;
+ jlong _start_counter;
+
+ public:
+ GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer);
+ ~GCTraceTime();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
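GCTraceTime is a scoped (RAII) helper: the constructor samples os::elapsed_counter(), optionally registers a phase start on the GCTimer, and opens the "[title" log line; the destructor registers the phase end and prints ", N secs]". A standalone sketch of the same mechanism, with std::chrono standing in for the VM's counter and ScopedTrace as a hypothetical stand-in:

  #include <chrono>
  #include <cstdio>

  class ScopedTrace {
    const char* _title;
    bool        _doit;
    std::chrono::steady_clock::time_point _start;
   public:
    ScopedTrace(const char* title, bool doit) : _title(title), _doit(doit) {
      if (_doit) {
        _start = std::chrono::steady_clock::now();
        std::printf("[%s", _title);
      }
    }
    ~ScopedTrace() {  // runs when the traced scope exits
      if (_doit) {
        std::chrono::duration<double> secs = std::chrono::steady_clock::now() - _start;
        std::printf(", %3.7f secs]\n", secs.count());
      }
    }
  };

  int main() {
    ScopedTrace t("example phase", true);  // prints "[example phase"
    // ... phase work ...
    return 0;
  }                                        // prints ", 0.0000NNN secs]"

In the VM the same scope also feeds the GCTimer, which is where the phase events sent by GCTracer::send_phase_events get their start and end times.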
diff --git a/src/share/vm/gc_implementation/shared/gcUtil.hpp b/src/share/vm/gc_implementation/shared/gcUtil.hpp
index a01115d06..ad3075c90 100644
--- a/src/share/vm/gc_implementation/shared/gcUtil.hpp
+++ b/src/share/vm/gc_implementation/shared/gcUtil.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -144,9 +144,9 @@ class AdaptivePaddedAverage : public AdaptiveWeightedAverage {
_padded_avg(0.0), _deviation(0.0), _padding(padding) {}
// Placement support
- void* operator new(size_t ignored, void* p) { return p; }
+ void* operator new(size_t ignored, void* p) throw() { return p; }
// Allocator
- void* operator new(size_t size) { return CHeapObj<mtGC>::operator new(size); }
+ void* operator new(size_t size) throw() { return CHeapObj<mtGC>::operator new(size); }
// Accessor
float padded_average() const { return _padded_avg; }
diff --git a/src/share/vm/gc_implementation/shared/gcWhen.hpp b/src/share/vm/gc_implementation/shared/gcWhen.hpp
new file mode 100644
index 000000000..5713ba4e8
--- /dev/null
+++ b/src/share/vm/gc_implementation/shared/gcWhen.hpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+class GCWhen : AllStatic {
+ public:
+ enum Type {
+ BeforeGC,
+ AfterGC,
+ GCWhenEndSentinel
+ };
+
+ static const char* to_string(GCWhen::Type when) {
+ switch (when) {
+ case BeforeGC: return "Before GC";
+ case AfterGC: return "After GC";
+ default: ShouldNotReachHere(); return NULL;
+ }
+ }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP
diff --git a/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp b/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp
index 034d319b0..0b855e7f6 100644
--- a/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp
+++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,9 @@
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/generation.hpp"
#include "runtime/perfData.hpp"
-#endif // INCLUDE_ALL_GCS
// An HSpaceCounter is a holder class for performance counters
// that track collections (logical spaces) in a heap;
diff --git a/src/share/vm/gc_implementation/shared/markSweep.cpp b/src/share/vm/gc_implementation/shared/markSweep.cpp
index 5e52aa1eb..7bdcd55f5 100644
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,13 +24,15 @@
#include "precompiled.hpp"
#include "compiler/compileBroker.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
-unsigned int MarkSweep::_total_invocations = 0;
+uint MarkSweep::_total_invocations = 0;
Stack<oop, mtGC> MarkSweep::_marking_stack;
Stack<ObjArrayTask, mtGC> MarkSweep::_objarray_stack;
@@ -41,6 +43,8 @@ size_t MarkSweep::_preserved_count = 0;
size_t MarkSweep::_preserved_count_max = 0;
PreservedMark* MarkSweep::_preserved_marks = NULL;
ReferenceProcessor* MarkSweep::_ref_processor = NULL;
+STWGCTimer* MarkSweep::_gc_timer = NULL;
+SerialOldTracer* MarkSweep::_gc_tracer = NULL;
MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true);
@@ -95,7 +99,7 @@ void MarkSweep::follow_stack() {
// Process ObjArrays one at a time to avoid marking stack bloat.
if (!_objarray_stack.is_empty()) {
ObjArrayTask task = _objarray_stack.pop();
- ObjArrayKlass* const k = (ObjArrayKlass*)task.obj()->klass();
+ ObjArrayKlass* k = (ObjArrayKlass*)task.obj()->klass();
k->oop_follow_contents(task.obj(), task.index());
}
} while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
@@ -166,7 +170,6 @@ void MarkSweep::restore_marks() {
MarkSweep::IsAliveClosure MarkSweep::is_alive;
-void MarkSweep::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }
MarkSweep::KeepAliveClosure MarkSweep::keep_alive;
@@ -174,7 +177,10 @@ MarkSweep::KeepAliveClosure MarkSweep::keep_alive;
void MarkSweep::KeepAliveClosure::do_oop(oop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); }
void MarkSweep::KeepAliveClosure::do_oop(narrowOop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); }
-void marksweep_init() { /* empty */ }
+void marksweep_init() {
+ MarkSweep::_gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
+ MarkSweep::_gc_tracer = new (ResourceObj::C_HEAP, mtGC) SerialOldTracer();
+}
#ifndef PRODUCT
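marksweep_init() now allocates the serial collector's timer and tracer once, during VM initialization, on the C heap; the (ResourceObj::C_HEAP, mtGC) placement arguments tag the allocations for native-memory tracking. A minimal sketch of the init-once shape, with hypothetical stand-in types:

  #include <cassert>

  struct StubTimer  {};   // stands in for STWGCTimer
  struct StubTracer {};   // stands in for SerialOldTracer

  static StubTimer*  g_timer  = nullptr;
  static StubTracer* g_tracer = nullptr;

  // Called once during startup, before any collection can run.
  void stub_marksweep_init() {
    assert(g_timer == nullptr && g_tracer == nullptr);
    g_timer  = new StubTimer();
    g_tracer = new StubTracer();  // never freed; lives for the process lifetime
  }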
diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/gc_implementation/shared/markSweep.hpp
index ec724afa5..2c08a6897 100644
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,8 @@
class ReferenceProcessor;
class DataLayout;
+class SerialOldTracer;
+class STWGCTimer;
// MarkSweep takes care of global mark-compact garbage collection for a
// GenCollectedHeap using a four-phase pointer forwarding algorithm. All
@@ -88,7 +90,6 @@ class MarkSweep : AllStatic {
// Used for java/lang/ref handling
class IsAliveClosure: public BoolObjectClosure {
public:
- virtual void do_object(oop p);
virtual bool do_object_b(oop p);
};
@@ -113,7 +114,7 @@ class MarkSweep : AllStatic {
//
protected:
// Total invocations of a MarkSweep collection
- static unsigned int _total_invocations;
+ static uint _total_invocations;
// Traversal stacks used during phase1
static Stack<oop, mtGC> _marking_stack;
@@ -129,6 +130,9 @@ class MarkSweep : AllStatic {
// Reference processing (used in ...follow_contents)
static ReferenceProcessor* _ref_processor;
+ static STWGCTimer* _gc_timer;
+ static SerialOldTracer* _gc_tracer;
+
// Non public closures
static KeepAliveClosure keep_alive;
@@ -147,11 +151,14 @@ class MarkSweep : AllStatic {
static AdjustKlassClosure adjust_klass_closure;
// Accessors
- static unsigned int total_invocations() { return _total_invocations; }
+ static uint total_invocations() { return _total_invocations; }
// Reference Processing
static ReferenceProcessor* const ref_processor() { return _ref_processor; }
+ static STWGCTimer* gc_timer() { return _gc_timer; }
+ static SerialOldTracer* gc_tracer() { return _gc_tracer; }
+
// Call backs for marking
static void mark_object(oop obj);
// Mark pointer and follow contents. Empty marking stack afterwards.
diff --git a/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp b/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp
new file mode 100644
index 000000000..289c458d2
--- /dev/null
+++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/objectCountEventSender.hpp"
+#include "memory/heapInspection.hpp"
+#include "trace/tracing.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+#if INCLUDE_SERVICES
+
+void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp) {
+#if INCLUDE_TRACE
+ assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId),
+ "Only call this method if the event is enabled");
+
+ EventObjectCountAfterGC event(UNTIMED);
+ event.set_gcId(gc_id);
+ event.set_class(entry->klass());
+ event.set_count(entry->count());
+ event.set_totalSize(entry->words() * BytesPerWord);
+ event.set_endtime(timestamp);
+ event.commit();
+#endif // INCLUDE_TRACE
+}
+
+bool ObjectCountEventSender::should_send_event() {
+#if INCLUDE_TRACE
+ return Tracing::is_event_enabled(EventObjectCountAfterGC::eventId);
+#else
+ return false;
+#endif // INCLUDE_TRACE
+}
+
+#endif // INCLUDE_SERVICES
diff --git a/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp b/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp
new file mode 100644
index 000000000..b83e1fa73
--- /dev/null
+++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.hpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OBJECT_COUNT_EVENT_SENDER_HPP
+#define SHARE_VM_OBJECT_COUNT_EVENT_SENDER_HPP
+
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/macros.hpp"
+
+#if INCLUDE_SERVICES
+
+class KlassInfoEntry;
+
+class ObjectCountEventSender : public AllStatic {
+ public:
+ static void send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp);
+ static bool should_send_event();
+};
+
+#endif // INCLUDE_SERVICES
+
+#endif // SHARE_VM_OBJECT_COUNT_EVENT_SENDER_HPP
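The assert in ObjectCountEventSender::send() requires the event to already be enabled, so callers are expected to test should_send_event() first and skip the heap walk that builds the class histogram when tracing is off. A hedged sketch of that call shape; the histogram container and entry type are hypothetical stubs, not VM types:

  #include <cstdint>
  #include <vector>

  struct StubEntry { int64_t count; int64_t words; };    // stands in for KlassInfoEntry

  static bool stub_should_send_event() { return true; }  // backend query in the VM

  static void stub_send(const StubEntry& e, uint64_t gc_id, int64_t timestamp) {
    // In the VM this fills EventObjectCountAfterGC and commits it.
    (void) e; (void) gc_id; (void) timestamp;
  }

  void after_gc_object_count(uint64_t gc_id, int64_t timestamp) {
    if (!stub_should_send_event()) return;               // skip the walk when disabled
    std::vector<StubEntry> histogram = { {10, 40}, {3, 12} };  // built by a heap walk
    for (const StubEntry& e : histogram) {
      stub_send(e, gc_id, timestamp);                    // one event per class
    }
  }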
diff --git a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp
index 0666353aa..aced447c9 100644
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp
@@ -158,7 +158,7 @@ public:
// Fills in the unallocated portion of the buffer with a garbage object.
// If "end_of_gc" is TRUE, is after the last use in the GC. IF "retain"
// is true, attempt to re-use the unused portion in the next GC.
- void retire(bool end_of_gc, bool retain);
+ virtual void retire(bool end_of_gc, bool retain);
void print() PRODUCT_RETURN;
};
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
index 211a084ab..31e6bddf4 100644
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
@@ -145,32 +145,37 @@ bool VM_GC_HeapInspection::skip_operation() const {
return false;
}
+bool VM_GC_HeapInspection::collect() {
+ if (GC_locker::is_active()) {
+ return false;
+ }
+ Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
+ return true;
+}
+
void VM_GC_HeapInspection::doit() {
HandleMark hm;
- CollectedHeap* ch = Universe::heap();
- ch->ensure_parsability(false); // must happen, even if collection does
- // not happen (e.g. due to GC_locker)
+ Universe::heap()->ensure_parsability(false); // must happen, even if collection does
+ // not happen (e.g. due to GC_locker)
+ // or _full_gc being false
if (_full_gc) {
- // The collection attempt below would be skipped anyway if
- // the gc locker is held. The following dump may then be a tad
- // misleading to someone expecting only live objects to show
- // up in the dump (see CR 6944195). Just issue a suitable warning
- // in that case and do not attempt to do a collection.
- // The latter is a subtle point, because even a failed attempt
- // to GC will, in fact, induce one in the future, which we
- // probably want to avoid in this case because the GC that we may
- // be about to attempt holds value for us only
- // if it happens now and not if it happens in the eventual
- // future.
- if (GC_locker::is_active()) {
+ if (!collect()) {
+ // The collection attempt was skipped because the gc locker is held.
+ // The following dump may then be a tad misleading to someone expecting
+ // only live objects to show up in the dump (see CR 6944195). Just issue
+ // a suitable warning in that case and do not attempt to do a collection.
+ // The latter is a subtle point, because even a failed attempt
+ // to GC will, in fact, induce one in the future, which we
+ // probably want to avoid in this case because the GC that we may
+ // be about to attempt holds value for us only
+ // if it happens now and not if it happens in the eventual
+ // future.
warning("GC locker is held; pre-dump GC was skipped");
- } else {
- ch->collect_as_vm_thread(GCCause::_heap_inspection);
}
}
HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
_columns);
- inspect.heap_inspection(_out, _need_prologue /* need_prologue */);
+ inspect.heap_inspection(_out);
}
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
index 2a416f228..60b3a9679 100644
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
@@ -129,21 +129,18 @@ class VM_GC_HeapInspection: public VM_GC_Operation {
private:
outputStream* _out;
bool _full_gc;
- bool _need_prologue;
bool _csv_format; // "comma separated values" format for spreadsheet.
bool _print_help;
bool _print_class_stats;
const char* _columns;
public:
- VM_GC_HeapInspection(outputStream* out, bool request_full_gc,
- bool need_prologue) :
+ VM_GC_HeapInspection(outputStream* out, bool request_full_gc) :
VM_GC_Operation(0 /* total collections, dummy, ignored */,
GCCause::_heap_inspection /* GC Cause */,
0 /* total full collections, dummy, ignored */,
request_full_gc) {
_out = out;
_full_gc = request_full_gc;
- _need_prologue = need_prologue;
_csv_format = false;
_print_help = false;
_print_class_stats = false;
@@ -159,6 +156,8 @@ class VM_GC_HeapInspection: public VM_GC_Operation {
void set_print_help(bool value) {_print_help = value;}
void set_print_class_stats(bool value) {_print_class_stats = value;}
void set_columns(const char* value) {_columns = value;}
+ protected:
+ bool collect();
};
diff --git a/src/share/vm/gc_interface/allocTracer.cpp b/src/share/vm/gc_interface/allocTracer.cpp
new file mode 100644
index 000000000..d3440cd85
--- /dev/null
+++ b/src/share/vm/gc_interface/allocTracer.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_interface/allocTracer.hpp"
+#include "trace/tracing.hpp"
+#include "runtime/handles.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size) {
+ EventAllocObjectOutsideTLAB event;
+ if (event.should_commit()) {
+ event.set_class(klass());
+ event.set_allocationSize(alloc_size);
+ event.commit();
+ }
+}
+
+void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size) {
+ EventAllocObjectInNewTLAB event;
+ if (event.should_commit()) {
+ event.set_class(klass());
+ event.set_allocationSize(alloc_size);
+ event.set_tlabSize(tlab_size);
+ event.commit();
+ }
+}
diff --git a/src/share/tools/launcher/jli_util.h b/src/share/vm/gc_interface/allocTracer.hpp
index 535f7c482..33e6f19f3 100644
--- a/src/share/tools/launcher/jli_util.h
+++ b/src/share/vm/gc_interface/allocTracer.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,14 +22,16 @@
*
*/
-#ifndef _JLI_UTIL_H
-#define _JLI_UTIL_H
+#ifndef SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP
+#define SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP
-#include <stdlib.h>
+#include "memory/allocation.hpp"
+#include "runtime/handles.hpp"
-void *JLI_MemAlloc(size_t size);
-void *JLI_MemRealloc(void *ptr, size_t size);
-char *JLI_StringDup(const char *s1);
-void JLI_MemFree(void *ptr);
+class AllocTracer : AllStatic {
+ public:
+ static void send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size);
+ static void send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size);
+};
-#endif /* _JLI_UTIL_H */
+#endif /* SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP */
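AllocTracer's two entry points mirror the two slow allocation paths in the collectedHeap changes below: allocate_from_tlab_slow() reports an in-new-TLAB event when it refills, and common_mem_allocate_noinit() reports an outside-TLAB event when it allocates directly in the shared space. A small sketch of that branch with stubbed senders (all names hypothetical):

  #include <cstddef>
  #include <cstdio>

  static void send_in_new_tlab(size_t tlab_bytes, size_t alloc_bytes) {
    std::printf("new TLAB: tlab=%zu alloc=%zu\n", tlab_bytes, alloc_bytes);
  }
  static void send_outside_tlab(size_t alloc_bytes) {
    std::printf("outside TLAB: alloc=%zu\n", alloc_bytes);
  }

  // Hypothetical slow path: refill a TLAB when worthwhile, otherwise allocate
  // in the shared space; either way the matching event is reported.
  void slow_allocate(size_t alloc_bytes, bool refill_worthwhile, size_t new_tlab_bytes) {
    if (refill_worthwhile) {
      send_in_new_tlab(new_tlab_bytes, alloc_bytes);
    } else {
      send_outside_tlab(alloc_bytes);
    }
  }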
diff --git a/src/share/vm/gc_interface/collectedHeap.cpp b/src/share/vm/gc_interface/collectedHeap.cpp
index f6555979d..3f5364b79 100644
--- a/src/share/vm/gc_interface/collectedHeap.cpp
+++ b/src/share/vm/gc_interface/collectedHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,9 +24,15 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
+#include "gc_interface/allocTracer.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
+#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "runtime/init.hpp"
@@ -65,11 +71,79 @@ void GCHeapLog::log_heap(bool before) {
}
}
+VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
+ size_t capacity_in_words = capacity() / HeapWordSize;
+
+ return VirtualSpaceSummary(
+ reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
+}
+
+GCHeapSummary CollectedHeap::create_heap_summary() {
+ VirtualSpaceSummary heap_space = create_heap_space_summary();
+ return GCHeapSummary(heap_space, used());
+}
+
+MetaspaceSummary CollectedHeap::create_metaspace_summary() {
+ const MetaspaceSizes meta_space(
+ MetaspaceAux::allocated_capacity_bytes(),
+ MetaspaceAux::allocated_used_bytes(),
+ MetaspaceAux::reserved_bytes());
+ const MetaspaceSizes data_space(
+ MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
+ MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
+ MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
+ const MetaspaceSizes class_space(
+ MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
+ MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
+ MetaspaceAux::reserved_bytes(Metaspace::ClassType));
+
+ return MetaspaceSummary(meta_space, data_space, class_space);
+}
+
+void CollectedHeap::print_heap_before_gc() {
+ if (PrintHeapAtGC) {
+ Universe::print_heap_before_gc();
+ }
+ if (_gc_heap_log != NULL) {
+ _gc_heap_log->log_heap_before();
+ }
+}
+
+void CollectedHeap::print_heap_after_gc() {
+ if (PrintHeapAtGC) {
+ Universe::print_heap_after_gc();
+ }
+ if (_gc_heap_log != NULL) {
+ _gc_heap_log->log_heap_after();
+ }
+}
+
+void CollectedHeap::register_nmethod(nmethod* nm) {
+ assert_locked_or_safepoint(CodeCache_lock);
+}
+
+void CollectedHeap::unregister_nmethod(nmethod* nm) {
+ assert_locked_or_safepoint(CodeCache_lock);
+}
+
+void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
+ const GCHeapSummary& heap_summary = create_heap_summary();
+ const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
+ gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
+}
+
+void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) {
+ trace_heap(GCWhen::BeforeGC, gc_tracer);
+}
+
+void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) {
+ trace_heap(GCWhen::AfterGC, gc_tracer);
+}
+
// Memory state functions.
CollectedHeap::CollectedHeap() : _n_par_threads(0)
-
{
const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
const size_t elements_per_word = HeapWordSize / sizeof(jint);
@@ -185,7 +259,7 @@ void CollectedHeap::check_for_valid_allocation_state() {
}
#endif
-HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
+HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) {
// Retain tlab and allocate object in shared space if
// the amount free in the tlab is too large to discard.
@@ -209,6 +283,9 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
if (obj == NULL) {
return NULL;
}
+
+ AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
+
if (ZeroTLAB) {
// ..and clear it.
Copy::zero_to_words(obj, new_tlab_size);
@@ -458,28 +535,28 @@ void CollectedHeap::resize_all_tlabs() {
}
}
-void CollectedHeap::pre_full_gc_dump() {
+void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
if (HeapDumpBeforeFullGC) {
- TraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, gclog_or_tty);
+ GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
// We are doing a "major" collection and a heap dump before
// major collection has been requested.
HeapDumper::dump_heap();
}
if (PrintClassHistogramBeforeFullGC) {
- TraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, gclog_or_tty);
- VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
+ GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
+ VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
inspector.doit();
}
}
-void CollectedHeap::post_full_gc_dump() {
+void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
if (HeapDumpAfterFullGC) {
- TraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, gclog_or_tty);
+ GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
HeapDumper::dump_heap();
}
if (PrintClassHistogramAfterFullGC) {
- TraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, gclog_or_tty);
- VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
+ GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
+ VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
inspector.doit();
}
}
@@ -490,7 +567,7 @@ oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle r
assert(size >= 0, "int won't convert to size_t");
HeapWord* obj;
assert(ScavengeRootsInCode > 0, "must be");
- obj = common_mem_allocate_init(size, CHECK_NULL);
+ obj = common_mem_allocate_init(real_klass, size, CHECK_NULL);
post_allocation_setup_common(klass, obj);
assert(Universe::is_bootstrapping() ||
!((oop)obj)->is_array(), "must not be an array");
diff --git a/src/share/vm/gc_interface/collectedHeap.hpp b/src/share/vm/gc_interface/collectedHeap.hpp
index 88929343c..c26ca77a8 100644
--- a/src/share/vm/gc_interface/collectedHeap.hpp
+++ b/src/share/vm/gc_interface/collectedHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
#include "gc_interface/gcCause.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
#include "memory/allocation.hpp"
#include "memory/barrierSet.hpp"
#include "runtime/handles.hpp"
@@ -38,11 +39,17 @@
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.
-class BarrierSet;
-class ThreadClosure;
class AdaptiveSizePolicy;
-class Thread;
+class BarrierSet;
class CollectorPolicy;
+class GCHeapSummary;
+class GCTimer;
+class GCTracer;
+class MetaspaceSummary;
+class Thread;
+class ThreadClosure;
+class VirtualSpaceSummary;
+class nmethod;
class GCMessage : public FormatBuffer<1024> {
public:
@@ -128,16 +135,16 @@ class CollectedHeap : public CHeapObj<mtInternal> {
virtual void resize_all_tlabs();
// Allocate from the current thread's TLAB, with broken-out slow path.
- inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
- static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);
+ inline static HeapWord* allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size);
+ static HeapWord* allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size);
// Allocate an uninitialized block of the given size, or returns NULL if
// this is impossible.
- inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS);
+ inline static HeapWord* common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS);
// Like allocate_init, but the block returned by a successful allocation
// is guaranteed initialized to zeros.
- inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS);
+ inline static HeapWord* common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS);
// Helper functions for (VM) allocation.
inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj);
@@ -166,6 +173,8 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// Fill with a single object (either an int array or a java.lang.Object).
static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
+ virtual void trace_heap(GCWhen::Type when, GCTracer* tracer);
+
// Verification functions
virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
PRODUCT_RETURN;
@@ -202,8 +211,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
MemRegion reserved_region() const { return _reserved; }
address base() const { return (address)reserved_region().start(); }
- // Future cleanup here. The following functions should specify bytes or
- // heapwords as part of their signature.
virtual size_t capacity() const = 0;
virtual size_t used() const = 0;
@@ -550,8 +557,13 @@ class CollectedHeap : public CHeapObj<mtInternal> {
virtual void prepare_for_verify() = 0;
// Generate any dumps preceding or following a full gc
- void pre_full_gc_dump();
- void post_full_gc_dump();
+ void pre_full_gc_dump(GCTimer* timer);
+ void post_full_gc_dump(GCTimer* timer);
+
+ VirtualSpaceSummary create_heap_space_summary();
+ GCHeapSummary create_heap_summary();
+
+ MetaspaceSummary create_metaspace_summary();
// Print heap information on the given outputStream.
virtual void print_on(outputStream* st) const = 0;
@@ -560,7 +572,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
print_on(tty);
}
// Print more detailed heap information on the given
- // outputStream. The default behaviour is to call print_on(). It is
+ // outputStream. The default behavior is to call print_on(). It is
// up to each subclass to override it and add any additional output
// it needs.
virtual void print_extended_on(outputStream* st) const {
@@ -589,23 +601,16 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// Default implementation does nothing.
virtual void print_tracing_info() const = 0;
- // If PrintHeapAtGC is set call the appropriate routi
- void print_heap_before_gc() {
- if (PrintHeapAtGC) {
- Universe::print_heap_before_gc();
- }
- if (_gc_heap_log != NULL) {
- _gc_heap_log->log_heap_before();
- }
- }
- void print_heap_after_gc() {
- if (PrintHeapAtGC) {
- Universe::print_heap_after_gc();
- }
- if (_gc_heap_log != NULL) {
- _gc_heap_log->log_heap_after();
- }
- }
+ void print_heap_before_gc();
+ void print_heap_after_gc();
+
+ // Registering and unregistering an nmethod (compiled code) with the heap.
+ // Override with specific mechanism for each specialized heap type.
+ virtual void register_nmethod(nmethod* nm);
+ virtual void unregister_nmethod(nmethod* nm);
+
+ void trace_heap_before_gc(GCTracer* gc_tracer);
+ void trace_heap_after_gc(GCTracer* gc_tracer);
// Heap verification
virtual void verify(bool silent, VerifyOption option) = 0;
@@ -619,7 +624,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
inline bool promotion_should_fail();
// Reset the PromotionFailureALot counters. Should be called at the end of a
- // GC in which promotion failure ocurred.
+ // GC in which promotion failure occurred.
inline void reset_promotion_should_fail(volatile size_t* count);
inline void reset_promotion_should_fail();
#endif // #ifndef PRODUCT
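trace_heap_before_gc() and trace_heap_after_gc() are intended to bracket a collection, so every GC reports a BeforeGC and a matching AfterGC heap-plus-metaspace summary through the same tracer. A hedged sketch of that bracketing; the tracer type and collection call are stand-ins:

  struct StubTracer {};
  enum StubWhen { BeforeGC, AfterGC };

  static void stub_trace_heap(StubWhen when, StubTracer* t) { (void) when; (void) t; }

  void collect_with_summaries(StubTracer* gc_tracer) {
    stub_trace_heap(BeforeGC, gc_tracer);  // pre-GC heap and metaspace summary
    // ... run the collection ...
    stub_trace_heap(AfterGC, gc_tracer);   // matching post-GC summary
  }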
diff --git a/src/share/vm/gc_interface/collectedHeap.inline.hpp b/src/share/vm/gc_interface/collectedHeap.inline.hpp
index c57b057c6..d17b82f21 100644
--- a/src/share/vm/gc_interface/collectedHeap.inline.hpp
+++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP
+#include "gc_interface/allocTracer.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/threadLocalAllocBuffer.inline.hpp"
#include "memory/universe.hpp"
@@ -107,7 +108,7 @@ void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
post_allocation_notify(klass, (oop)obj);
}
-HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) {
+HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS) {
// Clear unhandled oops for memory allocation. Memory allocation might
// not take out a lock if from tlab, so clear here.
@@ -120,7 +121,7 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) {
HeapWord* result = NULL;
if (UseTLAB) {
- result = CollectedHeap::allocate_from_tlab(THREAD, size);
+ result = allocate_from_tlab(klass, THREAD, size);
if (result != NULL) {
assert(!HAS_PENDING_EXCEPTION,
"Unexpected exception, will result in uninitialized storage");
@@ -136,6 +137,9 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) {
assert(!HAS_PENDING_EXCEPTION,
"Unexpected exception, will result in uninitialized storage");
THREAD->incr_allocated_bytes(size * HeapWordSize);
+
+ AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize);
+
return result;
}
@@ -165,13 +169,13 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) {
}
}
-HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, TRAPS) {
- HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);
+HeapWord* CollectedHeap::common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS) {
+ HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
init_obj(obj, size);
return obj;
}
-HeapWord* CollectedHeap::allocate_from_tlab(Thread* thread, size_t size) {
+HeapWord* CollectedHeap::allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size) {
assert(UseTLAB, "should use UseTLAB");
HeapWord* obj = thread->tlab().allocate(size);
@@ -179,7 +183,7 @@ HeapWord* CollectedHeap::allocate_from_tlab(Thread* thread, size_t size) {
return obj;
}
// Otherwise...
- return allocate_from_tlab_slow(thread, size);
+ return allocate_from_tlab_slow(klass, thread, size);
}
void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
@@ -194,7 +198,7 @@ oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t");
- HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
+ HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
post_allocation_setup_obj(klass, obj);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj;
@@ -207,7 +211,7 @@ oop CollectedHeap::array_allocate(KlassHandle klass,
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t");
- HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
+ HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
post_allocation_setup_array(klass, obj, length);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj;
@@ -220,7 +224,7 @@ oop CollectedHeap::array_allocate_nozero(KlassHandle klass,
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t");
- HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);
+ HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
((oop)obj)->set_klass_gap(0);
post_allocation_setup_array(klass, obj, length);
#ifndef PRODUCT
diff --git a/src/share/vm/gc_interface/gcCause.cpp b/src/share/vm/gc_interface/gcCause.cpp
index 0ac45d911..e7e7e43f4 100644
--- a/src/share/vm/gc_interface/gcCause.cpp
+++ b/src/share/vm/gc_interface/gcCause.cpp
@@ -72,6 +72,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _cms_final_remark:
return "CMS Final Remark";
+ case _cms_concurrent_mark:
+ return "CMS Concurrent Mark";
+
case _old_generation_expanded_on_last_scavenge:
return "Old Generation Expanded On Last Scavenge";
diff --git a/src/share/vm/gc_interface/gcCause.hpp b/src/share/vm/gc_interface/gcCause.hpp
index 58abc2e6f..06f11882c 100644
--- a/src/share/vm/gc_interface/gcCause.hpp
+++ b/src/share/vm/gc_interface/gcCause.hpp
@@ -60,6 +60,7 @@ class GCCause : public AllStatic {
_cms_generation_full,
_cms_initial_mark,
_cms_final_remark,
+ _cms_concurrent_mark,
_old_generation_expanded_on_last_scavenge,
_old_generation_too_full_to_scavenge,
diff --git a/src/share/vm/gc_interface/gcName.hpp b/src/share/vm/gc_interface/gcName.hpp
new file mode 100644
index 000000000..c48c24838
--- /dev/null
+++ b/src/share/vm/gc_interface/gcName.hpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_INTERFACE_GCNAME_HPP
+#define SHARE_VM_GC_INTERFACE_GCNAME_HPP
+
+#include "utilities/debug.hpp"
+
+enum GCName {
+ ParallelOld,
+ SerialOld,
+ PSMarkSweep,
+ ParallelScavenge,
+ DefNew,
+ ParNew,
+ G1New,
+ ConcurrentMarkSweep,
+ G1Old,
+ GCNameEndSentinel
+};
+
+class GCNameHelper {
+ public:
+ static const char* to_string(GCName name) {
+ switch(name) {
+ case ParallelOld: return "ParallelOld";
+ case SerialOld: return "SerialOld";
+ case PSMarkSweep: return "PSMarkSweep";
+ case ParallelScavenge: return "ParallelScavenge";
+ case DefNew: return "DefNew";
+ case ParNew: return "ParNew";
+ case G1New: return "G1New";
+ case ConcurrentMarkSweep: return "ConcurrentMarkSweep";
+ case G1Old: return "G1Old";
+ default: ShouldNotReachHere(); return NULL;
+ }
+ }
+};
+
+#endif // SHARE_VM_GC_INTERFACE_GCNAME_HPP
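Given the header above, a one-line usage sketch: the GCName a tracer is constructed with maps to the label seen in trace output via the helper, e.g.

  const char* label = GCNameHelper::to_string(ParallelScavenge);  // "ParallelScavenge"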
diff --git a/src/share/vm/interpreter/abstractInterpreter.hpp b/src/share/vm/interpreter/abstractInterpreter.hpp
index b7e261473..9cbdceee7 100644
--- a/src/share/vm/interpreter/abstractInterpreter.hpp
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -105,6 +105,9 @@ class AbstractInterpreter: AllStatic {
java_lang_math_pow, // implementation of java.lang.Math.pow (x,y)
java_lang_math_exp, // implementation of java.lang.Math.exp (x)
java_lang_ref_reference_get, // implementation of java.lang.ref.Reference.get()
+ java_util_zip_CRC32_update, // implementation of java.util.zip.CRC32.update()
+ java_util_zip_CRC32_updateBytes, // implementation of java.util.zip.CRC32.updateBytes()
+ java_util_zip_CRC32_updateByteBuffer, // implementation of java.util.zip.CRC32.updateByteBuffer()
number_of_method_entries,
invalid = -1
};
diff --git a/src/share/vm/interpreter/bytecodeInterpreter.cpp b/src/share/vm/interpreter/bytecodeInterpreter.cpp
index 1ce24efe3..b0e12c6f8 100644
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp
@@ -471,7 +471,25 @@ BytecodeInterpreter::run(interpreterState istate) {
#ifdef ASSERT
if (istate->_msg != initialize) {
- assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
+ // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap)
+ // because in that case, EnableInvokeDynamic is true by default but will be later switched off
+ // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes
+ // for the old JSR292 implementation.
+ // This leads to a situation where 'istate->_stack_limit' always accounts for
+ // methodOopDesc::extra_stack_entries() because it is computed in
+ // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while
+ // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't
+ // account for extra_stack_entries() anymore because at the time when it is called
+ // EnableInvokeDynamic was already set to false.
+ // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
+ // switched off because of the wrong classes.
+ if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
+ assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
+ } else {
+ const int extra_stack_entries = Method::extra_stack_entries_for_jsr292;
+ assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
+ + 1), "bad stack limit");
+ }
#ifndef SHARK
IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
#endif // !SHARK
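One detail worth spelling out in the hunk above: the assertion switched from abs() to labs() because istate->_stack_base - istate->_stack_limit is a pointer difference (ptrdiff_t), which is 64 bits wide on LP64 platforms, while the C abs() takes an int. A small sketch of the truncation pitfall, assuming an LP64 target (the narrowing cast is made explicit here; in the interpreter it would be implicit):

    #include <cstdlib>
    #include <cstdio>

    int main() {
      long big = 0x100000001L;  // a 64-bit value whose low 32 bits are 1 (assumes LP64)
      // abs() takes int, so the argument is truncated before the call.
      printf("abs  -> %d\n", abs((int)big));  // prints 1
      // labs() takes long and preserves the full value.
      printf("labs -> %ld\n", labs(big));     // prints 4294967297
      return 0;
    }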
@@ -1566,7 +1584,7 @@ run:
#define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \
{ \
ARRAY_INTRO(-2); \
- extra; \
+ (void)extra; \
SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
-2); \
UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
@@ -1577,8 +1595,8 @@ run:
{ \
ARRAY_INTRO(-2); \
SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
- extra; \
- UPDATE_PC_AND_CONTINUE(1); \
+ (void)extra; \
+ UPDATE_PC_AND_CONTINUE(1); \
}
CASE(_iaload):
@@ -1602,7 +1620,7 @@ run:
#define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \
{ \
ARRAY_INTRO(-3); \
- extra; \
+ (void)extra; \
*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \
}
@@ -1611,7 +1629,7 @@ run:
#define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \
{ \
ARRAY_INTRO(-4); \
- extra; \
+ (void)extra; \
*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \
}
@@ -2218,7 +2236,7 @@ run:
}
Method* method = cache->f1_as_method();
- VERIFY_OOP(method);
+ if (VerifyOops) method->verify();
if (cache->has_appendix()) {
ConstantPool* constants = METHOD->constants();
@@ -2250,8 +2268,7 @@ run:
}
Method* method = cache->f1_as_method();
-
- VERIFY_OOP(method);
+ if (VerifyOops) method->verify();
if (cache->has_appendix()) {
ConstantPool* constants = METHOD->constants();
diff --git a/src/share/vm/interpreter/interpreter.cpp b/src/share/vm/interpreter/interpreter.cpp
index 06554172b..dfd8b5b14 100644
--- a/src/share/vm/interpreter/interpreter.cpp
+++ b/src/share/vm/interpreter/interpreter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -195,6 +195,17 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
return kind;
}
+#ifndef CC_INTERP
+ if (UseCRC32Intrinsics && m->is_native()) {
+ // Use optimized stub code for CRC32 native methods.
+ switch (m->intrinsic_id()) {
+ case vmIntrinsics::_updateCRC32 : return java_util_zip_CRC32_update;
+ case vmIntrinsics::_updateBytesCRC32 : return java_util_zip_CRC32_updateBytes;
+ case vmIntrinsics::_updateByteBufferCRC32 : return java_util_zip_CRC32_updateByteBuffer;
+ }
+ }
+#endif
+
// Native method?
// Note: This test must come _before_ the test for intrinsic
// methods. See also comments below.
@@ -297,6 +308,9 @@ void AbstractInterpreter::print_method_kind(MethodKind kind) {
case java_lang_math_sqrt : tty->print("java_lang_math_sqrt" ); break;
case java_lang_math_log : tty->print("java_lang_math_log" ); break;
case java_lang_math_log10 : tty->print("java_lang_math_log10" ); break;
+ case java_util_zip_CRC32_update : tty->print("java_util_zip_CRC32_update"); break;
+ case java_util_zip_CRC32_updateBytes : tty->print("java_util_zip_CRC32_updateBytes"); break;
+ case java_util_zip_CRC32_updateByteBuffer : tty->print("java_util_zip_CRC32_updateByteBuffer"); break;
default:
if (kind >= method_handle_invoke_FIRST &&
kind <= method_handle_invoke_LAST) {
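The method_kind() change above routes the three CRC32 natives to dedicated stub entries, and the surrounding comment stresses that this check must run before the generic native-method test. A compilable sketch of that ordering, using hypothetical enums rather than the VM's types:

    #include <cstdio>

    enum IntrinsicId { _none, _updateCRC32, _updateBytesCRC32, _updateByteBufferCRC32 };
    enum MethodKind  { native_entry, zip_CRC32_update, zip_CRC32_updateBytes,
                       zip_CRC32_updateByteBuffer };

    MethodKind method_kind(bool is_native, IntrinsicId id, bool use_crc32_intrinsics) {
      // The intrinsic test comes first; otherwise every CRC32 native method
      // would fall through to the plain native entry below.
      if (use_crc32_intrinsics && is_native) {
        switch (id) {
          case _updateCRC32:           return zip_CRC32_update;
          case _updateBytesCRC32:      return zip_CRC32_updateBytes;
          case _updateByteBufferCRC32: return zip_CRC32_updateByteBuffer;
          default: break;
        }
      }
      return native_entry;  // all other method kinds elided in this sketch
    }

    int main() {
      printf("%d\n", method_kind(true, _updateCRC32, true));   // 1: CRC32 stub entry
      printf("%d\n", method_kind(true, _updateCRC32, false));  // 0: generic native entry
      return 0;
    }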
diff --git a/src/share/vm/interpreter/interpreterRuntime.cpp b/src/share/vm/interpreter/interpreterRuntime.cpp
index 9ac09f300..a55966d80 100644
--- a/src/share/vm/interpreter/interpreterRuntime.cpp
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp
@@ -499,15 +499,15 @@ IRT_END
IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode))
// resolve field
- FieldAccessInfo info;
+ fieldDescriptor info;
constantPoolHandle pool(thread, method(thread)->constants());
bool is_put = (bytecode == Bytecodes::_putfield || bytecode == Bytecodes::_putstatic);
bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);
{
JvmtiHideSingleStepping jhss(thread);
- LinkResolver::resolve_field(info, pool, get_index_u2_cpcache(thread, bytecode),
- bytecode, false, CHECK);
+ LinkResolver::resolve_field_access(info, pool, get_index_u2_cpcache(thread, bytecode),
+ bytecode, CHECK);
} // end JvmtiHideSingleStepping
// check if link resolution caused cpCache to be updated
@@ -527,7 +527,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
// class is initialized. This is required so that access to the static
// field will call the initialization function every time until the class
// is completely initialized, as described in section 2.17.5 of the JVM Specification.
- InstanceKlass *klass = InstanceKlass::cast(info.klass()());
+ InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) &&
!klass->is_initialized());
Bytecodes::Code get_code = (Bytecodes::Code)0;
@@ -542,9 +542,9 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
cache_entry(thread)->set_field(
get_code,
put_code,
- info.klass(),
- info.field_index(),
- info.field_offset(),
+ info.field_holder(),
+ info.index(),
+ info.offset(),
state,
info.access_flags().is_final(),
info.access_flags().is_volatile(),
@@ -689,29 +689,55 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
if (already_resolved(thread)) return;
if (bytecode == Bytecodes::_invokeinterface) {
-
if (TraceItables && Verbose) {
ResourceMark rm(thread);
tty->print_cr("Resolving: klass: %s to method: %s", info.resolved_klass()->name()->as_C_string(), info.resolved_method()->name()->as_C_string());
}
+ }
+#ifdef ASSERT
+ if (bytecode == Bytecodes::_invokeinterface) {
if (info.resolved_method()->method_holder() ==
SystemDictionary::Object_klass()) {
// NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
- // (see also cpCacheOop.cpp for details)
+ // (see also CallInfo::set_interface for details)
+ assert(info.call_kind() == CallInfo::vtable_call ||
+ info.call_kind() == CallInfo::direct_call, "");
methodHandle rm = info.resolved_method();
assert(rm->is_final() || info.has_vtable_index(),
"should have been set already");
- cache_entry(thread)->set_method(bytecode, rm, info.vtable_index());
+ } else if (!info.resolved_method()->has_itable_index()) {
+ // Resolved something like CharSequence.toString. Use vtable not itable.
+ assert(info.call_kind() != CallInfo::itable_call, "");
} else {
// Setup itable entry
- int index = klassItable::compute_itable_index(info.resolved_method()());
- cache_entry(thread)->set_interface_call(info.resolved_method(), index);
+ assert(info.call_kind() == CallInfo::itable_call, "");
+ int index = info.resolved_method()->itable_index();
+ assert(info.itable_index() == index, "");
}
} else {
- cache_entry(thread)->set_method(
+ assert(info.call_kind() == CallInfo::direct_call ||
+ info.call_kind() == CallInfo::vtable_call, "");
+ }
+#endif
+ switch (info.call_kind()) {
+ case CallInfo::direct_call:
+ cache_entry(thread)->set_direct_call(
+ bytecode,
+ info.resolved_method());
+ break;
+ case CallInfo::vtable_call:
+ cache_entry(thread)->set_vtable_call(
bytecode,
info.resolved_method(),
info.vtable_index());
+ break;
+ case CallInfo::itable_call:
+ cache_entry(thread)->set_itable_call(
+ bytecode,
+ info.resolved_method(),
+ info.itable_index());
+ break;
+ default: ShouldNotReachHere();
}
}
IRT_END
@@ -1212,3 +1238,26 @@ IRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* threa
size_of_arguments * Interpreter::stackElementSize);
IRT_END
#endif
+
+#if INCLUDE_JVMTI
+// This supports the JVMTI PopFrame interface.
+// Make sure it is an invokestatic of a polymorphic intrinsic that has a member_name argument
+// and return it as a vm_result so that it can be reloaded in the list of invokestatic parameters.
+// The dmh argument is a reference to a DirectMethodHandle that has a member name field.
+IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address dmh,
+ Method* method, address bcp))
+ Bytecodes::Code code = Bytecodes::code_at(method, bcp);
+ if (code != Bytecodes::_invokestatic) {
+ return;
+ }
+ ConstantPool* cpool = method->constants();
+ int cp_index = Bytes::get_native_u2(bcp + 1) + ConstantPool::CPCACHE_INDEX_TAG;
+ Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index));
+ Symbol* mname = cpool->name_ref_at(cp_index);
+
+ if (MethodHandles::has_member_arg(cname, mname)) {
+ oop member_name = java_lang_invoke_DirectMethodHandle::member((oop)dmh);
+ thread->set_vm_result(member_name);
+ }
+IRT_END
+#endif // INCLUDE_JVMTI
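member_name_arg_or_null() above decodes its invokestatic operand with Bytes::get_native_u2(bcp + 1) plus a cache tag: rewritten bytecodes store the constant-pool-cache index in native byte order right after the opcode. A simplified stand-in for that decoding step (the tag value below is hypothetical; the real one is VM-internal):

    #include <cstdio>
    #include <cstring>

    typedef unsigned char  u1;
    typedef unsigned short u2;

    // Read a 16-bit operand in native byte order (stand-in for Bytes::get_native_u2).
    static u2 get_native_u2(const u1* p) {
      u2 v;
      memcpy(&v, p, sizeof(v));
      return v;
    }

    int main() {
      const int CPCACHE_INDEX_TAG = 0;  // hypothetical; the real tag is VM-internal
      u1 code[3];
      code[0] = 0xb8;                   // invokestatic opcode
      u2 raw = 42;                      // index as the bytecode rewriter would store it
      memcpy(code + 1, &raw, sizeof(raw));
      int cp_index = get_native_u2(code + 1) + CPCACHE_INDEX_TAG;
      printf("cp_index = %d\n", cp_index);  // prints 42
      return 0;
    }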
diff --git a/src/share/vm/interpreter/interpreterRuntime.hpp b/src/share/vm/interpreter/interpreterRuntime.hpp
index bd29f6e4a..d396eedd5 100644
--- a/src/share/vm/interpreter/interpreterRuntime.hpp
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp
@@ -95,6 +95,9 @@ class InterpreterRuntime: AllStatic {
static void create_exception(JavaThread* thread, char* name, char* message);
static void create_klass_exception(JavaThread* thread, char* name, oopDesc* obj);
static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception);
+#if INCLUDE_JVMTI
+ static void member_name_arg_or_null(JavaThread* thread, address dmh, Method* m, address bcp);
+#endif
static void throw_pending_exception(JavaThread* thread);
// Statics & fields
diff --git a/src/share/vm/interpreter/linkResolver.cpp b/src/share/vm/interpreter/linkResolver.cpp
index b0bd678b5..44ac9e086 100644
--- a/src/share/vm/interpreter/linkResolver.cpp
+++ b/src/share/vm/interpreter/linkResolver.cpp
@@ -46,19 +46,6 @@
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
-//------------------------------------------------------------------------------------------------------------------------
-// Implementation of FieldAccessInfo
-
-void FieldAccessInfo::set(KlassHandle klass, Symbol* name, int field_index, int field_offset,
-BasicType field_type, AccessFlags access_flags) {
- _klass = klass;
- _name = name;
- _field_index = field_index;
- _field_offset = field_offset;
- _field_type = field_type;
- _access_flags = access_flags;
-}
-
//------------------------------------------------------------------------------------------------------------------------
// Implementation of CallInfo
@@ -66,26 +53,25 @@ BasicType field_type, AccessFlags access_flags) {
void CallInfo::set_static(KlassHandle resolved_klass, methodHandle resolved_method, TRAPS) {
int vtable_index = Method::nonvirtual_vtable_index;
- set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
+ set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
}
-void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, TRAPS) {
+void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index, TRAPS) {
// This is only called for interface methods. If the resolved_method
// comes from java/lang/Object, it can be the subject of a virtual call, so
// we should pick the vtable index from the resolved method.
- // Other than that case, there is no valid vtable index to specify.
- int vtable_index = Method::invalid_vtable_index;
- if (resolved_method->method_holder() == SystemDictionary::Object_klass()) {
- assert(resolved_method->vtable_index() == selected_method->vtable_index(), "sanity check");
- vtable_index = resolved_method->vtable_index();
- }
- set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
+ // In that case, the caller must call set_virtual instead of set_interface.
+ assert(resolved_method->method_holder()->is_interface(), "");
+ assert(itable_index == resolved_method()->itable_index(), "");
+ set_common(resolved_klass, selected_klass, resolved_method, selected_method, CallInfo::itable_call, itable_index, CHECK);
}
void CallInfo::set_virtual(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index, "valid index");
- set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
+ assert(vtable_index < 0 || !resolved_method->has_vtable_index() || vtable_index == resolved_method->vtable_index(), "");
+ CallKind kind = (vtable_index >= 0 && !resolved_method->can_be_statically_bound() ? CallInfo::vtable_call : CallInfo::direct_call);
+ set_common(resolved_klass, selected_klass, resolved_method, selected_method, kind, vtable_index, CHECK);
assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call");
}
@@ -98,20 +84,29 @@ void CallInfo::set_handle(methodHandle resolved_method, Handle resolved_appendix
resolved_method->is_compiled_lambda_form(),
"linkMethod must return one of these");
int vtable_index = Method::nonvirtual_vtable_index;
- assert(resolved_method->vtable_index() == vtable_index, "");
- set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
+ assert(!resolved_method->has_vtable_index(), "");
+ set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
_resolved_appendix = resolved_appendix;
_resolved_method_type = resolved_method_type;
}
-void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
+void CallInfo::set_common(KlassHandle resolved_klass,
+ KlassHandle selected_klass,
+ methodHandle resolved_method,
+ methodHandle selected_method,
+ CallKind kind,
+ int index,
+ TRAPS) {
assert(resolved_method->signature() == selected_method->signature(), "signatures must correspond");
_resolved_klass = resolved_klass;
_selected_klass = selected_klass;
_resolved_method = resolved_method;
_selected_method = selected_method;
- _vtable_index = vtable_index;
+ _call_kind = kind;
+ _call_index = index;
_resolved_appendix = Handle();
+ DEBUG_ONLY(verify()); // verify before making side effects
+
if (CompilationPolicy::must_be_compiled(selected_method)) {
// This path is unusual, mostly used by the '-Xcomp' stress test mode.
@@ -138,6 +133,65 @@ void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass
}
}
+// utility query for unreflecting a method
+CallInfo::CallInfo(Method* resolved_method, Klass* resolved_klass) {
+ Klass* resolved_method_holder = resolved_method->method_holder();
+ if (resolved_klass == NULL) { // 2nd argument defaults to holder of 1st
+ resolved_klass = resolved_method_holder;
+ }
+ _resolved_klass = resolved_klass;
+ _selected_klass = resolved_klass;
+ _resolved_method = resolved_method;
+ _selected_method = resolved_method;
+ // classify:
+ CallKind kind = CallInfo::unknown_kind;
+ int index = resolved_method->vtable_index();
+ if (resolved_method->can_be_statically_bound()) {
+ kind = CallInfo::direct_call;
+ } else if (!resolved_method_holder->is_interface()) {
+ // Could be an Object method inherited into an interface, but still a vtable call.
+ kind = CallInfo::vtable_call;
+ } else if (!resolved_klass->is_interface()) {
+ // A miranda method. Compute the vtable index.
+ ResourceMark rm;
+ klassVtable* vt = InstanceKlass::cast(resolved_klass)->vtable();
+ index = vt->index_of_miranda(resolved_method->name(),
+ resolved_method->signature());
+ kind = CallInfo::vtable_call;
+ } else {
+ // A regular interface call.
+ kind = CallInfo::itable_call;
+ index = resolved_method->itable_index();
+ }
+ assert(index == Method::nonvirtual_vtable_index || index >= 0, err_msg("bad index %d", index));
+ _call_kind = kind;
+ _call_index = index;
+ _resolved_appendix = Handle();
+ DEBUG_ONLY(verify());
+}
+
+#ifdef ASSERT
+void CallInfo::verify() {
+ switch (call_kind()) { // the meaning and allowed value of index depends on kind
+ case CallInfo::direct_call:
+ if (_call_index == Method::nonvirtual_vtable_index) break;
+ // else fall through to check vtable index:
+ case CallInfo::vtable_call:
+ assert(resolved_klass()->verify_vtable_index(_call_index), "");
+ break;
+ case CallInfo::itable_call:
+ assert(resolved_method()->method_holder()->verify_itable_index(_call_index), "");
+ break;
+ case CallInfo::unknown_kind:
+ assert(call_kind() != CallInfo::unknown_kind, "CallInfo must be set");
+ break;
+ default:
+ fatal(err_msg_res("Unexpected call kind %d", call_kind()));
+ }
+}
+#endif //ASSERT
+
+
//------------------------------------------------------------------------------------------------------------------------
// Klass resolution
@@ -163,13 +217,6 @@ void LinkResolver::resolve_klass(KlassHandle& result, constantPoolHandle pool, i
result = KlassHandle(THREAD, result_oop);
}
-void LinkResolver::resolve_klass_no_update(KlassHandle& result, constantPoolHandle pool, int index, TRAPS) {
- Klass* result_oop =
- ConstantPool::klass_ref_at_if_loaded_check(pool, index, CHECK);
- result = KlassHandle(THREAD, result_oop);
-}
-
-
//------------------------------------------------------------------------------------------------------------------------
// Method resolution
//
@@ -360,7 +407,12 @@ void LinkResolver::check_method_accessability(KlassHandle ref_klass,
void LinkResolver::resolve_method_statically(methodHandle& resolved_method, KlassHandle& resolved_klass,
Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS) {
-
+ // This method is used only
+ // (1) in C2 from InlineTree::ok_to_inline (via ciMethod::check_call),
+ // and
+ // (2) in Bytecode_invoke::static_target
+ // It appears to fail when applied to an invokeinterface call site.
+ // FIXME: Remove this method and ciMethod::check_call; refactor to use the other LinkResolver entry points.
// resolve klass
if (code == Bytecodes::_invokedynamic) {
resolved_klass = SystemDictionary::MethodHandle_klass();
@@ -580,45 +632,49 @@ void LinkResolver::check_field_accessability(KlassHandle ref_klass,
}
}
-void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS) {
- resolve_field(result, pool, index, byte, check_only, true, CHECK);
+void LinkResolver::resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS) {
+ // Load these early in case the resolve of the containing klass fails
+ Symbol* field = pool->name_ref_at(index);
+ Symbol* sig = pool->signature_ref_at(index);
+
+ // resolve specified klass
+ KlassHandle resolved_klass;
+ resolve_klass(resolved_klass, pool, index, CHECK);
+
+ KlassHandle current_klass(THREAD, pool->pool_holder());
+ resolve_field(result, resolved_klass, field, sig, current_klass, byte, true, true, CHECK);
}
-void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS) {
+void LinkResolver::resolve_field(fieldDescriptor& fd, KlassHandle resolved_klass, Symbol* field, Symbol* sig,
+ KlassHandle current_klass, Bytecodes::Code byte, bool check_access, bool initialize_class,
+ TRAPS) {
assert(byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic ||
- byte == Bytecodes::_getfield || byte == Bytecodes::_putfield, "bad bytecode");
+ byte == Bytecodes::_getfield || byte == Bytecodes::_putfield ||
+ (byte == Bytecodes::_nop && !check_access), "bad field access bytecode");
bool is_static = (byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic);
bool is_put = (byte == Bytecodes::_putfield || byte == Bytecodes::_putstatic);
- // resolve specified klass
- KlassHandle resolved_klass;
- if (update_pool) {
- resolve_klass(resolved_klass, pool, index, CHECK);
- } else {
- resolve_klass_no_update(resolved_klass, pool, index, CHECK);
- }
- // Load these early in case the resolve of the containing klass fails
- Symbol* field = pool->name_ref_at(index);
- Symbol* sig = pool->signature_ref_at(index);
// Check if there's a resolved klass containing the field
- if( resolved_klass.is_null() ) {
+ if (resolved_klass.is_null()) {
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
}
// Resolve instance field
- fieldDescriptor fd; // find_field initializes fd if found
KlassHandle sel_klass(THREAD, InstanceKlass::cast(resolved_klass())->find_field(field, sig, &fd));
// check if field exists; i.e., if a klass containing the field def has been selected
- if (sel_klass.is_null()){
+ if (sel_klass.is_null()) {
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
}
+ if (!check_access)
+ // Access checking may be turned off when calling from within the VM.
+ return;
+
// check access
- KlassHandle ref_klass(THREAD, pool->pool_holder());
- check_field_accessability(ref_klass, resolved_klass, sel_klass, fd, CHECK);
+ check_field_accessability(current_klass, resolved_klass, sel_klass, fd, CHECK);
// check for errors
if (is_static != fd.is_static()) {
@@ -629,7 +685,7 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo
}
// Final fields can only be accessed from its own class.
- if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) {
+ if (is_put && fd.access_flags().is_final() && sel_klass() != current_klass()) {
THROW(vmSymbols::java_lang_IllegalAccessError());
}
@@ -639,19 +695,18 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo
//
// note 2: we don't want to force initialization if we are just checking
// if the field access is legal; e.g., during compilation
- if (is_static && !check_only) {
+ if (is_static && initialize_class) {
sel_klass->initialize(CHECK);
}
- {
+ if (sel_klass() != current_klass()) {
HandleMark hm(THREAD);
- Handle ref_loader (THREAD, InstanceKlass::cast(ref_klass())->class_loader());
+ Handle ref_loader (THREAD, InstanceKlass::cast(current_klass())->class_loader());
Handle sel_loader (THREAD, InstanceKlass::cast(sel_klass())->class_loader());
- Symbol* signature_ref = pool->signature_ref_at(index);
{
ResourceMark rm(THREAD);
Symbol* failed_type_symbol =
- SystemDictionary::check_signature_loaders(signature_ref,
+ SystemDictionary::check_signature_loaders(sig,
ref_loader, sel_loader,
false,
CHECK);
@@ -677,9 +732,6 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo
// return information. note that the klass is set to the actual klass containing the
// field, otherwise access of static fields in superclasses will not work.
- KlassHandle holder (THREAD, fd.field_holder());
- Symbol* name = fd.name();
- result.set(holder, name, fd.index(), fd.offset(), fd.field_type(), fd.access_flags());
}
@@ -907,10 +959,6 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
}
// Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
- // has not been rewritten, and the vtable initialized.
- assert(resolved_method->method_holder()->is_linked(), "must be linked");
-
- // Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
// has not been rewritten, and the vtable initialized. Make sure to do this after the nullcheck, since
// a missing receiver might result in a bogus lookup.
assert(resolved_method->method_holder()->is_linked(), "must be linked");
@@ -920,6 +968,7 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
vtable_index = vtable_index_of_miranda_method(resolved_klass,
resolved_method->name(),
resolved_method->signature(), CHECK);
+
assert(vtable_index >= 0 , "we should have valid vtable index at this point");
InstanceKlass* inst = InstanceKlass::cast(recv_klass());
@@ -927,6 +976,7 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
} else {
// at this point we are sure that resolved_method is virtual and not
// a miranda method; therefore, it must have a valid vtable index.
+ assert(!resolved_method->has_itable_index(), "");
vtable_index = resolved_method->vtable_index();
// We could get a negative vtable_index for final methods,
// because as an optimization they are never put in the vtable,
@@ -1006,6 +1056,12 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
lookup_instance_method_in_klasses(sel_method, recv_klass,
resolved_method->name(),
resolved_method->signature(), CHECK);
+ if (sel_method.is_null() && !check_null_and_abstract) {
+ // In theory this is a harmless placeholder value, but
+ // in practice leaving it null affects the nsk default method tests.
+ // This needs further study.
+ sel_method = resolved_method;
+ }
// check if method exists
if (sel_method.is_null()) {
ResourceMark rm(THREAD);
@@ -1046,7 +1102,14 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
sel_method->signature()));
}
// setup result
- result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, CHECK);
+ if (!resolved_method->has_itable_index()) {
+ int vtable_index = resolved_method->vtable_index();
+ assert(vtable_index == sel_method->vtable_index(), "sanity check");
+ result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
+ return;
+ }
+ int itable_index = resolved_method()->itable_index();
+ result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
}
@@ -1293,7 +1356,8 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle po
}
if (TraceMethodHandles) {
- tty->print_cr("resolve_invokedynamic #%d %s %s",
+ ResourceMark rm(THREAD);
+ tty->print_cr("resolve_invokedynamic #%d %s %s",
ConstantPool::decode_invokedynamic_index(index),
method_name->as_C_string(), method_signature->as_C_string());
tty->print(" BSM info: "); bootstrap_specifier->print();
@@ -1342,9 +1406,16 @@ void LinkResolver::resolve_dynamic_call(CallInfo& result,
//------------------------------------------------------------------------------------------------------------------------
#ifndef PRODUCT
-void FieldAccessInfo::print() {
+void CallInfo::print() {
ResourceMark rm;
- tty->print_cr("Field %s@%d", name()->as_C_string(), field_offset());
+ const char* kindstr = "unknown";
+ switch (_call_kind) {
+ case direct_call: kindstr = "direct"; break;
+ case vtable_call: kindstr = "vtable"; break;
+ case itable_call: kindstr = "itable"; break;
+ }
+ tty->print_cr("Call %s@%d %s", kindstr, _call_index,
+ _resolved_method.is_null() ? "(none)" : _resolved_method->name_and_sig_as_C_string());
}
#endif
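The heart of this file's change is the new CallInfo(Method*, Klass*) constructor, which classifies a resolved method into one of the three dispatch kinds. Restated as a standalone decision function over boolean inputs (a sketch of the rule, not the real Method* queries):

    #include <cstdio>

    enum CallKind { direct_call, vtable_call, itable_call };

    // Mirrors the classification order in CallInfo::CallInfo:
    //  1. statically bindable (e.g. final/private)   -> direct_call
    //  2. holder is not an interface                 -> vtable_call
    //  3. resolved through a class (miranda method)  -> vtable_call
    //  4. otherwise, a regular interface call        -> itable_call
    CallKind classify(bool can_be_statically_bound,
                      bool holder_is_interface,
                      bool resolved_klass_is_interface) {
      if (can_be_statically_bound)      return direct_call;
      if (!holder_is_interface)         return vtable_call;
      if (!resolved_klass_is_interface) return vtable_call;  // miranda case
      return itable_call;
    }

    int main() {
      printf("%d\n", classify(true,  false, false));  // 0: direct
      printf("%d\n", classify(false, false, false));  // 1: vtable
      printf("%d\n", classify(false, true,  true));   // 2: itable
      return 0;
    }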
diff --git a/src/share/vm/interpreter/linkResolver.hpp b/src/share/vm/interpreter/linkResolver.hpp
index 4054eeb35..0fb551a23 100644
--- a/src/share/vm/interpreter/linkResolver.hpp
+++ b/src/share/vm/interpreter/linkResolver.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,63 +30,54 @@
// All the necessary definitions for run-time link resolution.
-// LinkInfo & its subclasses provide all the information gathered
-// for a particular link after resolving it. A link is any reference
+// CallInfo provides all the information gathered for a particular
+// linked call site after resolving it. A link is any reference
// made from within the bytecodes of a method to an object outside of
// that method. If the info is invalid, the link has not been resolved
// successfully.
-class LinkInfo VALUE_OBJ_CLASS_SPEC {
-};
-
-
-// Link information for getfield/putfield & getstatic/putstatic bytecodes.
-
-class FieldAccessInfo: public LinkInfo {
- protected:
- KlassHandle _klass;
- Symbol* _name;
- AccessFlags _access_flags;
- int _field_index; // original index in the klass
- int _field_offset;
- BasicType _field_type;
-
+class CallInfo VALUE_OBJ_CLASS_SPEC {
public:
- void set(KlassHandle klass, Symbol* name, int field_index, int field_offset,
- BasicType field_type, AccessFlags access_flags);
- KlassHandle klass() const { return _klass; }
- Symbol* name() const { return _name; }
- int field_index() const { return _field_index; }
- int field_offset() const { return _field_offset; }
- BasicType field_type() const { return _field_type; }
- AccessFlags access_flags() const { return _access_flags; }
-
- // debugging
- void print() PRODUCT_RETURN;
-};
-
-
-// Link information for all calls.
-
-class CallInfo: public LinkInfo {
+ // Ways that a method call might be selected (or not) based on receiver type.
+ // Note that an invokevirtual instruction might be linked with direct_call (no dispatch),
+ // and an invokeinterface instruction might be linked with any of the three options.
+ enum CallKind {
+ direct_call, // jump into resolved_method (must be concrete)
+ vtable_call, // select recv.klass.method_at_vtable(index)
+ itable_call, // select recv.klass.method_at_itable(resolved_method.holder, index)
+ unknown_kind = -1
+ };
private:
- KlassHandle _resolved_klass; // static receiver klass
+ KlassHandle _resolved_klass; // static receiver klass, resolved from a symbolic reference
KlassHandle _selected_klass; // dynamic receiver class (same as static, or subklass)
methodHandle _resolved_method; // static target method
methodHandle _selected_method; // dynamic (actual) target method
- int _vtable_index; // vtable index of selected method
+ CallKind _call_kind; // kind of call (static(=bytecode static/special +
+ // others inferred), vtable, itable)
+ int _call_index; // vtable or itable index of selected class method (if any)
Handle _resolved_appendix; // extra argument in constant pool (if CPCE::has_appendix)
Handle _resolved_method_type; // MethodType (for invokedynamic and invokehandle call sites)
void set_static( KlassHandle resolved_klass, methodHandle resolved_method , TRAPS);
- void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method , TRAPS);
+ void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index , TRAPS);
void set_virtual( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS);
void set_handle( methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS);
- void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS);
+ void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, CallKind kind, int index, TRAPS);
friend class LinkResolver;
public:
+ CallInfo() {
+#ifndef PRODUCT
+ _call_kind = CallInfo::unknown_kind;
+ _call_index = Method::garbage_vtable_index;
+#endif //PRODUCT
+ }
+
+ // utility to extract an effective CallInfo from a method and an optional receiver limit
+ // does not queue the method for compilation
+ CallInfo(Method* resolved_method, Klass* resolved_klass = NULL);
+
KlassHandle resolved_klass() const { return _resolved_klass; }
KlassHandle selected_klass() const { return _selected_klass; }
methodHandle resolved_method() const { return _resolved_method; }
@@ -95,21 +86,43 @@ class CallInfo: public LinkInfo {
Handle resolved_method_type() const { return _resolved_method_type; }
BasicType result_type() const { return selected_method()->result_type(); }
- bool has_vtable_index() const { return _vtable_index >= 0; }
- bool is_statically_bound() const { return _vtable_index == Method::nonvirtual_vtable_index; }
+ CallKind call_kind() const { return _call_kind; }
+ int call_index() const { return _call_index; }
int vtable_index() const {
// Even for interface calls the vtable index could be non-negative.
// See CallInfo::set_interface.
assert(has_vtable_index() || is_statically_bound(), "");
- return _vtable_index;
+ assert(call_kind() == vtable_call || call_kind() == direct_call, "");
+ // The returned value is < 0 if the call is statically bound.
+ // But, the returned value may be >= 0 even if the kind is direct_call.
+ // It is up to the caller to decide which way to go.
+ return _call_index;
+ }
+ int itable_index() const {
+ assert(call_kind() == itable_call, "");
+ // The returned value is always >= 0, a valid itable index.
+ return _call_index;
}
+
+ // debugging
+#ifdef ASSERT
+ bool has_vtable_index() const { return _call_index >= 0 && _call_kind != CallInfo::itable_call; }
+ bool is_statically_bound() const { return _call_index == Method::nonvirtual_vtable_index; }
+#endif //ASSERT
+ void verify() PRODUCT_RETURN;
+ void print() PRODUCT_RETURN;
};
+// Link information for getfield/putfield & getstatic/putstatic bytecodes
+// is represented using a fieldDescriptor.
// The LinkResolver is used to resolve constant-pool references at run-time.
// It does all necessary link-time checks & throws exceptions if necessary.
class LinkResolver: AllStatic {
+ friend class klassVtable;
+ friend class klassItable;
+
private:
static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
@@ -120,7 +133,6 @@ class LinkResolver: AllStatic {
static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
static void resolve_klass (KlassHandle& result, constantPoolHandle pool, int index, TRAPS);
- static void resolve_klass_no_update (KlassHandle& result, constantPoolHandle pool, int index, TRAPS); // no update of constantPool entry
static void resolve_pool (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
@@ -148,9 +160,16 @@ class LinkResolver: AllStatic {
Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS);
// runtime/static resolving for fields
- static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS);
- // takes an extra bool argument "update_pool" to decide whether to update the constantPool during klass resolution.
- static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS);
+ static void resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS);
+ static void resolve_field(fieldDescriptor& result, KlassHandle resolved_klass, Symbol* field_name, Symbol* field_signature,
+ KlassHandle current_klass, Bytecodes::Code access_kind, bool check_access, bool initialize_class, TRAPS);
+
+ // source of access_kind codes:
+ static Bytecodes::Code field_access_kind(bool is_static, bool is_put) {
+ return (is_static
+ ? (is_put ? Bytecodes::_putstatic : Bytecodes::_getstatic)
+ : (is_put ? Bytecodes::_putfield : Bytecodes::_getfield ));
+ }
// runtime resolving:
// resolved_klass = specified class (i.e., static receiver class)
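field_access_kind() above folds the (is_static, is_put) pair back into the four field bytecodes. The complete mapping, as a compilable sketch with hypothetical opcode values standing in for Bytecodes::Code:

    #include <cstdio>

    enum Code { _getfield, _putfield, _getstatic, _putstatic };  // values hypothetical

    Code field_access_kind(bool is_static, bool is_put) {
      return is_static ? (is_put ? _putstatic : _getstatic)
                       : (is_put ? _putfield  : _getfield);
    }

    int main() {
      // (is_static, is_put) covers all four field bytecodes exactly once:
      printf("%d %d %d %d\n",
             field_access_kind(false, false),   // _getfield
             field_access_kind(false, true),    // _putfield
             field_access_kind(true,  false),   // _getstatic
             field_access_kind(true,  true));   // _putstatic
      return 0;
    }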
diff --git a/src/share/vm/interpreter/templateInterpreter.cpp b/src/share/vm/interpreter/templateInterpreter.cpp
index 53e50b1c7..9f7ed4c7e 100644
--- a/src/share/vm/interpreter/templateInterpreter.cpp
+++ b/src/share/vm/interpreter/templateInterpreter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -373,6 +373,12 @@ void TemplateInterpreterGenerator::generate_all() {
method_entry(java_lang_math_pow )
method_entry(java_lang_ref_reference_get)
+ if (UseCRC32Intrinsics) {
+ method_entry(java_util_zip_CRC32_update)
+ method_entry(java_util_zip_CRC32_updateBytes)
+ method_entry(java_util_zip_CRC32_updateByteBuffer)
+ }
+
initialize_method_handle_entries();
// all native method kinds (must be one contiguous block)
diff --git a/src/share/vm/libadt/port.hpp b/src/share/vm/libadt/port.hpp
index ad7e77c1a..5f712a402 100644
--- a/src/share/vm/libadt/port.hpp
+++ b/src/share/vm/libadt/port.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -163,8 +163,8 @@ extern void safe_free (const char *file, unsigned line, void *ptr);
extern void *safe_calloc (const char *file, unsigned line, unsigned nitems, unsigned size);
extern void *safe_realloc(const char *file, unsigned line, void *ptr, unsigned size);
extern char *safe_strdup (const char *file, unsigned line, const char *src);
-inline void *operator new( size_t size ) { return malloc(size); }
-inline void operator delete( void *ptr ) { free(ptr); }
+inline void *operator new( size_t size ) throw() { return malloc(size); }
+inline void operator delete( void *ptr ) { free(ptr); }
#endif
//-----------------------------------------------------------------------------
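The throw() added here (and on many operators throughout this patch) matters because these operators can return NULL: an operator new without an exception specification is assumed never to return NULL, so the compiler may elide the caller's NULL checks. A minimal sketch of a class-level non-throwing operator new (illustrative, not the VM's allocator):

    #include <cstdlib>
    #include <cstdio>

    struct Node {
      int value;
      // Declared throw(): the new-expression checks for NULL before running
      // the constructor, and callers may legitimately test the result.
      void* operator new(size_t size) throw() { return malloc(size); }
      void  operator delete(void* p)          { free(p); }
    };

    int main() {
      Node* n = new Node();
      if (n == NULL) return 1;  // reachable only because operator new is throw()
      n->value = 7;
      printf("%d\n", n->value);
      delete n;
      return 0;
    }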
diff --git a/src/share/vm/memory/allocation.cpp b/src/share/vm/memory/allocation.cpp
index 1c1bf7ab4..36c0d06c3 100644
--- a/src/share/vm/memory/allocation.cpp
+++ b/src/share/vm/memory/allocation.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,29 +49,28 @@
# include "os_bsd.inline.hpp"
#endif
-void* StackObj::operator new(size_t size) { ShouldNotCallThis(); return 0; };
-void StackObj::operator delete(void* p) { ShouldNotCallThis(); };
-void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; };
-void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); };
+void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
+void StackObj::operator delete(void* p) { ShouldNotCallThis(); }
+void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
+void StackObj::operator delete [](void* p) { ShouldNotCallThis(); }
+
+void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
+void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }
+void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
+void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
- size_t word_size, bool read_only, TRAPS) {
+ size_t word_size, bool read_only,
+ MetaspaceObj::Type type, TRAPS) throw() {
// Klass has its own operator new
return Metaspace::allocate(loader_data, word_size, read_only,
- Metaspace::NonClassType, CHECK_NULL);
+ type, CHECK_NULL);
}
bool MetaspaceObj::is_shared() const {
return MetaspaceShared::is_in_shared_space(this);
}
-bool MetaspaceObj::is_metadata() const {
- // GC Verify checks use this in guarantees.
- // TODO: either replace them with is_metaspace_object() or remove them.
- // is_metaspace_object() is slower than this test. This test doesn't
- // seem very useful for metaspace objects anymore though.
- return !Universe::heap()->is_in_reserved(this);
-}
bool MetaspaceObj::is_metaspace_object() const {
return Metaspace::contains((void*)this);
@@ -81,8 +80,7 @@ void MetaspaceObj::print_address_on(outputStream* st) const {
st->print(" {"INTPTR_FORMAT"}", this);
}
-
-void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
+void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
address res;
switch (type) {
case C_HEAP:
@@ -99,8 +97,12 @@ void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flag
return res;
}
+void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
+ return (address) operator new(size, type, flags);
+}
+
void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
- allocation_type type, MEMFLAGS flags) {
+ allocation_type type, MEMFLAGS flags) throw() {
//should only call this with std::nothrow, use other operator new() otherwise
address res;
switch (type) {
@@ -118,6 +120,10 @@ void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_cons
return res;
}
+void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
+ allocation_type type, MEMFLAGS flags) throw() {
+ return (address)operator new(size, nothrow_constant, type, flags);
+}
void ResourceObj::operator delete(void* p) {
assert(((ResourceObj *)p)->allocated_on_C_heap(),
@@ -126,6 +132,10 @@ void ResourceObj::operator delete(void* p) {
FreeHeap(p);
}
+void ResourceObj::operator delete [](void* p) {
+ operator delete(p);
+}
+
#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
// Set allocation type in the resource object
@@ -215,8 +225,6 @@ void trace_heap_free(void* p) {
tty->print_cr("Heap free " INTPTR_FORMAT, p);
}
-bool warn_new_operator = false; // see vm_main
-
//--------------------------------------------------------------------------------------
// ChunkPool implementation
@@ -228,10 +236,11 @@ class ChunkPool: public CHeapObj<mtInternal> {
size_t _num_used; // number of chunks currently checked out
const size_t _size; // size of each chunk (must be uniform)
- // Our three static pools
+ // Our four static pools
static ChunkPool* _large_pool;
static ChunkPool* _medium_pool;
static ChunkPool* _small_pool;
+ static ChunkPool* _tiny_pool;
// return first element or null
void* get_first() {
@@ -248,7 +257,7 @@ class ChunkPool: public CHeapObj<mtInternal> {
ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
// Allocate a new chunk from the pool (might expand the pool)
- _NOINLINE_ void* allocate(size_t bytes) {
+ _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
assert(bytes == _size, "bad size");
void* p = NULL;
// No VM lock can be taken inside ThreadCritical lock, so os::malloc
@@ -258,9 +267,9 @@ class ChunkPool: public CHeapObj<mtInternal> {
p = get_first();
}
if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
- if (p == NULL)
+ if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
-
+ }
return p;
}
@@ -311,15 +320,18 @@ class ChunkPool: public CHeapObj<mtInternal> {
static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; }
static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
static ChunkPool* small_pool() { assert(_small_pool != NULL, "must be initialized"); return _small_pool; }
+ static ChunkPool* tiny_pool() { assert(_tiny_pool != NULL, "must be initialized"); return _tiny_pool; }
static void initialize() {
_large_pool = new ChunkPool(Chunk::size + Chunk::aligned_overhead_size());
_medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
_small_pool = new ChunkPool(Chunk::init_size + Chunk::aligned_overhead_size());
+ _tiny_pool = new ChunkPool(Chunk::tiny_size + Chunk::aligned_overhead_size());
}
static void clean() {
enum { BlocksToKeep = 5 };
+ _tiny_pool->free_all_but(BlocksToKeep);
_small_pool->free_all_but(BlocksToKeep);
_medium_pool->free_all_but(BlocksToKeep);
_large_pool->free_all_but(BlocksToKeep);
@@ -329,6 +341,7 @@ class ChunkPool: public CHeapObj<mtInternal> {
ChunkPool* ChunkPool::_large_pool = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool = NULL;
+ChunkPool* ChunkPool::_tiny_pool = NULL;
void chunkpool_init() {
ChunkPool::initialize();
@@ -357,21 +370,23 @@ class ChunkPoolCleaner : public PeriodicTask {
//--------------------------------------------------------------------------------------
// Chunk implementation
-void* Chunk::operator new(size_t requested_size, size_t length) {
+void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
// requested_size is equal to sizeof(Chunk) but in order for the arena
// allocations to come out aligned as expected the size must be aligned
- // to expected arean alignment.
+ // to expected arena alignment.
// expect requested_size, but if sizeof(Chunk) is not a proper size we must align it.
assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
size_t bytes = ARENA_ALIGN(requested_size) + length;
switch (length) {
- case Chunk::size: return ChunkPool::large_pool()->allocate(bytes);
- case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
- case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes);
+ case Chunk::size: return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
+ case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
+ case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
+ case Chunk::tiny_size: return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
default: {
- void *p = os::malloc(bytes, mtChunk, CALLER_PC);
- if (p == NULL)
+ void* p = os::malloc(bytes, mtChunk, CALLER_PC);
+ if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
+ }
return p;
}
}
@@ -383,6 +398,7 @@ void Chunk::operator delete(void* p) {
case Chunk::size: ChunkPool::large_pool()->free(c); break;
case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
case Chunk::init_size: ChunkPool::small_pool()->free(c); break;
+ case Chunk::tiny_size: ChunkPool::tiny_pool()->free(c); break;
default: os::free(c, mtChunk);
}
}
@@ -425,7 +441,7 @@ NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
Arena::Arena(size_t init_size) {
size_t round_size = (sizeof (char *)) - 1;
init_size = (init_size+round_size) & ~round_size;
- _first = _chunk = new (init_size) Chunk(init_size);
+ _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
set_size_in_bytes(init_size);
@@ -433,7 +449,7 @@ Arena::Arena(size_t init_size) {
}
Arena::Arena() {
- _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
+ _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
set_size_in_bytes(Chunk::init_size);
@@ -462,18 +478,18 @@ Arena::~Arena() {
NOT_PRODUCT(Atomic::dec(&_instance_count);)
}
-void* Arena::operator new(size_t size) {
+void* Arena::operator new(size_t size) throw() {
assert(false, "Use dynamic memory type binding");
return NULL;
}
-void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) {
+void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
assert(false, "Use dynamic memory type binding");
return NULL;
}
// dynamic memory type binding
-void* Arena::operator new(size_t size, MEMFLAGS flags) {
+void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
@@ -483,7 +499,7 @@ void* Arena::operator new(size_t size, MEMFLAGS flags) {
#endif
}
-void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
+void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
void* p = os::malloc(size, flags|otArena, CALLER_PC);
if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
@@ -540,12 +556,9 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
size_t len = MAX2(x, (size_t) Chunk::size);
Chunk *k = _chunk; // Get filled-up chunk address
- _chunk = new (len) Chunk(len);
+ _chunk = new (alloc_failmode, len) Chunk(len);
if (_chunk == NULL) {
- if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
- signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
- }
return NULL;
}
if (k) k->set_next(_chunk); // Append new chunk to end of linked list
@@ -669,19 +682,40 @@ void* Arena::internal_malloc_4(size_t x) {
// a memory leak. Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
-// %% note this is causing a problem on solaris debug build. the global
-// new is being called from jdk source and causing data corruption.
-// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
-// define CATCH_OPERATOR_NEW_USAGE if you want to use this.
-#ifdef CATCH_OPERATOR_NEW_USAGE
-void* operator new(size_t size){
- static bool warned = false;
- if (!warned && warn_new_operator)
- warning("should not call global (default) operator new");
- warned = true;
- return (void *) AllocateHeap(size, "global operator new");
+// On certain platforms, such as Mac OS X (Darwin), in the debug build, the global
+// operator new is called from JDK source and causes data corruption, for example in
+// Java_sun_security_ec_ECKeyPairGenerator_generateECKeyPair.
+// Define ALLOW_OPERATOR_NEW_USAGE on platforms where the global operator new is allowed.
+//
+#ifndef ALLOW_OPERATOR_NEW_USAGE
+void* operator new(size_t size) throw() {
+ assert(false, "Should not call global operator new");
+ return 0;
}
-#endif
+
+void* operator new [](size_t size) throw() {
+ assert(false, "Should not call global operator new[]");
+ return 0;
+}
+
+void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
+ assert(false, "Should not call global operator new");
+ return 0;
+}
+
+void* operator new [](size_t size, std::nothrow_t& nothrow_constant) throw() {
+ assert(false, "Should not call global operator new[]");
+ return 0;
+}
+
+void operator delete(void* p) {
+ assert(false, "Should not call global delete");
+}
+
+void operator delete [](void* p) {
+ assert(false, "Should not call global delete []");
+}
+#endif // ALLOW_OPERATOR_NEW_USAGE
void AllocatedObj::print() const { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }
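The thread running through this file's changes is AllocFailType: ChunkPool::allocate, Chunk::operator new, and Arena::grow now take a mode so the caller chooses between dying via vm_exit_out_of_memory and receiving NULL. The control flow in isolation, with hypothetical names and malloc standing in for the pooled allocation:

    #include <cstdlib>
    #include <cstdio>

    enum AllocFailMode { EXIT_OOM, RETURN_NULL };

    void* allocate(size_t bytes, AllocFailMode mode) {
      void* p = malloc(bytes);
      if (p == NULL && mode == EXIT_OOM) {
        fprintf(stderr, "out of memory allocating %lu bytes\n", (unsigned long)bytes);
        exit(1);  // the VM calls vm_exit_out_of_memory() at this point
      }
      return p;   // NULL is possible only under RETURN_NULL
    }

    int main() {
      void* p = allocate(64, RETURN_NULL);
      if (p == NULL) {
        printf("allocation failed; the caller handles it\n");
      } else {
        printf("allocation succeeded\n");
        free(p);
      }
      return 0;
    }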
diff --git a/src/share/vm/memory/allocation.hpp b/src/share/vm/memory/allocation.hpp
index b65b2979c..739753311 100644
--- a/src/share/vm/memory/allocation.hpp
+++ b/src/share/vm/memory/allocation.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,12 +86,23 @@ typedef AllocFailStrategy::AllocFailEnum AllocFailType;
// subclasses.
//
// The following macros and function should be used to allocate memory
-// directly in the resource area or in the C-heap:
+// directly in the resource area or in the C-heap. The _OBJ variants
+// of the NEW/FREE_C_HEAP macros are used to allocate and deallocate simple
+// objects that do not inherit from CHeapObj; note that their constructor and
+// destructor are not called. The preferred way to allocate objects
+// is via the new operator.
//
-// NEW_RESOURCE_ARRAY(type,size)
+// WARNING: The array variant must only be used for a homogeneous array
+// where all objects are of the exact type specified. If subtypes are
+// stored in the array, then one must take care to call destructors
+// as needed.
+//
+// NEW_RESOURCE_ARRAY(type, size)
// NEW_RESOURCE_OBJ(type)
-// NEW_C_HEAP_ARRAY(type,size)
-// NEW_C_HEAP_OBJ(type)
+// NEW_C_HEAP_ARRAY(type, size)
+// NEW_C_HEAP_OBJ(type, memflags)
+// FREE_C_HEAP_ARRAY(type, old, memflags)
+// FREE_C_HEAP_OBJ(objname, type, memflags)
// char* AllocateHeap(size_t size, const char* name);
// void FreeHeap(void* p);
//
@@ -146,7 +157,8 @@ enum MemoryType {
mtJavaHeap = 0x0C00, // Java heap
mtClassShared = 0x0D00, // class data sharing
mtTest = 0x0E00, // Test type for verifying NMT
- mt_number_of_types = 0x000E, // number of memory types (mtDontTrack
+ mtTracing = 0x0F00, // memory used for Tracing
+ mt_number_of_types = 0x000F, // number of memory types (mtDontTrack
// is not included as a valid type)
mtDontTrack = 0x0F00, // memory we do not or cannot track
mt_masks = 0x7F00,
@@ -192,11 +204,14 @@ const bool NMT_track_callsite = false;
template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
public:
- _NOINLINE_ void* operator new(size_t size, address caller_pc = 0);
+ _NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
_NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
- address caller_pc = 0);
-
+ address caller_pc = 0) throw();
+ _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
+ _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
+ address caller_pc = 0) throw();
void operator delete(void* p);
+ void operator delete [] (void* p);
};
// Base class for objects allocated on the stack only.
@@ -204,8 +219,10 @@ template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
class StackObj ALLOCATION_SUPER_CLASS_SPEC {
private:
- void* operator new(size_t size);
+ void* operator new(size_t size) throw();
void operator delete(void* p);
+ void* operator new [](size_t size) throw();
+ void operator delete [](void* p);
};
// Base class for objects used as value objects.
@@ -228,8 +245,10 @@ class StackObj ALLOCATION_SUPER_CLASS_SPEC {
//
class _ValueObj {
private:
- void* operator new(size_t size);
- void operator delete(void* p);
+ void* operator new(size_t size) throw();
+ void operator delete(void* p);
+ void* operator new [](size_t size) throw();
+ void operator delete [](void* p);
};
@@ -245,13 +264,59 @@ class ClassLoaderData;
class MetaspaceObj {
public:
- bool is_metadata() const;
bool is_metaspace_object() const; // more specific test but slower
bool is_shared() const;
void print_address_on(outputStream* st) const; // nonvirtual address printing
+#define METASPACE_OBJ_TYPES_DO(f) \
+ f(Unknown) \
+ f(Class) \
+ f(Symbol) \
+ f(TypeArrayU1) \
+ f(TypeArrayU2) \
+ f(TypeArrayU4) \
+ f(TypeArrayU8) \
+ f(TypeArrayOther) \
+ f(Method) \
+ f(ConstMethod) \
+ f(MethodData) \
+ f(ConstantPool) \
+ f(ConstantPoolCache) \
+ f(Annotation) \
+ f(MethodCounters)
+
+#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
+#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
+
+ enum Type {
+ // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
+ METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
+ _number_of_types
+ };
+
+ static const char * type_name(Type type) {
+ switch(type) {
+ METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
+ default:
+ ShouldNotReachHere();
+ return NULL;
+ }
+ }
+
+ static MetaspaceObj::Type array_type(size_t elem_size) {
+ switch (elem_size) {
+ case 1: return TypeArrayU1Type;
+ case 2: return TypeArrayU2Type;
+ case 4: return TypeArrayU4Type;
+ case 8: return TypeArrayU8Type;
+ default:
+ return TypeArrayOtherType;
+ }
+ }
+
void* operator new(size_t size, ClassLoaderData* loader_data,
- size_t word_size, bool read_only, Thread* thread);
+ size_t word_size, bool read_only,
+ Type type, Thread* thread) throw();
// can't use TRAPS from this header file.
void operator delete(void* p) { ShouldNotCallThis(); }
};
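The `METASPACE_OBJ_TYPES_DO` list above is an X-macro: the type names are written once and expanded twice, once into enumerators and once into the `type_name` switch, so the two can never drift out of sync. A standalone sketch of the same idiom:

    #include <cstdio>

    #define COLORS_DO(f) f(Red) f(Green) f(Blue)
    #define COLOR_DECLARE(name)   name ## Color,
    #define COLOR_NAME_CASE(name) case name ## Color: return #name;

    enum Color {
      COLORS_DO(COLOR_DECLARE)   // RedColor, GreenColor, BlueColor,
      _number_of_colors
    };

    static const char* color_name(Color c) {
      switch (c) {
        COLORS_DO(COLOR_NAME_CASE)
        default: return "unknown";
      }
    }

    int main() {
      std::printf("%s\n", color_name(GreenColor));  // prints "Green"
      return 0;
    }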
@@ -274,7 +339,7 @@ class Chunk: CHeapObj<mtChunk> {
Chunk* _next; // Next Chunk in list
const size_t _len; // Size of this Chunk
public:
- void* operator new(size_t size, size_t length);
+ void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
void operator delete(void* p);
Chunk(size_t length);
@@ -288,7 +353,8 @@ class Chunk: CHeapObj<mtChunk> {
slack = 20, // suspected sizeof(Chunk) + internal malloc headers
#endif
- init_size = 1*K - slack, // Size of first chunk
+ tiny_size = 256 - slack, // Size of first chunk (tiny)
+ init_size = 1*K - slack, // Size of first chunk (normal aka small)
medium_size= 10*K - slack, // Size of medium-sized chunk
size = 32*K - slack, // Default size of an Arena chunk (following the first)
non_pool_size = init_size + 32 // An initial size which is not one of above
@@ -337,10 +403,15 @@ protected:
void signal_out_of_memory(size_t request, const char* whence) const;
- void check_for_overflow(size_t request, const char* whence) const {
+ bool check_for_overflow(size_t request, const char* whence,
+ AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
+ if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
+ return false;
+ }
signal_out_of_memory(request, whence);
}
+ return true;
}
public:
@@ -351,12 +422,12 @@ protected:
char* hwm() const { return _hwm; }
// new operators
- void* operator new (size_t size);
- void* operator new (size_t size, const std::nothrow_t& nothrow_constant);
+ void* operator new (size_t size) throw();
+ void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();
// dynamic memory type tagging
- void* operator new(size_t size, MEMFLAGS flags);
- void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags);
+ void* operator new(size_t size, MEMFLAGS flags) throw();
+ void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
void operator delete(void* p);
// Fast allocate in the arena. Common case is: pointer test + increment.
@@ -364,7 +435,8 @@ protected:
assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
x = ARENA_ALIGN(x);
debug_only(if (UseMallocOnly) return malloc(x);)
- check_for_overflow(x, "Arena::Amalloc");
+ if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
+ return NULL;
NOT_PRODUCT(inc_bytes_allocated(x);)
if (_hwm + x > _max) {
return grow(x, alloc_failmode);
@@ -378,7 +450,8 @@ protected:
void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
debug_only(if (UseMallocOnly) return malloc(x);)
- check_for_overflow(x, "Arena::Amalloc_4");
+ if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
+ return NULL;
NOT_PRODUCT(inc_bytes_allocated(x);)
if (_hwm + x > _max) {
return grow(x, alloc_failmode);
@@ -399,7 +472,8 @@ protected:
size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
x += delta;
#endif
- check_for_overflow(x, "Arena::Amalloc_D");
+ if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
+ return NULL;
NOT_PRODUCT(inc_bytes_allocated(x);)
if (_hwm + x > _max) {
return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
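With `check_for_overflow` now returning a bool, each `Amalloc` variant can propagate allocation failure instead of exiting the VM when the caller passes `AllocFailStrategy::RETURN_NULL`. A hedged caller-side sketch (the wrapper function is hypothetical; the `Amalloc` signature is the one shown in these hunks):

    char* try_arena_alloc(Arena* arena, size_t nbytes) {
      void* p = arena->Amalloc(nbytes, AllocFailStrategy::RETURN_NULL);
      if (p == NULL) {
        return NULL;  // pointer overflow or failed chunk growth; caller decides
      }
      return (char*) p;
    }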
@@ -509,27 +583,51 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
#endif // ASSERT
public:
- void* operator new(size_t size, allocation_type type, MEMFLAGS flags);
+ void* operator new(size_t size, allocation_type type, MEMFLAGS flags) throw();
+ void* operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw();
void* operator new(size_t size, const std::nothrow_t& nothrow_constant,
- allocation_type type, MEMFLAGS flags);
- void* operator new(size_t size, Arena *arena) {
+ allocation_type type, MEMFLAGS flags) throw();
+ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
+ allocation_type type, MEMFLAGS flags) throw();
+
+ void* operator new(size_t size, Arena *arena) throw() {
address res = (address)arena->Amalloc(size);
DEBUG_ONLY(set_allocation_type(res, ARENA);)
return res;
}
- void* operator new(size_t size) {
+
+ void* operator new [](size_t size, Arena *arena) throw() {
+ address res = (address)arena->Amalloc(size);
+ DEBUG_ONLY(set_allocation_type(res, ARENA);)
+ return res;
+ }
+
+ void* operator new(size_t size) throw() {
+ address res = (address)resource_allocate_bytes(size);
+ DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
+ return res;
+ }
+
+ void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
+ address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
+ DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
+ return res;
+ }
+
+ void* operator new [](size_t size) throw() {
address res = (address)resource_allocate_bytes(size);
DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
return res;
}
- void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
+ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
return res;
}
void operator delete(void* p);
+ void operator delete [](void* p);
};
// One of the following macros must be used when allocating an array
@@ -545,8 +643,15 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
(type*) resource_allocate_bytes(thread, (size) * sizeof(type))
+#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\
+ (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL)
+
#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
- (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )
+ (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type))
+
+#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\
+ (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\
+ (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL)
#define FREE_RESOURCE_ARRAY(type, old, size)\
resource_free_bytes((char*)(old), (size) * sizeof(type))
@@ -557,30 +662,43 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
#define NEW_RESOURCE_OBJ(type)\
NEW_RESOURCE_ARRAY(type, 1)
+#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\
+ NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)
+
+#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
+ (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)
+
+#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
+ (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))
+
#define NEW_C_HEAP_ARRAY(type, size, memflags)\
(type*) (AllocateHeap((size) * sizeof(type), memflags))
+#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
+ NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)
+
+#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
+ NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
+
#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
- (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))
+ (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
+
+#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
+ (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
-#define FREE_C_HEAP_ARRAY(type,old,memflags) \
+#define FREE_C_HEAP_ARRAY(type, old, memflags) \
FreeHeap((char*)(old), memflags)
+// allocate type in heap without calling ctor
#define NEW_C_HEAP_OBJ(type, memflags)\
NEW_C_HEAP_ARRAY(type, 1, memflags)
+#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\
+ NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags)
-#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
- (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))
-
-#define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\
- (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc))
-
-#define NEW_C_HEAP_OBJ2(type, memflags, pc)\
- NEW_C_HEAP_ARRAY2(type, 1, memflags, pc)
-
-
-extern bool warn_new_operator;
+// deallocate obj of type in heap without calling dtor
+#define FREE_C_HEAP_OBJ(objname, memflags)\
+  FreeHeap((char*)(objname), memflags)
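// Hedged usage sketch for the *_RETURN_NULL variants above (the snippet is
// hypothetical; the same pattern appears in the CardTableRS constructor later
// in this patch):
//
//   jbyte* vals = NEW_C_HEAP_ARRAY_RETURN_NULL(jbyte, n, mtGC);
//   if (vals == NULL) {
//     vm_exit_during_initialization("Could not create val array.");
//   }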
// for statistics
#ifndef PRODUCT
@@ -622,13 +740,21 @@ public:
// is set so that we always use malloc except for Solaris where we set the
// limit to get mapped memory.
template <class E, MEMFLAGS F>
-class ArrayAllocator : StackObj {
+class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
char* _addr;
bool _use_malloc;
size_t _size;
+ bool _free_in_destructor;
public:
- ArrayAllocator() : _addr(NULL), _use_malloc(false), _size(0) { }
- ~ArrayAllocator() { free(); }
+ ArrayAllocator(bool free_in_destructor = true) :
+ _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }
+
+ ~ArrayAllocator() {
+ if (_free_in_destructor) {
+ free();
+ }
+ }
+
E* allocate(size_t length);
void free();
};
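The new `_free_in_destructor` flag decouples the allocation's lifetime from the allocator object's scope. A hedged sketch (the wrapper function is hypothetical; the class is the one above):

    template <class E, MEMFLAGS F>
    E* allocate_persistent(size_t length) {
      ArrayAllocator<E, F> allocator(false /* free_in_destructor */);
      E* data = allocator.allocate(length);
      // 'allocator' goes out of scope here, but 'data' survives; a later
      // owner must arrange the matching free().
      return data;
    }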
diff --git a/src/share/vm/memory/allocation.inline.hpp b/src/share/vm/memory/allocation.inline.hpp
index 735dd9909..30bcce0e9 100644
--- a/src/share/vm/memory/allocation.inline.hpp
+++ b/src/share/vm/memory/allocation.inline.hpp
@@ -85,31 +85,40 @@ inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {
template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
- address caller_pc){
-#ifdef ASSERT
+ address caller_pc) throw() {
void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
+#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
- return p;
-#else
- return (void *) AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
#endif
+ return p;
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
- const std::nothrow_t& nothrow_constant, address caller_pc) {
-#ifdef ASSERT
+ const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
AllocFailStrategy::RETURN_NULL);
+#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
- return p;
-#else
- return (void *) AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
- AllocFailStrategy::RETURN_NULL);
#endif
+ return p;
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
+ address caller_pc) throw() {
+ return CHeapObj<F>::operator new(size, caller_pc);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
+ const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
+ return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
}
template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
- FreeHeap(p, F);
+ FreeHeap(p, F);
+}
+
+template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p){
+ FreeHeap(p, F);
}
template <class E, MEMFLAGS F>
@@ -137,10 +146,7 @@ E* ArrayAllocator<E, F>::allocate(size_t length) {
vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
}
- bool success = os::commit_memory(_addr, _size, false /* executable */);
- if (!success) {
- vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (commit)");
- }
+ os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");
return (E*)_addr;
}
diff --git a/src/share/vm/memory/binaryTreeDictionary.cpp b/src/share/vm/memory/binaryTreeDictionary.cpp
index c53b94603..0a726d442 100644
--- a/src/share/vm/memory/binaryTreeDictionary.cpp
+++ b/src/share/vm/memory/binaryTreeDictionary.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,10 +33,10 @@
#include "runtime/globals.hpp"
#include "utilities/ostream.hpp"
#include "utilities/macros.hpp"
+#include "gc_implementation/shared/spaceDecorator.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#endif // INCLUDE_ALL_GCS
diff --git a/src/share/vm/memory/cardTableModRefBS.cpp b/src/share/vm/memory/cardTableModRefBS.cpp
index cbe3654cc..82336503e 100644
--- a/src/share/vm/memory/cardTableModRefBS.cpp
+++ b/src/share/vm/memory/cardTableModRefBS.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -80,15 +80,11 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
_covered = new MemRegion[max_covered_regions];
_committed = new MemRegion[max_covered_regions];
- if (_covered == NULL || _committed == NULL)
+ if (_covered == NULL || _committed == NULL) {
vm_exit_during_initialization("couldn't alloc card table covered region set.");
- int i;
- for (i = 0; i < max_covered_regions; i++) {
- _covered[i].set_word_size(0);
- _committed[i].set_word_size(0);
}
- _cur_covered_regions = 0;
+ _cur_covered_regions = 0;
const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
MAX2(_page_size, (size_t) os::vm_allocation_granularity());
ReservedSpace heap_rs(_byte_map_size, rs_align, false);
@@ -114,11 +110,8 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
jbyte* guard_card = &_byte_map[_guard_index];
uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
_guard_region = MemRegion((HeapWord*)guard_page, _page_size);
- if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
- // Do better than this for Merlin
- vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card");
- }
-
+ os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
+ !ExecMem, "card table last card");
*guard_card = last_card;
_lowest_non_clean =
@@ -134,7 +127,7 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
|| _lowest_non_clean_base_chunk_index == NULL
|| _last_LNC_resizing_collection == NULL)
vm_exit_during_initialization("couldn't allocate an LNC array.");
- for (i = 0; i < max_covered_regions; i++) {
+ for (int i = 0; i < max_covered_regions; i++) {
_lowest_non_clean[i] = NULL;
_lowest_non_clean_chunk_size[i] = 0;
_last_LNC_resizing_collection[i] = -1;
@@ -153,6 +146,33 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
}
}
+CardTableModRefBS::~CardTableModRefBS() {
+ if (_covered) {
+ delete[] _covered;
+ _covered = NULL;
+ }
+ if (_committed) {
+ delete[] _committed;
+ _committed = NULL;
+ }
+ if (_lowest_non_clean) {
+ FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC);
+ _lowest_non_clean = NULL;
+ }
+ if (_lowest_non_clean_chunk_size) {
+ FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC);
+ _lowest_non_clean_chunk_size = NULL;
+ }
+ if (_lowest_non_clean_base_chunk_index) {
+ FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC);
+ _lowest_non_clean_base_chunk_index = NULL;
+ }
+ if (_last_LNC_resizing_collection) {
+ FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC);
+ _last_LNC_resizing_collection = NULL;
+ }
+}
+
int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
int i;
for (i = 0; i < _cur_covered_regions; i++) {
@@ -289,12 +309,9 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
MemRegion(cur_committed.end(), new_end_for_commit);
assert(!new_committed.is_empty(), "Region should not be empty here");
- if (!os::commit_memory((char*)new_committed.start(),
- new_committed.byte_size(), _page_size)) {
- // Do better than this for Merlin
- vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
- "card table expansion");
- }
+ os::commit_memory_or_exit((char*)new_committed.start(),
+ new_committed.byte_size(), _page_size,
+ !ExecMem, "card table expansion");
// Use new_end_aligned (as opposed to new_end_for_commit) because
// the cur_committed region may include the guard region.
} else if (new_end_aligned < cur_committed.end()) {
@@ -395,7 +412,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
}
// Touch the last card of the covered region to show that it
// is committed (or SEGV).
- debug_only(*byte_for(_covered[ind].last());)
+ debug_only((void) (*byte_for(_covered[ind].last()));)
debug_only(verify_guard();)
}
diff --git a/src/share/vm/memory/cardTableModRefBS.hpp b/src/share/vm/memory/cardTableModRefBS.hpp
index 69af2daa8..6b5de2a44 100644
--- a/src/share/vm/memory/cardTableModRefBS.hpp
+++ b/src/share/vm/memory/cardTableModRefBS.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -280,6 +280,7 @@ public:
}
CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
+ ~CardTableModRefBS();
// *** Barrier set functions.
diff --git a/src/share/vm/memory/cardTableRS.cpp b/src/share/vm/memory/cardTableRS.cpp
index bd1cd9dd2..ddd65d420 100644
--- a/src/share/vm/memory/cardTableRS.cpp
+++ b/src/share/vm/memory/cardTableRS.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,9 +54,10 @@ CardTableRS::CardTableRS(MemRegion whole_heap,
_ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
#endif
set_bs(_ct_bs);
- _last_cur_val_in_gen = new jbyte[GenCollectedHeap::max_gens + 1];
+ _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
+ mtGC, 0, AllocFailStrategy::RETURN_NULL);
if (_last_cur_val_in_gen == NULL) {
- vm_exit_during_initialization("Could not last_cur_val_in_gen array.");
+ vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
}
for (int i = 0; i < GenCollectedHeap::max_gens + 1; i++) {
_last_cur_val_in_gen[i] = clean_card_val();
@@ -64,6 +65,16 @@ CardTableRS::CardTableRS(MemRegion whole_heap,
_ct_bs->set_CTRS(this);
}
+CardTableRS::~CardTableRS() {
+ if (_ct_bs) {
+ delete _ct_bs;
+ _ct_bs = NULL;
+ }
+ if (_last_cur_val_in_gen) {
+    FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen, mtGC);
+ }
+}
+
void CardTableRS::resize_covered_region(MemRegion new_region) {
_ct_bs->resize_covered_region(new_region);
}
@@ -299,46 +310,31 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
_ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
}
-void CardTableRS::clear_into_younger(Generation* gen) {
- GenCollectedHeap* gch = GenCollectedHeap::heap();
- // Generations younger than gen have been evacuated. We can clear
- // card table entries for gen (we know that it has no pointers
- // to younger gens) and for those below. The card tables for
- // the youngest gen need never be cleared.
+void CardTableRS::clear_into_younger(Generation* old_gen) {
+ assert(old_gen->level() == 1, "Should only be called for the old generation");
+ // The card tables for the youngest gen need never be cleared.
// There's a bit of subtlety in the clear() and invalidate()
// methods that we exploit here and in invalidate_or_clear()
// below to avoid missing cards at the fringes. If clear() or
// invalidate() are changed in the future, this code should
// be revisited. 20040107.ysr
- Generation* g = gen;
- for(Generation* prev_gen = gch->prev_gen(g);
- prev_gen != NULL;
- g = prev_gen, prev_gen = gch->prev_gen(g)) {
- MemRegion to_be_cleared_mr = g->prev_used_region();
- clear(to_be_cleared_mr);
- }
+ clear(old_gen->prev_used_region());
}
-void CardTableRS::invalidate_or_clear(Generation* gen, bool younger) {
- GenCollectedHeap* gch = GenCollectedHeap::heap();
- // For each generation gen (and younger)
- // invalidate the cards for the currently occupied part
- // of that generation and clear the cards for the
+void CardTableRS::invalidate_or_clear(Generation* old_gen) {
+ assert(old_gen->level() == 1, "Should only be called for the old generation");
+ // Invalidate the cards for the currently occupied part of
+ // the old generation and clear the cards for the
// unoccupied part of the generation (if any, making use
// of that generation's prev_used_region to determine that
// region). No need to do anything for the youngest
// generation. Also see note#20040107.ysr above.
- Generation* g = gen;
- for(Generation* prev_gen = gch->prev_gen(g); prev_gen != NULL;
- g = prev_gen, prev_gen = gch->prev_gen(g)) {
- MemRegion used_mr = g->used_region();
- MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
- if (!to_be_cleared_mr.is_empty()) {
- clear(to_be_cleared_mr);
- }
- invalidate(used_mr);
- if (!younger) break;
+ MemRegion used_mr = old_gen->used_region();
+ MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
+ if (!to_be_cleared_mr.is_empty()) {
+ clear(to_be_cleared_mr);
}
+ invalidate(used_mr);
}
diff --git a/src/share/vm/memory/cardTableRS.hpp b/src/share/vm/memory/cardTableRS.hpp
index 7ac9e4162..25884feac 100644
--- a/src/share/vm/memory/cardTableRS.hpp
+++ b/src/share/vm/memory/cardTableRS.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -102,6 +102,7 @@ class CardTableRS: public GenRemSet {
public:
CardTableRS(MemRegion whole_heap, int max_covered_regions);
+ ~CardTableRS();
// *** GenRemSet functions.
GenRemSet::Name rs_kind() { return GenRemSet::CardTable; }
@@ -141,12 +142,12 @@ public:
void verify_aligned_region_empty(MemRegion mr);
void clear(MemRegion mr) { _ct_bs->clear(mr); }
- void clear_into_younger(Generation* gen);
+ void clear_into_younger(Generation* old_gen);
void invalidate(MemRegion mr, bool whole_heap = false) {
_ct_bs->invalidate(mr, whole_heap);
}
- void invalidate_or_clear(Generation* gen, bool younger);
+ void invalidate_or_clear(Generation* old_gen);
static uintx ct_max_alignment_constraint() {
return CardTableModRefBS::ct_max_alignment_constraint();
diff --git a/src/share/vm/memory/collectorPolicy.cpp b/src/share/vm/memory/collectorPolicy.cpp
index c8543d943..0728997b7 100644
--- a/src/share/vm/memory/collectorPolicy.cpp
+++ b/src/share/vm/memory/collectorPolicy.cpp
@@ -47,6 +47,11 @@
// CollectorPolicy methods.
+// Align down. If aligning the size down yields 0, return 'alignment' instead
+// (worked examples follow the definition below).
+static size_t restricted_align_down(size_t size, size_t alignment) {
+ return MAX2(alignment, align_size_down_(size, alignment));
+}
+
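// Worked examples (illustrative, alignment = 4096):
//   restricted_align_down(10000, 4096) == 8192  // ordinary align-down
//   restricted_align_down( 1000, 4096) == 4096  // align-down gives 0; clamped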
void CollectorPolicy::initialize_flags() {
assert(max_alignment() >= min_alignment(),
err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
@@ -59,18 +64,24 @@ void CollectorPolicy::initialize_flags() {
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
}
- if (MetaspaceSize > MaxMetaspaceSize) {
- MaxMetaspaceSize = MetaspaceSize;
+ if (!is_size_aligned(MaxMetaspaceSize, max_alignment())) {
+ FLAG_SET_ERGO(uintx, MaxMetaspaceSize,
+ restricted_align_down(MaxMetaspaceSize, max_alignment()));
}
- MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
- // Don't increase Metaspace size limit above specified.
- MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
+
if (MetaspaceSize > MaxMetaspaceSize) {
- MetaspaceSize = MaxMetaspaceSize;
+ FLAG_SET_ERGO(uintx, MetaspaceSize, MaxMetaspaceSize);
}
- MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
- MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));
+ if (!is_size_aligned(MetaspaceSize, min_alignment())) {
+ FLAG_SET_ERGO(uintx, MetaspaceSize,
+ restricted_align_down(MetaspaceSize, min_alignment()));
+ }
+
+ assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
+
+ MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
+ MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());
MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
@@ -145,6 +156,30 @@ void CollectorPolicy::cleared_all_soft_refs() {
_all_soft_refs_clear = true;
}
+size_t CollectorPolicy::compute_max_alignment() {
+ // The card marking array and the offset arrays for old generations are
+ // committed in os pages as well. Make sure they are entirely full (to
+  // avoid partial page problems), e.g. if every 512 bytes of heap are covered
+  // by one card table byte and the os page size is 4096, the maximum heap
+  // size should be 512*4096 = 2MB aligned (see the worked arithmetic below).
+
+ // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
+ // is supported.
+ // Requirements of any new remembered set implementations must be added here.
+ size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
+
+ // Parallel GC does its own alignment of the generations to avoid requiring a
+ // large page (256M on some platforms) for the permanent generation. The
+ // other collectors should also be updated to do their own alignment and then
+ // this use of lcm() should be removed.
+ if (UseLargePages && !UseParallelGC) {
+ // in presence of large pages we have to make sure that our
+ // alignment is large page aware
+ alignment = lcm(os::large_page_size(), alignment);
+ }
+
+ return alignment;
+}
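// Worked arithmetic behind the constraint above (illustrative): one card
// table byte covers 512 bytes of heap, so one fully used os page of card
// table (4096 bytes) covers 512 * 4096 = 2M of heap; aligning the maximum
// heap size to 2M keeps the last card table page entirely full. With large
// pages, lcm(os::large_page_size(), alignment) raises this further.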
// GenCollectorPolicy methods.
@@ -175,27 +210,6 @@ void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
GCTimeRatio);
}
-size_t GenCollectorPolicy::compute_max_alignment() {
- // The card marking array and the offset arrays for old generations are
- // committed in os pages as well. Make sure they are entirely full (to
- // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
- // byte entry and the os page size is 4096, the maximum heap size should
- // be 512*4096 = 2MB aligned.
- size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());
-
- // Parallel GC does its own alignment of the generations to avoid requiring a
- // large page (256M on some platforms) for the permanent generation. The
- // other collectors should also be updated to do their own alignment and then
- // this use of lcm() should be removed.
- if (UseLargePages && !UseParallelGC) {
- // in presence of large pages we have to make sure that our
- // alignment is large page aware
- alignment = lcm(os::large_page_size(), alignment);
- }
-
- return alignment;
-}
-
void GenCollectorPolicy::initialize_flags() {
// All sizes must be multiples of the generation granularity.
set_min_alignment((uintx) Generation::GenGrain);
@@ -264,6 +278,27 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
// need to do this again
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
+ // adjust max heap size if necessary
+ if (NewSize + OldSize > MaxHeapSize) {
+ if (FLAG_IS_CMDLINE(MaxHeapSize)) {
+ // somebody set a maximum heap size with the intention that we should not
+ // exceed it. Adjust New/OldSize as necessary.
+ uintx calculated_size = NewSize + OldSize;
+ double shrink_factor = (double) MaxHeapSize / calculated_size;
+ // align
+ NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
+ // OldSize is already aligned because above we aligned MaxHeapSize to
+ // max_alignment(), and we just made sure that NewSize is aligned to
+ // min_alignment(). In initialize_flags() we verified that max_alignment()
+ // is a multiple of min_alignment().
+ OldSize = MaxHeapSize - NewSize;
+ } else {
+ MaxHeapSize = NewSize + OldSize;
+ }
+ }
+ // need to do this again
+ MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
+
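// Worked example for the adjustment above (illustrative numbers):
// NewSize = 512M, OldSize = 1024M, command-line MaxHeapSize = 1024M:
//   calculated_size = 1536M, shrink_factor = 1024M / 1536M = 2/3
//   NewSize  = align_size_down((uintx)(512M * 2/3), min_alignment()) ~= 341M
//   OldSize  = MaxHeapSize - NewSize, so NewSize + OldSize == MaxHeapSize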
always_do_update_barrier = UseConcMarkSweepGC;
// Check validity of heap flags
@@ -731,7 +766,7 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
// free memory should be here, especially if they are expensive. If this
// attempt fails, an OOM exception will be thrown.
{
- IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
+ UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
gch->do_collection(true /* full */,
true /* clear_all_soft_refs */,
@@ -856,7 +891,7 @@ MarkSweepPolicy::MarkSweepPolicy() {
}
void MarkSweepPolicy::initialize_generations() {
- _generations = new GenerationSpecPtr[number_of_generations()];
+  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(),
+                                   mtGC, 0, AllocFailStrategy::RETURN_NULL);
if (_generations == NULL)
vm_exit_during_initialization("Unable to allocate gen spec");
diff --git a/src/share/vm/memory/collectorPolicy.hpp b/src/share/vm/memory/collectorPolicy.hpp
index 4acf7ba78..73fd177d7 100644
--- a/src/share/vm/memory/collectorPolicy.hpp
+++ b/src/share/vm/memory/collectorPolicy.hpp
@@ -98,6 +98,9 @@ class CollectorPolicy : public CHeapObj<mtGC> {
{}
public:
+ // Return maximum heap alignment that may be imposed by the policy
+ static size_t compute_max_alignment();
+
void set_min_alignment(size_t align) { _min_alignment = align; }
size_t min_alignment() { return _min_alignment; }
void set_max_alignment(size_t align) { _max_alignment = align; }
@@ -234,9 +237,6 @@ class GenCollectorPolicy : public CollectorPolicy {
// Try to allocate space by expanding the heap.
virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
- // compute max heap alignment
- size_t compute_max_alignment();
-
  // Scale the base_size by NewRatio according to
// result = base_size / (NewRatio + 1)
// and align by min_alignment()
diff --git a/src/share/vm/memory/defNewGeneration.cpp b/src/share/vm/memory/defNewGeneration.cpp
index 689ce7b8b..0b4af29a3 100644
--- a/src/share/vm/memory/defNewGeneration.cpp
+++ b/src/share/vm/memory/defNewGeneration.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,10 @@
#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
@@ -50,9 +54,6 @@
DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
assert(g->level() == 0, "Optimized for youngest gen.");
}
-void DefNewGeneration::IsAliveClosure::do_object(oop p) {
- assert(false, "Do not call.");
-}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}
@@ -226,6 +227,8 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
_next_gen = NULL;
_tenuring_threshold = MaxTenuringThreshold;
_pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
+
+ _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
@@ -447,11 +450,6 @@ void DefNewGeneration::compute_new_size() {
}
}
-void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
- // $$$ This may be wrong in case of "scavenge failure"?
- eden()->object_iterate(cl);
-}
-
void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
assert(false, "NYI -- are you sure you want to call this?");
}
@@ -561,12 +559,16 @@ void DefNewGeneration::collect(bool full,
size_t size,
bool is_tlab) {
assert(full || size > 0, "otherwise we don't want to collect");
+
GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+ _gc_timer->register_gc_start(os::elapsed_counter());
+ DefNewTracer gc_tracer;
+ gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
+
_next_gen = gch->next_gen(this);
- assert(_next_gen != NULL,
- "This must be the youngest gen, and not the only gen");
- // If the next generation is too full to accomodate promotion
+ // If the next generation is too full to accommodate promotion
// from this generation, pass on collection; let the next generation
// do it.
if (!collection_attempt_is_safe()) {
@@ -580,10 +582,12 @@ void DefNewGeneration::collect(bool full,
init_assuming_no_promotion_failure();
- TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
+ GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
// Capture heap used before collection (for printing).
size_t gch_prev_used = gch->used();
+ gch->trace_heap_before_gc(&gc_tracer);
+
SpecializationStats::clear();
// These can be shared for all code paths
@@ -634,9 +638,12 @@ void DefNewGeneration::collect(bool full,
FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
ReferenceProcessor* rp = ref_processor();
rp->setup_policy(clear_all_soft_refs);
+ const ReferenceProcessorStats& stats =
rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
- NULL);
- if (!promotion_failed()) {
+ NULL, _gc_timer);
+ gc_tracer.report_gc_reference_stats(stats);
+
+ if (!_promotion_failed) {
// Swap the survivor spaces.
eden()->clear(SpaceDecorator::Mangle);
from()->clear(SpaceDecorator::Mangle);
@@ -683,6 +690,7 @@ void DefNewGeneration::collect(bool full,
// Inform the next generation that a promotion failure occurred.
_next_gen->promotion_failure_occurred();
+ gc_tracer.report_promotion_failed(_promotion_failed_info);
// Reset the PromotionFailureALot counters.
NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
@@ -692,11 +700,18 @@ void DefNewGeneration::collect(bool full,
to()->set_concurrent_iteration_safe_limit(to()->top());
SpecializationStats::print();
- // We need to use a monotonically non-deccreasing time in ms
+ // We need to use a monotonically non-decreasing time in ms
// or we will see time-warp warnings and os::javaTimeMillis()
// does not guarantee monotonicity.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
update_time_of_last_gc(now);
+
+ gch->trace_heap_after_gc(&gc_tracer);
+ gc_tracer.report_tenuring_threshold(tenuring_threshold());
+
+ _gc_timer->register_gc_end(os::elapsed_counter());
+
+ gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}
class RemoveForwardPointerClosure: public ObjectClosure {
@@ -708,6 +723,7 @@ public:
void DefNewGeneration::init_assuming_no_promotion_failure() {
_promotion_failed = false;
+ _promotion_failed_info.reset();
from()->set_next_compaction_space(NULL);
}
@@ -729,7 +745,7 @@ void DefNewGeneration::remove_forwarding_pointers() {
}
void DefNewGeneration::preserve_mark(oop obj, markOop m) {
- assert(promotion_failed() && m->must_be_preserved_for_promotion_failure(obj),
+ assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
"Oversaving!");
_objs_with_preserved_marks.push(obj);
_preserved_marks_of_objs.push(m);
@@ -747,6 +763,7 @@ void DefNewGeneration::handle_promotion_failure(oop old) {
old->size());
}
_promotion_failed = true;
+ _promotion_failed_info.register_copy_failure(old->size());
preserve_mark_if_necessary(old, old->mark());
// forward to self
old->forward_to(old);
@@ -882,8 +899,6 @@ bool DefNewGeneration::collection_attempt_is_safe() {
if (_next_gen == NULL) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
_next_gen = gch->next_gen(this);
- assert(_next_gen != NULL,
- "This must be the youngest gen, and not the only gen");
}
return _next_gen->promotion_attempt_is_safe(used());
}
@@ -965,6 +980,10 @@ void DefNewGeneration::record_spaces_top() {
from()->set_top_for_allocations();
}
+void DefNewGeneration::ref_processor_init() {
+ Generation::ref_processor_init();
+}
+
void DefNewGeneration::update_counters() {
if (UsePerfData) {
@@ -1010,6 +1029,9 @@ HeapWord* DefNewGeneration::allocate(size_t word_size,
// have to use it here, as well.
HeapWord* result = eden()->par_allocate(word_size);
if (result != NULL) {
+ if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
+ _next_gen->sample_eden_chunk();
+ }
return result;
}
do {
@@ -1040,13 +1062,19 @@ HeapWord* DefNewGeneration::allocate(size_t word_size,
// circular dependency at compile time.
if (result == NULL) {
result = allocate_from_space(word_size);
+ } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
+ _next_gen->sample_eden_chunk();
}
return result;
}
HeapWord* DefNewGeneration::par_allocate(size_t word_size,
bool is_tlab) {
- return eden()->par_allocate(word_size);
+ HeapWord* res = eden()->par_allocate(word_size);
+ if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
+ _next_gen->sample_eden_chunk();
+ }
+ return res;
}
void DefNewGeneration::gc_prologue(bool full) {
diff --git a/src/share/vm/memory/defNewGeneration.hpp b/src/share/vm/memory/defNewGeneration.hpp
index 38ea742b3..0623d446a 100644
--- a/src/share/vm/memory/defNewGeneration.hpp
+++ b/src/share/vm/memory/defNewGeneration.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,12 +28,14 @@
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/cSpaceCounters.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/generation.inline.hpp"
#include "utilities/stack.hpp"
class EdenSpace;
class ContiguousSpace;
class ScanClosure;
+class STWGCTimer;
// DefNewGeneration is a young generation containing eden, from- and
// to-space.
@@ -46,15 +48,17 @@ protected:
uint _tenuring_threshold; // Tenuring threshold for next collection.
ageTable _age_table;
// Size of object to pretenure in words; command line provides bytes
- size_t _pretenure_size_threshold_words;
+ size_t _pretenure_size_threshold_words;
ageTable* age_table() { return &_age_table; }
+
// Initialize state to optimistically assume no promotion failure will
// happen.
void init_assuming_no_promotion_failure();
// True iff a promotion has failed in the current collection.
bool _promotion_failed;
bool promotion_failed() { return _promotion_failed; }
+ PromotionFailedInfo _promotion_failed_info;
// Handling promotion failure. A young generation collection
// can fail if a live object cannot be copied out of its
@@ -132,6 +136,8 @@ protected:
ContiguousSpace* _from_space;
ContiguousSpace* _to_space;
+ STWGCTimer* _gc_timer;
+
enum SomeProtectedConstants {
// Generations are GenGrain-aligned and have size that are multiples of
// GenGrain.
@@ -150,7 +156,6 @@ protected:
Generation* _g;
public:
IsAliveClosure(Generation* g);
- void do_object(oop p);
bool do_object_b(oop p);
};
@@ -204,6 +209,8 @@ protected:
DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
const char* policy="Copy");
+ virtual void ref_processor_init();
+
virtual Generation::Name kind() { return Generation::DefNew; }
// Accessing spaces
@@ -245,7 +252,6 @@ protected:
// Iteration
void object_iterate(ObjectClosure* blk);
- void object_iterate_since_last_GC(ObjectClosure* cl);
void younger_refs_iterate(OopsInGenClosure* cl);
diff --git a/src/share/vm/memory/filemap.cpp b/src/share/vm/memory/filemap.cpp
index dbc0c87ed..8eb5f4524 100644
--- a/src/share/vm/memory/filemap.cpp
+++ b/src/share/vm/memory/filemap.cpp
@@ -55,6 +55,7 @@ static void fail(const char *msg, va_list ap) {
" shared archive file.\n");
jio_vfprintf(defaultStream::error_stream(), msg, ap);
jio_fprintf(defaultStream::error_stream(), "\n");
+ // Do not change the text of the below message because some tests check for it.
vm_exit_during_initialization("Unable to use shared archive.", NULL);
}
@@ -362,15 +363,12 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
ReservedSpace FileMapInfo::reserve_shared_memory() {
struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
char* requested_addr = si->_base;
- size_t alignment = os::vm_allocation_granularity();
- size_t size = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
- SharedMiscDataSize + SharedMiscCodeSize,
- alignment);
+ size_t size = FileMapInfo::shared_spaces_size();
// Reserve the space first, then map otherwise map will go right over some
// other reserved memory (like the code cache).
- ReservedSpace rs(size, alignment, false, requested_addr);
+ ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr);
if (!rs.is_reserved()) {
fail_continue(err_msg("Unable to reserve shared space at required address " INTPTR_FORMAT, requested_addr));
return rs;
@@ -549,3 +547,29 @@ bool FileMapInfo::is_in_shared_space(const void* p) {
return false;
}
+
+void FileMapInfo::print_shared_spaces() {
+ gclog_or_tty->print_cr("Shared Spaces:");
+ for (int i = 0; i < MetaspaceShared::n_regions; i++) {
+ struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+ gclog_or_tty->print(" %s " INTPTR_FORMAT "-" INTPTR_FORMAT,
+ shared_region_name[i],
+ si->_base, si->_base + si->_used);
+ }
+}
+
+// Unmap mapped regions of shared space.
+void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
+ FileMapInfo *map_info = FileMapInfo::current_info();
+ if (map_info) {
+ map_info->fail_continue(msg);
+ for (int i = 0; i < MetaspaceShared::n_regions; i++) {
+ if (map_info->_header._space[i]._base != NULL) {
+ map_info->unmap_region(i);
+ map_info->_header._space[i]._base = NULL;
+ }
+ }
+ } else if (DumpSharedSpaces) {
+ fail_stop(msg, NULL);
+ }
+}
diff --git a/src/share/vm/memory/filemap.hpp b/src/share/vm/memory/filemap.hpp
index a11914b9c..8d7535ed9 100644
--- a/src/share/vm/memory/filemap.hpp
+++ b/src/share/vm/memory/filemap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -149,6 +149,16 @@ public:
// Return true if given address is in the mapped shared space.
bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
+ void print_shared_spaces() NOT_CDS_RETURN;
+
+ static size_t shared_spaces_size() {
+ return align_size_up(SharedReadOnlySize + SharedReadWriteSize +
+ SharedMiscDataSize + SharedMiscCodeSize,
+ os::vm_allocation_granularity());
+ }
+
+ // Stop CDS sharing and unmap CDS regions.
+ static void stop_sharing_and_unmap(const char* msg);
};
#endif // SHARE_VM_MEMORY_FILEMAP_HPP
diff --git a/src/share/vm/memory/freeList.cpp b/src/share/vm/memory/freeList.cpp
index 05e4ef0a2..385451caf 100644
--- a/src/share/vm/memory/freeList.cpp
+++ b/src/share/vm/memory/freeList.cpp
@@ -55,17 +55,6 @@ FreeList<Chunk>::FreeList() :
}
template <class Chunk>
-FreeList<Chunk>::FreeList(Chunk* fc) :
- _head(fc), _tail(fc)
-#ifdef ASSERT
- , _protecting_lock(NULL)
-#endif
-{
- _size = fc->size();
- _count = 1;
-}
-
-template <class Chunk>
void FreeList<Chunk>::link_head(Chunk* v) {
assert_proper_lock_protection();
set_head(v);
diff --git a/src/share/vm/memory/freeList.hpp b/src/share/vm/memory/freeList.hpp
index 37438cc38..e69c8a32f 100644
--- a/src/share/vm/memory/freeList.hpp
+++ b/src/share/vm/memory/freeList.hpp
@@ -80,8 +80,6 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
// Constructor
// Construct a list without any entries.
FreeList();
- // Construct a list with "fc" as the first (and lone) entry in the list.
- FreeList(Chunk_t* fc);
// Do initialization
void initialize();
@@ -177,9 +175,6 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
// found. Return NULL if "fc" is not found.
bool verify_chunk_in_free_list(Chunk_t* fc) const;
- // Stats verification
-// void verify_stats() const { ShouldNotReachHere(); };
-
// Printing support
static void print_labels_on(outputStream* st, const char* c);
void print_on(outputStream* st, const char* c = NULL) const;
diff --git a/src/share/vm/memory/gcLocker.cpp b/src/share/vm/memory/gcLocker.cpp
index eda728b70..1b0c94ee1 100644
--- a/src/share/vm/memory/gcLocker.cpp
+++ b/src/share/vm/memory/gcLocker.cpp
@@ -122,7 +122,7 @@ void GC_locker::jni_unlock(JavaThread* thread) {
// strictly needed. It's added here to make it clear that
// the GC will NOT be performed if any other caller
// of GC_locker::lock() still needs GC locked.
- if (!is_active()) {
+ if (!is_active_internal()) {
_doing_gc = true;
{
// Must give up the lock while at a safepoint
diff --git a/src/share/vm/memory/gcLocker.hpp b/src/share/vm/memory/gcLocker.hpp
index 8ede8fb27..45b8a8f89 100644
--- a/src/share/vm/memory/gcLocker.hpp
+++ b/src/share/vm/memory/gcLocker.hpp
@@ -88,7 +88,7 @@ class GC_locker: public AllStatic {
public:
// Accessors
static bool is_active() {
- assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
+ assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
return is_active_internal();
}
static bool needs_gc() { return _needs_gc; }
diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp
index 8c2eb34d2..3a5ab210c 100644
--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/filemap.hpp"
@@ -41,7 +42,6 @@
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
-#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
@@ -95,13 +95,13 @@ jint GenCollectedHeap::initialize() {
guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
// The heap must be at least as aligned as generations.
- size_t alignment = Generation::GenGrain;
+ size_t gen_alignment = Generation::GenGrain;
_gen_specs = gen_policy()->generations();
// Make sure the sizes are all aligned.
for (i = 0; i < _n_gens; i++) {
- _gen_specs[i]->align(alignment);
+ _gen_specs[i]->align(gen_alignment);
}
// Allocate space for the heap.
@@ -109,9 +109,11 @@ jint GenCollectedHeap::initialize() {
char* heap_address;
size_t total_reserved = 0;
int n_covered_regions = 0;
- ReservedSpace heap_rs(0);
+ ReservedSpace heap_rs;
- heap_address = allocate(alignment, &total_reserved,
+ size_t heap_alignment = collector_policy()->max_alignment();
+
+ heap_address = allocate(heap_alignment, &total_reserved,
&n_covered_regions, &heap_rs);
if (!heap_rs.is_reserved()) {
@@ -168,6 +170,8 @@ char* GenCollectedHeap::allocate(size_t alignment,
const size_t pageSize = UseLargePages ?
os::large_page_size() : os::vm_page_size();
+ assert(alignment % pageSize == 0, "Must be");
+
for (int i = 0; i < _n_gens; i++) {
total_reserved += _gen_specs[i]->max_size();
if (total_reserved < _gen_specs[i]->max_size()) {
@@ -175,24 +179,17 @@ char* GenCollectedHeap::allocate(size_t alignment,
}
n_covered_regions += _gen_specs[i]->n_covered_regions();
}
- assert(total_reserved % pageSize == 0,
- err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
- SIZE_FORMAT, total_reserved, pageSize));
+ assert(total_reserved % alignment == 0,
+ err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
+ SIZE_FORMAT, total_reserved, alignment));
// Needed until the cardtable is fixed to have the right number
// of covered regions.
n_covered_regions += 2;
- if (UseLargePages) {
- assert(total_reserved != 0, "total_reserved cannot be 0");
- total_reserved = round_to(total_reserved, os::large_page_size());
- if (total_reserved < os::large_page_size()) {
- vm_exit_during_initialization(overflow_msg);
- }
- }
+ *_total_reserved = total_reserved;
+ *_n_covered_regions = n_covered_regions;
- *_total_reserved = total_reserved;
- *_n_covered_regions = n_covered_regions;
*heap_rs = Universe::reserve_heap(total_reserved, alignment);
return heap_rs->base();
}
@@ -388,7 +385,7 @@ void GenCollectedHeap::do_collection(bool full,
const char* gc_cause_prefix = complete ? "Full GC" : "GC";
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
- TraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, gclog_or_tty);
+ GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);
gc_prologue(complete);
increment_total_collections(complete);
@@ -417,10 +414,11 @@ void GenCollectedHeap::do_collection(bool full,
// The full_collections increment was missed above.
increment_total_full_collections();
}
- pre_full_gc_dump(); // do any pre full gc dumps
+ pre_full_gc_dump(NULL); // do any pre full gc dumps
}
// Timer for individual generations. Last argument is false: no CR
- TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
+ // FIXME: We should try to start the timing earlier to cover more of the GC pause
+ GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL);
TraceCollectorStats tcs(_gens[i]->counters());
TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
@@ -534,7 +532,8 @@ void GenCollectedHeap::do_collection(bool full,
complete = complete || (max_level_collected == n_gens() - 1);
if (complete) { // We did a "major" collection
- post_full_gc_dump(); // do any post full gc dumps
+ // FIXME: See comment at pre_full_gc_dump call
+ post_full_gc_dump(NULL); // do any post full gc dumps
}
if (PrintGCDetails) {
@@ -870,12 +869,6 @@ void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
}
}
-void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
- for (int i = 0; i < _n_gens; i++) {
- _gens[i]->object_iterate_since_last_GC(cl);
- }
-}
-
Space* GenCollectedHeap::space_containing(const void* addr) const {
for (int i = 0; i < _n_gens; i++) {
Space* res = _gens[i]->space_containing(addr);
@@ -1074,13 +1067,13 @@ GenCollectedHeap* GenCollectedHeap::heap() {
void GenCollectedHeap::prepare_for_compaction() {
- Generation* scanning_gen = _gens[_n_gens-1];
+  guarantee(_n_gens == 2, "Wrong number of generations");
+ Generation* old_gen = _gens[1];
// Start by compacting into same gen.
- CompactPoint cp(scanning_gen, NULL, NULL);
- while (scanning_gen != NULL) {
- scanning_gen->prepare_for_compaction(&cp);
- scanning_gen = prev_gen(scanning_gen);
- }
+ CompactPoint cp(old_gen, NULL, NULL);
+ old_gen->prepare_for_compaction(&cp);
+ Generation* young_gen = _gens[0];
+ young_gen->prepare_for_compaction(&cp);
}
GCStats* GenCollectedHeap::gc_stats(int level) const {
@@ -1183,8 +1176,6 @@ void GenCollectedHeap::gc_prologue(bool full) {
CollectedHeap::accumulate_statistics_all_tlabs();
ensure_parsability(true); // retire TLABs
- // Call allocation profiler
- AllocationProfiler::iterate_since_last_gc();
// Walk generations
GenGCPrologueClosure blk(full);
generation_iterate(&blk, false); // not old-to-young.
@@ -1217,6 +1208,7 @@ void GenCollectedHeap::gc_epilogue(bool full) {
}
MetaspaceCounters::update_performance_counters();
+ CompressedClassSpaceCounters::update_performance_counters();
always_do_update_barrier = UseConcMarkSweepGC;
};
@@ -1251,27 +1243,14 @@ void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
generation_iterate(&ep_cl, false);
}
-oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
+oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
oop obj,
size_t obj_size) {
+ guarantee(old_gen->level() == 1, "We only get here with an old generation");
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
HeapWord* result = NULL;
- // First give each higher generation a chance to allocate the promoted object.
- Generation* allocator = next_gen(gen);
- if (allocator != NULL) {
- do {
- result = allocator->allocate(obj_size, false);
- } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
- }
-
- if (result == NULL) {
- // Then give gen and higher generations a chance to expand and allocate the
- // object.
- do {
- result = gen->expand_and_allocate(obj_size, false);
- } while (result == NULL && (gen = next_gen(gen)) != NULL);
- }
+ result = old_gen->expand_and_allocate(obj_size, false);
if (result != NULL) {
Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
diff --git a/src/share/vm/memory/genCollectedHeap.hpp b/src/share/vm/memory/genCollectedHeap.hpp
index 783cd372d..8f814132a 100644
--- a/src/share/vm/memory/genCollectedHeap.hpp
+++ b/src/share/vm/memory/genCollectedHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -148,6 +148,11 @@ public:
return gen_policy()->size_policy();
}
+ // Return the (conservative) maximum heap alignment
+ static size_t conservative_max_heap_alignment() {
+ return Generation::GenGrain;
+ }
+
size_t capacity() const;
size_t used() const;
@@ -222,7 +227,6 @@ public:
void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
void object_iterate(ObjectClosure* cl);
void safe_object_iterate(ObjectClosure* cl);
- void object_iterate_since_last_GC(ObjectClosure* cl);
Space* space_containing(const void* addr) const;
// A CollectedHeap is divided into a dense sequence of "blocks"; that is,
@@ -369,25 +373,23 @@ public:
// collection.
virtual bool is_maximal_no_gc() const;
- // Return the generation before "gen", or else NULL.
+ // Return the generation before "gen".
Generation* prev_gen(Generation* gen) const {
int l = gen->level();
- if (l == 0) return NULL;
- else return _gens[l-1];
+ guarantee(l > 0, "Out of bounds");
+ return _gens[l-1];
}
- // Return the generation after "gen", or else NULL.
+ // Return the generation after "gen".
Generation* next_gen(Generation* gen) const {
int l = gen->level() + 1;
- if (l == _n_gens) return NULL;
- else return _gens[l];
+ guarantee(l < _n_gens, "Out of bounds");
+ return _gens[l];
}
Generation* get_gen(int i) const {
- if (i >= 0 && i < _n_gens)
- return _gens[i];
- else
- return NULL;
+ guarantee(i >= 0 && i < _n_gens, "Out of bounds");
+ return _gens[i];
}
int n_gens() const {
@@ -486,9 +488,9 @@ public:
// Promotion of obj into gen failed. Try to promote obj to higher
// gens in ascending order; return the new location of obj if successful.
- // Otherwise, try expand-and-allocate for obj in each generation starting at
- // gen; return the new location of obj if successful. Otherwise, return NULL.
- oop handle_failed_promotion(Generation* gen,
+ // Otherwise, try expand-and-allocate for obj in both the young and old
+ // generation; return the new location of obj if successful. Otherwise, return NULL.
+ oop handle_failed_promotion(Generation* old_gen,
oop obj,
size_t obj_size);
diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp
index 6d0fd9b49..fdeba2ada 100644
--- a/src/share/vm/memory/genMarkSweep.cpp
+++ b/src/share/vm/memory/genMarkSweep.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,10 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
@@ -48,8 +52,8 @@
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
-void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
- bool clear_all_softrefs) {
+void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs) {
+ guarantee(level == 1, "We always collect both old and young.");
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -65,7 +69,9 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
_ref_processor = rp;
rp->setup_policy(clear_all_softrefs);
- TraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
+ GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
+
+ gch->trace_heap_before_gc(_gc_tracer);
// When collecting the permanent generation Method*s may be moving,
// so we either have to flush all bcp data or convert it into bci.
@@ -78,11 +84,6 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
// Capture heap size before collection for printing.
size_t gch_prev_used = gch->used();
- // Some of the card table updates below assume that the perm gen is
- // also being collected.
- assert(level == gch->n_gens() - 1,
- "All generations are being collected, ergo perm gen too.");
-
// Capture used regions for each generation that will be
// subject to collection, so that card table adjustments can
// be made intelligently (see clear / invalidate further below).
@@ -120,17 +121,15 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
all_empty = all_empty && gch->get_gen(i)->used() == 0;
}
GenRemSet* rs = gch->rem_set();
+ Generation* old_gen = gch->get_gen(level);
// Clear/invalidate below make use of the "prev_used_regions" saved earlier.
if (all_empty) {
// We've evacuated all generations below us.
- Generation* g = gch->get_gen(level);
- rs->clear_into_younger(g);
+ rs->clear_into_younger(old_gen);
} else {
// Invalidate the cards corresponding to the currently used
- // region and clear those corresponding to the evacuated region
- // of all generations just collected (i.e. level and younger).
- rs->invalidate_or_clear(gch->get_gen(level),
- true /* younger */);
+ // region and clear those corresponding to the evacuated region.
+ rs->invalidate_or_clear(old_gen);
}
Threads::gc_epilogue();
@@ -155,6 +154,8 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
// does not guarantee monotonicity.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
gch->update_time_of_last_gc(now);
+
+ gch->trace_heap_after_gc(_gc_tracer);
}
void GenMarkSweep::allocate_stacks() {
@@ -192,7 +193,7 @@ void GenMarkSweep::deallocate_stacks() {
void GenMarkSweep::mark_sweep_phase1(int level,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
+ GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer);
trace(" 1");
GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -219,8 +220,10 @@ void GenMarkSweep::mark_sweep_phase1(int level,
// Process reference objects found during marking
{
ref_processor()->setup_policy(clear_all_softrefs);
- ref_processor()->process_discovered_references(
- &is_alive, &keep_alive, &follow_stack_closure, NULL);
+ const ReferenceProcessorStats& stats =
+ ref_processor()->process_discovered_references(
+ &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer);
+ gc_tracer()->report_gc_reference_stats(stats);
}
// This is the point where the entire marking should have completed.
@@ -240,6 +243,8 @@ void GenMarkSweep::mark_sweep_phase1(int level,
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
+
+ gc_tracer()->report_object_count_after_gc(&is_alive);
}
@@ -259,7 +264,7 @@ void GenMarkSweep::mark_sweep_phase2() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
- TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
+ GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer);
trace("2");
gch->prepare_for_compaction();
@@ -276,7 +281,7 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
// Adjust the pointers to reflect the new locations
- TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
+ GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer);
trace("3");
// Need new claim bits for the pointer adjustment tracing.
@@ -331,7 +336,7 @@ void GenMarkSweep::mark_sweep_phase4() {
// to use a higher index (saved from phase2) when verifying perm_gen.
GenCollectedHeap* gch = GenCollectedHeap::heap();
- TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
+ GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer);
trace("4");
GenCompactClosure blk;
diff --git a/src/share/vm/memory/genRemSet.hpp b/src/share/vm/memory/genRemSet.hpp
index e6b8302ad..44a435407 100644
--- a/src/share/vm/memory/genRemSet.hpp
+++ b/src/share/vm/memory/genRemSet.hpp
@@ -135,7 +135,7 @@ public:
// younger than gen from generations gen and older.
// The parameter clear_perm indicates if the perm_gen's
// remembered set should also be processed/cleared.
- virtual void clear_into_younger(Generation* gen) = 0;
+ virtual void clear_into_younger(Generation* old_gen) = 0;
// Informs the RS that refs in the given "mr" may have changed
// arbitrarily, and therefore may contain old-to-young pointers.
@@ -146,11 +146,8 @@ public:
// Informs the RS that refs in this generation
// may have changed arbitrarily, and therefore may contain
- // old-to-young pointers in arbitrary locations. The parameter
- // younger indicates if the same should be done for younger generations
- // as well. The parameter perm indicates if the same should be done for
- // perm gen as well.
- virtual void invalidate_or_clear(Generation* gen, bool younger) = 0;
+ // old-to-young pointers in arbitrary locations.
+ virtual void invalidate_or_clear(Generation* old_gen) = 0;
};
#endif // SHARE_VM_MEMORY_GENREMSET_HPP
diff --git a/src/share/vm/memory/generation.cpp b/src/share/vm/memory/generation.cpp
index b60887465..f9c986fa7 100644
--- a/src/share/vm/memory/generation.cpp
+++ b/src/share/vm/memory/generation.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,8 @@
*/
#include "precompiled.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
@@ -624,12 +626,26 @@ void OneContigSpaceCardGeneration::collect(bool full,
bool clear_all_soft_refs,
size_t size,
bool is_tlab) {
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+
SpecializationStats::clear();
// Temporarily expand the span of our ref processor, so
// refs discovery is over the entire heap, not just this generation
ReferenceProcessorSpanMutator
- x(ref_processor(), GenCollectedHeap::heap()->reserved_region());
+ x(ref_processor(), gch->reserved_region());
+
+ STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
+ gc_timer->register_gc_start(os::elapsed_counter());
+
+ SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
+ gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
+
GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
+
+ gc_timer->register_gc_end(os::elapsed_counter());
+
+ gc_tracer->report_gc_end(os::elapsed_counter(), gc_timer->time_partitions());
+
SpecializationStats::print();
}
@@ -795,16 +811,6 @@ void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
blk->do_space(_the_space);
}
-void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) {
- // Deal with delayed initialization of _the_space,
- // and lack of initialization of _last_gc.
- if (_last_gc.space() == NULL) {
- assert(the_space() != NULL, "shouldn't be NULL");
- _last_gc = the_space()->bottom_mark();
- }
- the_space()->object_iterate_from(_last_gc, blk);
-}
-
void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
blk->set_generation(this);
younger_refs_in_space_iterate(_the_space, blk);
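The additions to OneContigSpaceCardGeneration::collect() follow the standard bracketing of the GC tracing framework: register the start on the timer, announce the start to the tracer, do the collection, then register and report the end. A condensed sketch of that pattern, with do_the_collection() as a hypothetical stand-in for invoke_at_safepoint():

    // Sketch: timer/tracer bracketing around a stop-the-world collection.
    STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
    gc_timer->register_gc_start(os::elapsed_counter());

    SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
    gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

    do_the_collection();  // hypothetical stand-in for the actual GC work

    gc_timer->register_gc_end(os::elapsed_counter());
    gc_tracer->report_gc_end(os::elapsed_counter(), gc_timer->time_partitions());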
diff --git a/src/share/vm/memory/generation.hpp b/src/share/vm/memory/generation.hpp
index feb4fde18..290cce583 100644
--- a/src/share/vm/memory/generation.hpp
+++ b/src/share/vm/memory/generation.hpp
@@ -455,6 +455,7 @@ class Generation: public CHeapObj<mtGC> {
// expected to be GC worker thread-local, with the worker index
// indicated by "thr_num".
virtual void* get_data_recorder(int thr_num) { return NULL; }
+ virtual void sample_eden_chunk() {}
// Some generations may require some cleanup actions before allowing
// a verification.
@@ -551,12 +552,6 @@ class Generation: public CHeapObj<mtGC> {
// the heap. This defaults to object_iterate() unless overridden.
virtual void safe_object_iterate(ObjectClosure* cl);
- // Iterate over all objects allocated in the generation since the last
- // collection, calling "cl.do_object" on each. The generation must have
- // been initialized properly to support this function, or else this call
- // will fail.
- virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;
-
// Apply "cl->do_oop" to (the address of) all and only all the ref fields
// in the current generation that contain pointers to objects in younger
// generations. Objects allocated since the last "save_marks" call are
@@ -724,7 +719,6 @@ class OneContigSpaceCardGeneration: public CardGeneration {
// Iteration
void object_iterate(ObjectClosure* blk);
void space_iterate(SpaceClosure* blk, bool usedOnly = false);
- void object_iterate_since_last_GC(ObjectClosure* cl);
void younger_refs_iterate(OopsInGenClosure* blk);
diff --git a/src/share/vm/memory/heap.cpp b/src/share/vm/memory/heap.cpp
index 727690b5c..f00709684 100644
--- a/src/share/vm/memory/heap.cpp
+++ b/src/share/vm/memory/heap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -118,9 +118,12 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
_number_of_committed_segments = size_to_segments(_memory.committed_size());
_number_of_reserved_segments = size_to_segments(_memory.reserved_size());
assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
+ const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
+ const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
+ const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
// reserve space for _segmap
- if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) {
+ if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
return false;
}
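The fix reserves _segmap with an alignment of MAX2(page size, reservation granularity) rather than page size alone, since on some platforms (notably Windows) reservations must be granularity-aligned. A worked example with illustrative sizes, assuming a 4K page and 64K granularity:

    // Sketch: alignment arithmetic for the segment map reservation.
    const size_t page_size   = 4 * K;   // assumed os::vm_page_size()
    const size_t granularity = 64 * K;  // assumed reservation granularity
    const size_t reserved_segments_alignment = MAX2(page_size, granularity); // 64K
    // For 20000 reserved segments (1 byte each):
    //   align_size_up(20000, 64K) = 65536 bytes reserved,
    // where page alignment alone would have under-aligned to 20480.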
diff --git a/src/share/vm/memory/heapInspection.cpp b/src/share/vm/memory/heapInspection.cpp
index a51ea1d15..bf65c882c 100644
--- a/src/share/vm/memory/heapInspection.cpp
+++ b/src/share/vm/memory/heapInspection.cpp
@@ -95,7 +95,7 @@ KlassInfoEntry* KlassInfoBucket::lookup(Klass* const k) {
}
elt = elt->next();
}
- elt = new KlassInfoEntry(k, list());
+ elt = new (std::nothrow) KlassInfoEntry(k, list());
// We may be out of space to allocate the new entry.
if (elt != NULL) {
set_list(elt);
@@ -127,13 +127,15 @@ void KlassInfoTable::AllClassesFinder::do_klass(Klass* k) {
_table->lookup(k);
}
-KlassInfoTable::KlassInfoTable(int size, HeapWord* ref,
- bool need_class_stats) {
+KlassInfoTable::KlassInfoTable(bool need_class_stats) {
+ _size_of_instances_in_words = 0;
_size = 0;
- _ref = ref;
- _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size, mtInternal);
+ _ref = (HeapWord*) Universe::boolArrayKlassObj();
+ _buckets =
+ (KlassInfoBucket*) AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
+ mtInternal, 0, AllocFailStrategy::RETURN_NULL);
if (_buckets != NULL) {
- _size = size;
+ _size = _num_buckets;
for (int index = 0; index < _size; index++) {
_buckets[index].initialize();
}
@@ -154,12 +156,11 @@ KlassInfoTable::~KlassInfoTable() {
}
}
-uint KlassInfoTable::hash(Klass* p) {
- assert(p->is_metadata(), "all klasses are metadata");
+uint KlassInfoTable::hash(const Klass* p) {
return (uint)(((uintptr_t)p - (uintptr_t)_ref) >> 2);
}
-KlassInfoEntry* KlassInfoTable::lookup(Klass* const k) {
+KlassInfoEntry* KlassInfoTable::lookup(Klass* k) {
uint idx = hash(k) % _size;
assert(_buckets != NULL, "Allocation failure should have been caught");
KlassInfoEntry* e = _buckets[idx].lookup(k);
@@ -179,6 +180,7 @@ bool KlassInfoTable::record_instance(const oop obj) {
if (elt != NULL) {
elt->set_count(elt->count() + 1);
elt->set_words(elt->words() + obj->size());
+ _size_of_instances_in_words += obj->size();
return true;
} else {
return false;
@@ -192,14 +194,18 @@ void KlassInfoTable::iterate(KlassInfoClosure* cic) {
}
}
+size_t KlassInfoTable::size_of_instances_in_words() const {
+ return _size_of_instances_in_words;
+}
+
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
return (*e1)->compare(*e1,*e2);
}
-KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit, const char* title, int estimatedCount) :
+KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit, const char* title) :
_cit(cit),
_title(title) {
- _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(estimatedCount,true);
+ _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true);
}
KlassInfoHisto::~KlassInfoHisto() {
@@ -444,25 +450,37 @@ class RecordInstanceClosure : public ObjectClosure {
private:
KlassInfoTable* _cit;
size_t _missed_count;
+ BoolObjectClosure* _filter;
public:
- RecordInstanceClosure(KlassInfoTable* cit) :
- _cit(cit), _missed_count(0) {}
+ RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
+ _cit(cit), _missed_count(0), _filter(filter) {}
void do_object(oop obj) {
- if (!_cit->record_instance(obj)) {
- _missed_count++;
+ if (should_visit(obj)) {
+ if (!_cit->record_instance(obj)) {
+ _missed_count++;
+ }
}
}
size_t missed_count() { return _missed_count; }
+
+ private:
+ bool should_visit(oop obj) {
+ return _filter == NULL || _filter->do_object_b(obj);
+ }
};
-void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) {
+size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
+ ResourceMark rm;
+
+ RecordInstanceClosure ric(cit, filter);
+ Universe::heap()->object_iterate(&ric);
+ return ric.missed_count();
+}
+
+void HeapInspection::heap_inspection(outputStream* st) {
ResourceMark rm;
- // Get some random number for ref (the hash key)
- HeapWord* ref = (HeapWord*) Universe::boolArrayKlassObj();
- CollectedHeap* heap = Universe::heap();
- bool is_shared_heap = false;
if (_print_help) {
for (int c=0; c<KlassSizeStats::_num_columns; c++) {
@@ -482,39 +500,30 @@ void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) {
return;
}
- // Collect klass instance info
- KlassInfoTable cit(KlassInfoTable::cit_size, ref, _print_class_stats);
+ KlassInfoTable cit(_print_class_stats);
if (!cit.allocation_failed()) {
- // Iterate over objects in the heap
- RecordInstanceClosure ric(&cit);
- Universe::heap()->object_iterate(&ric);
-
- // Report if certain classes are not counted because of
- // running out of C-heap for the histogram.
- size_t missed_count = ric.missed_count();
+ size_t missed_count = populate_table(&cit);
if (missed_count != 0) {
st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
" total instances in data below",
missed_count);
}
+
// Sort and print klass instance info
const char *title = "\n"
" num #instances #bytes class name\n"
"----------------------------------------------";
- KlassInfoHisto histo(&cit, title, KlassInfoHisto::histo_initial_size);
+ KlassInfoHisto histo(&cit, title);
HistoClosure hc(&histo);
+
cit.iterate(&hc);
+
histo.sort();
histo.print_histo_on(st, _print_class_stats, _csv_format, _columns);
} else {
st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
}
st->flush();
-
- if (need_prologue && is_shared_heap) {
- SharedHeap* sh = (SharedHeap*)heap;
- sh->gc_epilogue(false /* !full */); // release all acquired locks, etc.
- }
}
class FindInstanceClosure : public ObjectClosure {
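With the new filter parameter, populate_table() can restrict the class census to objects matched by a BoolObjectClosure, while passing NULL keeps the old count-everything behavior. A hedged sketch of a caller-supplied filter; ArrayOnlyFilter is illustrative and not part of this patch:

    // Sketch: census restricted to array instances only.
    class ArrayOnlyFilter : public BoolObjectClosure {  // hypothetical
     public:
      bool do_object_b(oop obj) { return obj->is_array(); }
    };

    KlassInfoTable cit(false /* need_class_stats */);
    if (!cit.allocation_failed()) {
      ArrayOnlyFilter filter;
      HeapInspection hi(false, false, false, NULL);
      size_t missed = hi.populate_table(&cit, &filter);  // 0 if nothing was dropped
    }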
diff --git a/src/share/vm/memory/heapInspection.hpp b/src/share/vm/memory/heapInspection.hpp
index a5de4551e..09558b0a2 100644
--- a/src/share/vm/memory/heapInspection.hpp
+++ b/src/share/vm/memory/heapInspection.hpp
@@ -189,21 +189,21 @@ class KlassInfoEntry: public CHeapObj<mtInternal> {
KlassInfoEntry(Klass* k, KlassInfoEntry* next) :
_klass(k), _instance_count(0), _instance_words(0), _next(next), _index(-1)
{}
- KlassInfoEntry* next() { return _next; }
- bool is_equal(Klass* k) { return k == _klass; }
- Klass* klass() { return _klass; }
- long count() { return _instance_count; }
+ KlassInfoEntry* next() const { return _next; }
+ bool is_equal(const Klass* k) { return k == _klass; }
+ Klass* klass() const { return _klass; }
+ long count() const { return _instance_count; }
void set_count(long ct) { _instance_count = ct; }
- size_t words() { return _instance_words; }
+ size_t words() const { return _instance_words; }
void set_words(size_t wds) { _instance_words = wds; }
void set_index(long index) { _index = index; }
- long index() { return _index; }
+ long index() const { return _index; }
int compare(KlassInfoEntry* e1, KlassInfoEntry* e2);
void print_on(outputStream* st) const;
const char* name() const;
};
-class KlassInfoClosure: public StackObj {
+class KlassInfoClosure : public StackObj {
public:
// Called for each KlassInfoEntry.
virtual void do_cinfo(KlassInfoEntry* cie) = 0;
@@ -215,7 +215,7 @@ class KlassInfoBucket: public CHeapObj<mtInternal> {
KlassInfoEntry* list() { return _list; }
void set_list(KlassInfoEntry* l) { _list = l; }
public:
- KlassInfoEntry* lookup(Klass* const k);
+ KlassInfoEntry* lookup(Klass* k);
void initialize() { _list = NULL; }
void empty();
void iterate(KlassInfoClosure* cic);
@@ -224,6 +224,8 @@ class KlassInfoBucket: public CHeapObj<mtInternal> {
class KlassInfoTable: public StackObj {
private:
int _size;
+ static const int _num_buckets = 20011;
+ size_t _size_of_instances_in_words;
// An aligned reference address (typically the least
// address in the perm gen) used for hashing klass
@@ -231,8 +233,8 @@ class KlassInfoTable: public StackObj {
HeapWord* _ref;
KlassInfoBucket* _buckets;
- uint hash(Klass* p);
- KlassInfoEntry* lookup(Klass* const k); // allocates if not found!
+ uint hash(const Klass* p);
+ KlassInfoEntry* lookup(Klass* k); // allocates if not found!
class AllClassesFinder : public KlassClosure {
KlassInfoTable *_table;
@@ -242,21 +244,19 @@ class KlassInfoTable: public StackObj {
};
public:
- // Table size
- enum {
- cit_size = 20011
- };
- KlassInfoTable(int size, HeapWord* ref, bool need_class_stats);
+ KlassInfoTable(bool need_class_stats);
~KlassInfoTable();
bool record_instance(const oop obj);
void iterate(KlassInfoClosure* cic);
bool allocation_failed() { return _buckets == NULL; }
+ size_t size_of_instances_in_words() const;
friend class KlassInfoHisto;
};
class KlassInfoHisto : public StackObj {
private:
+ static const int _histo_initial_size = 1000;
KlassInfoTable *_cit;
GrowableArray<KlassInfoEntry*>* _elements;
GrowableArray<KlassInfoEntry*>* elements() const { return _elements; }
@@ -334,11 +334,7 @@ class KlassInfoHisto : public StackObj {
}
public:
- enum {
- histo_initial_size = 1000
- };
- KlassInfoHisto(KlassInfoTable* cit, const char* title,
- int estimatedCount);
+ KlassInfoHisto(KlassInfoTable* cit, const char* title);
~KlassInfoHisto();
void add(KlassInfoEntry* cie);
void print_histo_on(outputStream* st, bool print_class_stats, bool csv_format, const char *columns);
@@ -347,6 +343,11 @@ class KlassInfoHisto : public StackObj {
#endif // INCLUDE_SERVICES
+// These declarations are needed since the declarations of KlassInfoTable and
+// KlassInfoClosure are guarded by #if INCLUDE_SERVICES
+class KlassInfoTable;
+class KlassInfoClosure;
+
class HeapInspection : public StackObj {
bool _csv_format; // "comma separated values" format for spreadsheet.
bool _print_help;
@@ -357,8 +358,11 @@ class HeapInspection : public StackObj {
bool print_class_stats, const char *columns) :
_csv_format(csv_format), _print_help(print_help),
_print_class_stats(print_class_stats), _columns(columns) {}
- void heap_inspection(outputStream* st, bool need_prologue) NOT_SERVICES_RETURN;
+ void heap_inspection(outputStream* st) NOT_SERVICES_RETURN;
+ size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN;
static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN;
+ private:
+ void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL);
};
#endif // SHARE_VM_MEMORY_HEAPINSPECTION_HPP
diff --git a/src/share/vm/memory/iterator.cpp b/src/share/vm/memory/iterator.cpp
index e33a5614d..545ab9213 100644
--- a/src/share/vm/memory/iterator.cpp
+++ b/src/share/vm/memory/iterator.cpp
@@ -64,7 +64,7 @@ void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
}
void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) {
- nm->oops_do(_cl, /*do_strong_roots_only=*/ true);
+ nm->oops_do(_cl, /*allow_zombie=*/ false);
}
void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
diff --git a/src/share/vm/memory/iterator.hpp b/src/share/vm/memory/iterator.hpp
index 736efdd61..e590a3e9b 100644
--- a/src/share/vm/memory/iterator.hpp
+++ b/src/share/vm/memory/iterator.hpp
@@ -158,7 +158,7 @@ class ObjectClosure : public Closure {
};
-class BoolObjectClosure : public ObjectClosure {
+class BoolObjectClosure : public Closure {
public:
virtual bool do_object_b(oop obj) = 0;
};
diff --git a/src/share/vm/memory/memRegion.cpp b/src/share/vm/memory/memRegion.cpp
index 70483f9ad..9eb2be569 100644
--- a/src/share/vm/memory/memRegion.cpp
+++ b/src/share/vm/memory/memRegion.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,8 @@
*/
#include "precompiled.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/memRegion.hpp"
#include "runtime/globals.hpp"
@@ -99,3 +101,19 @@ MemRegion MemRegion::minus(const MemRegion mr2) const {
ShouldNotReachHere();
return MemRegion();
}
+
+void* MemRegion::operator new(size_t size) throw() {
+ return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
+}
+
+void* MemRegion::operator new [](size_t size) throw() {
+ return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
+}
+
+void MemRegion::operator delete(void* p) {
+ FreeHeap(p, mtGC);
+}
+
+void MemRegion::operator delete [](void* p) {
+ FreeHeap(p, mtGC);
+}
+
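Because both operators allocate with AllocFailStrategy::RETURN_NULL, an exhausted C heap surfaces as a NULL pointer instead of a VM abort, so the heap-allocating callers this change enables (e.g. in CardTableModRefBS) must check the result. A minimal sketch:

    // Sketch: MemRegion's operator new[] is nothrow, so test for NULL.
    MemRegion* regions = new MemRegion[10];  // array size is illustrative
    if (regions == NULL) {
      return;  // handle the allocation failure; nothing aborts automatically
    }
    // ... use the array ...
    delete [] regions;  // pairs with the added operator delete[]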
diff --git a/src/share/vm/memory/memRegion.hpp b/src/share/vm/memory/memRegion.hpp
index 66f90f3bc..9a72fac75 100644
--- a/src/share/vm/memory/memRegion.hpp
+++ b/src/share/vm/memory/memRegion.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,9 @@
// Note that MemRegions are passed by value, not by reference.
// The intent is that they remain very small and contain no
-// objects.
+// objects. _ValueObj should never be allocated on the heap, but we do
+// create MemRegions (in CardTableModRefBS) on the heap, so operator
+// new and operator new [] are added for this special case.
class MetaWord;
@@ -92,6 +94,10 @@ public:
size_t word_size() const { return _word_size; }
bool is_empty() const { return word_size() == 0; }
+ void* operator new(size_t size) throw();
+ void* operator new [](size_t size) throw();
+ void operator delete(void* p);
+ void operator delete [](void* p);
};
// For iteration over MemRegion's.
@@ -105,13 +111,13 @@ public:
class MemRegionClosureRO: public MemRegionClosure {
public:
- void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) {
+ void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) throw() {
return ResourceObj::operator new(size, type, flags);
}
- void* operator new(size_t size, Arena *arena) {
+ void* operator new(size_t size, Arena *arena) throw() {
return ResourceObj::operator new(size, arena);
}
- void* operator new(size_t size) {
+ void* operator new(size_t size) throw() {
return ResourceObj::operator new(size);
}
diff --git a/src/share/vm/memory/metablock.cpp b/src/share/vm/memory/metablock.cpp
index 450d2c319..b6c6947e1 100644
--- a/src/share/vm/memory/metablock.cpp
+++ b/src/share/vm/memory/metablock.cpp
@@ -50,13 +50,6 @@
// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
size_t Metablock::_min_block_byte_size = sizeof(Metablock);
-#ifdef ASSERT
-size_t Metablock::_overhead =
- Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
-#else
-size_t Metablock::_overhead = 0;
-#endif
-
// New blocks returned by the Metaspace are zero initialized.
// We should fix the constructors to not assume this instead.
Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
diff --git a/src/share/vm/memory/metablock.hpp b/src/share/vm/memory/metablock.hpp
index 220d36148..fa4c6c0b4 100644
--- a/src/share/vm/memory/metablock.hpp
+++ b/src/share/vm/memory/metablock.hpp
@@ -48,7 +48,6 @@ class Metablock VALUE_OBJ_CLASS_SPEC {
} _header;
} _block;
static size_t _min_block_byte_size;
- static size_t _overhead;
typedef union block_t Block;
typedef struct header_t Header;
@@ -73,7 +72,6 @@ class Metablock VALUE_OBJ_CLASS_SPEC {
void set_prev(Metablock* v) { _block._header._prev = v; }
static size_t min_block_byte_size() { return _min_block_byte_size; }
- static size_t overhead() { return _overhead; }
bool is_free() { return header()->_word_size != 0; }
void clear_next() { set_next(NULL); }
diff --git a/src/share/vm/memory/metaspace.cpp b/src/share/vm/memory/metaspace.cpp
index 68189bf3e..06f3d0937 100644
--- a/src/share/vm/memory/metaspace.cpp
+++ b/src/share/vm/memory/metaspace.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
+#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
@@ -35,6 +36,7 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals.hpp"
+#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
@@ -50,10 +52,12 @@ const bool metaspace_slow_verify = false;
// Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lock_chunk = 3;
-size_t const allocation_from_dictionary_limit = 64 * K;
+size_t const allocation_from_dictionary_limit = 4 * K;
MetaWord* last_allocated = 0;
+size_t Metaspace::_class_metaspace_size;
+
// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
ZeroIndex = 0,
@@ -70,7 +74,7 @@ enum ChunkSizes { // in words.
SpecializedChunk = 128,
ClassSmallChunk = 256,
SmallChunk = 512,
- ClassMediumChunk = 1 * K,
+ ClassMediumChunk = 4 * K,
MediumChunk = 8 * K,
HumongousChunkGranularity = 8
};
@@ -108,7 +112,7 @@ typedef class FreeList<Metachunk> ChunkList;
// Has three lists of free chunks, and a total size and
// count that includes all three
-class ChunkManager VALUE_OBJ_CLASS_SPEC {
+class ChunkManager : public CHeapObj<mtInternal> {
// Free list of chunks of different sizes.
// SpecializedChunk
@@ -155,7 +159,12 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC {
public:
- ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
+ ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
+ : _free_chunks_total(0), _free_chunks_count(0) {
+ _free_chunks[SpecializedIndex].set_size(specialized_size);
+ _free_chunks[SmallIndex].set_size(small_size);
+ _free_chunks[MediumIndex].set_size(medium_size);
+ }
// add or delete (return) a chunk to the global freelist.
Metachunk* chunk_freelist_allocate(size_t word_size);
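ChunkManager is now a heap-allocated object whose three fixed list sizes are supplied at construction, replacing the set_size() calls removed from the VirtualSpaceList constructors further down. A hedged sketch of the two instantiations this design implies, one per metadata type:

    // Sketch: one ChunkManager per metadata type, sized from ChunkSizes.
    ChunkManager* non_class_cm =
        new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
    ChunkManager* class_cm =
        new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);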
@@ -174,8 +183,8 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC {
void return_chunks(ChunkIndex index, Metachunk* chunks);
// Total of the space in the free chunks list
- size_t free_chunks_total();
- size_t free_chunks_total_in_bytes();
+ size_t free_chunks_total_words();
+ size_t free_chunks_total_bytes();
// Number of chunks in the free chunks list
size_t free_chunks_count();
@@ -216,7 +225,7 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC {
void locked_print_free_chunks(outputStream* st);
void locked_print_sum_free_chunks(outputStream* st);
- void print_on(outputStream* st);
+ void print_on(outputStream* st) const;
};
// Used to manage the free list of Metablocks (a block corresponds
@@ -225,6 +234,10 @@ class BlockFreelist VALUE_OBJ_CLASS_SPEC {
BlockTreeDictionary* _dictionary;
static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
+ // Only allocate and split from freelist if the size of the allocation
+ // is at least 1/4th the size of the available block.
+ const static int WasteMultiplier = 4;
+
// Accessors
BlockTreeDictionary* dictionary() const { return _dictionary; }
@@ -261,10 +274,6 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
// count of chunks contained in this VirtualSpace
uintx _container_count;
- // Convenience functions for logical bottom and end
- MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
- MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
-
// Convenience functions to access the _virtual_space
char* low() const { return virtual_space()->low(); }
char* high() const { return virtual_space()->high(); }
@@ -273,17 +282,20 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
// VirtualSpace
Metachunk* first_chunk() { return (Metachunk*) bottom(); }
- void inc_container_count();
-#ifdef ASSERT
- uint container_count_slow();
-#endif
-
public:
VirtualSpaceNode(size_t byte_size);
VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
~VirtualSpaceNode();
+ // Convenience functions for logical bottom and end
+ MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
+ MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
+
+ size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; }
+ size_t expanded_words() const { return _virtual_space.committed_size() / BytesPerWord; }
+ size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
+
// address of next available space in _virtual_space;
// Accessors
VirtualSpaceNode* next() { return _next; }
@@ -303,8 +315,10 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
void inc_top(size_t word_size) { _top += word_size; }
uintx container_count() { return _container_count; }
+ void inc_container_count();
void dec_container_count();
#ifdef ASSERT
+ uint container_count_slow();
void verify_container_count();
#endif
@@ -320,12 +334,10 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
// Allocate a chunk from the virtual space and return it.
Metachunk* get_chunk_vs(size_t chunk_word_size);
- Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
// Expands/shrinks the committed space in a virtual space. Delegates
// to Virtualspace
bool expand_by(size_t words, bool pre_touch = false);
- bool shrink_by(size_t words);
// In preparation for deleting this node, remove all the chunks
// in the node from any freelist.
@@ -333,8 +345,6 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
#ifdef ASSERT
// Debug support
- static void verify_virtual_space_total();
- static void verify_virtual_space_count();
void mangle();
#endif
@@ -342,7 +352,7 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
};
// byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
+VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
// align up to vm allocation granularity
byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
@@ -414,16 +424,17 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
VirtualSpaceNode* _virtual_space_list;
// virtual space currently being used for allocations
VirtualSpaceNode* _current_virtual_space;
- // Free chunk list for all other metadata
- ChunkManager _chunk_manager;
// Can this virtual list allocate >1 spaces? Also, used to determine
// whether to allocate unlimited small chunks in this virtual space
bool _is_class;
- bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
+ bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
+
+ // Sum of reserved and committed memory in the virtual spaces
+ size_t _reserved_words;
+ size_t _committed_words;
- // Sum of space in all virtual spaces and number of virtual spaces
- size_t _virtual_space_total;
+ // Number of virtual spaces
size_t _virtual_space_count;
~VirtualSpaceList();
@@ -437,7 +448,7 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
_current_virtual_space = v;
}
- void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
+ void link_vs(VirtualSpaceNode* new_entry);
// Get another virtual space and add it to the list. This
// is typically prompted by a failed attempt to allocate a chunk
@@ -454,6 +465,8 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
size_t grow_chunks_by_words,
size_t medium_chunk_bunch);
+ bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);
+
// Get the first chunk for a Metaspace. Used for
// special cases such as the boot class loader, reflection
// class loader and anonymous class loader.
@@ -463,28 +476,25 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
return _current_virtual_space;
}
- ChunkManager* chunk_manager() { return &_chunk_manager; }
bool is_class() const { return _is_class; }
// Allocate the first virtualspace.
void initialize(size_t word_size);
- size_t virtual_space_total() { return _virtual_space_total; }
+ size_t reserved_words() { return _reserved_words; }
+ size_t reserved_bytes() { return reserved_words() * BytesPerWord; }
+ size_t committed_words() { return _committed_words; }
+ size_t committed_bytes() { return committed_words() * BytesPerWord; }
- void inc_virtual_space_total(size_t v);
- void dec_virtual_space_total(size_t v);
+ void inc_reserved_words(size_t v);
+ void dec_reserved_words(size_t v);
+ void inc_committed_words(size_t v);
+ void dec_committed_words(size_t v);
void inc_virtual_space_count();
void dec_virtual_space_count();
// Unlink empty VirtualSpaceNodes and free it.
- void purge();
-
- // Used and capacity in the entire list of virtual spaces.
- // These are global values shared by all Metaspaces
- size_t capacity_words_sum();
- size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
- size_t used_words_sum();
- size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
+ void purge(ChunkManager* chunk_manager);
bool contains(const void *ptr);
@@ -562,8 +572,8 @@ class SpaceManager : public CHeapObj<mtClass> {
// protects allocations and contains.
Mutex* const _lock;
- // Chunk related size
- size_t _medium_chunk_bunch;
+ // Type of metadata allocated.
+ Metaspace::MetadataType _mdtype;
// List of chunks in use by this SpaceManager. Allocations
// are done from the current chunk. The list is used for deallocating
@@ -571,13 +581,9 @@ class SpaceManager : public CHeapObj<mtClass> {
Metachunk* _chunks_in_use[NumberOfInUseLists];
Metachunk* _current_chunk;
- // Virtual space where allocation comes from.
- VirtualSpaceList* _vs_list;
-
// Number of small chunks to allocate to a manager
// If class space manager, small chunks are unlimited
static uint const _small_chunk_limit;
- bool has_small_chunk_limit() { return !vs_list()->is_class(); }
// Sum of all space in allocated chunks
size_t _allocated_blocks_words;
@@ -606,7 +612,10 @@ class SpaceManager : public CHeapObj<mtClass> {
return (BlockFreelist*) &_block_freelists;
}
- VirtualSpaceList* vs_list() const { return _vs_list; }
+ Metaspace::MetadataType mdtype() { return _mdtype; }
+
+ VirtualSpaceList* vs_list() const { return Metaspace::get_space_list(_mdtype); }
+ ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
Metachunk* current_chunk() const { return _current_chunk; }
void set_current_chunk(Metachunk* v) {
@@ -617,6 +626,7 @@ class SpaceManager : public CHeapObj<mtClass> {
// Add chunk to the list of chunks in use
void add_chunk(Metachunk* v, bool make_current);
+ void retire_current_chunk();
Mutex* lock() const { return _lock; }
@@ -626,18 +636,20 @@ class SpaceManager : public CHeapObj<mtClass> {
void initialize();
public:
- SpaceManager(Mutex* lock,
- VirtualSpaceList* vs_list);
+ SpaceManager(Metaspace::MetadataType mdtype,
+ Mutex* lock);
~SpaceManager();
enum ChunkMultiples {
MediumChunkMultiple = 4
};
+ bool is_class() { return _mdtype == Metaspace::ClassType; }
+
// Accessors
size_t specialized_chunk_size() { return SpecializedChunk; }
- size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; }
- size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
+ size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
+ size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
size_t allocated_blocks_words() const { return _allocated_blocks_words; }
@@ -708,6 +720,21 @@ class SpaceManager : public CHeapObj<mtClass> {
#ifdef ASSERT
void verify_allocated_blocks_words();
#endif
+
+ size_t get_raw_word_size(size_t word_size) {
+ // If only the dictionary is going to be used (i.e., no
+ // indexed free list), then there is a minimum size requirement.
+ // MinChunkSize is a placeholder for the real minimum size JJJ
+ size_t byte_size = word_size * BytesPerWord;
+
+ size_t raw_bytes_size = MAX2(byte_size,
+ Metablock::min_block_byte_size());
+ raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
+ size_t raw_word_size = raw_bytes_size / BytesPerWord;
+ assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
+
+ return raw_word_size;
+ }
};
uint const SpaceManager::_small_chunk_limit = 4;
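get_raw_word_size() rounds a request up to the freelist's minimum block size and then to arena alignment, so the words actually charged can exceed the words asked for. A worked example with illustrative values, assuming 8-byte words, a 16-byte minimum block and 16-byte ARENA_ALIGN:

    // Sketch: raw size for a 1-word request under the assumptions above.
    size_t word_size      = 1;                            // caller requests 1 word
    size_t byte_size      = word_size * 8;                // 8 bytes
    size_t raw_bytes_size = MAX2(byte_size, (size_t)16);  // min block size => 16
    raw_bytes_size        = ARENA_ALIGN(raw_bytes_size);  // already aligned => 16
    size_t raw_word_size  = raw_bytes_size / 8;           // 2 words are charged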
@@ -725,7 +752,7 @@ void VirtualSpaceNode::inc_container_count() {
_container_count++;
assert(_container_count == container_count_slow(),
err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
- "container_count_slow() " SIZE_FORMAT,
+ " container_count_slow() " SIZE_FORMAT,
_container_count, container_count_slow()));
}
@@ -738,7 +765,7 @@ void VirtualSpaceNode::dec_container_count() {
void VirtualSpaceNode::verify_container_count() {
assert(_container_count == container_count_slow(),
err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
- "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
+ " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif
@@ -783,12 +810,25 @@ MetaWord* BlockFreelist::get_block(size_t word_size) {
}
Metablock* free_block =
- dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
+ dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
if (free_block == NULL) {
return NULL;
}
- return (MetaWord*) free_block;
+ const size_t block_size = free_block->size();
+ if (block_size > WasteMultiplier * word_size) {
+ return_block((MetaWord*)free_block, block_size);
+ return NULL;
+ }
+
+ MetaWord* new_block = (MetaWord*)free_block;
+ assert(block_size >= word_size, "Incorrect size of block from freelist");
+ const size_t unused = block_size - word_size;
+ if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
+ return_block(new_block + word_size, unused);
+ }
+
+ return new_block;
}
void BlockFreelist::print_on(outputStream* st) const {
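Switching the dictionary lookup from exactly to atLeast means get_block() can now receive an oversized block, so two guards are added: reject blocks larger than WasteMultiplier (4) times the request, and split off the unused tail when it is big enough to live on the freelist. The accept/split decision, factored out purely for illustration:

    // Sketch (illustrative only): the decision applied to a freelist hit.
    static bool accept_freelist_block(size_t block_size, size_t word_size,
                                      size_t min_tail_words) {
      if (block_size > 4 /* WasteMultiplier */ * word_size) {
        return false;  // too wasteful: the block goes back to the freelist
      }
      // Accepted. A tail of (block_size - word_size) words is split off and
      // returned to the freelist when it is at least min_tail_words long.
      return true;
    }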
@@ -831,9 +871,9 @@ Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
if (!is_available(chunk_word_size)) {
if (TraceMetadataChunkAllocation) {
- tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
+ gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
// Dump some information about the virtual space that is nearly full
- print_on(tty);
+ print_on(gclog_or_tty);
}
return NULL;
}
@@ -854,20 +894,11 @@ bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
if (TraceMetavirtualspaceAllocation && !result) {
gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
"for byte size " SIZE_FORMAT, bytes);
- virtual_space()->print();
+ virtual_space()->print_on(gclog_or_tty);
}
return result;
}
-// Shrink the virtual space (commit more of the reserved space)
-bool VirtualSpaceNode::shrink_by(size_t words) {
- size_t bytes = words * BytesPerWord;
- virtual_space()->shrink_by(bytes);
- return true;
-}
-
-// Add another chunk to the chunk list.
-
Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
assert_lock_strong(SpaceManager::expand_lock());
Metachunk* result = take_from_committed(chunk_word_size);
@@ -877,23 +908,6 @@ Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
return result;
}
-Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
- assert_lock_strong(SpaceManager::expand_lock());
-
- Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
-
- if (new_chunk == NULL) {
- // Only a small part of the virtualspace is committed when first
- // allocated so committing more here can be expected.
- size_t page_size_words = os::vm_page_size() / BytesPerWord;
- size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
- page_size_words);
- expand_by(aligned_expand_vs_by_words, false);
- new_chunk = get_chunk_vs(chunk_word_size);
- }
- return new_chunk;
-}
-
bool VirtualSpaceNode::initialize() {
if (!_rs.is_reserved()) {
@@ -953,13 +967,22 @@ VirtualSpaceList::~VirtualSpaceList() {
}
}
-void VirtualSpaceList::inc_virtual_space_total(size_t v) {
+void VirtualSpaceList::inc_reserved_words(size_t v) {
assert_lock_strong(SpaceManager::expand_lock());
- _virtual_space_total = _virtual_space_total + v;
+ _reserved_words = _reserved_words + v;
}
-void VirtualSpaceList::dec_virtual_space_total(size_t v) {
+void VirtualSpaceList::dec_reserved_words(size_t v) {
assert_lock_strong(SpaceManager::expand_lock());
- _virtual_space_total = _virtual_space_total - v;
+ _reserved_words = _reserved_words - v;
+}
+
+void VirtualSpaceList::inc_committed_words(size_t v) {
+ assert_lock_strong(SpaceManager::expand_lock());
+ _committed_words = _committed_words + v;
+}
+
+void VirtualSpaceList::dec_committed_words(size_t v) {
+ assert_lock_strong(SpaceManager::expand_lock());
+ _committed_words = _committed_words - v;
}
void VirtualSpaceList::inc_virtual_space_count() {
@@ -987,7 +1010,7 @@ void ChunkManager::remove_chunk(Metachunk* chunk) {
// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
-void VirtualSpaceList::purge() {
+void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
assert_lock_strong(SpaceManager::expand_lock());
// Don't use a VirtualSpaceListIterator because this
// list is being changed and a straightforward use of an iterator is not safe.
@@ -1009,8 +1032,9 @@ void VirtualSpaceList::purge() {
prev_vsl->set_next(vsl->next());
}
- vsl->purge(chunk_manager());
- dec_virtual_space_total(vsl->reserved()->word_size());
+ vsl->purge(chunk_manager);
+ dec_reserved_words(vsl->reserved_words());
+ dec_committed_words(vsl->committed_words());
dec_virtual_space_count();
purged_vsl = vsl;
delete vsl;
@@ -1030,49 +1054,16 @@ void VirtualSpaceList::purge() {
#endif
}
-size_t VirtualSpaceList::used_words_sum() {
- size_t allocated_by_vs = 0;
- VirtualSpaceListIterator iter(virtual_space_list());
- while (iter.repeat()) {
- VirtualSpaceNode* vsl = iter.get_next();
- // Sum used region [bottom, top) in each virtualspace
- allocated_by_vs += vsl->used_words_in_vs();
- }
- assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
- err_msg("Total in free chunks " SIZE_FORMAT
- " greater than total from virtual_spaces " SIZE_FORMAT,
- allocated_by_vs, chunk_manager()->free_chunks_total()));
- size_t used =
- allocated_by_vs - chunk_manager()->free_chunks_total();
- return used;
-}
-
-// Space available in all MetadataVirtualspaces allocated
-// for metadata. This is the upper limit on the capacity
-// of chunks allocated out of all the MetadataVirtualspaces.
-size_t VirtualSpaceList::capacity_words_sum() {
- size_t capacity = 0;
- VirtualSpaceListIterator iter(virtual_space_list());
- while (iter.repeat()) {
- VirtualSpaceNode* vsl = iter.get_next();
- capacity += vsl->capacity_words_in_vs();
- }
- return capacity;
-}
-
VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
_is_class(false),
_virtual_space_list(NULL),
_current_virtual_space(NULL),
- _virtual_space_total(0),
+ _reserved_words(0),
+ _committed_words(0),
_virtual_space_count(0) {
MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
bool initialization_succeeded = grow_vs(word_size);
-
- _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
- _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
- _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
assert(initialization_succeeded,
" VirtualSpaceList initialization should not fail");
}
@@ -1081,17 +1072,15 @@ VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
_is_class(true),
_virtual_space_list(NULL),
_current_virtual_space(NULL),
- _virtual_space_total(0),
+ _reserved_words(0),
+ _committed_words(0),
_virtual_space_count(0) {
MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
bool succeeded = class_entry->initialize();
- _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
- _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
- _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
assert(succeeded, " VirtualSpaceList initialization should not fail");
- link_vs(class_entry, rs.size()/BytesPerWord);
+ link_vs(class_entry);
}
size_t VirtualSpaceList::free_bytes() {
@@ -1106,7 +1095,7 @@ bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
}
// Reserve the space
size_t vs_byte_size = vs_word_size * BytesPerWord;
- assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
+ assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
// Allocate the meta virtual space and initialize it.
VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
@@ -1114,44 +1103,53 @@ bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
delete new_entry;
return false;
} else {
+ assert(new_entry->reserved_words() == vs_word_size, "Must be");
// ensure lock-free iteration sees fully initialized node
OrderAccess::storestore();
- link_vs(new_entry, vs_word_size);
+ link_vs(new_entry);
return true;
}
}
-void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
+void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
if (virtual_space_list() == NULL) {
set_virtual_space_list(new_entry);
} else {
current_virtual_space()->set_next(new_entry);
}
set_current_virtual_space(new_entry);
- inc_virtual_space_total(vs_word_size);
+ inc_reserved_words(new_entry->reserved_words());
+ inc_committed_words(new_entry->committed_words());
inc_virtual_space_count();
#ifdef ASSERT
new_entry->mangle();
#endif
if (TraceMetavirtualspaceAllocation && Verbose) {
VirtualSpaceNode* vsl = current_virtual_space();
- vsl->print_on(tty);
+ vsl->print_on(gclog_or_tty);
}
}
+bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
+ size_t before = node->committed_words();
+
+ bool result = node->expand_by(word_size, pre_touch);
+
+ size_t after = node->committed_words();
+
+ // after and before can be the same if the memory was pre-committed.
+ assert(after >= before, "Must be");
+ inc_committed_words(after - before);
+
+ return result;
+}
+
Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
size_t grow_chunks_by_words,
size_t medium_chunk_bunch) {
- // Get a chunk from the chunk freelist
- Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
-
- if (next != NULL) {
- next->container()->inc_container_count();
- } else {
- // Allocate a chunk out of the current virtual space.
- next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
- }
+ // Allocate a chunk out of the current virtual space.
+ Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
if (next == NULL) {
// Not enough room in current virtual space. Try to commit
@@ -1162,18 +1160,27 @@ Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
page_size_words);
bool vs_expanded =
- current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
+ expand_by(current_virtual_space(), aligned_expand_vs_by_words);
if (!vs_expanded) {
// Should the capacity of the metaspaces be expanded for
// this allocation? If it's the virtual space for classes and is
// being used for CompressedHeaders, don't allocate a new virtualspace.
if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
// Get another virtual space.
- size_t grow_vs_words =
- MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
+ size_t allocation_aligned_expand_words =
+ align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
+ size_t grow_vs_words =
+ MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
if (grow_vs(grow_vs_words)) {
// Got it. It's on the list now. Get a chunk from it.
- next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
+ assert(current_virtual_space()->expanded_words() == 0,
+ "New virtual space nodes should not have expanded");
+
+ size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
+ page_size_words);
+ // We probably want to expand by aligned_expand_vs_by_words here.
+ expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
+ next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
}
} else {
// Allocation will fail and induce a GC
@@ -1276,18 +1283,25 @@ size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
- size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
// If the user wants a limit, impose one.
- size_t max_metaspace_size_bytes = MaxMetaspaceSize;
- size_t metaspace_size_bytes = MetaspaceSize;
- if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
- MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) {
- return false;
+ // The reason for someone using this flag is to limit reserved space. So
+ // for non-class virtual space, compare against virtual spaces that are reserved.
+ // For class virtual space, we only compare against the committed space, not
+ // reserved space, because this is a larger space prereserved for compressed
+ // class pointers.
+ if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
+ size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
+ size_t class_allocated = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
+ size_t real_allocated = nonclass_allocated + class_allocated;
+ if (real_allocated >= MaxMetaspaceSize) {
+ return false;
+ }
}
// Class virtual space should always be expanded. Call GC for the other
// metadata virtual space.
- if (vsl == Metaspace::class_space_list()) return true;
+ if (Metaspace::using_class_space() &&
+ (vsl == Metaspace::class_space_list())) return true;
// If this is part of an allocation after a GC, expand
// unconditionally.
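The rewritten MaxMetaspaceSize check counts reserved space for the non-class metaspace but only allocated capacity for the class metaspace, because the class space is pre-reserved at full size for compressed class pointers. A worked example with illustrative numbers:

    // Sketch: MaxMetaspaceSize = 256M (illustrative figures throughout).
    //   non-class reserved           = 200M  -> counted in full
    //   class space reserved         = 1G    -> ignored (pre-reserved mapping)
    //   class allocated capacity     = 40M   -> counted instead
    //   real_allocated = 200M + 40M  = 240M  < 256M, so expansion is allowed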
@@ -1296,11 +1310,12 @@ bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
}
-
// If the capacity is below the minimum capacity, allow the
// expansion. Also set the high-water-mark (capacity_until_GC)
// to that minimum capacity so that a GC will not be induced
// until that minimum capacity is exceeded.
+ size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
+ size_t metaspace_size_bytes = MetaspaceSize;
if (committed_capacity_bytes < metaspace_size_bytes ||
capacity_until_GC() == 0) {
set_capacity_until_GC(metaspace_size_bytes);
@@ -1470,15 +1485,15 @@ void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
if (dummy_chunk == NULL) {
break;
}
- vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
+ sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
sm->sum_count_in_chunks_in_use());
dummy_chunk->print_on(gclog_or_tty);
gclog_or_tty->print_cr(" Free chunks total %d count %d",
- vsl->chunk_manager()->free_chunks_total(),
- vsl->chunk_manager()->free_chunks_count());
+ sm->chunk_manager()->free_chunks_total_words(),
+ sm->chunk_manager()->free_chunks_count());
}
}
} else {
@@ -1534,24 +1549,12 @@ bool Metadebug::test_metadata_failure() {
// ChunkManager methods
-// Verification of _free_chunks_total and _free_chunks_count does not
-// work with the CMS collector because its use of additional locks
-// complicate the mutex deadlock detection but it can still be useful
-// for detecting errors in the chunk accounting with other collectors.
-
-size_t ChunkManager::free_chunks_total() {
-#ifdef ASSERT
- if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
- MutexLockerEx cl(SpaceManager::expand_lock(),
- Mutex::_no_safepoint_check_flag);
- slow_locked_verify_free_chunks_total();
- }
-#endif
+size_t ChunkManager::free_chunks_total_words() {
return _free_chunks_total;
}
-size_t ChunkManager::free_chunks_total_in_bytes() {
- return free_chunks_total() * BytesPerWord;
+size_t ChunkManager::free_chunks_total_bytes() {
+ return free_chunks_total_words() * BytesPerWord;
}
size_t ChunkManager::free_chunks_count() {
@@ -1679,9 +1682,9 @@ void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
assert_lock_strong(SpaceManager::expand_lock());
slow_locked_verify();
if (TraceMetadataChunkAllocation) {
- tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
- PTR_FORMAT " size " SIZE_FORMAT,
- chunk, chunk->word_size());
+ gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
+ PTR_FORMAT " size " SIZE_FORMAT,
+ chunk, chunk->word_size());
}
free_chunks_put(chunk);
}
@@ -1710,9 +1713,9 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
dec_free_chunks_total(chunk->capacity_word_size());
if (TraceMetadataChunkAllocation && Verbose) {
- tty->print_cr("ChunkManager::free_chunks_get: free_list "
- PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
- free_list, chunk, chunk->word_size());
+ gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
+ PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
+ free_list, chunk, chunk->word_size());
}
} else {
chunk = humongous_dictionary()->get_chunk(
@@ -1722,10 +1725,10 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
if (chunk != NULL) {
if (TraceMetadataHumongousAllocation) {
size_t waste = chunk->word_size() - word_size;
- tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
- " for requested size " SIZE_FORMAT
- " waste " SIZE_FORMAT,
- chunk->word_size(), word_size, waste);
+ gclog_or_tty->print_cr("Free list allocate humongous chunk size "
+ SIZE_FORMAT " for requested size " SIZE_FORMAT
+ " waste " SIZE_FORMAT,
+ chunk->word_size(), word_size, waste);
}
// Chunk is being removed from the chunks free list.
dec_free_chunks_total(chunk->capacity_word_size());
@@ -1742,6 +1745,8 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
// work.
chunk->set_is_free(false);
#endif
+ chunk->container()->inc_container_count();
+
slow_locked_verify();
return chunk;
}
@@ -1767,18 +1772,18 @@ Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
} else {
list_count = humongous_dictionary()->total_count();
}
- tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
- PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
- this, chunk, chunk->word_size(), list_count);
- locked_print_free_chunks(tty);
+ gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
+ PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
+ this, chunk, chunk->word_size(), list_count);
+ locked_print_free_chunks(gclog_or_tty);
}
return chunk;
}
-void ChunkManager::print_on(outputStream* out) {
+void ChunkManager::print_on(outputStream* out) const {
if (PrintFLSStatistics != 0) {
- humongous_dictionary()->report_statistics();
+ const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
}
}
@@ -1844,13 +1849,11 @@ size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
Metachunk* chunk = chunks_in_use(index);
    // Count the free space in all the chunks but not the
// current chunk from which allocations are still being done.
- if (chunk != NULL) {
- Metachunk* prev = chunk;
- while (chunk != NULL && chunk != current_chunk()) {
+ while (chunk != NULL) {
+ if (chunk != current_chunk()) {
result += chunk->free_word_size();
- prev = chunk;
- chunk = chunk->next();
}
+ chunk = chunk->next();
}
return result;
}
@@ -1927,8 +1930,8 @@ void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
}
}
- vs_list()->chunk_manager()->locked_print_free_chunks(st);
- vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
+ chunk_manager()->locked_print_free_chunks(st);
+ chunk_manager()->locked_print_sum_free_chunks(st);
}
size_t SpaceManager::calc_chunk_size(size_t word_size) {
@@ -1939,8 +1942,7 @@ size_t SpaceManager::calc_chunk_size(size_t word_size) {
// chunks will be allocated.
size_t chunk_word_size;
if (chunks_in_use(MediumIndex) == NULL &&
- (!has_small_chunk_limit() ||
- sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit)) {
+ sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
chunk_word_size = (size_t) small_chunk_size();
if (word_size + Metachunk::overhead() > small_chunk_size()) {
chunk_word_size = medium_chunk_size();
@@ -2032,9 +2034,9 @@ void SpaceManager::print_on(outputStream* st) const {
}
}
-SpaceManager::SpaceManager(Mutex* lock,
- VirtualSpaceList* vs_list) :
- _vs_list(vs_list),
+SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
+ Mutex* lock) :
+ _mdtype(mdtype),
_allocated_blocks_words(0),
_allocated_chunks_words(0),
_allocated_chunks_count(0),
@@ -2050,27 +2052,27 @@ void SpaceManager::inc_size_metrics(size_t words) {
_allocated_chunks_words = _allocated_chunks_words + words;
_allocated_chunks_count++;
// Global total of capacity in allocated Metachunks
- MetaspaceAux::inc_capacity(words);
+ MetaspaceAux::inc_capacity(mdtype(), words);
// Global total of allocated Metablocks.
// used_words_slow() includes the overhead in each
// Metachunk so include it in the used when the
// Metachunk is first added (so only added once per
// Metachunk).
- MetaspaceAux::inc_used(Metachunk::overhead());
+ MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
}
void SpaceManager::inc_used_metrics(size_t words) {
// Add to the per SpaceManager total
Atomic::add_ptr(words, &_allocated_blocks_words);
// Add to the global total
- MetaspaceAux::inc_used(words);
+ MetaspaceAux::inc_used(mdtype(), words);
}
void SpaceManager::dec_total_from_size_metrics() {
- MetaspaceAux::dec_capacity(allocated_chunks_words());
- MetaspaceAux::dec_used(allocated_blocks_words());
+ MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
+ MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
// Also deduct the overhead per Metachunk
- MetaspaceAux::dec_used(allocated_chunks_count() * Metachunk::overhead());
+ MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
}
void SpaceManager::initialize() {
@@ -2119,9 +2121,7 @@ SpaceManager::~SpaceManager() {
MutexLockerEx fcl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
- ChunkManager* chunk_manager = vs_list()->chunk_manager();
-
- chunk_manager->slow_locked_verify();
+ chunk_manager()->slow_locked_verify();
dec_total_from_size_metrics();
@@ -2135,8 +2135,8 @@ SpaceManager::~SpaceManager() {
// Have to update before the chunks_in_use lists are emptied
// below.
- chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
- sum_count_in_chunks_in_use());
+ chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
+ sum_count_in_chunks_in_use());
// Add all the chunks in use by this space manager
// to the global list of free chunks.
@@ -2151,11 +2151,11 @@ SpaceManager::~SpaceManager() {
chunk_size_name(i));
}
Metachunk* chunks = chunks_in_use(i);
- chunk_manager->return_chunks(i, chunks);
+ chunk_manager()->return_chunks(i, chunks);
set_chunks_in_use(i, NULL);
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print_cr("updated freelist count %d %s",
- chunk_manager->free_chunks(i)->count(),
+ chunk_manager()->free_chunks(i)->count(),
chunk_size_name(i));
}
assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
@@ -2192,16 +2192,16 @@ SpaceManager::~SpaceManager() {
humongous_chunks->word_size(), HumongousChunkGranularity));
Metachunk* next_humongous_chunks = humongous_chunks->next();
humongous_chunks->container()->dec_container_count();
- chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
+ chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
humongous_chunks = next_humongous_chunks;
}
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print_cr("");
gclog_or_tty->print_cr("updated dictionary count %d %s",
- chunk_manager->humongous_dictionary()->total_count(),
+ chunk_manager()->humongous_dictionary()->total_count(),
chunk_size_name(HumongousIndex));
}
- chunk_manager->slow_locked_verify();
+ chunk_manager()->slow_locked_verify();
}
const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
@@ -2240,10 +2240,11 @@ ChunkIndex ChunkManager::list_index(size_t size) {
void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
assert_lock_strong(_lock);
+ size_t raw_word_size = get_raw_word_size(word_size);
size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
- assert(word_size >= min_size,
- err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
- block_freelists()->return_block(p, word_size);
+ assert(raw_word_size >= min_size,
+ err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
+ block_freelists()->return_block(p, raw_word_size);
}
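
The deallocate path above now rounds the request with get_raw_word_size() before the minimum-size assert. A hedged sketch of that rounding; the overhead and alignment constants are assumed example values, not the real HotSpot ones:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    const size_t BytesPerWord         = 8;   // 64-bit word
    const size_t block_overhead_bytes = 8;   // assumed Metablock::overhead()
    const size_t min_block_bytes      = 16;  // assumed minimum block size
    const size_t arena_align          = 16;  // assumed ARENA_ALIGN granularity

    size_t get_raw_word_size(size_t word_size) {
      size_t bytes = word_size * BytesPerWord + block_overhead_bytes;
      bytes = std::max(bytes, min_block_bytes);                // enforce minimum
      bytes = (bytes + arena_align - 1) & ~(arena_align - 1);  // round up
      return bytes / BytesPerWord;
    }

    int main() {
      printf("%zu\n", get_raw_word_size(1));  // 1-word request -> 2 raw words
    }
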
// Adds a chunk to the list of chunks in use.
@@ -2259,6 +2260,7 @@ void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
if (index != HumongousIndex) {
+ retire_current_chunk();
set_current_chunk(new_chunk);
new_chunk->set_next(chunks_in_use(index));
set_chunks_in_use(index, new_chunk);
@@ -2288,23 +2290,35 @@ void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
sum_count_in_chunks_in_use());
new_chunk->print_on(gclog_or_tty);
- if (vs_list() != NULL) {
- vs_list()->chunk_manager()->locked_print_free_chunks(tty);
+ chunk_manager()->locked_print_free_chunks(gclog_or_tty);
+ }
+}
+
+void SpaceManager::retire_current_chunk() {
+ if (current_chunk() != NULL) {
+ size_t remaining_words = current_chunk()->free_word_size();
+ if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
+ block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
+ inc_used_metrics(remaining_words);
}
}
}
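
A minimal sketch of the retire step above, using a toy Chunk in place of Metachunk (an assumption, not the real class): when a new chunk is about to become current, the old chunk's unused tail is donated to the block freelist rather than stranded.

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct Chunk {
      size_t top, end;  // word offsets within the chunk
      size_t free_words() const { return end - top; }
    };

    std::vector<std::pair<size_t, size_t> > block_freelist;  // (offset, words)
    const size_t min_tracked_words = 4;  // assumed freelist minimum

    void retire(Chunk* current) {
      if (current == NULL) return;
      size_t remaining = current->free_words();
      if (remaining >= min_tracked_words) {
        // Carve out the tail and hand it to the freelist so later small
        // requests can be satisfied without asking for a new chunk.
        block_freelist.push_back(std::make_pair(current->top, remaining));
        current->top = current->end;
      }
    }
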
Metachunk* SpaceManager::get_new_chunk(size_t word_size,
size_t grow_chunks_by_words) {
-  Metachunk* next = vs_list()->get_new_chunk(word_size,
-                                  grow_chunks_by_words,
-                                  medium_chunk_bunch());
+  // Get a chunk from the chunk freelist
+  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
+  if (next == NULL) {
+    next = vs_list()->get_new_chunk(word_size,
+                                    grow_chunks_by_words,
+                                    medium_chunk_bunch());
+  }
- if (TraceMetadataHumongousAllocation &&
+ if (TraceMetadataHumongousAllocation && next != NULL &&
SpaceManager::is_humongous(next->word_size())) {
- gclog_or_tty->print_cr(" new humongous chunk word size " PTR_FORMAT,
- next->word_size());
+ gclog_or_tty->print_cr(" new humongous chunk word size "
+ PTR_FORMAT, next->word_size());
}
return next;
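
The reshaped get_new_chunk() above tries the global chunk freelist before growing the virtual space list. A self-contained sketch of that fast-path/slow-path split; the freelist and reservation helpers are toy stand-ins, not the patch's API:

    #include <cstddef>
    #include <map>

    static std::multimap<size_t, void*> freelist;  // word size -> recycled chunk

    void* freelist_take(size_t words) {
      std::multimap<size_t, void*>::iterator it = freelist.lower_bound(words);
      if (it == freelist.end()) return NULL;  // miss
      void* chunk = it->second;
      freelist.erase(it);
      return chunk;
    }

    void* reserve_fresh_chunk(size_t words) {
      return ::operator new(words * 8);  // stand-in for committing new space
    }

    void* get_new_chunk(size_t words) {
      void* chunk = freelist_take(words);    // fast path: reuse a freed chunk
      if (chunk == NULL) {
        chunk = reserve_fresh_chunk(words);  // slow path: grow the space list
      }
      return chunk;
    }
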
@@ -2313,19 +2327,7 @@ Metachunk* SpaceManager::get_new_chunk(size_t word_size,
MetaWord* SpaceManager::allocate(size_t word_size) {
MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
- // If only the dictionary is going to be used (i.e., no
- // indexed free list), then there is a minimum size requirement.
- // MinChunkSize is a placeholder for the real minimum size JJJ
- size_t byte_size = word_size * BytesPerWord;
-
- size_t byte_size_with_overhead = byte_size + Metablock::overhead();
-
- size_t raw_bytes_size = MAX2(byte_size_with_overhead,
- Metablock::min_block_byte_size());
- raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
- size_t raw_word_size = raw_bytes_size / BytesPerWord;
- assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
-
+ size_t raw_word_size = get_raw_word_size(word_size);
BlockFreelist* fl = block_freelists();
MetaWord* p = NULL;
// Allocation from the dictionary is expensive in the sense that
@@ -2371,7 +2373,7 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
if (result == NULL) {
result = grow_and_allocate(word_size);
}
- if (result > 0) {
+ if (result != 0) {
inc_used_metrics(word_size);
assert(result != (MetaWord*) chunks_in_use(MediumIndex),
"Head of the list is being allocated");
@@ -2434,9 +2436,6 @@ void SpaceManager::dump(outputStream* const out) const {
curr = curr->next()) {
out->print("%d) ", i++);
curr->print_on(out);
- if (TraceMetadataChunkAllocation && Verbose) {
- block_freelists()->print_on(out);
- }
curr_total += curr->word_size();
used += curr->used_word_size();
capacity += curr->capacity_word_size();
@@ -2444,6 +2443,10 @@ void SpaceManager::dump(outputStream* const out) const {
}
}
+ if (TraceMetadataChunkAllocation && Verbose) {
+ block_freelists()->print_on(out);
+ }
+
size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
// Free space isn't wasted.
waste -= free;
@@ -2470,54 +2473,52 @@ void SpaceManager::mangle_freed_chunks() {
// MetaspaceAux
-size_t MetaspaceAux::_allocated_capacity_words = 0;
-size_t MetaspaceAux::_allocated_used_words = 0;
+size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
+size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
+
+size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
+ VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+ return list == NULL ? 0 : list->free_bytes();
+}
size_t MetaspaceAux::free_bytes() {
- size_t result = 0;
- if (Metaspace::class_space_list() != NULL) {
- result = result + Metaspace::class_space_list()->free_bytes();
- }
- if (Metaspace::space_list() != NULL) {
- result = result + Metaspace::space_list()->free_bytes();
- }
- return result;
+ return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
}
-void MetaspaceAux::dec_capacity(size_t words) {
+void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
assert_lock_strong(SpaceManager::expand_lock());
- assert(words <= _allocated_capacity_words,
+ assert(words <= allocated_capacity_words(mdtype),
err_msg("About to decrement below 0: words " SIZE_FORMAT
- " is greater than _allocated_capacity_words " SIZE_FORMAT,
- words, _allocated_capacity_words));
- _allocated_capacity_words = _allocated_capacity_words - words;
+ " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
+ words, mdtype, allocated_capacity_words(mdtype)));
+ _allocated_capacity_words[mdtype] -= words;
}
-void MetaspaceAux::inc_capacity(size_t words) {
+void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
assert_lock_strong(SpaceManager::expand_lock());
// Needs to be atomic
- _allocated_capacity_words = _allocated_capacity_words + words;
+ _allocated_capacity_words[mdtype] += words;
}
-void MetaspaceAux::dec_used(size_t words) {
- assert(words <= _allocated_used_words,
+void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
+ assert(words <= allocated_used_words(mdtype),
err_msg("About to decrement below 0: words " SIZE_FORMAT
- " is greater than _allocated_used_words " SIZE_FORMAT,
- words, _allocated_used_words));
+ " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
+ words, mdtype, allocated_used_words(mdtype)));
// For CMS deallocation of the Metaspaces occurs during the
// sweep which is a concurrent phase. Protection by the expand_lock()
// is not enough since allocation is on a per Metaspace basis
// and protected by the Metaspace lock.
jlong minus_words = (jlong) - (jlong) words;
- Atomic::add_ptr(minus_words, &_allocated_used_words);
+ Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
}
-void MetaspaceAux::inc_used(size_t words) {
+void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
// _allocated_used_words tracks allocations for
// each piece of metadata. Those allocations are
// generally done concurrently by different application
// threads so must be done atomically.
- Atomic::add_ptr(words, &_allocated_used_words);
+ Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
}
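
The accounting change above splits the running totals into one slot per metadata type; "used" stays atomic because CMS deallocates concurrently with allocation. A compact standalone illustration (names mirror the patch, the snippet itself is a sketch):

    #include <atomic>
    #include <cstddef>

    enum MetadataType { ClassType = 0, NonClassType = 1, MetadataTypeCount = 2 };

    // Capacity is updated under a lock (cf. expand_lock); used is concurrent.
    static size_t allocated_capacity_words[MetadataTypeCount] = {0, 0};
    static std::atomic<size_t> allocated_used_words[MetadataTypeCount];

    void inc_used(MetadataType t, size_t words) {
      allocated_used_words[t].fetch_add(words, std::memory_order_relaxed);
    }

    size_t allocated_used_words_total() {
      return allocated_used_words[ClassType].load() +
             allocated_used_words[NonClassType].load();
    }
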
size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
@@ -2533,19 +2534,22 @@ size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
return used * BytesPerWord;
}
-size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
+size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
size_t free = 0;
ClassLoaderDataGraphMetaspaceIterator iter;
while (iter.repeat()) {
Metaspace* msp = iter.get_next();
if (msp != NULL) {
- free += msp->free_words(mdtype);
+ free += msp->free_words_slow(mdtype);
}
}
return free * BytesPerWord;
}
size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
+ if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
+ return 0;
+ }
// Don't count the space in the freelists. That space will be
// added to the capacity calculation as needed.
size_t capacity = 0;
@@ -2559,34 +2563,55 @@ size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
return capacity * BytesPerWord;
}
-size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
- size_t reserved = (mdtype == Metaspace::ClassType) ?
- Metaspace::class_space_list()->virtual_space_total() :
- Metaspace::space_list()->virtual_space_total();
- return reserved * BytesPerWord;
+size_t MetaspaceAux::capacity_bytes_slow() {
+#ifdef PRODUCT
+ // Use allocated_capacity_bytes() in PRODUCT instead of this function.
+ guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
+#endif
+ size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
+ size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
+ assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
+ err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
+ " class_capacity + non_class_capacity " SIZE_FORMAT
+ " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
+ allocated_capacity_bytes(), class_capacity + non_class_capacity,
+ class_capacity, non_class_capacity));
+
+ return class_capacity + non_class_capacity;
}
-size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
+size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
+ VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+ return list == NULL ? 0 : list->reserved_bytes();
+}
-size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
- ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
- Metaspace::class_space_list()->chunk_manager() :
- Metaspace::space_list()->chunk_manager();
- chunk->slow_verify();
- return chunk->free_chunks_total();
+size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
+ VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+ return list == NULL ? 0 : list->committed_bytes();
}
-size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
- return free_chunks_total(mdtype) * BytesPerWord;
+size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
+
+size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
+ ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
+ if (chunk_manager == NULL) {
+ return 0;
+ }
+ chunk_manager->slow_verify();
+ return chunk_manager->free_chunks_total_words();
+}
+
+size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
+ return free_chunks_total_words(mdtype) * BytesPerWord;
}
-size_t MetaspaceAux::free_chunks_total() {
- return free_chunks_total(Metaspace::ClassType) +
- free_chunks_total(Metaspace::NonClassType);
+size_t MetaspaceAux::free_chunks_total_words() {
+ return free_chunks_total_words(Metaspace::ClassType) +
+ free_chunks_total_words(Metaspace::NonClassType);
}
-size_t MetaspaceAux::free_chunks_total_in_bytes() {
- return free_chunks_total() * BytesPerWord;
+size_t MetaspaceAux::free_chunks_total_bytes() {
+ return free_chunks_total_words() * BytesPerWord;
}
void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
@@ -2596,15 +2621,15 @@ void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
"->" SIZE_FORMAT
"(" SIZE_FORMAT ")",
prev_metadata_used,
- allocated_capacity_bytes(),
- reserved_in_bytes());
+ allocated_used_bytes(),
+ reserved_bytes());
} else {
gclog_or_tty->print(" " SIZE_FORMAT "K"
"->" SIZE_FORMAT "K"
"(" SIZE_FORMAT "K)",
- prev_metadata_used / K,
- allocated_capacity_bytes() / K,
- reserved_in_bytes()/ K);
+ prev_metadata_used/K,
+ allocated_used_bytes()/K,
+ reserved_bytes()/K);
}
gclog_or_tty->print("]");
@@ -2612,37 +2637,37 @@ void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
// This is printed when PrintGCDetails
void MetaspaceAux::print_on(outputStream* out) {
- Metaspace::MetadataType ct = Metaspace::ClassType;
Metaspace::MetadataType nct = Metaspace::NonClassType;
out->print_cr(" Metaspace total "
SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
" reserved " SIZE_FORMAT "K",
- allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
-#if 0
-// The calls to capacity_bytes_slow() and used_bytes_slow() cause
-// lock ordering assertion failures with some collectors. Do
-// not include this code until the lock ordering is fixed.
- if (PrintGCDetails && Verbose) {
- out->print_cr(" data space "
- SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
- " reserved " SIZE_FORMAT "K",
- capacity_bytes_slow(nct)/K, used_bytes_slow(nct)/K, reserved_in_bytes(nct)/K);
+ allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
+
+ out->print_cr(" data space "
+ SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
+ " reserved " SIZE_FORMAT "K",
+ allocated_capacity_bytes(nct)/K,
+ allocated_used_bytes(nct)/K,
+ reserved_bytes(nct)/K);
+ if (Metaspace::using_class_space()) {
+ Metaspace::MetadataType ct = Metaspace::ClassType;
out->print_cr(" class space "
SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
" reserved " SIZE_FORMAT "K",
- capacity_bytes_slow(ct)/K, used_bytes_slow(ct)/K, reserved_in_bytes(ct)/K);
+ allocated_capacity_bytes(ct)/K,
+ allocated_used_bytes(ct)/K,
+ reserved_bytes(ct)/K);
}
-#endif
}
// Print information for class space and data space separately.
// This is almost the same as above.
void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
- size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
+ size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
size_t capacity_bytes = capacity_bytes_slow(mdtype);
size_t used_bytes = used_bytes_slow(mdtype);
- size_t free_bytes = free_in_bytes(mdtype);
+ size_t free_bytes = free_bytes_slow(mdtype);
size_t used_and_free = used_bytes + free_bytes +
free_chunks_capacity_bytes;
out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
@@ -2658,13 +2683,37 @@ void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
}
-// Print total fragmentation for class and data metaspaces separately
-void MetaspaceAux::print_waste(outputStream* out) {
+// Print total fragmentation for class metaspaces
+void MetaspaceAux::print_class_waste(outputStream* out) {
+ assert(Metaspace::using_class_space(), "class metaspace not used");
+ size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
+ size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
+ ClassLoaderDataGraphMetaspaceIterator iter;
+ while (iter.repeat()) {
+ Metaspace* msp = iter.get_next();
+ if (msp != NULL) {
+ cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
+ cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
+ cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
+ cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
+ cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
+ cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
+ cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
+ }
+ }
+ out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
+ SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
+ SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
+ "large count " SIZE_FORMAT,
+ cls_specialized_count, cls_specialized_waste,
+ cls_small_count, cls_small_waste,
+ cls_medium_count, cls_medium_waste, cls_humongous_count);
+}
- size_t specialized_waste = 0, small_waste = 0, medium_waste = 0, large_waste = 0;
- size_t specialized_count = 0, small_count = 0, medium_count = 0, large_count = 0;
- size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0, cls_large_waste = 0;
- size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_large_count = 0;
+// Print total fragmentation for data and class metaspaces separately
+void MetaspaceAux::print_waste(outputStream* out) {
+ size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
+ size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
ClassLoaderDataGraphMetaspaceIterator iter;
while (iter.repeat()) {
@@ -2676,29 +2725,19 @@ void MetaspaceAux::print_waste(outputStream* out) {
small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
- large_waste += msp->vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
- large_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
-
- cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
- cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
- cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
- cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
- cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
- cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
- cls_large_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(HumongousIndex);
- cls_large_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
+ humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
}
}
out->print_cr("Total fragmentation waste (words) doesn't count free space");
out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
- SIZE_FORMAT " medium(s) " SIZE_FORMAT,
+ SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
+ "large count " SIZE_FORMAT,
specialized_count, specialized_waste, small_count,
- small_waste, medium_count, medium_waste);
- out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
- SIZE_FORMAT " small(s) " SIZE_FORMAT,
- cls_specialized_count, cls_specialized_waste,
- cls_small_count, cls_small_waste);
+ small_waste, medium_count, medium_waste, humongous_count);
+ if (Metaspace::using_class_space()) {
+ print_class_waste(out);
+ }
}
// Dump global metaspace things from the end of ClassLoaderDataGraph
@@ -2710,31 +2749,51 @@ void MetaspaceAux::dump(outputStream* out) {
}
void MetaspaceAux::verify_free_chunks() {
- Metaspace::space_list()->chunk_manager()->verify();
- Metaspace::class_space_list()->chunk_manager()->verify();
+ Metaspace::chunk_manager_metadata()->verify();
+ if (Metaspace::using_class_space()) {
+ Metaspace::chunk_manager_class()->verify();
+ }
}
void MetaspaceAux::verify_capacity() {
#ifdef ASSERT
size_t running_sum_capacity_bytes = allocated_capacity_bytes();
- // For purposes of the running sum of used, verify against capacity
+ // For purposes of the running sum of capacity, verify against capacity
size_t capacity_in_use_bytes = capacity_bytes_slow();
assert(running_sum_capacity_bytes == capacity_in_use_bytes,
err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
" capacity_bytes_slow()" SIZE_FORMAT,
running_sum_capacity_bytes, capacity_in_use_bytes));
+ for (Metaspace::MetadataType i = Metaspace::ClassType;
+       i < Metaspace::MetadataTypeCount;
+ i = (Metaspace::MetadataType)(i + 1)) {
+ size_t capacity_in_use_bytes = capacity_bytes_slow(i);
+ assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
+ err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
+ " capacity_bytes_slow(%u)" SIZE_FORMAT,
+ i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
+ }
#endif
}
void MetaspaceAux::verify_used() {
#ifdef ASSERT
size_t running_sum_used_bytes = allocated_used_bytes();
- // For purposes of the running sum of used, verify against capacity
+ // For purposes of the running sum of used, verify against used
size_t used_in_use_bytes = used_bytes_slow();
assert(allocated_used_bytes() == used_in_use_bytes,
err_msg("allocated_used_bytes() " SIZE_FORMAT
- " used_bytes_slow()()" SIZE_FORMAT,
+ " used_bytes_slow()" SIZE_FORMAT,
allocated_used_bytes(), used_in_use_bytes));
+ for (Metaspace::MetadataType i = Metaspace::ClassType;
+ i < Metaspace:: MetadataTypeCount;
+ i = (Metaspace::MetadataType)(i + 1)) {
+ size_t used_in_use_bytes = used_bytes_slow(i);
+ assert(allocated_used_bytes(i) == used_in_use_bytes,
+ err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
+ " used_bytes_slow(%u)" SIZE_FORMAT,
+ i, allocated_used_bytes(i), i, used_in_use_bytes));
+ }
#endif
}
@@ -2755,17 +2814,137 @@ Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
Metaspace::~Metaspace() {
delete _vsm;
- delete _class_vsm;
+ if (using_class_space()) {
+ delete _class_vsm;
+ }
}
VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;
+ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
+ChunkManager* Metaspace::_chunk_manager_class = NULL;
+
#define VIRTUALSPACEMULTIPLIER 2
+#ifdef _LP64
+void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
+ // Figure out the narrow_klass_base and the narrow_klass_shift. The
+ // narrow_klass_base is the lower of the metaspace base and the cds base
+ // (if cds is enabled). The narrow_klass_shift depends on the distance
+ // between the lower base and higher address.
+ address lower_base;
+ address higher_address;
+ if (UseSharedSpaces) {
+ higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
+ (address)(metaspace_base + class_metaspace_size()));
+ lower_base = MIN2(metaspace_base, cds_base);
+ } else {
+ higher_address = metaspace_base + class_metaspace_size();
+ lower_base = metaspace_base;
+ }
+ Universe::set_narrow_klass_base(lower_base);
+ if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
+ Universe::set_narrow_klass_shift(0);
+ } else {
+ assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
+ Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
+ }
+}
+
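A worked example of the shift selection in set_narrow_klass_base_and_shift() above: if the span from the chosen base to the highest class address fits in an unsigned 32-bit offset the shift is 0, otherwise it is the log2 of the klass alignment (3 here, assuming 8-byte alignment as with LogKlassAlignmentInBytes):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t lower_base     = 0x800000000ULL;             // e.g. class space base
      uint64_t higher_address = lower_base + (3ULL << 30);  // 3 GB span

      unsigned shift = (higher_address - lower_base < (1ULL << 32)) ? 0 : 3;
      printf("shift = %u\n", shift);  // 3 GB < 4 GB, so shift = 0

      // Decoding a compressed klass pointer is then base + (narrow << shift):
      uint32_t narrow = 0x1000;
      uint64_t klass  = lower_base + ((uint64_t)narrow << shift);
      printf("klass  = 0x%llx\n", (unsigned long long)klass);
      return 0;
    }
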
+// Return TRUE if the specified metaspace_base and cds_base are close enough
+// to work with compressed klass pointers.
+bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
+ assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
+ assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
+ address lower_base = MIN2((address)metaspace_base, cds_base);
+ address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
+ (address)(metaspace_base + class_metaspace_size()));
+ return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
+}
+
+// Try to allocate the metaspace at the requested addr.
+void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
+ assert(using_class_space(), "called improperly");
+ assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
+ assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
+ "Metaspace size is too big");
+
+ ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
+ os::vm_allocation_granularity(),
+ false, requested_addr, 0);
+ if (!metaspace_rs.is_reserved()) {
+ if (UseSharedSpaces) {
+ // Keep trying to allocate the metaspace, increasing the requested_addr
+ // by 1GB each time, until we reach an address that will no longer allow
+ // use of CDS with compressed klass pointers.
+ char *addr = requested_addr;
+ while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
+ can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
+ addr = addr + 1*G;
+ metaspace_rs = ReservedSpace(class_metaspace_size(),
+ os::vm_allocation_granularity(), false, addr, 0);
+ }
+ }
+
+ // If no successful allocation then try to allocate the space anywhere. If
+ // that fails then OOM doom. At this point we cannot try allocating the
+ // metaspace as if UseCompressedClassPointers is off because too much
+ // initialization has happened that depends on UseCompressedClassPointers.
+ // So, UseCompressedClassPointers cannot be turned off at this point.
+ if (!metaspace_rs.is_reserved()) {
+ metaspace_rs = ReservedSpace(class_metaspace_size(),
+ os::vm_allocation_granularity(), false);
+ if (!metaspace_rs.is_reserved()) {
+ vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
+ class_metaspace_size()));
+ }
+ }
+ }
+
+ // If we got here then the metaspace got allocated.
+ MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
+
+ // Verify that we can use shared spaces. Otherwise, turn off CDS.
+ if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
+ FileMapInfo::stop_sharing_and_unmap(
+ "Could not allocate metaspace at a compatible address");
+ }
+
+ set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
+ UseSharedSpaces ? (address)cds_base : 0);
+
+ initialize_class_space(metaspace_rs);
+
+ if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
+ gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
+ Universe::narrow_klass_base(), Universe::narrow_klass_shift());
+ gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
+ class_metaspace_size(), metaspace_rs.base(), requested_addr);
+ }
+}
+
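The reservation logic above probes upward in 1 GB steps when the preferred address is taken. A standalone sketch of the loop shape; os_reserve_at() and compatible_with_cds() are assumed stand-ins with toy stubs, not real HotSpot or OS calls:

    #include <cstddef>
    #include <cstdint>

    const uint64_t G = 1ULL << 30;

    void* os_reserve_at(uint64_t addr, size_t size) {
      (void)addr; (void)size;
      return NULL;  // toy stub: pretend every fixed-address attempt fails
    }

    bool compatible_with_cds(uint64_t addr, size_t size) {
      return addr + size < (1ULL << 32);  // toy stand-in for the range check
    }

    void* reserve_class_space(uint64_t requested, size_t size) {
      void* rs = os_reserve_at(requested, size);
      uint64_t addr = requested;
      // Step upward 1 GB at a time while the next address would still keep
      // the CDS region reachable from the compressed-klass base.
      while (rs == NULL && addr + G > addr && compatible_with_cds(addr + G, size)) {
        addr += G;
        rs = os_reserve_at(addr, size);
      }
      if (rs == NULL) {
        rs = os_reserve_at(0, size);  // last resort: let the OS pick the address
      }
      return rs;
    }
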
+// For UseCompressedClassPointers the class space is reserved above the top of
+// the Java heap. The argument passed in is at the base of the compressed space.
+void Metaspace::initialize_class_space(ReservedSpace rs) {
+  // The reserved space size may be bigger because of alignment, especially with UseLargePages.
+ assert(rs.size() >= CompressedClassSpaceSize,
+ err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
+ assert(using_class_space(), "Must be using class space");
+ _class_space_list = new VirtualSpaceList(rs);
+ _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
+}
+
+#endif
+
void Metaspace::global_initialize() {
// Initialize the alignment for shared spaces.
int max_alignment = os::vm_page_size();
+ size_t cds_total = 0;
+
+ set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
+ os::vm_allocation_granularity()));
+
MetaspaceShared::set_max_alignment(max_alignment);
if (DumpSharedSpaces) {
@@ -2777,15 +2956,32 @@ void Metaspace::global_initialize() {
// Initialize with the sum of the shared space sizes. The read-only
// and read write metaspace chunks will be allocated out of this and the
// remainder is the misc code and data chunks.
- size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
- SharedMiscDataSize + SharedMiscCodeSize,
- os::vm_allocation_granularity());
- size_t word_size = total/wordSize;
- _space_list = new VirtualSpaceList(word_size);
+ cds_total = FileMapInfo::shared_spaces_size();
+ _space_list = new VirtualSpaceList(cds_total/wordSize);
+ _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
+
+#ifdef _LP64
+ // Set the compressed klass pointer base so that decoding of these pointers works
+ // properly when creating the shared archive.
+ assert(UseCompressedOops && UseCompressedClassPointers,
+ "UseCompressedOops and UseCompressedClassPointers must be set");
+ Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
+ if (TraceMetavirtualspaceAllocation && Verbose) {
+ gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
+ _space_list->current_virtual_space()->bottom());
+ }
+
+ // Set the shift to zero.
+ assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
+ "CDS region is too large");
+ Universe::set_narrow_klass_shift(0);
+#endif
+
} else {
// If using shared space, open the file that contains the shared space
// and map in the memory before initializing the rest of metaspace (so
// the addresses don't conflict)
+ address cds_address = NULL;
if (UseSharedSpaces) {
FileMapInfo* mapinfo = new FileMapInfo();
memset(mapinfo, 0, sizeof(FileMapInfo));
@@ -2800,8 +2996,22 @@ void Metaspace::global_initialize() {
assert(!mapinfo->is_open() && !UseSharedSpaces,
"archive file not closed or shared spaces not disabled.");
}
+ cds_total = FileMapInfo::shared_spaces_size();
+ cds_address = (address)mapinfo->region_base(0);
}
+#ifdef _LP64
+ // If UseCompressedClassPointers is set then allocate the metaspace area
+ // above the heap and above the CDS area (if it exists).
+ if (using_class_space()) {
+ if (UseSharedSpaces) {
+ allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
+ } else {
+ allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
+ }
+ }
+#endif
+
// Initialize these before initializing the VirtualSpaceList
_first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
_first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
@@ -2809,57 +3019,63 @@ void Metaspace::global_initialize() {
// on the medium chunk list. The next chunk will be small and progress
// from there. This size calculated by -version.
_first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
- (ClassMetaspaceSize/BytesPerWord)*2);
+ (CompressedClassSpaceSize/BytesPerWord)*2);
_first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
// Arbitrarily set the initial virtual space to a multiple
// of the boot class loader size.
size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
// Initialize the list of virtual spaces.
_space_list = new VirtualSpaceList(word_size);
+ _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
}
}
-// For UseCompressedKlassPointers the class space is reserved as a piece of the
-// Java heap because the compression algorithm is the same for each. The
-// argument passed in is at the top of the compressed space
-void Metaspace::initialize_class_space(ReservedSpace rs) {
- // The reserved space size may be bigger because of alignment, esp with UseLargePages
- assert(rs.size() >= ClassMetaspaceSize,
- err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
- _class_space_list = new VirtualSpaceList(rs);
+Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
+ size_t chunk_word_size,
+ size_t chunk_bunch) {
+ // Get a chunk from the chunk freelist
+ Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
+ if (chunk != NULL) {
+ return chunk;
+ }
+
+ return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
}
-void Metaspace::initialize(Mutex* lock,
- MetaspaceType type) {
+void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
assert(space_list() != NULL,
"Metadata VirtualSpaceList has not been initialized");
+ assert(chunk_manager_metadata() != NULL,
+ "Metadata ChunkManager has not been initialized");
- _vsm = new SpaceManager(lock, space_list());
+ _vsm = new SpaceManager(NonClassType, lock);
if (_vsm == NULL) {
return;
}
size_t word_size;
size_t class_word_size;
- vsm()->get_initial_chunk_sizes(type,
- &word_size,
- &class_word_size);
+ vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
+ if (using_class_space()) {
assert(class_space_list() != NULL,
"Class VirtualSpaceList has not been initialized");
+ assert(chunk_manager_class() != NULL,
+ "Class ChunkManager has not been initialized");
- // Allocate SpaceManager for classes.
- _class_vsm = new SpaceManager(lock, class_space_list());
- if (_class_vsm == NULL) {
- return;
+ // Allocate SpaceManager for classes.
+ _class_vsm = new SpaceManager(ClassType, lock);
+ if (_class_vsm == NULL) {
+ return;
+ }
}
MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
// Allocate chunk for metadata objects
- Metachunk* new_chunk =
- space_list()->get_initialization_chunk(word_size,
- vsm()->medium_chunk_bunch());
+ Metachunk* new_chunk = get_initialization_chunk(NonClassType,
+ word_size,
+ vsm()->medium_chunk_bunch());
assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
if (new_chunk != NULL) {
// Add to this manager's list of chunks in use and current_chunk().
@@ -2867,12 +3083,17 @@ void Metaspace::initialize(Mutex* lock,
}
// Allocate chunk for class metadata objects
- Metachunk* class_chunk =
- class_space_list()->get_initialization_chunk(class_word_size,
- class_vsm()->medium_chunk_bunch());
- if (class_chunk != NULL) {
- class_vsm()->add_chunk(class_chunk, true);
+ if (using_class_space()) {
+ Metachunk* class_chunk = get_initialization_chunk(ClassType,
+ class_word_size,
+ class_vsm()->medium_chunk_bunch());
+ if (class_chunk != NULL) {
+ class_vsm()->add_chunk(class_chunk, true);
+ }
}
+
+ _alloc_record_head = NULL;
+ _alloc_record_tail = NULL;
}
size_t Metaspace::align_word_size_up(size_t word_size) {
@@ -2882,7 +3103,8 @@ size_t Metaspace::align_word_size_up(size_t word_size) {
MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
// DumpSharedSpaces doesn't use class metadata area (yet)
- if (mdtype == ClassType && !DumpSharedSpaces) {
+ // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
+ if (mdtype == ClassType && using_class_space()) {
return class_vsm()->allocate(word_size);
} else {
return vsm()->allocate(word_size);
@@ -2913,14 +3135,19 @@ char* Metaspace::bottom() const {
}
size_t Metaspace::used_words_slow(MetadataType mdtype) const {
- // return vsm()->allocated_used_words();
- return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
- vsm()->sum_used_in_chunks_in_use(); // includes overhead!
+ if (mdtype == ClassType) {
+ return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
+ } else {
+ return vsm()->sum_used_in_chunks_in_use(); // includes overhead!
+ }
}
-size_t Metaspace::free_words(MetadataType mdtype) const {
- return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
- vsm()->sum_free_in_chunks_in_use();
+size_t Metaspace::free_words_slow(MetadataType mdtype) const {
+ if (mdtype == ClassType) {
+ return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
+ } else {
+ return vsm()->sum_free_in_chunks_in_use();
+ }
}
// Space capacity in the Metaspace. It includes
@@ -2929,8 +3156,11 @@ size_t Metaspace::free_words(MetadataType mdtype) const {
// in the space available in the dictionary which
// is already counted in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
- return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
- vsm()->sum_capacity_in_chunks_in_use();
+ if (mdtype == ClassType) {
+ return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
+ } else {
+ return vsm()->sum_capacity_in_chunks_in_use();
+ }
}
size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
@@ -2953,8 +3183,8 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
#endif
return;
}
- if (is_class) {
- class_vsm()->deallocate(ptr, word_size);
+ if (is_class && using_class_space()) {
+ class_vsm()->deallocate(ptr, word_size);
} else {
vsm()->deallocate(ptr, word_size);
}
@@ -2968,7 +3198,7 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
#endif
return;
}
- if (is_class) {
+ if (is_class && using_class_space()) {
class_vsm()->deallocate(ptr, word_size);
} else {
vsm()->deallocate(ptr, word_size);
@@ -2977,12 +3207,14 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
}
Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
- bool read_only, MetadataType mdtype, TRAPS) {
+ bool read_only, MetaspaceObj::Type type, TRAPS) {
if (HAS_PENDING_EXCEPTION) {
assert(false, "Should not allocate with exception pending");
return NULL; // caller does a CHECK_NULL too
}
+ MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
+
// SSS: Should we align the allocations and make sure the sizes are aligned.
MetaWord* result = NULL;
@@ -2992,13 +3224,13 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
// with the SymbolTable_lock. Dumping is single threaded for now. We'll have
// to revisit this for application class data sharing.
if (DumpSharedSpaces) {
- if (read_only) {
- result = loader_data->ro_metaspace()->allocate(word_size, NonClassType);
- } else {
- result = loader_data->rw_metaspace()->allocate(word_size, NonClassType);
- }
+ assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
+ Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
+ result = space->allocate(word_size, NonClassType);
if (result == NULL) {
report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
+ } else {
+ space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
}
return Metablock::initialize(result, word_size);
}
@@ -3016,35 +3248,81 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
if (Verbose && TraceMetadataChunkAllocation) {
gclog_or_tty->print_cr("Metaspace allocation failed for size "
SIZE_FORMAT, word_size);
- if (loader_data->metaspace_or_null() != NULL) loader_data->metaspace_or_null()->dump(gclog_or_tty);
+ if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
MetaspaceAux::dump(gclog_or_tty);
}
// -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
- report_java_out_of_memory("Metadata space");
+ const char* space_string = (mdtype == ClassType) ? "Compressed class space" :
+ "Metadata space";
+ report_java_out_of_memory(space_string);
if (JvmtiExport::should_post_resource_exhausted()) {
JvmtiExport::post_resource_exhausted(
JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
- "Metadata space");
+ space_string);
+ }
+ if (mdtype == ClassType) {
+ THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
+ } else {
+ THROW_OOP_0(Universe::out_of_memory_error_metaspace());
}
- THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
}
}
return Metablock::initialize(result, word_size);
}
+void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
+ assert(DumpSharedSpaces, "sanity");
+
+ AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
+ if (_alloc_record_head == NULL) {
+ _alloc_record_head = _alloc_record_tail = rec;
+ } else {
+ _alloc_record_tail->_next = rec;
+ _alloc_record_tail = rec;
+ }
+}
+
+void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
+ assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
+
+ address last_addr = (address)bottom();
+
+ for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
+ address ptr = rec->_ptr;
+ if (last_addr < ptr) {
+ closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
+ }
+ closure->doit(ptr, rec->_type, rec->_byte_size);
+ last_addr = ptr + rec->_byte_size;
+ }
+
+ address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
+ if (last_addr < top) {
+ closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
+ }
+}
+
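The iterate() above walks the allocation records in address order and reports any unrecorded gap as UnknownType. A toy version over integer offsets; the Rec type and the type strings are illustrative, not the patch's types:

    #include <cstdio>

    struct Rec { const char* type; unsigned start, size; Rec* next; };

    void iterate(Rec* head, unsigned bottom, unsigned top) {
      unsigned last = bottom;
      for (Rec* r = head; r != NULL; r = r->next) {
        if (last < r->start) printf("unknown: [%u,%u)\n", last, r->start);
        printf("%s: [%u,%u)\n", r->type, r->start, r->start + r->size);
        last = r->start + r->size;
      }
      if (last < top) printf("unknown: [%u,%u)\n", last, top);
    }

    int main() {
      Rec symbol = { "Symbol", 40, 8, NULL };
      Rec klass  = { "Class", 16, 16, &symbol };  // [0,16) and [32,40) unrecorded
      iterate(&klass, 0, 64);
      return 0;
    }
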
+void Metaspace::purge(MetadataType mdtype) {
+ get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
+}
+
void Metaspace::purge() {
MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
- space_list()->purge();
- class_space_list()->purge();
+ purge(NonClassType);
+ if (using_class_space()) {
+ purge(ClassType);
+ }
}
void Metaspace::print_on(outputStream* out) const {
// Print both class virtual space counts and metaspace.
if (Verbose) {
- vsm()->print_on(out);
+ vsm()->print_on(out);
+ if (using_class_space()) {
class_vsm()->print_on(out);
+ }
}
}
@@ -3058,17 +3336,88 @@ bool Metaspace::contains(const void * ptr) {
// be needed. Note, locking this can cause inversion problems with the
// caller in MetaspaceObj::is_metadata() function.
return space_list()->contains(ptr) ||
- class_space_list()->contains(ptr);
+ (using_class_space() && class_space_list()->contains(ptr));
}
void Metaspace::verify() {
vsm()->verify();
- class_vsm()->verify();
+ if (using_class_space()) {
+ class_vsm()->verify();
+ }
}
void Metaspace::dump(outputStream* const out) const {
out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
vsm()->dump(out);
- out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
- class_vsm()->dump(out);
+ if (using_class_space()) {
+ out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
+ class_vsm()->dump(out);
+ }
}
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestMetaspaceAuxTest : AllStatic {
+ public:
+ static void test_reserved() {
+ size_t reserved = MetaspaceAux::reserved_bytes();
+
+ assert(reserved > 0, "assert");
+
+ size_t committed = MetaspaceAux::committed_bytes();
+ assert(committed <= reserved, "assert");
+
+ size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
+ assert(reserved_metadata > 0, "assert");
+ assert(reserved_metadata <= reserved, "assert");
+
+ if (UseCompressedClassPointers) {
+ size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
+ assert(reserved_class > 0, "assert");
+ assert(reserved_class < reserved, "assert");
+ }
+ }
+
+ static void test_committed() {
+ size_t committed = MetaspaceAux::committed_bytes();
+
+ assert(committed > 0, "assert");
+
+ size_t reserved = MetaspaceAux::reserved_bytes();
+ assert(committed <= reserved, "assert");
+
+ size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
+ assert(committed_metadata > 0, "assert");
+ assert(committed_metadata <= committed, "assert");
+
+ if (UseCompressedClassPointers) {
+ size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+ assert(committed_class > 0, "assert");
+ assert(committed_class < committed, "assert");
+ }
+ }
+
+ static void test_virtual_space_list_large_chunk() {
+ VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
+ MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+    // Use a size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
+ // vm_allocation_granularity aligned on Windows.
+ size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
+ large_size += (os::vm_page_size()/BytesPerWord);
+ vs_list->get_new_chunk(large_size, large_size, 0);
+ }
+
+ static void test() {
+ test_reserved();
+ test_committed();
+ test_virtual_space_list_large_chunk();
+ }
+};
+
+void TestMetaspaceAux_test() {
+ TestMetaspaceAuxTest::test();
+}
+
+#endif
diff --git a/src/share/vm/memory/metaspace.hpp b/src/share/vm/memory/metaspace.hpp
index 1108c79fe..29c07e151 100644
--- a/src/share/vm/memory/metaspace.hpp
+++ b/src/share/vm/memory/metaspace.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,12 +56,15 @@
// +-------------------+
//
+class ChunkManager;
class ClassLoaderData;
class Metablock;
+class Metachunk;
class MetaWord;
class Mutex;
class outputStream;
class SpaceManager;
+class VirtualSpaceList;
// Metaspaces each have a SpaceManager and allocations
// are done by the SpaceManager. Allocations are done
@@ -76,8 +79,6 @@ class SpaceManager;
// allocate() method returns a block for use as a
// quantum of metadata.
-class VirtualSpaceList;
-
class Metaspace : public CHeapObj<mtClass> {
friend class VMStructs;
friend class SpaceManager;
@@ -86,7 +87,10 @@ class Metaspace : public CHeapObj<mtClass> {
friend class MetaspaceAux;
public:
- enum MetadataType {ClassType, NonClassType};
+ enum MetadataType {ClassType = 0,
+ NonClassType = ClassType + 1,
+ MetadataTypeCount = ClassType + 2
+ };
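
The trailing MetadataTypeCount is a count sentinel: it sizes the per-type arrays in MetaspaceAux and bounds loops over the types, as the new verify code does. A standalone illustration (a sketch, not the header's code):

    #include <cstddef>
    #include <cstdio>

    enum MetadataType { ClassType = 0, NonClassType = 1, MetadataTypeCount = 2 };

    static size_t used_words[MetadataTypeCount] = { 0, 0 };  // one slot per type

    int main() {
      for (int i = ClassType; i < MetadataTypeCount; i++) {
        printf("type %d used %zu words\n", i, used_words[i]);
      }
      return 0;
    }
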
enum MetaspaceType {
StandardMetaspaceType,
BootMetaspaceType,
@@ -99,9 +103,23 @@ class Metaspace : public CHeapObj<mtClass> {
private:
void initialize(Mutex* lock, MetaspaceType type);
+ Metachunk* get_initialization_chunk(MetadataType mdtype,
+ size_t chunk_word_size,
+ size_t chunk_bunch);
+
// Align up the word size to the allocation word size
static size_t align_word_size_up(size_t);
+ // Aligned size of the metaspace.
+ static size_t _class_metaspace_size;
+
+ static size_t class_metaspace_size() {
+ return _class_metaspace_size;
+ }
+ static void set_class_metaspace_size(size_t metaspace_size) {
+ _class_metaspace_size = metaspace_size;
+ }
+
static size_t _first_chunk_word_size;
static size_t _first_class_chunk_word_size;
@@ -121,8 +139,52 @@ class Metaspace : public CHeapObj<mtClass> {
static VirtualSpaceList* _space_list;
static VirtualSpaceList* _class_space_list;
+ static ChunkManager* _chunk_manager_metadata;
+ static ChunkManager* _chunk_manager_class;
+
+ public:
static VirtualSpaceList* space_list() { return _space_list; }
static VirtualSpaceList* class_space_list() { return _class_space_list; }
+ static VirtualSpaceList* get_space_list(MetadataType mdtype) {
+ assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype");
+ return mdtype == ClassType ? class_space_list() : space_list();
+ }
+
+ static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; }
+ static ChunkManager* chunk_manager_class() { return _chunk_manager_class; }
+ static ChunkManager* get_chunk_manager(MetadataType mdtype) {
+ assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype");
+ return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
+ }
+
+ private:
+ // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
+ // maintain a single list for now.
+ void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
+
+#ifdef _LP64
+ static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);
+
+ // Returns true if can use CDS with metaspace allocated as specified address.
+ static bool can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base);
+
+ static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base);
+
+ static void initialize_class_space(ReservedSpace rs);
+#endif
+
+ class AllocRecord : public CHeapObj<mtClass> {
+ public:
+ AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size)
+ : _next(NULL), _ptr(ptr), _type(type), _byte_size(byte_size) {}
+ AllocRecord *_next;
+ address _ptr;
+ MetaspaceObj::Type _type;
+ int _byte_size;
+ };
+
+ AllocRecord * _alloc_record_head;
+ AllocRecord * _alloc_record_tail;
public:
@@ -131,73 +193,78 @@ class Metaspace : public CHeapObj<mtClass> {
// Initialize globals for Metaspace
static void global_initialize();
- static void initialize_class_space(ReservedSpace rs);
static size_t first_chunk_word_size() { return _first_chunk_word_size; }
static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
char* bottom() const;
size_t used_words_slow(MetadataType mdtype) const;
- size_t free_words(MetadataType mdtype) const;
+ size_t free_words_slow(MetadataType mdtype) const;
size_t capacity_words_slow(MetadataType mdtype) const;
- size_t waste_words(MetadataType mdtype) const;
size_t used_bytes_slow(MetadataType mdtype) const;
size_t capacity_bytes_slow(MetadataType mdtype) const;
- static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
- bool read_only, MetadataType mdtype, TRAPS);
+ static Metablock* allocate(ClassLoaderData* loader_data, size_t word_size,
+ bool read_only, MetaspaceObj::Type type, TRAPS);
void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
MetaWord* expand_and_allocate(size_t size,
MetadataType mdtype);
- static bool is_initialized() { return _class_space_list != NULL; }
-
static bool contains(const void *ptr);
void dump(outputStream* const out) const;
// Free empty virtualspaces
+ static void purge(MetadataType mdtype);
static void purge();
void print_on(outputStream* st) const;
// Debugging support
void verify();
+
+ class AllocRecordClosure : public StackObj {
+ public:
+ virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) = 0;
+ };
+
+ void iterate(AllocRecordClosure *closure);
+
+ // Return TRUE only if UseCompressedClassPointers is True and DumpSharedSpaces is False.
+ static bool using_class_space() {
+ return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
+ }
+
};
class MetaspaceAux : AllStatic {
-
- // Statistics for class space and data space in metaspace.
+ static size_t free_chunks_total_words(Metaspace::MetadataType mdtype);
// These methods iterate over the classloader data graph
// for the given Metaspace type. These are slow.
static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
- static size_t free_in_bytes(Metaspace::MetadataType mdtype);
+ static size_t free_bytes_slow(Metaspace::MetadataType mdtype);
static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
+ static size_t capacity_bytes_slow();
- // Iterates over the virtual space list.
- static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
-
- static size_t free_chunks_total(Metaspace::MetadataType mdtype);
- static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
-
- public:
// Running sum of space in all Metachunks that has been
// allocated to a Metaspace. This is used instead of
- // iterating over all the classloaders
- static size_t _allocated_capacity_words;
+ // iterating over all the classloaders. One for each
+ // type of Metadata.
+ static size_t _allocated_capacity_words[Metaspace::MetadataTypeCount];
// Running sum of space in all Metachunks that
- // are being used for metadata.
- static size_t _allocated_used_words;
+ // are being used for metadata. One for each
+ // type of Metadata.
+ static size_t _allocated_used_words[Metaspace::MetadataTypeCount];
public:
// Decrement and increment _allocated_capacity_words
- static void dec_capacity(size_t words);
- static void inc_capacity(size_t words);
+ static void dec_capacity(Metaspace::MetadataType type, size_t words);
+ static void inc_capacity(Metaspace::MetadataType type, size_t words);
// Decrement and increment _allocated_used_words
- static void dec_used(size_t words);
- static void inc_used(size_t words);
+ static void dec_used(Metaspace::MetadataType type, size_t words);
+ static void inc_used(Metaspace::MetadataType type, size_t words);
// Total of space allocated to metadata in all Metaspaces.
// This sums the space used in each Metachunk by
@@ -208,56 +275,64 @@ class MetaspaceAux : AllStatic {
}
// Used by MetaspaceCounters
- static size_t free_chunks_total();
- static size_t free_chunks_total_in_bytes();
+ static size_t free_chunks_total_words();
+ static size_t free_chunks_total_bytes();
+ static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
+ static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
+ return _allocated_capacity_words[mdtype];
+ }
static size_t allocated_capacity_words() {
- return _allocated_capacity_words;
+ return allocated_capacity_words(Metaspace::NonClassType) +
+ allocated_capacity_words(Metaspace::ClassType);
+ }
+ static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
+ return allocated_capacity_words(mdtype) * BytesPerWord;
}
static size_t allocated_capacity_bytes() {
- return _allocated_capacity_words * BytesPerWord;
+ return allocated_capacity_words() * BytesPerWord;
}
+ static size_t allocated_used_words(Metaspace::MetadataType mdtype) {
+ return _allocated_used_words[mdtype];
+ }
static size_t allocated_used_words() {
- return _allocated_used_words;
+ return allocated_used_words(Metaspace::NonClassType) +
+ allocated_used_words(Metaspace::ClassType);
+ }
+ static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
+ return allocated_used_words(mdtype) * BytesPerWord;
}
static size_t allocated_used_bytes() {
- return _allocated_used_words * BytesPerWord;
+ return allocated_used_words() * BytesPerWord;
}
static size_t free_bytes();
+ static size_t free_bytes(Metaspace::MetadataType mdtype);
- // Total capacity in all Metaspaces
- static size_t capacity_bytes_slow() {
-#ifdef PRODUCT
- // Use allocated_capacity_bytes() in PRODUCT instead of this function.
- guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
-#endif
- size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
- size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
- assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
- err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
- " class_capacity + non_class_capacity " SIZE_FORMAT
- " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
- allocated_capacity_bytes(), class_capacity + non_class_capacity,
- class_capacity, non_class_capacity));
-
- return class_capacity + non_class_capacity;
+ static size_t reserved_bytes(Metaspace::MetadataType mdtype);
+ static size_t reserved_bytes() {
+ return reserved_bytes(Metaspace::ClassType) +
+ reserved_bytes(Metaspace::NonClassType);
}
- // Total space reserved in all Metaspaces
- static size_t reserved_in_bytes() {
- return reserved_in_bytes(Metaspace::ClassType) +
- reserved_in_bytes(Metaspace::NonClassType);
+ static size_t committed_bytes(Metaspace::MetadataType mdtype);
+ static size_t committed_bytes() {
+ return committed_bytes(Metaspace::ClassType) +
+ committed_bytes(Metaspace::NonClassType);
}
- static size_t min_chunk_size();
+ static size_t min_chunk_size_words();
+ static size_t min_chunk_size_bytes() {
+ return min_chunk_size_words() * BytesPerWord;
+ }
// Print change in used metadata.
static void print_metaspace_change(size_t prev_metadata_used);
static void print_on(outputStream * out);
static void print_on(outputStream * out, Metaspace::MetadataType mdtype);
+ static void print_class_waste(outputStream* out);
static void print_waste(outputStream* out);
static void dump(outputStream* out);
static void verify_free_chunks();
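
The MetaspaceAux change above replaces two scalar running sums with one slot per MetadataType, indexed directly by the enum value. A minimal standalone sketch of that enum-indexed accounting pattern (names here are illustrative, not HotSpot's):

    #include <cstddef>

    enum MetadataType { ClassType, NonClassType, MetadataTypeCount };

    // One running sum per metadata type; totals are computed on demand.
    static size_t g_capacity_words[MetadataTypeCount] = { 0, 0 };

    static void inc_capacity(MetadataType t, size_t words) {
      g_capacity_words[t] += words;
    }
    static size_t capacity_words(MetadataType t) {
      return g_capacity_words[t];
    }
    static size_t capacity_words_total() {
      return capacity_words(ClassType) + capacity_words(NonClassType);
    }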
diff --git a/src/share/vm/memory/metaspaceCounters.cpp b/src/share/vm/memory/metaspaceCounters.cpp
index b2be29bca..60e26b8c7 100644
--- a/src/share/vm/memory/metaspaceCounters.cpp
+++ b/src/share/vm/memory/metaspaceCounters.cpp
@@ -25,109 +25,109 @@
#include "precompiled.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"
-MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL;
+class MetaspacePerfCounters: public CHeapObj<mtInternal> {
+ friend class VMStructs;
+ PerfVariable* _capacity;
+ PerfVariable* _used;
+ PerfVariable* _max_capacity;
-size_t MetaspaceCounters::calc_total_capacity() {
- // The total capacity is the sum of
- // 1) capacity of Metachunks in use by all Metaspaces
- // 2) unused space at the end of each Metachunk
- // 3) space in the freelist
- size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
- + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
- return total_capacity;
-}
+ PerfVariable* create_variable(const char *ns, const char *name, size_t value, TRAPS) {
+ const char *path = PerfDataManager::counter_name(ns, name);
+ return PerfDataManager::create_variable(SUN_GC, path, PerfData::U_Bytes, value, THREAD);
+ }
-MetaspaceCounters::MetaspaceCounters() :
- _capacity(NULL),
- _used(NULL),
- _max_capacity(NULL) {
- if (UsePerfData) {
- size_t min_capacity = MetaspaceAux::min_chunk_size();
- size_t max_capacity = MetaspaceAux::reserved_in_bytes();
- size_t curr_capacity = calc_total_capacity();
- size_t used = MetaspaceAux::allocated_used_bytes();
+ void create_constant(const char *ns, const char *name, size_t value, TRAPS) {
+ const char *path = PerfDataManager::counter_name(ns, name);
+ PerfDataManager::create_constant(SUN_GC, path, PerfData::U_Bytes, value, THREAD);
+ }
+
+ public:
+ MetaspacePerfCounters(const char* ns, size_t min_capacity, size_t curr_capacity, size_t max_capacity, size_t used) {
+ EXCEPTION_MARK;
+ ResourceMark rm;
- initialize(min_capacity, max_capacity, curr_capacity, used);
+ create_constant(ns, "minCapacity", min_capacity, THREAD);
+ _capacity = create_variable(ns, "capacity", curr_capacity, THREAD);
+ _max_capacity = create_variable(ns, "maxCapacity", max_capacity, THREAD);
+ _used = create_variable(ns, "used", used, THREAD);
}
-}
-static PerfVariable* create_ms_variable(const char *ns,
- const char *name,
- size_t value,
- TRAPS) {
- const char *path = PerfDataManager::counter_name(ns, name);
- PerfVariable *result =
- PerfDataManager::create_variable(SUN_GC, path, PerfData::U_Bytes, value,
- CHECK_NULL);
- return result;
+ void update(size_t capacity, size_t max_capacity, size_t used) {
+ _capacity->set_value(capacity);
+ _max_capacity->set_value(max_capacity);
+ _used->set_value(used);
+ }
+};
+
+MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
+
+size_t MetaspaceCounters::used() {
+ return MetaspaceAux::allocated_used_bytes();
}
-static void create_ms_constant(const char *ns,
- const char *name,
- size_t value,
- TRAPS) {
- const char *path = PerfDataManager::counter_name(ns, name);
- PerfDataManager::create_constant(SUN_GC, path, PerfData::U_Bytes, value, CHECK);
+size_t MetaspaceCounters::capacity() {
+ return MetaspaceAux::committed_bytes();
}
-void MetaspaceCounters::initialize(size_t min_capacity,
- size_t max_capacity,
- size_t curr_capacity,
- size_t used) {
+size_t MetaspaceCounters::max_capacity() {
+ return MetaspaceAux::reserved_bytes();
+}
+void MetaspaceCounters::initialize_performance_counters() {
if (UsePerfData) {
- EXCEPTION_MARK;
- ResourceMark rm;
-
- const char *ms = "metaspace";
+ assert(_perf_counters == NULL, "Should only be initialized once");
- create_ms_constant(ms, "minCapacity", min_capacity, CHECK);
- _max_capacity = create_ms_variable(ms, "maxCapacity", max_capacity, CHECK);
- _capacity = create_ms_variable(ms, "capacity", curr_capacity, CHECK);
- _used = create_ms_variable(ms, "used", used, CHECK);
+ size_t min_capacity = 0;
+ _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity,
+ capacity(), max_capacity(), used());
}
}
-void MetaspaceCounters::update_capacity() {
- assert(UsePerfData, "Should not be called unless being used");
- size_t total_capacity = calc_total_capacity();
- _capacity->set_value(total_capacity);
+void MetaspaceCounters::update_performance_counters() {
+ if (UsePerfData) {
+ assert(_perf_counters != NULL, "Should be initialized");
+
+ _perf_counters->update(capacity(), max_capacity(), used());
+ }
}
-void MetaspaceCounters::update_used() {
- assert(UsePerfData, "Should not be called unless being used");
- size_t used_in_bytes = MetaspaceAux::allocated_used_bytes();
- _used->set_value(used_in_bytes);
+MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
+
+size_t CompressedClassSpaceCounters::used() {
+ return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
}
-void MetaspaceCounters::update_max_capacity() {
- assert(UsePerfData, "Should not be called unless being used");
- assert(_max_capacity != NULL, "Should be initialized");
- size_t reserved_in_bytes = MetaspaceAux::reserved_in_bytes();
- _max_capacity->set_value(reserved_in_bytes);
+size_t CompressedClassSpaceCounters::capacity() {
+ return MetaspaceAux::committed_bytes(Metaspace::ClassType);
}
-void MetaspaceCounters::update_all() {
- if (UsePerfData) {
- update_used();
- update_capacity();
- update_max_capacity();
- }
+size_t CompressedClassSpaceCounters::max_capacity() {
+ return MetaspaceAux::reserved_bytes(Metaspace::ClassType);
}
-void MetaspaceCounters::initialize_performance_counters() {
- if (UsePerfData) {
- assert(_metaspace_counters == NULL, "Should only be initialized once");
- _metaspace_counters = new MetaspaceCounters();
+void CompressedClassSpaceCounters::update_performance_counters() {
+ if (UsePerfData && UseCompressedClassPointers) {
+ assert(_perf_counters != NULL, "Should be initialized");
+
+ _perf_counters->update(capacity(), max_capacity(), used());
}
}
-void MetaspaceCounters::update_performance_counters() {
+void CompressedClassSpaceCounters::initialize_performance_counters() {
if (UsePerfData) {
- assert(_metaspace_counters != NULL, "Should be initialized");
- _metaspace_counters->update_all();
+ assert(_perf_counters == NULL, "Should only be initialized once");
+ const char* ns = "compressedclassspace";
+
+ if (UseCompressedClassPointers) {
+ size_t min_capacity = 0;
+ _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity(),
+ max_capacity(), used());
+ } else {
+ _perf_counters = new MetaspacePerfCounters(ns, 0, 0, 0, 0);
+ }
}
}
-
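
The rewrite above moves the perf counters into a create-once, update-per-GC wrapper shared by the metaspace and compressed-class-space counters. A rough standalone sketch of that pattern, with PerfVar standing in for HotSpot's PerfVariable (all names illustrative):

    #include <cassert>
    #include <cstddef>

    struct PerfVar {
      size_t value;
      void set_value(size_t v) { value = v; }
    };

    class CounterGroup {
      PerfVar _capacity, _used, _max_capacity;
     public:
      CounterGroup(size_t cap, size_t max_cap, size_t used) { update(cap, max_cap, used); }
      void update(size_t cap, size_t max_cap, size_t used) {
        _capacity.set_value(cap);
        _max_capacity.set_value(max_cap);
        _used.set_value(used);
      }
    };

    static CounterGroup* g_counters = 0;

    void initialize_counters() {   // once, at VM start
      assert(g_counters == 0 && "should only be initialized once");
      g_counters = new CounterGroup(0, 0, 0);
    }

    void update_counters() {       // after every GC
      assert(g_counters != 0 && "should be initialized");
      g_counters->update(1, 2, 1);
    }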
diff --git a/src/share/vm/memory/metaspaceCounters.hpp b/src/share/vm/memory/metaspaceCounters.hpp
index 46a930888..0fa991291 100644
--- a/src/share/vm/memory/metaspaceCounters.hpp
+++ b/src/share/vm/memory/metaspaceCounters.hpp
@@ -25,31 +25,30 @@
#ifndef SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
#define SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
-#include "runtime/perfData.hpp"
-
-class MetaspaceCounters: public CHeapObj<mtClass> {
- friend class VMStructs;
- PerfVariable* _capacity;
- PerfVariable* _used;
- PerfVariable* _max_capacity;
- static MetaspaceCounters* _metaspace_counters;
- void initialize(size_t min_capacity,
- size_t max_capacity,
- size_t curr_capacity,
- size_t used);
- size_t calc_total_capacity();
- public:
- MetaspaceCounters();
- ~MetaspaceCounters();
+#include "memory/allocation.hpp"
- void update_capacity();
- void update_used();
- void update_max_capacity();
+class MetaspacePerfCounters;
- void update_all();
+class MetaspaceCounters: public AllStatic {
+ static MetaspacePerfCounters* _perf_counters;
+ static size_t used();
+ static size_t capacity();
+ static size_t max_capacity();
+ public:
static void initialize_performance_counters();
static void update_performance_counters();
+};
+
+class CompressedClassSpaceCounters: public AllStatic {
+ static MetaspacePerfCounters* _perf_counters;
+ static size_t used();
+ static size_t capacity();
+ static size_t max_capacity();
+ public:
+ static void initialize_performance_counters();
+ static void update_performance_counters();
};
+
#endif // SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
diff --git a/src/share/vm/memory/metaspaceShared.cpp b/src/share/vm/memory/metaspaceShared.cpp
index 5f0f152e9..ef51c9266 100644
--- a/src/share/vm/memory/metaspaceShared.cpp
+++ b/src/share/vm/memory/metaspaceShared.cpp
@@ -52,7 +52,6 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
int tag = 0;
soc->do_tag(--tag);
- assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive");
// Verify the sizes of various metadata in the system.
soc->do_tag(sizeof(Method));
soc->do_tag(sizeof(ConstMethod));
@@ -104,9 +103,10 @@ static void calculate_fingerprints() {
if (k->oop_is_instance()) {
InstanceKlass* ik = InstanceKlass::cast(k);
for (int i = 0; i < ik->methods()->length(); i++) {
- ResourceMark rm;
Method* m = ik->methods()->at(i);
- (new Fingerprinter(m))->fingerprint();
+ Fingerprinter fp(m);
+ // This call has the side effect of setting the method's fingerprint field.
+ fp.fingerprint();
}
}
}
@@ -243,6 +243,147 @@ public:
bool reading() const { return false; }
};
+// This is for dumping detailed statistics for the allocations
+// in the shared spaces.
+class DumpAllocClosure : public Metaspace::AllocRecordClosure {
+public:
+
+ // Here's a poor man's enum inheritance.
+#define SHAREDSPACE_OBJ_TYPES_DO(f) \
+ METASPACE_OBJ_TYPES_DO(f) \
+ f(SymbolHashentry) \
+ f(SymbolBuckets) \
+ f(Other)
+
+#define SHAREDSPACE_OBJ_TYPE_DECLARE(name) name ## Type,
+#define SHAREDSPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
+
+ enum Type {
+ // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
+ SHAREDSPACE_OBJ_TYPES_DO(SHAREDSPACE_OBJ_TYPE_DECLARE)
+ _number_of_types
+ };
+
+ static const char * type_name(Type type) {
+ switch(type) {
+ SHAREDSPACE_OBJ_TYPES_DO(SHAREDSPACE_OBJ_TYPE_NAME_CASE)
+ default:
+ ShouldNotReachHere();
+ return NULL;
+ }
+ }
+
+public:
+ enum {
+ RO = 0,
+ RW = 1
+ };
+
+ int _counts[2][_number_of_types];
+ int _bytes [2][_number_of_types];
+ int _which;
+
+ DumpAllocClosure() {
+ memset(_counts, 0, sizeof(_counts));
+ memset(_bytes, 0, sizeof(_bytes));
+ }
+
+ void iterate_metaspace(Metaspace* space, int which) {
+ assert(which == RO || which == RW, "sanity");
+ _which = which;
+ space->iterate(this);
+ }
+
+ virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
+ assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
+ _counts[_which][type] ++;
+ _bytes [_which][type] += byte_size;
+ }
+
+ void dump_stats(int ro_all, int rw_all, int md_all, int mc_all);
+};
+
+void DumpAllocClosure::dump_stats(int ro_all, int rw_all, int md_all, int mc_all) {
+ rw_all += (md_all + mc_all); // md and mc are all mapped Read/Write
+ int other_bytes = md_all + mc_all;
+
+ // Calculate size of data that was not allocated by Metaspace::allocate()
+ int symbol_count = _counts[RO][MetaspaceObj::SymbolType];
+ int symhash_bytes = symbol_count * sizeof (HashtableEntry<Symbol*, mtSymbol>);
+ int symbuck_count = SymbolTable::the_table()->table_size();
+ int symbuck_bytes = symbuck_count * sizeof(HashtableBucket<mtSymbol>);
+
+ _counts[RW][SymbolHashentryType] = symbol_count;
+ _bytes [RW][SymbolHashentryType] = symhash_bytes;
+ other_bytes -= symhash_bytes;
+
+ _counts[RW][SymbolBucketsType] = symbuck_count;
+ _bytes [RW][SymbolBucketsType] = symbuck_bytes;
+ other_bytes -= symbuck_bytes;
+
+ // TODO: count things like dictionary, vtable, etc
+ _bytes[RW][OtherType] = other_bytes;
+
+ // prevent divide-by-zero
+ if (ro_all < 1) {
+ ro_all = 1;
+ }
+ if (rw_all < 1) {
+ rw_all = 1;
+ }
+
+ int all_ro_count = 0;
+ int all_ro_bytes = 0;
+ int all_rw_count = 0;
+ int all_rw_bytes = 0;
+
+ const char *fmt = "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f";
+ const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
+ const char *hdr = " ro_cnt ro_bytes % | rw_cnt rw_bytes % | all_cnt all_bytes %";
+
+ tty->print_cr("Detailed metadata info (rw includes md and mc):");
+ tty->print_cr(hdr);
+ tty->print_cr(sep);
+ for (int type = 0; type < int(_number_of_types); type ++) {
+ const char *name = type_name((Type)type);
+ int ro_count = _counts[RO][type];
+ int ro_bytes = _bytes [RO][type];
+ int rw_count = _counts[RW][type];
+ int rw_bytes = _bytes [RW][type];
+ int count = ro_count + rw_count;
+ int bytes = ro_bytes + rw_bytes;
+
+ double ro_perc = 100.0 * double(ro_bytes) / double(ro_all);
+ double rw_perc = 100.0 * double(rw_bytes) / double(rw_all);
+ double perc = 100.0 * double(bytes) / double(ro_all + rw_all);
+
+ tty->print_cr(fmt, name,
+ ro_count, ro_bytes, ro_perc,
+ rw_count, rw_bytes, rw_perc,
+ count, bytes, perc);
+
+ all_ro_count += ro_count;
+ all_ro_bytes += ro_bytes;
+ all_rw_count += rw_count;
+ all_rw_bytes += rw_bytes;
+ }
+
+ int all_count = all_ro_count + all_rw_count;
+ int all_bytes = all_ro_bytes + all_rw_bytes;
+
+ double all_ro_perc = 100.0 * double(all_ro_bytes) / double(ro_all);
+ double all_rw_perc = 100.0 * double(all_rw_bytes) / double(rw_all);
+ double all_perc = 100.0 * double(all_bytes) / double(ro_all + rw_all);
+
+ tty->print_cr(sep);
+ tty->print_cr(fmt, "Total",
+ all_ro_count, all_ro_bytes, all_ro_perc,
+ all_rw_count, all_rw_bytes, all_rw_perc,
+ all_count, all_bytes, all_perc);
+
+ assert(all_ro_bytes == ro_all, "everything should have been counted");
+ assert(all_rw_bytes == rw_all, "everything should have been counted");
+}
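
The SHAREDSPACE_OBJ_TYPES_DO macro above gets its "enum inheritance" by expanding the base METASPACE_OBJ_TYPES_DO list inside the extended list, then instantiating the list twice: once to declare enumerators and once to build a name switch. A compilable sketch of the same X-macro technique, with invented type names:

    #include <cstdio>

    // Base list of types, then an "extended" list that reuses it.
    #define BASE_TYPES_DO(f)  f(Class) f(Symbol)
    #define EXT_TYPES_DO(f)   BASE_TYPES_DO(f) f(Other)

    #define DECLARE(name)   name ## Type,
    #define NAME_CASE(name) case name ## Type: return #name;

    enum Type { EXT_TYPES_DO(DECLARE) _number_of_types };

    static const char* type_name(Type t) {
      switch (t) {
        EXT_TYPES_DO(NAME_CASE)
        default: return "?";
      }
    }

    int main() {
      for (int t = 0; t < _number_of_types; t++) {
        printf("%d -> %s\n", t, type_name((Type)t));
      }
    }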
// Populate the shared space.
@@ -454,6 +595,14 @@ void VM_PopulateDumpSharedSpace::doit() {
mapinfo->close();
memmove(vtbl_list, saved_vtbl, vtbl_list_size * sizeof(void*));
+
+ if (PrintSharedSpaces) {
+ DumpAllocClosure dac;
+ dac.iterate_metaspace(_loader_data->ro_metaspace(), DumpAllocClosure::RO);
+ dac.iterate_metaspace(_loader_data->rw_metaspace(), DumpAllocClosure::RW);
+
+ dac.dump_stats(int(ro_bytes), int(rw_bytes), int(md_bytes), int(mc_bytes));
+ }
}
static void link_shared_classes(Klass* obj, TRAPS) {
@@ -677,35 +826,15 @@ public:
bool reading() const { return true; }
};
-
-// Save bounds of shared spaces mapped in.
-static char* _ro_base = NULL;
-static char* _rw_base = NULL;
-static char* _md_base = NULL;
-static char* _mc_base = NULL;
-
// Return true if given address is in the mapped shared space.
bool MetaspaceShared::is_in_shared_space(const void* p) {
- if (_ro_base == NULL || _rw_base == NULL) {
- return false;
- } else {
- return ((p >= _ro_base && p < (_ro_base + SharedReadOnlySize)) ||
- (p >= _rw_base && p < (_rw_base + SharedReadWriteSize)));
- }
+ return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p);
}
void MetaspaceShared::print_shared_spaces() {
- gclog_or_tty->print_cr("Shared Spaces:");
- gclog_or_tty->print(" read-only " INTPTR_FORMAT "-" INTPTR_FORMAT,
- _ro_base, _ro_base + SharedReadOnlySize);
- gclog_or_tty->print(" read-write " INTPTR_FORMAT "-" INTPTR_FORMAT,
- _rw_base, _rw_base + SharedReadWriteSize);
- gclog_or_tty->cr();
- gclog_or_tty->print(" misc-data " INTPTR_FORMAT "-" INTPTR_FORMAT,
- _md_base, _md_base + SharedMiscDataSize);
- gclog_or_tty->print(" misc-code " INTPTR_FORMAT "-" INTPTR_FORMAT,
- _mc_base, _mc_base + SharedMiscCodeSize);
- gclog_or_tty->cr();
+ if (UseSharedSpaces) {
+ FileMapInfo::current_info()->print_shared_spaces();
+ }
}
@@ -725,6 +854,11 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
+ char* _ro_base = NULL;
+ char* _rw_base = NULL;
+ char* _md_base = NULL;
+ char* _mc_base = NULL;
+
// Map each shared region
if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
(_rw_base = mapinfo->map_region(rw)) != NULL &&
diff --git a/src/share/vm/memory/oopFactory.hpp b/src/share/vm/memory/oopFactory.hpp
index bd0346690..1d14010e2 100644
--- a/src/share/vm/memory/oopFactory.hpp
+++ b/src/share/vm/memory/oopFactory.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
+#include "memory/referenceType.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.hpp"
diff --git a/src/share/vm/memory/padded.hpp b/src/share/vm/memory/padded.hpp
new file mode 100644
index 000000000..4c50b3996
--- /dev/null
+++ b/src/share/vm/memory/padded.hpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_PADDED_HPP
+#define SHARE_VM_MEMORY_PADDED_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Bytes needed to pad type to avoid cache-line sharing; alignment should be the
+// expected cache line size (a power of two). The first addend avoids sharing
+// when the start address is not a multiple of alignment; the second maintains
+// alignment of starting addresses that happen to be a multiple.
+#define PADDING_SIZE(type, alignment) \
+ ((alignment) + align_size_up_(sizeof(type), alignment))
+
+// Templates to create a subclass padded to avoid cache line sharing. These are
+// effective only when applied to derived-most (leaf) classes.
+
+// When no args are passed to the base ctor.
+template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class Padded : public T {
+ private:
+ char _pad_buf_[PADDING_SIZE(T, alignment)];
+};
+
+// When either 0 or 1 args may be passed to the base ctor.
+template <class T, typename Arg1T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class Padded01 : public T {
+ public:
+ Padded01(): T() { }
+ Padded01(Arg1T arg1): T(arg1) { }
+ private:
+ char _pad_buf_[PADDING_SIZE(T, alignment)];
+};
+
+// Super class of PaddedEnd when pad_size != 0.
+template <class T, size_t pad_size>
+class PaddedEndImpl : public T {
+ private:
+ char _pad_buf[pad_size];
+};
+
+// Super class of PaddedEnd when pad_size == 0.
+template <class T>
+class PaddedEndImpl<T, /*pad_size*/ 0> : public T {
+ // No padding.
+};
+
+#define PADDED_END_SIZE(type, alignment) (align_size_up_(sizeof(type), alignment) - sizeof(type))
+
+// A more memory-conservative implementation of Padded. The subclass adds the
+// minimal amount of padding needed to make the size of the object aligned.
+// This helps reduce false sharing, provided the start address is a multiple
+// of alignment.
+template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class PaddedEnd : public PaddedEndImpl<T, PADDED_END_SIZE(T, alignment)> {
+ // C++ doesn't allow zero-length arrays. The padding is put in a
+ // super class that is specialized for the pad_size == 0 case.
+};
+
+// Helper class to create an array of PaddedEnd<T> objects. All elements will
+// start at a multiple of alignment and the size will be aligned to alignment.
+template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class PaddedArray {
+ public:
+ // Creates an aligned padded array.
+ // The memory can't be deleted since the raw memory chunk is not returned.
+ static PaddedEnd<T>* create_unfreeable(uint length);
+};
+
+#endif // SHARE_VM_MEMORY_PADDED_HPP
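
The two padding strategies above differ in cost: PADDING_SIZE over-allocates a full extra cache line so the object is safe at any start address, while PADDED_END_SIZE only rounds the object size up and relies on an aligned start. A small sketch of the arithmetic, assuming a 64-byte cache line and a 24-byte payload:

    #include <cstddef>
    #include <cstdio>

    // Rounds sz up to a multiple of a (a is a power of two), mirroring the
    // align_size_up_ macro the header relies on.
    #define ALIGN_UP(sz, a) (((sz) + ((a) - 1)) & ~((a) - 1))

    struct Counter { long v; char name[16]; };   // typically 24 bytes on LP64

    int main() {
      const size_t alignment = 64;  // assumed cache-line size
      // Padded<T>: 64 + align_up(24, 64) = 128 pad bytes, safe at any address.
      printf("PADDING_SIZE    = %zu\n",
             alignment + ALIGN_UP(sizeof(Counter), alignment));
      // PaddedEnd<T>: align_up(24, 64) - 24 = 40 pad bytes, safe only when the
      // object itself starts on a cache-line boundary.
      printf("PADDED_END_SIZE = %zu\n",
             ALIGN_UP(sizeof(Counter), alignment) - sizeof(Counter));
    }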
diff --git a/src/share/vm/memory/padded.inline.hpp b/src/share/vm/memory/padded.inline.hpp
new file mode 100644
index 000000000..1e9994ab6
--- /dev/null
+++ b/src/share/vm/memory/padded.inline.hpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "memory/allocation.inline.hpp"
+#include "memory/padded.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Creates an aligned padded array.
+// The memory can't be deleted since the raw memory chunk is not returned.
+template <class T, MEMFLAGS flags, size_t alignment>
+PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
+ // Check that the PaddedEnd class works as intended.
+ STATIC_ASSERT(is_size_aligned_(sizeof(PaddedEnd<T>), alignment));
+
+ // Allocate a chunk of memory large enough to allow for some alignment.
+ void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T, alignment>) + alignment, flags);
+
+ // Make the initial alignment.
+ PaddedEnd<T>* aligned_padded_array = (PaddedEnd<T>*)align_pointer_up(chunk, alignment);
+
+ // Call the default constructor for each element.
+ for (uint i = 0; i < length; i++) {
+ ::new (&aligned_padded_array[i]) T();
+ }
+
+ return aligned_padded_array;
+}
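
A hypothetical call site for create_unfreeable(), assuming a default-constructible element type and the mtGC memory flag (kept as comments since the types are HotSpot-internal):

    // One padded counter per GC worker, each on its own cache line; the
    // backing chunk is intentionally never freed.
    // PaddedEnd<WorkerStat>* stats =
    //     PaddedArray<WorkerStat, mtGC>::create_unfreeable(ParallelGCThreads);
    // stats[i] starts at a multiple of DEFAULT_CACHE_LINE_SIZE for every i.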
diff --git a/src/share/vm/memory/referenceProcessor.cpp b/src/share/vm/memory/referenceProcessor.cpp
index 765e5085c..1333c082c 100644
--- a/src/share/vm/memory/referenceProcessor.cpp
+++ b/src/share/vm/memory/referenceProcessor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
@@ -180,11 +182,20 @@ void ReferenceProcessor::update_soft_ref_master_clock() {
// past clock value.
}
-void ReferenceProcessor::process_discovered_references(
+size_t ReferenceProcessor::total_count(DiscoveredList lists[]) {
+ size_t total = 0;
+ for (uint i = 0; i < _max_num_q; ++i) {
+ total += lists[i].length();
+ }
+ return total;
+}
+
+ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc,
- AbstractRefProcTaskExecutor* task_executor) {
+ AbstractRefProcTaskExecutor* task_executor,
+ GCTimer* gc_timer) {
NOT_PRODUCT(verify_ok_to_handle_reflists());
assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
@@ -202,34 +213,43 @@ void ReferenceProcessor::process_discovered_references(
_soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
bool trace_time = PrintGCDetails && PrintReferenceGC;
+
// Soft references
+ size_t soft_count = 0;
{
- TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
- process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
- is_alive, keep_alive, complete_gc, task_executor);
+ GCTraceTime tt("SoftReference", trace_time, false, gc_timer);
+ soft_count =
+ process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
+ is_alive, keep_alive, complete_gc, task_executor);
}
update_soft_ref_master_clock();
// Weak references
+ size_t weak_count = 0;
{
- TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
- process_discovered_reflist(_discoveredWeakRefs, NULL, true,
- is_alive, keep_alive, complete_gc, task_executor);
+ GCTraceTime tt("WeakReference", trace_time, false, gc_timer);
+ weak_count =
+ process_discovered_reflist(_discoveredWeakRefs, NULL, true,
+ is_alive, keep_alive, complete_gc, task_executor);
}
// Final references
+ size_t final_count = 0;
{
- TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
- process_discovered_reflist(_discoveredFinalRefs, NULL, false,
- is_alive, keep_alive, complete_gc, task_executor);
+ GCTraceTime tt("FinalReference", trace_time, false, gc_timer);
+ final_count =
+ process_discovered_reflist(_discoveredFinalRefs, NULL, false,
+ is_alive, keep_alive, complete_gc, task_executor);
}
// Phantom references
+ size_t phantom_count = 0;
{
- TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
- process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
- is_alive, keep_alive, complete_gc, task_executor);
+ GCTraceTime tt("PhantomReference", trace_time, false, gc_timer);
+ phantom_count =
+ process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
+ is_alive, keep_alive, complete_gc, task_executor);
}
// Weak global JNI references. It would make more sense (semantically) to
@@ -238,12 +258,14 @@ void ReferenceProcessor::process_discovered_references(
// thus use JNI weak references to circumvent the phantom references and
// resurrect a "post-mortem" object.
{
- TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
+ GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer);
if (task_executor != NULL) {
task_executor->set_single_threaded_mode();
}
process_phaseJNI(is_alive, keep_alive, complete_gc);
}
+
+ return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count);
}
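
GCTraceTime, like the TraceTime it replaces, is an RAII scope: construction starts the measurement and destruction reports it, so each reference phase is timed by the braces around it. A standalone sketch of that idiom using std::chrono (the GCTimer integration is omitted; all names are illustrative):

    #include <chrono>
    #include <cstdio>

    class ScopedTimer {
      const char* _title;
      bool _active;
      std::chrono::steady_clock::time_point _start;
     public:
      ScopedTimer(const char* title, bool active)
        : _title(title), _active(active),
          _start(std::chrono::steady_clock::now()) {}
      ~ScopedTimer() {
        if (!_active) return;
        long long us = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - _start).count();
        printf("%s: %lld us\n", _title, us);
      }
    };

    int main() {
      { ScopedTimer t("SoftReference", true); /* process soft refs here */ }
      { ScopedTimer t("WeakReference", true); /* process weak refs here */ }
    }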
#ifndef PRODUCT
@@ -252,7 +274,6 @@ uint ReferenceProcessor::count_jni_refs() {
class AlwaysAliveClosure: public BoolObjectClosure {
public:
virtual bool do_object_b(oop obj) { return true; }
- virtual void do_object(oop obj) { assert(false, "Don't call"); }
};
class CountHandleClosure: public OopClosure {
@@ -879,7 +900,7 @@ void ReferenceProcessor::balance_all_queues() {
balance_queues(_discoveredPhantomRefs);
}
-void
+size_t
ReferenceProcessor::process_discovered_reflist(
DiscoveredList refs_lists[],
ReferencePolicy* policy,
@@ -902,12 +923,11 @@ ReferenceProcessor::process_discovered_reflist(
must_balance) {
balance_queues(refs_lists);
}
+
+ size_t total_list_count = total_count(refs_lists);
+
if (PrintReferenceGC && PrintGCDetails) {
- size_t total = 0;
- for (uint i = 0; i < _max_num_q; ++i) {
- total += refs_lists[i].length();
- }
- gclog_or_tty->print(", %u refs", total);
+ gclog_or_tty->print(", %u refs", total_list_count);
}
// Phase 1 (soft refs only):
@@ -952,6 +972,8 @@ ReferenceProcessor::process_discovered_reflist(
is_alive, keep_alive, complete_gc);
}
}
+
+ return total_list_count;
}
void ReferenceProcessor::clean_up_discovered_references() {
@@ -1267,14 +1289,15 @@ void ReferenceProcessor::preclean_discovered_references(
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc,
- YieldClosure* yield) {
+ YieldClosure* yield,
+ GCTimer* gc_timer) {
NOT_PRODUCT(verify_ok_to_handle_reflists());
// Soft references
{
- TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
- false, gclog_or_tty);
+ GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
+ false, gc_timer);
for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
@@ -1286,8 +1309,8 @@ void ReferenceProcessor::preclean_discovered_references(
// Weak references
{
- TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
- false, gclog_or_tty);
+ GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
+ false, gc_timer);
for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
@@ -1299,8 +1322,8 @@ void ReferenceProcessor::preclean_discovered_references(
// Final references
{
- TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
- false, gclog_or_tty);
+ GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
+ false, gc_timer);
for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
@@ -1312,8 +1335,8 @@ void ReferenceProcessor::preclean_discovered_references(
// Phantom references
{
- TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
- false, gclog_or_tty);
+ GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
+ false, gc_timer);
for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
diff --git a/src/share/vm/memory/referenceProcessor.hpp b/src/share/vm/memory/referenceProcessor.hpp
index 1050863f4..252cc6d62 100644
--- a/src/share/vm/memory/referenceProcessor.hpp
+++ b/src/share/vm/memory/referenceProcessor.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,12 @@
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#include "memory/referencePolicy.hpp"
+#include "memory/referenceProcessorStats.hpp"
+#include "memory/referenceType.hpp"
#include "oops/instanceRefKlass.hpp"
+class GCTimer;
+
// ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
@@ -204,6 +208,10 @@ public:
};
class ReferenceProcessor : public CHeapObj<mtGC> {
+
+ private:
+ size_t total_count(DiscoveredList lists[]);
+
protected:
// Compatibility with pre-4965777 JDK's
static bool _pending_list_uses_discovered_field;
@@ -282,13 +290,13 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
}
// Process references with a certain reachability level.
- void process_discovered_reflist(DiscoveredList refs_lists[],
- ReferencePolicy* policy,
- bool clear_referent,
- BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- VoidClosure* complete_gc,
- AbstractRefProcTaskExecutor* task_executor);
+ size_t process_discovered_reflist(DiscoveredList refs_lists[],
+ ReferencePolicy* policy,
+ bool clear_referent,
+ BoolObjectClosure* is_alive,
+ OopClosure* keep_alive,
+ VoidClosure* complete_gc,
+ AbstractRefProcTaskExecutor* task_executor);
void process_phaseJNI(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
@@ -349,7 +357,8 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
void preclean_discovered_references(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc,
- YieldClosure* yield);
+ YieldClosure* yield,
+ GCTimer* gc_timer);
// Delete entries in the discovered lists that have
// either a null referent or are not active. Such
@@ -500,12 +509,13 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
bool discover_reference(oop obj, ReferenceType rt);
// Process references found during GC (called by the garbage collector)
- void process_discovered_references(BoolObjectClosure* is_alive,
- OopClosure* keep_alive,
- VoidClosure* complete_gc,
- AbstractRefProcTaskExecutor* task_executor);
+ ReferenceProcessorStats
+ process_discovered_references(BoolObjectClosure* is_alive,
+ OopClosure* keep_alive,
+ VoidClosure* complete_gc,
+ AbstractRefProcTaskExecutor* task_executor,
+ GCTimer *gc_timer);
- public:
// Enqueue references at end of GC (called by the garbage collector)
bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
diff --git a/src/share/vm/memory/referenceProcessorStats.hpp b/src/share/vm/memory/referenceProcessorStats.hpp
new file mode 100644
index 000000000..7497c09b9
--- /dev/null
+++ b/src/share/vm/memory/referenceProcessorStats.hpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSORSTATS_HPP
+#define SHARE_VM_MEMORY_REFERENCEPROCESSORSTATS_HPP
+
+#include "utilities/globalDefinitions.hpp"
+
+class ReferenceProcessor;
+
+// ReferenceProcessorStats contains statistics about how many references
+// were traversed when processing references during garbage collection.
+class ReferenceProcessorStats {
+ size_t _soft_count;
+ size_t _weak_count;
+ size_t _final_count;
+ size_t _phantom_count;
+
+ public:
+ ReferenceProcessorStats() :
+ _soft_count(0),
+ _weak_count(0),
+ _final_count(0),
+ _phantom_count(0) {}
+
+ ReferenceProcessorStats(size_t soft_count,
+ size_t weak_count,
+ size_t final_count,
+ size_t phantom_count) :
+ _soft_count(soft_count),
+ _weak_count(weak_count),
+ _final_count(final_count),
+ _phantom_count(phantom_count)
+ {}
+
+ size_t soft_count() const {
+ return _soft_count;
+ }
+
+ size_t weak_count() const {
+ return _weak_count;
+ }
+
+ size_t final_count() const {
+ return _final_count;
+ }
+
+ size_t phantom_count() const {
+ return _phantom_count;
+ }
+};
+#endif // SHARE_VM_MEMORY_REFERENCEPROCESSORSTATS_HPP
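
A hypothetical caller-side view of this stats object, matching the new process_discovered_references() signature shown earlier (kept as comments since every type here is HotSpot-internal):

    // ReferenceProcessorStats stats =
    //     rp->process_discovered_references(&is_alive, &keep_alive,
    //                                       &complete_gc, executor, gc_timer);
    // gclog_or_tty->print_cr("soft " SIZE_FORMAT " weak " SIZE_FORMAT
    //                        " final " SIZE_FORMAT " phantom " SIZE_FORMAT,
    //                        stats.soft_count(), stats.weak_count(),
    //                        stats.final_count(), stats.phantom_count());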
diff --git a/src/share/vm/memory/referenceType.hpp b/src/share/vm/memory/referenceType.hpp
new file mode 100644
index 000000000..9496e41ed
--- /dev/null
+++ b/src/share/vm/memory/referenceType.hpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_REFERENCETYPE_HPP
+#define SHARE_VM_MEMORY_REFERENCETYPE_HPP
+
+#include "utilities/debug.hpp"
+
+// ReferenceType is used to distinguish between java/lang/ref/Reference subclasses
+
+enum ReferenceType {
+ REF_NONE, // Regular class
+ REF_OTHER, // Subclass of java/lang/ref/Reference, but not subclass of one of the classes below
+ REF_SOFT, // Subclass of java/lang/ref/SoftReference
+ REF_WEAK, // Subclass of java/lang/ref/WeakReference
+ REF_FINAL, // Subclass of java/lang/ref/FinalReference
+ REF_PHANTOM // Subclass of java/lang/ref/PhantomReference
+};
+
+#endif // SHARE_VM_MEMORY_REFERENCETYPE_HPP
diff --git a/src/share/vm/memory/resourceArea.hpp b/src/share/vm/memory/resourceArea.hpp
index 0699334a5..1357081fd 100644
--- a/src/share/vm/memory/resourceArea.hpp
+++ b/src/share/vm/memory/resourceArea.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -83,6 +83,10 @@ protected:
Chunk *_chunk; // saved arena chunk
char *_hwm, *_max;
size_t _size_in_bytes;
+#ifdef ASSERT
+ Thread* _thread;
+ ResourceMark* _previous_resource_mark;
+#endif //ASSERT
void initialize(Thread *thread) {
_area = thread->resource_area();
@@ -92,6 +96,11 @@ protected:
_size_in_bytes = _area->size_in_bytes();
debug_only(_area->_nesting++;)
assert( _area->_nesting > 0, "must stack allocate RMs" );
+#ifdef ASSERT
+ _thread = thread;
+ _previous_resource_mark = thread->current_resource_mark();
+ thread->set_current_resource_mark(this);
+#endif // ASSERT
}
public:
@@ -111,6 +120,17 @@ protected:
_size_in_bytes = r->_size_in_bytes;
debug_only(_area->_nesting++;)
assert( _area->_nesting > 0, "must stack allocate RMs" );
+#ifdef ASSERT
+ Thread* thread = ThreadLocalStorage::thread();
+ if (thread != NULL) {
+ _thread = thread;
+ _previous_resource_mark = thread->current_resource_mark();
+ thread->set_current_resource_mark(this);
+ } else {
+ _thread = NULL;
+ _previous_resource_mark = NULL;
+ }
+#endif // ASSERT
}
void reset_to_mark() {
@@ -137,6 +157,11 @@ protected:
assert( _area->_nesting > 0, "must stack allocate RMs" );
debug_only(_area->_nesting--;)
reset_to_mark();
+#ifdef ASSERT
+ if (_thread != NULL) {
+ _thread->set_current_resource_mark(_previous_resource_mark);
+ }
+#endif // ASSERT
}
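
The ASSERT-only bookkeeping above threads the active ResourceMarks into a per-thread intrusive stack: each mark saves the previous one on construction and restores it on destruction. A minimal sketch of that pattern, assuming C++11 (names are illustrative):

    #include <cassert>
    #include <cstdio>

    struct Thread {
      struct Mark* current_mark = nullptr;
    };

    struct Mark {
      Thread* _thread;
      Mark* _previous;                         // saved previous top of stack
      Mark(Thread* t) : _thread(t), _previous(t->current_mark) {
        t->current_mark = this;                // push
      }
      ~Mark() { _thread->current_mark = _previous; }  // pop
    };

    int main() {
      Thread t;
      {
        Mark outer(&t);
        { Mark inner(&t); assert(t.current_mark == &inner); }
        assert(t.current_mark == &outer);
      }
      assert(t.current_mark == nullptr);
      puts("ok");
    }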
diff --git a/src/share/vm/memory/sharedHeap.cpp b/src/share/vm/memory/sharedHeap.cpp
index cd577d4b5..79455db9b 100644
--- a/src/share/vm/memory/sharedHeap.cpp
+++ b/src/share/vm/memory/sharedHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,8 +45,8 @@ enum SH_process_strong_roots_tasks {
SH_PS_FlatProfiler_oops_do,
SH_PS_Management_oops_do,
SH_PS_SystemDictionary_oops_do,
+ SH_PS_ClassLoaderDataGraph_oops_do,
SH_PS_jvmti_oops_do,
- SH_PS_StringTable_oops_do,
SH_PS_CodeCache_oops_do,
// Leave this one last.
SH_PS_NumElements
@@ -65,7 +65,8 @@ SharedHeap::SharedHeap(CollectorPolicy* policy_) :
}
_sh = this; // ch is static, should be set only once.
if ((UseParNewGC ||
- (UseConcMarkSweepGC && CMSParallelRemarkEnabled) ||
+ (UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled ||
+ CMSParallelRemarkEnabled)) ||
UseG1GC) &&
ParallelGCThreads > 0) {
_workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
@@ -126,6 +127,8 @@ SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
{
if (_active) {
outer->change_strong_roots_parity();
+ // Zero the claimed high water mark in the StringTable
+ StringTable::clear_parallel_claimed_index();
}
}
@@ -153,14 +156,16 @@ void SharedHeap::process_strong_roots(bool activate_scope,
// Global (strong) JNI handles
if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
JNIHandles::oops_do(roots);
+
// All threads execute this; the individual threads are task groups.
CLDToOopClosure roots_from_clds(roots);
CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds);
- if (ParallelGCThreads > 0) {
- Threads::possibly_parallel_oops_do(roots, roots_from_clds_p ,code_roots);
+ if (CollectedHeap::use_parallel_gc_threads()) {
+ Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
} else {
Threads::oops_do(roots, roots_from_clds_p, code_roots);
}
+
if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
ObjectSynchronizer::oops_do(roots);
if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
@@ -173,17 +178,27 @@ void SharedHeap::process_strong_roots(bool activate_scope,
if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
if (so & SO_AllClasses) {
SystemDictionary::oops_do(roots);
- ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
} else if (so & SO_SystemClasses) {
SystemDictionary::always_strong_oops_do(roots);
- ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
} else {
fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
}
}
- if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
- if (so & SO_Strings) {
+ if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
+ if (so & SO_AllClasses) {
+ ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
+ } else if (so & SO_SystemClasses) {
+ ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
+ }
+ }
+
+ // All threads execute the following. Specific chunks of buckets
+ // from the StringTable are the individual tasks.
+ if (so & SO_Strings) {
+ if (CollectedHeap::use_parallel_gc_threads()) {
+ StringTable::possibly_parallel_oops_do(roots);
+ } else {
StringTable::oops_do(roots);
}
}
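
The StringTable work above is parallelized by claiming: the claimed index is zeroed once per StrongRootsScope, and each worker atomically grabs disjoint chunks of buckets until the table is exhausted. A standalone sketch of the claiming loop using std::atomic (bucket and chunk sizes are illustrative):

    #include <atomic>
    #include <cstdio>

    static const int kBuckets = 1009;
    static const int kChunk = 32;
    static std::atomic<int> g_claimed{0};  // cleared at the start of each scope

    void scan_buckets_parallel(int worker_id) {
      for (;;) {
        int start = g_claimed.fetch_add(kChunk);
        if (start >= kBuckets) return;     // nothing left to claim
        int end = start + kChunk < kBuckets ? start + kChunk : kBuckets;
        for (int b = start; b < end; b++) {
          // visit every entry in bucket b
        }
        printf("worker %d scanned [%d, %d)\n", worker_id, start, end);
      }
    }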
@@ -212,7 +227,6 @@ void SharedHeap::process_strong_roots(bool activate_scope,
class AlwaysTrueClosure: public BoolObjectClosure {
public:
- void do_object(oop p) { ShouldNotReachHere(); }
bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;
diff --git a/src/share/vm/memory/sharedHeap.hpp b/src/share/vm/memory/sharedHeap.hpp
index b13bf15b8..cd810c036 100644
--- a/src/share/vm/memory/sharedHeap.hpp
+++ b/src/share/vm/memory/sharedHeap.hpp
@@ -166,11 +166,6 @@ public:
// Same as above, restricted to a memory region.
virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;
- // Iterate over all objects allocated since the last collection, calling
- // "cl->do_object" on each. The heap must have been initialized properly
- // to support this function, or else this call will fail.
- virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;
-
// Iterate over all spaces in use in the heap, in an undefined order.
virtual void space_iterate(SpaceClosure* cl) = 0;
diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
index a434b0a33..eb1e209a8 100644
--- a/src/share/vm/memory/space.hpp
+++ b/src/share/vm/memory/space.hpp
@@ -537,9 +537,8 @@ protected:
* Occasionally, we want to ensure a full compaction, which is determined \
* by the MarkSweepAlwaysCompactCount parameter. \
*/ \
- int invocations = MarkSweep::total_invocations(); \
- bool skip_dead = (MarkSweepAlwaysCompactCount < 1) \
- ||((invocations % MarkSweepAlwaysCompactCount) != 0); \
+ uint invocations = MarkSweep::total_invocations(); \
+ bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
\
size_t allowed_deadspace = 0; \
if (skip_dead) { \
diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp
index a0849df99..1f632ae47 100644
--- a/src/share/vm/memory/universe.cpp
+++ b/src/share/vm/memory/universe.cpp
@@ -52,7 +52,6 @@
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
-#include "runtime/aprofiler.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
@@ -106,11 +105,12 @@ objArrayOop Universe::_the_empty_class_klass_array = NULL;
Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
oop Universe::_the_null_string = NULL;
oop Universe::_the_min_jint_string = NULL;
-LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
-LatestMethodOopCache* Universe::_loader_addClass_cache = NULL;
-ActiveMethodOopsCache* Universe::_reflect_invoke_cache = NULL;
+LatestMethodCache* Universe::_finalizer_register_cache = NULL;
+LatestMethodCache* Universe::_loader_addClass_cache = NULL;
+LatestMethodCache* Universe::_pd_implies_cache = NULL;
oop Universe::_out_of_memory_error_java_heap = NULL;
-oop Universe::_out_of_memory_error_perm_gen = NULL;
+oop Universe::_out_of_memory_error_metaspace = NULL;
+oop Universe::_out_of_memory_error_class_metaspace = NULL;
oop Universe::_out_of_memory_error_array_size = NULL;
oop Universe::_out_of_memory_error_gc_overhead_limit = NULL;
objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
@@ -145,8 +145,6 @@ NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
address Universe::_narrow_ptrs_base;
-size_t Universe::_class_metaspace_size;
-
void Universe::basic_type_classes_do(void f(Klass*)) {
f(boolArrayKlassObj());
f(byteArrayKlassObj());
@@ -179,7 +177,8 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
f->do_oop((oop*)&_the_null_string);
f->do_oop((oop*)&_the_min_jint_string);
f->do_oop((oop*)&_out_of_memory_error_java_heap);
- f->do_oop((oop*)&_out_of_memory_error_perm_gen);
+ f->do_oop((oop*)&_out_of_memory_error_metaspace);
+ f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
f->do_oop((oop*)&_out_of_memory_error_array_size);
f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
@@ -223,16 +222,13 @@ void Universe::serialize(SerializeClosure* f, bool do_all) {
f->do_ptr((void**)&_the_empty_klass_array);
_finalizer_register_cache->serialize(f);
_loader_addClass_cache->serialize(f);
- _reflect_invoke_cache->serialize(f);
+ _pd_implies_cache->serialize(f);
}
void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
if (size < alignment || size % alignment != 0) {
- ResourceMark rm;
- stringStream st;
- st.print("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment);
- char* error = st.as_string();
- vm_exit_during_initialization(error);
+ vm_exit_during_initialization(
+ err_msg("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment));
}
}
@@ -532,7 +528,9 @@ void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
if (vt) vt->initialize_vtable(false, CHECK);
if (ko->oop_is_instance()) {
InstanceKlass* ik = (InstanceKlass*)ko;
- for (KlassHandle s_h(THREAD, ik->subklass()); s_h() != NULL; s_h = (THREAD, s_h()->next_sibling())) {
+ for (KlassHandle s_h(THREAD, ik->subklass());
+ s_h() != NULL;
+ s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
reinitialize_vtable_of(s_h, CHECK);
}
}
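
The old loop increment that this hunk fixes was a classic C++ trap: the expression s_h = (THREAD, s_h()->next_sibling()) is not a constructor call but the comma operator, so THREAD is evaluated and discarded and only the raw pointer is assigned. A two-line illustration:

    #include <cstdio>
    int main() {
      int a = (1, 2);      // comma operator: the 1 is discarded, a == 2
      printf("%d\n", a);   // prints 2
    }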
@@ -562,7 +560,8 @@ bool Universe::should_fill_in_stack_trace(Handle throwable) {
// a potential loop which could happen if an out of memory occurs when attempting
// to allocate the backtrace.
return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
- (throwable() != Universe::_out_of_memory_error_perm_gen) &&
+ (throwable() != Universe::_out_of_memory_error_metaspace) &&
+ (throwable() != Universe::_out_of_memory_error_class_metaspace) &&
(throwable() != Universe::_out_of_memory_error_array_size) &&
(throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
}
@@ -603,7 +602,7 @@ oop Universe::gen_out_of_memory_error(oop default_err) {
}
}
-static intptr_t non_oop_bits = 0;
+intptr_t Universe::_non_oop_bits = 0;
void* Universe::non_oop_word() {
// Neither the high bits nor the low bits of this value is allowed
@@ -617,11 +616,11 @@ void* Universe::non_oop_word() {
// Using the OS-supplied non-memory-address word (usually 0 or -1)
// will take care of the high bits, however many there are.
- if (non_oop_bits == 0) {
- non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
+ if (_non_oop_bits == 0) {
+ _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
}
- return (void*)non_oop_bits;
+ return (void*)_non_oop_bits;
}
jint universe_init() {
@@ -640,15 +639,17 @@ jint universe_init() {
return status;
}
+ Metaspace::global_initialize();
+
// Create memory for metadata. Must be after initializing heap for
// DumpSharedSpaces.
ClassLoaderData::init_null_class_loader_data();
// We have a heap so create the Method* caches before
// Metaspace::initialize_shared_spaces() tries to populate them.
- Universe::_finalizer_register_cache = new LatestMethodOopCache();
- Universe::_loader_addClass_cache = new LatestMethodOopCache();
- Universe::_reflect_invoke_cache = new ActiveMethodOopsCache();
+ Universe::_finalizer_register_cache = new LatestMethodCache();
+ Universe::_loader_addClass_cache = new LatestMethodCache();
+ Universe::_pd_implies_cache = new LatestMethodCache();
if (UseSharedSpaces) {
// Read the data structures supporting the shared spaces (shared
@@ -680,25 +681,27 @@ static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
// 32Gb
// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
-char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
+ assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
+ assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
+ assert(is_size_aligned(heap_size, alignment), "Must be");
+
+ uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
+
size_t base = 0;
#ifdef _LP64
if (UseCompressedOops) {
assert(mode == UnscaledNarrowOop ||
mode == ZeroBasedNarrowOop ||
mode == HeapBasedNarrowOop, "mode is invalid");
- const size_t total_size = heap_size + HeapBaseMinAddress;
+ const size_t total_size = heap_size + heap_base_min_address_aligned;
// Return specified base for the first request.
if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
- base = HeapBaseMinAddress;
-
- // If the total size and the metaspace size are small enough to allow
- // UnscaledNarrowOop then just use UnscaledNarrowOop.
- } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop) &&
- (!UseCompressedKlassPointers ||
- (((OopEncodingHeapMax - heap_size) + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax))) {
- // We don't need to check the metaspace size here because it is always smaller
- // than total_size.
+ base = heap_base_min_address_aligned;
+
+ // If the total size is small enough to allow UnscaledNarrowOop then
+ // just use UnscaledNarrowOop.
+ } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
(Universe::narrow_oop_shift() == 0)) {
// Use 32-bits oops without encoding and
@@ -715,13 +718,6 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
base = (OopEncodingHeapMax - heap_size);
}
}
-
- // See if ZeroBaseNarrowOop encoding will work for a heap based at
- // (KlassEncodingMetaspaceMax - class_metaspace_size()).
- } else if (UseCompressedKlassPointers && (mode != HeapBasedNarrowOop) &&
- (Universe::class_metaspace_size() + HeapBaseMinAddress <= KlassEncodingMetaspaceMax) &&
- (KlassEncodingMetaspaceMax + heap_size - Universe::class_metaspace_size() <= OopEncodingHeapMax)) {
- base = (KlassEncodingMetaspaceMax - Universe::class_metaspace_size());
} else {
// UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
// HeapBasedNarrowOop encoding was requested. So, can't reserve below 32Gb.
@@ -731,8 +727,7 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
// Set narrow_oop_base and narrow_oop_use_implicit_null_checks
// used in ReservedHeapSpace() constructors.
// The final values will be set in initialize_heap() below.
- if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax) &&
- (!UseCompressedKlassPointers || (base + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax)) {
+ if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
// Use zero based compressed oops
Universe::set_narrow_oop_base(NULL);
// Don't need guard page for implicit checks in indexed
@@ -753,6 +748,8 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
}
}
#endif
+
+ assert(is_ptr_aligned((char*)base, alignment), "Must be");
return (char*)base; // also return NULL (don't care) for 32-bit VM
}
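
The three NARROW_OOP_MODE cases correspond to how a 32-bit narrow oop is widened back to a full address: unscaled (the narrow value is the address, heap below 4Gb), zero-based (shifted by object alignment, heap below 32Gb), and heap-based (base plus shifted value). A sketch of the decode arithmetic, assuming a 64-bit build and example base/shift values:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    // General decode form; unscaled and zero-based are the base == 0 special cases.
    static uintptr_t decode(uint32_t narrow, uintptr_t base, int shift) {
      return base + ((uintptr_t)narrow << shift);
    }

    int main() {
      uint32_t narrow = 0x00400000;
      printf("unscaled:   0x%" PRIxPTR "\n", decode(narrow, 0, 0));  // address == narrow value
      printf("zero-based: 0x%" PRIxPTR "\n", decode(narrow, 0, 3));  // shift = log2(8-byte alignment)
      printf("heap-based: 0x%" PRIxPTR "\n",
             decode(narrow, (uintptr_t)0x800000000ULL, 3));          // example base, not a VM value
      return 0;
    }
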
@@ -815,19 +812,19 @@ jint Universe::initialize_heap() {
tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
}
- if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) ||
- (UseCompressedKlassPointers &&
- ((uint64_t)Universe::heap()->base() + Universe::class_metaspace_size() > KlassEncodingMetaspaceMax))) {
+ if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
// Can't reserve heap below 32Gb.
// keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
if (verbose) {
- tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base());
+ tty->print(", %s: "PTR_FORMAT,
+ narrow_oop_mode_to_string(HeapBasedNarrowOop),
+ Universe::narrow_oop_base());
}
} else {
Universe::set_narrow_oop_base(0);
if (verbose) {
- tty->print(", zero based Compressed Oops");
+ tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
}
#ifdef _WIN64
if (!Universe::narrow_oop_use_implicit_null_checks()) {
@@ -842,24 +839,20 @@ jint Universe::initialize_heap() {
} else {
Universe::set_narrow_oop_shift(0);
if (verbose) {
- tty->print(", 32-bits Oops");
+ tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
}
}
}
+
if (verbose) {
tty->cr();
tty->cr();
}
- if (UseCompressedKlassPointers) {
- Universe::set_narrow_klass_base(Universe::narrow_oop_base());
- Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
- }
Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
}
- // Universe::narrow_oop_base() is one page below the metaspace
- // base. The actual metaspace base depends on alignment constraints
- // so we don't know its exact location here.
- assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
+ // Universe::narrow_oop_base() is one page below the heap.
+ assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
+ os::vm_page_size()) ||
Universe::narrow_oop_base() == NULL, "invalid value");
assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
Universe::narrow_oop_shift() == 0, "invalid value");
@@ -879,35 +872,39 @@ jint Universe::initialize_heap() {
// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
- // Add in the class metaspace area so the classes in the headers can
- // be compressed the same as instances.
- // Need to round class space size up because it's below the heap and
- // the actual alignment depends on its size.
- Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment));
- size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment);
+ assert(alignment <= Arguments::conservative_max_heap_alignment(),
+ err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
+ alignment, Arguments::conservative_max_heap_alignment()));
+ size_t total_reserved = align_size_up(heap_size, alignment);
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops");
- char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
- ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
+ bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
+ assert(!UseLargePages
+ || UseParallelGC
+ || use_large_pages, "Wrong alignment to use large pages");
+
+ char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
+
+ ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
if (UseCompressedOops) {
if (addr != NULL && !total_rs.is_reserved()) {
// Failed to reserve at specified address - the requested memory
// region is taken already, for example, by 'java' launcher.
// Try again to reserve the heap higher.
- addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+ addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
ReservedHeapSpace total_rs0(total_reserved, alignment,
- UseLargePages, addr);
+ use_large_pages, addr);
if (addr != NULL && !total_rs0.is_reserved()) {
// Failed to reserve at specified address again - give up.
- addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+ addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
assert(addr == NULL, "");
ReservedHeapSpace total_rs1(total_reserved, alignment,
- UseLargePages, addr);
+ use_large_pages, addr);
total_rs = total_rs1;
} else {
total_rs = total_rs0;
@@ -916,32 +913,21 @@ ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
}
if (!total_rs.is_reserved()) {
- vm_exit_during_initialization(err_msg("Could not reserve enough space for object heap %d bytes", total_reserved));
+ vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
return total_rs;
}
- // Split the reserved space into main Java heap and a space for
- // classes so that they can be compressed using the same algorithm
- // as compressed oops. If compress oops and compress klass ptrs are
- // used we need the meta space first: if the alignment used for
- // compressed oops is greater than the one used for compressed klass
- // ptrs, a metadata space on top of the heap could become
- // unreachable.
- ReservedSpace class_rs = total_rs.first_part(Universe::class_metaspace_size());
- ReservedSpace heap_rs = total_rs.last_part(Universe::class_metaspace_size(), alignment);
- Metaspace::initialize_class_space(class_rs);
-
if (UseCompressedOops) {
// Universe::initialize_heap() will reset this to NULL if unscaled
// or zero-based narrow oops are actually used.
address base = (address)(total_rs.base() - os::vm_page_size());
Universe::set_narrow_oop_base(base);
}
- return heap_rs;
+ return total_rs;
}
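
reserve_heap() effectively walks a fallback ladder: request the UnscaledNarrowOop base first, retry with the ZeroBasedNarrowOop base if the OS refuses that address, and finally pass NULL (HeapBasedNarrowOop) so the OS chooses. A standalone sketch of that retry shape, with hypothetical stand-ins for preferred_heap_base() and the reservation call:

    #include <cstdio>

    enum Mode { Unscaled, ZeroBased, HeapBased };

    // Hypothetical stand-ins: the first two modes request a specific address
    // (and "fail" here); HeapBased passes NULL and lets the "OS" pick.
    static char* preferred_base(Mode m) { return m == HeapBased ? nullptr : (char*)0x100000000ULL; }
    static char* os_reserve_at(char* addr) { return addr == nullptr ? (char*)0x7f0000000000ULL : nullptr; }

    int main() {
      char* got = nullptr;
      for (int m = Unscaled; m <= HeapBased && got == nullptr; m++) {
        got = os_reserve_at(preferred_base((Mode)m));  // fall through to the next mode on failure
      }
      if (got != nullptr) printf("reserved at %p\n", (void*)got);
      else                printf("reservation failed\n");  // maps to vm_exit_during_initialization
      return 0;
    }
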
-// It's the caller's repsonsibility to ensure glitch-freedom
+// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void Universe::update_heap_info_at_gc() {
_heap_capacity_at_last_gc = heap()->capacity();
@@ -949,6 +935,33 @@ void Universe::update_heap_info_at_gc() {
}
+const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
+ switch (mode) {
+ case UnscaledNarrowOop:
+ return "32-bits Oops";
+ case ZeroBasedNarrowOop:
+ return "zero based Compressed Oops";
+ case HeapBasedNarrowOop:
+ return "Compressed Oops with base";
+ }
+
+ ShouldNotReachHere();
+ return "";
+}
+
+
+Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
+ if (narrow_oop_base() != 0) {
+ return HeapBasedNarrowOop;
+ }
+
+ if (narrow_oop_shift() != 0) {
+ return ZeroBasedNarrowOop;
+ }
+
+ return UnscaledNarrowOop;
+}
+
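
narrow_oop_mode() derives the mode purely from the (base, shift) pair set during heap reservation: any non-NULL base means heap-based, a zero base with a non-zero shift means zero-based, and otherwise oops are unscaled. A sketch of that mapping as a checkable truth table:

    #include <cassert>

    enum Mode { Unscaled, ZeroBased, HeapBased };

    static Mode mode_for(const void* base, int shift) {
      if (base != nullptr) return HeapBased;  // any base forces heap-based encoding
      if (shift != 0)      return ZeroBased;  // no base, but scaled by object alignment
      return Unscaled;                        // the narrow oop is the address itself
    }

    int main() {
      assert(mode_for(nullptr, 0) == Unscaled);
      assert(mode_for(nullptr, 3) == ZeroBased);
      assert(mode_for((const void*)0x1000, 3) == HeapBased);
      return 0;
    }
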
void universe2_init() {
EXCEPTION_MARK;
@@ -983,7 +996,8 @@ bool universe_post_init() {
k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
k_h = instanceKlassHandle(THREAD, k);
Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
- Universe::_out_of_memory_error_perm_gen = k_h->allocate_instance(CHECK_false);
+ Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false);
+ Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false);
Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
Universe::_out_of_memory_error_gc_overhead_limit =
k_h->allocate_instance(CHECK_false);
@@ -1016,7 +1030,9 @@ bool universe_post_init() {
java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());
msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
- java_lang_Throwable::set_message(Universe::_out_of_memory_error_perm_gen, msg());
+ java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
+ msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
+ java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());
@@ -1052,35 +1068,38 @@ bool universe_post_init() {
vmSymbols::register_method_name(),
vmSymbols::register_method_signature());
if (m == NULL || !m->is_static()) {
- THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
- "java.lang.ref.Finalizer.register", false);
+ tty->print_cr("Unable to link/verify Finalizer.register method");
+ return false; // initialization failed (cannot throw exception yet)
}
Universe::_finalizer_register_cache->init(
- SystemDictionary::Finalizer_klass(), m, CHECK_false);
-
- // Resolve on first use and initialize class.
- // Note: No race-condition here, since a resolve will always return the same result
-
- // Setup method for security checks
- k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_reflect_Method(), true, CHECK_false);
- k_h = instanceKlassHandle(THREAD, k);
- k_h->link_class(CHECK_false);
- m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_object_array_object_signature());
- if (m == NULL || m->is_static()) {
- THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
- "java.lang.reflect.Method.invoke", false);
- }
- Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);
+ SystemDictionary::Finalizer_klass(), m);
// Setup method for registering loaded classes in class loader vector
InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
if (m == NULL || m->is_static()) {
- THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
- "java.lang.ClassLoader.addClass", false);
+ tty->print_cr("Unable to link/verify ClassLoader.addClass method");
+ return false; // initialization failed (cannot throw exception yet)
}
Universe::_loader_addClass_cache->init(
- SystemDictionary::ClassLoader_klass(), m, CHECK_false);
+ SystemDictionary::ClassLoader_klass(), m);
+
+ // Setup method for checking protection domain
+ InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
+ m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->
+ find_method(vmSymbols::impliesCreateAccessControlContext_name(),
+ vmSymbols::void_boolean_signature());
+ // Allow NULL, which should only happen during bootstrapping.
+ if (m != NULL) {
+ if (m->is_static()) {
+ // NoSuchMethodException doesn't actually work because it tries to run the
+ // <init> function before java_lang_Class is linked. Print error and exit.
+ tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage");
+ return false; // initialization failed
+ }
+ Universe::_pd_implies_cache->init(
+ SystemDictionary::ProtectionDomain_klass(), m);
+ }
// The following initializes converter functions for serialization in
// JVM.cpp. If we clean up the StrictMath code above we may want to find
@@ -1099,6 +1118,9 @@ bool universe_post_init() {
// Initialize performance counters for metaspaces
MetaspaceCounters::initialize_performance_counters();
+ CompressedClassSpaceCounters::initialize_performance_counters();
+
+ MemoryService::add_metaspace_memory_pools();
GC_locker::unlock(); // allow gc after bootstrapping
@@ -1401,7 +1423,7 @@ void Universe::compute_verify_oop_data() {
}
-void CommonMethodOopCache::init(Klass* k, Method* m, TRAPS) {
+void LatestMethodCache::init(Klass* k, Method* m) {
if (!UseSharedSpaces) {
_klass = k;
}
@@ -1417,87 +1439,8 @@ void CommonMethodOopCache::init(Klass* k, Method* m, TRAPS) {
}
-ActiveMethodOopsCache::~ActiveMethodOopsCache() {
- if (_prev_methods != NULL) {
- delete _prev_methods;
- _prev_methods = NULL;
- }
-}
-
-
-void ActiveMethodOopsCache::add_previous_version(Method* const method) {
- assert(Thread::current()->is_VM_thread(),
- "only VMThread can add previous versions");
-
- // Only append the previous method if it is executing on the stack.
- if (method->on_stack()) {
-
- if (_prev_methods == NULL) {
- // This is the first previous version so make some space.
- // Start with 2 elements under the assumption that the class
- // won't be redefined much.
- _prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Method*>(2, true);
- }
-
- // RC_TRACE macro has an embedded ResourceMark
- RC_TRACE(0x00000100,
- ("add: %s(%s): adding prev version ref for cached method @%d",
- method->name()->as_C_string(), method->signature()->as_C_string(),
- _prev_methods->length()));
-
- _prev_methods->append(method);
- }
-
-
- // Since the caller is the VMThread and we are at a safepoint, this is a good
- // time to clear out unused method references.
-
- if (_prev_methods == NULL) return;
-
- for (int i = _prev_methods->length() - 1; i >= 0; i--) {
- Method* method = _prev_methods->at(i);
- assert(method != NULL, "weak method ref was unexpectedly cleared");
-
- if (!method->on_stack()) {
- // This method isn't running anymore so remove it
- _prev_methods->remove_at(i);
- MetadataFactory::free_metadata(method->method_holder()->class_loader_data(), method);
- } else {
- // RC_TRACE macro has an embedded ResourceMark
- RC_TRACE(0x00000400, ("add: %s(%s): previous cached method @%d is alive",
- method->name()->as_C_string(), method->signature()->as_C_string(), i));
- }
- }
-} // end add_previous_version()
-
-
-bool ActiveMethodOopsCache::is_same_method(Method* const method) const {
- InstanceKlass* ik = InstanceKlass::cast(klass());
- Method* check_method = ik->method_with_idnum(method_idnum());
- assert(check_method != NULL, "sanity check");
- if (check_method == method) {
- // done with the easy case
- return true;
- }
-
- if (_prev_methods != NULL) {
- // The cached method has been redefined at least once so search
- // the previous versions for a match.
- for (int i = 0; i < _prev_methods->length(); i++) {
- check_method = _prev_methods->at(i);
- if (check_method == method) {
- // a previous version matches
- return true;
- }
- }
- }
-
- // either no previous versions or no previous version matched
- return false;
-}
-
-
-Method* LatestMethodOopCache::get_Method() {
+Method* LatestMethodCache::get_method() {
+ if (klass() == NULL) return NULL;
InstanceKlass* ik = InstanceKlass::cast(klass());
Method* m = ik->method_with_idnum(method_idnum());
assert(m != NULL, "sanity check");
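
LatestMethodCache deliberately stores a Klass* plus a method idnum rather than a raw Method*: after RedefineClasses installs a new method version, the idnum still resolves to the latest Method*, so get_method() never hands out an obsolete pointer. A toy analogue of that indirection (std::map standing in for method_with_idnum; not VM code):

    #include <cassert>
    #include <map>

    struct Method { int version; };
    struct Klass  { std::map<int, Method> by_idnum; };  // idnum -> current Method

    int main() {
      Klass k;
      k.by_idnum[7] = Method{1};
      int cached_idnum = 7;                           // what LatestMethodCache keeps
      k.by_idnum[7] = Method{2};                      // "RedefineClasses" installs a new version
      assert(k.by_idnum[cached_idnum].version == 2);  // the lookup always sees the latest
      return 0;
    }
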
diff --git a/src/share/vm/memory/universe.hpp b/src/share/vm/memory/universe.hpp
index 48d32f71e..a891d2a11 100644
--- a/src/share/vm/memory/universe.hpp
+++ b/src/share/vm/memory/universe.hpp
@@ -41,10 +41,11 @@ class CollectedHeap;
class DeferredObjAllocEvent;
-// Common parts of a Method* cache. This cache safely interacts with
-// the RedefineClasses API.
-//
-class CommonMethodOopCache : public CHeapObj<mtClass> {
+// A helper class for caching a Method* when the user of the cache
+// only cares about the latest version of the Method*. This cache safely
+// interacts with the RedefineClasses API.
+
+class LatestMethodCache : public CHeapObj<mtClass> {
// We save the Klass* and the idnum of Method* in order to get
// the current cached Method*.
private:
@@ -52,12 +53,14 @@ class CommonMethodOopCache : public CHeapObj<mtClass> {
int _method_idnum;
public:
- CommonMethodOopCache() { _klass = NULL; _method_idnum = -1; }
- ~CommonMethodOopCache() { _klass = NULL; _method_idnum = -1; }
+ LatestMethodCache() { _klass = NULL; _method_idnum = -1; }
+ ~LatestMethodCache() { _klass = NULL; _method_idnum = -1; }
+
+ void init(Klass* k, Method* m);
+ Klass* klass() const { return _klass; }
+ int method_idnum() const { return _method_idnum; }
- void init(Klass* k, Method* m, TRAPS);
- Klass* klass() const { return _klass; }
- int method_idnum() const { return _method_idnum; }
+ Method* get_method();
// Enhanced Class Redefinition support
void classes_do(void f(Klass*)) {
@@ -72,43 +75,10 @@ class CommonMethodOopCache : public CHeapObj<mtClass> {
};
-// A helper class for caching a Method* when the user of the cache
-// cares about all versions of the Method*.
-//
-class ActiveMethodOopsCache : public CommonMethodOopCache {
- // This subclass adds weak references to older versions of the
- // Method* and a query method for a Method*.
-
- private:
- // If the cached Method* has not been redefined, then
- // _prev_methods will be NULL. If all of the previous
- // versions of the method have been collected, then
- // _prev_methods can have a length of zero.
- GrowableArray<Method*>* _prev_methods;
-
- public:
- ActiveMethodOopsCache() { _prev_methods = NULL; }
- ~ActiveMethodOopsCache();
-
- void add_previous_version(Method* const method);
- bool is_same_method(Method* const method) const;
-};
-
-
-// A helper class for caching a Method* when the user of the cache
-// only cares about the latest version of the Method*.
-//
-class LatestMethodOopCache : public CommonMethodOopCache {
- // This subclass adds a getter method for the latest Method*.
-
- public:
- Method* get_Method();
-};
-
-// For UseCompressedOops and UseCompressedKlassPointers.
+// For UseCompressedOops.
struct NarrowPtrStruct {
- // Base address for oop/klass-within-java-object materialization.
- // NULL if using wide oops/klasses or zero based narrow oops/klasses.
+ // Base address for oop-within-java-object materialization.
+ // NULL if using wide oops or zero based narrow oops.
address _base;
// Number of shift bits for encoding/decoding narrow ptrs.
// 0 if using wide ptrs or zero based unscaled narrow ptrs,
@@ -136,6 +106,7 @@ class Universe: AllStatic {
friend class SystemDictionary;
friend class VMStructs;
friend class VM_PopulateDumpSharedSpace;
+ friend class Metaspace;
friend jint universe_init();
friend void universe2_init();
@@ -174,13 +145,16 @@ class Universe: AllStatic {
static objArrayOop _the_empty_class_klass_array; // Canonicalized obj array of type java.lang.Class
static oop _the_null_string; // A cache of "null" as a Java string
static oop _the_min_jint_string; // A cache of "-2147483648" as a Java string
- static LatestMethodOopCache* _finalizer_register_cache; // static method for registering finalizable objects
- static LatestMethodOopCache* _loader_addClass_cache; // method for registering loaded classes in class loader vector
- static ActiveMethodOopsCache* _reflect_invoke_cache; // method for security checks
- static oop _out_of_memory_error_java_heap; // preallocated error object (no backtrace)
- static oop _out_of_memory_error_perm_gen; // preallocated error object (no backtrace)
- static oop _out_of_memory_error_array_size;// preallocated error object (no backtrace)
- static oop _out_of_memory_error_gc_overhead_limit; // preallocated error object (no backtrace)
+ static LatestMethodCache* _finalizer_register_cache; // static method for registering finalizable objects
+ static LatestMethodCache* _loader_addClass_cache; // method for registering loaded classes in class loader vector
+ static LatestMethodCache* _pd_implies_cache; // method for checking protection domain attributes
+
+ // preallocated error objects (no backtrace)
+ static oop _out_of_memory_error_java_heap;
+ static oop _out_of_memory_error_metaspace;
+ static oop _out_of_memory_error_class_metaspace;
+ static oop _out_of_memory_error_array_size;
+ static oop _out_of_memory_error_gc_overhead_limit;
static Array<int>* _the_empty_int_array; // Canonicalized int array
static Array<u2>* _the_empty_short_array; // Canonicalized short array
@@ -205,15 +179,14 @@ class Universe: AllStatic {
// The particular choice of collected heap.
static CollectedHeap* _collectedHeap;
+ static intptr_t _non_oop_bits;
+
// For UseCompressedOops.
static struct NarrowPtrStruct _narrow_oop;
- // For UseCompressedKlassPointers.
+ // For UseCompressedClassPointers.
static struct NarrowPtrStruct _narrow_klass;
static address _narrow_ptrs_base;
- // Aligned size of the metaspace.
- static size_t _class_metaspace_size;
-
// array of dummy objects used with +FullGCAlot
debug_only(static objArrayOop _fullgc_alot_dummy_array;)
// index of next entry to clear
@@ -253,40 +226,18 @@ class Universe: AllStatic {
return m;
}
- // Narrow Oop encoding mode:
- // 0 - Use 32-bits oops without encoding when
- // NarrowOopHeapBaseMin + heap_size < 4Gb
- // 1 - Use zero based compressed oops with encoding when
- // NarrowOopHeapBaseMin + heap_size < 32Gb
- // 2 - Use compressed oops with heap base + encoding.
- enum NARROW_OOP_MODE {
- UnscaledNarrowOop = 0,
- ZeroBasedNarrowOop = 1,
- HeapBasedNarrowOop = 2
- };
- static char* preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode);
- static char* preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode);
static void set_narrow_oop_base(address base) {
assert(UseCompressedOops, "no compressed oops?");
_narrow_oop._base = base;
}
static void set_narrow_klass_base(address base) {
- assert(UseCompressedKlassPointers, "no compressed klass ptrs?");
+ assert(UseCompressedClassPointers, "no compressed klass ptrs?");
_narrow_klass._base = base;
}
static void set_narrow_oop_use_implicit_null_checks(bool use) {
assert(UseCompressedOops, "no compressed ptrs?");
_narrow_oop._use_implicit_null_checks = use;
}
- static bool reserve_metaspace_helper(bool with_base = false);
- static ReservedHeapSpace reserve_heap_metaspace(size_t heap_size, size_t alignment, bool& contiguous);
-
- static size_t class_metaspace_size() {
- return _class_metaspace_size;
- }
- static void set_class_metaspace_size(size_t metaspace_size) {
- _class_metaspace_size = metaspace_size;
- }
// Debugging
static int _verify_count; // number of verifies done
@@ -344,9 +295,12 @@ class Universe: AllStatic {
static Array<Klass*>* the_array_interfaces_array() { return _the_array_interfaces_array; }
static oop the_null_string() { return _the_null_string; }
static oop the_min_jint_string() { return _the_min_jint_string; }
- static Method* finalizer_register_method() { return _finalizer_register_cache->get_Method(); }
- static Method* loader_addClass_method() { return _loader_addClass_cache->get_Method(); }
- static ActiveMethodOopsCache* reflect_invoke_cache() { return _reflect_invoke_cache; }
+
+ static Method* finalizer_register_method() { return _finalizer_register_cache->get_method(); }
+ static Method* loader_addClass_method() { return _loader_addClass_cache->get_method(); }
+
+ static Method* protection_domain_implies_method() { return _pd_implies_cache->get_method(); }
+
static oop null_ptr_exception_instance() { return _null_ptr_exception_instance; }
static oop arithmetic_exception_instance() { return _arithmetic_exception_instance; }
static oop virtual_machine_error_instance() { return _virtual_machine_error_instance; }
@@ -361,7 +315,8 @@ class Universe: AllStatic {
// may or may not have a backtrace. If error has a backtrace then the stack trace is already
// filled in.
static oop out_of_memory_error_java_heap() { return gen_out_of_memory_error(_out_of_memory_error_java_heap); }
- static oop out_of_memory_error_perm_gen() { return gen_out_of_memory_error(_out_of_memory_error_perm_gen); }
+ static oop out_of_memory_error_metaspace() { return gen_out_of_memory_error(_out_of_memory_error_metaspace); }
+ static oop out_of_memory_error_class_metaspace() { return gen_out_of_memory_error(_out_of_memory_error_class_metaspace); }
static oop out_of_memory_error_array_size() { return gen_out_of_memory_error(_out_of_memory_error_array_size); }
static oop out_of_memory_error_gc_overhead_limit() { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit); }
@@ -380,12 +335,27 @@ class Universe: AllStatic {
static CollectedHeap* heap() { return _collectedHeap; }
// For UseCompressedOops
+ // Narrow Oop encoding mode:
+ // 0 - Use 32-bits oops without encoding when
+ // NarrowOopHeapBaseMin + heap_size < 4Gb
+ // 1 - Use zero based compressed oops with encoding when
+ // NarrowOopHeapBaseMin + heap_size < 32Gb
+ // 2 - Use compressed oops with heap base + encoding.
+ enum NARROW_OOP_MODE {
+ UnscaledNarrowOop = 0,
+ ZeroBasedNarrowOop = 1,
+ HeapBasedNarrowOop = 2
+ };
+ static NARROW_OOP_MODE narrow_oop_mode();
+ static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode);
+ static char* preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode);
+ static char* preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode);
static address narrow_oop_base() { return _narrow_oop._base; }
static bool is_narrow_oop_base(void* addr) { return (narrow_oop_base() == (address)addr); }
static int narrow_oop_shift() { return _narrow_oop._shift; }
static bool narrow_oop_use_implicit_null_checks() { return _narrow_oop._use_implicit_null_checks; }
- // For UseCompressedKlassPointers
+ // For UseCompressedClassPointers
static address narrow_klass_base() { return _narrow_klass._base; }
static bool is_narrow_klass_base(void* addr) { return (narrow_klass_base() == (address)addr); }
static int narrow_klass_shift() { return _narrow_klass._shift; }
diff --git a/src/share/vm/oops/annotations.cpp b/src/share/vm/oops/annotations.cpp
index 546257a17..1eb3afbb7 100644
--- a/src/share/vm/oops/annotations.cpp
+++ b/src/share/vm/oops/annotations.cpp
@@ -33,7 +33,7 @@
// Allocate annotations in metadata area
Annotations* Annotations::allocate(ClassLoaderData* loader_data, TRAPS) {
- return new (loader_data, size(), true, THREAD) Annotations();
+ return new (loader_data, size(), true, MetaspaceObj::AnnotationType, THREAD) Annotations();
}
// helper
diff --git a/src/share/vm/oops/arrayKlass.cpp b/src/share/vm/oops/arrayKlass.cpp
index 9b5ec951b..6e04c3ac1 100644
--- a/src/share/vm/oops/arrayKlass.cpp
+++ b/src/share/vm/oops/arrayKlass.cpp
@@ -71,7 +71,6 @@ Method* ArrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature) cons
}
ArrayKlass::ArrayKlass(Symbol* name) {
- set_alloc_size(0);
set_name(name);
set_super(Universe::is_bootstrapping() ? (Klass*)NULL : SystemDictionary::Object_klass());
@@ -94,7 +93,7 @@ void ArrayKlass::complete_create_array_klass(ArrayKlass* k, KlassHandle super_kl
ResourceMark rm(THREAD);
k->initialize_supers(super_klass(), CHECK);
k->vtable()->initialize_vtable(false, CHECK);
- java_lang_Class::create_mirror(k, CHECK);
+ java_lang_Class::create_mirror(k, Handle(NULL), CHECK);
}
GrowableArray<Klass*>* ArrayKlass::compute_secondary_supers(int num_extra_slots) {
@@ -161,12 +160,6 @@ void ArrayKlass::array_klasses_do(void f(Klass* k)) {
}
}
-
-void ArrayKlass::with_array_klasses_do(void f(Klass* k)) {
- array_klasses_do(f);
-}
-
-
// GC support
void ArrayKlass::oops_do(OopClosure* cl) {
@@ -221,8 +214,8 @@ void ArrayKlass::oop_print_on(oop obj, outputStream* st) {
// Verification
-void ArrayKlass::verify_on(outputStream* st) {
- Klass::verify_on(st);
+void ArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
+ Klass::verify_on(st, check_dictionary);
if (component_mirror() != NULL) {
guarantee(component_mirror()->klass() != NULL, "should have a class");
diff --git a/src/share/vm/oops/arrayKlass.hpp b/src/share/vm/oops/arrayKlass.hpp
index f37a4d500..4b06f1c0e 100644
--- a/src/share/vm/oops/arrayKlass.hpp
+++ b/src/share/vm/oops/arrayKlass.hpp
@@ -39,7 +39,6 @@ class ArrayKlass: public Klass {
Klass* volatile _higher_dimension; // Refers the (n+1)'th-dimensional array (if present).
Klass* volatile _lower_dimension; // Refers the (n-1)'th-dimensional array (if present).
int _vtable_len; // size of vtable for this klass
- juint _alloc_size; // allocation profiling support
oop _component_mirror; // component type, as a java/lang/Class
protected:
@@ -65,10 +64,6 @@ class ArrayKlass: public Klass {
void set_lower_dimension(Klass* k) { _lower_dimension = k; }
Klass** adr_lower_dimension() { return (Klass**)&this->_lower_dimension;}
- // Allocation profiling support
- juint alloc_size() const { return _alloc_size; }
- void set_alloc_size(juint n) { _alloc_size = n; }
-
// offset of first element, including any padding for the sake of alignment
int array_header_in_bytes() const { return layout_helper_header_size(layout_helper()); }
int log2_element_size() const { return layout_helper_log2_element_size(layout_helper()); }
@@ -126,7 +121,6 @@ class ArrayKlass: public Klass {
// Iterators
void array_klasses_do(void f(Klass* k));
void array_klasses_do(void f(Klass* k, TRAPS), TRAPS);
- void with_array_klasses_do(void f(Klass* k));
// GC support
virtual void oops_do(OopClosure* cl);
@@ -152,7 +146,7 @@ class ArrayKlass: public Klass {
void oop_print_on(oop obj, outputStream* st);
// Verification
- void verify_on(outputStream* st);
+ void verify_on(outputStream* st, bool check_dictionary);
void oop_verify_on(oop obj, outputStream* st);
};
diff --git a/src/share/vm/oops/arrayOop.hpp b/src/share/vm/oops/arrayOop.hpp
index 806b7b728..0e5ceffe3 100644
--- a/src/share/vm/oops/arrayOop.hpp
+++ b/src/share/vm/oops/arrayOop.hpp
@@ -65,7 +65,7 @@ class arrayOopDesc : public oopDesc {
// declared nonstatic fields in arrayOopDesc if not compressed, otherwise
// it occupies the second half of the _klass field in oopDesc.
static int length_offset_in_bytes() {
- return UseCompressedKlassPointers ? klass_gap_offset_in_bytes() :
+ return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
sizeof(arrayOopDesc);
}
diff --git a/src/share/vm/oops/compiledICHolder.cpp b/src/share/vm/oops/compiledICHolder.cpp
index c13b7559a..2b2cd2ae6 100644
--- a/src/share/vm/oops/compiledICHolder.cpp
+++ b/src/share/vm/oops/compiledICHolder.cpp
@@ -48,8 +48,6 @@ void CompiledICHolder::print_value_on(outputStream* st) const {
// Verification
void CompiledICHolder::verify_on(outputStream* st) {
- guarantee(holder_method()->is_metadata(), "should be in metaspace");
guarantee(holder_method()->is_method(), "should be method");
- guarantee(holder_klass()->is_metadata(), "should be in metaspace");
guarantee(holder_klass()->is_klass(), "should be klass");
}
diff --git a/src/share/vm/oops/constMethod.cpp b/src/share/vm/oops/constMethod.cpp
index 1d0376a0b..4c0720908 100644
--- a/src/share/vm/oops/constMethod.cpp
+++ b/src/share/vm/oops/constMethod.cpp
@@ -40,7 +40,7 @@ ConstMethod* ConstMethod::allocate(ClassLoaderData* loader_data,
MethodType method_type,
TRAPS) {
int size = ConstMethod::size(byte_code_size, sizes);
- return new (loader_data, size, true, THREAD) ConstMethod(
+ return new (loader_data, size, true, MetaspaceObj::ConstMethodType, THREAD) ConstMethod(
byte_code_size, sizes, method_type, size);
}
@@ -440,7 +440,6 @@ void ConstMethod::collect_statistics(KlassSizeStats *sz) const {
void ConstMethod::verify_on(outputStream* st) {
guarantee(is_constMethod(), "object must be constMethod");
- guarantee(is_metadata(), err_msg("Should be metadata " PTR_FORMAT, this));
// Verification can occur during oop construction before the method or
// other fields have been initialized.
diff --git a/src/share/vm/oops/constantPool.cpp b/src/share/vm/oops/constantPool.cpp
index 5c5ae945b..66493a879 100644
--- a/src/share/vm/oops/constantPool.cpp
+++ b/src/share/vm/oops/constantPool.cpp
@@ -55,7 +55,7 @@ ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, T
// the resolved_references array, which is recreated at startup time.
// But that could be moved to InstanceKlass (although a pain to access from
// assembly code). Maybe it could be moved to the cpCache which is RW.
- return new (loader_data, size, false, THREAD) ConstantPool(tags);
+ return new (loader_data, size, false, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags);
}
ConstantPool::ConstantPool(Array<u1>* tags) {
@@ -108,16 +108,16 @@ objArrayOop ConstantPool::resolved_references() const {
void ConstantPool::initialize_resolved_references(ClassLoaderData* loader_data,
intStack reference_map,
int constant_pool_map_length,
- TRAPS) {
+ TRAPS) {
// Initialize the resolved object cache.
int map_length = reference_map.length();
if (map_length > 0) {
// Only need mapping back to constant pool entries. The map isn't used for
- // invokedynamic resolved_reference entries. The constant pool cache index
- // has the mapping back to both the constant pool and to the resolved
- // reference index.
+ // invokedynamic resolved_reference entries. For invokedynamic entries,
+ // the constant pool cache index has the mapping back to both the constant
+ // pool and to the resolved reference index.
if (constant_pool_map_length > 0) {
- Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, map_length, CHECK);
+ Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, constant_pool_map_length, CHECK);
for (int i = 0; i < constant_pool_map_length; i++) {
int x = reference_map.at(i);
@@ -182,16 +182,9 @@ oop ConstantPool::lock() {
int ConstantPool::cp_to_object_index(int cp_index) {
// This lookup is expensive; don't do it too often.
- for (int i = 0; i< reference_map()->length(); i++) {
- if (reference_map()->at(i) == cp_index) return i;
- // Zero entry is divider between constant pool indices for strings,
- // method handles and method types. After that the index is a constant
- // pool cache index for invokedynamic. Stop when zero (which can never
- // be a constant pool index)
- if (reference_map()->at(i) == 0) break;
- }
- // We might not find the index.
- return _no_index_sentinel;
+ int i = reference_map()->find(cp_index);
+ // We might not find the index for a jsr292 call.
+ return (i < 0) ? _no_index_sentinel : i;
}
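
The rewritten cp_to_object_index() delegates the scan to Array<u2>::find and maps a negative result to _no_index_sentinel. Assuming find is a plain linear search returning the index on a hit and -1 on a miss (a reasonable reading of the call site, mirrored here in standalone C++):

    #include <cassert>
    #include <vector>

    // Assumed shape of Array<u2>::find: linear scan, index on hit, -1 on miss.
    template <typename T>
    static int find(const std::vector<T>& a, T what) {
      for (int i = 0; i < (int)a.size(); i++) {
        if (a[i] == what) return i;
      }
      return -1;  // the caller maps this to _no_index_sentinel
    }

    int main() {
      std::vector<unsigned short> reference_map = {11, 42, 0, 99};
      assert(find(reference_map, (unsigned short)42) == 1);
      assert(find(reference_map, (unsigned short)7) == -1);
      return 0;
    }
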
Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) {
@@ -396,32 +389,6 @@ Klass* ConstantPool::klass_ref_at_if_loaded(constantPoolHandle this_oop, int whi
}
-// This is an interface for the compiler that allows accessing non-resolved entries
-// in the constant pool - but still performs the validations tests. Must be used
-// in a pre-parse of the compiler - to determine what it can do and not do.
-// Note: We cannot update the ConstantPool from the vm_thread.
-Klass* ConstantPool::klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int index, TRAPS) {
- int which = this_oop->klass_ref_index_at(index);
- CPSlot entry = this_oop->slot_at(which);
- if (entry.is_resolved()) {
- assert(entry.get_klass()->is_klass(), "must be");
- return entry.get_klass();
- } else {
- assert(entry.is_unresolved(), "must be either symbol or klass");
- Symbol* name = entry.get_symbol();
- oop loader = this_oop->pool_holder()->class_loader();
- oop protection_domain = this_oop->pool_holder()->protection_domain();
- Handle h_loader(THREAD, loader);
- Handle h_prot (THREAD, protection_domain);
- KlassHandle k(THREAD, SystemDictionary::find(name, h_loader, h_prot, THREAD));
-
- // Do access check for klasses
- if( k.not_null() ) verify_constant_pool_resolve(this_oop, k, CHECK_NULL);
- return k();
- }
-}
-
-
Method* ConstantPool::method_at_if_loaded(constantPoolHandle cpool,
int which) {
if (cpool->cache() == NULL) return NULL; // nothing to load yet
@@ -866,8 +833,7 @@ oop ConstantPool::string_at_impl(constantPoolHandle this_oop, int which, int obj
// If the string has already been interned, this entry will be non-null
oop str = this_oop->resolved_references()->obj_at(obj_index);
if (str != NULL) return str;
-
- Symbol* sym = this_oop->unresolved_string_at(which);
+ Symbol* sym = this_oop->unresolved_string_at(which);
str = StringTable::intern(sym, CHECK_(NULL));
this_oop->string_at_put(which, obj_index, str);
assert(java_lang_String::is_instance(str), "must be string");
@@ -1063,9 +1029,10 @@ bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2,
int k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
int i1 = invoke_dynamic_bootstrap_specifier_index(index1);
int i2 = cp2->invoke_dynamic_bootstrap_specifier_index(index2);
- bool match = compare_entry_to(k1, cp2, k2, CHECK_false) &&
- compare_operand_to(i1, cp2, i2, CHECK_false);
- return match;
+ // Use separate statements and local variables because CHECK_false can return early.
+ bool match_entry = compare_entry_to(k1, cp2, k2, CHECK_false);
+ bool match_operand = compare_operand_to(i1, cp2, i2, CHECK_false);
+ return (match_entry && match_operand);
} break;
case JVM_CONSTANT_String:
@@ -1644,9 +1611,11 @@ jint ConstantPool::cpool_entry_size(jint idx) {
case JVM_CONSTANT_UnresolvedClassInError:
case JVM_CONSTANT_StringIndex:
case JVM_CONSTANT_MethodType:
+ case JVM_CONSTANT_MethodTypeInError:
return 3;
case JVM_CONSTANT_MethodHandle:
+ case JVM_CONSTANT_MethodHandleInError:
return 4; //tag, ref_kind, ref_index
case JVM_CONSTANT_Integer:
@@ -1827,8 +1796,8 @@ int ConstantPool::copy_cpool_bytes(int cpool_size,
case JVM_CONSTANT_MethodHandle:
case JVM_CONSTANT_MethodHandleInError: {
*bytes = JVM_CONSTANT_MethodHandle;
- int kind = method_handle_ref_kind_at(idx);
- idx1 = method_handle_index_at(idx);
+ int kind = method_handle_ref_kind_at_error_ok(idx);
+ idx1 = method_handle_index_at_error_ok(idx);
*(bytes+1) = (unsigned char) kind;
Bytes::put_Java_u2((address) (bytes+2), idx1);
DBG(printf("JVM_CONSTANT_MethodHandle: %d %hd", kind, idx1));
@@ -1837,7 +1806,7 @@ int ConstantPool::copy_cpool_bytes(int cpool_size,
case JVM_CONSTANT_MethodType:
case JVM_CONSTANT_MethodTypeInError: {
*bytes = JVM_CONSTANT_MethodType;
- idx1 = method_type_index_at(idx);
+ idx1 = method_type_index_at_error_ok(idx);
Bytes::put_Java_u2((address) (bytes+1), idx1);
DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
break;
@@ -2025,12 +1994,12 @@ void ConstantPool::print_entry_on(const int index, outputStream* st) {
break;
case JVM_CONSTANT_MethodHandle :
case JVM_CONSTANT_MethodHandleInError :
- st->print("ref_kind=%d", method_handle_ref_kind_at(index));
- st->print(" ref_index=%d", method_handle_index_at(index));
+ st->print("ref_kind=%d", method_handle_ref_kind_at_error_ok(index));
+ st->print(" ref_index=%d", method_handle_index_at_error_ok(index));
break;
case JVM_CONSTANT_MethodType :
case JVM_CONSTANT_MethodTypeInError :
- st->print("signature_index=%d", method_type_index_at(index));
+ st->print("signature_index=%d", method_type_index_at_error_ok(index));
break;
case JVM_CONSTANT_InvokeDynamic :
{
@@ -2094,12 +2063,10 @@ void ConstantPool::verify_on(outputStream* st) {
CPSlot entry = slot_at(i);
if (tag.is_klass()) {
if (entry.is_resolved()) {
- guarantee(entry.get_klass()->is_metadata(), "should be metadata");
guarantee(entry.get_klass()->is_klass(), "should be klass");
}
} else if (tag.is_unresolved_klass()) {
if (entry.is_resolved()) {
- guarantee(entry.get_klass()->is_metadata(), "should be metadata");
guarantee(entry.get_klass()->is_klass(), "should be klass");
}
} else if (tag.is_symbol()) {
@@ -2111,13 +2078,11 @@ void ConstantPool::verify_on(outputStream* st) {
if (cache() != NULL) {
// Note: cache() can be NULL before a class is completely setup or
// in temporary constant pools used during constant pool merging
- guarantee(cache()->is_metadata(), "should be metadata");
guarantee(cache()->is_constantPoolCache(), "should be constant pool cache");
}
if (pool_holder() != NULL) {
// Note: pool_holder() can be NULL in temporary constant pools
// used during constant pool merging
- guarantee(pool_holder()->is_metadata(), "should be metadata");
guarantee(pool_holder()->is_klass(), "should be klass");
}
}
diff --git a/src/share/vm/oops/constantPool.hpp b/src/share/vm/oops/constantPool.hpp
index 288d243df..ab7f5c1a3 100644
--- a/src/share/vm/oops/constantPool.hpp
+++ b/src/share/vm/oops/constantPool.hpp
@@ -234,7 +234,6 @@ class ConstantPool : public Metadata {
static int cache_offset_in_bytes() { return offset_of(ConstantPool, _cache); }
static int pool_holder_offset_in_bytes() { return offset_of(ConstantPool, _pool_holder); }
static int resolved_references_offset_in_bytes() { return offset_of(ConstantPool, _resolved_references); }
- static int reference_map_offset_in_bytes() { return offset_of(ConstantPool, _reference_map); }
// Storing constants
@@ -357,7 +356,7 @@ class ConstantPool : public Metadata {
Symbol* klass_name_at(int which); // Returns the name, w/o resolving.
- Klass* resolved_klass_at(int which) { // Used by Compiler
+ Klass* resolved_klass_at(int which) const { // Used by Compiler
guarantee(tag_at(which).is_klass(), "Corrupted constant pool");
// Must do an acquire here in case another thread resolved the klass
// behind our back, lest we later load stale values thru the oop.
@@ -478,18 +477,42 @@ class ConstantPool : public Metadata {
return *int_at_addr(which);
}
- int method_handle_ref_kind_at(int which) {
- assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
+ private:
+ int method_handle_ref_kind_at(int which, bool error_ok) {
+ assert(tag_at(which).is_method_handle() ||
+ (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
return extract_low_short_from_int(*int_at_addr(which)); // mask out unwanted ref_index bits
}
- int method_handle_index_at(int which) {
- assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
+ int method_handle_index_at(int which, bool error_ok) {
+ assert(tag_at(which).is_method_handle() ||
+ (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
return extract_high_short_from_int(*int_at_addr(which)); // shift out unwanted ref_kind bits
}
- int method_type_index_at(int which) {
- assert(tag_at(which).is_method_type(), "Corrupted constant pool");
+ int method_type_index_at(int which, bool error_ok) {
+ assert(tag_at(which).is_method_type() ||
+ (error_ok && tag_at(which).is_method_type_in_error()), "Corrupted constant pool");
return *int_at_addr(which);
}
+ public:
+ int method_handle_ref_kind_at(int which) {
+ return method_handle_ref_kind_at(which, false);
+ }
+ int method_handle_ref_kind_at_error_ok(int which) {
+ return method_handle_ref_kind_at(which, true);
+ }
+ int method_handle_index_at(int which) {
+ return method_handle_index_at(which, false);
+ }
+ int method_handle_index_at_error_ok(int which) {
+ return method_handle_index_at(which, true);
+ }
+ int method_type_index_at(int which) {
+ return method_type_index_at(which, false);
+ }
+ int method_type_index_at_error_ok(int which) {
+ return method_type_index_at(which, true);
+ }
+
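
As the comments in the accessors note, a MethodHandle constant packs both u2 fields into one 32-bit pool entry: the ref_kind in the low short and the ref_index in the high short. A standalone sketch of that packing (HotSpot's extract_low/high_short helpers are assumed, not shown):

    #include <cassert>
    #include <cstdint>

    static int pack(uint16_t ref_kind, uint16_t ref_index) {
      return (int)(((uint32_t)ref_index << 16) | ref_kind);  // high short | low short
    }

    int main() {
      int entry = pack(6 /* e.g. REF_invokeStatic */, 123);
      assert((entry & 0xFFFF) == 6);            // extract_low_short_from_int -> ref_kind
      assert(((entry >> 16) & 0xFFFF) == 123);  // extract_high_short_from_int -> ref_index
      return 0;
    }
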
// Derived queries:
Symbol* method_handle_name_ref_at(int which) {
int member = method_handle_index_at(which);
@@ -733,8 +756,6 @@ class ConstantPool : public Metadata {
static oop method_type_at_if_loaded (constantPoolHandle this_oop, int which);
static Klass* klass_at_if_loaded (constantPoolHandle this_oop, int which);
static Klass* klass_ref_at_if_loaded (constantPoolHandle this_oop, int which);
- // Same as above - but does LinkResolving.
- static Klass* klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS);
// Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
// future by other Java code. These take constant pool indices rather than
diff --git a/src/share/vm/oops/cpCache.cpp b/src/share/vm/oops/cpCache.cpp
index bc15e282b..9d3589550 100644
--- a/src/share/vm/oops/cpCache.cpp
+++ b/src/share/vm/oops/cpCache.cpp
@@ -140,9 +140,10 @@ void ConstantPoolCacheEntry::set_parameter_size(int value) {
err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
}
-void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
- methodHandle method,
- int vtable_index) {
+void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
+ methodHandle method,
+ int vtable_index) {
+ bool is_vtable_call = (vtable_index >= 0); // FIXME: split this method on this boolean
assert(method->interpreter_entry() != NULL, "should have been set at this point");
assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
@@ -160,7 +161,8 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
// ...and fall through as if we were handling invokevirtual:
case Bytecodes::_invokevirtual:
{
- if (method->can_be_statically_bound()) {
+ if (!is_vtable_call) {
+ assert(method->can_be_statically_bound(), "");
// set_f2_as_vfinal_method checks if is_vfinal flag is true.
set_method_flags(as_TosState(method->result_type()),
( 1 << is_vfinal_shift) |
@@ -169,6 +171,7 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
method()->size_of_parameters());
set_f2_as_vfinal_method(method());
} else {
+ assert(!method->can_be_statically_bound(), "");
assert(vtable_index >= 0, "valid index");
assert(!method->is_final_method(), "sanity");
set_method_flags(as_TosState(method->result_type()),
@@ -182,6 +185,7 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
+ assert(!is_vtable_call, "");
// Note: Read and preserve the value of the is_vfinal flag on any
// invokevirtual bytecode shared with this constant pool cache entry.
// It is cheap and safe to consult is_vfinal() at all times.
@@ -232,8 +236,22 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
NOT_PRODUCT(verify(tty));
}
+void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, methodHandle method) {
+ int index = Method::nonvirtual_vtable_index;
+ // index < 0; FIXME: inline and customize set_direct_or_vtable_call
+ set_direct_or_vtable_call(invoke_code, method, index);
+}
+
+void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
+ // either the method is a miranda or its holder should accept the given index
+ assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
+ // index >= 0; FIXME: inline and customize set_direct_or_vtable_call
+ set_direct_or_vtable_call(invoke_code, method, index);
+}
-void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
+void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
+ assert(method->method_holder()->verify_itable_index(index), "");
+ assert(invoke_code == Bytecodes::_invokeinterface, "");
InstanceKlass* interf = method->method_holder();
assert(interf->is_interface(), "must be an interface");
assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
@@ -542,7 +560,8 @@ ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
const intStack& invokedynamic_map, TRAPS) {
int size = ConstantPoolCache::size(length);
- return new (loader_data, size, false, THREAD) ConstantPoolCache(length, index_map, invokedynamic_map);
+ return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
+ ConstantPoolCache(length, index_map, invokedynamic_map);
}
void ConstantPoolCache::initialize(const intArray& inverse_index_map,
diff --git a/src/share/vm/oops/cpCache.hpp b/src/share/vm/oops/cpCache.hpp
index 27ca7980c..77c0deb9d 100644
--- a/src/share/vm/oops/cpCache.hpp
+++ b/src/share/vm/oops/cpCache.hpp
@@ -140,8 +140,15 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
_f1 = f1;
}
void release_set_f1(Metadata* f1);
- void set_f2(intx f2) { assert(_f2 == 0 || _f2 == f2, "illegal field change"); _f2 = f2; }
- void set_f2_as_vfinal_method(Method* f2) { assert(_f2 == 0 || _f2 == (intptr_t) f2, "illegal field change"); assert(is_vfinal(), "flags must be set"); _f2 = (intptr_t) f2; }
+ void set_f2(intx f2) {
+ intx existing_f2 = _f2; // read once
+ assert(existing_f2 == 0 || existing_f2 == f2, "illegal field change");
+ _f2 = f2;
+ }
+ void set_f2_as_vfinal_method(Method* f2) {
+ assert(is_vfinal(), "flags must be set");
+ set_f2((intx)f2);
+ }
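
The rewritten set_f2() reads the field exactly once into a local before asserting. With a concurrent writer, reading _f2 twice inside the old one-line assert could compare two different values; a single snapshot makes the check self-consistent. A miniature of the pattern (std::atomic is used here only to make the sketch well-defined; the VM field is a plain intx):

    #include <atomic>
    #include <cassert>

    static std::atomic<long> f2{0};

    static void set_f2_once(long v) {
      long existing = f2.load();               // read once
      assert(existing == 0 || existing == v);  // compare against a single snapshot
      f2.store(v);
    }

    int main() {
      set_f2_once(42);
      set_f2_once(42);  // an idempotent re-set is allowed, as in the VM's assert
      return 0;
    }
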
int make_flags(TosState state, int option_bits, int field_index_or_method_params);
void set_flags(intx flags) { _flags = flags; }
bool init_flags_atomic(intx flags);
@@ -212,15 +219,29 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
Klass* root_klass // needed by the GC to dirty the klass
);
- void set_method( // sets entry to resolved method entry
+ private:
+ void set_direct_or_vtable_call(
Bytecodes::Code invoke_code, // the bytecode used for invoking the method
methodHandle method, // the method/prototype if any (NULL, otherwise)
int vtable_index // the vtable index if any, else negative
);
- void set_interface_call(
- methodHandle method, // Resolved method
- int index // Method index into interface
+ public:
+ void set_direct_call( // sets entry to exact concrete method entry
+ Bytecodes::Code invoke_code, // the bytecode used for invoking the method
+ methodHandle method // the method to call
+ );
+
+ void set_vtable_call( // sets entry to vtable index
+ Bytecodes::Code invoke_code, // the bytecode used for invoking the method
+ methodHandle method, // resolved method which declares the vtable index
+ int vtable_index // the vtable index
+ );
+
+ void set_itable_call(
+ Bytecodes::Code invoke_code, // the bytecode used; must be invokeinterface
+ methodHandle method, // the resolved interface method
+ int itable_index // index into itable for the method
);
void set_method_handle(
diff --git a/src/share/vm/oops/fieldInfo.hpp b/src/share/vm/oops/fieldInfo.hpp
index 5da8ed962..6763c42d1 100644
--- a/src/share/vm/oops/fieldInfo.hpp
+++ b/src/share/vm/oops/fieldInfo.hpp
@@ -240,6 +240,14 @@ class FieldInfo VALUE_OBJ_CLASS_SPEC {
return (access_flags() & JVM_ACC_FIELD_INTERNAL) != 0;
}
+ bool is_stable() const {
+ return (access_flags() & JVM_ACC_FIELD_STABLE) != 0;
+ }
+ void set_stable(bool z) {
+ if (z) _shorts[access_flags_offset] |= JVM_ACC_FIELD_STABLE;
+ else _shorts[access_flags_offset] &= ~JVM_ACC_FIELD_STABLE;
+ }
+
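
set_stable() uses the usual bit-flag idiom: OR to set, AND-with-complement to clear. A standalone check of the idiom (0x0020 below is a placeholder bit, not necessarily JVM_ACC_FIELD_STABLE's real value):

    #include <cassert>
    #include <cstdint>

    const uint16_t FIELD_STABLE = 0x0020;  // placeholder, not the real constant

    int main() {
      uint16_t flags = 0;
      flags |= FIELD_STABLE;                      // set_stable(true)
      assert((flags & FIELD_STABLE) != 0);
      flags = (uint16_t)(flags & ~FIELD_STABLE);  // set_stable(false)
      assert((flags & FIELD_STABLE) == 0);
      return 0;
    }
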
Symbol* lookup_symbol(int symbol_index) const {
assert(is_internal(), "only internal fields");
return vmSymbols::symbol_at((vmSymbols::SID)symbol_index);
diff --git a/src/share/vm/oops/fieldStreams.hpp b/src/share/vm/oops/fieldStreams.hpp
index acc590c97..17f1b0a85 100644
--- a/src/share/vm/oops/fieldStreams.hpp
+++ b/src/share/vm/oops/fieldStreams.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "oops/instanceKlass.hpp"
#include "oops/fieldInfo.hpp"
+#include "runtime/fieldDescriptor.hpp"
// This is the base class for iteration over the fields array
// describing the declared fields in the class. Several subclasses
@@ -43,8 +44,10 @@ class FieldStreamBase : public StackObj {
int _index;
int _limit;
int _generic_signature_slot;
+ fieldDescriptor _fd_buf;
FieldInfo* field() const { return FieldInfo::from_field_array(_fields, _index); }
+ InstanceKlass* field_holder() const { return _constants->pool_holder(); }
int init_generic_signature_start_slot() {
int length = _fields->length();
@@ -102,6 +105,7 @@ class FieldStreamBase : public StackObj {
_index = 0;
_limit = klass->java_fields_count();
init_generic_signature_start_slot();
+ assert(klass == field_holder(), "");
}
FieldStreamBase(instanceKlassHandle klass) {
_fields = klass->fields();
@@ -109,6 +113,7 @@ class FieldStreamBase : public StackObj {
_index = 0;
_limit = klass->java_fields_count();
init_generic_signature_start_slot();
+ assert(klass == field_holder(), "");
}
// accessors
@@ -180,6 +185,12 @@ class FieldStreamBase : public StackObj {
return field()->contended_group();
}
+ // bridge to a heavier API:
+ fieldDescriptor& field_descriptor() const {
+ fieldDescriptor& field = const_cast<fieldDescriptor&>(_fd_buf);
+ field.reinitialize(field_holder(), _index);
+ return field;
+ }
};
// Iterate over only the internal fields
diff --git a/src/share/vm/oops/generateOopMap.cpp b/src/share/vm/oops/generateOopMap.cpp
index 8c12b7ac7..9a9dc23d4 100644
--- a/src/share/vm/oops/generateOopMap.cpp
+++ b/src/share/vm/oops/generateOopMap.cpp
@@ -642,11 +642,21 @@ int GenerateOopMap::next_bb_start_pc(BasicBlock *bb) {
// CellType handling methods
//
+// Allocate memory and throw a LinkageError on failure.
+#define ALLOC_RESOURCE_ARRAY(var, type, count) \
+ var = NEW_RESOURCE_ARRAY_RETURN_NULL(type, count); \
+ if (var == NULL) { \
+ report_error("Cannot reserve enough memory to analyze this method"); \
+ return; \
+ }
+
+
void GenerateOopMap::init_state() {
_state_len = _max_locals + _max_stack + _max_monitors;
- _state = NEW_RESOURCE_ARRAY(CellTypeState, _state_len);
+ ALLOC_RESOURCE_ARRAY(_state, CellTypeState, _state_len);
memset(_state, 0, _state_len * sizeof(CellTypeState));
- _state_vec_buf = NEW_RESOURCE_ARRAY(char, MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */);
+ int count = MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */;
+ ALLOC_RESOURCE_ARRAY(_state_vec_buf, char, count);
}
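
ALLOC_RESOURCE_ARRAY swaps the aborting NEW_RESOURCE_ARRAY for the _RETURN_NULL variant and turns allocation failure into report_error() plus an early return, so an oversized method fails verification instead of crashing the VM. A standalone sketch of the same pattern with hypothetical names:

    #include <cstddef>
    #include <cstdio>
    #include <new>

    // Hypothetical analogue of ALLOC_RESOURCE_ARRAY: a non-aborting allocation,
    // with failure converted into a reported error and an early return.
    #define ALLOC_OR_BAIL(var, type, count)                                        \
      var = new (std::nothrow) type[count];                                        \
      if (var == nullptr) {                                                        \
        fprintf(stderr, "Cannot reserve enough memory to analyze this method\n");  \
        return;                                                                    \
      }

    static void analyze(size_t n) {
      int* state;
      ALLOC_OR_BAIL(state, int, n);
      state[0] = 0;   // ... analysis would use the buffer here ...
      delete[] state;
    }

    int main() { analyze(16); return 0; }
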
void GenerateOopMap::make_context_uninitialized() {
@@ -905,7 +915,7 @@ void GenerateOopMap::init_basic_blocks() {
// But cumbersome since we don't know the stack heights yet. (Nor the
// monitor stack heights...)
- _basic_blocks = NEW_RESOURCE_ARRAY(BasicBlock, _bb_count);
+ ALLOC_RESOURCE_ARRAY(_basic_blocks, BasicBlock, _bb_count);
// Make a pass through the bytecodes. Count the number of monitorenters.
// This can be used as an upper bound on the monitor stack depth in programs
@@ -976,8 +986,8 @@ void GenerateOopMap::init_basic_blocks() {
return;
}
- CellTypeState *basicBlockState =
- NEW_RESOURCE_ARRAY(CellTypeState, bbNo * _state_len);
+ CellTypeState *basicBlockState;
+ ALLOC_RESOURCE_ARRAY(basicBlockState, CellTypeState, bbNo * _state_len);
memset(basicBlockState, 0, bbNo * _state_len * sizeof(CellTypeState));
// Make a pass over the basicblocks and assign their state vectors.
diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp
index c506e466b..a9de9df2b 100644
--- a/src/share/vm/oops/instanceKlass.cpp
+++ b/src/share/vm/oops/instanceKlass.cpp
@@ -48,6 +48,7 @@
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
+#include "prims/jvmtiRedefineClasses.hpp"
#include "prims/methodComparator.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
@@ -268,9 +269,7 @@ InstanceKlass::InstanceKlass(int vtable_len,
set_fields(NULL, 0);
set_constants(NULL);
set_class_loader_data(NULL);
- set_protection_domain(NULL);
- set_signers(NULL);
- set_source_file_name(NULL);
+ set_source_file_name_index(0);
set_source_debug_extension(NULL, 0);
set_array_name(NULL);
set_inner_classes(NULL);
@@ -279,22 +278,20 @@ InstanceKlass::InstanceKlass(int vtable_len,
set_is_marked_dependent(false);
set_init_state(InstanceKlass::allocated);
set_init_thread(NULL);
- set_init_lock(NULL);
set_reference_type(rt);
set_oop_map_cache(NULL);
set_jni_ids(NULL);
set_osr_nmethods_head(NULL);
set_breakpoints(NULL);
init_previous_versions();
- set_generic_signature(NULL);
+ set_generic_signature_index(0);
release_set_methods_jmethod_ids(NULL);
- release_set_methods_cached_itable_indices(NULL);
set_annotations(NULL);
set_jvmti_cached_class_field_map(NULL);
set_initial_method_idnum(0);
_dependencies = NULL;
set_jvmti_cached_class_field_map(NULL);
- set_cached_class_file(NULL, 0);
+ set_cached_class_file(NULL);
set_initial_method_idnum(0);
set_minor_version(0);
set_major_version(0);
@@ -408,12 +405,6 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
}
set_inner_classes(NULL);
- // Null out Java heap objects, although these won't be walked to keep
- // alive once this InstanceKlass is deallocated.
- set_protection_domain(NULL);
- set_signers(NULL);
- set_init_lock(NULL);
-
// We should deallocate the Annotations instance
MetadataFactory::free_metadata(loader_data, annotations());
set_annotations(NULL);
@@ -451,6 +442,24 @@ void InstanceKlass::eager_initialize(Thread *thread) {
}
}
+// JVMTI spec thinks there are signers and protection domain in the
+// instanceKlass. These accessors pretend these fields are there.
+// The hprof specification also thinks these fields are in InstanceKlass.
+oop InstanceKlass::protection_domain() const {
+ // return the protection_domain from the mirror
+ return java_lang_Class::protection_domain(java_mirror());
+}
+
+// To remove these requires an incompatible change and a CCC request.
+objArrayOop InstanceKlass::signers() const {
+ // return the signers from the mirror
+ return java_lang_Class::signers(java_mirror());
+}
+
+volatile oop InstanceKlass::init_lock() const {
+ // return the init lock from the mirror
+ return java_lang_Class::init_lock(java_mirror());
+}
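With these three oops relocated into the java.lang.Class mirror, readers pay one extra dereference through java_mirror(). A sketch of the equivalence (ik is an assumed InstanceKlass*):

    // Old: read the (now deleted) ik->_protection_domain field directly.
    // New: the accessor forwards through the class mirror.
    oop pd = java_lang_Class::protection_domain(ik->java_mirror());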
void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
EXCEPTION_MARK;
@@ -1139,7 +1148,7 @@ bool InstanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor*
Symbol* f_name = fs.name();
Symbol* f_sig = fs.signature();
if (f_name == name && f_sig == sig) {
- fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
+ fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
return true;
}
}
@@ -1208,7 +1217,7 @@ Klass* InstanceKlass::find_field(Symbol* name, Symbol* sig, bool is_static, fiel
bool InstanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
if (fs.offset() == offset) {
- fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
+ fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
if (fd->is_static() == is_static) return true;
}
}
@@ -1241,8 +1250,7 @@ void InstanceKlass::methods_do(void f(Method* method)) {
void InstanceKlass::do_local_static_fields(FieldClosure* cl) {
for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) {
- fieldDescriptor fd;
- fd.initialize(this, fs.index());
+ fieldDescriptor& fd = fs.field_descriptor();
cl->do_field(&fd);
}
}
@@ -1258,8 +1266,7 @@ void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAP
void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
for (JavaFieldStream fs(this_oop()); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) {
- fieldDescriptor fd;
- fd.initialize(this_oop(), fs.index());
+ fieldDescriptor& fd = fs.field_descriptor();
f(&fd, CHECK);
}
}
@@ -1281,7 +1288,7 @@ void InstanceKlass::do_nonstatic_fields(FieldClosure* cl) {
int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1), mtClass);
int j = 0;
for (int i = 0; i < length; i += 1) {
- fd.initialize(this, i);
+ fd.reinitialize(this, i);
if (!fd.is_static()) {
fields_sorted[j + 0] = fd.offset();
fields_sorted[j + 1] = i;
@@ -1293,7 +1300,7 @@ void InstanceKlass::do_nonstatic_fields(FieldClosure* cl) {
// _sort_Fn is defined in growableArray.hpp.
qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
for (int i = 0; i < length; i += 2) {
- fd.initialize(this, fields_sorted[i + 1]);
+ fd.reinitialize(this, fields_sorted[i + 1]);
assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
cl->do_field(&fd);
}
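fields_sorted packs (offset, field_index) int pairs, so the comparator passed to qsort orders pairs by their first slot. A sketch of the assumed comparator shape (its definition sits outside this hunk):

    // Assumed shape: 'a' and 'b' each point at an int pair laid out as
    // { field_offset, field_index }; sort by ascending offset.
    static int compare_fields_by_offset(int* a, int* b) {
      return a[0] - b[0];
    }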
@@ -1312,12 +1319,6 @@ void InstanceKlass::array_klasses_do(void f(Klass* k)) {
ArrayKlass::cast(array_klasses())->array_klasses_do(f);
}
-
-void InstanceKlass::with_array_klasses_do(void f(Klass* k)) {
- f(this);
- array_klasses_do(f);
-}
-
#ifdef ASSERT
static int linear_search(Array<Method*>* methods, Symbol* name, Symbol* signature) {
int len = methods->length();
@@ -1682,87 +1683,6 @@ jmethodID InstanceKlass::jmethod_id_or_null(Method* method) {
}
-// Cache an itable index
-void InstanceKlass::set_cached_itable_index(size_t idnum, int index) {
- int* indices = methods_cached_itable_indices_acquire();
- int* to_dealloc_indices = NULL;
-
- // We use a double-check locking idiom here because this cache is
- // performance sensitive. In the normal system, this cache only
- // transitions from NULL to non-NULL which is safe because we use
- // release_set_methods_cached_itable_indices() to advertise the
- // new cache. A partially constructed cache should never be seen
- // by a racing thread. Cache reads and writes proceed without a
- // lock, but creation of the cache itself requires no leaks so a
- // lock is generally acquired in that case.
- //
- // If the RedefineClasses() API has been used, then this cache can
- // grow and we'll have transitions from non-NULL to bigger non-NULL.
- // Cache creation requires no leaks and we require safety between all
- // cache accesses and freeing of the old cache so a lock is generally
- // acquired when the RedefineClasses() API has been used.
-
- if (indices == NULL || idnum_can_increment()) {
- // we need a cache or the cache can grow
- MutexLocker ml(JNICachedItableIndex_lock);
- // reacquire the cache to see if another thread already did the work
- indices = methods_cached_itable_indices_acquire();
- size_t length = 0;
- // cache size is stored in element[0], other elements offset by one
- if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
- size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
- int* new_indices = NEW_C_HEAP_ARRAY(int, size+1, mtClass);
- new_indices[0] = (int)size;
- // copy any existing entries
- size_t i;
- for (i = 0; i < length; i++) {
- new_indices[i+1] = indices[i+1];
- }
- // Set all the rest to -1
- for (i = length; i < size; i++) {
- new_indices[i+1] = -1;
- }
- if (indices != NULL) {
- // We have an old cache to delete so save it for after we
- // drop the lock.
- to_dealloc_indices = indices;
- }
- release_set_methods_cached_itable_indices(indices = new_indices);
- }
-
- if (idnum_can_increment()) {
- // this cache can grow so we have to write to it safely
- indices[idnum+1] = index;
- }
- } else {
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
- }
-
- if (!idnum_can_increment()) {
- // The cache cannot grow and this JNI itable index value does not
- // have to be unique like a jmethodID. If there is a race to set it,
- // it doesn't matter.
- indices[idnum+1] = index;
- }
-
- if (to_dealloc_indices != NULL) {
- // we allocated a new cache so free the old one
- FreeHeap(to_dealloc_indices);
- }
-}
-
-
-// Retrieve a cached itable index
-int InstanceKlass::cached_itable_index(size_t idnum) {
- int* indices = methods_cached_itable_indices_acquire();
- if (indices != NULL && ((size_t)indices[0]) > idnum) {
- // indices exist and are long enough, retrieve possible cached
- return indices[idnum+1];
- }
- return -1;
-}
-
-
//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
@@ -1883,16 +1803,6 @@ bool InstanceKlass::is_dependent_nmethod(nmethod* nm) {
// Garbage collection
-void InstanceKlass::oops_do(OopClosure* cl) {
- Klass::oops_do(cl);
-
- cl->do_oop(adr_protection_domain());
- cl->do_oop(adr_signers());
- cl->do_oop(adr_init_lock());
-
- // Don't walk the arrays since they are walked from the ClassLoaderData objects.
-}
-
#ifdef ASSERT
template <class T> void assert_is_in(T *p) {
T heap_oop = oopDesc::load_heap_oop(p);
@@ -2241,9 +2151,6 @@ void InstanceKlass::remove_unshareable_info() {
m->remove_unshareable_info();
}
- // Need to reinstate when reading back the class.
- set_init_lock(NULL);
-
// do array classes also.
array_klasses_do(remove_unshareable_in_class);
}
@@ -2275,13 +2182,6 @@ void InstanceKlass::restore_unshareable_info(TRAPS) {
ik->itable()->initialize_itable(false, CHECK);
}
- // Allocate a simple java object for a lock.
- // This needs to be a java object because during class initialization
- // it can be held across a java call.
- typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
- Handle h(THREAD, (oop)r);
- ik->set_init_lock(h());
-
// restore constant pool resolved references
ik->constants()->restore_unshareable_info(CHECK);
@@ -2331,16 +2231,15 @@ void InstanceKlass::release_C_heap_structures() {
FreeHeap(jmeths);
}
- MemberNameTable* mnt = member_names();
- if (mnt != NULL) {
- delete mnt;
- set_member_names(NULL);
- }
-
- int* indices = methods_cached_itable_indices_acquire();
- if (indices != (int*)NULL) {
- release_set_methods_cached_itable_indices(NULL);
- FreeHeap(indices);
+ // Deallocate MemberNameTable
+ {
+ Mutex* lock_or_null = SafepointSynchronize::is_at_safepoint() ? NULL : MemberNameTable_lock;
+ MutexLockerEx ml(lock_or_null, Mutex::_no_safepoint_check_flag);
+ MemberNameTable* mnt = member_names();
+ if (mnt != NULL) {
+ delete mnt;
+ set_member_names(NULL);
+ }
}
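The conditional lock above relies on a standard HotSpot idiom: MutexLockerEx treats a NULL mutex as a no-op, so when the VM is already at a safepoint the table is deleted without touching MemberNameTable_lock.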
// release dependencies
@@ -2369,10 +2268,9 @@ void InstanceKlass::release_C_heap_structures() {
}
// deallocate the cached class file
- if (_cached_class_file_bytes != NULL) {
- os::free(_cached_class_file_bytes, mtClass);
- _cached_class_file_bytes = NULL;
- _cached_class_file_len = 0;
+ if (_cached_class_file != NULL) {
+ os::free(_cached_class_file, mtClass);
+ _cached_class_file = NULL;
}
// Decrement symbol reference counts associated with the unloaded class.
@@ -2380,18 +2278,12 @@ void InstanceKlass::release_C_heap_structures() {
// unreference array name derived from this class name (arrays of an unloaded
// class can't be referenced anymore).
if (_array_name != NULL) _array_name->decrement_refcount();
- if (_source_file_name != NULL) _source_file_name->decrement_refcount();
if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension, mtClass);
assert(_total_instanceKlass_count >= 1, "Sanity check");
Atomic::dec(&_total_instanceKlass_count);
}
-void InstanceKlass::set_source_file_name(Symbol* n) {
- _source_file_name = n;
- if (_source_file_name != NULL) _source_file_name->increment_refcount();
-}
-
void InstanceKlass::set_source_debug_extension(char* array, int length) {
if (array == NULL) {
_source_debug_extension = NULL;
@@ -2724,7 +2616,7 @@ void InstanceKlass::remove_osr_nmethod(nmethod* n) {
OsrList_lock->unlock();
}
-nmethod* InstanceKlass::lookup_osr_nmethod(Method* const m, int bci, int comp_level, bool match_level) const {
+nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_level, bool match_level) const {
// This is a short non-blocking critical region, so the no safepoint check is ok.
OsrList_lock->lock_without_safepoint_check();
nmethod* osr = osr_nmethods_head();
@@ -2765,15 +2657,28 @@ nmethod* InstanceKlass::lookup_osr_nmethod(Method* const m, int bci, int comp_le
return NULL;
}
-void InstanceKlass::add_member_name(Handle mem_name) {
+void InstanceKlass::add_member_name(int index, Handle mem_name) {
jweak mem_name_wref = JNIHandles::make_weak_global(mem_name);
MutexLocker ml(MemberNameTable_lock);
+ assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds");
DEBUG_ONLY(No_Safepoint_Verifier nsv);
if (_member_names == NULL) {
- _member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable();
+ _member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(idnum_allocated_count());
}
- _member_names->add_member_name(mem_name_wref);
+ _member_names->add_member_name(index, mem_name_wref);
+}
+
+oop InstanceKlass::get_member_name(int index) {
+ MutexLocker ml(MemberNameTable_lock);
+ assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds");
+ DEBUG_ONLY(No_Safepoint_Verifier nsv);
+
+ if (_member_names == NULL) {
+ return NULL;
+ }
+ oop mem_name = _member_names->get_member_name(index);
+ return mem_name;
}
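Keying the table by method idnum makes per-method MemberName caching possible. A hypothetical caller pattern (names assumed, not part of this hunk):

    // Reuse a cached MemberName for the method with idnum 'index' if one
    // exists; otherwise publish the freshly created one.
    oop cached = ik->get_member_name(index);
    if (cached == NULL) {
      ik->add_member_name(index, mem_name);  // stores a weak JNI handle
    }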
// -----------------------------------------------------------------------------------------------------
@@ -2787,6 +2692,18 @@ static const char* state_names[] = {
"allocated", "loaded", "linked", "being_initialized", "fully_initialized", "initialization_error"
};
+static void print_vtable(intptr_t* start, int len, outputStream* st) {
+ for (int i = 0; i < len; i++) {
+ intptr_t e = start[i];
+ st->print("%d : " INTPTR_FORMAT, i, e);
+ if (e != 0 && ((Metadata*)e)->is_metaspace_object()) {
+ st->print(" ");
+ ((Metadata*)e)->print_value_on(st);
+ }
+ st->cr();
+ }
+}
+
void InstanceKlass::print_on(outputStream* st) const {
assert(is_klass(), "must be klass");
Klass::print_on(st);
@@ -2838,7 +2755,7 @@ void InstanceKlass::print_on(outputStream* st) const {
st->print(BULLET"arrays: "); array_klasses()->print_value_on_maybe_null(st); st->cr();
st->print(BULLET"methods: "); methods()->print_value_on(st); st->cr();
- if (Verbose) {
+ if (Verbose || WizardMode) {
Array<Method*>* method_array = methods();
for(int i = 0; i < method_array->length(); i++) {
st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
@@ -2853,10 +2770,7 @@ void InstanceKlass::print_on(outputStream* st) const {
class_loader_data()->print_value_on(st);
st->cr();
}
- st->print(BULLET"protection domain: "); ((InstanceKlass*)this)->protection_domain()->print_value_on(st); st->cr();
st->print(BULLET"host class: "); host_klass()->print_value_on_maybe_null(st); st->cr();
- st->print(BULLET"signers: "); signers()->print_value_on(st); st->cr();
- st->print(BULLET"init_lock: "); ((oop)_init_lock)->print_value_on(st); st->cr();
if (source_file_name() != NULL) {
st->print(BULLET"source file: ");
source_file_name()->print_value_on(st);
@@ -2872,24 +2786,17 @@ void InstanceKlass::print_on(outputStream* st) const {
st->print(BULLET"field annotations: "); fields_annotations()->print_value_on(st); st->cr();
st->print(BULLET"field type annotations: "); fields_type_annotations()->print_value_on(st); st->cr();
{
- ResourceMark rm;
- // PreviousVersionInfo objects returned via PreviousVersionWalker
- // contain a GrowableArray of handles. We have to clean up the
- // GrowableArray _after_ the PreviousVersionWalker destructor
- // has destroyed the handles.
- {
- bool have_pv = false;
- PreviousVersionWalker pvw((InstanceKlass*)this);
- for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
- pv_info != NULL; pv_info = pvw.next_previous_version()) {
- if (!have_pv)
- st->print(BULLET"previous version: ");
- have_pv = true;
- pv_info->prev_constant_pool_handle()()->print_value_on(st);
- }
- if (have_pv) st->cr();
- } // pvw is cleaned up
- } // rm is cleaned up
+ bool have_pv = false;
+ PreviousVersionWalker pvw(Thread::current(), (InstanceKlass*)this);
+ for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+ pv_node != NULL; pv_node = pvw.next_previous_version()) {
+ if (!have_pv)
+ st->print(BULLET"previous version: ");
+ have_pv = true;
+ pv_node->prev_constant_pool()->print_value_on(st);
+ }
+ if (have_pv) st->cr();
+ } // pvw is cleaned up
if (generic_signature() != NULL) {
st->print(BULLET"generic signature: ");
@@ -2899,7 +2806,9 @@ void InstanceKlass::print_on(outputStream* st) const {
st->print(BULLET"inner classes: "); inner_classes()->print_value_on(st); st->cr();
st->print(BULLET"java mirror: "); java_mirror()->print_value_on(st); st->cr();
st->print(BULLET"vtable length %d (start addr: " INTPTR_FORMAT ")", vtable_length(), start_of_vtable()); st->cr();
+ if (vtable_length() > 0 && (Verbose || WizardMode)) print_vtable(start_of_vtable(), vtable_length(), st);
st->print(BULLET"itable length %d (start addr: " INTPTR_FORMAT ")", itable_length(), start_of_itable()); st->cr();
+ if (itable_length() > 0 && (Verbose || WizardMode)) print_vtable(start_of_itable(), itable_length(), st);
st->print_cr(BULLET"---- static fields (%d words):", static_field_size());
FieldPrinter print_static_field(st);
((InstanceKlass*)this)->do_local_static_fields(&print_static_field);
@@ -2921,6 +2830,7 @@ void InstanceKlass::print_on(outputStream* st) const {
void InstanceKlass::print_value_on(outputStream* st) const {
assert(is_klass(), "must be klass");
+ if (Verbose || WizardMode) access_flags().print_on(st);
name()->print_value_on(st);
}
@@ -3057,7 +2967,6 @@ void InstanceKlass::collect_statistics(KlassSizeStats *sz) const {
n += (sz->_method_ordering_bytes = sz->count_array(method_ordering()));
n += (sz->_local_interfaces_bytes = sz->count_array(local_interfaces()));
n += (sz->_transitive_interfaces_bytes = sz->count_array(transitive_interfaces()));
- n += (sz->_signers_bytes = sz->count_array(signers()));
n += (sz->_fields_bytes = sz->count_array(fields()));
n += (sz->_inner_classes_bytes = sz->count_array(inner_classes()));
sz->_ro_bytes += n;
@@ -3102,27 +3011,26 @@ class VerifyFieldClosure: public OopClosure {
virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
};
-void InstanceKlass::verify_on(outputStream* st) {
- Klass::verify_on(st);
- Thread *thread = Thread::current();
-
+void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
#ifndef PRODUCT
- // Avoid redundant verifies
+ // Avoid redundant verifies; this check really should run in product builds too.
if (_verify_count == Universe::verify_count()) return;
_verify_count = Universe::verify_count();
#endif
- // Verify that klass is present in SystemDictionary
- if (is_loaded() && !is_anonymous()) {
+
+ // Verify Klass
+ Klass::verify_on(st, check_dictionary);
+
+ // Verify that klass is present in SystemDictionary if not already
+ // verifying the SystemDictionary.
+ if (is_loaded() && !is_anonymous() && check_dictionary) {
Symbol* h_name = name();
SystemDictionary::verify_obj_klass_present(h_name, class_loader_data());
}
- // Verify static fields
- VerifyFieldClosure blk;
-
// Verify vtables
if (is_linked()) {
- ResourceMark rm(thread);
+ ResourceMark rm;
// $$$ This used to be done only for m/s collections. Doing it
// always seemed a valid generalization. (DLD -- 6/00)
vtable()->verify(st);
@@ -3130,7 +3038,6 @@ void InstanceKlass::verify_on(outputStream* st) {
// Verify first subklass
if (subklass_oop() != NULL) {
- guarantee(subklass_oop()->is_metadata(), "should be in metaspace");
guarantee(subklass_oop()->is_klass(), "should be klass");
}
@@ -3142,7 +3049,6 @@ void InstanceKlass::verify_on(outputStream* st) {
fatal(err_msg("subclass points to itself " PTR_FORMAT, sib));
}
- guarantee(sib->is_metadata(), "should be in metaspace");
guarantee(sib->is_klass(), "should be klass");
guarantee(sib->super() == super, "siblings should have same superklass");
}
@@ -3178,7 +3084,6 @@ void InstanceKlass::verify_on(outputStream* st) {
if (methods() != NULL) {
Array<Method*>* methods = this->methods();
for (int j = 0; j < methods->length(); j++) {
- guarantee(methods->at(j)->is_metadata(), "should be in metaspace");
guarantee(methods->at(j)->is_method(), "non-method in methods array");
}
for (int j = 0; j < methods->length() - 1; j++) {
@@ -3216,24 +3121,15 @@ void InstanceKlass::verify_on(outputStream* st) {
// Verify other fields
if (array_klasses() != NULL) {
- guarantee(array_klasses()->is_metadata(), "should be in metaspace");
guarantee(array_klasses()->is_klass(), "should be klass");
}
if (constants() != NULL) {
- guarantee(constants()->is_metadata(), "should be in metaspace");
guarantee(constants()->is_constantPool(), "should be constant pool");
}
- if (protection_domain() != NULL) {
- guarantee(protection_domain()->is_oop(), "should be oop");
- }
const Klass* host = host_klass();
if (host != NULL) {
- guarantee(host->is_metadata(), "should be in metaspace");
guarantee(host->is_klass(), "should be klass");
}
- if (signers() != NULL) {
- guarantee(signers()->is_objArray(), "should be obj array");
- }
}
void InstanceKlass::oop_verify_on(oop obj, outputStream* st) {
@@ -3431,34 +3327,34 @@ void InstanceKlass::add_previous_version(instanceKlassHandle ikh,
Array<Method*>* old_methods = ikh->methods();
if (cp_ref->on_stack()) {
- PreviousVersionNode * pv_node = NULL;
- if (emcp_method_count == 0) {
+ PreviousVersionNode * pv_node = NULL;
+ if (emcp_method_count == 0) {
// non-shared ConstantPool gets a reference
- pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), NULL);
- RC_TRACE(0x00000400,
- ("add: all methods are obsolete; flushing any EMCP refs"));
- } else {
- int local_count = 0;
+ pv_node = new PreviousVersionNode(cp_ref, NULL);
+ RC_TRACE(0x00000400,
+ ("add: all methods are obsolete; flushing any EMCP refs"));
+ } else {
+ int local_count = 0;
GrowableArray<Method*>* method_refs = new (ResourceObj::C_HEAP, mtClass)
- GrowableArray<Method*>(emcp_method_count, true);
- for (int i = 0; i < old_methods->length(); i++) {
- if (emcp_methods->at(i)) {
- // this old method is EMCP. Save it only if it's on the stack
- Method* old_method = old_methods->at(i);
- if (old_method->on_stack()) {
- method_refs->append(old_method);
+ GrowableArray<Method*>(emcp_method_count, true);
+ for (int i = 0; i < old_methods->length(); i++) {
+ if (emcp_methods->at(i)) {
+ // this old method is EMCP. Save it only if it's on the stack
+ Method* old_method = old_methods->at(i);
+ if (old_method->on_stack()) {
+ method_refs->append(old_method);
+ }
+ if (++local_count >= emcp_method_count) {
+ // no more EMCP methods so bail out now
+ break;
}
- if (++local_count >= emcp_method_count) {
- // no more EMCP methods so bail out now
- break;
}
}
- }
// non-shared ConstantPool gets a reference
- pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), method_refs);
+ pv_node = new PreviousVersionNode(cp_ref, method_refs);
}
// append new previous version.
- _previous_versions->append(pv_node);
+ _previous_versions->append(pv_node);
}
// Since the caller is the VMThread and we are at a safepoint, this
@@ -3559,18 +3455,27 @@ Method* InstanceKlass::method_with_idnum(int idnum) {
return m;
}
}
+ // None found, return null for the caller to handle.
+ return NULL;
}
return m;
}
+jint InstanceKlass::get_cached_class_file_len() {
+ return VM_RedefineClasses::get_cached_class_file_len(_cached_class_file);
+}
+
+unsigned char * InstanceKlass::get_cached_class_file_bytes() {
+ return VM_RedefineClasses::get_cached_class_file_bytes(_cached_class_file);
+}
+
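InstanceKlass no longer knows the blob's layout; both getters forward to VM_RedefineClasses, which owns the JvmtiCachedClassFileData definition (declared in jvmtiRedefineClasses.hpp). A plausible shape, assumed here purely for illustration:

    struct JvmtiCachedClassFileData {
      jint length;            // number of cached classfile bytes
      unsigned char data[1];  // classfile bytes follow inline
    };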
// Construct a PreviousVersionNode entry for the array hung off
// the InstanceKlass.
PreviousVersionNode::PreviousVersionNode(ConstantPool* prev_constant_pool,
- bool prev_cp_is_weak, GrowableArray<Method*>* prev_EMCP_methods) {
+ GrowableArray<Method*>* prev_EMCP_methods) {
_prev_constant_pool = prev_constant_pool;
- _prev_cp_is_weak = prev_cp_is_weak;
_prev_EMCP_methods = prev_EMCP_methods;
}
@@ -3586,99 +3491,38 @@ PreviousVersionNode::~PreviousVersionNode() {
}
}
-
-// Construct a PreviousVersionInfo entry
-PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
- _prev_constant_pool_handle = constantPoolHandle(); // NULL handle
- _prev_EMCP_method_handles = NULL;
-
- ConstantPool* cp = pv_node->prev_constant_pool();
- assert(cp != NULL, "constant pool ref was unexpectedly cleared");
- if (cp == NULL) {
- return; // robustness
- }
-
- // make the ConstantPool* safe to return
- _prev_constant_pool_handle = constantPoolHandle(cp);
-
- GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
- if (method_refs == NULL) {
- // the InstanceKlass did not have any EMCP methods
- return;
- }
-
- _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
-
- int n_methods = method_refs->length();
- for (int i = 0; i < n_methods; i++) {
- Method* method = method_refs->at(i);
- assert (method != NULL, "method has been cleared");
- if (method == NULL) {
- continue; // robustness
- }
- // make the Method* safe to return
- _prev_EMCP_method_handles->append(methodHandle(method));
- }
-}
-
-
-// Destroy a PreviousVersionInfo
-PreviousVersionInfo::~PreviousVersionInfo() {
- // Since _prev_EMCP_method_handles is not C-heap allocated, we
- // don't have to delete it.
-}
-
-
// Construct a helper for walking the previous versions array
-PreviousVersionWalker::PreviousVersionWalker(InstanceKlass *ik) {
+PreviousVersionWalker::PreviousVersionWalker(Thread* thread, InstanceKlass *ik) {
+ _thread = thread;
_previous_versions = ik->previous_versions();
_current_index = 0;
- // _hm needs no initialization
_current_p = NULL;
-}
-
-
-// Destroy a PreviousVersionWalker
-PreviousVersionWalker::~PreviousVersionWalker() {
- // Delete the current info just in case the caller didn't walk to
- // the end of the previous versions list. No harm if _current_p is
- // already NULL.
- delete _current_p;
-
- // When _hm is destroyed, all the Handles returned in
- // PreviousVersionInfo objects will be destroyed.
- // Also, after this destructor is finished it will be
- // safe to delete the GrowableArray allocated in the
- // PreviousVersionInfo objects.
+ _current_constant_pool_handle = constantPoolHandle(thread, ik->constants());
}
// Return the interesting information for the next previous version
// of the klass. Returns NULL if there are no more previous versions.
-PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
+PreviousVersionNode* PreviousVersionWalker::next_previous_version() {
if (_previous_versions == NULL) {
// no previous versions so nothing to return
return NULL;
}
- delete _current_p; // cleanup the previous info for the caller
- _current_p = NULL; // reset to NULL so we don't delete same object twice
+ _current_p = NULL; // reset to NULL
+ _current_constant_pool_handle = NULL;
int length = _previous_versions->length();
while (_current_index < length) {
PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
- PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass)
- PreviousVersionInfo(pv_node);
-
- constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
- assert (!cp_h.is_null(), "null cp found in previous version");
- // The caller will need to delete pv_info when they are done with it.
- _current_p = pv_info;
- return pv_info;
+ // Save a handle to the constant pool for this previous version,
+ // which keeps all the methods from being deallocated.
+ _current_constant_pool_handle = constantPoolHandle(_thread, pv_node->prev_constant_pool());
+ _current_p = pv_node;
+ return pv_node;
}
- // all of the underlying nodes' info has been deleted
return NULL;
} // end next_previous_version()
diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp
index 55b8ff48c..c3745ee24 100644
--- a/src/share/vm/oops/instanceKlass.hpp
+++ b/src/share/vm/oops/instanceKlass.hpp
@@ -26,6 +26,7 @@
#define SHARE_VM_OOPS_INSTANCEKLASS_HPP
#include "classfile/classLoaderData.hpp"
+#include "memory/referenceType.hpp"
#include "oops/annotations.hpp"
#include "oops/constMethod.hpp"
#include "oops/fieldInfo.hpp"
@@ -37,6 +38,7 @@
#include "utilities/accessFlags.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
+#include "trace/traceMacros.hpp"
// An InstanceKlass is the VM level representation of a Java class.
// It contains all information needed for a class at execution runtime.
@@ -58,8 +60,6 @@
// [fields ]
// [constants ]
// [class loader ]
-// [protection domain ]
-// [signers ]
// [source file name ]
// [inner classes ]
// [static field size ]
@@ -133,6 +133,8 @@ class OopMapBlock VALUE_OBJ_CLASS_SPEC {
uint _count;
};
+struct JvmtiCachedClassFileData;
+
class InstanceKlass: public Klass {
friend class VMStructs;
friend class ClassFileParser;
@@ -180,16 +182,6 @@ class InstanceKlass: public Klass {
static volatile int _total_instanceKlass_count;
protected:
- // Protection domain.
- oop _protection_domain;
- // Class signers.
- objArrayOop _signers;
- // Lock for (1) initialization; (2) access to the ConstantPool of this class.
- // Must be one per class and it has to be a VM internal object so java code
- // cannot lock it (like the mirror).
- // It has to be an object not a Mutex because it's held through java calls.
- volatile oop _init_lock;
-
// Annotations for this class
Annotations* _annotations;
// Array classes holding elements of this class.
@@ -209,14 +201,10 @@ class InstanceKlass: public Klass {
// number_of_inner_classes * 4 + enclosing_method_attribute_size.
Array<jushort>* _inner_classes;
- // Name of source file containing this klass, NULL if not specified.
- Symbol* _source_file_name;
// the source debug extension for this klass, NULL if not specified.
// Specified as UTF-8 string without terminating zero byte in the classfile,
// it is stored in the instanceklass as a NULL-terminated UTF-8 string
char* _source_debug_extension;
- // Generic signature, or null if none.
- Symbol* _generic_signature;
// Array name derived from this class which needs unreferencing
// if this class is unloaded.
Symbol* _array_name;
@@ -225,6 +213,12 @@ class InstanceKlass: public Klass {
// (including inherited fields but after header_size()).
int _nonstatic_field_size;
int _static_field_size; // number of words used by static fields (oop and non-oop) in this klass
+ // Constant pool index to the utf8 entry of the Generic signature,
+ // or 0 if none.
+ u2 _generic_signature_index;
+ // Constant pool index to the utf8 entry for the name of source file
+ // containing this klass, 0 if not specified.
+ u2 _source_file_name_index;
u2 _static_oop_field_count;// number of static oop fields in this klass
u2 _java_fields_count; // The number of declared Java fields
int _nonstatic_oop_map_size;// size in words of nonstatic oop map blocks
@@ -251,7 +245,6 @@ class InstanceKlass: public Klass {
MemberNameTable* _member_names; // Member names
JNIid* _jni_ids; // First JNI identifier for static fields in this class
jmethodID* _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or NULL if none
- int* _methods_cached_itable_indices; // itable_index cache for JNI invoke corresponding to methods idnum, or NULL
nmethodBucket* _dependencies; // list of dependent nmethods
nmethod* _osr_nmethods_head; // Head of list of on-stack replacement nmethods for this class
BreakpointInfo* _breakpoints; // bpt lists, managed by Method*
@@ -259,8 +252,8 @@ class InstanceKlass: public Klass {
// InstanceKlass. See PreviousVersionWalker below.
GrowableArray<PreviousVersionNode *>* _previous_versions;
// JVMTI fields can be moved to their own structure - see 6315920
- unsigned char * _cached_class_file_bytes; // JVMTI: cached class file, before retransformable agent modified it in CFLH
- jint _cached_class_file_len; // JVMTI: length of above
+ // JVMTI: cached class file, before retransformable agent modified it in CFLH
+ JvmtiCachedClassFileData* _cached_class_file;
volatile u2 _idnum_allocated_count; // JNI/JVMTI: increments with the addition of methods, old ids don't change
@@ -527,8 +520,10 @@ class InstanceKlass: public Klass {
void set_constants(ConstantPool* c) { _constants = c; }
// protection domain
- oop protection_domain() { return _protection_domain; }
- void set_protection_domain(oop pd) { klass_oop_store(&_protection_domain, pd); }
+ oop protection_domain() const;
+
+ // signers
+ objArrayOop signers() const;
// host class
Klass* host_klass() const {
@@ -575,13 +570,17 @@ class InstanceKlass: public Klass {
}
}
- // signers
- objArrayOop signers() const { return _signers; }
- void set_signers(objArrayOop s) { klass_oop_store((oop*)&_signers, s); }
-
// source file name
- Symbol* source_file_name() const { return _source_file_name; }
- void set_source_file_name(Symbol* n);
+ Symbol* source_file_name() const {
+ return (_source_file_name_index == 0) ?
+ (Symbol*)NULL : _constants->symbol_at(_source_file_name_index);
+ }
+ u2 source_file_name_index() const {
+ return _source_file_name_index;
+ }
+ void set_source_file_name_index(u2 sourcefile_index) {
+ _source_file_name_index = sourcefile_index;
+ }
// minor and major version numbers of class file
u2 minor_version() const { return _minor_version; }
@@ -627,11 +626,12 @@ class InstanceKlass: public Klass {
static void purge_previous_versions(InstanceKlass* ik);
// JVMTI: Support for caching a class file before it is modified by an agent that can do retransformation
- void set_cached_class_file(unsigned char *class_file_bytes,
- jint class_file_len) { _cached_class_file_len = class_file_len;
- _cached_class_file_bytes = class_file_bytes; }
- jint get_cached_class_file_len() { return _cached_class_file_len; }
- unsigned char * get_cached_class_file_bytes() { return _cached_class_file_bytes; }
+ void set_cached_class_file(JvmtiCachedClassFileData *data) {
+ _cached_class_file = data;
+ }
+ JvmtiCachedClassFileData * get_cached_class_file() { return _cached_class_file; }
+ jint get_cached_class_file_len();
+ unsigned char * get_cached_class_file_bytes();
// JVMTI: Support for caching of field indices, types, and offsets
void set_jvmti_cached_class_field_map(JvmtiCachedClassFieldMap* descriptor) {
@@ -657,8 +657,16 @@ class InstanceKlass: public Klass {
void set_initial_method_idnum(u2 value) { _idnum_allocated_count = value; }
// generics support
- Symbol* generic_signature() const { return _generic_signature; }
- void set_generic_signature(Symbol* sig) { _generic_signature = sig; }
+ Symbol* generic_signature() const {
+ return (_generic_signature_index == 0) ?
+ (Symbol*)NULL : _constants->symbol_at(_generic_signature_index);
+ }
+ u2 generic_signature_index() const {
+ return _generic_signature_index;
+ }
+ void set_generic_signature_index(u2 sig_index) {
+ _generic_signature_index = sig_index;
+ }
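Swapping each cached Symbol* for a u2 constant-pool index removes two pointer fields per class and the refcounting that went with them; the Symbol is recomputed on demand. Spelled out as a sketch (ik assumed):

    // What generic_signature() now does under the hood:
    Symbol* sig = (ik->generic_signature_index() == 0)
        ? (Symbol*)NULL
        : ik->constants()->symbol_at(ik->generic_signature_index());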
u2 enclosing_method_data(int offset);
u2 enclosing_method_class_index() {
@@ -681,10 +689,6 @@ class InstanceKlass: public Klass {
size_t *length_p, jmethodID* id_p);
jmethodID jmethod_id_or_null(Method* method);
- // cached itable index support
- void set_cached_itable_index(size_t idnum, int index);
- int cached_itable_index(size_t idnum);
-
// annotations support
Annotations* annotations() const { return _annotations; }
void set_annotations(Annotations* anno) { _annotations = anno; }
@@ -739,7 +743,7 @@ class InstanceKlass: public Klass {
void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; };
void add_osr_nmethod(nmethod* n);
void remove_osr_nmethod(nmethod* n);
- nmethod* lookup_osr_nmethod(Method* const m, int bci, int level, bool match_level) const;
+ nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const;
// Breakpoint support (see methods on Method* for details)
BreakpointInfo* breakpoints() const { return _breakpoints; };
@@ -806,7 +810,6 @@ class InstanceKlass: public Klass {
void methods_do(void f(Method* method));
void array_klasses_do(void f(Klass* k));
void array_klasses_do(void f(Klass* k, TRAPS), TRAPS);
- void with_array_klasses_do(void f(Klass* k));
bool super_types_do(SuperTypeClosure* blk);
// Casting from Klass*
@@ -886,10 +889,6 @@ class InstanceKlass: public Klass {
}
}
- // Allocation profiling support
- juint alloc_size() const { return _alloc_count * size_helper(); }
- void set_alloc_size(juint n) {}
-
// Use this to return the size of an instance in heap words:
int size_helper() const {
return layout_helper_to_size_helper(layout_helper());
@@ -912,8 +911,6 @@ class InstanceKlass: public Klass {
Method* method_at_itable(Klass* holder, int index, TRAPS);
// Garbage collection
- virtual void oops_do(OopClosure* cl);
-
void oop_follow_contents(oop obj);
int oop_adjust_pointers(oop obj);
@@ -992,21 +989,14 @@ private:
void release_set_methods_jmethod_ids(jmethodID* jmeths)
{ OrderAccess::release_store_ptr(&_methods_jmethod_ids, jmeths); }
- int* methods_cached_itable_indices_acquire() const
- { return (int*)OrderAccess::load_ptr_acquire(&_methods_cached_itable_indices); }
- void release_set_methods_cached_itable_indices(int* indices)
- { OrderAccess::release_store_ptr(&_methods_cached_itable_indices, indices); }
-
// Lock during initialization
public:
- volatile oop init_lock() const {return _init_lock; }
+ // Lock for (1) initialization; (2) access to the ConstantPool of this class.
+ // Must be one per class and it has to be a VM internal object so java code
+ // cannot lock it (like the mirror).
+ // It has to be an object not a Mutex because it's held through java calls.
+ volatile oop init_lock() const;
private:
- void set_init_lock(oop value) { klass_oop_store(&_init_lock, value); }
-
- // Offsets for memory management
- oop* adr_protection_domain() const { return (oop*)&this->_protection_domain;}
- oop* adr_signers() const { return (oop*)&this->_signers;}
- oop* adr_init_lock() const { return (oop*)&this->_init_lock;}
// Static methods that are used to implement member methods where an exposed this pointer
// is needed due to possible GCs
@@ -1040,7 +1030,8 @@ public:
// JSR-292 support
MemberNameTable* member_names() { return _member_names; }
void set_member_names(MemberNameTable* member_names) { _member_names = member_names; }
- void add_member_name(Handle member_name);
+ void add_member_name(int index, Handle member_name);
+ oop get_member_name(int index);
public:
// JVMTI support
@@ -1065,7 +1056,7 @@ public:
const char* internal_name() const;
// Verification
- void verify_on(outputStream* st);
+ void verify_on(outputStream* st, bool check_dictionary);
void oop_verify_on(oop obj, outputStream* st);
};
@@ -1135,21 +1126,11 @@ class BreakpointInfo;
// A collection point for interesting information about the previous
-// version(s) of an InstanceKlass. This class uses weak references to
-// the information so that the information may be collected as needed
-// by the system. If the information is shared, then a regular
-// reference must be used because a weak reference would be seen as
-// collectible. A GrowableArray of PreviousVersionNodes is attached
-// to the InstanceKlass as needed. See PreviousVersionWalker below.
+// version(s) of an InstanceKlass. A GrowableArray of PreviousVersionNodes
+// is attached to the InstanceKlass as needed. See PreviousVersionWalker below.
class PreviousVersionNode : public CHeapObj<mtClass> {
private:
- // A shared ConstantPool is never collected so we'll always have
- // a reference to it so we can update items in the cache. We'll
- // have a weak reference to a non-shared ConstantPool until all
- // of the methods (EMCP or obsolete) have been collected; the
- // non-shared ConstantPool becomes collectible at that point.
- ConstantPool* _prev_constant_pool; // regular or weak reference
- bool _prev_cp_is_weak; // true if not a shared ConstantPool
+ ConstantPool* _prev_constant_pool;
// If the previous version of the InstanceKlass doesn't have any
// EMCP methods, then _prev_EMCP_methods will be NULL. If all the
@@ -1158,8 +1139,8 @@ class PreviousVersionNode : public CHeapObj<mtClass> {
GrowableArray<Method*>* _prev_EMCP_methods;
public:
- PreviousVersionNode(ConstantPool* prev_constant_pool, bool prev_cp_is_weak,
- GrowableArray<Method*>* prev_EMCP_methods);
+ PreviousVersionNode(ConstantPool* prev_constant_pool,
+ GrowableArray<Method*>* prev_EMCP_methods);
~PreviousVersionNode();
ConstantPool* prev_constant_pool() const {
return _prev_constant_pool;
@@ -1170,59 +1151,26 @@ public:
};
-// A Handle-ized version of PreviousVersionNode.
-class PreviousVersionInfo : public ResourceObj {
- private:
- constantPoolHandle _prev_constant_pool_handle;
- // If the previous version of the InstanceKlass doesn't have any
- // EMCP methods, then _prev_EMCP_methods will be NULL. Since the
- // methods cannot be collected while we hold a handle,
- // _prev_EMCP_methods should never have a length of zero.
- GrowableArray<methodHandle>* _prev_EMCP_method_handles;
-
-public:
- PreviousVersionInfo(PreviousVersionNode *pv_node);
- ~PreviousVersionInfo();
- constantPoolHandle prev_constant_pool_handle() const {
- return _prev_constant_pool_handle;
- }
- GrowableArray<methodHandle>* prev_EMCP_method_handles() const {
- return _prev_EMCP_method_handles;
- }
-};
-
-
-// Helper object for walking previous versions. This helper cleans up
-// the Handles that it allocates when the helper object is destroyed.
-// The PreviousVersionInfo object returned by next_previous_version()
-// is only valid until a subsequent call to next_previous_version() or
-// the helper object is destroyed.
+// Helper object for walking previous versions.
class PreviousVersionWalker : public StackObj {
private:
+ Thread* _thread;
GrowableArray<PreviousVersionNode *>* _previous_versions;
int _current_index;
- // Fields for cleaning up when we are done walking the previous versions:
- // A HandleMark for the PreviousVersionInfo handles:
- HandleMark _hm;
- // It would be nice to have a ResourceMark field in this helper also,
- // but the ResourceMark code says to be careful to delete handles held
- // in GrowableArrays _before_ deleting the GrowableArray. Since we
- // can't guarantee the order in which the fields are destroyed, we
- // have to let the creator of the PreviousVersionWalker object do
- // the right thing. Also, adding a ResourceMark here causes an
- // include loop.
+ // A pointer to the current node in the walk.
+ PreviousVersionNode* _current_p;
- // A pointer to the current info object so we can handle the deletes.
- PreviousVersionInfo * _current_p;
+ // The constant pool handle keeps all the methods in this class from being
+ // deallocated from the metaspace during class unloading.
+ constantPoolHandle _current_constant_pool_handle;
public:
- PreviousVersionWalker(InstanceKlass *ik);
- ~PreviousVersionWalker();
+ PreviousVersionWalker(Thread* thread, InstanceKlass *ik);
// Return the interesting information for the next previous version
// of the klass. Returns NULL if there are no more previous versions.
- PreviousVersionInfo* next_previous_version();
+ PreviousVersionNode* next_previous_version();
};
diff --git a/src/share/vm/oops/instanceOop.hpp b/src/share/vm/oops/instanceOop.hpp
index 9ebb9d472..bdac1992e 100644
--- a/src/share/vm/oops/instanceOop.hpp
+++ b/src/share/vm/oops/instanceOop.hpp
@@ -37,9 +37,9 @@ class instanceOopDesc : public oopDesc {
// If compressed, the offset of the fields of the instance may not be aligned.
static int base_offset_in_bytes() {
- // offset computation code breaks if UseCompressedKlassPointers
+ // offset computation code breaks if UseCompressedClassPointers
// only is true
- return (UseCompressedOops && UseCompressedKlassPointers) ?
+ return (UseCompressedOops && UseCompressedClassPointers) ?
klass_gap_offset_in_bytes() :
sizeof(instanceOopDesc);
}
diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp
index 06c644c34..22b570bbf 100644
--- a/src/share/vm/oops/klass.cpp
+++ b/src/share/vm/oops/klass.cpp
@@ -37,6 +37,7 @@
#include "oops/klass.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/atomic.hpp"
+#include "trace/traceMacros.hpp"
#include "utilities/stack.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
@@ -50,7 +51,7 @@ void Klass::set_name(Symbol* n) {
if (_name != NULL) _name->increment_refcount();
}
-bool Klass::is_subclass_of(Klass* k) const {
+bool Klass::is_subclass_of(const Klass* k) const {
// Run up the super chain and check
if (this == k) return true;
@@ -138,9 +139,9 @@ Method* Klass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
return NULL;
}
-void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) {
+void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() {
return Metaspace::allocate(loader_data, word_size, /*read_only*/false,
- Metaspace::ClassType, CHECK_NULL);
+ MetaspaceObj::ClassType, CHECK_NULL);
}
Klass::Klass() {
@@ -167,8 +168,7 @@ Klass::Klass() {
set_subklass(NULL);
set_next_sibling(NULL);
set_next_link(NULL);
- set_alloc_count(0);
- TRACE_SET_KLASS_TRACE_ID(this, 0);
+ TRACE_INIT_ID(this);
set_prototype_header(markOopDesc::prototype());
set_biased_lock_revocation_count(0);
@@ -376,7 +376,6 @@ void Klass::append_to_sibling_list() {
}
bool Klass::is_loader_alive(BoolObjectClosure* is_alive) {
- assert(is_metadata(), "p is not meta-data");
assert(ClassLoaderDataGraph::contains((address)this), "is in the metaspace");
#ifdef ASSERT
@@ -511,8 +510,9 @@ void Klass::restore_unshareable_info(TRAPS) {
// (same order as class file parsing)
loader_data->add_class(this);
- // Recreate the class mirror
- java_lang_Class::create_mirror(this, CHECK);
+ // Recreate the class mirror. The protection_domain is always null for
+ // the boot loader, for now.
+ java_lang_Class::create_mirror(this, Handle(NULL), CHECK);
}
Klass* Klass::array_klass_or_null(int rank) {
@@ -542,12 +542,6 @@ Klass* Klass::array_klass_impl(bool or_null, TRAPS) {
return NULL;
}
-
-void Klass::with_array_klasses_do(void f(Klass* k)) {
- f(this);
-}
-
-
oop Klass::class_loader() const { return class_loader_data()->class_loader(); }
const char* Klass::external_name() const {
@@ -646,27 +640,24 @@ void Klass::collect_statistics(KlassSizeStats *sz) const {
// Verification
-void Klass::verify_on(outputStream* st) {
- guarantee(!Universe::heap()->is_in_reserved(this), "Shouldn't be");
- guarantee(this->is_metadata(), "should be in metaspace");
+void Klass::verify_on(outputStream* st, bool check_dictionary) {
+ // This can be expensive, but it is worth checking that this klass is
+ // actually in the CLD graph; the check below is skipped in product builds.
assert(ClassLoaderDataGraph::contains((address)this), "Should be");
guarantee(this->is_klass(),"should be klass");
if (super() != NULL) {
- guarantee(super()->is_metadata(), "should be in metaspace");
guarantee(super()->is_klass(), "should be klass");
}
if (secondary_super_cache() != NULL) {
Klass* ko = secondary_super_cache();
- guarantee(ko->is_metadata(), "should be in metaspace");
guarantee(ko->is_klass(), "should be klass");
}
for ( uint i = 0; i < primary_super_limit(); i++ ) {
Klass* ko = _primary_supers[i];
if (ko != NULL) {
- guarantee(ko->is_metadata(), "should be in metaspace");
guarantee(ko->is_klass(), "should be klass");
}
}
@@ -678,19 +669,28 @@ void Klass::verify_on(outputStream* st) {
void Klass::oop_verify_on(oop obj, outputStream* st) {
guarantee(obj->is_oop(), "should be oop");
- guarantee(obj->klass()->is_metadata(), "should not be in Java heap");
guarantee(obj->klass()->is_klass(), "klass field is not a klass");
}
#ifndef PRODUCT
-void Klass::verify_vtable_index(int i) {
+bool Klass::verify_vtable_index(int i) {
if (oop_is_instance()) {
- assert(i>=0 && i<((InstanceKlass*)this)->vtable_length()/vtableEntry::size(), "index out of bounds");
+ int limit = ((InstanceKlass*)this)->vtable_length()/vtableEntry::size();
+ assert(i >= 0 && i < limit, err_msg("index %d out of bounds %d", i, limit));
} else {
assert(oop_is_array(), "Must be");
- assert(i>=0 && i<((ArrayKlass*)this)->vtable_length()/vtableEntry::size(), "index out of bounds");
+ int limit = ((ArrayKlass*)this)->vtable_length()/vtableEntry::size();
+ assert(i >= 0 && i < limit, err_msg("index %d out of bounds %d", i, limit));
}
+ return true;
+}
+
+bool Klass::verify_itable_index(int i) {
+ assert(oop_is_instance(), "");
+ int method_count = klassItable::method_count_for_interface(this);
+ assert(i >= 0 && i < method_count, "index out of bounds");
+ return true;
}
#endif
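Both checkers now return bool so call sites can embed them directly in an assert, e.g. assert(k->verify_vtable_index(i), "sanity"), letting the entire check compile away in product builds.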
diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp
index d2a419146..9855fdd32 100644
--- a/src/share/vm/oops/klass.hpp
+++ b/src/share/vm/oops/klass.hpp
@@ -79,7 +79,6 @@
// [last_biased_lock_bulk_revocation_time] (64 bits)
// [prototype_header]
// [biased_lock_revocation_count]
-// [alloc_count ]
// [_modified_oops]
// [_accumulated_modified_oops]
// [trace_id]
@@ -171,8 +170,6 @@ class Klass : public Metadata {
markOop _prototype_header; // Used when biased locking is both enabled and disabled for this type
jint _biased_lock_revocation_count;
- juint _alloc_count; // allocation profiling support
-
TRACE_DEFINE_KLASS_TRACE_ID;
// Remembered sets support for the oops in the klasses.
@@ -182,7 +179,7 @@ class Klass : public Metadata {
// Constructor
Klass();
- void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS);
+ void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw();
public:
bool is_klass() const volatile { return true; }
@@ -290,11 +287,6 @@ class Klass : public Metadata {
void set_next_sibling(Klass* s);
public:
- // Allocation profiling support
- juint alloc_count() const { return _alloc_count; }
- void set_alloc_count(juint n) { _alloc_count = n; }
- virtual juint alloc_size() const = 0;
- virtual void set_alloc_size(juint n) = 0;
// Compiler support
static ByteSize super_offset() { return in_ByteSize(offset_of(Klass, _super)); }
@@ -360,7 +352,8 @@ class Klass : public Metadata {
static int layout_helper_log2_element_size(jint lh) {
assert(lh < (jint)_lh_neutral_value, "must be array");
int l2esz = (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
- assert(l2esz <= LogBitsPerLong, "sanity");
+ assert(l2esz <= LogBitsPerLong,
+ err_msg("sanity. l2esz: 0x%x for lh: 0x%x", (uint)l2esz, (uint)lh));
return l2esz;
}
static jint array_layout_helper(jint tag, int hsize, BasicType etype, int log2_esize) {
@@ -393,9 +386,10 @@ class Klass : public Metadata {
// vtables
virtual klassVtable* vtable() const { return NULL; }
+ virtual int vtable_length() const { return 0; }
// subclass check
- bool is_subclass_of(Klass* k) const;
+ bool is_subclass_of(const Klass* k) const;
// subtype check: true if is_subclass_of, or if k is interface and receiver implements it
bool is_subtype_of(Klass* k) const {
juint off = k->super_check_offset();
@@ -444,7 +438,7 @@ class Klass : public Metadata {
Klass* array_klass_or_null(int rank);
Klass* array_klass_or_null();
- virtual oop protection_domain() { return NULL; }
+ virtual oop protection_domain() const = 0;
oop class_loader() const;
@@ -676,7 +670,6 @@ class Klass : public Metadata {
#endif // INCLUDE_ALL_GCS
virtual void array_klasses_do(void f(Klass* k)) {}
- virtual void with_array_klasses_do(void f(Klass* k));
// Return self, except for abstract classes with exactly 1
// implementor. Then return the 1 concrete implementation.
@@ -702,15 +695,26 @@ class Klass : public Metadata {
virtual const char* internal_name() const = 0;
// Verification
- virtual void verify_on(outputStream* st);
- void verify() { verify_on(tty); }
+ virtual void verify_on(outputStream* st, bool check_dictionary);
+ void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); }
#ifndef PRODUCT
- void verify_vtable_index(int index);
+ bool verify_vtable_index(int index);
+ bool verify_itable_index(int index);
#endif
virtual void oop_verify_on(oop obj, outputStream* st);
+ static bool is_null(narrowKlass obj);
+ static bool is_null(Klass* obj);
+
+ // klass encoding for klass pointer in objects.
+ static narrowKlass encode_klass_not_null(Klass* v);
+ static narrowKlass encode_klass(Klass* v);
+
+ static Klass* decode_klass_not_null(narrowKlass v);
+ static Klass* decode_klass(narrowKlass v);
+
private:
// barriers used by klass_oop_store
void klass_update_barrier_set(oop v);
diff --git a/src/share/vm/oops/klass.inline.hpp b/src/share/vm/oops/klass.inline.hpp
index 3eb62afe8..841a4873a 100644
--- a/src/share/vm/oops/klass.inline.hpp
+++ b/src/share/vm/oops/klass.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_OOPS_KLASS_INLINE_HPP
#define SHARE_VM_OOPS_KLASS_INLINE_HPP
+#include "memory/universe.hpp"
#include "oops/klass.hpp"
#include "oops/markOop.hpp"
@@ -33,4 +34,41 @@ inline void Klass::set_prototype_header(markOop header) {
_prototype_header = header;
}
+inline bool Klass::is_null(Klass* obj) { return obj == NULL; }
+inline bool Klass::is_null(narrowKlass obj) { return obj == 0; }
+
+// Encoding and decoding for klass field.
+
+inline bool check_klass_alignment(Klass* obj) {
+ return (intptr_t)obj % KlassAlignmentInBytes == 0;
+}
+
+inline narrowKlass Klass::encode_klass_not_null(Klass* v) {
+ assert(!is_null(v), "klass value can never be zero");
+ assert(check_klass_alignment(v), "Address not aligned");
+ int shift = Universe::narrow_klass_shift();
+ uint64_t pd = (uint64_t)(pointer_delta((void*)v, Universe::narrow_klass_base(), 1));
+ assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding");
+ uint64_t result = pd >> shift;
+ assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow");
+ assert(decode_klass(result) == v, "reversibility");
+ return (narrowKlass)result;
+}
+
+inline narrowKlass Klass::encode_klass(Klass* v) {
+ return is_null(v) ? (narrowKlass)0 : encode_klass_not_null(v);
+}
+
+inline Klass* Klass::decode_klass_not_null(narrowKlass v) {
+ assert(!is_null(v), "narrow klass value can never be zero");
+ int shift = Universe::narrow_klass_shift();
+ Klass* result = (Klass*)(void*)((uintptr_t)Universe::narrow_klass_base() + ((uintptr_t)v << shift));
+ assert(check_klass_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
+ return result;
+}
+
+inline Klass* Klass::decode_klass(narrowKlass v) {
+ return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
+}
+
#endif // SHARE_VM_OOPS_KLASS_INLINE_HPP
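The compressed-klass arithmetic mirrors compressed oops: subtract the metaspace base and shift right to encode; reverse the steps to decode. A worked example with illustrative values:

    // Assume narrow_klass_base() == B and narrow_klass_shift() == 3.
    // For a Klass* at address B + 0x800:
    //   encode: (B + 0x800 - B) >> 3 == 0x100             (fits in 32 bits)
    //   decode: B + ((uintptr_t)0x100 << 3) == B + 0x800  (reversible)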
diff --git a/src/share/vm/oops/klassVtable.cpp b/src/share/vm/oops/klassVtable.cpp
index ebc6e0aea..710536884 100644
--- a/src/share/vm/oops/klassVtable.cpp
+++ b/src/share/vm/oops/klassVtable.cpp
@@ -47,11 +47,12 @@ inline InstanceKlass* klassVtable::ik() const {
// this function computes the vtable size (including the size needed for miranda
-// methods) and the number of miranda methods in this class
+// methods) and the number of miranda methods in this class.
// Note on Miranda methods: Let's say there is a class C that implements
-// interface I. Let's say there is a method m in I that neither C nor any
-// of its super classes implement (i.e there is no method of any access, with
-// the same name and signature as m), then m is a Miranda method which is
+// interface I, and none of C's superclasses implements I.
+// Let's say there is an abstract method m in I that neither C
+// nor any of its super classes implement (i.e there is no method of any access,
+// with the same name and signature as m), then m is a Miranda method which is
// entered as a public abstract method in C's vtable. From then on it should
// be treated as any other public method in C for method override purposes.
void klassVtable::compute_vtable_size_and_num_mirandas(
@@ -111,10 +112,13 @@ void klassVtable::compute_vtable_size_and_num_mirandas(
}
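Concretely: given interface I declaring void m(), and abstract class C implements I where neither C nor any superclass of C defines m(), m() is a Miranda method and receives a public abstract entry in C's vtable.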
int klassVtable::index_of(Method* m, int len) const {
- assert(m->vtable_index() >= 0, "do not ask this of non-vtable methods");
+ assert(m->has_vtable_index(), "do not ask this of non-vtable methods");
return m->vtable_index();
}
+// Copy super class's vtable to the first part (prefix) of this class's vtable,
+// and return the number of entries copied. Expects that 'super' is the Java
+// super class (arrays can have "array" super classes that must be skipped).
int klassVtable::initialize_from_super(KlassHandle super) {
if (super.is_null()) {
return 0;
@@ -139,14 +143,14 @@ int klassVtable::initialize_from_super(KlassHandle super) {
}
}
-// Revised lookup semantics introduced 1.3 (Kestral beta)
+//
+// Revised lookup semantics introduced 1.3 (Kestrel beta)
void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
// Note: Arrays can have intermediate array supers. Use java_super to skip them.
KlassHandle super (THREAD, klass()->java_super());
int nofNewEntries = 0;
-
if (PrintVtables && !klass()->oop_is_array()) {
ResourceMark rm(THREAD);
tty->print_cr("Initializing: %s", _klass->name()->as_C_string());
@@ -174,8 +178,10 @@ void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
int len = methods->length();
int initialized = super_vtable_len;
- // update_inherited_vtable can stop for gc - ensure using handles
+ // Check each of this class's methods against super;
+ // if override, replace in copy of super vtable, otherwise append to end
for (int i = 0; i < len; i++) {
+ // update_inherited_vtable can stop for gc - ensure using handles
HandleMark hm(THREAD);
assert(methods->at(i)->is_method(), "must be a Method*");
methodHandle mh(THREAD, methods->at(i));
@@ -189,11 +195,11 @@ void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
}
}
- // add miranda methods; it will also update the value of initialized
- fill_in_mirandas(&initialized);
+ // add miranda methods to end of vtable.
+ initialized = fill_in_mirandas(initialized);
// In class hierarchies where the accessibility is not increasing (i.e., going from private ->
- // package_private -> publicprotected), the vtable might actually be smaller than our initial
+ // package_private -> public/protected), the vtable might actually be smaller than our initial
// calculation.
assert(initialized <= _length, "vtable initialization failed");
for(;initialized < _length; initialized++) {
@@ -248,14 +254,8 @@ InstanceKlass* klassVtable::find_transitive_override(InstanceKlass* initialsuper
return superk;
}
-// Methods that are "effectively" final don't need vtable entries.
-bool method_is_effectively_final(
- AccessFlags klass_flags, methodHandle target) {
- return target->is_final() || klass_flags.is_final() && !target->is_overpass();
-}
-
// Update child's copy of super vtable for overrides
-// OR return true if a new vtable entry is required
+// OR return true if a new vtable entry is required.
// Only called for InstanceKlass's, i.e. not for arrays
// If that changed, could not use _klass as handle for klass
bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len,
@@ -263,6 +263,7 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
ResourceMark rm;
bool allocate_new = true;
assert(klass->oop_is_instance(), "must be InstanceKlass");
+ assert(klass == target_method()->method_holder(), "caller resp.");
// Initialize the method's vtable index to "nonvirtual".
// If we allocate a vtable entry, we will update it to a non-negative number.
@@ -273,11 +274,17 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
return false;
}
- if (method_is_effectively_final(klass->access_flags(), target_method)) {
+ if (target_method->is_final_method(klass->access_flags())) {
// a final method never needs a new entry; final methods can be statically
// resolved and they have to be present in the vtable only if they override
// a super's method, in which case they re-use its entry
allocate_new = false;
+ } else if (klass->is_interface()) {
+ allocate_new = false; // see note below in needs_new_vtable_entry
+ // An interface never allocates new vtable slots, only inherits old ones.
+ // This method will either be assigned its own itable index later,
+ // or be assigned an inherited vtable index in the loop below.
+ target_method()->set_vtable_index(Method::pending_itable_index);
}
// we need a new entry if there is no superclass
@@ -411,8 +418,14 @@ bool klassVtable::needs_new_vtable_entry(methodHandle target_method,
Symbol* classname,
AccessFlags class_flags,
TRAPS) {
+ if (class_flags.is_interface()) {
+ // Interfaces do not use vtables, so there is no point in assigning
+ // a vtable index to any of their methods. If we refrain from doing this,
+ // we can use Method::_vtable_index to hold the itable index.
+ return false;
+ }
- if (method_is_effectively_final(class_flags, target_method) ||
+ if (target_method->is_final_method(class_flags) ||
// a final method never needs a new entry; final methods can be statically
// resolved and they have to be present in the vtable only if they override
// a super's method, in which case they re-use its entry
@@ -500,7 +513,8 @@ int klassVtable::index_of_miranda(Symbol* name, Symbol* signature) {
return Method::invalid_vtable_index;
}
-// check if an entry is miranda
+// check if an entry at an index is miranda
+// requires that method m at entry be declared ("held") by an interface.
bool klassVtable::is_miranda_entry_at(int i) {
Method* m = method_at(i);
Klass* method_holder = m->method_holder();
@@ -516,7 +530,9 @@ bool klassVtable::is_miranda_entry_at(int i) {
return false;
}
-// check if a method is a miranda method, given a class's methods table and it's super
+// check if a method is a miranda method, given a class's methods table and its super
+// "miranda" means not static, not defined by this class, and not defined
+// in super unless it is private and therefore inaccessible to this class.
// the caller must make sure that the method belongs to an interface implemented by the class
bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* super) {
if (m->is_static()) {
@@ -541,6 +557,14 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* su
return false;
}
+// Scans current_interface_methods for miranda methods that do not
+// already appear in new_mirandas and are also not defined-and-non-private
+// in super (superclass). These mirandas are added to all_mirandas if it is
+// not null; in addition, those that are not duplicates of miranda methods
+// inherited by super from its interfaces are added to new_mirandas.
+// Thus, new_mirandas will be the set of mirandas that this class introduces,
+// all_mirandas will be the set of all mirandas applicable to this class
+// including all defined in superclasses.
void klassVtable::add_new_mirandas_to_lists(
GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas,
Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
@@ -599,17 +623,22 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
}
}
-// fill in mirandas
-void klassVtable::fill_in_mirandas(int* initialized) {
+// Discover miranda methods ("miranda" = "interface abstract, no binding"),
+// and append them into the vtable starting at index initialized,
+// return the new value of initialized.
+int klassVtable::fill_in_mirandas(int initialized) {
GrowableArray<Method*> mirandas(20);
get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
ik()->local_interfaces());
for (int i = 0; i < mirandas.length(); i++) {
- put_method_at(mirandas.at(i), *initialized);
- ++(*initialized);
+ put_method_at(mirandas.at(i), initialized);
+ ++initialized;
}
+ return initialized;
}
+// Copy this class's vtable to the vtable beginning at start.
+// Used to copy superclass vtable to prefix of subclass's vtable.
void klassVtable::copy_vtable_to(vtableEntry* start) {
Copy::disjoint_words((HeapWord*)table(), (HeapWord*)start, _length * vtableEntry::size());
}
@@ -723,6 +752,12 @@ static int initialize_count = 0;
// Initialization
void klassItable::initialize_itable(bool checkconstraints, TRAPS) {
+ if (_klass->is_interface()) {
+ // This needs to go after vtable indexes are assigned but
+ // before implementors need to know the number of itable indexes.
+ assign_itable_indexes_for_interface(_klass());
+ }
+
// Cannot be set up during bootstrapping; interfaces don't have
// itables, and klasses with only one entry have empty itables
if (Universe::is_bootstrapping() ||
@@ -754,45 +789,89 @@ void klassItable::initialize_itable(bool checkconstraints, TRAPS) {
}
+inline bool interface_method_needs_itable_index(Method* m) {
+ if (m->is_static()) return false; // e.g., Stream.empty
+ if (m->is_initializer()) return false; // <init> or <clinit>
+ // If an interface redeclares a method from java.lang.Object,
+ // it should already have a vtable index, don't touch it.
+ // e.g., CharSequence.toString (from initialize_vtable)
+ // if (m->has_vtable_index()) return false; // NO!
+ return true;
+}
+
+int klassItable::assign_itable_indexes_for_interface(Klass* klass) {
+ // an interface does not have an itable, but its methods need to be numbered
+ if (TraceItables) tty->print_cr("%3d: Initializing itable for interface %s", ++initialize_count,
+ klass->name()->as_C_string());
+ Array<Method*>* methods = InstanceKlass::cast(klass)->methods();
+ int nof_methods = methods->length();
+ int ime_num = 0;
+ for (int i = 0; i < nof_methods; i++) {
+ Method* m = methods->at(i);
+ if (interface_method_needs_itable_index(m)) {
+ assert(!m->is_final_method(), "no final interface methods");
+ // If m is already assigned a vtable index, do not disturb it.
+ if (!m->has_vtable_index()) {
+ assert(m->vtable_index() == Method::pending_itable_index, "set by initialize_vtable");
+ m->set_itable_index(ime_num);
+ // Progress to next itable entry
+ ime_num++;
+ }
+ }
+ }
+ assert(ime_num == method_count_for_interface(klass), "proper sizing");
+ return ime_num;
+}
+
+int klassItable::method_count_for_interface(Klass* interf) {
+ assert(interf->oop_is_instance(), "must be");
+ assert(interf->is_interface(), "must be");
+ Array<Method*>* methods = InstanceKlass::cast(interf)->methods();
+ int nof_methods = methods->length();
+ while (nof_methods > 0) {
+ Method* m = methods->at(nof_methods-1);
+ if (m->has_itable_index()) {
+ int length = m->itable_index() + 1;
+#ifdef ASSERT
+ while (nof_methods > 0) {
+ m = methods->at(--nof_methods);
+ assert(!m->has_itable_index() || m->itable_index() < length, "");
+ }
+#endif //ASSERT
+ return length; // return the rightmost itable index, plus one
+ }
+ nof_methods -= 1;
+ }
+ // no methods have itable indexes
+ return 0;
+}
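assign_itable_indexes_for_interface and method_count_for_interface implement one contract between them: itable indexes are handed out in declaration order to exactly the methods that need interface dispatch, and the count is recoverable as the rightmost assigned index plus one. A toy model of that numbering, under the simplifying assumption that a method is just a bag of flags (the flag combinations in main are invented):

    #include <cassert>
    #include <vector>

    struct Method {
      bool is_static;         // e.g. a static interface helper
      bool is_initializer;    // <init> or <clinit>
      bool has_vtable_index;  // e.g. redeclares a java.lang.Object method
      int  itable_index;      // -1 until assigned
    };

    // Mirrors assign_itable_indexes_for_interface: number the methods that
    // will actually be dispatched through the itable, in declaration order.
    static int assign_itable_indexes(std::vector<Method>& methods) {
      int ime_num = 0;
      for (Method& m : methods) {
        if (m.is_static || m.is_initializer) continue; // needs no itable entry
        if (m.has_vtable_index) continue;              // already vtable-dispatched
        m.itable_index = ime_num++;
      }
      return ime_num;  // what method_count_for_interface recomputes later
    }

    int main() {
      std::vector<Method> ms = {
        {true,  false, false, -1},  // static helper: skipped
        {false, true,  false, -1},  // <clinit>: skipped
        {false, false, true,  -1},  // toString-like redeclaration: skipped
        {false, false, false, -1},  // ordinary interface method
      };
      assert(assign_itable_indexes(ms) == 1);
      assert(ms[3].itable_index == 0);
      return 0;
    }

Because indexes strictly increase with method position, method_count_for_interface can scan from the right and stop at the first method that carries an index.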
+
+
void klassItable::initialize_itable_for_interface(int method_table_offset, KlassHandle interf_h, bool checkconstraints, TRAPS) {
Array<Method*>* methods = InstanceKlass::cast(interf_h())->methods();
int nof_methods = methods->length();
HandleMark hm;
- KlassHandle klass = _klass;
assert(nof_methods > 0, "at least one method must exist for interface to be in vtable");
Handle interface_loader (THREAD, InstanceKlass::cast(interf_h())->class_loader());
- int ime_num = 0;
-
- // Skip first Method* if it is a class initializer
- int i = methods->at(0)->is_static_initializer() ? 1 : 0;
- // m, method_name, method_signature, klass reset each loop so they
- // don't need preserving across check_signature_loaders call
- // methods needs a handle in case of gc from check_signature_loaders
- for(; i < nof_methods; i++) {
+ int ime_count = method_count_for_interface(interf_h());
+ for (int i = 0; i < nof_methods; i++) {
Method* m = methods->at(i);
- Symbol* method_name = m->name();
- Symbol* method_signature = m->signature();
-
- // This is same code as in Linkresolver::lookup_instance_method_in_klasses
- Method* target = klass->uncached_lookup_method(method_name, method_signature);
- while (target != NULL && target->is_static()) {
- // continue with recursive lookup through the superclass
- Klass* super = target->method_holder()->super();
- target = (super == NULL) ? (Method*)NULL : super->uncached_lookup_method(method_name, method_signature);
+ methodHandle target;
+ if (m->has_itable_index()) {
+ LinkResolver::lookup_instance_method_in_klasses(target, _klass, m->name(), m->signature(), CHECK);
}
if (target == NULL || !target->is_public() || target->is_abstract()) {
// Entry does not resolve. Leave it empty.
} else {
// Entry did resolve, check loader constraints before initializing
// if checkconstraints requested
- methodHandle target_h (THREAD, target); // preserve across gc
if (checkconstraints) {
Handle method_holder_loader (THREAD, target->method_holder()->class_loader());
if (method_holder_loader() != interface_loader()) {
ResourceMark rm(THREAD);
Symbol* failed_type_symbol =
- SystemDictionary::check_signature_loaders(method_signature,
+ SystemDictionary::check_signature_loaders(m->signature(),
method_holder_loader,
interface_loader,
true, CHECK);
@@ -803,9 +882,9 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Klass
"and the class loader (instance of %s) for interface "
"%s have different Class objects for the type %s "
"used in the signature";
- char* sig = target_h()->name_and_sig_as_C_string();
+ char* sig = target()->name_and_sig_as_C_string();
const char* loader1 = SystemDictionary::loader_name(method_holder_loader());
- char* current = klass->name()->as_C_string();
+ char* current = _klass->name()->as_C_string();
const char* loader2 = SystemDictionary::loader_name(interface_loader());
char* iface = InstanceKlass::cast(interf_h())->name()->as_C_string();
char* failed_type_name = failed_type_symbol->as_C_string();
@@ -821,10 +900,10 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Klass
}
// ime may have moved during GC so recalculate address
- itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target_h());
+ int ime_num = m->itable_index();
+ assert(ime_num < ime_count, "oob");
+ itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target());
}
- // Progress to next entry
- ime_num++;
}
}
@@ -913,20 +992,22 @@ class InterfaceVisiterClosure : public StackObj {
virtual void doit(Klass* intf, int method_count) = 0;
};
-// Visit all interfaces with at-least one method (excluding <clinit>)
+// Visit all interfaces with at least one itable method
void visit_all_interfaces(Array<Klass*>* transitive_intf, InterfaceVisiterClosure *blk) {
// Handle array argument
for(int i = 0; i < transitive_intf->length(); i++) {
Klass* intf = transitive_intf->at(i);
assert(intf->is_interface(), "sanity check");
- // Find no. of methods excluding a <clinit>
- int method_count = InstanceKlass::cast(intf)->methods()->length();
- if (method_count > 0) {
- Method* m = InstanceKlass::cast(intf)->methods()->at(0);
- assert(m != NULL && m->is_method(), "sanity check");
- if (m->name() == vmSymbols::object_initializer_name()) {
- method_count--;
+ // Find no. of itable methods
+ int method_count = 0;
+ // method_count = klassItable::method_count_for_interface(intf);
+ Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
+ if (methods->length() > 0) {
+ for (int i = methods->length(); --i >= 0; ) {
+ if (interface_method_needs_itable_index(methods->at(i))) {
+ method_count++;
+ }
}
}
@@ -1024,40 +1105,26 @@ void klassItable::setup_itable_offset_table(instanceKlassHandle klass) {
}
-// m must be a method in an interface
-int klassItable::compute_itable_index(Method* m) {
- InstanceKlass* intf = m->method_holder();
- assert(intf->is_interface(), "sanity check");
- Array<Method*>* methods = intf->methods();
- int index = 0;
- while(methods->at(index) != m) {
- index++;
- assert(index < methods->length(), "should find index for resolve_invoke");
- }
- // Adjust for <clinit>, which is left out of table if first method
- if (methods->length() > 0 && methods->at(0)->is_static_initializer()) {
- index--;
- }
- return index;
-}
-
-
-// inverse to compute_itable_index
+// inverse to itable_index
Method* klassItable::method_for_itable_index(Klass* intf, int itable_index) {
assert(InstanceKlass::cast(intf)->is_interface(), "sanity check");
+ assert(intf->verify_itable_index(itable_index), "");
Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
- int index = itable_index;
- // Adjust for <clinit>, which is left out of table if first method
- if (methods->length() > 0 && methods->at(0)->is_static_initializer()) {
- index++;
- }
-
- if (itable_index < 0 || index >= methods->length())
+ if (itable_index < 0 || itable_index >= method_count_for_interface(intf))
return NULL; // help caller defend against bad indexes
+ int index = itable_index;
Method* m = methods->at(index);
- assert(compute_itable_index(m) == itable_index, "correct inverse");
+ int index2 = -1;
+ while (!m->has_itable_index() ||
+ (index2 = m->itable_index()) != itable_index) {
+ assert(index2 < itable_index, "monotonic");
+ if (++index == methods->length())
+ return NULL;
+ m = methods->at(index);
+ }
+ assert(m->itable_index() == itable_index, "correct inverse");
return m;
}
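The forward scan above leans on the same declaration-order numbering: the method holding itable index k can never sit before position k in the methods array, and assigned indexes only grow as the scan advances. A minimal sketch of that invariant (positions and indexes below are invented):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // idx[pos] is the itable index of the method at position pos, or -1 if
    // that method has none (static, <clinit>, or vtable-dispatched).
    static int position_for_itable_index(const std::vector<int>& idx, int want) {
      std::size_t pos = (std::size_t)want; // index 'want' sits at position >= want
      while (pos < idx.size()) {
        if (idx[pos] == want) return (int)pos;
        assert(idx[pos] < want);           // monotonic, as in the assert above
        ++pos;
      }
      return -1;                           // defend against bad indexes
    }

    int main() {
      // positions: 0:<clinit>  1:first itable method  2:static  3:second
      std::vector<int> idx = {-1, 0, -1, 1};
      assert(position_for_itable_index(idx, 0) == 1);
      assert(position_for_itable_index(idx, 1) == 3);
      return 0;
    }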
diff --git a/src/share/vm/oops/klassVtable.hpp b/src/share/vm/oops/klassVtable.hpp
index 01495e35d..06d55af25 100644
--- a/src/share/vm/oops/klassVtable.hpp
+++ b/src/share/vm/oops/klassVtable.hpp
@@ -124,7 +124,7 @@ class klassVtable : public ResourceObj {
// support for miranda methods
bool is_miranda_entry_at(int i);
- void fill_in_mirandas(int* initialized);
+ int fill_in_mirandas(int initialized);
static bool is_miranda(Method* m, Array<Method*>* class_methods, Klass* super);
static void add_new_mirandas_to_lists(
GrowableArray<Method*>* new_mirandas,
@@ -150,6 +150,8 @@ class klassVtable : public ResourceObj {
// from_compiled_code_entry_point -> nmethod entry point
// from_interpreter_entry_point -> i2cadapter
class vtableEntry VALUE_OBJ_CLASS_SPEC {
+ friend class VMStructs;
+
public:
// size in words
static int size() {
@@ -288,12 +290,12 @@ class klassItable : public ResourceObj {
#endif // INCLUDE_JVMTI
// Setup of itable
+ static int assign_itable_indexes_for_interface(Klass* klass);
+ static int method_count_for_interface(Klass* klass);
static int compute_itable_size(Array<Klass*>* transitive_interfaces);
static void setup_itable_offset_table(instanceKlassHandle klass);
// Resolving of method to index
- static int compute_itable_index(Method* m);
- // ...and back again:
static Method* method_for_itable_index(Klass* klass, int itable_index);
// Debugging/Statistics
diff --git a/src/share/vm/oops/method.cpp b/src/share/vm/oops/method.cpp
index 51b7e9123..029384df0 100644
--- a/src/share/vm/oops/method.cpp
+++ b/src/share/vm/oops/method.cpp
@@ -74,7 +74,7 @@ Method* Method::allocate(ClassLoaderData* loader_data,
int size = Method::size(access_flags.is_native());
- return new (loader_data, size, false, THREAD) Method(cm, access_flags, size);
+ return new (loader_data, size, false, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags, size);
}
Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) {
@@ -514,25 +514,32 @@ bool Method::compute_has_loops_flag() {
return _access_flags.has_loops();
}
+bool Method::is_final_method(AccessFlags class_access_flags) const {
+ // or "does_not_require_vtable_entry"
+ // overpass can occur, is not final (reuses vtable entry)
+ // private methods get vtable entries for backward class compatibility.
+ if (is_overpass()) return false;
+ return is_final() || class_access_flags.is_final();
+}
bool Method::is_final_method() const {
- // %%% Should return true for private methods also,
- // since there is no way to override them.
- return is_final() || method_holder()->is_final();
+ return is_final_method(method_holder()->access_flags());
}
-
-bool Method::is_strict_method() const {
- return is_strict();
+bool Method::can_be_statically_bound(AccessFlags class_access_flags) const {
+ if (is_final_method(class_access_flags)) return true;
+#ifdef ASSERT
+ bool is_nonv = (vtable_index() == nonvirtual_vtable_index);
+ if (class_access_flags.is_interface()) assert(is_nonv == is_static(), err_msg("is_nonv=%s", is_nonv ? "true" : "false"));
+#endif
+ assert(valid_vtable_index() || valid_itable_index(), "method must be linked before we ask this question");
+ return vtable_index() == nonvirtual_vtable_index;
}
-
bool Method::can_be_statically_bound() const {
- if (is_final_method()) return true;
- return vtable_index() == nonvirtual_vtable_index;
+ return can_be_statically_bound(method_holder()->access_flags());
}
-
bool Method::is_accessor() const {
if (code_size() != 5) return false;
if (size_of_parameters() != 1) return false;
@@ -737,11 +744,22 @@ void Method::print_made_not_compilable(int comp_level, bool is_osr, bool report,
}
}
+bool Method::is_always_compilable() const {
+ // Generated adapters must be compiled
+ if (is_method_handle_intrinsic() && is_synthetic()) {
+ assert(!is_not_c1_compilable(), "sanity check");
+ assert(!is_not_c2_compilable(), "sanity check");
+ return true;
+ }
+
+ return false;
+}
+
bool Method::is_not_compilable(int comp_level) const {
if (number_of_breakpoints() > 0)
return true;
- if (is_method_handle_intrinsic())
- return !is_synthetic(); // the generated adapters must be compiled
+ if (is_always_compilable())
+ return false;
if (comp_level == CompLevel_any)
return is_not_c1_compilable() || is_not_c2_compilable();
if (is_c1_compile(comp_level))
@@ -753,6 +771,10 @@ bool Method::is_not_compilable(int comp_level) const {
// call this when the compiler finds that this method is not compilable
void Method::set_not_compilable(int comp_level, bool report, const char* reason) {
+ if (is_always_compilable()) {
+ // Don't mark a method which should be always compilable
+ return;
+ }
print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason);
if (comp_level == CompLevel_all) {
set_not_c1_compilable();
@@ -764,6 +786,7 @@ void Method::set_not_compilable(int comp_level, bool report, const char* reason)
set_not_c2_compilable();
}
CompilationPolicy::policy()->disable_compilation(this);
+ assert(!CompilationPolicy::can_be_compiled(this, comp_level), "sanity check");
}
bool Method::is_not_osr_compilable(int comp_level) const {
@@ -790,6 +813,7 @@ void Method::set_not_osr_compilable(int comp_level, bool report, const char* rea
set_not_c2_osr_compilable();
}
CompilationPolicy::policy()->disable_compilation(this);
+ assert(!CompilationPolicy::can_be_osr_compiled(this, comp_level), "sanity check");
}
// Revert to using the interpreter and clear out the nmethod
@@ -849,7 +873,9 @@ void Method::link_method(methodHandle h_method, TRAPS) {
assert(entry != NULL, "interpreter entry must be non-null");
// Sets both _i2i_entry and _from_interpreted_entry
set_interpreter_entry(entry);
- if (is_native() && !is_method_handle_intrinsic()) {
+
+ // Don't overwrite already registered native entries.
+ if (is_native() && !has_native_function()) {
set_native_function(
SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
!native_bind_event_is_interesting);
@@ -965,7 +991,7 @@ bool Method::is_overridden_in(Klass* k) const {
assert(ik->is_subclass_of(method_holder()), "should be subklass");
assert(ik->vtable() != NULL, "vtable should exist");
- if (vtable_index() == nonvirtual_vtable_index) {
+ if (!has_vtable_index()) {
return false;
} else {
Method* vt_m = ik->method_at_vtable(vtable_index());
@@ -996,7 +1022,6 @@ bool Method::should_not_be_cached() const {
bool Method::is_ignored_by_security_stack_walk() const {
const bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
- assert(intrinsic_id() != vmIntrinsics::_invoke || Universe::reflect_invoke_cache()->is_same_method((Method*)this), "sanity");
if (intrinsic_id() == vmIntrinsics::_invoke) {
// This is Method.invoke() -- ignore it
return true;
@@ -1178,6 +1203,7 @@ methodHandle Method::clone_with_new_data(methodHandle m, u_char* new_code, int n
newm->constMethod()->set_constMethod_size(new_const_method_size);
newm->set_method_size(new_method_size);
assert(newm->code_size() == new_code_length, "check");
+ assert(newm->method_parameters_length() == method_parameters_len, "check");
assert(newm->checked_exceptions_length() == checked_exceptions_len, "check");
assert(newm->exception_table_length() == exception_table_len, "check");
assert(newm->localvariable_table_length() == localvariable_len, "check");
@@ -1189,6 +1215,12 @@ methodHandle Method::clone_with_new_data(methodHandle m, u_char* new_code, int n
new_compressed_linenumber_table,
new_compressed_linenumber_size);
}
+ // Copy method_parameters
+ if (method_parameters_len > 0) {
+ memcpy(newm->method_parameters_start(),
+ m->method_parameters_start(),
+ method_parameters_len * sizeof(MethodParametersElement));
+ }
// Copy checked_exceptions
if (checked_exceptions_len > 0) {
memcpy(newm->checked_exceptions_start(),
@@ -1598,7 +1630,7 @@ int Method::backedge_count() {
}
int Method::highest_comp_level() const {
- MethodData* mdo = method_data();
+ const MethodData* mdo = method_data();
if (mdo != NULL) {
return mdo->highest_comp_level();
} else {
@@ -1607,7 +1639,7 @@ int Method::highest_comp_level() const {
}
int Method::highest_osr_comp_level() const {
- MethodData* mdo = method_data();
+ const MethodData* mdo = method_data();
if (mdo != NULL) {
return mdo->highest_osr_comp_level();
} else {
@@ -1951,7 +1983,7 @@ void Method::print_on(outputStream* st) const {
void Method::print_value_on(outputStream* st) const {
assert(is_method(), "must be method");
- st->print_cr(internal_name());
+ st->print(internal_name());
print_address_on(st);
st->print(" ");
name()->print_value_on(st);
@@ -1959,6 +1991,7 @@ void Method::print_value_on(outputStream* st) const {
signature()->print_value_on(st);
st->print(" in ");
method_holder()->print_value_on(st);
+ if (WizardMode) st->print("#%d", _vtable_index);
if (WizardMode) st->print("[%d,%d]", size_of_parameters(), max_locals());
if (WizardMode && code() != NULL) st->print(" ((nmethod*)%p)", code());
}
@@ -1984,14 +2017,9 @@ void Method::collect_statistics(KlassSizeStats *sz) const {
void Method::verify_on(outputStream* st) {
guarantee(is_method(), "object must be method");
- guarantee(is_metadata(), "should be metadata");
guarantee(constants()->is_constantPool(), "should be constant pool");
- guarantee(constants()->is_metadata(), "should be metadata");
guarantee(constMethod()->is_constMethod(), "should be ConstMethod*");
- guarantee(constMethod()->is_metadata(), "should be metadata");
MethodData* md = method_data();
guarantee(md == NULL ||
- md->is_metadata(), "should be metadata");
- guarantee(md == NULL ||
md->is_methodData(), "should be method data");
}
diff --git a/src/share/vm/oops/method.hpp b/src/share/vm/oops/method.hpp
index 4856ec8ea..ca1e0b1f2 100644
--- a/src/share/vm/oops/method.hpp
+++ b/src/share/vm/oops/method.hpp
@@ -447,16 +447,22 @@ class Method : public Metadata {
enum VtableIndexFlag {
// Valid vtable indexes are non-negative (>= 0).
// These few negative values are used as sentinels.
- highest_unused_vtable_index_value = -5,
+ itable_index_max = -10, // first itable index, growing downward
+ pending_itable_index = -9, // itable index will be assigned
invalid_vtable_index = -4, // distinct from any valid vtable index
garbage_vtable_index = -3, // not yet linked; no vtable layout yet
nonvirtual_vtable_index = -2 // there is no need for vtable dispatch
// 6330203 Note: Do not use -1, which was overloaded with many meanings.
};
DEBUG_ONLY(bool valid_vtable_index() const { return _vtable_index >= nonvirtual_vtable_index; })
- int vtable_index() const { assert(valid_vtable_index(), "");
- return _vtable_index; }
+ bool has_vtable_index() const { return _vtable_index >= 0; }
+ int vtable_index() const { return _vtable_index; }
void set_vtable_index(int index) { _vtable_index = index; }
+ DEBUG_ONLY(bool valid_itable_index() const { return _vtable_index <= pending_itable_index; })
+ bool has_itable_index() const { return _vtable_index <= itable_index_max; }
+ int itable_index() const { assert(valid_itable_index(), "");
+ return itable_index_max - _vtable_index; }
+ void set_itable_index(int index) { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); }
// interpreter entry
address interpreter_entry() const { return _i2i_entry; }
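The accessors above multiplex a single int field: non-negative values are vtable indexes, a few small negatives are sentinels, and everything at or below itable_index_max encodes an itable index "growing downward" (itable index i is stored as itable_index_max - i). A compilable sketch of just that encoding, with the constants copied from the enum and arbitrary values in main:

    #include <cassert>

    enum VtableIndexFlag {
      itable_index_max        = -10, // first itable index, growing downward
      pending_itable_index    = -9,
      invalid_vtable_index    = -4,
      garbage_vtable_index    = -3,
      nonvirtual_vtable_index = -2
    };

    struct MethodIdx {
      int _vtable_index;
      bool has_vtable_index() const { return _vtable_index >= 0; }
      bool has_itable_index() const { return _vtable_index <= itable_index_max; }
      int  itable_index() const     { return itable_index_max - _vtable_index; }
      void set_itable_index(int i)  { _vtable_index = itable_index_max - i; }
    };

    int main() {
      MethodIdx m = { pending_itable_index };  // as left by initialize_vtable
      assert(!m.has_vtable_index() && !m.has_itable_index());
      m.set_itable_index(0);                   // stored as -10
      assert(m._vtable_index == itable_index_max && m.itable_index() == 0);
      m.set_itable_index(5);                   // stored as -15, growing downward
      assert(m.has_itable_index() && m.itable_index() == 5);
      return 0;
    }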
@@ -565,10 +571,11 @@ class Method : public Metadata {
// checks method and its method holder
bool is_final_method() const;
- bool is_strict_method() const;
+ bool is_final_method(AccessFlags class_access_flags) const;
// true if method needs no dynamic dispatch (final and/or no vtable entry)
bool can_be_statically_bound() const;
+ bool can_be_statically_bound(AccessFlags class_access_flags) const;
// returns true if the method has any backward branches.
bool has_loops() {
@@ -676,13 +683,15 @@ class Method : public Metadata {
Symbol* signature, //anything at all
TRAPS);
static Klass* check_non_bcp_klass(Klass* klass);
- // these operate only on invoke methods:
+
+ // How many extra stack entries for invokedynamic when it's enabled
+ static const int extra_stack_entries_for_jsr292 = 1;
+
+ // this operates only on invoke methods:
// presize interpreter frames for extra interpreter stack entries, if needed
- // method handles want to be able to push a few extra values (e.g., a bound receiver), and
- // invokedynamic sometimes needs to push a bootstrap method, call site, and arglist,
- // all without checking for a stack overflow
- static int extra_stack_entries() { return EnableInvokeDynamic ? 2 : 0; }
- static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize()
+ // Account for the extra appendix argument for invokehandle/invokedynamic
+ static int extra_stack_entries() { return EnableInvokeDynamic ? extra_stack_entries_for_jsr292 : 0; }
+ static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize
// RedefineClasses() support:
bool is_old() const { return access_flags().is_old(); }
@@ -743,10 +752,6 @@ class Method : public Metadata {
// so handles are not used to avoid deadlock.
jmethodID find_jmethod_id_or_null() { return method_holder()->jmethod_id_or_null(this); }
- // JNI static invoke cached itable index accessors
- int cached_itable_index() { return method_holder()->cached_itable_index(method_idnum()); }
- void set_cached_itable_index(int index) { method_holder()->set_cached_itable_index(method_idnum(), index); }
-
// Support for inlining of intrinsic methods
vmIntrinsics::ID intrinsic_id() const { return (vmIntrinsics::ID) _intrinsic_id; }
void set_intrinsic_id(vmIntrinsics::ID id) { _intrinsic_id = (u1) id; }
@@ -799,6 +804,7 @@ class Method : public Metadata {
void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
set_not_osr_compilable(comp_level, false);
}
+ bool is_always_compilable() const;
private:
void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
@@ -991,7 +997,7 @@ class ExceptionTable : public StackObj {
u2 _length;
public:
- ExceptionTable(Method* m) {
+ ExceptionTable(const Method* m) {
if (m->has_exception_handler()) {
_table = m->exception_table_start();
_length = m->exception_table_length();
diff --git a/src/share/vm/oops/methodCounters.cpp b/src/share/vm/oops/methodCounters.cpp
index 53d3e682b..1ee8eb170 100644
--- a/src/share/vm/oops/methodCounters.cpp
+++ b/src/share/vm/oops/methodCounters.cpp
@@ -26,7 +26,7 @@
#include "runtime/thread.inline.hpp"
MethodCounters* MethodCounters::allocate(ClassLoaderData* loader_data, TRAPS) {
- return new(loader_data, size(), false, THREAD) MethodCounters();
+ return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters();
}
void MethodCounters::clear_counters() {
diff --git a/src/share/vm/oops/methodData.cpp b/src/share/vm/oops/methodData.cpp
index 73f70bac9..89a4cd4aa 100644
--- a/src/share/vm/oops/methodData.cpp
+++ b/src/share/vm/oops/methodData.cpp
@@ -388,7 +388,8 @@ void ArgInfoData::print_data_on(outputStream* st) {
MethodData* MethodData::allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS) {
int size = MethodData::compute_allocation_size_in_words(method);
- return new (loader_data, size, false, THREAD) MethodData(method(), size, CHECK_NULL);
+ return new (loader_data, size, false, MetaspaceObj::MethodDataType, THREAD)
+ MethodData(method(), size, CHECK_NULL);
}
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
diff --git a/src/share/vm/oops/methodData.hpp b/src/share/vm/oops/methodData.hpp
index b9fc63e30..7ff9b2bbb 100644
--- a/src/share/vm/oops/methodData.hpp
+++ b/src/share/vm/oops/methodData.hpp
@@ -72,6 +72,8 @@ class ProfileData;
//
// Overlay for generic profiling data.
class DataLayout VALUE_OBJ_CLASS_SPEC {
+ friend class VMStructs;
+
private:
// Every data layout begins with a header. This header
// contains a tag, which is used to indicate the size/layout
@@ -1338,9 +1340,9 @@ public:
void set_would_profile(bool p) { _would_profile = p; }
bool would_profile() const { return _would_profile; }
- int highest_comp_level() { return _highest_comp_level; }
+ int highest_comp_level() const { return _highest_comp_level; }
void set_highest_comp_level(int level) { _highest_comp_level = level; }
- int highest_osr_comp_level() { return _highest_osr_comp_level; }
+ int highest_osr_comp_level() const { return _highest_osr_comp_level; }
void set_highest_osr_comp_level(int level) { _highest_osr_comp_level = level; }
int num_loops() const { return _num_loops; }
diff --git a/src/share/vm/oops/objArrayKlass.cpp b/src/share/vm/oops/objArrayKlass.cpp
index f04020639..f2f349106 100644
--- a/src/share/vm/oops/objArrayKlass.cpp
+++ b/src/share/vm/oops/objArrayKlass.cpp
@@ -676,11 +676,9 @@ const char* ObjArrayKlass::internal_name() const {
// Verification
-void ObjArrayKlass::verify_on(outputStream* st) {
- ArrayKlass::verify_on(st);
- guarantee(element_klass()->is_metadata(), "should be in metaspace");
+void ObjArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
+ ArrayKlass::verify_on(st, check_dictionary);
guarantee(element_klass()->is_klass(), "should be klass");
- guarantee(bottom_klass()->is_metadata(), "should be in metaspace");
guarantee(bottom_klass()->is_klass(), "should be klass");
Klass* bk = bottom_klass();
guarantee(bk->oop_is_instance() || bk->oop_is_typeArray(), "invalid bottom klass");
diff --git a/src/share/vm/oops/objArrayKlass.hpp b/src/share/vm/oops/objArrayKlass.hpp
index 74aa4f8d7..d56a3de84 100644
--- a/src/share/vm/oops/objArrayKlass.hpp
+++ b/src/share/vm/oops/objArrayKlass.hpp
@@ -75,7 +75,7 @@ class ObjArrayKlass : public ArrayKlass {
void copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS);
// Compute protection domain
- oop protection_domain() { return bottom_klass()->protection_domain(); }
+ oop protection_domain() const { return bottom_klass()->protection_domain(); }
private:
// Either oop or narrowOop depending on UseCompressedOops.
@@ -151,7 +151,7 @@ class ObjArrayKlass : public ArrayKlass {
const char* internal_name() const;
// Verification
- void verify_on(outputStream* st);
+ void verify_on(outputStream* st, bool check_dictionary);
void oop_verify_on(oop obj, outputStream* st);
};
diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp
index 94e68ed32..66a62eaab 100644
--- a/src/share/vm/oops/oop.hpp
+++ b/src/share/vm/oops/oop.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@ class oopDesc {
volatile markOop _mark;
union _metadata {
Klass* _klass;
- narrowOop _compressed_klass;
+ narrowKlass _compressed_klass;
} _metadata;
// Fast access to barrier set. Must be initialized.
@@ -84,7 +84,7 @@ class oopDesc {
Klass* klass() const;
Klass* klass_or_null() const volatile;
Klass** klass_addr();
- narrowOop* compressed_klass_addr();
+ narrowKlass* compressed_klass_addr();
void set_klass(Klass* k);
@@ -189,13 +189,6 @@ class oopDesc {
oop compare_value,
bool prebarrier = false);
- // klass encoding for klass pointer in objects.
- static narrowOop encode_klass_not_null(Klass* v);
- static narrowOop encode_klass(Klass* v);
-
- static Klass* decode_klass_not_null(narrowOop v);
- static Klass* decode_klass(narrowOop v);
-
// Access to fields in an instanceOop through these methods.
oop obj_field(int offset) const;
volatile oop obj_field_volatile(int offset) const;
diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp
index 532539b19..3be052272 100644
--- a/src/share/vm/oops/oop.inline.hpp
+++ b/src/share/vm/oops/oop.inline.hpp
@@ -35,7 +35,7 @@
#include "memory/specialized_oop_closures.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
-#include "oops/klass.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
@@ -72,8 +72,8 @@ inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
}
inline Klass* oopDesc::klass() const {
- if (UseCompressedKlassPointers) {
- return decode_klass_not_null(_metadata._compressed_klass);
+ if (UseCompressedClassPointers) {
+ return Klass::decode_klass_not_null(_metadata._compressed_klass);
} else {
return _metadata._klass;
}
@@ -81,36 +81,36 @@ inline Klass* oopDesc::klass() const {
inline Klass* oopDesc::klass_or_null() const volatile {
// can be NULL in CMS
- if (UseCompressedKlassPointers) {
- return decode_klass(_metadata._compressed_klass);
+ if (UseCompressedClassPointers) {
+ return Klass::decode_klass(_metadata._compressed_klass);
} else {
return _metadata._klass;
}
}
inline int oopDesc::klass_gap_offset_in_bytes() {
- assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers");
- return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
+ assert(UseCompressedClassPointers, "only applicable to compressed klass pointers");
+ return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
}
inline Klass** oopDesc::klass_addr() {
// Only used internally and with CMS and will not work with
// UseCompressedOops
- assert(!UseCompressedKlassPointers, "only supported with uncompressed klass pointers");
+ assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
return (Klass**) &_metadata._klass;
}
-inline narrowOop* oopDesc::compressed_klass_addr() {
- assert(UseCompressedKlassPointers, "only called by compressed klass pointers");
- return (narrowOop*) &_metadata._compressed_klass;
+inline narrowKlass* oopDesc::compressed_klass_addr() {
+ assert(UseCompressedClassPointers, "only called by compressed klass pointers");
+ return &_metadata._compressed_klass;
}
inline void oopDesc::set_klass(Klass* k) {
// since klasses are promoted no store check is needed
assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
- if (UseCompressedKlassPointers) {
- *compressed_klass_addr() = encode_klass_not_null(k);
+ if (UseCompressedClassPointers) {
+ *compressed_klass_addr() = Klass::encode_klass_not_null(k);
} else {
*klass_addr() = k;
}
@@ -121,7 +121,7 @@ inline int oopDesc::klass_gap() const {
}
inline void oopDesc::set_klass_gap(int v) {
- if (UseCompressedKlassPointers) {
+ if (UseCompressedClassPointers) {
*(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
}
}
@@ -129,8 +129,8 @@ inline void oopDesc::set_klass_gap(int v) {
inline void oopDesc::set_klass_to_list_ptr(oop k) {
// This is only to be used during GC, for from-space objects, so no
// barrier is needed.
- if (UseCompressedKlassPointers) {
- _metadata._compressed_klass = encode_heap_oop(k); // may be null (parnew overflow handling)
+ if (UseCompressedClassPointers) {
+ _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k); // may be null (parnew overflow handling)
} else {
_metadata._klass = (Klass*)(address)k;
}
@@ -138,8 +138,8 @@ inline void oopDesc::set_klass_to_list_ptr(oop k) {
inline oop oopDesc::list_ptr_from_klass() {
// This is only to be used during GC, for from-space objects.
- if (UseCompressedKlassPointers) {
- return decode_heap_oop(_metadata._compressed_klass);
+ if (UseCompressedClassPointers) {
+ return decode_heap_oop((narrowOop)_metadata._compressed_klass);
} else {
// Special case for GC
return (oop)(address)_metadata._klass;
@@ -179,7 +179,6 @@ inline address* oopDesc::address_field_addr(int offset) const { return (address
// the right type and inlines the appropriate code).
inline bool oopDesc::is_null(oop obj) { return obj == NULL; }
-inline bool oopDesc::is_null(Klass* obj) { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
@@ -189,9 +188,6 @@ inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
inline bool check_obj_alignment(oop obj) {
return (intptr_t)obj % MinObjAlignmentInBytes == 0;
}
-inline bool check_klass_alignment(Klass* obj) {
- return (intptr_t)obj % KlassAlignmentInBytes == 0;
-}
inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
assert(!is_null(v), "oop value can never be zero");
@@ -227,39 +223,6 @@ inline oop oopDesc::decode_heap_oop(narrowOop v) {
inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v) { return v; }
-// Encoding and decoding for klass field. It is copied code, but someday
-// might not be the same as oop.
-
-inline narrowOop oopDesc::encode_klass_not_null(Klass* v) {
- assert(!is_null(v), "klass value can never be zero");
- assert(check_klass_alignment(v), "Address not aligned");
- address base = Universe::narrow_klass_base();
- int shift = Universe::narrow_klass_shift();
- uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
- assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding");
- uint64_t result = pd >> shift;
- assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow");
- assert(decode_klass(result) == v, "reversibility");
- return (narrowOop)result;
-}
-
-inline narrowOop oopDesc::encode_klass(Klass* v) {
- return (is_null(v)) ? (narrowOop)0 : encode_klass_not_null(v);
-}
-
-inline Klass* oopDesc::decode_klass_not_null(narrowOop v) {
- assert(!is_null(v), "narrow oop value can never be zero");
- address base = Universe::narrow_klass_base();
- int shift = Universe::narrow_klass_shift();
- Klass* result = (Klass*)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
- assert(check_klass_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
- return result;
-}
-
-inline Klass* oopDesc::decode_klass(narrowOop v) {
- return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
-}
-
// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop oopDesc::load_heap_oop(oop* p) { return *p; }
diff --git a/src/share/vm/oops/oopsHierarchy.hpp b/src/share/vm/oops/oopsHierarchy.hpp
index d599b1bae..ccf7a5f99 100644
--- a/src/share/vm/oops/oopsHierarchy.hpp
+++ b/src/share/vm/oops/oopsHierarchy.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,10 @@
// of B, A's representation is a prefix of B's representation.
typedef juint narrowOop; // Offset instead of address for an oop within a java object
+
+// If compressed klass pointers are in use, the klass field is stored as a narrowKlass.
+typedef juint narrowKlass;
+
typedef void* OopOrNarrowOopStar;
typedef class markOopDesc* markOop;
diff --git a/src/share/vm/oops/symbol.cpp b/src/share/vm/oops/symbol.cpp
index 253d0df88..2a2c975b6 100644
--- a/src/share/vm/oops/symbol.cpp
+++ b/src/share/vm/oops/symbol.cpp
@@ -32,30 +32,32 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
-Symbol::Symbol(const u1* name, int length, int refcount) : _refcount(refcount), _length(length) {
+Symbol::Symbol(const u1* name, int length, int refcount) {
+ _refcount = refcount;
+ _length = length;
_identity_hash = os::random();
for (int i = 0; i < _length; i++) {
byte_at_put(i, name[i]);
}
}
-void* Symbol::operator new(size_t sz, int len, TRAPS) {
+void* Symbol::operator new(size_t sz, int len, TRAPS) throw() {
int alloc_size = size(len)*HeapWordSize;
address res = (address) AllocateHeap(alloc_size, mtSymbol);
return res;
}
-void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) {
+void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) throw() {
int alloc_size = size(len)*HeapWordSize;
address res = (address)arena->Amalloc(alloc_size);
return res;
}
-void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) {
+void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) throw() {
address res;
int alloc_size = size(len)*HeapWordSize;
res = (address) Metaspace::allocate(loader_data, size(len), true,
- Metaspace::NonClassType, CHECK_NULL);
+ MetaspaceObj::SymbolType, CHECK_NULL);
return res;
}
diff --git a/src/share/vm/oops/symbol.hpp b/src/share/vm/oops/symbol.hpp
index d06edf052..e747c4646 100644
--- a/src/share/vm/oops/symbol.hpp
+++ b/src/share/vm/oops/symbol.hpp
@@ -27,6 +27,7 @@
#include "utilities/utf8.hpp"
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
// A Symbol is a canonicalized string.
// All Symbols reside in global SymbolTable and are reference counted.
@@ -44,7 +45,7 @@
// in the SymbolTable bucket (the _literal field in HashtableEntry)
// that points to the Symbol. All other stores of a Symbol*
// to a field of a persistent variable (e.g., the _name field in
-// FieldAccessInfo or _ptr in a CPSlot) is reference counted.
+// fieldDescriptor or _ptr in a CPSlot) are reference counted.
//
// 1) The lookup of a "name" in the SymbolTable either creates a Symbol F for
// "name" and returns a pointer to F or finds a pre-existing Symbol F for
@@ -101,14 +102,22 @@
// type without virtual functions.
class ClassLoaderData;
-class Symbol : public MetaspaceObj {
+// We separate the fields in SymbolBase from Symbol::_body so that
+// Symbol::size(int) can correctly calculate the space needed.
+class SymbolBase : public MetaspaceObj {
+ public:
+ ATOMIC_SHORT_PAIR(
+ volatile short _refcount, // needs atomic operation
+ unsigned short _length // number of UTF8 characters in the symbol (does not need atomic op)
+ );
+ int _identity_hash;
+};
+
+class Symbol : private SymbolBase {
friend class VMStructs;
friend class SymbolTable;
friend class MoveSymbols;
private:
- volatile int _refcount;
- int _identity_hash;
- unsigned short _length; // number of UTF8 characters in the symbol
jbyte _body[1];
enum {
@@ -117,7 +126,7 @@ class Symbol : public MetaspaceObj {
};
static int size(int length) {
- size_t sz = heap_word_size(sizeof(Symbol) + (length > 0 ? length - 1 : 0));
+ size_t sz = heap_word_size(sizeof(SymbolBase) + (length > 0 ? length : 0));
return align_object_size(sz);
}
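The point of splitting SymbolBase out is that sizeof(SymbolBase) is exactly the header in front of the UTF8 body, so size(length) no longer needs the old "length - 1" fudge for the jbyte _body[1] flexible-array idiom. A worked sketch of the arithmetic, assuming an 8-byte HeapWord and common field packing; align_object_size is left out for brevity, and the two shorts are written out directly (the ATOMIC_SHORT_PAIR macro only controls their declaration order so the pair can be updated as one unit):

    #include <cassert>
    #include <cstddef>

    struct SymbolBase {        // mirrors the fields above
      volatile short _refcount;
      unsigned short _length;
      int _identity_hash;
    };                         // 2 + 2 + 4 = 8 bytes on common ABIs

    static const std::size_t HeapWordSize = 8;  // assumed 64-bit word

    static std::size_t heap_word_size(std::size_t bytes) {
      return (bytes + HeapWordSize - 1) / HeapWordSize;  // round up to words
    }

    static std::size_t symbol_size_in_words(int length) {
      return heap_word_size(sizeof(SymbolBase) + (length > 0 ? length : 0));
    }

    int main() {
      assert(sizeof(SymbolBase) == 8);
      assert(symbol_size_in_words(0)  == 1);  // header only
      assert(symbol_size_in_words(10) == 3);  // 8 + 10 = 18 bytes -> 3 words
      return 0;
    }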
@@ -127,9 +136,9 @@ class Symbol : public MetaspaceObj {
}
Symbol(const u1* name, int length, int refcount);
- void* operator new(size_t size, int len, TRAPS);
- void* operator new(size_t size, int len, Arena* arena, TRAPS);
- void* operator new(size_t size, int len, ClassLoaderData* loader_data, TRAPS);
+ void* operator new(size_t size, int len, TRAPS) throw();
+ void* operator new(size_t size, int len, Arena* arena, TRAPS) throw();
+ void* operator new(size_t size, int len, ClassLoaderData* loader_data, TRAPS) throw();
void operator delete(void* p);
diff --git a/src/share/vm/oops/typeArrayKlass.hpp b/src/share/vm/oops/typeArrayKlass.hpp
index 3fe8312a0..f8bf2ac74 100644
--- a/src/share/vm/oops/typeArrayKlass.hpp
+++ b/src/share/vm/oops/typeArrayKlass.hpp
@@ -67,6 +67,8 @@ class TypeArrayKlass : public ArrayKlass {
typeArrayOop allocate(int length, TRAPS) { return allocate_common(length, true, THREAD); }
oop multi_allocate(int rank, jint* sizes, TRAPS);
+ oop protection_domain() const { return NULL; }
+
// Copying
void copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS);
diff --git a/src/share/vm/opto/block.cpp b/src/share/vm/opto/block.cpp
index 0aa41de01..fade19bdb 100644
--- a/src/share/vm/opto/block.cpp
+++ b/src/share/vm/opto/block.cpp
@@ -35,10 +35,6 @@
#include "opto/rootnode.hpp"
#include "utilities/copy.hpp"
-// Optimization - Graph Style
-
-
-//-----------------------------------------------------------------------------
void Block_Array::grow( uint i ) {
assert(i >= Max(), "must be an overflow");
debug_only(_limit = i+1);
@@ -54,7 +50,6 @@ void Block_Array::grow( uint i ) {
Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) );
}
-//=============================================================================
void Block_List::remove(uint i) {
assert(i < _cnt, "index out of bounds");
Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*)));
@@ -76,8 +71,6 @@ void Block_List::print() {
}
#endif
-//=============================================================================
-
uint Block::code_alignment() {
// Check for Root block
if (_pre_order == 0) return CodeEntryAlignment;
@@ -113,16 +106,15 @@ uint Block::compute_loop_alignment() {
return unit_sz; // no particular alignment
}
-//-----------------------------------------------------------------------------
// Compute the size of first 'inst_cnt' instructions in this block.
// Return the number of instructions left to compute if the block has
// less than 'inst_cnt' instructions. Stop, and return 0 if sum_size
// exceeds OptoLoopAlignment.
uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
PhaseRegAlloc* ra) {
- uint last_inst = _nodes.size();
+ uint last_inst = number_of_nodes();
for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
- uint inst_size = _nodes[j]->size(ra);
+ uint inst_size = get_node(j)->size(ra);
if( inst_size > 0 ) {
inst_cnt--;
uint sz = sum_size + inst_size;
@@ -138,10 +130,9 @@ uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
return inst_cnt;
}
-//-----------------------------------------------------------------------------
uint Block::find_node( const Node *n ) const {
- for( uint i = 0; i < _nodes.size(); i++ ) {
- if( _nodes[i] == n )
+ for( uint i = 0; i < number_of_nodes(); i++ ) {
+ if( get_node(i) == n )
return i;
}
ShouldNotReachHere();
@@ -150,10 +141,9 @@ uint Block::find_node( const Node *n ) const {
// Find and remove n from block list
void Block::find_remove( const Node *n ) {
- _nodes.remove(find_node(n));
+ remove_node(find_node(n));
}
-//------------------------------is_Empty---------------------------------------
// Return empty status of a block. Empty blocks contain only the head, other
// ideal nodes, and an optional trailing goto.
int Block::is_Empty() const {
@@ -164,10 +154,10 @@ int Block::is_Empty() const {
}
int success_result = completely_empty;
- int end_idx = _nodes.size()-1;
+ int end_idx = number_of_nodes() - 1;
// Check for ending goto
- if ((end_idx > 0) && (_nodes[end_idx]->is_MachGoto())) {
+ if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) {
success_result = empty_with_goto;
end_idx--;
}
@@ -180,7 +170,7 @@ int Block::is_Empty() const {
// Ideal nodes are allowable in empty blocks: skip them. Only MachNodes
// turn directly into code, because only MachNodes have non-trivial
// emit() functions.
- while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) {
+ while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
end_idx--;
}
@@ -192,7 +182,6 @@ int Block::is_Empty() const {
return not_empty;
}
-//------------------------------has_uncommon_code------------------------------
// Return true if the block's code implies that it is likely to be
// executed infrequently. Check to see if the block ends in a Halt or
// a low probability call.
@@ -218,18 +207,17 @@ bool Block::has_uncommon_code() const {
return op == Op_Halt;
}
-//------------------------------is_uncommon------------------------------------
// True if block is low enough frequency or guarded by a test which
// mostly does not go here.
-bool Block::is_uncommon( Block_Array &bbs ) const {
+bool PhaseCFG::is_uncommon(const Block* block) {
// Initial blocks must never be moved, so are never uncommon.
- if (head()->is_Root() || head()->is_Start()) return false;
+ if (block->head()->is_Root() || block->head()->is_Start()) return false;
// Check for way-low freq
- if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
+ if(block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true;
// Look for code shape indicating uncommon_trap or slow path
- if (has_uncommon_code()) return true;
+ if (block->has_uncommon_code()) return true;
const float epsilon = 0.05f;
const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
@@ -237,8 +225,8 @@ bool Block::is_uncommon( Block_Array &bbs ) const {
uint freq_preds = 0;
uint uncommon_for_freq_preds = 0;
- for( uint i=1; i<num_preds(); i++ ) {
- Block* guard = bbs[pred(i)->_idx];
+ for( uint i=1; i< block->num_preds(); i++ ) {
+ Block* guard = get_block_for_node(block->pred(i));
// Check to see if this block follows its guard 1 time out of 10000
// or less.
//
@@ -256,14 +244,14 @@ bool Block::is_uncommon( Block_Array &bbs ) const {
uncommon_preds++;
} else {
freq_preds++;
- if( _freq < guard->_freq * guard_factor ) {
+ if(block->_freq < guard->_freq * guard_factor ) {
uncommon_for_freq_preds++;
}
}
}
- if( num_preds() > 1 &&
+ if( block->num_preds() > 1 &&
// The block is uncommon if all preds are uncommon or
- (uncommon_preds == (num_preds()-1) ||
+ (uncommon_preds == (block->num_preds()-1) ||
// it is uncommon for all frequent preds.
uncommon_for_freq_preds == freq_preds) ) {
return true;
@@ -271,7 +259,6 @@ bool Block::is_uncommon( Block_Array &bbs ) const {
return false;
}
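The heuristic reads more clearly with the bookkeeping pulled out: a block is uncommon if its own frequency is negligible, if every predecessor is itself uncommon, or if every frequent predecessor reaches it at well under guard_factor of that predecessor's own frequency. A toy version follows; the code-shape test via has_uncommon_code is omitted, and the thresholds are illustrative stand-ins for the BLOCK_FREQUENCY and PROB_UNLIKELY_MAG macros.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // freq: this block's frequency; pred_freqs: its CFG predecessors'.
    static bool is_uncommon(double freq, const std::vector<double>& pred_freqs) {
      const double way_low = 0.00001;                  // stand-in threshold
      if (freq < way_low) return true;
      const double epsilon      = 0.05;
      const double guard_factor = 1e-4 / (1.0 - epsilon);
      int freq_preds = 0, uncommon_for_freq_preds = 0;
      std::size_t uncommon_preds = 0;
      for (double pf : pred_freqs) {
        if (pf < way_low) {
          uncommon_preds++;                // predecessor is itself uncommon
        } else {
          freq_preds++;
          if (freq < pf * guard_factor) {  // almost never taken from this pred
            uncommon_for_freq_preds++;
          }
        }
      }
      return pred_freqs.size() > 1 &&
             (uncommon_preds == pred_freqs.size() ||
              uncommon_for_freq_preds == freq_preds);
    }

    int main() {
      assert(is_uncommon(0.000005, {1.0, 0.5}));  // way-low own frequency
      assert(is_uncommon(0.00005,  {1.0, 2.0}));  // guarded on every frequent pred
      assert(!is_uncommon(0.5,     {1.0, 0.7}));  // plainly hot
      return 0;
    }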
-//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void Block::dump_bidx(const Block* orig, outputStream* st) const {
if (_pre_order) st->print("B%d",_pre_order);
@@ -285,11 +272,11 @@ void Block::dump_bidx(const Block* orig, outputStream* st) const {
}
}
-void Block::dump_pred(const Block_Array *bbs, Block* orig, outputStream* st) const {
+void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
if (is_connector()) {
for (uint i=1; i<num_preds(); i++) {
- Block *p = ((*bbs)[pred(i)->_idx]);
- p->dump_pred(bbs, orig, st);
+ Block *p = cfg->get_block_for_node(pred(i));
+ p->dump_pred(cfg, orig, st);
}
} else {
dump_bidx(orig, st);
@@ -297,7 +284,7 @@ void Block::dump_pred(const Block_Array *bbs, Block* orig, outputStream* st) con
}
}
-void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
+void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
// Print the basic block
dump_bidx(this, st);
st->print(": #\t");
@@ -311,26 +298,28 @@ void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
if( head()->is_block_start() ) {
for (uint i=1; i<num_preds(); i++) {
Node *s = pred(i);
- if (bbs) {
- Block *p = (*bbs)[s->_idx];
- p->dump_pred(bbs, p, st);
+ if (cfg != NULL) {
+ Block *p = cfg->get_block_for_node(s);
+ p->dump_pred(cfg, p, st);
} else {
while (!s->is_block_start())
s = s->in(0);
st->print("N%d ", s->_idx );
}
}
- } else
+ } else {
st->print("BLOCK HEAD IS JUNK ");
+ }
// Print loop, if any
const Block *bhead = this; // Head of self-loop
Node *bh = bhead->head();
- if( bbs && bh->is_Loop() && !head()->is_Root() ) {
+
+ if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
LoopNode *loop = bh->as_Loop();
- const Block *bx = (*bbs)[loop->in(LoopNode::LoopBackControl)->_idx];
+ const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
while (bx->is_connector()) {
- bx = (*bbs)[bx->pred(1)->_idx];
+ bx = cfg->get_block_for_node(bx->pred(1));
}
st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
// Dump any loop-specific bits, especially for CountedLoops.
@@ -349,29 +338,31 @@ void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
st->print_cr("");
}
-void Block::dump() const { dump(NULL); }
+void Block::dump() const {
+ dump(NULL);
+}
-void Block::dump( const Block_Array *bbs ) const {
- dump_head(bbs);
- uint cnt = _nodes.size();
- for( uint i=0; i<cnt; i++ )
- _nodes[i]->dump();
+void Block::dump(const PhaseCFG* cfg) const {
+ dump_head(cfg);
+ for (uint i=0; i< number_of_nodes(); i++) {
+ get_node(i)->dump();
+ }
tty->print("\n");
}
#endif
-//=============================================================================
-//------------------------------PhaseCFG---------------------------------------
-PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
- Phase(CFG),
- _bbs(a),
- _root(r),
- _node_latency(NULL)
+PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
+: Phase(CFG)
+, _block_arena(arena)
+, _root(root)
+, _matcher(matcher)
+, _node_to_block_mapping(arena)
+, _node_latency(NULL)
#ifndef PRODUCT
- , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
+, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
#endif
#ifdef ASSERT
- , _raw_oops(a)
+, _raw_oops(arena)
#endif
{
ResourceMark rm;
@@ -380,16 +371,15 @@ PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
// Node on demand.
Node *x = new (C) GotoNode(NULL);
x->init_req(0, x);
- _goto = m.match_tree(x);
+ _goto = matcher.match_tree(x);
assert(_goto != NULL, "");
_goto->set_req(0,_goto);
// Build the CFG in Reverse Post Order
- _num_blocks = build_cfg();
- _broot = _bbs[_root->_idx];
+ _number_of_blocks = build_cfg();
+ _root_block = get_block_for_node(_root);
}
-//------------------------------build_cfg--------------------------------------
// Build a proper looking CFG. Make every block begin with either a StartNode
// or a RegionNode. Make every block end with either a Goto, If or Return.
// The RootNode both starts and ends its own block. Do this with a recursive
@@ -440,11 +430,11 @@ uint PhaseCFG::build_cfg() {
// 'p' now points to the start of this basic block
// Put self in array of basic blocks
- Block *bb = new (_bbs._arena) Block(_bbs._arena,p);
- _bbs.map(p->_idx,bb);
- _bbs.map(x->_idx,bb);
+ Block *bb = new (_block_arena) Block(_block_arena, p);
+ map_node_to_block(p, bb);
+ map_node_to_block(x, bb);
if( x != p ) { // Only for root is x == p
- bb->_nodes.push((Node*)x);
+ bb->push_node((Node*)x);
}
// Now handle predecessors
++sum; // Count 1 for self block
@@ -473,17 +463,17 @@ uint PhaseCFG::build_cfg() {
// Check if it is the first node pushed on the stack at the beginning.
if (idx == 0) break; // end of the build
// Find predecessor basic block
- Block *pb = _bbs[x->_idx];
+ Block *pb = get_block_for_node(x);
// Insert into nodes array, if not already there
- if( !_bbs.lookup(proj->_idx) ) {
+ if (!has_block(proj)) {
assert( x != proj, "" );
// Map basic block of projection
- _bbs.map(proj->_idx,pb);
- pb->_nodes.push(proj);
+ map_node_to_block(proj, pb);
+ pb->push_node(proj);
}
// Insert self as a child of my predecessor block
- pb->_succs.map(pb->_num_succs++, _bbs[np->_idx]);
- assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
+ pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
+ assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(),
"too many control users, not a CFG?" );
}
}
@@ -491,13 +481,12 @@ uint PhaseCFG::build_cfg() {
return sum;
}
-//------------------------------insert_goto_at---------------------------------
// Inserts a goto & corresponding basic block between
// block[block_no] and its succ_no'th successor block
void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
// get block with block_no
- assert(block_no < _num_blocks, "illegal block number");
- Block* in = _blocks[block_no];
+ assert(block_no < number_of_blocks(), "illegal block number");
+ Block* in = get_block(block_no);
// get successor block succ_no
assert(succ_no < in->_num_succs, "illegal successor number");
Block* out = in->_succs[succ_no];
@@ -506,20 +495,20 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
// surrounding blocks.
float freq = in->_freq * in->succ_prob(succ_no);
// get ProjNode corresponding to the succ_no'th successor of the in block
- ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
+ ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj();
// create region for basic block
RegionNode* region = new (C) RegionNode(2);
region->init_req(1, proj);
// setup corresponding basic block
- Block* block = new (_bbs._arena) Block(_bbs._arena, region);
- _bbs.map(region->_idx, block);
+ Block* block = new (_block_arena) Block(_block_arena, region);
+ map_node_to_block(region, block);
C->regalloc()->set_bad(region->_idx);
// add a goto node
Node* gto = _goto->clone(); // get a new goto node
gto->set_req(0, region);
// add it to the basic block
- block->_nodes.push(gto);
- _bbs.map(gto->_idx, block);
+ block->push_node(gto);
+ map_node_to_block(gto, block);
C->regalloc()->set_bad(gto->_idx);
// hook up successor block
block->_succs.map(block->_num_succs++, out);
@@ -532,17 +521,15 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
// Set the frequency of the new block
block->_freq = freq;
// add new basic block to basic block list
- _blocks.insert(block_no + 1, block);
- _num_blocks++;
+ add_block_at(block_no + 1, block);
}
-//------------------------------no_flip_branch---------------------------------
// Does this block end in a multiway branch that cannot have the default case
// flipped for another case?
static bool no_flip_branch( Block *b ) {
- int branch_idx = b->_nodes.size() - b->_num_succs-1;
+ int branch_idx = b->number_of_nodes() - b->_num_succs-1;
if( branch_idx < 1 ) return false;
- Node *bra = b->_nodes[branch_idx];
+ Node *bra = b->get_node(branch_idx);
if( bra->is_Catch() )
return true;
if( bra->is_Mach() ) {
@@ -555,7 +542,6 @@ static bool no_flip_branch( Block *b ) {
return false;
}
-//------------------------------convert_NeverBranch_to_Goto--------------------
// Check for NeverBranch at block end. This needs to become a GOTO to the
// true target. NeverBranch nodes are treated as a conditional branch that always
// goes the same direction for most of the optimizer and are used to give a
@@ -564,16 +550,16 @@ static bool no_flip_branch( Block *b ) {
void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
// Find true target
int end_idx = b->end_idx();
- int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
+ int idx = b->get_node(end_idx+1)->as_Proj()->_con;
Block *succ = b->_succs[idx];
Node* gto = _goto->clone(); // get a new goto node
gto->set_req(0, b->head());
- Node *bp = b->_nodes[end_idx];
- b->_nodes.map(end_idx,gto); // Slam over NeverBranch
- _bbs.map(gto->_idx, b);
+ Node *bp = b->get_node(end_idx);
+ b->map_node(gto, end_idx); // Slam over NeverBranch
+ map_node_to_block(gto, b);
C->regalloc()->set_bad(gto->_idx);
- b->_nodes.pop(); // Yank projections
- b->_nodes.pop(); // Yank projections
+ b->pop_node(); // Yank projections
+ b->pop_node(); // Yank projections
b->_succs.map(0,succ); // Map only successor
b->_num_succs = 1;
// remap successor's predecessors if necessary
@@ -589,11 +575,10 @@ void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
// Scan through block, yanking dead path from
// all regions and phis.
dead->head()->del_req(j);
- for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
- dead->_nodes[k]->del_req(j);
+ for( int k = 1; dead->get_node(k)->is_Phi(); k++ )
+ dead->get_node(k)->del_req(j);
}
-//------------------------------move_to_next-----------------------------------
// Helper function to move block bx to the slot following b_index. Return
// true if the move is successful, otherwise false
bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
@@ -601,20 +586,22 @@ bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
// Return false if bx is already scheduled.
uint bx_index = bx->_pre_order;
- if ((bx_index <= b_index) && (_blocks[bx_index] == bx)) {
+ if ((bx_index <= b_index) && (get_block(bx_index) == bx)) {
return false;
}
// Find the current index of block bx on the block list
bx_index = b_index + 1;
- while( bx_index < _num_blocks && _blocks[bx_index] != bx ) bx_index++;
- assert(_blocks[bx_index] == bx, "block not found");
+ while (bx_index < number_of_blocks() && get_block(bx_index) != bx) {
+ bx_index++;
+ }
+ assert(get_block(bx_index) == bx, "block not found");
// If the previous block conditionally falls into bx, return false,
// because moving bx will create an extra jump.
for(uint k = 1; k < bx->num_preds(); k++ ) {
- Block* pred = _bbs[bx->pred(k)->_idx];
- if (pred == _blocks[bx_index-1]) {
+ Block* pred = get_block_for_node(bx->pred(k));
+ if (pred == get_block(bx_index - 1)) {
if (pred->_num_succs != 1) {
return false;
}
@@ -627,14 +614,13 @@ bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
return true;
}
-//------------------------------move_to_end------------------------------------
// Move empty and uncommon blocks to the end.
void PhaseCFG::move_to_end(Block *b, uint i) {
int e = b->is_Empty();
if (e != Block::not_empty) {
if (e == Block::empty_with_goto) {
// Remove the goto, but leave the block.
- b->_nodes.pop();
+ b->pop_node();
}
// Mark this block as a connector block, which will cause it to be
// ignored in certain functions such as non_connector_successor().
@@ -645,31 +631,31 @@ void PhaseCFG::move_to_end(Block *b, uint i) {
_blocks.push(b);
}
-//---------------------------set_loop_alignment--------------------------------
// Set loop alignment for every block
void PhaseCFG::set_loop_alignment() {
- uint last = _num_blocks;
- assert( _blocks[0] == _broot, "" );
+ uint last = number_of_blocks();
+ assert(get_block(0) == get_root_block(), "");
- for (uint i = 1; i < last; i++ ) {
- Block *b = _blocks[i];
- if (b->head()->is_Loop()) {
- b->set_loop_alignment(b);
+ for (uint i = 1; i < last; i++) {
+ Block* block = get_block(i);
+ if (block->head()->is_Loop()) {
+ block->set_loop_alignment(block);
}
}
}
-//-----------------------------remove_empty------------------------------------
// Make empty basic blocks into "connector" blocks; move uncommon blocks
// to the end.
-void PhaseCFG::remove_empty() {
+void PhaseCFG::remove_empty_blocks() {
// Move uncommon blocks to the end
- uint last = _num_blocks;
- assert( _blocks[0] == _broot, "" );
+ uint last = number_of_blocks();
+ assert(get_block(0) == get_root_block(), "");
for (uint i = 1; i < last; i++) {
- Block *b = _blocks[i];
- if (b->is_connector()) break;
+ Block* block = get_block(i);
+ if (block->is_connector()) {
+ break;
+ }
// Check for NeverBranch at block end. This needs to become a GOTO to the
// true target. NeverBranch nodes are treated as a conditional branch that
@@ -677,124 +663,127 @@ void PhaseCFG::remove_empty() {
// to give a fake exit path to infinite loops. At this late stage they
// need to turn into Goto's so that when you enter the infinite loop you
// indeed hang.
- if( b->_nodes[b->end_idx()]->Opcode() == Op_NeverBranch )
- convert_NeverBranch_to_Goto(b);
+ if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) {
+ convert_NeverBranch_to_Goto(block);
+ }
// Look for uncommon blocks and move to end.
if (!C->do_freq_based_layout()) {
- if( b->is_uncommon(_bbs) ) {
- move_to_end(b, i);
+ if (is_uncommon(block)) {
+ move_to_end(block, i);
last--; // No longer check for being uncommon!
- if( no_flip_branch(b) ) { // Fall-thru case must follow?
- b = _blocks[i]; // Find the fall-thru block
- move_to_end(b, i);
+ if (no_flip_branch(block)) { // Fall-thru case must follow?
+ // Find the fall-thru block
+ block = get_block(i);
+ move_to_end(block, i);
last--;
}
- i--; // backup block counter post-increment
+ // backup block counter post-increment
+ i--;
}
}
}
// Move empty blocks to the end
- last = _num_blocks;
+ last = number_of_blocks();
for (uint i = 1; i < last; i++) {
- Block *b = _blocks[i];
- if (b->is_Empty() != Block::not_empty) {
- move_to_end(b, i);
+ Block* block = get_block(i);
+ if (block->is_Empty() != Block::not_empty) {
+ move_to_end(block, i);
last--;
i--;
}
} // End of for all blocks
}
-//-----------------------------fixup_flow--------------------------------------
// Fix up the final control flow for basic blocks.
void PhaseCFG::fixup_flow() {
// Fixup final control flow for the blocks. Remove jump-to-next
// block. If neither arm of a IF follows the conditional branch, we
// have to add a second jump after the conditional. We place the
// TRUE branch target in succs[0] for both GOTOs and IFs.
- for (uint i=0; i < _num_blocks; i++) {
- Block *b = _blocks[i];
- b->_pre_order = i; // turn pre-order into block-index
+ for (uint i = 0; i < number_of_blocks(); i++) {
+ Block* block = get_block(i);
+ block->_pre_order = i; // turn pre-order into block-index
// Connector blocks need no further processing.
- if (b->is_connector()) {
- assert((i+1) == _num_blocks || _blocks[i+1]->is_connector(),
- "All connector blocks should sink to the end");
+ if (block->is_connector()) {
+ assert((i+1) == number_of_blocks() || get_block(i + 1)->is_connector(), "All connector blocks should sink to the end");
continue;
}
- assert(b->is_Empty() != Block::completely_empty,
- "Empty blocks should be connectors");
+ assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors");
- Block *bnext = (i < _num_blocks-1) ? _blocks[i+1] : NULL;
- Block *bs0 = b->non_connector_successor(0);
+ Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : NULL;
+ Block* bs0 = block->non_connector_successor(0);
// Check for multi-way branches where I cannot negate the test to
// exchange the true and false targets.
- if( no_flip_branch( b ) ) {
+ if (no_flip_branch(block)) {
// Find fall through case - if must fall into its target
- int branch_idx = b->_nodes.size() - b->_num_succs;
- for (uint j2 = 0; j2 < b->_num_succs; j2++) {
- const ProjNode* p = b->_nodes[branch_idx + j2]->as_Proj();
+ int branch_idx = block->number_of_nodes() - block->_num_succs;
+ for (uint j2 = 0; j2 < block->_num_succs; j2++) {
+ const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
if (p->_con == 0) {
// successor j2 is fall through case
- if (b->non_connector_successor(j2) != bnext) {
+ if (block->non_connector_successor(j2) != bnext) {
// but it is not the next block => insert a goto
insert_goto_at(i, j2);
}
// Put taken branch in slot 0
- if( j2 == 0 && b->_num_succs == 2) {
+ if (j2 == 0 && block->_num_succs == 2) {
// Flip targets in succs map
- Block *tbs0 = b->_succs[0];
- Block *tbs1 = b->_succs[1];
- b->_succs.map( 0, tbs1 );
- b->_succs.map( 1, tbs0 );
+ Block *tbs0 = block->_succs[0];
+ Block *tbs1 = block->_succs[1];
+ block->_succs.map(0, tbs1);
+ block->_succs.map(1, tbs0);
}
break;
}
}
+
// Remove all CatchProjs
- for (uint j1 = 0; j1 < b->_num_succs; j1++) b->_nodes.pop();
+ for (uint j = 0; j < block->_num_succs; j++) {
+ block->pop_node();
+ }
- } else if (b->_num_succs == 1) {
+ } else if (block->_num_succs == 1) {
// Block ends in a Goto?
if (bnext == bs0) {
// We fall into next block; remove the Goto
- b->_nodes.pop();
+ block->pop_node();
}
- } else if( b->_num_succs == 2 ) { // Block ends in a If?
+ } else if (block->_num_succs == 2) { // Block ends in an If?
// Get opcode of 1st projection (matches _succs[0])
// Note: Since this basic block has 2 exits, the last 2 nodes must
// be projections (in any order), the 3rd last node must be
// the IfNode (we have excluded other 2-way exits such as
// CatchNodes already).
- MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
- ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
- ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
+ MachNode* iff = block->get_node(block->number_of_nodes() - 3)->as_Mach();
+ ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj();
+ ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj();
// Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
- assert(proj0->raw_out(0) == b->_succs[0]->head(), "Mismatch successor 0");
- assert(proj1->raw_out(0) == b->_succs[1]->head(), "Mismatch successor 1");
+ assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
+ assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");
- Block *bs1 = b->non_connector_successor(1);
+ Block* bs1 = block->non_connector_successor(1);
// Check for neither successor block following the current
// block ending in a conditional. If so, move one of the
// successors after the current one, provided that the
// successor was previously unscheduled, but moveable
// (i.e., all paths to it involve a branch).
- if( !C->do_freq_based_layout() && bnext != bs0 && bnext != bs1 ) {
+ if (!C->do_freq_based_layout() && bnext != bs0 && bnext != bs1) {
// Choose the more common successor based on the probability
// of the conditional branch.
- Block *bx = bs0;
- Block *by = bs1;
+ Block* bx = bs0;
+ Block* by = bs1;
// _prob is the probability of taking the true path. Make
// p the probability of taking successor #1.
float p = iff->as_MachIf()->_prob;
- if( proj0->Opcode() == Op_IfTrue ) {
+ if (proj0->Opcode() == Op_IfTrue) {
p = 1.0 - p;
}
@@ -821,14 +810,16 @@ void PhaseCFG::fixup_flow() {
// succs[1].
if (bnext == bs0) {
// Fall-thru case in succs[0], so flip targets in succs map
- Block *tbs0 = b->_succs[0];
- Block *tbs1 = b->_succs[1];
- b->_succs.map( 0, tbs1 );
- b->_succs.map( 1, tbs0 );
+ Block* tbs0 = block->_succs[0];
+ Block* tbs1 = block->_succs[1];
+ block->_succs.map(0, tbs1);
+ block->_succs.map(1, tbs0);
// Flip projection for each target
- { ProjNode *tmp = proj0; proj0 = proj1; proj1 = tmp; }
+ ProjNode* tmp = proj0;
+ proj0 = proj1;
+ proj1 = tmp;
- } else if( bnext != bs1 ) {
+ } else if (bnext != bs1) {
// Need a double-branch
// The existing conditional branch need not change.
// Add an unconditional branch to the false target.
@@ -838,12 +829,12 @@ void PhaseCFG::fixup_flow() {
}
// Make sure we TRUE branch to the target
- if( proj0->Opcode() == Op_IfFalse ) {
+ if (proj0->Opcode() == Op_IfFalse) {
iff->as_MachIf()->negate();
}
- b->_nodes.pop(); // Remove IfFalse & IfTrue projections
- b->_nodes.pop();
+ block->pop_node(); // Remove IfFalse & IfTrue projections
+ block->pop_node();
} else {
// Multi-exit block, e.g. a switch statement
@@ -853,7 +844,6 @@ void PhaseCFG::fixup_flow() {
}
-//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const {
const Node *x = end->is_block_proj();
@@ -870,57 +860,58 @@ void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const {
} while( !p->is_block_start() );
// Recursively visit
- for( uint i=1; i<p->req(); i++ )
- _dump_cfg(p->in(i),visited);
+ for (uint i = 1; i < p->req(); i++) {
+ _dump_cfg(p->in(i), visited);
+ }
// Dump the block
- _bbs[p->_idx]->dump(&_bbs);
+ get_block_for_node(p)->dump(this);
}
void PhaseCFG::dump( ) const {
- tty->print("\n--- CFG --- %d BBs\n",_num_blocks);
- if( _blocks.size() ) { // Did we do basic-block layout?
- for( uint i=0; i<_num_blocks; i++ )
- _blocks[i]->dump(&_bbs);
+ tty->print("\n--- CFG --- %d BBs\n", number_of_blocks());
+ if (_blocks.size()) { // Did we do basic-block layout?
+ for (uint i = 0; i < number_of_blocks(); i++) {
+ const Block* block = get_block(i);
+ block->dump(this);
+ }
} else { // Else do it with a DFS
- VectorSet visited(_bbs._arena);
+ VectorSet visited(_block_arena);
_dump_cfg(_root,visited);
}
}
void PhaseCFG::dump_headers() {
- for( uint i = 0; i < _num_blocks; i++ ) {
- if( _blocks[i] == NULL ) continue;
- _blocks[i]->dump_head(&_bbs);
+ for (uint i = 0; i < number_of_blocks(); i++) {
+ Block* block = get_block(i);
+ if (block != NULL) {
+ block->dump_head(this);
+ }
}
}
-void PhaseCFG::verify( ) const {
+void PhaseCFG::verify() const {
#ifdef ASSERT
// Verify sane CFG
- for (uint i = 0; i < _num_blocks; i++) {
- Block *b = _blocks[i];
- uint cnt = b->_nodes.size();
+ for (uint i = 0; i < number_of_blocks(); i++) {
+ Block* block = get_block(i);
+ uint cnt = block->number_of_nodes();
uint j;
for (j = 0; j < cnt; j++) {
- Node *n = b->_nodes[j];
- assert( _bbs[n->_idx] == b, "" );
- if (j >= 1 && n->is_Mach() &&
- n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
- assert(j == 1 || b->_nodes[j-1]->is_Phi(),
- "CreateEx must be first instruction in block");
+ Node *n = block->get_node(j);
+ assert(get_block_for_node(n) == block, "");
+ if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
+ assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
}
for (uint k = 0; k < n->req(); k++) {
Node *def = n->in(k);
if (def && def != n) {
- assert(_bbs[def->_idx] || def->is_Con(),
- "must have block; constants for debug info ok");
+ assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");
// Verify that instructions in the block are in correct order.
// Uses must follow their definition if they are in the same block.
// Mostly done to check that MachSpillCopy nodes are placed correctly
// when CreateEx node is moved in build_ifg_physical().
- if (_bbs[def->_idx] == b &&
- !(b->head()->is_Loop() && n->is_Phi()) &&
+ if (get_block_for_node(def) == block && !(block->head()->is_Loop() && n->is_Phi()) &&
// See (+++) comment in reg_split.cpp
!(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
bool is_loop = false;
@@ -932,29 +923,29 @@ void PhaseCFG::verify( ) const {
}
}
}
- assert(is_loop || b->find_node(def) < j, "uses must follow definitions");
+ assert(is_loop || block->find_node(def) < j, "uses must follow definitions");
}
}
}
}
- j = b->end_idx();
- Node *bp = (Node*)b->_nodes[b->_nodes.size()-1]->is_block_proj();
- assert( bp, "last instruction must be a block proj" );
- assert( bp == b->_nodes[j], "wrong number of successors for this block" );
+ j = block->end_idx();
+ Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj();
+ assert(bp, "last instruction must be a block proj");
+ assert(bp == block->get_node(j), "wrong number of successors for this block");
if (bp->is_Catch()) {
- while (b->_nodes[--j]->is_MachProj()) ;
- assert(b->_nodes[j]->is_MachCall(), "CatchProj must follow call");
+ while (block->get_node(--j)->is_MachProj()) {
+ ;
+ }
+ assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
} else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
- assert(b->_num_succs == 2, "Conditional branch must have two targets");
+ assert(block->_num_succs == 2, "Conditional branch must have two targets");
}
}
#endif
}
#endif
-//=============================================================================
-//------------------------------UnionFind--------------------------------------
UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) {
Copy::zero_to_bytes( _indices, sizeof(uint)*max );
}
@@ -979,7 +970,6 @@ void UnionFind::reset( uint max ) {
for( uint i=0; i<max; i++ ) map(i,i);
}
-//------------------------------Find_compress----------------------------------
// Straight out of Tarjan's union-find algorithm
uint UnionFind::Find_compress( uint idx ) {
uint cur = idx;
@@ -999,7 +989,6 @@ uint UnionFind::Find_compress( uint idx ) {
return idx;
}
-//------------------------------Find_const-------------------------------------
// Like Find above, but with no path compression, so bad asymptotic behavior
uint UnionFind::Find_const( uint idx ) const {
if( idx == 0 ) return idx; // Ignore the zero idx
@@ -1014,7 +1003,6 @@ uint UnionFind::Find_const( uint idx ) const {
return next;
}
-//------------------------------Union------------------------------------------
// Union two sets together.
void UnionFind::Union( uint idx1, uint idx2 ) {
uint src = Find(idx1);
@@ -1063,9 +1051,6 @@ void CFGEdge::dump( ) const {
}
#endif
-//=============================================================================
-
-//------------------------------edge_order-------------------------------------
// Comparison function for edges
static int edge_order(CFGEdge **e0, CFGEdge **e1) {
float freq0 = (*e0)->freq();
@@ -1080,7 +1065,6 @@ static int edge_order(CFGEdge **e0, CFGEdge **e1) {
return dist1 - dist0;
}
-//------------------------------trace_frequency_order--------------------------
// Comparison function for edges
extern "C" int trace_frequency_order(const void *p0, const void *p1) {
Trace *tr0 = *(Trace **) p0;
@@ -1106,17 +1090,15 @@ extern "C" int trace_frequency_order(const void *p0, const void *p1) {
return diff;
}
-//------------------------------find_edges-------------------------------------
// Find edges of interest, i.e., those which can fall through. Presumes that
// edges which don't fall through are of low frequency and can be generally
// ignored. Initialize the list of traces.
-void PhaseBlockLayout::find_edges()
-{
+void PhaseBlockLayout::find_edges() {
// Walk the blocks, creating edges and Traces
uint i;
Trace *tr = NULL;
- for (i = 0; i < _cfg._num_blocks; i++) {
- Block *b = _cfg._blocks[i];
+ for (i = 0; i < _cfg.number_of_blocks(); i++) {
+ Block* b = _cfg.get_block(i);
tr = new Trace(b, next, prev);
traces[tr->id()] = tr;
@@ -1140,7 +1122,7 @@ void PhaseBlockLayout::find_edges()
if (n->num_preds() != 1) break;
i++;
- assert(n = _cfg._blocks[i], "expecting next block");
+    assert(n == _cfg.get_block(i), "expecting next block");
tr->append(n);
uf->map(n->_pre_order, tr->id());
traces[n->_pre_order] = NULL;
@@ -1164,8 +1146,8 @@ void PhaseBlockLayout::find_edges()
}
// Group connector blocks into one trace
- for (i++; i < _cfg._num_blocks; i++) {
- Block *b = _cfg._blocks[i];
+ for (i++; i < _cfg.number_of_blocks(); i++) {
+ Block *b = _cfg.get_block(i);
assert(b->is_connector(), "connector blocks at the end");
tr->append(b);
uf->map(b->_pre_order, tr->id());
@@ -1173,10 +1155,8 @@ void PhaseBlockLayout::find_edges()
}
}
-//------------------------------union_traces----------------------------------
// Union two traces together in uf, and null out the trace in the list
-void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace)
-{
+void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) {
uint old_id = old_trace->id();
uint updated_id = updated_trace->id();
@@ -1200,10 +1180,8 @@ void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace)
traces[hi_id] = NULL;
}
-//------------------------------grow_traces-------------------------------------
// Append traces together via the most frequently executed edges
-void PhaseBlockLayout::grow_traces()
-{
+void PhaseBlockLayout::grow_traces() {
// Order the edges, and drive the growth of Traces via the most
// frequently executed edges.
edges->sort(edge_order);
@@ -1245,11 +1223,9 @@ void PhaseBlockLayout::grow_traces()
}
}
-//------------------------------merge_traces-----------------------------------
// Embed one trace into another, if the fork or join points are sufficiently
// balanced.
-void PhaseBlockLayout::merge_traces(bool fall_thru_only)
-{
+void PhaseBlockLayout::merge_traces(bool fall_thru_only) {
// Walk the edge list another time, looking at unprocessed edges.
// Fold in diamonds
for (int i = 0; i < edges->length(); i++) {
@@ -1303,7 +1279,7 @@ void PhaseBlockLayout::merge_traces(bool fall_thru_only)
src_trace->insert_after(src_block, targ_trace);
union_traces(src_trace, targ_trace);
} else if (src_at_tail) {
- if (src_trace != trace(_cfg._broot)) {
+ if (src_trace != trace(_cfg.get_root_block())) {
e->set_state(CFGEdge::connected);
targ_trace->insert_before(targ_block, src_trace);
union_traces(targ_trace, src_trace);
@@ -1312,7 +1288,7 @@ void PhaseBlockLayout::merge_traces(bool fall_thru_only)
} else if (e->state() == CFGEdge::open) {
// Append traces, even without a fall-thru connection.
// But leave root entry at the beginning of the block list.
- if (targ_trace != trace(_cfg._broot)) {
+ if (targ_trace != trace(_cfg.get_root_block())) {
e->set_state(CFGEdge::connected);
src_trace->append(targ_trace);
union_traces(src_trace, targ_trace);
@@ -1321,11 +1297,9 @@ void PhaseBlockLayout::merge_traces(bool fall_thru_only)
}
}
-//----------------------------reorder_traces-----------------------------------
// Order the sequence of the traces in some desirable way, and fixup the
// jumps at the end of each block.
-void PhaseBlockLayout::reorder_traces(int count)
-{
+void PhaseBlockLayout::reorder_traces(int count) {
ResourceArea *area = Thread::current()->resource_area();
Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count);
Block_List worklist;
@@ -1340,15 +1314,14 @@ void PhaseBlockLayout::reorder_traces(int count)
}
// The entry block should be first on the new trace list.
- Trace *tr = trace(_cfg._broot);
+ Trace *tr = trace(_cfg.get_root_block());
assert(tr == new_traces[0], "entry trace misplaced");
// Sort the new trace list by frequency
qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);
// Patch up the successor blocks
- _cfg._blocks.reset();
- _cfg._num_blocks = 0;
+ _cfg.clear_blocks();
for (int i = 0; i < new_count; i++) {
Trace *tr = new_traces[i];
if (tr != NULL) {
@@ -1357,17 +1330,15 @@ void PhaseBlockLayout::reorder_traces(int count)
}
}
-//------------------------------PhaseBlockLayout-------------------------------
// Order basic blocks based on frequency
-PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg) :
- Phase(BlockLayout),
- _cfg(cfg)
-{
+PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg)
+: Phase(BlockLayout)
+, _cfg(cfg) {
ResourceMark rm;
ResourceArea *area = Thread::current()->resource_area();
// List of traces
- int size = _cfg._num_blocks + 1;
+ int size = _cfg.number_of_blocks() + 1;
traces = NEW_ARENA_ARRAY(area, Trace *, size);
memset(traces, 0, size*sizeof(Trace*));
next = NEW_ARENA_ARRAY(area, Block *, size);
@@ -1400,11 +1371,10 @@ PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg) :
// Re-order all the remaining traces by frequency
reorder_traces(size);
- assert(_cfg._num_blocks >= (uint) (size - 1), "number of blocks can not shrink");
+ assert(_cfg.number_of_blocks() >= (uint) (size - 1), "number of blocks can not shrink");
}
-//------------------------------backedge---------------------------------------
// Edge e completes a loop in a trace. If the target block is head of the
// loop, rotate the loop block so that the loop ends in a conditional branch.
bool Trace::backedge(CFGEdge *e) {
@@ -1456,14 +1426,12 @@ bool Trace::backedge(CFGEdge *e) {
return loop_rotated;
}
-//------------------------------fixup_blocks-----------------------------------
// push blocks onto the CFG list
// ensure that blocks have the correct two-way branch sense
void Trace::fixup_blocks(PhaseCFG &cfg) {
Block *last = last_block();
for (Block *b = first_block(); b != NULL; b = next(b)) {
- cfg._blocks.push(b);
- cfg._num_blocks++;
+ cfg.add_block(b);
if (!b->is_connector()) {
int nfallthru = b->num_fall_throughs();
if (b != last) {
@@ -1472,9 +1440,9 @@ void Trace::fixup_blocks(PhaseCFG &cfg) {
Block *bnext = next(b);
Block *bs0 = b->non_connector_successor(0);
- MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
- ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
- ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
+ MachNode *iff = b->get_node(b->number_of_nodes() - 3)->as_Mach();
+ ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
+ ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();
if (bnext == bs0) {
// Fall-thru case in succs[0], should be in succs[1]
@@ -1486,8 +1454,8 @@ void Trace::fixup_blocks(PhaseCFG &cfg) {
b->_succs.map( 1, tbs0 );
// Flip projections to match targets
- b->_nodes.map(b->_nodes.size()-2, proj1);
- b->_nodes.map(b->_nodes.size()-1, proj0);
+ b->map_node(proj1, b->number_of_nodes() - 2);
+ b->map_node(proj0, b->number_of_nodes() - 1);
}
}
}
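The block.cpp hunks above are dominated by one mechanical substitution: direct
access to the public _bbs array and b->_nodes list becomes a call on the new
accessor API. A minimal sketch of the new idiom, distilled from the hunks above
(illustrative only, not part of the patch; assumes a valid PhaseCFG* cfg):

    // Walk every node of every block through the new accessors.
    for (uint i = 0; i < cfg->number_of_blocks(); i++) {    // was: cfg->_num_blocks
      Block* block = cfg->get_block(i);                     // was: cfg->_blocks[i]
      for (uint j = 0; j < block->number_of_nodes(); j++) { // was: block->_nodes.size()
        Node* n = block->get_node(j);                       // was: block->_nodes[j]
        assert(cfg->get_block_for_node(n) == block,         // was: cfg->_bbs[n->_idx]
               "every node must map back to its owning block");
      }
    }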
diff --git a/src/share/vm/opto/block.hpp b/src/share/vm/opto/block.hpp
index a2e4615b8..e15746918 100644
--- a/src/share/vm/opto/block.hpp
+++ b/src/share/vm/opto/block.hpp
@@ -48,13 +48,12 @@ class Block_Array : public ResourceObj {
friend class VMStructs;
uint _size; // allocated size, as opposed to formal limit
debug_only(uint _limit;) // limit to formal domain
+ Arena *_arena; // Arena to allocate in
protected:
Block **_blocks;
void grow( uint i ); // Grow array node to fit
public:
- Arena *_arena; // Arena to allocate in
-
Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
debug_only(_limit=0);
_blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
@@ -77,7 +76,7 @@ class Block_List : public Block_Array {
public:
uint _cnt;
Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
- void push( Block *b ) { map(_cnt++,b); }
+ void push( Block *b ) { map(_cnt++,b); }
Block *pop() { return _blocks[--_cnt]; }
Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
void remove( uint i );
@@ -106,15 +105,53 @@ class CFGElement : public ResourceObj {
// any optimization pass. They are created late in the game.
class Block : public CFGElement {
friend class VMStructs;
- public:
+
+private:
// Nodes in this block, in order
Node_List _nodes;
+public:
+
+  // Get the node at index 'at_index'; if 'at_index' is out of bounds, return NULL
+ Node* get_node(uint at_index) const {
+ return _nodes[at_index];
+ }
+
+ // Get the number of nodes in this block
+ uint number_of_nodes() const {
+ return _nodes.size();
+ }
+
+  // Map a node 'node' to index 'to_index' in the block; if the index is out of bounds, the size of the node list is increased
+ void map_node(Node* node, uint to_index) {
+ _nodes.map(to_index, node);
+ }
+
+  // Insert a node 'node' at index 'at_index', moving all nodes at a higher index up one step; if 'at_index' is out of bounds, we crash
+ void insert_node(Node* node, uint at_index) {
+ _nodes.insert(at_index, node);
+ }
+
+ // Remove a node at index 'at_index'
+ void remove_node(uint at_index) {
+ _nodes.remove(at_index);
+ }
+
+ // Push a node 'node' onto the node list
+ void push_node(Node* node) {
+ _nodes.push(node);
+ }
+
+ // Pop the last node off the node list
+ Node* pop_node() {
+ return _nodes.pop();
+ }
+
// Basic blocks have a Node which defines Control for all Nodes pinned in
// this block. This Node is a RegionNode. Exception-causing Nodes
// (division, subroutines) and Phi functions are always pinned. Later,
// every Node will get pinned to some block.
- Node *head() const { return _nodes[0]; }
+ Node *head() const { return get_node(0); }
// CAUTION: num_preds() is ONE based, so that predecessor numbers match
// input edges to Regions and Phis.
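Taken together, the accessors above cover every way the rest of the compiler
used to touch _nodes directly. As a usage sketch, the projection flip that
fixup_blocks() performs in block.cpp reads like this against the new API
(illustrative only; b is assumed to be a two-way block):

    // Swap the last two projections of a two-way block in place.
    ProjNode* proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
    ProjNode* proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();
    b->map_node(proj1, b->number_of_nodes() - 2);   // overwrite slots in place
    b->map_node(proj0, b->number_of_nodes() - 1);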
@@ -275,29 +312,12 @@ class Block : public CFGElement {
// Add an instruction to an existing block. It must go after the head
// instruction and before the end instruction.
- void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
+ void add_inst( Node *n ) { insert_node(n, end_idx()); }
// Find node in block
uint find_node( const Node *n ) const;
// Find and remove n from block list
void find_remove( const Node *n );
- // helper function that adds caller save registers to MachProjNode
- void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
- // Schedule a call next in the block
- uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
-
- // Perform basic-block local scheduling
- Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
- void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
- void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
- bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
- // Cleanup if any code lands between a Call and his Catch
- void call_catch_cleanup(Block_Array &bbs, Compile *C);
- // Detect implicit-null-check opportunities. Basically, find NULL checks
- // with suitable memory ops nearby. Use the memory op to do the NULL check.
- // I can generate a memory op if there is not one nearby.
- void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);
-
// Return the empty status of a block
enum { not_empty, empty_with_goto, completely_empty };
int is_Empty() const;
@@ -329,17 +349,13 @@ class Block : public CFGElement {
// Examine block's code shape to predict if it is not commonly executed.
bool has_uncommon_code() const;
- // Use frequency calculations and code shape to predict if the block
- // is uncommon.
- bool is_uncommon( Block_Array &bbs ) const;
-
#ifndef PRODUCT
// Debugging print of basic block
void dump_bidx(const Block* orig, outputStream* st = tty) const;
- void dump_pred(const Block_Array *bbs, Block* orig, outputStream* st = tty) const;
- void dump_head( const Block_Array *bbs, outputStream* st = tty ) const;
+ void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
+ void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
void dump() const;
- void dump( const Block_Array *bbs ) const;
+ void dump(const PhaseCFG* cfg) const;
#endif
};
@@ -349,73 +365,50 @@ class Block : public CFGElement {
class PhaseCFG : public Phase {
friend class VMStructs;
private:
- // Build a proper looking cfg. Return count of basic blocks
- uint build_cfg();
- // Perform DFS search.
- // Setup 'vertex' as DFS to vertex mapping.
- // Setup 'semi' as vertex to DFS mapping.
- // Set 'parent' to DFS parent.
- uint DFS( Tarjan *tarjan );
+ // Root of whole program
+ RootNode* _root;
- // Helper function to insert a node into a block
- void schedule_node_into_block( Node *n, Block *b );
+ // The block containing the root node
+ Block* _root_block;
- void replace_block_proj_ctrl( Node *n );
+ // List of basic blocks that are created during CFG creation
+ Block_List _blocks;
- // Set the basic block for pinned Nodes
- void schedule_pinned_nodes( VectorSet &visited );
+ // Count of basic blocks
+ uint _number_of_blocks;
- // I'll need a few machine-specific GotoNodes. Clone from this one.
- MachNode *_goto;
+ // Arena for the blocks to be stored in
+ Arena* _block_arena;
- Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
- void verify_anti_dependences(Block* LCA, Node* load) {
- assert(LCA == _bbs[load->_idx], "should already be scheduled");
- insert_anti_dependences(LCA, load, true);
- }
+ // The matcher for this compilation
+ Matcher& _matcher;
- public:
- PhaseCFG( Arena *a, RootNode *r, Matcher &m );
-
- uint _num_blocks; // Count of basic blocks
- Block_List _blocks; // List of basic blocks
- RootNode *_root; // Root of whole program
- Block_Array _bbs; // Map Nodes to owning Basic Block
- Block *_broot; // Basic block of root
- uint _rpo_ctr;
+ // Map nodes to owning basic block
+ Block_Array _node_to_block_mapping;
+
+ // Loop from the root
CFGLoop* _root_loop;
- float _outer_loop_freq; // Outmost loop frequency
- // Per node latency estimation, valid only during GCM
- GrowableArray<uint> *_node_latency;
+  // Outermost loop frequency
+ float _outer_loop_frequency;
-#ifndef PRODUCT
- bool _trace_opto_pipelining; // tracing flag
-#endif
+ // Per node latency estimation, valid only during GCM
+ GrowableArray<uint>* _node_latency;
-#ifdef ASSERT
- Unique_Node_List _raw_oops;
-#endif
+ // Build a proper looking cfg. Return count of basic blocks
+ uint build_cfg();
- // Build dominators
- void Dominators();
+ // Build the dominator tree so that we know where we can move instructions
+ void build_dominator_tree();
- // Estimate block frequencies based on IfNode probabilities
- void Estimate_Block_Frequency();
+ // Estimate block frequencies based on IfNode probabilities, so that we know where we want to move instructions
+ void estimate_block_frequency();
// Global Code Motion. See Click's PLDI95 paper. Place Nodes in specific
- // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
- void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );
-
- // Compute the (backwards) latency of a node from the uses
- void latency_from_uses(Node *n);
-
- // Compute the (backwards) latency of a node from a single use
- int latency_from_use(Node *n, const Node *def, Node *use);
-
- // Compute the (backwards) latency of a node from the uses of this instruction
- void partial_latency_of_defs(Node *n);
+ // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
+ // Move nodes to ensure correctness from GVN and also try to move nodes out of loops.
+ void global_code_motion();
// Schedule Nodes early in their basic blocks.
bool schedule_early(VectorSet &visited, Node_List &roots);
@@ -425,21 +418,67 @@ class PhaseCFG : public Phase {
// block to place the node.
void schedule_late(VectorSet &visited, Node_List &stack);
+ // Compute the (backwards) latency of a node from a single use
+ int latency_from_use(Node *n, const Node *def, Node *use);
+
+ // Compute the (backwards) latency of a node from the uses of this instruction
+ void partial_latency_of_defs(Node *n);
+
+ // Compute the instruction global latency with a backwards walk
+ void compute_latencies_backwards(VectorSet &visited, Node_List &stack);
+
// Pick a block between early and late that is a cheaper alternative
// to late. Helper for schedule_late.
Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);
- // Compute the instruction global latency with a backwards walk
- void ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack);
+ bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call);
+ void set_next_call(Block* block, Node* n, VectorSet& next_call);
+ void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);
- // Set loop alignment
- void set_loop_alignment();
+ // Perform basic-block local scheduling
+ Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot);
+
+ // Schedule a call next in the block
+ uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call);
+
+ // Cleanup if any code lands between a Call and his Catch
+ void call_catch_cleanup(Block* block);
+
+ Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
+ void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);
+
+ // Detect implicit-null-check opportunities. Basically, find NULL checks
+ // with suitable memory ops nearby. Use the memory op to do the NULL check.
+ // I can generate a memory op if there is not one nearby.
+ void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons);
+
+ // Perform a Depth First Search (DFS).
+ // Setup 'vertex' as DFS to vertex mapping.
+ // Setup 'semi' as vertex to DFS mapping.
+ // Set 'parent' to DFS parent.
+ uint do_DFS(Tarjan* tarjan, uint rpo_counter);
+
+ // Helper function to insert a node into a block
+ void schedule_node_into_block( Node *n, Block *b );
+
+ void replace_block_proj_ctrl( Node *n );
+
+ // Set the basic block for pinned Nodes
+ void schedule_pinned_nodes( VectorSet &visited );
+
+ // I'll need a few machine-specific GotoNodes. Clone from this one.
+ // Used when building the CFG and creating end nodes for blocks.
+ MachNode* _goto;
+
+ Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
+ void verify_anti_dependences(Block* LCA, Node* load) {
+ assert(LCA == get_block_for_node(load), "should already be scheduled");
+ insert_anti_dependences(LCA, load, true);
+ }
- // Remove empty basic blocks
- void remove_empty();
- void fixup_flow();
bool move_to_next(Block* bx, uint b_index);
void move_to_end(Block* bx, uint b_index);
+
void insert_goto_at(uint block_no, uint succ_no);
// Check for NeverBranch at block end. This needs to become a GOTO to the
@@ -451,10 +490,110 @@ class PhaseCFG : public Phase {
CFGLoop* create_loop_tree();
- // Insert a node into a block, and update the _bbs
- void insert( Block *b, uint idx, Node *n ) {
- b->_nodes.insert( idx, n );
- _bbs.map( n->_idx, b );
+ #ifndef PRODUCT
+ bool _trace_opto_pipelining; // tracing flag
+ #endif
+
+ public:
+ PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);
+
+ void set_latency_for_node(Node* node, int latency) {
+ _node_latency->at_put_grow(node->_idx, latency);
+ }
+
+ uint get_latency_for_node(Node* node) {
+ return _node_latency->at_grow(node->_idx);
+ }
+
+  // Get the outermost loop frequency
+ float get_outer_loop_frequency() const {
+ return _outer_loop_frequency;
+ }
+
+ // Get the root node of the CFG
+ RootNode* get_root_node() const {
+ return _root;
+ }
+
+ // Get the block of the root node
+ Block* get_root_block() const {
+ return _root_block;
+ }
+
+  // Add a block at a position and move the later ones one step
+ void add_block_at(uint pos, Block* block) {
+ _blocks.insert(pos, block);
+ _number_of_blocks++;
+ }
+
+  // Append a block to the end of the block list
+ void add_block(Block* block) {
+ _blocks.push(block);
+ _number_of_blocks++;
+ }
+
+ // Clear the list of blocks
+ void clear_blocks() {
+ _blocks.reset();
+ _number_of_blocks = 0;
+ }
+
+ // Get the block at position pos in _blocks
+ Block* get_block(uint pos) const {
+ return _blocks[pos];
+ }
+
+ // Number of blocks
+ uint number_of_blocks() const {
+ return _number_of_blocks;
+ }
+
+ // set which block this node should reside in
+ void map_node_to_block(const Node* node, Block* block) {
+ _node_to_block_mapping.map(node->_idx, block);
+ }
+
+ // removes the mapping from a node to a block
+ void unmap_node_from_block(const Node* node) {
+ _node_to_block_mapping.map(node->_idx, NULL);
+ }
+
+ // get the block in which this node resides
+ Block* get_block_for_node(const Node* node) const {
+ return _node_to_block_mapping[node->_idx];
+ }
+
+  // Does this node reside in a block; return true if a node-to-block mapping exists
+ bool has_block(const Node* node) const {
+ return (_node_to_block_mapping.lookup(node->_idx) != NULL);
+ }
+
+ // Use frequency calculations and code shape to predict if the block
+ // is uncommon.
+ bool is_uncommon(const Block* block);
+
+#ifdef ASSERT
+ Unique_Node_List _raw_oops;
+#endif
+
+  // Do global code motion by first building the dominator tree and estimating block frequencies
+ // Returns true on success
+ bool do_global_code_motion();
+
+ // Compute the (backwards) latency of a node from the uses
+ void latency_from_uses(Node *n);
+
+ // Set loop alignment
+ void set_loop_alignment();
+
+ // Remove empty basic blocks
+ void remove_empty_blocks();
+ void fixup_flow();
+
+ // Insert a node into a block at index and map the node to the block
+ void insert(Block *b, uint idx, Node *n) {
+    b->insert_node(n, idx);
+ map_node_to_block(n, b);
}
#ifndef PRODUCT
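These accessors replace the public _bbs Block_Array as the only way to read or
write the node-to-block mapping. A short sketch of the intended protocol, based
on how insert_goto_at() uses it in block.cpp (illustrative only; arena and
region are stand-ins for the caller's values):

    // Create a block for a new region and register the mapping.
    Block* block = new (arena) Block(arena, region);
    cfg->map_node_to_block(region, block);
    assert(cfg->has_block(region), "mapping was just created");
    assert(cfg->get_block_for_node(region) == block, "round trip");
    cfg->unmap_node_from_block(region);   // mapping can also be removed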
@@ -543,7 +682,7 @@ class CFGLoop : public CFGElement {
_child(NULL),
_exit_prob(1.0f) {}
CFGLoop* parent() { return _parent; }
- void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
+ void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
void add_member(CFGElement *s) { _members.push(s); }
void add_nested_loop(CFGLoop* cl);
Block* head() {
diff --git a/src/share/vm/opto/buildOopMap.cpp b/src/share/vm/opto/buildOopMap.cpp
index 6d6b42163..38a535fca 100644
--- a/src/share/vm/opto/buildOopMap.cpp
+++ b/src/share/vm/opto/buildOopMap.cpp
@@ -90,7 +90,6 @@
// OptoReg::Bad for not-callee-saved.
-//------------------------------OopFlow----------------------------------------
// Structure to pass around
struct OopFlow : public ResourceObj {
short *_callees; // Array mapping register to callee-saved
@@ -122,12 +121,11 @@ struct OopFlow : public ResourceObj {
OopMap *build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live );
};
-//------------------------------compute_reach----------------------------------
// Given reaching-defs for this block start, compute it for this block end
void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) {
- for( uint i=0; i<_b->_nodes.size(); i++ ) {
- Node *n = _b->_nodes[i];
+ for( uint i=0; i<_b->number_of_nodes(); i++ ) {
+ Node *n = _b->get_node(i);
if( n->jvms() ) { // Build an OopMap here?
JVMState *jvms = n->jvms();
@@ -180,7 +178,6 @@ void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehas
}
}
-//------------------------------merge------------------------------------------
// Merge the given flow into the 'this' flow
void OopFlow::merge( OopFlow *flow, int max_reg ) {
assert( _b == NULL, "merging into a happy flow" );
@@ -200,14 +197,12 @@ void OopFlow::merge( OopFlow *flow, int max_reg ) {
}
-//------------------------------clone------------------------------------------
void OopFlow::clone( OopFlow *flow, int max_size ) {
_b = flow->_b;
memcpy( _callees, flow->_callees, sizeof(short)*max_size);
memcpy( _defs , flow->_defs , sizeof(Node*)*max_size);
}
-//------------------------------make-------------------------------------------
OopFlow *OopFlow::make( Arena *A, int max_size, Compile* C ) {
short *callees = NEW_ARENA_ARRAY(A,short,max_size+1);
Node **defs = NEW_ARENA_ARRAY(A,Node*,max_size+1);
@@ -218,7 +213,6 @@ OopFlow *OopFlow::make( Arena *A, int max_size, Compile* C ) {
return flow;
}
-//------------------------------bit twiddlers----------------------------------
static int get_live_bit( int *live, int reg ) {
return live[reg>>LogBitsPerInt] & (1<<(reg&(BitsPerInt-1))); }
static void set_live_bit( int *live, int reg ) {
@@ -226,7 +220,6 @@ static void set_live_bit( int *live, int reg ) {
static void clr_live_bit( int *live, int reg ) {
live[reg>>LogBitsPerInt] &= ~(1<<(reg&(BitsPerInt-1))); }
-//------------------------------build_oop_map----------------------------------
// Build an oopmap from the current flow info
OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live ) {
int framesize = regalloc->_framesize;
@@ -415,28 +408,29 @@ OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, i
return omap;
}
-//------------------------------do_liveness------------------------------------
// Compute backwards liveness on registers
-static void do_liveness( PhaseRegAlloc *regalloc, PhaseCFG *cfg, Block_List *worklist, int max_reg_ints, Arena *A, Dict *safehash ) {
- int *live = NEW_ARENA_ARRAY(A, int, (cfg->_num_blocks+1) * max_reg_ints);
- int *tmp_live = &live[cfg->_num_blocks * max_reg_ints];
- Node *root = cfg->C->root();
+static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* worklist, int max_reg_ints, Arena* A, Dict* safehash) {
+ int* live = NEW_ARENA_ARRAY(A, int, (cfg->number_of_blocks() + 1) * max_reg_ints);
+ int* tmp_live = &live[cfg->number_of_blocks() * max_reg_ints];
+ Node* root = cfg->get_root_node();
// On CISC platforms, get the node representing the stack pointer that regalloc
// used for spills
Node *fp = NodeSentinel;
if (UseCISCSpill && root->req() > 1) {
fp = root->in(1)->in(TypeFunc::FramePtr);
}
- memset( live, 0, cfg->_num_blocks * (max_reg_ints<<LogBytesPerInt) );
+ memset(live, 0, cfg->number_of_blocks() * (max_reg_ints << LogBytesPerInt));
// Push preds onto worklist
- for( uint i=1; i<root->req(); i++ )
- worklist->push(cfg->_bbs[root->in(i)->_idx]);
+ for (uint i = 1; i < root->req(); i++) {
+ Block* block = cfg->get_block_for_node(root->in(i));
+ worklist->push(block);
+ }
// ZKM.jar includes tiny infinite loops which are unreached from below.
// If we missed any blocks, we'll retry here after pushing all missed
// blocks on the worklist. Normally this outer loop never trips more
// than once.
- while( 1 ) {
+ while (1) {
while( worklist->size() ) { // Standard worklist algorithm
Block *b = worklist->rpop();
@@ -456,8 +450,8 @@ static void do_liveness( PhaseRegAlloc *regalloc, PhaseCFG *cfg, Block_List *wor
}
// Now walk tmp_live up the block backwards, computing live
- for( int k=b->_nodes.size()-1; k>=0; k-- ) {
- Node *n = b->_nodes[k];
+ for( int k=b->number_of_nodes()-1; k>=0; k-- ) {
+ Node *n = b->get_node(k);
// KILL def'd bits
int first = regalloc->get_reg_first(n);
int second = regalloc->get_reg_second(n);
@@ -540,37 +534,42 @@ static void do_liveness( PhaseRegAlloc *regalloc, PhaseCFG *cfg, Block_List *wor
for( l=0; l<max_reg_ints; l++ )
old_live[l] = tmp_live[l];
// Push preds onto worklist
- for( l=1; l<(int)b->num_preds(); l++ )
- worklist->push(cfg->_bbs[b->pred(l)->_idx]);
+ for (l = 1; l < (int)b->num_preds(); l++) {
+ Block* block = cfg->get_block_for_node(b->pred(l));
+ worklist->push(block);
+ }
}
}
// Scan for any missing safepoints. Happens to infinite loops
// ala ZKM.jar
uint i;
- for( i=1; i<cfg->_num_blocks; i++ ) {
- Block *b = cfg->_blocks[i];
+ for (i = 1; i < cfg->number_of_blocks(); i++) {
+ Block* block = cfg->get_block(i);
uint j;
- for( j=1; j<b->_nodes.size(); j++ )
- if( b->_nodes[j]->jvms() &&
- (*safehash)[b->_nodes[j]] == NULL )
+ for (j = 1; j < block->number_of_nodes(); j++) {
+ if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == NULL) {
break;
- if( j<b->_nodes.size() ) break;
+ }
+ }
+ if (j < block->number_of_nodes()) {
+ break;
+ }
}
- if( i == cfg->_num_blocks )
+ if (i == cfg->number_of_blocks()) {
break; // Got 'em all
+ }
#ifndef PRODUCT
if( PrintOpto && Verbose )
tty->print_cr("retripping live calc");
#endif
// Force the issue (expensively): recheck everybody
- for( i=1; i<cfg->_num_blocks; i++ )
- worklist->push(cfg->_blocks[i]);
+ for (i = 1; i < cfg->number_of_blocks(); i++) {
+ worklist->push(cfg->get_block(i));
+ }
}
-
}
-//------------------------------BuildOopMaps-----------------------------------
// Collect GC mask info - where are all the OOPs?
void Compile::BuildOopMaps() {
NOT_PRODUCT( TracePhase t3("bldOopMaps", &_t_buildOopMaps, TimeCompiler); )
@@ -591,12 +590,12 @@ void Compile::BuildOopMaps() {
OopFlow *free_list = NULL; // Free, unused
// Array mapping blocks to completed oopflows
- OopFlow **flows = NEW_ARENA_ARRAY(A, OopFlow*, _cfg->_num_blocks);
- memset( flows, 0, _cfg->_num_blocks*sizeof(OopFlow*) );
+ OopFlow **flows = NEW_ARENA_ARRAY(A, OopFlow*, _cfg->number_of_blocks());
+ memset( flows, 0, _cfg->number_of_blocks() * sizeof(OopFlow*) );
// Do the first block 'by hand' to prime the worklist
- Block *entry = _cfg->_blocks[1];
+ Block *entry = _cfg->get_block(1);
OopFlow *rootflow = OopFlow::make(A,max_reg,this);
// Initialize to 'bottom' (not 'top')
memset( rootflow->_callees, OptoReg::Bad, max_reg*sizeof(short) );
@@ -622,7 +621,9 @@ void Compile::BuildOopMaps() {
Block *b = worklist.pop();
// Ignore root block
- if( b == _cfg->_broot ) continue;
+ if (b == _cfg->get_root_block()) {
+ continue;
+ }
// Block is already done? Happens if block has several predecessors,
// he can get on the worklist more than once.
if( flows[b->_pre_order] ) continue;
@@ -632,10 +633,9 @@ void Compile::BuildOopMaps() {
// pred to this block. Otherwise we have to grab a new OopFlow.
OopFlow *flow = NULL; // Flag for finding optimized flow
Block *pred = (Block*)0xdeadbeef;
- uint j;
// Scan this block's preds to find a done predecessor
- for( j=1; j<b->num_preds(); j++ ) {
- Block *p = _cfg->_bbs[b->pred(j)->_idx];
+ for (uint j = 1; j < b->num_preds(); j++) {
+ Block* p = _cfg->get_block_for_node(b->pred(j));
OopFlow *p_flow = flows[p->_pre_order];
if( p_flow ) { // Predecessor is done
assert( p_flow->_b == p, "cross check" );
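do_liveness() above is a standard backwards dataflow fixpoint rewritten against
the new block API. Its worklist skeleton, compressed to the essentials
(illustrative only; the per-block transfer function is elided):

    // Seed with the root's predecessors, then iterate to a fixpoint.
    for (uint i = 1; i < root->req(); i++) {
      worklist->push(cfg->get_block_for_node(root->in(i)));
    }
    while (worklist->size()) {
      Block* b = worklist->rpop();
      // ... merge successor live sets, walk b's nodes backwards ...
      // In do_liveness() the predecessors are re-pushed only when the
      // recomputed live-in set actually changed.
      for (int l = 1; l < (int)b->num_preds(); l++) {
        worklist->push(cfg->get_block_for_node(b->pred(l)));
      }
    }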
diff --git a/src/share/vm/opto/bytecodeInfo.cpp b/src/share/vm/opto/bytecodeInfo.cpp
index 6d8f1b3da..2ba7b1cf3 100644
--- a/src/share/vm/opto/bytecodeInfo.cpp
+++ b/src/share/vm/opto/bytecodeInfo.cpp
@@ -85,16 +85,35 @@ InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvm
assert(!UseOldInlining, "do not use for old stuff");
}
+/**
+ * Return true when EA is ON and a java constructor is called or
+ * a super constructor is called from an inlined java constructor.
+ * Also return true for boxing methods.
+ */
static bool is_init_with_ea(ciMethod* callee_method,
ciMethod* caller_method, Compile* C) {
- // True when EA is ON and a java constructor is called or
- // a super constructor is called from an inlined java constructor.
- return C->do_escape_analysis() && EliminateAllocations &&
- ( callee_method->is_initializer() ||
- (caller_method->is_initializer() &&
- caller_method != C->method() &&
- caller_method->holder()->is_subclass_of(callee_method->holder()))
- );
+ if (!C->do_escape_analysis() || !EliminateAllocations) {
+ return false; // EA is off
+ }
+ if (callee_method->is_initializer()) {
+    return true; // constructor
+ }
+ if (caller_method->is_initializer() &&
+ caller_method != C->method() &&
+ caller_method->holder()->is_subclass_of(callee_method->holder())) {
+ return true; // super constructor is called from inlined constructor
+ }
+ if (C->eliminate_boxing() && callee_method->is_boxing_method()) {
+ return true;
+ }
+ return false;
+}
+
+/**
+ * Force inlining of unboxing accessors.
+ */
+static bool is_unboxing_method(ciMethod* callee_method, Compile* C) {
+ return C->eliminate_boxing() && callee_method->is_unboxing_method();
}
// positive filter: should callee be inlined?
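The two helpers above gate inlining decisions further down in this file: both
should_inline() (to bump the size budget for hot EA/boxing candidates) and
should_not_inline() (to force unboxing methods in) consult them. Condensed into
one hypothetical predicate for illustration (force_inline_for_boxing is not a
real function in this patch):

    // True when the callee should get the relaxed inlining treatment.
    static bool force_inline_for_boxing(ciMethod* callee, ciMethod* caller, Compile* C) {
      return is_unboxing_method(callee, C) ||     // unboxing accessor
             is_init_with_ea(callee, caller, C);  // EA'd constructor or boxing method
    }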
@@ -104,7 +123,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
// Allows targeted inlining
if(callee_method->should_inline()) {
*wci_result = *(WarmCallInfo::always_hot());
- if (PrintInlining && Verbose) {
+ if (C->print_inlining() && Verbose) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr("Inlined method is hot: ");
}
@@ -118,7 +137,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
size < InlineThrowMaxSize ) {
wci_result->set_profit(wci_result->profit() * 100);
- if (PrintInlining && Verbose) {
+ if (C->print_inlining() && Verbose) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
}
@@ -144,6 +163,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
// bump the max size if the call is frequent
if ((freq >= InlineFrequencyRatio) ||
(call_site_count >= InlineFrequencyCount) ||
+ is_unboxing_method(callee_method, C) ||
is_init_with_ea(callee_method, caller_method, C)) {
max_inline_size = C->freq_inline_size();
@@ -237,8 +257,25 @@ bool InlineTree::should_not_inline(ciMethod *callee_method,
return false;
}
+ if (callee_method->should_not_inline()) {
+ set_msg("disallowed by CompilerOracle");
+ return true;
+ }
+
+#ifndef PRODUCT
+ if (ciReplay::should_not_inline(callee_method)) {
+ set_msg("disallowed by ciReplay");
+ return true;
+ }
+#endif
+
// Now perform checks which are heuristic
+ if (is_unboxing_method(callee_method, C)) {
+ // Inline unboxing methods.
+ return false;
+ }
+
if (!callee_method->force_inline()) {
if (callee_method->has_compiled_code() &&
callee_method->instructions_size() > InlineSmallCode) {
@@ -260,27 +297,6 @@ bool InlineTree::should_not_inline(ciMethod *callee_method,
}
}
- if (callee_method->should_not_inline()) {
- set_msg("disallowed by CompilerOracle");
- return true;
- }
-
-#ifndef PRODUCT
- if (ciReplay::should_not_inline(callee_method)) {
- set_msg("disallowed by ciReplay");
- return true;
- }
-#endif
-
- if (UseStringCache) {
- // Do not inline StringCache::profile() method used only at the beginning.
- if (callee_method->name() == ciSymbol::profile_name() &&
- callee_method->holder()->name() == ciSymbol::java_lang_StringCache()) {
- set_msg("profiling method");
- return true;
- }
- }
-
// use frequency-based objections only for non-trivial methods
if (callee_method->code_size() <= MaxTrivialSize) {
return false;
@@ -296,9 +312,8 @@ bool InlineTree::should_not_inline(ciMethod *callee_method,
}
if (is_init_with_ea(callee_method, caller_method, C)) {
-
// Escape Analysis: inline all executed constructors
-
+ return false;
} else if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold,
CompileThreshold >> 1))) {
set_msg("executed < MinInliningThreshold times");
@@ -476,7 +491,7 @@ void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
C->log()->inline_fail(inline_msg);
}
}
- if (PrintInlining) {
+ if (C->print_inlining()) {
C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
if (Verbose && callee_method) {
@@ -525,7 +540,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
#ifndef PRODUCT
if (UseOldInlining && InlineWarmCalls
- && (PrintOpto || PrintOptoInlining || PrintInlining)) {
+ && (PrintOpto || C->print_inlining())) {
bool cold = wci.is_cold();
bool hot = !cold && wci.is_hot();
bool old_cold = !success;
@@ -602,7 +617,7 @@ InlineTree *InlineTree::build_inline_tree_for_callee( ciMethod* callee_method, J
callee_method->is_compiled_lambda_form()) {
max_inline_level_adjust += 1; // don't count method handle calls from java.lang.invoke implem
}
- if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
+ if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr(" \\-> discounting inline depth");
}
diff --git a/src/share/vm/opto/c2_globals.hpp b/src/share/vm/opto/c2_globals.hpp
index b6f0629db..76bafe3ce 100644
--- a/src/share/vm/opto/c2_globals.hpp
+++ b/src/share/vm/opto/c2_globals.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -182,6 +182,9 @@
product_pd(intx, LoopUnrollLimit, \
"Unroll loop bodies with node count less than this") \
\
+ product(intx, LoopMaxUnroll, 16, \
+ "Maximum number of unrolls for main loop") \
+ \
product(intx, LoopUnrollMin, 4, \
"Minimum number of unroll loop bodies before checking progress" \
"of rounds of unroll,optimize,..") \
@@ -409,10 +412,10 @@
develop(intx, WarmCallMaxSize, 999999, \
"size of the largest inlinable method") \
\
- product(intx, MaxNodeLimit, 65000, \
+ product(intx, MaxNodeLimit, 80000, \
"Maximum number of nodes") \
\
- product(intx, NodeLimitFudgeFactor, 1000, \
+ product(intx, NodeLimitFudgeFactor, 2000, \
"Fudge Factor for certain optimizations") \
\
product(bool, UseJumpTables, true, \
@@ -421,7 +424,7 @@
product(bool, UseDivMod, true, \
"Use combined DivMod instruction if available") \
\
- product(intx, MinJumpTableSize, 18, \
+ product_pd(intx, MinJumpTableSize, \
"Minimum number of targets in a generated jump table") \
\
product(intx, MaxJumpTableSize, 65000, \
@@ -445,12 +448,18 @@
notproduct(bool, PrintEliminateLocks, false, \
"Print out when locks are eliminated") \
\
- diagnostic(bool, EliminateAutoBox, false, \
- "Private flag to control optimizations for autobox elimination") \
+ product(bool, EliminateAutoBox, true, \
+ "Control optimizations for autobox elimination") \
+ \
+ experimental(bool, UseImplicitStableValues, false, \
+ "Mark well-known stable fields as such (e.g. String.value)") \
\
product(intx, AutoBoxCacheMax, 128, \
"Sets max value cached by the java.lang.Integer autobox cache") \
\
+ experimental(bool, AggressiveUnboxing, false, \
+ "Control optimizations for aggressive boxing elimination") \
+ \
product(bool, DoEscapeAnalysis, true, \
"Perform escape analysis") \
\
diff --git a/src/share/vm/opto/c2compiler.cpp b/src/share/vm/opto/c2compiler.cpp
index 7083c6c33..95aa2db07 100644
--- a/src/share/vm/opto/c2compiler.cpp
+++ b/src/share/vm/opto/c2compiler.cpp
@@ -128,9 +128,10 @@ void C2Compiler::compile_method(ciEnv* env,
bool subsume_loads = SubsumeLoads;
bool do_escape_analysis = DoEscapeAnalysis &&
!env->jvmti_can_access_local_variables();
+ bool eliminate_boxing = EliminateAutoBox;
while (!env->failing()) {
// Attempt to compile while subsuming loads into machine instructions.
- Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);
+ Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis, eliminate_boxing);
// Check result and retry if appropriate.
@@ -145,6 +146,12 @@ void C2Compiler::compile_method(ciEnv* env,
do_escape_analysis = false;
continue; // retry
}
+ if (C.has_boxed_value()) {
+      // Recompile without boxing elimination regardless of the failure reason.
+ assert(eliminate_boxing, "must make progress");
+ eliminate_boxing = false;
+ continue; // retry
+ }
// Pass any other failure reason up to the ciEnv.
// Note that serious, irreversible failures are already logged
// on the ciEnv via env->record_method_not_compilable().
diff --git a/src/share/vm/opto/callGenerator.cpp b/src/share/vm/opto/callGenerator.cpp
index 8cac8ee76..470a36e5c 100644
--- a/src/share/vm/opto/callGenerator.cpp
+++ b/src/share/vm/opto/callGenerator.cpp
@@ -134,7 +134,7 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) {
kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
}
- CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(tf(), target, method(), kit.bci());
+ CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
_call_node = call; // Save the call node in case we need it later
if (!is_static) {
// Make an explicit receiver null_check as part of this call.
@@ -304,29 +304,34 @@ class LateInlineCallGenerator : public DirectCallGenerator {
void LateInlineCallGenerator::do_late_inline() {
// Can't inline it
- if (call_node() == NULL || call_node()->outcnt() == 0 ||
- call_node()->in(0) == NULL || call_node()->in(0)->is_top()) {
+ CallStaticJavaNode* call = call_node();
+ if (call == NULL || call->outcnt() == 0 ||
+ call->in(0) == NULL || call->in(0)->is_top()) {
return;
}
- const TypeTuple *r = call_node()->tf()->domain();
+ const TypeTuple *r = call->tf()->domain();
for (int i1 = 0; i1 < method()->arg_size(); i1++) {
- if (call_node()->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
+ if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
return;
}
}
- if (call_node()->in(TypeFunc::Memory)->is_top()) {
+ if (call->in(TypeFunc::Memory)->is_top()) {
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
return;
}
- CallStaticJavaNode* call = call_node();
+ Compile* C = Compile::current();
+ // Remove inlined methods from Compiler's lists.
+ if (call->is_macro()) {
+ C->remove_macro_node(call);
+ }
  // Make a clone of the JVMState that is appropriate for driving a parse
- Compile* C = Compile::current();
- JVMState* jvms = call->jvms()->clone_shallow(C);
+ JVMState* old_jvms = call->jvms();
+ JVMState* jvms = old_jvms->clone_shallow(C);
uint size = call->req();
SafePointNode* map = new (C) SafePointNode(size, jvms);
for (uint i1 = 0; i1 < size; i1++) {
@@ -340,16 +345,23 @@ void LateInlineCallGenerator::do_late_inline() {
map->set_req(TypeFunc::Memory, mem);
}
- // Make enough space for the expression stack and transfer the incoming arguments
- int nargs = method()->arg_size();
+ uint nargs = method()->arg_size();
+ // blow away old call arguments
+ Node* top = C->top();
+ for (uint i1 = 0; i1 < nargs; i1++) {
+ map->set_req(TypeFunc::Parms + i1, top);
+ }
jvms->set_map(map);
+
+ // Make enough space in the expression stack to transfer
+ // the incoming arguments and return value.
map->ensure_stack(jvms, jvms->method()->max_stack());
- if (nargs > 0) {
- for (int i1 = 0; i1 < nargs; i1++) {
- map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
- }
+ for (uint i1 = 0; i1 < nargs; i1++) {
+ map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
}
+  // This check is done here because the for_method_handle_inline() method
+  // needs the jvms for the inlined state.
if (!do_late_inline_check(jvms)) {
map->disconnect_inputs(NULL, C);
return;
@@ -480,6 +492,26 @@ CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGener
return new LateInlineStringCallGenerator(method, inline_cg);
}
+class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
+
+ public:
+ LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
+ LateInlineCallGenerator(method, inline_cg) {}
+
+ virtual JVMState* generate(JVMState* jvms) {
+ Compile *C = Compile::current();
+ C->print_inlining_skip(this);
+
+ C->add_boxing_late_inline(this);
+
+ JVMState* new_jvms = DirectCallGenerator::generate(jvms);
+ return new_jvms;
+ }
+};
+
+CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
+ return new LateInlineBoxingCallGenerator(method, inline_cg);
+}
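
On the caller side (a hedged sketch of how the new factory is expected to be wired into the call-site selection code, which is outside this hunk), a box-factory call is compiled as a direct call now and queued for inlining once escape analysis has run:

      // Hypothetical call-site wiring; 'callee' and 'inline_cg' are the usual
      // doCall-style locals. The wrapper registers itself for late inlining
      // via add_boxing_late_inline() in its generate() method above.
      if (C->eliminate_boxing() && callee->is_boxing_method()) {
        CallGenerator* inline_cg = CallGenerator::for_inline(callee);
        return CallGenerator::for_boxing_late_inline(callee, inline_cg);
      }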
//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
diff --git a/src/share/vm/opto/callGenerator.hpp b/src/share/vm/opto/callGenerator.hpp
index 7051dbe5c..a1616de4d 100644
--- a/src/share/vm/opto/callGenerator.hpp
+++ b/src/share/vm/opto/callGenerator.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -125,6 +125,7 @@ class CallGenerator : public ResourceObj {
static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
static CallGenerator* for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const);
static CallGenerator* for_string_late_inline(ciMethod* m, CallGenerator* inline_cg);
+ static CallGenerator* for_boxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
// How to make a call but defer the decision whether to inline or not.
static CallGenerator* for_warm_call(WarmCallInfo* ci,
@@ -158,8 +159,9 @@ class CallGenerator : public ResourceObj {
virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
- if (PrintInlining)
+ if (C->print_inlining()) {
C->print_inlining(callee, inline_level, bci, msg);
+ }
}
};
@@ -259,7 +261,7 @@ class WarmCallInfo : public ResourceObj {
// Because WarmInfo objects live over the entire lifetime of the
// Compile object, they are allocated into the comp_arena, which
// does not get resource marked or reset during the compile process
- void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
+ void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
void operator delete( void * ) { } // fast deallocation
static WarmCallInfo* always_hot();
diff --git a/src/share/vm/opto/callnode.cpp b/src/share/vm/opto/callnode.cpp
index c90b76d4a..594b650de 100644
--- a/src/share/vm/opto/callnode.cpp
+++ b/src/share/vm/opto/callnode.cpp
@@ -458,7 +458,7 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
st->print("={");
uint nf = spobj->n_fields();
if (nf > 0) {
- uint first_ind = spobj->first_index();
+ uint first_ind = spobj->first_index(mcall->jvms());
Node* fld_node = mcall->in(first_ind);
ciField* cifield;
if (iklass != NULL) {
@@ -523,7 +523,9 @@ void JVMState::dump_spec(outputStream *st) const {
void JVMState::dump_on(outputStream* st) const {
- if (_map && !((uintptr_t)_map & 1)) {
+ bool print_map = _map && !((uintptr_t)_map & 1) &&
+ ((caller() == NULL) || (caller()->map() != _map));
+ if (print_map) {
if (_map->len() > _map->req()) { // _map->has_exceptions()
Node* ex = _map->in(_map->req()); // _map->next_exception()
// skip the first one; it's already being printed
@@ -532,7 +534,10 @@ void JVMState::dump_on(outputStream* st) const {
ex->dump(1);
}
}
- _map->dump(2);
+ _map->dump(Verbose ? 2 : 1);
+ }
+ if (caller() != NULL) {
+ caller()->dump_on(st);
}
st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
@@ -546,9 +551,6 @@ void JVMState::dump_on(outputStream* st) const {
_method->print_codes_on(bci(), bci()+1, st);
}
}
- if (caller() != NULL) {
- caller()->dump_on(st);
- }
}
// Extra way to dump a jvms from the debugger,
@@ -584,6 +586,15 @@ JVMState* JVMState::clone_deep(Compile* C) const {
return n;
}
+/**
+ * Reset map for all callers
+ */
+void JVMState::set_map_deep(SafePointNode* map) {
+ for (JVMState* p = this; p->_caller != NULL; p = p->_caller) {
+ p->set_map(map);
+ }
+}
+
//=============================================================================
uint CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
@@ -663,17 +674,49 @@ uint CallNode::match_edge(uint idx) const {
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
-bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) {
- const TypeOopPtr *adrInst_t = addr_t->isa_oopptr();
-
- // If not an OopPtr or not an instance type, assume the worst.
- // Note: currently this method is called only for instance types.
- if (adrInst_t == NULL || !adrInst_t->is_known_instance()) {
- return true;
+bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
+ assert((t_oop != NULL), "sanity");
+ if (t_oop->is_known_instance()) {
+ // The instance_id is set only for scalar-replaceable allocations which
+ // are not passed as arguments according to Escape Analysis.
+ return false;
}
- // The instance_id is set only for scalar-replaceable allocations which
- // are not passed as arguments according to Escape Analysis.
- return false;
+ if (t_oop->is_ptr_to_boxed_value()) {
+ ciKlass* boxing_klass = t_oop->klass();
+ if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
+ // Skip unrelated boxing methods.
+ Node* proj = proj_out(TypeFunc::Parms);
+ if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
+ return false;
+ }
+ }
+ if (is_CallJava() && as_CallJava()->method() != NULL) {
+ ciMethod* meth = as_CallJava()->method();
+ if (meth->is_accessor()) {
+ return false;
+ }
+      // May modify (by reflection) if a boxed object is passed
+      // as an argument or returned.
+ if (returns_pointer() && (proj_out(TypeFunc::Parms) != NULL)) {
+ Node* proj = proj_out(TypeFunc::Parms);
+ const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
+ if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
+ (inst_t->klass() == boxing_klass))) {
+ return true;
+ }
+ }
+ const TypeTuple* d = tf()->domain();
+ for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
+ const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
+ if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
+ (inst_t->klass() == boxing_klass))) {
+ return true;
+ }
+ }
+ return false;
+ }
+ }
+ return true;
}
// Does this call have a direct reference to n other than debug information?
@@ -1037,28 +1080,29 @@ void SafePointNode::push_monitor(const FastLockNode *lock) {
assert(req() == jvms()->endoff(), "correct sizing");
int nextmon = jvms()->scloff();
if (GenerateSynchronizationCode) {
- add_req(lock->box_node());
- add_req(lock->obj_node());
+ ins_req(nextmon, lock->box_node());
+ ins_req(nextmon+1, lock->obj_node());
} else {
Node* top = Compile::current()->top();
- add_req(top);
- add_req(top);
+ ins_req(nextmon, top);
+ ins_req(nextmon, top);
}
- jvms()->set_scloff(nextmon+MonitorEdges);
+ jvms()->set_scloff(nextmon + MonitorEdges);
jvms()->set_endoff(req());
}
void SafePointNode::pop_monitor() {
// Delete last monitor from debug info
debug_only(int num_before_pop = jvms()->nof_monitors());
- const int MonitorEdges = (1<<JVMState::logMonitorEdges);
+ const int MonitorEdges = 2;
+ assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
int scloff = jvms()->scloff();
int endoff = jvms()->endoff();
int new_scloff = scloff - MonitorEdges;
int new_endoff = endoff - MonitorEdges;
jvms()->set_scloff(new_scloff);
jvms()->set_endoff(new_endoff);
- while (scloff > new_scloff) del_req(--scloff);
+ while (scloff > new_scloff) del_req_ordered(--scloff);
assert(jvms()->nof_monitors() == num_before_pop-1, "");
}
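
Why the switch from add_req() to ins_req() matters: debug-info edges on a SafePointNode sit in contiguous regions, and the scalarized-object field edges now follow the monitors. A schematic of the offsets used throughout this file:

      locoff .. stkoff-1 : locals
      stkoff .. monoff-1 : expression stack
      monoff .. scloff-1 : monitors, stored as (box, obj) pairs (MonitorEdges == 2)
      scloff .. endoff-1 : field values of scalar-replaced objects

A plain add_req() would have appended the new monitor pair after the scalarized fields; inserting at scloff on push (and deleting with del_req_ordered on pop) keeps every region contiguous.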
@@ -1123,13 +1167,12 @@ uint SafePointScalarObjectNode::match_edge(uint idx) const {
}
SafePointScalarObjectNode*
-SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const {
+SafePointScalarObjectNode::clone(Dict* sosn_map) const {
void* cached = (*sosn_map)[(void*)this];
if (cached != NULL) {
return (SafePointScalarObjectNode*)cached;
}
SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
- res->_first_index += jvms_adj;
sosn_map->Insert((void*)this, (void*)res);
return res;
}
@@ -1154,6 +1197,7 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
init_class_id(Class_Allocate);
init_flags(Flag_is_macro);
_is_scalar_replaceable = false;
+ _is_non_escaping = false;
Node *topnode = C->top();
init_req( TypeFunc::Control , ctrl );
@@ -1169,8 +1213,6 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
}
//=============================================================================
-uint AllocateArrayNode::size_of() const { return sizeof(*this); }
-
Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this;
// Don't bother trying to transform a dead node
@@ -1235,6 +1277,8 @@ Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTran
// - the narrow_length is 0
// - the narrow_length is not wider than length
assert(narrow_length_type == TypeInt::ZERO ||
+ length_type->is_con() && narrow_length_type->is_con() &&
+ (narrow_length_type->_hi <= length_type->_lo) ||
(narrow_length_type->_hi <= length_type->_hi &&
narrow_length_type->_lo >= length_type->_lo),
"narrow type must be narrower than length type");
diff --git a/src/share/vm/opto/callnode.hpp b/src/share/vm/opto/callnode.hpp
index 0aa35c214..699f7e7d0 100644
--- a/src/share/vm/opto/callnode.hpp
+++ b/src/share/vm/opto/callnode.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,7 @@ class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
+class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
@@ -215,7 +216,7 @@ public:
// Because JVMState objects live over the entire lifetime of the
// Compile object, they are allocated into the comp_arena, which
// does not get resource marked or reset during the compile process
- void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
+ void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
void operator delete( void * ) { } // fast deallocation
// Create a new JVMState, ready for abstract interpretation.
@@ -235,7 +236,6 @@ public:
int loc_size() const { return stkoff() - locoff(); }
int stk_size() const { return monoff() - stkoff(); }
- int arg_size() const { return monoff() - argoff(); }
int mon_size() const { return scloff() - monoff(); }
int scl_size() const { return endoff() - scloff(); }
@@ -298,6 +298,7 @@ public:
// Miscellaneous utility functions
JVMState* clone_deep(Compile* C) const; // recursively clones caller chain
JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
+ void set_map_deep(SafePointNode *map);// reset map for all callers
#ifndef PRODUCT
void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
@@ -439,7 +440,7 @@ public:
static bool needs_polling_address_input();
#ifndef PRODUCT
- virtual void dump_spec(outputStream *st) const;
+ virtual void dump_spec(outputStream *st) const;
#endif
};
@@ -448,14 +449,17 @@ public:
// at a safepoint.
class SafePointScalarObjectNode: public TypeNode {
- uint _first_index; // First input edge index of a SafePoint node where
+ uint _first_index; // First input edge relative index of a SafePoint node where
// states of the scalarized object fields are collected.
+ // It is relative to the last (youngest) jvms->_scloff.
uint _n_fields; // Number of non-static fields of the scalarized object.
DEBUG_ONLY(AllocateNode* _alloc;)
virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const;
+ uint first_index() const { return _first_index; }
+
public:
SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
@@ -468,7 +472,10 @@ public:
virtual const RegMask &out_RegMask() const;
virtual uint match_edge(uint idx) const;
- uint first_index() const { return _first_index; }
+ uint first_index(JVMState* jvms) const {
+ assert(jvms != NULL, "missed JVMS");
+ return jvms->scloff() + _first_index;
+ }
uint n_fields() const { return _n_fields; }
#ifdef ASSERT
@@ -484,7 +491,7 @@ public:
// corresponds appropriately to "this" in "new_call". Assumes that
// "sosn_map" is a map, specific to the translation of "s" to "new_call",
// mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
- SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;
+ SafePointScalarObjectNode* clone(Dict* sosn_map) const;
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
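
A worked example of the relative encoding (numbers are illustrative only): with scloff() == 10, a scalarized object recorded with _first_index == 3 and two fields has its field values at inputs 13 and 14. If push_monitor() later inserts a monitor pair, scloff() becomes 12, the field edges shift to inputs 15 and 16, and first_index(jvms) still resolves to 12 + 3 == 15 with no fixup. The old absolute index was exactly why clone() needed its jvms_adj adjustment, which the hunk above can now drop.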
@@ -554,10 +561,10 @@ public:
virtual bool guaranteed_safepoint() { return true; }
// For macro nodes, the JVMState gets modified during expansion, so when cloning
// the node the JVMState must be cloned.
- virtual void clone_jvms() { } // default is not to clone
+ virtual void clone_jvms(Compile* C) { } // default is not to clone
// Returns true if the call may modify n
- virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase);
+ virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
// Does this node have a use of n other than in debug information?
bool has_non_debug_use(Node *n);
// Returns the unique CheckCastPP of a call
@@ -630,9 +637,15 @@ class CallStaticJavaNode : public CallJavaNode {
virtual uint cmp( const Node &n ) const;
virtual uint size_of() const; // Size is bigger
public:
- CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
+ CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
: CallJavaNode(tf, addr, method, bci), _name(NULL) {
init_class_id(Class_CallStaticJava);
+ if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
+ init_flags(Flag_is_macro);
+ C->add_macro_node(this);
+ }
+ _is_scalar_replaceable = false;
+ _is_non_escaping = false;
}
CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
const TypePtr* adr_type)
@@ -640,13 +653,31 @@ public:
init_class_id(Class_CallStaticJava);
// This node calls a runtime stub, which often has narrow memory effects.
_adr_type = adr_type;
+ _is_scalar_replaceable = false;
+ _is_non_escaping = false;
}
- const char *_name; // Runtime wrapper name
+ const char *_name; // Runtime wrapper name
+
+ // Result of Escape Analysis
+ bool _is_scalar_replaceable;
+ bool _is_non_escaping;
// If this is an uncommon trap, return the request code, else zero.
int uncommon_trap_request() const;
static int extract_uncommon_trap_request(const Node* call);
+ bool is_boxing_method() const {
+ return is_macro() && (method() != NULL) && method()->is_boxing_method();
+ }
+ // Later inlining modifies the JVMState, so we need to clone it
+  // when the call node is cloned (because it is a macro node).
+ virtual void clone_jvms(Compile* C) {
+ if ((jvms() != NULL) && is_boxing_method()) {
+ set_jvms(jvms()->clone_deep(C));
+ jvms()->set_map_deep(this);
+ }
+ }
+
virtual int Opcode() const;
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
@@ -748,12 +779,12 @@ public:
ParmLimit
};
- static const TypeFunc* alloc_type() {
+ static const TypeFunc* alloc_type(const Type* t) {
const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
fields[AllocSize] = TypeInt::POS;
fields[KlassNode] = TypeInstPtr::NOTNULL;
fields[InitialTest] = TypeInt::BOOL;
- fields[ALength] = TypeInt::INT; // length (can be a bad length)
+ fields[ALength] = t; // length (can be a bad length)
const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
@@ -766,21 +797,26 @@ public:
return TypeFunc::make(domain, range);
}
- bool _is_scalar_replaceable; // Result of Escape Analysis
+ // Result of Escape Analysis
+ bool _is_scalar_replaceable;
+ bool _is_non_escaping;
virtual uint size_of() const; // Size is bigger
AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
Node *size, Node *klass_node, Node *initial_test);
// Expansion modifies the JVMState, so we need to clone it
- virtual void clone_jvms() {
- set_jvms(jvms()->clone_deep(Compile::current()));
+ virtual void clone_jvms(Compile* C) {
+ if (jvms() != NULL) {
+ set_jvms(jvms()->clone_deep(C));
+ jvms()->set_map_deep(this);
+ }
}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; }
virtual bool guaranteed_safepoint() { return false; }
// allocations do not modify their arguments
- virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false;}
+ virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}
// Pattern-match a possible usage of AllocateNode.
// Return null if no allocation is recognized.
@@ -815,10 +851,6 @@ public:
// are defined in graphKit.cpp, which sets up the bidirectional relation.)
InitializeNode* initialization();
- // Return the corresponding storestore barrier (or null if none).
- // Walks out edges to find it...
- MemBarStoreStoreNode* storestore();
-
// Convenience for initialization->maybe_set_complete(phase)
bool maybe_set_complete(PhaseGVN* phase);
};
@@ -840,7 +872,6 @@ public:
set_req(AllocateNode::ALength, count_val);
}
virtual int Opcode() const;
- virtual uint size_of() const; // Size is bigger
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
// Dig the length operand out of a array allocation site.
@@ -918,7 +949,7 @@ public:
void set_nested() { _kind = Nested; set_eliminated_lock_counter(); }
// locking does not modify its arguments
- virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase){ return false;}
+ virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase){ return false;}
#ifndef PRODUCT
void create_lock_counter(JVMState* s);
@@ -965,8 +996,11 @@ public:
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
// Expansion modifies the JVMState, so we need to clone it
- virtual void clone_jvms() {
- set_jvms(jvms()->clone_deep(Compile::current()));
+ virtual void clone_jvms(Compile* C) {
+ if (jvms() != NULL) {
+ set_jvms(jvms()->clone_deep(C));
+ jvms()->set_map_deep(this);
+ }
}
bool is_nested_lock_region(); // Is this Lock nested?
diff --git a/src/share/vm/opto/cfgnode.cpp b/src/share/vm/opto/cfgnode.cpp
index 68fcc9cbb..36347fb92 100644
--- a/src/share/vm/opto/cfgnode.cpp
+++ b/src/share/vm/opto/cfgnode.cpp
@@ -806,7 +806,7 @@ PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) cons
Node *in = ophi->in(i);
if (in == NULL || igvn->type(in) == Type::TOP)
continue;
- Node *opt = MemNode::optimize_simple_memory_chain(in, at, igvn);
+ Node *opt = MemNode::optimize_simple_memory_chain(in, t_oop, NULL, igvn);
PhiNode *optphi = opt->is_Phi() ? opt->as_Phi() : NULL;
if (optphi != NULL && optphi->adr_type() == TypePtr::BOTTOM) {
opt = node_map[optphi->_idx];
@@ -1921,7 +1921,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
const TypePtr* at = adr_type();
for( uint i=1; i<req(); ++i ) {// For all paths in
Node *ii = in(i);
- Node *new_in = MemNode::optimize_memory_chain(ii, at, phase);
+ Node *new_in = MemNode::optimize_memory_chain(ii, at, NULL, phase);
if (ii != new_in ) {
set_req(i, new_in);
progress = this;
@@ -1932,7 +1932,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
#ifdef _LP64
// Push DecodeN/DecodeNKlass down through phi.
// The rest of phi graph will transform by split EncodeP node though phis up.
- if ((UseCompressedOops || UseCompressedKlassPointers) && can_reshape && progress == NULL) {
+ if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == NULL) {
bool may_push = true;
bool has_decodeN = false;
bool is_decodeN = false;
diff --git a/src/share/vm/opto/chaitin.cpp b/src/share/vm/opto/chaitin.cpp
index 9d69b0f3b..492bf384f 100644
--- a/src/share/vm/opto/chaitin.cpp
+++ b/src/share/vm/opto/chaitin.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,10 +40,8 @@
#include "opto/opcodes.hpp"
#include "opto/rootnode.hpp"
-//=============================================================================
-
#ifndef PRODUCT
-void LRG::dump( ) const {
+void LRG::dump() const {
ttyLocker ttyl;
tty->print("%d ",num_regs());
_mask.dump();
@@ -94,7 +92,6 @@ void LRG::dump( ) const {
}
#endif
-//------------------------------score------------------------------------------
// Compute score from cost and area. Low score is best to spill.
static double raw_score( double cost, double area ) {
return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
@@ -125,41 +122,23 @@ double LRG::score() const {
return score;
}
-//------------------------------LRG_List---------------------------------------
-LRG_List::LRG_List( uint max ) : _cnt(max), _max(max), _lidxs(NEW_RESOURCE_ARRAY(uint,max)) {
- memset( _lidxs, 0, sizeof(uint)*max );
-}
-
-void LRG_List::extend( uint nidx, uint lidx ) {
- _nesting.check();
- if( nidx >= _max ) {
- uint size = 16;
- while( size <= nidx ) size <<=1;
- _lidxs = REALLOC_RESOURCE_ARRAY( uint, _lidxs, _max, size );
- _max = size;
- }
- while( _cnt <= nidx )
- _lidxs[_cnt++] = 0;
- _lidxs[nidx] = lidx;
-}
-
#define NUMBUCKS 3
// Straight out of Tarjan's union-find algorithm
uint LiveRangeMap::find_compress(uint lrg) {
uint cur = lrg;
- uint next = _uf_map[cur];
+ uint next = _uf_map.at(cur);
while (next != cur) { // Scan chain of equivalences
assert( next < cur, "always union smaller");
cur = next; // until find a fixed-point
- next = _uf_map[cur];
+ next = _uf_map.at(cur);
}
// Core of union-find algorithm: update chain of
// equivalences to be equal to the root.
while (lrg != next) {
- uint tmp = _uf_map[lrg];
- _uf_map.map(lrg, next);
+ uint tmp = _uf_map.at(lrg);
+ _uf_map.at_put(lrg, next);
lrg = tmp;
}
return lrg;
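
The two loops are Tarjan's union-find with full path compression: the first walk chases the chain to its fixed point (the root), the second re-points every element on the chain at that root so later lookups are cheap. A minimal standalone sketch of the same routine, using a plain std::vector in place of the VM's GrowableArray (hypothetical code, not from the patch):

      #include <cassert>
      #include <vector>

      // Find the representative of 'lrg', compressing the chain on the way.
      static unsigned find_compress(std::vector<unsigned>& uf, unsigned lrg) {
        unsigned cur = lrg;
        unsigned next = uf[cur];
        while (next != cur) {   // scan the chain of equivalences
          assert(next < cur);   // unions always point at the smaller id
          cur = next;           // until we reach a fixed point
          next = uf[cur];
        }
        while (lrg != next) {   // second pass: point the whole chain at the root
          unsigned tmp = uf[lrg];
          uf[lrg] = next;
          lrg = tmp;
        }
        return lrg;
      }

      int main() {
        std::vector<unsigned> uf = {0, 1, 1, 2, 3};   // chain: 4 -> 3 -> 2 -> 1
        unsigned root = find_compress(uf, 4);
        // root == 1, and uf is now {0, 1, 1, 1, 1}: the chain is flattened.
        return root == 1 ? 0 : 1;
      }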
@@ -169,10 +148,10 @@ uint LiveRangeMap::find_compress(uint lrg) {
void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
_max_lrg_id= max_lrg_id;
// Force the Union-Find mapping to be at least this large
- _uf_map.extend(_max_lrg_id, 0);
+ _uf_map.at_put_grow(_max_lrg_id, 0);
// Initialize it to be the ID mapping.
for (uint i = 0; i < _max_lrg_id; ++i) {
- _uf_map.map(i, i);
+ _uf_map.at_put(i, i);
}
}
@@ -180,12 +159,12 @@ void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
// the Union-Find mapping after this call.
void LiveRangeMap::compress_uf_map_for_nodes() {
// For all Nodes, compress mapping
- uint unique = _names.Size();
+ uint unique = _names.length();
for (uint i = 0; i < unique; ++i) {
- uint lrg = _names[i];
+ uint lrg = _names.at(i);
uint compressed_lrg = find(lrg);
if (lrg != compressed_lrg) {
- _names.map(i, compressed_lrg);
+ _names.at_put(i, compressed_lrg);
}
}
}
@@ -202,16 +181,15 @@ uint LiveRangeMap::find_const(uint lrg) const {
return lrg;
}
- uint next = _uf_map[lrg];
+ uint next = _uf_map.at(lrg);
while (next != lrg) { // Scan chain of equivalences
assert(next < lrg, "always union smaller");
lrg = next; // until find a fixed-point
- next = _uf_map[lrg];
+ next = _uf_map.at(lrg);
}
return next;
}
-//------------------------------Chaitin----------------------------------------
PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
: PhaseRegAlloc(unique, cfg, matcher,
#ifndef PRODUCT
@@ -220,7 +198,7 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
NULL
#endif
)
- , _lrg_map(unique)
+ , _lrg_map(Thread::current()->resource_area(), unique)
, _live(0)
, _spilled_once(Thread::current()->resource_area())
, _spilled_twice(Thread::current()->resource_area())
@@ -232,31 +210,31 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
{
NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); )
- _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq);
+ _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());
// Build a list of basic blocks, sorted by frequency
- _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
+ _blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
// Experiment with sorting strategies to speed compilation
double cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
Block **buckets[NUMBUCKS]; // Array of buckets
uint buckcnt[NUMBUCKS]; // Array of bucket counters
double buckval[NUMBUCKS]; // Array of bucket value cutoffs
for (uint i = 0; i < NUMBUCKS; i++) {
- buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg._num_blocks);
+ buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
buckcnt[i] = 0;
// Bump by three orders of magnitude each time
cutoff *= 0.001;
buckval[i] = cutoff;
- for (uint j = 0; j < _cfg._num_blocks; j++) {
+ for (uint j = 0; j < _cfg.number_of_blocks(); j++) {
buckets[i][j] = NULL;
}
}
// Sort blocks into buckets
- for (uint i = 0; i < _cfg._num_blocks; i++) {
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
for (uint j = 0; j < NUMBUCKS; j++) {
- if ((j == NUMBUCKS - 1) || (_cfg._blocks[i]->_freq > buckval[j])) {
+ if ((j == NUMBUCKS - 1) || (_cfg.get_block(i)->_freq > buckval[j])) {
// Assign block to end of list for appropriate bucket
- buckets[j][buckcnt[j]++] = _cfg._blocks[i];
+ buckets[j][buckcnt[j]++] = _cfg.get_block(i);
break; // kick out of inner loop
}
}
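
The constructor's bucket pass is a one-shot bucket sort by block frequency: each bucket's cutoff sits three orders of magnitude below the previous one, and the last bucket is a catch-all. A standalone sketch of the same policy (hypothetical values; 1.0 stands in for BLOCK_FREQUENCY(1.0)):

      #include <cstdio>
      #include <vector>

      static const int NUMBUCKS = 3;

      int main() {
        double buckval[NUMBUCKS];
        double cutoff = 1.0;                  // stand-in for BLOCK_FREQUENCY(1.0)
        for (int i = 0; i < NUMBUCKS; i++) {
          cutoff *= 0.001;                    // drop three orders of magnitude
          buckval[i] = cutoff;                // 1e-3, 1e-6, 1e-9
        }
        std::vector<double> freqs = {0.5, 1e-4, 1e-7, 1e-12};
        std::vector<double> buckets[NUMBUCKS];
        for (double f : freqs) {
          for (int j = 0; j < NUMBUCKS; j++) {
            // First bucket whose cutoff the block beats; the last catches the rest.
            if (j == NUMBUCKS - 1 || f > buckval[j]) {
              buckets[j].push_back(f);
              break;
            }
          }
        }
        for (int j = 0; j < NUMBUCKS; j++) {
          std::printf("bucket %d: %zu blocks\n", j, buckets[j].size());
        }
        return 0;
      }

Concatenating the buckets in order then yields the frequency-sorted block list that register allocation walks.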
@@ -269,10 +247,9 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
}
}
- assert(blkcnt == _cfg._num_blocks, "Block array not totally filled");
+ assert(blkcnt == _cfg.number_of_blocks(), "Block array not totally filled");
}
-//------------------------------Union------------------------------------------
// union 2 sets together.
void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
uint src = _lrg_map.find(src_n);
@@ -285,7 +262,6 @@ void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
_lrg_map.uf_map(dst, src);
}
-//------------------------------new_lrg----------------------------------------
void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
// Make the Node->LRG mapping
_lrg_map.extend(x->_idx,lrg);
@@ -294,24 +270,28 @@ void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
}
-bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
- Block *bcon = _cfg._bbs[con->_idx];
- uint cindex = bcon->find_node(con);
- Node *con_next = bcon->_nodes[cindex+1];
- if (con_next->in(0) != con || !con_next->is_MachProj()) {
- return false; // No MachProj's follow
+int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id) {
+ assert(b->find_node(copy) == (idx - 1), "incorrect insert index for copy kill projections");
+ DEBUG_ONLY( Block* borig = _cfg.get_block_for_node(orig); )
+ int found_projs = 0;
+ uint cnt = orig->outcnt();
+ for (uint i = 0; i < cnt; i++) {
+ Node* proj = orig->raw_out(i);
+ if (proj->is_MachProj()) {
+ assert(proj->outcnt() == 0, "only kill projections are expected here");
+ assert(_cfg.get_block_for_node(proj) == borig, "incorrect block for kill projections");
+ found_projs++;
+ // Copy kill projections after the cloned node
+ Node* kills = proj->clone();
+ kills->set_req(0, copy);
+ b->insert_node(kills, idx++);
+ _cfg.map_node_to_block(kills, b);
+ new_lrg(kills, max_lrg_id++);
+ }
}
-
- // Copy kills after the cloned constant
- Node *kills = con_next->clone();
- kills->set_req(0, copy);
- b->_nodes.insert(idx, kills);
- _cfg._bbs.map(kills->_idx, b);
- new_lrg(kills, max_lrg_id);
- return true;
+ return found_projs;
}
-//------------------------------compact----------------------------------------
// Renumber the live ranges to compact them. Makes the IFG smaller.
void PhaseChaitin::compact() {
// Current the _uf_map contains a series of short chains which are headed
@@ -435,6 +415,9 @@ void PhaseChaitin::Register_Allocate() {
// Insert un-coalesced copies. Visit all Phis. Where inputs to a Phi do
// not match the Phi itself, insert a copy.
coalesce.insert_copies(_matcher);
+ if (C->failing()) {
+ return;
+ }
}
// After aggressive coalesce, attempt a first cut at coloring.
@@ -674,76 +657,79 @@ void PhaseChaitin::Register_Allocate() {
C->set_indexSet_arena(NULL); // ResourceArea is at end of scope
}
-//------------------------------de_ssa-----------------------------------------
void PhaseChaitin::de_ssa() {
// Set initial Names for all Nodes. Most Nodes get the virtual register
// number. A few get the ZERO live range number. These do not
// get allocated, but instead rely on correct scheduling to ensure that
// only one instance is simultaneously live at a time.
uint lr_counter = 1;
- for( uint i = 0; i < _cfg._num_blocks; i++ ) {
- Block *b = _cfg._blocks[i];
- uint cnt = b->_nodes.size();
+ for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
+ Block* block = _cfg.get_block(i);
+ uint cnt = block->number_of_nodes();
// Handle all the normal Nodes in the block
for( uint j = 0; j < cnt; j++ ) {
- Node *n = b->_nodes[j];
+ Node *n = block->get_node(j);
// Pre-color to the zero live range, or pick virtual register
const RegMask &rm = n->out_RegMask();
_lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
}
}
+
// Reset the Union-Find mapping to be identity
_lrg_map.reset_uf_map(lr_counter);
}
-//------------------------------gather_lrg_masks-------------------------------
// Gather LiveRanGe information, including register masks. Modification of
// cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// Nail down the frame pointer live range
- uint fp_lrg = _lrg_map.live_range_id(_cfg._root->in(1)->in(TypeFunc::FramePtr));
+ uint fp_lrg = _lrg_map.live_range_id(_cfg.get_root_node()->in(1)->in(TypeFunc::FramePtr));
lrgs(fp_lrg)._cost += 1e12; // Cost is infinite
// For all blocks
- for( uint i = 0; i < _cfg._num_blocks; i++ ) {
- Block *b = _cfg._blocks[i];
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+ Block* block = _cfg.get_block(i);
// For all instructions
- for( uint j = 1; j < b->_nodes.size(); j++ ) {
- Node *n = b->_nodes[j];
+ for (uint j = 1; j < block->number_of_nodes(); j++) {
+ Node* n = block->get_node(j);
      uint input_edge_start = 1; // Skip control edge for most nodes
- if( n->is_Mach() ) input_edge_start = n->as_Mach()->oper_input_base();
+ if (n->is_Mach()) {
+ input_edge_start = n->as_Mach()->oper_input_base();
+ }
uint idx = n->is_Copy();
// Get virtual register number, same as LiveRanGe index
uint vreg = _lrg_map.live_range_id(n);
- LRG &lrg = lrgs(vreg);
- if( vreg ) { // No vreg means un-allocable (e.g. memory)
+ LRG& lrg = lrgs(vreg);
+ if (vreg) { // No vreg means un-allocable (e.g. memory)
// Collect has-copy bit
- if( idx ) {
+ if (idx) {
lrg._has_copy = 1;
uint clidx = _lrg_map.live_range_id(n->in(idx));
- LRG &copy_src = lrgs(clidx);
+ LRG& copy_src = lrgs(clidx);
copy_src._has_copy = 1;
}
// Check for float-vs-int live range (used in register-pressure
// calculations)
const Type *n_type = n->bottom_type();
- if (n_type->is_floatingpoint())
+ if (n_type->is_floatingpoint()) {
lrg._is_float = 1;
+ }
// Check for twice prior spilling. Once prior spilling might have
// spilled 'soft', 2nd prior spill should have spilled 'hard' and
// further spilling is unlikely to make progress.
- if( _spilled_once.test(n->_idx) ) {
+ if (_spilled_once.test(n->_idx)) {
lrg._was_spilled1 = 1;
- if( _spilled_twice.test(n->_idx) )
+ if (_spilled_twice.test(n->_idx)) {
lrg._was_spilled2 = 1;
+ }
}
#ifndef PRODUCT
@@ -780,16 +766,18 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// Check for bound register masks
const RegMask &lrgmask = lrg.mask();
- if (lrgmask.is_bound(ireg))
+ if (lrgmask.is_bound(ireg)) {
lrg._is_bound = 1;
+ }
// Check for maximum frequency value
- if (lrg._maxfreq < b->_freq)
- lrg._maxfreq = b->_freq;
+ if (lrg._maxfreq < block->_freq) {
+ lrg._maxfreq = block->_freq;
+ }
// Check for oop-iness, or long/double
// Check for multi-kill projection
- switch( ireg ) {
+ switch (ireg) {
case MachProjNode::fat_proj:
// Fat projections have size equal to number of registers killed
lrg.set_num_regs(rm.Size());
@@ -959,8 +947,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// AggressiveCoalesce. This effectively pre-virtual-splits
// around uncommon uses of common defs.
const RegMask &rm = n->in_RegMask(k);
- if( !after_aggressive &&
- _cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
+ if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * block->_freq) {
// Since we are BEFORE aggressive coalesce, leave the register
// mask untrimmed by the call. This encourages more coalescing.
// Later, AFTER aggressive, this live range will have to spill
@@ -1004,8 +991,9 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
}
// Check for maximum frequency value
- if( lrg._maxfreq < b->_freq )
- lrg._maxfreq = b->_freq;
+ if (lrg._maxfreq < block->_freq) {
+ lrg._maxfreq = block->_freq;
+ }
} // End for all allocated inputs
} // end for all instructions
@@ -1027,7 +1015,6 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
}
}
-//------------------------------set_was_low------------------------------------
// Set the was-lo-degree bit. Conservative coalescing should not change the
// colorability of the graph. If any live range was of low-degree before
// coalescing, it should Simplify. This call sets the was-lo-degree bit.
@@ -1064,7 +1051,6 @@ void PhaseChaitin::set_was_low() {
#define REGISTER_CONSTRAINED 16
-//------------------------------cache_lrg_info---------------------------------
// Compute cost/area ratio, in case we spill. Build the lo-degree list.
void PhaseChaitin::cache_lrg_info( ) {
@@ -1098,7 +1084,6 @@ void PhaseChaitin::cache_lrg_info( ) {
}
}
-//------------------------------Pre-Simplify-----------------------------------
// Simplify the IFG by removing LRGs of low degree that have NO copies
void PhaseChaitin::Pre_Simplify( ) {
@@ -1149,7 +1134,6 @@ void PhaseChaitin::Pre_Simplify( ) {
// No more lo-degree no-copy live ranges to simplify
}
-//------------------------------Simplify---------------------------------------
// Simplify the IFG by removing LRGs of low degree.
void PhaseChaitin::Simplify( ) {
@@ -1286,7 +1270,6 @@ void PhaseChaitin::Simplify( ) {
}
-//------------------------------is_legal_reg-----------------------------------
// Is 'reg' register legal for 'lrg'?
static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
@@ -1313,7 +1296,6 @@ static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
return false;
}
-//------------------------------bias_color-------------------------------------
// Choose a color using the biasing heuristic
OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {
@@ -1375,7 +1357,6 @@ OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {
return OptoReg::add( reg, chunk );
}
-//------------------------------choose_color-----------------------------------
// Choose a color in the current chunk
OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
@@ -1397,7 +1378,6 @@ OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
return lrg.mask().find_last_elem();
}
-//------------------------------Select-----------------------------------------
// Select colors by re-inserting LRGs back into the IFG. LRGs are re-inserted
// in reverse order of removal. As long as nothing of hi-degree was yanked,
// everything going back is guaranteed a color. Select that color. If some
@@ -1572,8 +1552,6 @@ uint PhaseChaitin::Select( ) {
return spill_reg-LRG::SPILL_REG; // Return number of spills
}
-
-//------------------------------copy_was_spilled-------------------------------
// Copy 'was_spilled'-edness from the source Node to the dst Node.
void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
if( _spilled_once.test(src->_idx) ) {
@@ -1586,14 +1564,12 @@ void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
}
}
-//------------------------------set_was_spilled--------------------------------
// Set the 'spilled_once' or 'spilled_twice' flag on a node.
void PhaseChaitin::set_was_spilled( Node *n ) {
if( _spilled_once.test_set(n->_idx) )
_spilled_twice.set(n->_idx);
}
-//------------------------------fixup_spills-----------------------------------
// Convert Ideal spill instructions into proper FramePtr + offset Loads and
// Stores. Use-def chains are NOT preserved, but Node->LRG->reg maps are.
void PhaseChaitin::fixup_spills() {
@@ -1603,16 +1579,16 @@ void PhaseChaitin::fixup_spills() {
NOT_PRODUCT( Compile::TracePhase t3("fixupSpills", &_t_fixupSpills, TimeCompiler); )
// Grab the Frame Pointer
- Node *fp = _cfg._broot->head()->in(1)->in(TypeFunc::FramePtr);
+ Node *fp = _cfg.get_root_block()->head()->in(1)->in(TypeFunc::FramePtr);
// For all blocks
- for( uint i = 0; i < _cfg._num_blocks; i++ ) {
- Block *b = _cfg._blocks[i];
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+ Block* block = _cfg.get_block(i);
// For all instructions in block
- uint last_inst = b->end_idx();
- for( uint j = 1; j <= last_inst; j++ ) {
- Node *n = b->_nodes[j];
+ uint last_inst = block->end_idx();
+ for (uint j = 1; j <= last_inst; j++) {
+ Node* n = block->get_node(j);
// Dead instruction???
assert( n->outcnt() != 0 ||// Nothing dead after post alloc
@@ -1649,7 +1625,7 @@ void PhaseChaitin::fixup_spills() {
assert( cisc->oper_input_base() == 2, "Only adding one edge");
cisc->ins_req(1,src); // Requires a memory edge
}
- b->_nodes.map(j,cisc); // Insert into basic block
+ block->map_node(cisc, j); // Insert into basic block
n->subsume_by(cisc, C); // Correct graph
//
++_used_cisc_instructions;
@@ -1675,7 +1651,6 @@ void PhaseChaitin::fixup_spills() {
} // End of for all blocks
}
-//------------------------------find_base_for_derived--------------------------
// Helper to stretch above; recursively discover the base Node for a
// given derived Node. Easy for AddP-related machine nodes, but needs
// to be recursive for derived Phis.
@@ -1705,17 +1680,16 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
// Initialize it once and make it shared:
// set control to _root and place it into Start block
// (where top() node is placed).
- base->init_req(0, _cfg._root);
- Block *startb = _cfg._bbs[C->top()->_idx];
- startb->_nodes.insert(startb->find_node(C->top()), base );
- _cfg._bbs.map( base->_idx, startb );
+ base->init_req(0, _cfg.get_root_node());
+ Block *startb = _cfg.get_block_for_node(C->top());
+ startb->insert_node(base, startb->find_node(C->top()));
+ _cfg.map_node_to_block(base, startb);
assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
}
if (_lrg_map.live_range_id(base) == 0) {
new_lrg(base, maxlrg++);
}
- assert(base->in(0) == _cfg._root &&
- _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
+ assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
derived_base_map[derived->_idx] = base;
return base;
}
@@ -1751,12 +1725,12 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
base->as_Phi()->set_type(t);
// Search the current block for an existing base-Phi
- Block *b = _cfg._bbs[derived->_idx];
+ Block *b = _cfg.get_block_for_node(derived);
for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
- Node *phi = b->_nodes[i];
+ Node *phi = b->get_node(i);
if( !phi->is_Phi() ) { // Found end of Phis with no match?
- b->_nodes.insert( i, base ); // Must insert created Phi here as base
- _cfg._bbs.map( base->_idx, b );
+ b->insert_node(base, i); // Must insert created Phi here as base
+ _cfg.map_node_to_block(base, b);
new_lrg(base,maxlrg++);
break;
}
@@ -1778,8 +1752,6 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
return base;
}
-
-//------------------------------stretch_base_pointer_live_ranges---------------
// At each Safepoint, insert extra debug edges for each pair of derived value/
// base pointer that is live across the Safepoint for oopmap building. The
// edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
@@ -1791,14 +1763,14 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
// For all blocks in RPO do...
- for( uint i=0; i<_cfg._num_blocks; i++ ) {
- Block *b = _cfg._blocks[i];
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+ Block* block = _cfg.get_block(i);
// Note use of deep-copy constructor. I cannot hammer the original
// liveout bits, because they are needed by the following coalesce pass.
- IndexSet liveout(_live->live(b));
+ IndexSet liveout(_live->live(block));
- for( uint j = b->end_idx() + 1; j > 1; j-- ) {
- Node *n = b->_nodes[j-1];
+ for (uint j = block->end_idx() + 1; j > 1; j--) {
+ Node* n = block->get_node(j - 1);
// Pre-split compares of loop-phis. Loop-phis form a cycle we would
// like to see in the same register. Compare uses the loop-phi and so
@@ -1812,8 +1784,8 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
Node *phi = n->in(1);
if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
- Block *phi_block = _cfg._bbs[phi->_idx];
- if( _cfg._bbs[phi_block->pred(2)->_idx] == b ) {
+ Block *phi_block = _cfg.get_block_for_node(phi);
+ if (_cfg.get_block_for_node(phi_block->pred(2)) == block) {
const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
insert_proj( phi_block, 1, spill, maxlrg++ );
@@ -1867,7 +1839,7 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
!liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
(_lrg_map.live_range_id(base) > 0) && // not a constant
- _cfg._bbs[base->_idx] != b) { // base not def'd in blk)
+ _cfg.get_block_for_node(base) != block) { // base not def'd in blk)
// Base pointer is not currently live. Since I stretched
// the base pointer to here and it crosses basic-block
// boundaries, the global live info is now incorrect.
@@ -1902,15 +1874,12 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
return must_recompute_live != 0;
}
-
-//------------------------------add_reference----------------------------------
// Extend the node to LRG mapping
void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
_lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
}
-//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void PhaseChaitin::dump(const Node *n) const {
uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
@@ -1990,12 +1959,12 @@ void PhaseChaitin::dump(const Node *n) const {
tty->print("\n");
}
-void PhaseChaitin::dump( const Block * b ) const {
- b->dump_head( &_cfg._bbs );
+void PhaseChaitin::dump(const Block *b) const {
+ b->dump_head(&_cfg);
// For all instructions
- for( uint j = 0; j < b->_nodes.size(); j++ )
- dump(b->_nodes[j]);
+ for( uint j = 0; j < b->number_of_nodes(); j++ )
+ dump(b->get_node(j));
// Print live-out info at end of block
if( _live ) {
tty->print("Liveout: ");
@@ -2016,8 +1985,9 @@ void PhaseChaitin::dump() const {
_matcher._new_SP, _framesize );
// For all blocks
- for( uint i = 0; i < _cfg._num_blocks; i++ )
- dump(_cfg._blocks[i]);
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+ dump(_cfg.get_block(i));
+ }
// End of per-block dump
tty->print("\n");
@@ -2058,7 +2028,6 @@ void PhaseChaitin::dump() const {
tty->print_cr("");
}
-//------------------------------dump_degree_lists------------------------------
void PhaseChaitin::dump_degree_lists() const {
// Dump lo-degree list
tty->print("Lo degree: ");
@@ -2079,7 +2048,6 @@ void PhaseChaitin::dump_degree_lists() const {
tty->print_cr("");
}
-//------------------------------dump_simplified--------------------------------
void PhaseChaitin::dump_simplified() const {
tty->print("Simplified: ");
for( uint i = _simplified; i; i = lrgs(i)._next )
@@ -2098,7 +2066,6 @@ static char *print_reg( OptoReg::Name reg, const PhaseChaitin *pc, char *buf ) {
return buf+strlen(buf);
}
-//------------------------------dump_register----------------------------------
// Dump a register name into a buffer. Be intelligent if we get called
// before allocation is complete.
char *PhaseChaitin::dump_register( const Node *n, char *buf ) const {
@@ -2132,7 +2099,6 @@ char *PhaseChaitin::dump_register( const Node *n, char *buf ) const {
return buf+strlen(buf);
}
-//----------------------dump_for_spill_split_recycle--------------------------
void PhaseChaitin::dump_for_spill_split_recycle() const {
if( WizardMode && (PrintCompilation || PrintOpto) ) {
// Display which live ranges need to be split and the allocator's state
@@ -2148,7 +2114,6 @@ void PhaseChaitin::dump_for_spill_split_recycle() const {
}
}
-//------------------------------dump_frame------------------------------------
void PhaseChaitin::dump_frame() const {
const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
const TypeTuple *domain = C->tf()->domain();
@@ -2254,17 +2219,16 @@ void PhaseChaitin::dump_frame() const {
tty->print_cr("#");
}
-//------------------------------dump_bb----------------------------------------
void PhaseChaitin::dump_bb( uint pre_order ) const {
tty->print_cr("---dump of B%d---",pre_order);
- for( uint i = 0; i < _cfg._num_blocks; i++ ) {
- Block *b = _cfg._blocks[i];
- if( b->_pre_order == pre_order )
- dump(b);
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+ Block* block = _cfg.get_block(i);
+ if (block->_pre_order == pre_order) {
+ dump(block);
+ }
}
}
-//------------------------------dump_lrg---------------------------------------
void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
tty->print_cr("---dump of L%d---",lidx);
@@ -2286,17 +2250,17 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
tty->cr();
}
// For all blocks
- for( uint i = 0; i < _cfg._num_blocks; i++ ) {
- Block *b = _cfg._blocks[i];
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+ Block* block = _cfg.get_block(i);
int dump_once = 0;
// For all instructions
- for( uint j = 0; j < b->_nodes.size(); j++ ) {
- Node *n = b->_nodes[j];
+ for( uint j = 0; j < block->number_of_nodes(); j++ ) {
+ Node *n = block->get_node(j);
if (_lrg_map.find_const(n) == lidx) {
if (!dump_once++) {
tty->cr();
- b->dump_head( &_cfg._bbs );
+ block->dump_head(&_cfg);
}
dump(n);
continue;
@@ -2311,7 +2275,7 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
if (_lrg_map.find_const(m) == lidx) {
if (!dump_once++) {
tty->cr();
- b->dump_head(&_cfg._bbs);
+ block->dump_head(&_cfg);
}
dump(n);
}
@@ -2323,7 +2287,6 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
}
#endif // not PRODUCT
-//------------------------------print_chaitin_statistics-------------------------------
int PhaseChaitin::_final_loads = 0;
int PhaseChaitin::_final_stores = 0;
int PhaseChaitin::_final_memoves= 0;
diff --git a/src/share/vm/opto/chaitin.hpp b/src/share/vm/opto/chaitin.hpp
index 3455005f3..41276efa5 100644
--- a/src/share/vm/opto/chaitin.hpp
+++ b/src/share/vm/opto/chaitin.hpp
@@ -283,8 +283,8 @@ private:
// Straight out of Tarjan's union-find algorithm
uint find_compress(const Node *node) {
- uint lrg_id = find_compress(_names[node->_idx]);
- _names.map(node->_idx, lrg_id);
+ uint lrg_id = find_compress(_names.at(node->_idx));
+ _names.at_put(node->_idx, lrg_id);
return lrg_id;
}
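
As the comment says, this is textbook union-find with path compression: chase live-range ids to the root, then remap the queried slot straight to that root so later lookups are amortized near-constant. A standalone sketch of the same core (hypothetical names, std::vector in place of the mapping arrays):

    #include <cstdint>
    #include <vector>

    // Hypothetical analogue of the LiveRangeMap union-find core.
    struct UnionFind {
      std::vector<uint32_t> parent;  // parent[i] == i marks a root

      explicit UnionFind(uint32_t n) : parent(n) {
        for (uint32_t i = 0; i < n; i++) parent[i] = i;
      }

      // Find with path compression, mirroring find_compress above:
      // recurse to the root, then write the root back into this slot.
      uint32_t find_compress(uint32_t i) {
        if (parent[i] != i) {
          parent[i] = find_compress(parent[i]);
        }
        return parent[i];
      }

      // Union, mirroring uf_map(dst, src): point dst's root at src's.
      void unite(uint32_t dst, uint32_t src) {
        parent[find_compress(dst)] = find_compress(src);
      }
    };
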
@@ -305,40 +305,40 @@ public:
}
uint size() const {
- return _names.Size();
+ return _names.length();
}
uint live_range_id(uint idx) const {
- return _names[idx];
+ return _names.at(idx);
}
uint live_range_id(const Node *node) const {
- return _names[node->_idx];
+ return _names.at(node->_idx);
}
uint uf_live_range_id(uint lrg_id) const {
- return _uf_map[lrg_id];
+ return _uf_map.at(lrg_id);
}
void map(uint idx, uint lrg_id) {
- _names.map(idx, lrg_id);
+ _names.at_put(idx, lrg_id);
}
void uf_map(uint dst_lrg_id, uint src_lrg_id) {
- _uf_map.map(dst_lrg_id, src_lrg_id);
+ _uf_map.at_put(dst_lrg_id, src_lrg_id);
}
void extend(uint idx, uint lrg_id) {
- _names.extend(idx, lrg_id);
+ _names.at_put_grow(idx, lrg_id);
}
void uf_extend(uint dst_lrg_id, uint src_lrg_id) {
- _uf_map.extend(dst_lrg_id, src_lrg_id);
+ _uf_map.at_put_grow(dst_lrg_id, src_lrg_id);
}
- LiveRangeMap(uint unique)
- : _names(unique)
- , _uf_map(unique)
+ LiveRangeMap(Arena* arena, uint unique)
+ : _names(arena, unique, unique, 0)
+ , _uf_map(arena, unique, unique, 0)
, _max_lrg_id(0) {}
uint find_id( const Node *n ) {
@@ -355,14 +355,14 @@ public:
void compress_uf_map_for_nodes();
uint find(uint lidx) {
- uint uf_lidx = _uf_map[lidx];
+ uint uf_lidx = _uf_map.at(lidx);
return (uf_lidx == lidx) ? uf_lidx : find_compress(lidx);
}
// Convert a Node into a Live Range Index - a lidx
uint find(const Node *node) {
uint lidx = live_range_id(node);
- uint uf_lidx = _uf_map[lidx];
+ uint uf_lidx = _uf_map.at(lidx);
return (uf_lidx == lidx) ? uf_lidx : find_compress(node);
}
@@ -371,10 +371,10 @@ public:
// Like Find above, but no path compress, so bad asymptotic behavior
uint find_const(const Node *node) const {
- if(node->_idx >= _names.Size()) {
+ if(node->_idx >= (uint)_names.length()) {
return 0; // not mapped, usual for debug dump
}
- return find_const(_names[node->_idx]);
+ return find_const(_names.at(node->_idx));
}
};
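
This whole hunk is an API migration: the old Size()/map()/extend() calls become GrowableArray's length()/at_put()/at_put_grow(), with the arrays now arena-allocated and zero-filled by the new constructor. The load-bearing call is at_put_grow(), a write that grows the array on demand; a standalone sketch of those semantics (hypothetical minimal class, not the HotSpot GrowableArray):

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for GrowableArray<uint>'s access operations.
    template <typename T>
    struct Growable {
      std::vector<T> data;

      T    at(size_t i) const           { return data[i]; }  // plain read
      void at_put(size_t i, const T& v) { data[i] = v; }      // in-bounds write

      // Write that grows the array as needed, padding with 'fill'; this
      // is what the old extend() maps onto.
      void at_put_grow(size_t i, const T& v, const T& fill = T()) {
        if (i >= data.size()) {
          data.resize(i + 1, fill);
        }
        data[i] = v;
      }

      size_t length() const { return data.size(); }
    };
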
@@ -412,33 +412,22 @@ class PhaseChaitin : public PhaseRegAlloc {
uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
- bool clone_projs(Block *b, uint idx, Node *con, Node *copy, LiveRangeMap &lrg_map) {
- bool found_projs = clone_projs_shared(b, idx, con, copy, lrg_map.max_lrg_id());
-
- if(found_projs) {
- uint max_lrg_id = lrg_map.max_lrg_id();
- lrg_map.set_max_lrg_id(max_lrg_id + 1);
- }
-
- return found_projs;
- }
-
//------------------------------clone_projs------------------------------------
// After cloning some rematerialized instruction, clone any MachProj's that
// follow it. Example: Intel zero is XOR, kills flags. Sparc FP constants
// use G3 as an address temp.
- bool clone_projs(Block *b, uint idx, Node *con, Node *copy, uint &max_lrg_id) {
- bool found_projs = clone_projs_shared(b, idx, con, copy, max_lrg_id);
-
- if(found_projs) {
- max_lrg_id++;
+ int clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id);
+
+ int clone_projs(Block* b, uint idx, Node* orig, Node* copy, LiveRangeMap& lrg_map) {
+ uint max_lrg_id = lrg_map.max_lrg_id();
+ int found_projs = clone_projs(b, idx, orig, copy, max_lrg_id);
+ if (found_projs > 0) {
+ // max_lrg_id is updated during call above
+ lrg_map.set_max_lrg_id(max_lrg_id);
}
-
return found_projs;
}
- bool clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id);
-
Node *split_Rematerialize(Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits,
int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru);
// True if lidx is used before any real register is def'd in the block
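
The clone_projs refactoring above changes the contract from "did I clone anything" (bool) to "how many nodes did I insert" (int), so a caller can advance its instruction cursor by the return value in one step; the coalesce.cpp hunk below does exactly that with l += _phc.clone_projs(...). A small standalone sketch of the count-returning pattern (hypothetical types, not the HotSpot signatures):

    #include <vector>

    struct Node  { int id; };
    struct Block { std::vector<Node> nodes; };

    // Returns how many helper nodes were inserted at 'idx' so the caller
    // can step its cursor past them; max_lrg_id is bumped per clone, as
    // the new out-parameter overload documents.
    int clone_projs(Block* b, unsigned idx, int n_projs, unsigned& max_lrg_id) {
      for (int k = 0; k < n_projs; k++) {
        b->nodes.insert(b->nodes.begin() + idx + k, Node{-1});
        max_lrg_id++;  // each cloned projection gets a fresh live range id
      }
      return n_projs;
    }

    // Caller pattern, as in insert_copies() below:
    //   l += clone_projs(b, l, n_projs, max_lrg_id);
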
diff --git a/src/share/vm/opto/coalesce.cpp b/src/share/vm/opto/coalesce.cpp
index 74618fb41..bd207b584 100644
--- a/src/share/vm/opto/coalesce.cpp
+++ b/src/share/vm/opto/coalesce.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,8 +34,6 @@
#include "opto/matcher.hpp"
#include "opto/regmask.hpp"
-//=============================================================================
-//------------------------------Dump-------------------------------------------
#ifndef PRODUCT
void PhaseCoalesce::dump(Node *n) const {
// Being a const function means I cannot use 'Find'
@@ -43,23 +41,22 @@ void PhaseCoalesce::dump(Node *n) const {
tty->print("L%d/N%d ",r,n->_idx);
}
-//------------------------------dump-------------------------------------------
void PhaseCoalesce::dump() const {
// I know I have a block layout now, so I can print blocks in a loop
- for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
+ for( uint i=0; i<_phc._cfg.number_of_blocks(); i++ ) {
uint j;
- Block *b = _phc._cfg._blocks[i];
+ Block* b = _phc._cfg.get_block(i);
// Print a nice block header
tty->print("B%d: ",b->_pre_order);
for( j=1; j<b->num_preds(); j++ )
- tty->print("B%d ", _phc._cfg._bbs[b->pred(j)->_idx]->_pre_order);
+ tty->print("B%d ", _phc._cfg.get_block_for_node(b->pred(j))->_pre_order);
tty->print("-> ");
for( j=0; j<b->_num_succs; j++ )
tty->print("B%d ",b->_succs[j]->_pre_order);
tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
- uint cnt = b->_nodes.size();
+ uint cnt = b->number_of_nodes();
for( j=0; j<cnt; j++ ) {
- Node *n = b->_nodes[j];
+ Node *n = b->get_node(j);
dump( n );
tty->print("\t%s\t",n->Name());
@@ -85,7 +82,6 @@ void PhaseCoalesce::dump() const {
}
#endif
-//------------------------------combine_these_two------------------------------
// Combine the live ranges def'd by these 2 Nodes. N2 is an input to N1.
void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) {
uint lr1 = _phc._lrg_map.find(n1);
@@ -127,18 +123,15 @@ void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) {
}
}
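
combine_these_two unions the two copy-related live ranges and folds their interference neighborhoods together. A standalone sketch of the neighbor-set merge (hypothetical adjacency sets, assuming lr1 and lr2 do not themselves interfere, which holds for copy-related ranges):

    #include <set>
    #include <vector>

    // Hypothetical interference graph keyed by live range id.
    struct IFG {
      std::vector<std::set<unsigned>> neighbors;

      // Fold lr2 into lr1: everything that interfered with lr2 now
      // interferes with lr1, and lr2 disappears as a separate range.
      void combine(unsigned lr1, unsigned lr2) {
        for (unsigned n : neighbors[lr2]) {
          neighbors[n].erase(lr2);
          neighbors[n].insert(lr1);
          neighbors[lr1].insert(n);
        }
        neighbors[lr2].clear();
      }
    };
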
-//------------------------------coalesce_driver--------------------------------
// Copy coalescing
-void PhaseCoalesce::coalesce_driver( ) {
-
+void PhaseCoalesce::coalesce_driver() {
verify();
// Coalesce from high frequency to low
- for( uint i=0; i<_phc._cfg._num_blocks; i++ )
- coalesce( _phc._blks[i] );
-
+ for (uint i = 0; i < _phc._cfg.number_of_blocks(); i++) {
+ coalesce(_phc._blks[i]);
+ }
}
-//------------------------------insert_copy_with_overlap-----------------------
// I am inserting copies to come out of SSA form. In the general case, I am
// doing a parallel renaming. I'm in the Named world now, so I can't do a
// general parallel renaming. All the copies now use "names" (live-ranges)
@@ -159,7 +152,7 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui
// after the last use. Last use is really first-use on a backwards scan.
uint i = b->end_idx()-1;
while(1) {
- Node *n = b->_nodes[i];
+ Node *n = b->get_node(i);
// Check for end of virtual copies; this is also the end of the
// parallel renaming effort.
if (n->_idx < _unique) {
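
The scan above leans on the observation in the comment: on a backwards walk through the block, the last use of a name is the first use encountered, and the out-of-SSA copy goes just after it. A tiny standalone sketch of that scan (hypothetical node list):

    #include <vector>

    struct Node {
      unsigned idx;
      std::vector<Node*> inputs;
    };

    // Walk a block backwards; the first use of 'def' we meet is the last
    // use in program order, so the copy is inserted just after index i.
    int last_use_index(const std::vector<Node*>& block_nodes, const Node* def) {
      for (int i = (int)block_nodes.size() - 1; i >= 0; i--) {
        for (const Node* in : block_nodes[i]->inputs) {
          if (in == def) return i;
        }
      }
      return -1;  // not used in this block
    }
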
@@ -181,7 +174,7 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui
// the last kill. Thus it is the first kill on a backwards scan.
i = b->end_idx()-1;
while (1) {
- Node *n = b->_nodes[i];
+ Node *n = b->get_node(i);
// Check for end of virtual copies; this is also the end of the
// parallel renaming effort.
if (n->_idx < _unique) {
@@ -207,16 +200,15 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui
tmp ->set_req(idx,copy->in(idx));
copy->set_req(idx,tmp);
// Save source in temp early, before source is killed
- b->_nodes.insert(kill_src_idx,tmp);
- _phc._cfg._bbs.map( tmp->_idx, b );
+ b->insert_node(tmp, kill_src_idx);
+ _phc._cfg.map_node_to_block(tmp, b);
last_use_idx++;
}
// Insert just after last use
- b->_nodes.insert(last_use_idx+1,copy);
+ b->insert_node(copy, last_use_idx + 1);
}
-//------------------------------insert_copies----------------------------------
void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
// We do LRG compression and fix the liveout data only here, since the other
// place, in Split(), is guarded by an assert which we never hit.
@@ -225,8 +217,8 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
for (uint lrg = 1; lrg < _phc._lrg_map.max_lrg_id(); lrg++) {
uint compressed_lrg = _phc._lrg_map.find(lrg);
if (lrg != compressed_lrg) {
- for (uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++) {
- IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]);
+ for (uint bidx = 0; bidx < _phc._cfg.number_of_blocks(); bidx++) {
+ IndexSet *liveout = _phc._live->live(_phc._cfg.get_block(bidx));
if (liveout->member(lrg)) {
liveout->remove(lrg);
liveout->insert(compressed_lrg);
@@ -239,12 +231,14 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
// Nodes with index less than '_unique' are original, non-virtual Nodes.
_unique = C->unique();
- for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
- Block *b = _phc._cfg._blocks[i];
+ for (uint i = 0; i < _phc._cfg.number_of_blocks(); i++) {
+ C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce");
+ if (C->failing()) return;
+ Block *b = _phc._cfg.get_block(i);
uint cnt = b->num_preds(); // Number of inputs to the Phi
- for( uint l = 1; l<b->_nodes.size(); l++ ) {
- Node *n = b->_nodes[l];
+ for( uint l = 1; l<b->number_of_nodes(); l++ ) {
+ Node *n = b->get_node(l);
// Do not use removed-copies, use copied value instead
uint ncnt = n->req();
@@ -266,7 +260,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
n->replace_by(def);
n->set_req(cidx,NULL);
- b->_nodes.remove(l);
+ b->remove_node(l);
l--;
continue;
}
@@ -284,7 +278,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
Node *m = n->in(j);
uint src_name = _phc._lrg_map.find(m);
if (src_name != phi_name) {
- Block *pred = _phc._cfg._bbs[b->pred(j)->_idx];
+ Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
Node *copy;
assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
// Rematerialize constants instead of copying them
@@ -303,7 +297,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
}
// Insert the copy in the use-def chain
n->set_req(j, copy);
- _phc._cfg._bbs.map( copy->_idx, pred );
+ _phc._cfg.map_node_to_block(copy, pred);
// Extend ("register allocate") the names array for the copy.
_phc._lrg_map.extend(copy->_idx, phi_name);
} // End of if Phi names do not match
@@ -327,27 +321,25 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
m->as_Mach()->rematerialize()) {
copy = m->clone();
// Insert the copy in the basic block, just before us
- b->_nodes.insert(l++, copy);
- if(_phc.clone_projs(b, l, m, copy, _phc._lrg_map)) {
- l++;
- }
+ b->insert_node(copy, l++);
+ l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
} else {
const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
copy = new (C) MachSpillCopyNode(m, *rm, *rm);
// Insert the copy in the basic block, just before us
- b->_nodes.insert(l++, copy);
+ b->insert_node(copy, l++);
}
// Insert the copy in the use-def chain
n->set_req(idx, copy);
// Extend ("register allocate") the names array for the copy.
_phc._lrg_map.extend(copy->_idx, name);
- _phc._cfg._bbs.map( copy->_idx, b );
+ _phc._cfg.map_node_to_block(copy, b);
}
} // End of is two-adr
// Insert a copy at a debug use for a lrg which has high frequency
- if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) {
+ if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || _phc._cfg.is_uncommon(b)) {
// Walk the debug inputs to the node and check for lrg freq
JVMState* jvms = n->jvms();
uint debug_start = jvms ? jvms->debug_start() : 999999;
@@ -384,12 +376,12 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
// Insert the copy in the use-def chain
n->set_req(inpidx, copy );
// Insert the copy in the basic block, just before us
- b->_nodes.insert( l++, copy );
+ b->insert_node(copy, l++);
// Extend ("register allocate") the names array for the copy.
uint max_lrg_id = _phc._lrg_map.max_lrg_id();
_phc.new_lrg(copy, max_lrg_id);
_phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
- _phc._cfg._bbs.map(copy->_idx, b);
+ _phc._cfg.map_node_to_block(copy, b);
//tty->print_cr("Split a debug use in Aggressive Coalesce");
} // End of if high frequency use/def
} // End of for all debug inputs
@@ -401,8 +393,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
} // End of for all blocks
}
-//=============================================================================
-//------------------------------coalesce---------------------------------------
+
// Aggressive (but pessimistic) copy coalescing of a single block
// The following coalesce pass represents a single round of aggressive
@@ -435,10 +426,13 @@ void PhaseAggressiveCoalesce::coalesce( Block *b ) {
Block *bs = b->_succs[i];
// Find index of 'b' in 'bs' predecessors
uint j=1;
- while( _phc._cfg._bbs[bs->pred(j)->_idx] != b ) j++;
+ while (_phc._cfg.get_block_for_node(bs->pred(j)) != b) {
+ j++;
+ }
+
// Visit all the Phis in successor block
- for( uint k = 1; k<bs->_nodes.size(); k++ ) {
- Node *n = bs->_nodes[k];
+ for( uint k = 1; k<bs->number_of_nodes(); k++ ) {
+ Node *n = bs->get_node(k);
if( !n->is_Phi() ) break;
combine_these_two( n, n->in(j) );
}
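
The loop above first locates this block's slot j in the successor's predecessor list, then unions each successor Phi with its j-th input, the value this block feeds it. A standalone skeleton of that round (hypothetical 0-based types, where HotSpot's pred and Phi input indices are 1-based):

    #include <vector>

    struct Node  { bool is_phi; std::vector<Node*> in; };
    struct Block { std::vector<Block*> succs, preds; std::vector<Node*> nodes; };

    void coalesce_block(Block* b, void (*combine)(Node*, Node*)) {
      for (Block* bs : b->succs) {
        unsigned j = 0;
        while (bs->preds[j] != b) j++;   // find b's slot among bs's preds
        for (Node* n : bs->nodes) {      // Phis cluster at the block head
          if (!n->is_phi) break;
          combine(n, n->in[j]);          // union Phi with its j-th input
        }
      }
    }
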
@@ -448,7 +442,7 @@ void PhaseAggressiveCoalesce::coalesce( Block *b ) {
// Check _this_ block for 2-address instructions and copies.
uint cnt = b->end_idx();
for( i = 1; i<cnt; i++ ) {
- Node *n = b->_nodes[i];
+ Node *n = b->get_node(i);
uint idx;
// 2-address instructions have a virtual Copy matching their input
// to their output
@@ -459,20 +453,16 @@ void PhaseAggressiveCoalesce::coalesce( Block *b ) {
} // End of for all instructions in block
}
-//=============================================================================
-//------------------------------PhaseConservativeCoalesce----------------------
PhaseConservativeCoalesce::PhaseConservativeCoalesce(PhaseChaitin &chaitin) : PhaseCoalesce(chaitin) {
_ulr.initialize(_phc._lrg_map.max_lrg_id());
}
-//------------------------------verify-----------------------------------------
void PhaseConservativeCoalesce::verify() {
#ifdef ASSERT
_phc.set_was_low();
#endif
}
-//------------------------------union_helper-----------------------------------
void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, uint lr1, uint lr2, Node *src_def, Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {
// Join live ranges. Merge larger into smaller. Union lr2 into lr1 in the
// union-find tree
@@ -500,22 +490,21 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui
dst_copy->set_req( didx, src_def );
// Add copy to free list
// _phc.free_spillcopy(b->_nodes[bindex]);
- assert( b->_nodes[bindex] == dst_copy, "" );
+ assert( b->get_node(bindex) == dst_copy, "" );
dst_copy->replace_by( dst_copy->in(didx) );
dst_copy->set_req( didx, NULL);
- b->_nodes.remove(bindex);
+ b->remove_node(bindex);
if( bindex < b->_ihrp_index ) b->_ihrp_index--;
if( bindex < b->_fhrp_index ) b->_fhrp_index--;
// Stretched lr1; add it to liveness of intermediate blocks
- Block *b2 = _phc._cfg._bbs[src_copy->_idx];
+ Block *b2 = _phc._cfg.get_block_for_node(src_copy);
while( b != b2 ) {
- b = _phc._cfg._bbs[b->pred(1)->_idx];
+ b = _phc._cfg.get_block_for_node(b->pred(1));
_phc._live->live(b)->insert(lr1);
}
}
-//------------------------------compute_separating_interferences---------------
// Factored code from copy_copy that computes extra interferences from
// lengthening a live range by double-coalescing.
uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, Node *src_copy, Block *b, uint bindex, RegMask &rm, uint reg_degree, uint rm_size, uint lr1, uint lr2 ) {
@@ -530,12 +519,12 @@ uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy,
bindex2--; // Chain backwards 1 instruction
while( bindex2 == 0 ) { // At block start, find prior block
assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" );
- b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
+ b2 = _phc._cfg.get_block_for_node(b2->pred(1));
bindex2 = b2->end_idx()-1;
}
// Get prior instruction
- assert(bindex2 < b2->_nodes.size(), "index out of bounds");
- Node *x = b2->_nodes[bindex2];
+ assert(bindex2 < b2->number_of_nodes(), "index out of bounds");
+ Node *x = b2->get_node(bindex2);
if( x == prev_copy ) { // Previous copy in copy chain?
if( prev_copy == src_copy)// Found end of chain and all interferences
break; // So break out of loop
@@ -581,7 +570,6 @@ uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy,
return reg_degree;
}
-//------------------------------update_ifg-------------------------------------
void PhaseConservativeCoalesce::update_ifg(uint lr1, uint lr2, IndexSet *n_lr1, IndexSet *n_lr2) {
// Some original neighbors of lr1 might have gone away
// because the constrained register mask prevented them.
@@ -611,7 +599,6 @@ void PhaseConservativeCoalesce::update_ifg(uint lr1, uint lr2, IndexSet *n_lr1,
lrgs(neighbor).inc_degree( lrg1.compute_degree(lrgs(neighbor)) );
}
-//------------------------------record_bias------------------------------------
static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) {
// Tag copy bias here
if( !ifg->lrgs(lr1)._copy_bias )
@@ -620,7 +607,6 @@ static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) {
ifg->lrgs(lr2)._copy_bias = lr1;
}
-//------------------------------copy_copy--------------------------------------
// See if I can coalesce a series of multiple copies together. I need the
// final dest copy and the original src copy. They can be the same Node.
// Compute the compatible register masks.
@@ -674,8 +660,8 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block
if (UseFPUForSpilling && rm.is_AllStack() ) {
// Don't coalesce when frequency difference is large
- Block *dst_b = _phc._cfg._bbs[dst_copy->_idx];
- Block *src_def_b = _phc._cfg._bbs[src_def->_idx];
+ Block *dst_b = _phc._cfg.get_block_for_node(dst_copy);
+ Block *src_def_b = _phc._cfg.get_block_for_node(src_def);
if (src_def_b->_freq > 10*dst_b->_freq )
return false;
}
@@ -688,7 +674,7 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block
// Another early bail-out test is when we are double-coalescing and the
// 2 copies are separated by some control flow.
if( dst_copy != src_copy ) {
- Block *src_b = _phc._cfg._bbs[src_copy->_idx];
+ Block *src_b = _phc._cfg.get_block_for_node(src_copy);
Block *b2 = b;
while( b2 != src_b ) {
if( b2->num_preds() > 2 ){// Found merge-point
@@ -699,7 +685,7 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block
//record_bias( _phc._lrgs, lr1, lr2 );
return false; // Too hard to find all interferences
}
- b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
+ b2 = _phc._cfg.get_block_for_node(b2->pred(1));
}
}
@@ -780,17 +766,17 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block
return true;
}
-//------------------------------coalesce---------------------------------------
// Conservative (but pessimistic) copy coalescing of a single block
void PhaseConservativeCoalesce::coalesce( Block *b ) {
// Bail out on infrequent blocks
- if( b->is_uncommon(_phc._cfg._bbs) )
+ if (_phc._cfg.is_uncommon(b)) {
return;
+ }
// Check this block for copies.
for( uint i = 1; i<b->end_idx(); i++ ) {
// Check for actual copies on inputs. Coalesce a copy into its
// input if use and copy's input are compatible.
- Node *copy1 = b->_nodes[i];
+ Node *copy1 = b->get_node(i);
uint idx1 = copy1->is_Copy();
if( !idx1 ) continue; // Not a copy
diff --git a/src/share/vm/opto/coalesce.hpp b/src/share/vm/opto/coalesce.hpp
index a6359af10..3a361b25f 100644
--- a/src/share/vm/opto/coalesce.hpp
+++ b/src/share/vm/opto/coalesce.hpp
@@ -29,7 +29,6 @@
class LoopTree;
class LRG;
-class LRG_List;
class Matcher;
class PhaseIFG;
class PhaseCFG;
diff --git a/src/share/vm/opto/compile.cpp b/src/share/vm/opto/compile.cpp
index a59e344a4..d50182b8b 100644
--- a/src/share/vm/opto/compile.cpp
+++ b/src/share/vm/opto/compile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,6 +63,7 @@
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
+#include "trace/tracing.hpp"
#include "utilities/copy.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
@@ -424,6 +425,7 @@ void Compile::remove_useless_nodes(Unique_Node_List &useful) {
}
// clean up the late inline lists
remove_useless_late_inlines(&_string_late_inlines, useful);
+ remove_useless_late_inlines(&_boxing_late_inlines, useful);
remove_useless_late_inlines(&_late_inlines, useful);
debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
}
@@ -491,6 +493,12 @@ void Compile::print_compile_messages() {
tty->print_cr("** Bailout: Recompile without escape analysis **");
tty->print_cr("*********************************************************");
}
+ if (_eliminate_boxing != EliminateAutoBox && PrintOpto) {
+ // Recompiling without boxing elimination
+ tty->print_cr("*********************************************************");
+ tty->print_cr("** Bailout: Recompile without boxing elimination **");
+ tty->print_cr("*********************************************************");
+ }
if (env()->break_at_compile()) {
// Open the debugger when compiling this method.
tty->print("### Breaking when compiling: ");
@@ -607,7 +615,8 @@ debug_only( int Compile::_debug_idx = 100000; )
// the continuation bci for on stack replacement.
-Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, bool subsume_loads, bool do_escape_analysis )
+Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci,
+ bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing )
: Phase(Compiler),
_env(ci_env),
_log(ci_env->log()),
@@ -623,6 +632,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_warm_calls(NULL),
_subsume_loads(subsume_loads),
_do_escape_analysis(do_escape_analysis),
+ _eliminate_boxing(eliminate_boxing),
_failure_reason(NULL),
_code_buffer("Compile::Fill_buffer"),
_orig_pc_slot(0),
@@ -644,12 +654,13 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_congraph(NULL),
_late_inlines(comp_arena(), 2, 0, NULL),
_string_late_inlines(comp_arena(), 2, 0, NULL),
+ _boxing_late_inlines(comp_arena(), 2, 0, NULL),
_late_inlines_pos(0),
_number_of_mh_late_inlines(0),
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining_list(NULL),
- _print_inlining(0) {
+ _print_inlining_idx(0) {
C = this;
CompileWrapper cw(this);
@@ -674,6 +685,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
set_print_assembly(print_opto_assembly);
set_parsed_irreducible_loop(false);
#endif
+ set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
+ set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
if (ProfileTraps) {
// Make sure the method being compiled gets its own MDO,
@@ -705,7 +718,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
PhaseGVN gvn(node_arena(), estimated_size);
set_initial_gvn(&gvn);
- if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+ if (print_inlining() || print_intrinsics()) {
_print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
}
{ // Scope for timing the parser
@@ -782,7 +795,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
if (failing()) return;
- print_method("Before RemoveUseless", 3);
+ print_method(PHASE_BEFORE_REMOVEUSELESS, 3);
// Remove clutter produced by parsing.
if (!failing()) {
@@ -943,6 +956,7 @@ Compile::Compile( ciEnv* ci_env,
_orig_pc_slot_offset_in_bytes(0),
_subsume_loads(true),
_do_escape_analysis(false),
+ _eliminate_boxing(false),
_failure_reason(NULL),
_code_buffer("Compile::Fill_buffer"),
_has_method_handle_invokes(false),
@@ -962,7 +976,7 @@ Compile::Compile( ciEnv* ci_env,
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining_list(NULL),
- _print_inlining(0) {
+ _print_inlining_idx(0) {
C = this;
#ifndef PRODUCT
@@ -1053,6 +1067,7 @@ void Compile::Init(int aliaslevel) {
set_has_split_ifs(false);
set_has_loops(has_method() && method()->has_loops()); // first approximation
set_has_stringbuilder(false);
+ set_has_boxed_value(false);
_trap_can_recompile = false; // no traps emitted yet
_major_progress = true; // start out assuming good things will happen
set_has_unsafe_access(false);
@@ -1321,6 +1336,10 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
// Array pointers need some flattening
const TypeAryPtr *ta = tj->isa_aryptr();
+ if (ta && ta->is_stable()) {
+ // Erase stability property for alias analysis.
+ tj = ta = ta->cast_to_stable(false);
+ }
if( ta && is_known_inst ) {
if ( offset != Type::OffsetBot &&
offset > arrayOopDesc::length_offset_in_bytes() ) {
@@ -1521,6 +1540,7 @@ void Compile::AliasType::Init(int i, const TypePtr* at) {
_index = i;
_adr_type = at;
_field = NULL;
+ _element = NULL;
_is_rewritable = true; // default
const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
if (atoop != NULL && atoop->is_known_instance()) {
@@ -1639,6 +1659,16 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr
&& flat->is_instptr()->klass() == env()->Class_klass())
alias_type(idx)->set_rewritable(false);
}
+ if (flat->isa_aryptr()) {
+#ifdef ASSERT
+ const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+ // (T_BYTE has the weakest alignment and size restrictions...)
+ assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
+#endif
+ if (flat->offset() == TypePtr::OffsetBot) {
+ alias_type(idx)->set_element(flat->is_aryptr()->elem());
+ }
+ }
if (flat->isa_klassptr()) {
if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
alias_type(idx)->set_rewritable(false);
@@ -1701,7 +1731,7 @@ Compile::AliasType* Compile::alias_type(ciField* field) {
else
t = TypeOopPtr::make_from_klass_raw(field->holder());
AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
- assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct");
+ assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
return atp;
}
@@ -1826,9 +1856,9 @@ void Compile::inline_string_calls(bool parse_time) {
{
ResourceMark rm;
- print_method("Before StringOpts", 3);
+ print_method(PHASE_BEFORE_STRINGOPTS, 3);
PhaseStringOpts pso(initial_gvn(), for_igvn());
- print_method("After StringOpts", 3);
+ print_method(PHASE_AFTER_STRINGOPTS, 3);
}
// now inline anything that we skipped the first time around
@@ -1844,6 +1874,38 @@ void Compile::inline_string_calls(bool parse_time) {
_string_late_inlines.trunc_to(0);
}
+// Late inlining of boxing methods
+void Compile::inline_boxing_calls(PhaseIterGVN& igvn) {
+ if (_boxing_late_inlines.length() > 0) {
+ assert(has_boxed_value(), "inconsistent");
+
+ PhaseGVN* gvn = initial_gvn();
+ set_inlining_incrementally(true);
+
+ assert( igvn._worklist.size() == 0, "should be done with igvn" );
+ for_igvn()->clear();
+ gvn->replace_with(&igvn);
+
+ while (_boxing_late_inlines.length() > 0) {
+ CallGenerator* cg = _boxing_late_inlines.pop();
+ cg->do_late_inline();
+ if (failing()) return;
+ }
+ _boxing_late_inlines.trunc_to(0);
+
+ {
+ ResourceMark rm;
+ PhaseRemoveUseless pru(gvn, for_igvn());
+ }
+
+ igvn = PhaseIterGVN(gvn);
+ igvn.optimize();
+
+ set_inlining_progress(false);
+ set_inlining_incrementally(false);
+ }
+}
+
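
The new inline_boxing_calls mirrors the existing string late-inline pass: drain the queue of deferred CallGenerators, inline each one, bail on failure, then run a single remove-useless plus IGVN round to clean the graph. A standalone sketch of that drain-then-cleanup shape (hypothetical queue and callbacks, not HotSpot types):

    #include <functional>
    #include <vector>

    // Hypothetical deferred-work queue mirroring _boxing_late_inlines;
    // each entry returns false on failure, like checking failing().
    using LateInline = std::function<bool()>;

    bool drain_late_inlines(std::vector<LateInline>& queue,
                            const std::function<void()>& cleanup_round) {
      while (!queue.empty()) {
        LateInline cg = queue.back();
        queue.pop_back();
        if (!cg()) {
          return false;  // mirrors: if (failing()) return;
        }
      }
      cleanup_round();   // one PhaseRemoveUseless + IGVN pass afterwards
      return true;
    }
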
void Compile::inline_incrementally_one(PhaseIterGVN& igvn) {
assert(IncrementalInline, "incremental inlining should be on");
PhaseGVN* gvn = initial_gvn();
@@ -1868,7 +1930,7 @@ void Compile::inline_incrementally_one(PhaseIterGVN& igvn) {
{
ResourceMark rm;
- PhaseRemoveUseless pru(C->initial_gvn(), C->for_igvn());
+ PhaseRemoveUseless pru(gvn, for_igvn());
}
igvn = PhaseIterGVN(gvn);
@@ -1951,7 +2013,7 @@ void Compile::Optimize() {
NOT_PRODUCT( verify_graph_edges(); )
- print_method("After Parsing");
+ print_method(PHASE_AFTER_PARSING);
{
// Iterative Global Value Numbering, including ideal transforms
@@ -1962,16 +2024,29 @@ void Compile::Optimize() {
igvn.optimize();
}
- print_method("Iter GVN 1", 2);
+ print_method(PHASE_ITER_GVN1, 2);
if (failing()) return;
- inline_incrementally(igvn);
+ {
+ NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
+ inline_incrementally(igvn);
+ }
- print_method("Incremental Inline", 2);
+ print_method(PHASE_INCREMENTAL_INLINE, 2);
if (failing()) return;
+ if (eliminate_boxing()) {
+ NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
+ // Inline valueOf() methods now.
+ inline_boxing_calls(igvn);
+
+ print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
+
+ if (failing()) return;
+ }
+
// No more new expensive nodes will be added to the list from here
// so keep only the actual candidates for optimizations.
cleanup_expensive_nodes(igvn);
@@ -1982,7 +2057,7 @@ void Compile::Optimize() {
// Cleanup graph (remove dead nodes).
TracePhase t2("idealLoop", &_t_idealLoop, true);
PhaseIdealLoop ideal_loop( igvn, false, true );
- if (major_progress()) print_method("PhaseIdealLoop before EA", 2);
+ if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
if (failing()) return;
}
ConnectionGraph::do_analysis(this, &igvn);
@@ -1991,7 +2066,7 @@ void Compile::Optimize() {
// Optimize out fields loads from scalar replaceable allocations.
igvn.optimize();
- print_method("Iter GVN after EA", 2);
+ print_method(PHASE_ITER_GVN_AFTER_EA, 2);
if (failing()) return;
@@ -2002,7 +2077,7 @@ void Compile::Optimize() {
igvn.set_delay_transform(false);
igvn.optimize();
- print_method("Iter GVN after eliminating allocations and locks", 2);
+ print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
if (failing()) return;
}
@@ -2018,7 +2093,7 @@ void Compile::Optimize() {
TracePhase t2("idealLoop", &_t_idealLoop, true);
PhaseIdealLoop ideal_loop( igvn, true );
loop_opts_cnt--;
- if (major_progress()) print_method("PhaseIdealLoop 1", 2);
+ if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
if (failing()) return;
}
// Loop opts pass if partial peeling occurred in previous pass
@@ -2026,7 +2101,7 @@ void Compile::Optimize() {
TracePhase t3("idealLoop", &_t_idealLoop, true);
PhaseIdealLoop ideal_loop( igvn, false );
loop_opts_cnt--;
- if (major_progress()) print_method("PhaseIdealLoop 2", 2);
+ if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
if (failing()) return;
}
// Loop opts pass for loop-unrolling before CCP
@@ -2034,7 +2109,7 @@ void Compile::Optimize() {
TracePhase t4("idealLoop", &_t_idealLoop, true);
PhaseIdealLoop ideal_loop( igvn, false );
loop_opts_cnt--;
- if (major_progress()) print_method("PhaseIdealLoop 3", 2);
+ if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
}
if (!failing()) {
// Verify that last round of loop opts produced a valid graph
@@ -2051,7 +2126,7 @@ void Compile::Optimize() {
TracePhase t2("ccp", &_t_ccp, true);
ccp.do_transform();
}
- print_method("PhaseCPP 1", 2);
+ print_method(PHASE_CPP1, 2);
assert( true, "Break here to ccp.dump_old2new_map()");
@@ -2062,7 +2137,7 @@ void Compile::Optimize() {
igvn.optimize();
}
- print_method("Iter GVN 2", 2);
+ print_method(PHASE_ITER_GVN2, 2);
if (failing()) return;
@@ -2075,7 +2150,7 @@ void Compile::Optimize() {
assert( cnt++ < 40, "infinite cycle in loop optimization" );
PhaseIdealLoop ideal_loop( igvn, true);
loop_opts_cnt--;
- if (major_progress()) print_method("PhaseIdealLoop iterations", 2);
+ if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
if (failing()) return;
}
}
@@ -2108,14 +2183,16 @@ void Compile::Optimize() {
}
}
- print_method("Optimize finished", 2);
+ print_method(PHASE_OPTIMIZE_FINISHED, 2);
}
//------------------------------Code_Gen---------------------------------------
// Given a graph, generate code for it
void Compile::Code_Gen() {
- if (failing()) return;
+ if (failing()) {
+ return;
+ }
// Perform instruction selection. You might think we could reclaim Matcher
// memory PDQ, but actually the Matcher is used in generating spill code.
@@ -2127,12 +2204,11 @@ void Compile::Code_Gen() {
// nodes. Mapping is only valid at the root of each matched subtree.
NOT_PRODUCT( verify_graph_edges(); )
- Node_List proj_list;
- Matcher m(proj_list);
- _matcher = &m;
+ Matcher matcher;
+ _matcher = &matcher;
{
TracePhase t2("matcher", &_t_matcher, true);
- m.match();
+ matcher.match();
}
// In debug mode can dump m._nodes.dump() for mapping of ideal to machine
// nodes. Mapping is only valid at the root of each matched subtree.
@@ -2140,31 +2216,26 @@ void Compile::Code_Gen() {
// If you have too many nodes, or if matching has failed, bail out
check_node_count(0, "out of nodes matching instructions");
- if (failing()) return;
+ if (failing()) {
+ return;
+ }
// Build a proper-looking CFG
- PhaseCFG cfg(node_arena(), root(), m);
+ PhaseCFG cfg(node_arena(), root(), matcher);
_cfg = &cfg;
{
NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); )
- cfg.Dominators();
- if (failing()) return;
-
- NOT_PRODUCT( verify_graph_edges(); )
-
- cfg.Estimate_Block_Frequency();
- cfg.GlobalCodeMotion(m,unique(),proj_list);
- if (failing()) return;
-
- print_method("Global code motion", 2);
+ bool success = cfg.do_global_code_motion();
+ if (!success) {
+ return;
+ }
+ print_method(PHASE_GLOBAL_CODE_MOTION, 2);
NOT_PRODUCT( verify_graph_edges(); )
-
debug_only( cfg.verify(); )
}
- NOT_PRODUCT( verify_graph_edges(); )
- PhaseChaitin regalloc(unique(), cfg, m);
+ PhaseChaitin regalloc(unique(), cfg, matcher);
_regalloc = &regalloc;
{
TracePhase t2("regalloc", &_t_registerAllocation, true);
@@ -2185,7 +2256,7 @@ void Compile::Code_Gen() {
// can now safely remove it.
{
NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); )
- cfg.remove_empty();
+ cfg.remove_empty_blocks();
if (do_freq_based_layout()) {
PhaseBlockLayout layout(cfg);
} else {
@@ -2209,7 +2280,7 @@ void Compile::Code_Gen() {
Output();
}
- print_method("Final Code");
+ print_method(PHASE_FINAL_CODE);
// He's dead, Jim.
_cfg = (PhaseCFG*)0xdeadbeef;
@@ -2232,38 +2303,50 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {
_regalloc->dump_frame();
Node *n = NULL;
- for( uint i=0; i<_cfg->_num_blocks; i++ ) {
- if (VMThread::should_terminate()) { cut_short = true; break; }
- Block *b = _cfg->_blocks[i];
- if (b->is_connector() && !Verbose) continue;
- n = b->_nodes[0];
- if (pcs && n->_idx < pc_limit)
+ for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
+ if (VMThread::should_terminate()) {
+ cut_short = true;
+ break;
+ }
+ Block* block = _cfg->get_block(i);
+ if (block->is_connector() && !Verbose) {
+ continue;
+ }
+ n = block->head();
+ if (pcs && n->_idx < pc_limit) {
tty->print("%3.3x ", pcs[n->_idx]);
- else
+ } else {
tty->print(" ");
- b->dump_head( &_cfg->_bbs );
- if (b->is_connector()) {
+ }
+ block->dump_head(_cfg);
+ if (block->is_connector()) {
tty->print_cr(" # Empty connector block");
- } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
+ } else if (block->num_preds() == 2 && block->pred(1)->is_CatchProj() && block->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
tty->print_cr(" # Block is sole successor of call");
}
// For all instructions
Node *delay = NULL;
- for( uint j = 0; j<b->_nodes.size(); j++ ) {
- if (VMThread::should_terminate()) { cut_short = true; break; }
- n = b->_nodes[j];
+ for (uint j = 0; j < block->number_of_nodes(); j++) {
+ if (VMThread::should_terminate()) {
+ cut_short = true;
+ break;
+ }
+ n = block->get_node(j);
if (valid_bundle_info(n)) {
- Bundle *bundle = node_bundling(n);
+ Bundle* bundle = node_bundling(n);
if (bundle->used_in_unconditional_delay()) {
delay = n;
continue;
}
- if (bundle->starts_bundle())
+ if (bundle->starts_bundle()) {
starts_bundle = '+';
+ }
}
- if (WizardMode) n->dump();
+ if (WizardMode) {
+ n->dump();
+ }
if( !n->is_Region() && // Don't print in the Assembly
!n->is_Phi() && // a few noisily useless nodes
@@ -2602,7 +2685,7 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
addp->in(AddPNode::Base) == n->in(AddPNode::Base),
"Base pointers must match" );
#ifdef _LP64
- if ((UseCompressedOops || UseCompressedKlassPointers) &&
+ if ((UseCompressedOops || UseCompressedClassPointers) &&
addp->Opcode() == Op_ConP &&
addp == n->in(AddPNode::Base) &&
n->in(AddPNode::Offset)->is_Con()) {
@@ -2933,6 +3016,7 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
}
break;
case Op_MemBarStoreStore:
+ case Op_MemBarRelease:
// Break the link with AllocateNode: it is no longer useful and
// confuses register allocation.
if (n->req() > MemBarNode::Precedent) {
@@ -2988,7 +3072,7 @@ void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_
// Skip next transformation if compressed oops are not used.
if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
- (!UseCompressedOops && !UseCompressedKlassPointers))
+ (!UseCompressedOops && !UseCompressedClassPointers))
return;
// Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
@@ -3295,8 +3379,16 @@ void Compile::record_failure(const char* reason) {
// Record the first failure reason.
_failure_reason = reason;
}
+
+ EventCompilerFailure event;
+ if (event.should_commit()) {
+ event.set_compileID(Compile::compile_id());
+ event.set_failure(reason);
+ event.commit();
+ }
+
if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
- C->print_method(_failure_reason);
+ C->print_method(PHASE_FAILURE);
}
_root = NULL; // flush the graph, too
}
@@ -3495,7 +3587,7 @@ void Compile::ConstantTable::add(Constant& con) {
}
Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) {
- Block* b = Compile::current()->cfg()->_bbs[n->_idx];
+ Block* b = Compile::current()->cfg()->get_block_for_node(n);
Constant con(type, value, b->_freq);
add(con);
return con;
@@ -3558,7 +3650,7 @@ void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n
}
void Compile::dump_inlining() {
- if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+ if (print_inlining() || print_intrinsics()) {
// Print inlining message for candidates that we couldn't inline
// for lack of space or non constant receiver
for (int i = 0; i < _late_inlines.length(); i++) {
@@ -3582,7 +3674,7 @@ void Compile::dump_inlining() {
}
}
for (int i = 0; i < _print_inlining_list->length(); i++) {
- tty->print(_print_inlining_list->at(i).ss()->as_string());
+ tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
}
}
}
diff --git a/src/share/vm/opto/compile.hpp b/src/share/vm/opto/compile.hpp
index d951fbf5f..631372efa 100644
--- a/src/share/vm/opto/compile.hpp
+++ b/src/share/vm/opto/compile.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,10 +36,12 @@
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "opto/idealGraphPrinter.hpp"
+#include "opto/phasetype.hpp"
#include "opto/phase.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/vmThread.hpp"
+#include "trace/tracing.hpp"
class Block;
class Bundle;
@@ -70,6 +72,7 @@ class Scope;
class StartNode;
class SafePointNode;
class JVMState;
+class Type;
class TypeData;
class TypePtr;
class TypeOopPtr;
@@ -117,6 +120,7 @@ class Compile : public Phase {
int _index; // unique index, used with MergeMemNode
const TypePtr* _adr_type; // normalized address type
ciField* _field; // relevant instance field, or null if none
+ const Type* _element; // relevant array element type, or null if none
bool _is_rewritable; // false if the memory is write-once only
int _general_index; // if this type is an instance, the general
// type that this is an instance of
@@ -127,6 +131,7 @@ class Compile : public Phase {
int index() const { return _index; }
const TypePtr* adr_type() const { return _adr_type; }
ciField* field() const { return _field; }
+ const Type* element() const { return _element; }
bool is_rewritable() const { return _is_rewritable; }
bool is_volatile() const { return (_field ? _field->is_volatile() : false); }
int general_index() const { return (_general_index != 0) ? _general_index : _index; }
@@ -135,7 +140,14 @@ class Compile : public Phase {
void set_field(ciField* f) {
assert(!_field,"");
_field = f;
- if (f->is_final()) _is_rewritable = false;
+ if (f->is_final() || f->is_stable()) {
+ // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
+ _is_rewritable = false;
+ }
+ }
+ void set_element(const Type* e) {
+ assert(_element == NULL, "");
+ _element = e;
}
void print_on(outputStream* st) PRODUCT_RETURN;
@@ -262,6 +274,7 @@ class Compile : public Phase {
const bool _save_argument_registers; // save/restore arg regs for trampolines
const bool _subsume_loads; // Load can be matched as part of a larger op.
const bool _do_escape_analysis; // Do escape analysis.
+ const bool _eliminate_boxing; // Do boxing elimination.
ciMethod* _method; // The method being compiled.
int _entry_bci; // entry bci for osr methods.
const TypeFunc* _tf; // My kind of signature
@@ -287,6 +300,7 @@ class Compile : public Phase {
bool _has_split_ifs; // True if the method _may_ have some split-if
bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores.
bool _has_stringbuilder; // True if StringBuffers or StringBuilders are allocated
+ bool _has_boxed_value; // True if a boxed object is allocated
int _max_vector_size; // Maximum size of generated vectors
uint _trap_hist[trapHistLength]; // Cumulative traps
bool _trap_can_recompile; // Have we emitted a recompiling trap?
@@ -298,6 +312,8 @@ class Compile : public Phase {
bool _do_method_data_update; // True if we generate code to update MethodData*s
int _AliasLevel; // Locally-adjusted version of AliasLevel flag.
bool _print_assembly; // True if we should dump assembly code for this compilation
+ bool _print_inlining; // True if we should print inlining for this compilation
+ bool _print_intrinsics; // True if we should print intrinsics for this compilation
#ifndef PRODUCT
bool _trace_opto_output;
bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
@@ -320,6 +336,7 @@ class Compile : public Phase {
IdealGraphPrinter* _printer;
#endif
+
// Node management
uint _unique; // Counter for unique Node indices
VectorSet _dead_node_list; // Set of dead nodes
@@ -375,6 +392,8 @@ class Compile : public Phase {
// main parsing has finished.
GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
+ GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations
+
int _late_inlines_pos; // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
uint _number_of_mh_late_inlines; // number of method handle late inlining still pending
@@ -397,7 +416,7 @@ class Compile : public Phase {
};
GrowableArray<PrintInliningBuffer>* _print_inlining_list;
- int _print_inlining;
+ int _print_inlining_idx;
// Only keep nodes in the expensive node list that need to be optimized
void cleanup_expensive_nodes(PhaseIterGVN &igvn);
@@ -409,24 +428,24 @@ class Compile : public Phase {
public:
outputStream* print_inlining_stream() const {
- return _print_inlining_list->at(_print_inlining).ss();
+ return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
}
void print_inlining_skip(CallGenerator* cg) {
- if (PrintInlining) {
- _print_inlining_list->at(_print_inlining).set_cg(cg);
- _print_inlining++;
- _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
+ if (_print_inlining) {
+ _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
+ _print_inlining_idx++;
+ _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
}
}
void print_inlining_insert(CallGenerator* cg) {
- if (PrintInlining) {
+ if (_print_inlining) {
for (int i = 0; i < _print_inlining_list->length(); i++) {
- if (_print_inlining_list->at(i).cg() == cg) {
+ if (_print_inlining_list->adr_at(i)->cg() == cg) {
_print_inlining_list->insert_before(i+1, PrintInliningBuffer());
- _print_inlining = i+1;
- _print_inlining_list->at(i).set_cg(NULL);
+ _print_inlining_idx = i+1;
+ _print_inlining_list->adr_at(i)->set_cg(NULL);
return;
}
}
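
The at() to adr_at() switch throughout this hunk is more than style if GrowableArray::at returns its element by value: calling set_cg() on that returned copy mutates a temporary and the write is silently lost, while adr_at() returns a pointer into the array so the write sticks. A minimal standalone reproduction of the pitfall (hypothetical container written to return by value, as the change here suggests):

    #include <cassert>
    #include <vector>

    struct Buffer { int cg = 0; void set_cg(int v) { cg = v; } };

    struct ByValueArray {
      std::vector<Buffer> data;
      Buffer  at(int i) const { return data[i]; }   // returns a COPY
      Buffer* adr_at(int i)   { return &data[i]; }  // returns a pointer
    };

    int main() {
      ByValueArray a;
      a.data.resize(1);
      a.at(0).set_cg(42);            // mutates a temporary; write is lost
      assert(a.data[0].cg == 0);
      a.adr_at(0)->set_cg(42);       // writes through into the array
      assert(a.data[0].cg == 42);
    }
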
@@ -486,8 +505,12 @@ class Compile : public Phase {
// instructions that subsume a load may result in an unschedulable
// instruction sequence.
bool subsume_loads() const { return _subsume_loads; }
- // Do escape analysis.
+ /** Do escape analysis. */
bool do_escape_analysis() const { return _do_escape_analysis; }
+ /** Do boxing elimination. */
+ bool eliminate_boxing() const { return _eliminate_boxing; }
+ /** Do aggressive boxing elimination. */
+ bool aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; }
bool save_argument_registers() const { return _save_argument_registers; }
@@ -527,6 +550,8 @@ class Compile : public Phase {
void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
bool has_stringbuilder() const { return _has_stringbuilder; }
void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
+ bool has_boxed_value() const { return _has_boxed_value; }
+ void set_has_boxed_value(bool z) { _has_boxed_value = z; }
int max_vector_size() const { return _max_vector_size; }
void set_max_vector_size(int s) { _max_vector_size = s; }
void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
@@ -549,6 +574,10 @@ class Compile : public Phase {
int AliasLevel() const { return _AliasLevel; }
bool print_assembly() const { return _print_assembly; }
void set_print_assembly(bool z) { _print_assembly = z; }
+ bool print_inlining() const { return _print_inlining; }
+ void set_print_inlining(bool z) { _print_inlining = z; }
+ bool print_intrinsics() const { return _print_intrinsics; }
+ void set_print_intrinsics(bool z) { _print_intrinsics = z; }
// check the CompilerOracle for special behaviours for this compile
bool method_has_option(const char * option) {
return method() != NULL && method()->has_option(option);
@@ -563,28 +592,54 @@ class Compile : public Phase {
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
+ jlong _latest_stage_start_counter;
+
void begin_method() {
#ifndef PRODUCT
if (_printer) _printer->begin_method(this);
#endif
+ C->_latest_stage_start_counter = os::elapsed_counter();
}
- void print_method(const char * name, int level = 1) {
+
+ void print_method(CompilerPhaseType cpt, int level = 1) {
+ EventCompilerPhase event(UNTIMED);
+ if (event.should_commit()) {
+ event.set_starttime(C->_latest_stage_start_counter);
+ event.set_endtime(os::elapsed_counter());
+ event.set_phase((u1) cpt);
+ event.set_compileID(C->_compile_id);
+ event.set_phaseLevel(level);
+ event.commit();
+ }
+
+
#ifndef PRODUCT
- if (_printer) _printer->print_method(this, name, level);
+ if (_printer) _printer->print_method(this, CompilerPhaseTypeHelper::to_string(cpt), level);
#endif
+ C->_latest_stage_start_counter = os::elapsed_counter();
}
- void end_method() {
+
+ void end_method(int level = 1) {
+ EventCompilerPhase event(UNTIMED);
+ if (event.should_commit()) {
+ event.set_starttime(C->_latest_stage_start_counter);
+ event.set_endtime(os::elapsed_counter());
+ event.set_phase((u1) PHASE_END);
+ event.set_compileID(C->_compile_id);
+ event.set_phaseLevel(level);
+ event.commit();
+ }
#ifndef PRODUCT
if (_printer) _printer->end_method();
#endif
}
- int macro_count() { return _macro_nodes->length(); }
- int predicate_count() { return _predicate_opaqs->length();}
- int expensive_count() { return _expensive_nodes->length(); }
- Node* macro_node(int idx) { return _macro_nodes->at(idx); }
- Node* predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);}
- Node* expensive_node(int idx) { return _expensive_nodes->at(idx); }
+ int macro_count() const { return _macro_nodes->length(); }
+ int predicate_count() const { return _predicate_opaqs->length();}
+ int expensive_count() const { return _expensive_nodes->length(); }
+ Node* macro_node(int idx) const { return _macro_nodes->at(idx); }
+ Node* predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx);}
+ Node* expensive_node(int idx) const { return _expensive_nodes->at(idx); }
ConnectionGraph* congraph() { return _congraph;}
void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;}
void add_macro_node(Node * n) {
@@ -766,7 +821,12 @@ class Compile : public Phase {
// Decide how to build a call.
// The profile factor is a discount to apply to this site's interp. profile.
CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false);
- bool should_delay_inlining(ciMethod* call_method, JVMState* jvms);
+ bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
+ return should_delay_string_inlining(call_method, jvms) ||
+ should_delay_boxing_inlining(call_method, jvms);
+ }
+ bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
+ bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
// Helper functions to identify inlining potential at call-site
ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
@@ -822,6 +882,10 @@ class Compile : public Phase {
_string_late_inlines.push(cg);
}
+ void add_boxing_late_inline(CallGenerator* cg) {
+ _boxing_late_inlines.push(cg);
+ }
+
void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);
void dump_inlining();
@@ -841,6 +905,7 @@ class Compile : public Phase {
void inline_incrementally_one(PhaseIterGVN& igvn);
void inline_incrementally(PhaseIterGVN& igvn);
void inline_string_calls(bool parse_time);
+ void inline_boxing_calls(PhaseIterGVN& igvn);
// Matching, CFG layout, allocation, code generation
PhaseCFG* cfg() { return _cfg; }
@@ -913,7 +978,8 @@ class Compile : public Phase {
// replacement, entry_bci indicates the bytecode for which to compile a
// continuation.
Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
- int entry_bci, bool subsume_loads, bool do_escape_analysis);
+ int entry_bci, bool subsume_loads, bool do_escape_analysis,
+ bool eliminate_boxing);
// Second major entry point. From the TypeFunc signature, generate code
// to pass arguments from the Java calling convention to the C calling
diff --git a/src/share/vm/opto/connode.cpp b/src/share/vm/opto/connode.cpp
index eb343a822..b59025ad4 100644
--- a/src/share/vm/opto/connode.cpp
+++ b/src/share/vm/opto/connode.cpp
@@ -630,7 +630,7 @@ const Type *EncodePKlassNode::Value( PhaseTransform *phase ) const {
if (t == Type::TOP) return Type::TOP;
assert (t != TypePtr::NULL_PTR, "null klass?");
- assert(UseCompressedKlassPointers && t->isa_klassptr(), "only klass ptr here");
+ assert(UseCompressedClassPointers && t->isa_klassptr(), "only klass ptr here");
return t->make_narrowklass();
}
diff --git a/src/share/vm/opto/doCall.cpp b/src/share/vm/opto/doCall.cpp
index 9a7562d01..8784bbe2d 100644
--- a/src/share/vm/opto/doCall.cpp
+++ b/src/share/vm/opto/doCall.cpp
@@ -41,9 +41,9 @@
#include "runtime/sharedRuntime.hpp"
void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
- if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
+ if (TraceTypeProfile || C->print_inlining()) {
outputStream* out = tty;
- if (!PrintInlining) {
+ if (!C->print_inlining()) {
if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
method->print_short_name();
tty->cr();
@@ -176,9 +176,12 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// Delay the inlining of this method to give us the
// opportunity to perform some high level optimizations
// first.
- if (should_delay_inlining(callee, jvms)) {
+ if (should_delay_string_inlining(callee, jvms)) {
assert(!delayed_forbidden, "strange");
return CallGenerator::for_string_late_inline(callee, cg);
+ } else if (should_delay_boxing_inlining(callee, jvms)) {
+ assert(!delayed_forbidden, "strange");
+ return CallGenerator::for_boxing_late_inline(callee, cg);
} else if ((should_delay || AlwaysIncrementalInline) && !delayed_forbidden) {
return CallGenerator::for_late_inline(callee, cg);
}
@@ -276,7 +279,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
-bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
+bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) {
if (has_stringbuilder()) {
if ((call_method->holder() == C->env()->StringBuilder_klass() ||
@@ -327,6 +330,13 @@ bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
return false;
}
+bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
+ if (eliminate_boxing() && call_method->is_boxing_method()) {
+ set_has_boxed_value(true);
+ return true;
+ }
+ return false;
+}
// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
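
For reference, the dispatch in Compile::call_generator above now distinguishes three flavors of delayed inlining. A hypothetical condensation of the decision chain (names illustrative, not HotSpot code):

enum class Delay { None, String, Boxing, Incremental };

// Mirrors the order of the checks in Compile::call_generator above.
Delay classify_call(bool delays_for_string_opt,   // should_delay_string_inlining
                    bool eliminate_boxing,        // boxing elimination enabled
                    bool is_boxing_method,        // e.g. Integer.valueOf
                    bool incremental_ok) {        // should_delay || AlwaysIncrementalInline
  if (delays_for_string_opt)                return Delay::String;
  if (eliminate_boxing && is_boxing_method) return Delay::Boxing;
  if (incremental_ok)                       return Delay::Incremental;
  return Delay::None;  // inline immediately
}
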
diff --git a/src/share/vm/opto/domgraph.cpp b/src/share/vm/opto/domgraph.cpp
index 9973cb562..5e1886c24 100644
--- a/src/share/vm/opto/domgraph.cpp
+++ b/src/share/vm/opto/domgraph.cpp
@@ -32,9 +32,6 @@
// Portions of code courtesy of Clifford Click
-// Optimization - Graph Style
-
-//------------------------------Tarjan-----------------------------------------
// A data structure that holds all the information needed to find dominators.
struct Tarjan {
Block *_block; // Basic block for this info
@@ -60,23 +57,21 @@ struct Tarjan {
};
-//------------------------------Dominator--------------------------------------
// Compute the dominator tree of the CFG. The CFG must already have been
// constructed. This is the Lengauer & Tarjan O(E*alpha(E,V)) algorithm.
-void PhaseCFG::Dominators( ) {
+void PhaseCFG::build_dominator_tree() {
// Pre-grow the blocks array, prior to the ResourceMark kicking in
- _blocks.map(_num_blocks,0);
+ _blocks.map(number_of_blocks(), 0);
ResourceMark rm;
// Setup mappings from my Graph to Tarjan's stuff and back
// Note: Tarjan uses 1-based arrays
- Tarjan *tarjan = NEW_RESOURCE_ARRAY(Tarjan,_num_blocks+1);
+ Tarjan* tarjan = NEW_RESOURCE_ARRAY(Tarjan, number_of_blocks() + 1);
// Tarjan's algorithm, almost verbatim:
// Step 1:
- _rpo_ctr = _num_blocks;
- uint dfsnum = DFS( tarjan );
- if( dfsnum-1 != _num_blocks ) {// Check for unreachable loops!
+ uint dfsnum = do_DFS(tarjan, number_of_blocks());
+ if (dfsnum - 1 != number_of_blocks()) { // Check for unreachable loops!
// If the returned dfsnum does not match the number of blocks, then we
// must have some unreachable loops. These can be made at any time by
// IterGVN. They are cleaned up by CCP or the loop opts, but the last
@@ -93,20 +88,19 @@ void PhaseCFG::Dominators( ) {
C->record_method_not_compilable("unreachable loop");
return;
}
- _blocks._cnt = _num_blocks;
+ _blocks._cnt = number_of_blocks();
// Tarjan is using 1-based arrays, so these are some initialize flags
tarjan[0]._size = tarjan[0]._semi = 0;
tarjan[0]._label = &tarjan[0];
- uint i;
- for( i=_num_blocks; i>=2; i-- ) { // For all vertices in DFS order
+ for (uint i = number_of_blocks(); i >= 2; i--) { // For all vertices in DFS order
Tarjan *w = &tarjan[i]; // Get vertex from DFS
// Step 2:
Node *whead = w->_block->head();
- for( uint j=1; j < whead->req(); j++ ) {
- Block *b = _bbs[whead->in(j)->_idx];
+ for (uint j = 1; j < whead->req(); j++) {
+ Block* b = get_block_for_node(whead->in(j));
Tarjan *vx = &tarjan[b->_pre_order];
Tarjan *u = vx->EVAL();
if( u->_semi < w->_semi )
@@ -130,19 +124,19 @@ void PhaseCFG::Dominators( ) {
}
// Step 4:
- for( i=2; i <= _num_blocks; i++ ) {
+ for (uint i = 2; i <= number_of_blocks(); i++) {
Tarjan *w = &tarjan[i];
if( w->_dom != &tarjan[w->_semi] )
w->_dom = w->_dom->_dom;
w->_dom_next = w->_dom_child = NULL; // Initialize for building tree later
}
// No immediate dominator for the root
- Tarjan *w = &tarjan[_broot->_pre_order];
+ Tarjan *w = &tarjan[get_root_block()->_pre_order];
w->_dom = NULL;
w->_dom_next = w->_dom_child = NULL; // Initialize for building tree later
// Convert the dominator tree array into my kind of graph
- for( i=1; i<=_num_blocks;i++){// For all Tarjan vertices
+ for(uint i = 1; i <= number_of_blocks(); i++){ // For all Tarjan vertices
Tarjan *t = &tarjan[i]; // Handy access
Tarjan *tdom = t->_dom; // Handy access to immediate dominator
if( tdom ) { // Root has no immediate dominator
@@ -152,11 +146,10 @@ void PhaseCFG::Dominators( ) {
} else
t->_block->_idom = NULL; // Root
}
- w->setdepth( _num_blocks+1 ); // Set depth in dominator tree
+ w->setdepth(number_of_blocks() + 1); // Set depth in dominator tree
}
-//----------------------------Block_Stack--------------------------------------
class Block_Stack {
private:
struct Block_Descr {
@@ -214,26 +207,25 @@ class Block_Stack {
}
};
-//-------------------------most_frequent_successor-----------------------------
// Find the index into the b->succs[] array of the most frequent successor.
uint Block_Stack::most_frequent_successor( Block *b ) {
uint freq_idx = 0;
int eidx = b->end_idx();
- Node *n = b->_nodes[eidx];
+ Node *n = b->get_node(eidx);
int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode();
switch( op ) {
case Op_CountedLoopEnd:
case Op_If: { // Split frequency amongst children
float prob = n->as_MachIf()->_prob;
// Is succ[0] the TRUE branch or the FALSE branch?
- if( b->_nodes[eidx+1]->Opcode() == Op_IfFalse )
+ if( b->get_node(eidx+1)->Opcode() == Op_IfFalse )
prob = 1.0f - prob;
freq_idx = prob < PROB_FAIR; // freq=1 for succ[0] < 0.5 prob
break;
}
case Op_Catch: // Split frequency amongst children
for( freq_idx = 0; freq_idx < b->_num_succs; freq_idx++ )
- if( b->_nodes[eidx+1+freq_idx]->as_CatchProj()->_con == CatchProjNode::fall_through_index )
+ if( b->get_node(eidx+1+freq_idx)->as_CatchProj()->_con == CatchProjNode::fall_through_index )
break;
// Handle case of no fall-thru (e.g., check-cast MUST throw an exception)
if( freq_idx == b->_num_succs ) freq_idx = 0;
@@ -258,40 +250,38 @@ uint Block_Stack::most_frequent_successor( Block *b ) {
return freq_idx;
}
-//------------------------------DFS--------------------------------------------
// Perform DFS search. Setup 'vertex' as DFS to vertex mapping. Setup
// 'semi' as vertex to DFS mapping. Set 'parent' to DFS parent.
-uint PhaseCFG::DFS( Tarjan *tarjan ) {
- Block *b = _broot;
+uint PhaseCFG::do_DFS(Tarjan *tarjan, uint rpo_counter) {
+ Block* root_block = get_root_block();
uint pre_order = 1;
- // Allocate stack of size _num_blocks+1 to avoid frequent realloc
- Block_Stack bstack(tarjan, _num_blocks+1);
+ // Allocate stack of size number_of_blocks() + 1 to avoid frequent realloc
+ Block_Stack bstack(tarjan, number_of_blocks() + 1);
// Push on stack the state for the first block
- bstack.push(pre_order, b);
+ bstack.push(pre_order, root_block);
++pre_order;
while (bstack.is_nonempty()) {
if (!bstack.last_successor()) {
// Walk over all successors in pre-order (DFS).
- Block *s = bstack.next_successor();
- if (s->_pre_order == 0) { // Check for no-pre-order, not-visited
+ Block* next_block = bstack.next_successor();
+ if (next_block->_pre_order == 0) { // Check for no-pre-order, not-visited
// Push on stack the state of successor
- bstack.push(pre_order, s);
+ bstack.push(pre_order, next_block);
++pre_order;
}
}
else {
// Build a reverse post-order in the CFG _blocks array
Block *stack_top = bstack.pop();
- stack_top->_rpo = --_rpo_ctr;
+ stack_top->_rpo = --rpo_counter;
_blocks.map(stack_top->_rpo, stack_top);
}
}
return pre_order;
}
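
The rewritten do_DFS threads the RPO counter through as a parameter instead of the old _rpo_ctr field: pre-order numbers are handed out on push, and reverse-post-order numbers are produced by decrementing the counter on pop. A self-contained sketch of the same numbering scheme on an adjacency-list CFG (not HotSpot code):

#include <cstddef>
#include <stack>
#include <utility>
#include <vector>

// Number blocks in reverse post-order by decrementing a counter on pop,
// exactly as do_DFS() does with rpo_counter above.
void dfs_rpo(const std::vector<std::vector<int>>& succs, int root,
             std::vector<int>& rpo) {
  int counter = static_cast<int>(succs.size());
  std::vector<bool> visited(succs.size(), false);
  std::stack<std::pair<int, std::size_t>> st;  // (block, next successor index)
  visited[root] = true;
  st.push({root, 0});
  while (!st.empty()) {
    int b = st.top().first;
    std::size_t& i = st.top().second;
    if (i < succs[b].size()) {
      int s = succs[b][i++];                   // advance before descending
      if (!visited[s]) {
        visited[s] = true;
        st.push({s, 0});
      }
    } else {
      rpo[b] = --counter;                      // post-order pop => RPO number
      st.pop();
    }
  }
}
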
-//------------------------------COMPRESS---------------------------------------
void Tarjan::COMPRESS()
{
assert( _ancestor != 0, "" );
@@ -303,14 +293,12 @@ void Tarjan::COMPRESS()
}
}
-//------------------------------EVAL-------------------------------------------
Tarjan *Tarjan::EVAL() {
if( !_ancestor ) return _label;
COMPRESS();
return (_ancestor->_label->_semi >= _label->_semi) ? _label : _ancestor->_label;
}
-//------------------------------LINK-------------------------------------------
void Tarjan::LINK( Tarjan *w, Tarjan *tarjan0 ) {
Tarjan *s = w;
while( w->_label->_semi < s->_child->_label->_semi ) {
@@ -333,7 +321,6 @@ void Tarjan::LINK( Tarjan *w, Tarjan *tarjan0 ) {
}
}
-//------------------------------setdepth---------------------------------------
void Tarjan::setdepth( uint stack_size ) {
Tarjan **top = NEW_RESOURCE_ARRAY(Tarjan*, stack_size);
Tarjan **next = top;
@@ -362,8 +349,7 @@ void Tarjan::setdepth( uint stack_size ) {
} while (last < top);
}
-//*********************** DOMINATORS ON THE SEA OF NODES***********************
-//------------------------------NTarjan----------------------------------------
+// Compute dominators on the Sea of Nodes form
// A data structure that holds all the information needed to find dominators.
struct NTarjan {
Node *_control; // Control node associated with this info
@@ -396,7 +382,6 @@ struct NTarjan {
#endif
};
-//------------------------------Dominator--------------------------------------
// Compute the dominator tree of the sea of nodes. This version walks all CFG
// nodes (using the is_CFG() call) and places them in a dominator tree. Thus,
// it needs a count of the CFG nodes for the mapping table. This is the
@@ -517,7 +502,6 @@ void PhaseIdealLoop::Dominators() {
}
}
-//------------------------------DFS--------------------------------------------
// Perform DFS search. Setup 'vertex' as DFS to vertex mapping. Setup
// 'semi' as vertex to DFS mapping. Set 'parent' to DFS parent.
int NTarjan::DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uint *dfsorder) {
@@ -560,7 +544,6 @@ int NTarjan::DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uin
return dfsnum;
}
-//------------------------------COMPRESS---------------------------------------
void NTarjan::COMPRESS()
{
assert( _ancestor != 0, "" );
@@ -572,14 +555,12 @@ void NTarjan::COMPRESS()
}
}
-//------------------------------EVAL-------------------------------------------
NTarjan *NTarjan::EVAL() {
if( !_ancestor ) return _label;
COMPRESS();
return (_ancestor->_label->_semi >= _label->_semi) ? _label : _ancestor->_label;
}
-//------------------------------LINK-------------------------------------------
void NTarjan::LINK( NTarjan *w, NTarjan *ntarjan0 ) {
NTarjan *s = w;
while( w->_label->_semi < s->_child->_label->_semi ) {
@@ -602,7 +583,6 @@ void NTarjan::LINK( NTarjan *w, NTarjan *ntarjan0 ) {
}
}
-//------------------------------setdepth---------------------------------------
void NTarjan::setdepth( uint stack_size, uint *dom_depth ) {
NTarjan **top = NEW_RESOURCE_ARRAY(NTarjan*, stack_size);
NTarjan **next = top;
@@ -631,7 +611,6 @@ void NTarjan::setdepth( uint stack_size, uint *dom_depth ) {
} while (last < top);
}
-//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void NTarjan::dump(int offset) const {
// Dump the data from this node
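
As context for the Tarjan/NTarjan structures in this file: Lengauer-Tarjan computes a semidominator for every node via EVAL over a link-eval forest, and EVAL relies on path compression to stay near-linear. A simplified recursive sketch of the EVAL/COMPRESS pair (the simple variant; HotSpot's version above additionally balances the forest with _size/_child in LINK):

struct TNode {
  TNode* ancestor = nullptr;  // forest edge installed by LINK
  TNode* label    = nullptr;  // min-semi node on the path upward;
                              // starts out pointing at the node itself
  int    semi     = 0;        // DFS number of the semidominator candidate
};

// Compress the ancestor path, pulling the best label seen toward v.
static void compress(TNode* v) {
  if (v->ancestor->ancestor != nullptr) {
    compress(v->ancestor);
    if (v->ancestor->label->semi < v->label->semi)
      v->label = v->ancestor->label;
    v->ancestor = v->ancestor->ancestor;
  }
}

// EVAL(v): the node of minimal semi on the path from v to its forest root.
static TNode* eval(TNode* v) {
  if (v->ancestor == nullptr) return v->label;  // v not linked yet
  compress(v);
  return v->label;
}
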
diff --git a/src/share/vm/opto/escape.cpp b/src/share/vm/opto/escape.cpp
index c5a93dfa6..c95226f11 100644
--- a/src/share/vm/opto/escape.cpp
+++ b/src/share/vm/opto/escape.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,15 +63,19 @@ bool ConnectionGraph::has_candidates(Compile *C) {
// EA brings benefits only when the code has allocations and/or locks which
// are represented by ideal Macro nodes.
int cnt = C->macro_count();
- for( int i=0; i < cnt; i++ ) {
+ for (int i = 0; i < cnt; i++) {
Node *n = C->macro_node(i);
- if ( n->is_Allocate() )
+ if (n->is_Allocate())
return true;
- if( n->is_Lock() ) {
+ if (n->is_Lock()) {
Node* obj = n->as_Lock()->obj_node()->uncast();
- if( !(obj->is_Parm() || obj->is_Con()) )
+ if (!(obj->is_Parm() || obj->is_Con()))
return true;
}
+ if (n->is_CallStaticJava() &&
+ n->as_CallStaticJava()->is_boxing_method()) {
+ return true;
+ }
}
return false;
}
@@ -115,7 +119,7 @@ bool ConnectionGraph::compute_escape() {
{ Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true);
// 1. Populate Connection Graph (CG) with PointsTo nodes.
- ideal_nodes.map(C->unique(), NULL); // preallocate space
+ ideal_nodes.map(C->live_nodes(), NULL); // preallocate space
// Initialize worklist
if (C->root() != NULL) {
ideal_nodes.push(C->root());
@@ -152,8 +156,11 @@ bool ConnectionGraph::compute_escape() {
// escape status of the associated Allocate node some of them
// may be eliminated.
storestore_worklist.append(n);
+ } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
+ (n->req() > MemBarNode::Precedent)) {
+ record_for_optimizer(n);
#ifdef ASSERT
- } else if(n->is_AddP()) {
+ } else if (n->is_AddP()) {
// Collect address nodes for graph verification.
addp_worklist.append(n);
#endif
@@ -206,8 +213,15 @@ bool ConnectionGraph::compute_escape() {
int non_escaped_length = non_escaped_worklist.length();
for (int next = 0; next < non_escaped_length; next++) {
JavaObjectNode* ptn = non_escaped_worklist.at(next);
- if (ptn->escape_state() == PointsToNode::NoEscape &&
- ptn->scalar_replaceable()) {
+ bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
+ Node* n = ptn->ideal_node();
+ if (n->is_Allocate()) {
+ n->as_Allocate()->_is_non_escaping = noescape;
+ }
+ if (n->is_CallStaticJava()) {
+ n->as_CallStaticJava()->_is_non_escaping = noescape;
+ }
+ if (noescape && ptn->scalar_replaceable()) {
adjust_scalar_replaceable_state(ptn);
if (ptn->scalar_replaceable()) {
alloc_worklist.append(ptn->ideal_node());
@@ -263,7 +277,7 @@ bool ConnectionGraph::compute_escape() {
// scalar replaceable objects.
split_unique_types(alloc_worklist);
if (C->failing()) return false;
- C->print_method("After Escape Analysis", 2);
+ C->print_method(PHASE_AFTER_EA, 2);
#ifdef ASSERT
} else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
@@ -330,8 +344,10 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de
// Don't mark as processed since call's arguments have to be processed.
delayed_worklist->push(n);
// Check if a call returns an object.
- if (n->as_Call()->returns_pointer() &&
- n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
+ if ((n->as_Call()->returns_pointer() &&
+ n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||
+ (n->is_CallStaticJava() &&
+ n->as_CallStaticJava()->is_boxing_method())) {
add_call_node(n->as_Call());
}
}
@@ -387,8 +403,8 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de
case Op_ConNKlass: {
// assume all oop constants globally escape except for null
PointsToNode::EscapeState es;
- if (igvn->type(n) == TypePtr::NULL_PTR ||
- igvn->type(n) == TypeNarrowOop::NULL_PTR) {
+ const Type* t = igvn->type(n);
+ if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
es = PointsToNode::NoEscape;
} else {
es = PointsToNode::GlobalEscape;
@@ -468,6 +484,9 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de
Node* adr = n->in(MemNode::Address);
const Type *adr_type = igvn->type(adr);
adr_type = adr_type->make_ptr();
+ if (adr_type == NULL) {
+ break; // skip dead nodes
+ }
if (adr_type->isa_oopptr() ||
(opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
(adr_type == TypeRawPtr::NOTNULL &&
@@ -660,14 +679,18 @@ void ConnectionGraph::add_final_edges(Node *n) {
case Op_GetAndSetP:
case Op_GetAndSetN: {
Node* adr = n->in(MemNode::Address);
- if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) {
- const Type* t = _igvn->type(n);
- if (t->make_ptr() != NULL) {
- add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
- }
- }
const Type *adr_type = _igvn->type(adr);
adr_type = adr_type->make_ptr();
+#ifdef ASSERT
+ if (adr_type == NULL) {
+ n->dump(1);
+ assert(adr_type != NULL, "dead node should not be on list");
+ break;
+ }
+#endif
+ if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) {
+ add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
+ }
if (adr_type->isa_oopptr() ||
(opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
(adr_type == TypeRawPtr::NOTNULL &&
@@ -797,6 +820,18 @@ void ConnectionGraph::add_call_node(CallNode* call) {
// Returns a newly allocated unescaped object.
add_java_object(call, PointsToNode::NoEscape);
ptnode_adr(call_idx)->set_scalar_replaceable(false);
+ } else if (meth->is_boxing_method()) {
+ // Returns boxing object
+ PointsToNode::EscapeState es;
+ vmIntrinsics::ID intr = meth->intrinsic_id();
+ if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
+ // It does not escape if object is always allocated.
+ es = PointsToNode::NoEscape;
+ } else {
+ // It escapes globally if object could be loaded from cache.
+ es = PointsToNode::GlobalEscape;
+ }
+ add_java_object(call, es);
} else {
BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
call_analyzer->copy_dependencies(_compile->dependencies());
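
The Float/Double special case above exists because of the boxing caches: Integer.valueOf and friends may return a shared, globally visible instance for small values, whereas Float.valueOf/Double.valueOf always allocate. A loose standalone model of why the cached case must be GlobalEscape (g_cache stands in for java.lang.Integer.IntegerCache; not HotSpot code):

#include <array>

struct Box { int v; };

// Shared cache of small boxes, analogous to Integer.IntegerCache[-128..127].
static std::array<Box, 256> g_cache;

const Box* int_valueOf(int v) {
  if (v >= -128 && v <= 127)
    return &g_cache[v + 128];  // shared object: escapes globally
  return new Box{v};           // fresh allocation: candidate for NoEscape
}
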
@@ -898,6 +933,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call) {
(call->as_CallLeaf()->_name != NULL &&
(strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ||
+ strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
@@ -943,6 +979,9 @@ void ConnectionGraph::process_call_arguments(CallNode *call) {
assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
ciMethod* meth = call->as_CallJava()->method();
+ if ((meth != NULL) && meth->is_boxing_method()) {
+ break; // Boxing methods do not modify any oops.
+ }
BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
// fall-through if not a Java method or no analyzer information
if (call_analyzer != NULL) {
@@ -1791,9 +1830,8 @@ Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
jobj2->ideal_node()->is_Con()) {
// Klass or String constants compare. Need to be careful with
// compressed pointers - compare types of ConN and ConP instead of nodes.
- const Type* t1 = jobj1->ideal_node()->bottom_type()->make_ptr();
- const Type* t2 = jobj2->ideal_node()->bottom_type()->make_ptr();
- assert(t1 != NULL && t2 != NULL, "sanity");
+ const Type* t1 = jobj1->ideal_node()->get_ptr_type();
+ const Type* t2 = jobj2->ideal_node()->get_ptr_type();
if (t1->make_ptr() == t2->make_ptr()) {
return _pcmp_eq;
} else {
@@ -2165,7 +2203,7 @@ Node* ConnectionGraph::get_addp_base(Node *addp) {
int opcode = uncast_base->Opcode();
assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
- (uncast_base->is_Mem() && uncast_base->bottom_type() == TypeRawPtr::NOTNULL) ||
+ (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
(uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
}
return base;
@@ -2744,6 +2782,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
// so it could be eliminated if it has no uses.
alloc->as_Allocate()->_is_scalar_replaceable = true;
}
+ if (alloc->is_CallStaticJava()) {
+ // Set the scalar_replaceable flag for boxing method
+ // so it could be eliminated if it has no uses.
+ alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
+ }
continue;
}
if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
@@ -2782,6 +2825,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
// so it could be eliminated.
alloc->as_Allocate()->_is_scalar_replaceable = true;
}
+ if (alloc->is_CallStaticJava()) {
+ // Set the scalar_replaceable flag for boxing method
+ // so it could be eliminated.
+ alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
+ }
set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
// in order for an object to be scalar-replaceable, it must be:
// - a direct allocation (not a call returning an object)
@@ -2911,7 +2959,9 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
// Load/store to instance's field
memnode_worklist.append_if_missing(use);
} else if (use->is_MemBar()) {
- memnode_worklist.append_if_missing(use);
+ if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
+ memnode_worklist.append_if_missing(use);
+ }
} else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
Node* addp2 = find_second_addp(use, n);
if (addp2 != NULL) {
@@ -3028,7 +3078,9 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
continue;
memnode_worklist.append_if_missing(use);
} else if (use->is_MemBar()) {
- memnode_worklist.append_if_missing(use);
+ if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
+ memnode_worklist.append_if_missing(use);
+ }
#ifdef ASSERT
} else if(use->is_Mem()) {
assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
@@ -3264,7 +3316,12 @@ void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
if (ptn == NULL || !ptn->is_JavaObject())
continue;
PointsToNode::EscapeState es = ptn->escape_state();
- if (ptn->ideal_node()->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
+ if ((es != PointsToNode::NoEscape) && !Verbose) {
+ continue;
+ }
+ Node* n = ptn->ideal_node();
+ if (n->is_Allocate() || (n->is_CallStaticJava() &&
+ n->as_CallStaticJava()->is_boxing_method())) {
if (first) {
tty->cr();
tty->print("======== Connection graph for ");
diff --git a/src/share/vm/opto/gcm.cpp b/src/share/vm/opto/gcm.cpp
index 53da37d6f..0c54792a7 100644
--- a/src/share/vm/opto/gcm.cpp
+++ b/src/share/vm/opto/gcm.cpp
@@ -69,7 +69,7 @@
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
// Set basic block of n, Add n to b,
- _bbs.map(n->_idx, b);
+ map_node_to_block(n, b);
b->add_inst(n);
// After Matching, nearly any old Node may have projections trailing it.
@@ -78,11 +78,12 @@ void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* use = n->fast_out(i);
if (use->is_Proj()) {
- Block* buse = _bbs[use->_idx];
+ Block* buse = get_block_for_node(use);
if (buse != b) { // In wrong block?
- if (buse != NULL)
+ if (buse != NULL) {
buse->find_remove(use); // Remove from wrong block
- _bbs.map(use->_idx, b); // Re-insert in this block
+ }
+ map_node_to_block(use, b);
b->add_inst(use);
}
}
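
A recurring theme in this file (and the rest of the patch) is replacing raw _bbs[n->_idx] indexing with named PhaseCFG accessors. Roughly, the interface being introduced looks like this (sketch only; HotSpot backs it with a Block_Array indexed by node _idx, not a hash map):

#include <unordered_map>

struct Node;
struct Block;

class NodeToBlockMap {
  std::unordered_map<const Node*, Block*> _map;
 public:
  Block* get_block_for_node(const Node* n) const {
    auto it = _map.find(n);
    return it == _map.end() ? nullptr : it->second;
  }
  bool has_block(const Node* n) const { return get_block_for_node(n) != nullptr; }
  void map_node_to_block(const Node* n, Block* b) { _map[n] = b; }
  void unmap_node_from_block(const Node* n) { _map.erase(n); }
};
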
@@ -100,16 +101,16 @@ void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
if (p != NULL && p != n) { // Control from a block projection?
assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
// Find trailing Region
- Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
+ Block *pb = get_block_for_node(in0); // Block-projection already has basic block
uint j = 0;
if (pb->_num_succs != 1) { // More than 1 successor?
// Search for successor
- uint max = pb->_nodes.size();
+ uint max = pb->number_of_nodes();
assert( max > 1, "" );
uint start = max - pb->_num_succs;
// Find which output path belongs to projection
for (j = start; j < max; j++) {
- if( pb->_nodes[j] == in0 )
+ if( pb->get_node(j) == in0 )
break;
}
assert( j < max, "must find" );
@@ -123,26 +124,30 @@ void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
-void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) {
+void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
// Allocate node stack of size C->unique()+8 to avoid frequent realloc
- GrowableArray <Node *> spstack(C->unique()+8);
+ GrowableArray <Node *> spstack(C->unique() + 8);
spstack.push(_root);
- while ( spstack.is_nonempty() ) {
- Node *n = spstack.pop();
- if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
- if( n->pinned() && !_bbs.lookup(n->_idx) ) { // Pinned? Nail it down!
- assert( n->in(0), "pinned Node must have Control" );
+ while (spstack.is_nonempty()) {
+ Node* node = spstack.pop();
+ if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
+ if (node->pinned() && !has_block(node)) { // Pinned? Nail it down!
+ assert(node->in(0), "pinned Node must have Control");
// Before setting block replace block_proj control edge
- replace_block_proj_ctrl(n);
- Node *input = n->in(0);
- while( !input->is_block_start() )
+ replace_block_proj_ctrl(node);
+ Node* input = node->in(0);
+ while (!input->is_block_start()) {
input = input->in(0);
- Block *b = _bbs[input->_idx]; // Basic block of controlling input
- schedule_node_into_block(n, b);
+ }
+ Block* block = get_block_for_node(input); // Basic block of controlling input
+ schedule_node_into_block(node, block);
}
- for( int i = n->req() - 1; i >= 0; --i ) { // For all inputs
- if( n->in(i) != NULL )
- spstack.push(n->in(i));
+
+ // process all inputs that are non NULL
+ for (int i = node->req() - 1; i >= 0; --i) {
+ if (node->in(i) != NULL) {
+ spstack.push(node->in(i));
+ }
}
}
}
@@ -152,7 +157,7 @@ void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) {
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest
// input observed until b2.
-static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
+static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
if (b1 == NULL) return;
assert(b1->_dom_depth < b2->_dom_depth, "sanity");
Block* tmp = b2;
@@ -165,7 +170,7 @@ static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
for (uint j=0; j<n->len(); j++) { // For all inputs
Node* inn = n->in(j); // Get input
if (inn == NULL) continue; // Ignore NULL, missing inputs
- Block* inb = bbs[inn->_idx];
+ Block* inb = cfg->get_block_for_node(inn);
tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
inn->dump();
@@ -177,20 +182,20 @@ static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
}
#endif
-static Block* find_deepest_input(Node* n, Block_Array &bbs) {
+static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
// Find the last input dominated by all other inputs.
Block* deepb = NULL; // Deepest block so far
int deepb_dom_depth = 0;
for (uint k = 0; k < n->len(); k++) { // For all inputs
Node* inn = n->in(k); // Get input
if (inn == NULL) continue; // Ignore NULL, missing inputs
- Block* inb = bbs[inn->_idx];
+ Block* inb = cfg->get_block_for_node(inn);
assert(inb != NULL, "must already have scheduled this input");
if (deepb_dom_depth < (int) inb->_dom_depth) {
// The new inb must be dominated by the previous deepb.
// The various inputs must be linearly ordered in the dom
// tree, or else there will not be a unique deepest block.
- DEBUG_ONLY(assert_dom(deepb, inb, n, bbs));
+ DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
deepb = inb; // Save deepest block
deepb_dom_depth = deepb->_dom_depth;
}
@@ -206,32 +211,29 @@ static Block* find_deepest_input(Node* n, Block_Array &bbs) {
// which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
// Allocate stack with enough space to avoid frequent realloc
- Node_Stack nstack(roots.Size() + 8); // (unique >> 1) + 24 from Java2D stats
- // roots.push(_root); _root will be processed among C->top() inputs
+ Node_Stack nstack(roots.Size() + 8);
+ // _root will be processed among C->top() inputs
roots.push(C->top());
visited.set(C->top()->_idx);
while (roots.size() != 0) {
// Use local variables nstack_top_n & nstack_top_i to cache values
// on stack's top.
- Node *nstack_top_n = roots.pop();
- uint nstack_top_i = 0;
-//while_nstack_nonempty:
- while (true) {
- // Get parent node and next input's index from stack's top.
- Node *n = nstack_top_n;
- uint i = nstack_top_i;
+ Node* parent_node = roots.pop();
+ uint input_index = 0;
- if (i == 0) {
+ while (true) {
+ if (input_index == 0) {
// Fixup some control. Constants without control get attached
// to root and nodes that use is_block_proj() nodes should be attached
// to the region that starts their block.
- const Node *in0 = n->in(0);
- if (in0 != NULL) { // Control-dependent?
- replace_block_proj_ctrl(n);
- } else { // n->in(0) == NULL
- if (n->req() == 1) { // This guy is a constant with NO inputs?
- n->set_req(0, _root);
+ const Node* control_input = parent_node->in(0);
+ if (control_input != NULL) {
+ replace_block_proj_ctrl(parent_node);
+ } else {
+ // Is a constant with NO inputs?
+ if (parent_node->req() == 1) {
+ parent_node->set_req(0, _root);
}
}
}
@@ -240,37 +242,47 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
// input is already in a block we quit following inputs (to avoid
// cycles). Instead we put that Node on a worklist to be handled
// later (since ITS inputs may not have a block yet).
- bool done = true; // Assume all n's inputs will be processed
- while (i < n->len()) { // For all inputs
- Node *in = n->in(i); // Get input
- ++i;
- if (in == NULL) continue; // Ignore NULL, missing inputs
+
+ // Assume all n's inputs will be processed
+ bool done = true;
+
+ while (input_index < parent_node->len()) {
+ Node* in = parent_node->in(input_index++);
+ if (in == NULL) {
+ continue;
+ }
+
int is_visited = visited.test_set(in->_idx);
- if (!_bbs.lookup(in->_idx)) { // Missing block selection?
+ if (!has_block(in)) {
if (is_visited) {
- // assert( !visited.test(in->_idx), "did not schedule early" );
return false;
}
- nstack.push(n, i); // Save parent node and next input's index.
- nstack_top_n = in; // Process current input now.
- nstack_top_i = 0;
- done = false; // Not all n's inputs processed.
- break; // continue while_nstack_nonempty;
- } else if (!is_visited) { // Input not yet visited?
- roots.push(in); // Visit this guy later, using worklist
+ // Save parent node and next input's index.
+ nstack.push(parent_node, input_index);
+ // Process current input now.
+ parent_node = in;
+ input_index = 0;
+ // Not all n's inputs processed.
+ done = false;
+ break;
+ } else if (!is_visited) {
+ // Visit this guy later, using worklist
+ roots.push(in);
}
}
+
if (done) {
// All of n's inputs have been processed, complete post-processing.
// Some instructions are pinned into a block. These include Region,
// Phi, Start, Return, and other control-dependent instructions and
// any projections which depend on them.
- if (!n->pinned()) {
+ if (!parent_node->pinned()) {
// Set earliest legal block.
- _bbs.map(n->_idx, find_deepest_input(n, _bbs));
+ Block* earliest_block = find_deepest_input(parent_node, this);
+ map_node_to_block(parent_node, earliest_block);
} else {
- assert(_bbs[n->_idx] == _bbs[n->in(0)->_idx], "Pinned Node should be at the same block as its control edge");
+ assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
}
if (nstack.is_empty()) {
@@ -279,12 +291,12 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
break;
}
// Get saved parent node and next input's index.
- nstack_top_n = nstack.node();
- nstack_top_i = nstack.index();
+ parent_node = nstack.node();
+ input_index = nstack.index();
nstack.pop();
- } // if (done)
- } // while (true)
- } // while (roots.size() != 0)
+ }
+ }
+ }
return true;
}
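
schedule_early places every unpinned node in the shallowest block where it is legal, which is the deepest dominator-tree block among the blocks of its inputs (the inputs are guaranteed to lie on a single dominator chain, as the assert_dom check verifies). A standalone restatement of find_deepest_input:

#include <vector>

struct Blk {
  Blk* idom;       // immediate dominator
  int  dom_depth;  // depth in the dominator tree
};

// Deepest input block = earliest legal placement for the node.
Blk* deepest_input_block(const std::vector<Blk*>& input_blocks) {
  Blk* deepest = nullptr;
  for (Blk* b : input_blocks) {
    if (b == nullptr) continue;                      // missing input
    if (deepest == nullptr || b->dom_depth > deepest->dom_depth)
      deepest = b;                                   // dominates old deepest
  }
  return deepest;
}
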
@@ -316,8 +328,8 @@ Block* Block::dom_lca(Block* LCA) {
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use. If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
-static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) {
- Block* buse = bbs[use->_idx];
+static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
+ Block* buse = cfg->get_block_for_node(use);
if (buse == NULL) return LCA; // Unused killing Projs have no use block
if (!use->is_Phi()) return buse->dom_lca(LCA);
uint pmax = use->req(); // Number of Phi inputs
@@ -332,7 +344,7 @@ static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array
// more than once.
for (uint j=1; j<pmax; j++) { // For all inputs
if (use->in(j) == def) { // Found matching input?
- Block* pred = bbs[buse->pred(j)->_idx];
+ Block* pred = cfg->get_block_for_node(buse->pred(j));
LCA = pred->dom_lca(LCA);
}
}
@@ -345,8 +357,7 @@ static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array
// which are marked with the given index. Return the LCA (in the dom tree)
// of all marked blocks. If there are none marked, return the original
// LCA.
-static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
- Block* early, Block_Array &bbs) {
+static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
Block_List worklist;
worklist.push(LCA);
while (worklist.size() > 0) {
@@ -369,7 +380,7 @@ static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
} else {
// Keep searching through this block's predecessors.
for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
- Block* mid_parent = bbs[ mid->pred(j)->_idx ];
+ Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
worklist.push(mid_parent);
}
}
@@ -387,7 +398,7 @@ static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
-static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
+static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
Node* base;
Node* index;
Node* store = load->in(MemNode::Memory);
@@ -415,12 +426,12 @@ static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
Block* deepb = NULL; // Deepest block so far
int deepb_dom_depth = 0;
for (int i = 0; i < mem_inputs_length; i++) {
- Block* inb = bbs[mem_inputs[i]->_idx];
+ Block* inb = cfg->get_block_for_node(mem_inputs[i]);
if (deepb_dom_depth < (int) inb->_dom_depth) {
// The new inb must be dominated by the previous deepb.
// The various inputs must be linearly ordered in the dom
// tree, or else there will not be a unique deepest block.
- DEBUG_ONLY(assert_dom(deepb, inb, load, bbs));
+ DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
deepb = inb; // Save deepest block
deepb_dom_depth = deepb->_dom_depth;
}
@@ -491,14 +502,14 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
// and other inputs are first available. (Computed by schedule_early.)
// For normal loads, 'early' is the shallowest place (dom graph wise)
// to look for anti-deps between this load and any store.
- Block* early = _bbs[load_index];
+ Block* early = get_block_for_node(load);
// If we are subsuming loads, compute an "early" block that only considers
// memory or address inputs. This block may be different than the
// schedule_early block in that it could be at an even shallower depth in the
// dominator tree, and allow for a broader discovery of anti-dependences.
if (C->subsume_loads()) {
- early = memory_early_block(load, early, _bbs);
+ early = memory_early_block(load, early, this);
}
ResourceArea *area = Thread::current()->resource_area();
@@ -622,7 +633,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
// or else observe that 'store' is all the way up in the
// earliest legal block for 'load'. In the latter case,
// immediately insert an anti-dependence edge.
- Block* store_block = _bbs[store->_idx];
+ Block* store_block = get_block_for_node(store);
assert(store_block != NULL, "unused killing projections skipped above");
if (store->is_Phi()) {
@@ -640,7 +651,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
if (store->in(j) == mem) { // Found matching input?
DEBUG_ONLY(found_match = true);
- Block* pred_block = _bbs[store_block->pred(j)->_idx];
+ Block* pred_block = get_block_for_node(store_block->pred(j));
if (pred_block != early) {
// If any predecessor of the Phi matches the load's "early block",
// we do not need a precedence edge between the Phi and 'load'
@@ -714,7 +725,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
// preventing the load from sinking past any block containing
// a store that may invalidate the memory state required by 'load'.
if (must_raise_LCA)
- LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs);
+ LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
if (LCA == early) return LCA;
// Insert anti-dependence edges from 'load' to each store
@@ -723,7 +734,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
if (LCA->raise_LCA_mark() == load_index) {
while (non_early_stores.size() > 0) {
Node* store = non_early_stores.pop();
- Block* store_block = _bbs[store->_idx];
+ Block* store_block = get_block_for_node(store);
if (store_block == LCA) {
// add anti_dependence from store to load in its own block
assert(store != load->in(0), "dependence cycle found");
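
insert_anti_dependences keeps a load from sinking below a store that could overwrite the memory it needs: offending store blocks are marked, and the load's LCA is raised in the dominator tree until the anti-dependences are honored. The primitive underneath, Block::dom_lca, is the usual dominator-tree LCA walk, sketched standalone here:

struct Blk {
  Blk* idom;       // immediate dominator
  int  dom_depth;  // depth in the dominator tree
};

// LCA of a and b in the dominator tree: repeatedly raise the deeper one.
Blk* dom_lca(Blk* a, Blk* b) {
  while (a != b) {
    if (a->dom_depth >= b->dom_depth)
      a = a->idom;
    else
      b = b->idom;
  }
  return a;
}
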
@@ -757,7 +768,7 @@ private:
public:
// Constructor for the iterator
- Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs);
+ Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);
// Postincrement operator to iterate over the nodes
Node *next();
@@ -765,12 +776,12 @@ public:
private:
VectorSet &_visited;
Node_List &_stack;
- Block_Array &_bbs;
+ PhaseCFG &_cfg;
};
// Constructor for the Node_Backward_Iterator
-Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs )
- : _visited(visited), _stack(stack), _bbs(bbs) {
+Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
+ : _visited(visited), _stack(stack), _cfg(cfg) {
// The stack should contain exactly the root
stack.clear();
stack.push(root);
@@ -800,8 +811,8 @@ Node *Node_Backward_Iterator::next() {
_visited.set(self->_idx);
// Now schedule all uses as late as possible.
- uint src = self->is_Proj() ? self->in(0)->_idx : self->_idx;
- uint src_rpo = _bbs[src]->_rpo;
+ const Node* src = self->is_Proj() ? self->in(0) : self;
+ uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
// Schedule all nodes in a post-order visit
Node *unvisited = NULL; // Unvisited anti-dependent Node, if any
@@ -817,7 +828,7 @@ Node *Node_Backward_Iterator::next() {
// do not traverse backward control edges
Node *use = n->is_Proj() ? n->in(0) : n;
- uint use_rpo = _bbs[use->_idx]->_rpo;
+ uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
if ( use_rpo < src_rpo )
continue;
@@ -849,13 +860,13 @@ Node *Node_Backward_Iterator::next() {
//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
-void PhaseCFG::ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack) {
+void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
if (trace_opto_pipelining())
tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif
- Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
+ Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
Node *n;
// Walk over all the nodes from last to first
@@ -872,31 +883,34 @@ void PhaseCFG::partial_latency_of_defs(Node *n) {
// Set the latency for this instruction
#ifndef PRODUCT
if (trace_opto_pipelining()) {
- tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
- n->_idx, _node_latency->at_grow(n->_idx));
+ tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
dump();
}
#endif
- if (n->is_Proj())
+ if (n->is_Proj()) {
n = n->in(0);
+ }
- if (n->is_Root())
+ if (n->is_Root()) {
return;
+ }
uint nlen = n->len();
- uint use_latency = _node_latency->at_grow(n->_idx);
- uint use_pre_order = _bbs[n->_idx]->_pre_order;
+ uint use_latency = get_latency_for_node(n);
+ uint use_pre_order = get_block_for_node(n)->_pre_order;
- for ( uint j=0; j<nlen; j++ ) {
+ for (uint j = 0; j < nlen; j++) {
Node *def = n->in(j);
- if (!def || def == n)
+ if (!def || def == n) {
continue;
+ }
// Walk backwards thru projections
- if (def->is_Proj())
+ if (def->is_Proj()) {
def = def->in(0);
+ }
#ifndef PRODUCT
if (trace_opto_pipelining()) {
@@ -906,25 +920,23 @@ void PhaseCFG::partial_latency_of_defs(Node *n) {
#endif
// If the defining block is not known, assume it is ok
- Block *def_block = _bbs[def->_idx];
+ Block *def_block = get_block_for_node(def);
uint def_pre_order = def_block ? def_block->_pre_order : 0;
- if ( (use_pre_order < def_pre_order) ||
- (use_pre_order == def_pre_order && n->is_Phi()) )
+ if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
continue;
+ }
uint delta_latency = n->latency(j);
uint current_latency = delta_latency + use_latency;
- if (_node_latency->at_grow(def->_idx) < current_latency) {
- _node_latency->at_put_grow(def->_idx, current_latency);
+ if (get_latency_for_node(def) < current_latency) {
+ set_latency_for_node(def, current_latency);
}
#ifndef PRODUCT
if (trace_opto_pipelining()) {
- tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
- use_latency, j, delta_latency, current_latency, def->_idx,
- _node_latency->at_grow(def->_idx));
+ tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
}
#endif
}
@@ -934,10 +946,11 @@ void PhaseCFG::partial_latency_of_defs(Node *n) {
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
// If self-reference, return no latency
- if (use == n || use->is_Root())
+ if (use == n || use->is_Root()) {
return 0;
+ }
- uint def_pre_order = _bbs[def->_idx]->_pre_order;
+ uint def_pre_order = get_block_for_node(def)->_pre_order;
uint latency = 0;
// If the use is not a projection, then it is simple...
@@ -949,7 +962,7 @@ int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
}
#endif
- uint use_pre_order = _bbs[use->_idx]->_pre_order;
+ uint use_pre_order = get_block_for_node(use)->_pre_order;
if (use_pre_order < def_pre_order)
return 0;
@@ -958,7 +971,7 @@ int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
return 0;
uint nlen = use->len();
- uint nl = _node_latency->at_grow(use->_idx);
+ uint nl = get_latency_for_node(use);
for ( uint j=0; j<nlen; j++ ) {
if (use->in(j) == n) {
@@ -993,8 +1006,7 @@ void PhaseCFG::latency_from_uses(Node *n) {
// Set the latency for this instruction
#ifndef PRODUCT
if (trace_opto_pipelining()) {
- tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
- n->_idx, _node_latency->at_grow(n->_idx));
+ tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
dump();
}
#endif
@@ -1007,7 +1019,7 @@ void PhaseCFG::latency_from_uses(Node *n) {
if (latency < l) latency = l;
}
- _node_latency->at_put_grow(n->_idx, latency);
+ set_latency_for_node(n, latency);
}
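
The latency accessors introduced here wrap the same computation as before: a node's latency is the maximum, over all of its uses, of the use's latency plus the latency contributed by that edge. In isolation:

#include <algorithm>
#include <utility>
#include <vector>

// latency(n) = max over uses u of latency(u) + edge_latency(n -> u).
unsigned latency_from_uses(
    const std::vector<std::pair<unsigned, unsigned>>& uses) {
  unsigned latency = 0;
  for (const auto& [use_latency, edge_latency] : uses)
    latency = std::max(latency, use_latency + edge_latency);
  return latency;
}
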
//------------------------------hoist_to_cheaper_block-------------------------
@@ -1017,11 +1029,11 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
const double delta = 1+PROB_UNLIKELY_MAG(4);
Block* least = LCA;
double least_freq = least->_freq;
- uint target = _node_latency->at_grow(self->_idx);
- uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
- uint end_latency = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
+ uint target = get_latency_for_node(self);
+ uint start_latency = get_latency_for_node(LCA->head());
+ uint end_latency = get_latency_for_node(LCA->get_node(LCA->end_idx()));
bool in_latency = (target <= start_latency);
- const Block* root_block = _bbs[_root->_idx];
+ const Block* root_block = get_block_for_node(_root);
// Turn off latency scheduling if scheduling is just plain off
if (!C->do_scheduling())
@@ -1036,14 +1048,13 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
#ifndef PRODUCT
if (trace_opto_pipelining()) {
- tty->print("# Find cheaper block for latency %d: ",
- _node_latency->at_grow(self->_idx));
+ tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
self->dump();
tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
LCA->_pre_order,
- LCA->_nodes[0]->_idx,
+ LCA->head()->_idx,
start_latency,
- LCA->_nodes[LCA->end_idx()]->_idx,
+ LCA->get_node(LCA->end_idx())->_idx,
end_latency,
least_freq);
}
@@ -1066,14 +1077,14 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
if (mach && LCA == root_block)
break;
- uint start_lat = _node_latency->at_grow(LCA->_nodes[0]->_idx);
+ uint start_lat = get_latency_for_node(LCA->head());
uint end_idx = LCA->end_idx();
- uint end_lat = _node_latency->at_grow(LCA->_nodes[end_idx]->_idx);
+ uint end_lat = get_latency_for_node(LCA->get_node(end_idx));
double LCA_freq = LCA->_freq;
#ifndef PRODUCT
if (trace_opto_pipelining()) {
tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
- LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
+ LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
}
#endif
cand_cnt++;
@@ -1110,7 +1121,7 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
}
#endif
- _node_latency->at_put_grow(self->_idx, end_latency);
+ set_latency_for_node(self, end_latency);
partial_latency_of_defs(self);
}
@@ -1129,12 +1140,12 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
tty->print("\n#---- schedule_late ----\n");
#endif
- Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
+ Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
Node *self;
// Walk over all the nodes from last to first
while (self = iter.next()) {
- Block* early = _bbs[self->_idx]; // Earliest legal placement
+ Block* early = get_block_for_node(self); // Earliest legal placement
if (self->is_top()) {
// Top node goes in bb #2 with other constants.
@@ -1182,7 +1193,7 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
// For all uses, find LCA
Node* use = self->fast_out(i);
- LCA = raise_LCA_above_use(LCA, use, self, _bbs);
+ LCA = raise_LCA_above_use(LCA, use, self, this);
}
} // (Hide defs of imax, i from rest of block.)
@@ -1190,7 +1201,7 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
// requirement for correctness but it reduces useless
// interference between temps and other nodes.
if (mach != NULL && mach->is_MachTemp()) {
- _bbs.map(self->_idx, LCA);
+ map_node_to_block(self, LCA);
LCA->add_inst(self);
continue;
}
@@ -1256,7 +1267,7 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
} // end ScheduleLate
//------------------------------GlobalCodeMotion-------------------------------
-void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_list ) {
+void PhaseCFG::global_code_motion() {
ResourceMark rm;
#ifndef PRODUCT
@@ -1265,22 +1276,23 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_
}
#endif
- // Initialize the bbs.map for things on the proj_list
- uint i;
- for( i=0; i < proj_list.size(); i++ )
- _bbs.map(proj_list[i]->_idx, NULL);
+ // Initialize the node to block mapping for things on the proj_list
+ for (uint i = 0; i < _matcher.number_of_projections(); i++) {
+ unmap_node_from_block(_matcher.get_projection(i));
+ }
// Set the basic block for Nodes pinned into blocks
- Arena *a = Thread::current()->resource_area();
- VectorSet visited(a);
- schedule_pinned_nodes( visited );
+ Arena* arena = Thread::current()->resource_area();
+ VectorSet visited(arena);
+ schedule_pinned_nodes(visited);
// Find the earliest Block any instruction can be placed in. Some
// instructions are pinned into Blocks. Unpinned instructions can
// appear in last block in which all their inputs occur.
visited.Clear();
- Node_List stack(a);
- stack.map( (unique >> 1) + 16, NULL); // Pre-grow the list
+ Node_List stack(arena);
+ // Pre-grow the list
+ stack.map((C->unique() >> 1) + 16, NULL);
if (!schedule_early(visited, stack)) {
// Bailout without retry
C->record_method_not_compilable("early schedule failed");
@@ -1288,29 +1300,25 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_
}
// Build Def-Use edges.
- proj_list.push(_root); // Add real root as another root
- proj_list.pop();
-
// Compute the latency information (via backwards walk) for all the
// instructions in the graph
_node_latency = new GrowableArray<uint>(); // resource_area allocation
- if( C->do_scheduling() )
- ComputeLatenciesBackwards(visited, stack);
+ if (C->do_scheduling()) {
+ compute_latencies_backwards(visited, stack);
+ }
// Now schedule all codes as LATE as possible. This is the LCA in the
// dominator tree of all USES of a value. Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
// ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
schedule_late(visited, stack);
- if( C->failing() ) {
+ if (C->failing()) {
// schedule_late fails only when graph is incorrect.
assert(!VerifyGraphEdges, "verification should have failed");
return;
}
- unique = C->unique();
-
#ifndef PRODUCT
if (trace_opto_pipelining()) {
tty->print("\n---- Detect implicit null checks ----\n");
@@ -1333,10 +1341,11 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_
// By reversing the loop direction we get a very minor gain on mpegaudio.
// Feel free to revert to a forward loop for clarity.
// for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
- for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
- Node *proj = matcher._null_check_tests[i ];
- Node *val = matcher._null_check_tests[i+1];
- _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons);
+ for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
+ Node* proj = _matcher._null_check_tests[i];
+ Node* val = _matcher._null_check_tests[i + 1];
+ Block* block = get_block_for_node(proj);
+ implicit_null_check(block, proj, val, allowed_reasons);
// The implicit_null_check will only perform the transformation
// if the null branch is truly uncommon, *and* it leads to an
// uncommon trap. Combined with the too_many_traps guards
@@ -1353,11 +1362,11 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_
// Schedule locally. Right now a simple topological sort.
// Later, do a real latency aware scheduler.
- uint max_idx = C->unique();
- GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
+ GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
visited.Clear();
- for (i = 0; i < _num_blocks; i++) {
- if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
+ for (uint i = 0; i < number_of_blocks(); i++) {
+ Block* block = get_block(i);
+ if (!schedule_local(block, ready_cnt, visited)) {
if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
C->record_method_not_compilable("local schedule failed");
}
@@ -1367,14 +1376,17 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_
// If we inserted any instructions between a Call and its CatchNode,
// clone the instructions on all paths below the Catch.
- for( i=0; i < _num_blocks; i++ )
- _blocks[i]->call_catch_cleanup(_bbs, C);
+ for (uint i = 0; i < number_of_blocks(); i++) {
+ Block* block = get_block(i);
+ call_catch_cleanup(block);
+ }
#ifndef PRODUCT
if (trace_opto_pipelining()) {
tty->print("\n---- After GlobalCodeMotion ----\n");
- for (uint i = 0; i < _num_blocks; i++) {
- _blocks[i]->dump();
+ for (uint i = 0; i < number_of_blocks(); i++) {
+ Block* block = get_block(i);
+ block->dump();
}
}
#endif
@@ -1382,10 +1394,29 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_
_node_latency = (GrowableArray<uint> *)0xdeadbeef;
}
+bool PhaseCFG::do_global_code_motion() {
+
+ build_dominator_tree();
+ if (C->failing()) {
+ return false;
+ }
+
+ NOT_PRODUCT( C->verify_graph_edges(); )
+
+ estimate_block_frequency();
+
+ global_code_motion();
+
+ if (C->failing()) {
+ return false;
+ }
+
+ return true;
+}
//------------------------------Estimate_Block_Frequency-----------------------
// Estimate block frequencies based on IfNode probabilities.
-void PhaseCFG::Estimate_Block_Frequency() {
+void PhaseCFG::estimate_block_frequency() {
// Force conditional branches leading to uncommon traps to be unlikely,
// not because we get to the uncommon_trap with less relative frequency,
@@ -1393,18 +1424,20 @@ void PhaseCFG::Estimate_Block_Frequency() {
// there once.
if (C->do_freq_based_layout()) {
Block_List worklist;
- Block* root_blk = _blocks[0];
+ Block* root_blk = get_block(0);
for (uint i = 1; i < root_blk->num_preds(); i++) {
- Block *pb = _bbs[root_blk->pred(i)->_idx];
+ Block *pb = get_block_for_node(root_blk->pred(i));
if (pb->has_uncommon_code()) {
worklist.push(pb);
}
}
while (worklist.size() > 0) {
Block* uct = worklist.pop();
- if (uct == _broot) continue;
+ if (uct == get_root_block()) {
+ continue;
+ }
for (uint i = 1; i < uct->num_preds(); i++) {
- Block *pb = _bbs[uct->pred(i)->_idx];
+ Block *pb = get_block_for_node(uct->pred(i));
if (pb->_num_succs == 1) {
worklist.push(pb);
} else if (pb->num_fall_throughs() == 2) {
@@ -1426,14 +1459,14 @@ void PhaseCFG::Estimate_Block_Frequency() {
_root_loop->scale_freq();
// Save outmost loop frequency for LRG frequency threshold
- _outer_loop_freq = _root_loop->outer_loop_freq();
+ _outer_loop_frequency = _root_loop->outer_loop_freq();
// force paths ending at uncommon traps to be infrequent
if (!C->do_freq_based_layout()) {
Block_List worklist;
- Block* root_blk = _blocks[0];
+ Block* root_blk = get_block(0);
for (uint i = 1; i < root_blk->num_preds(); i++) {
- Block *pb = _bbs[root_blk->pred(i)->_idx];
+ Block *pb = get_block_for_node(root_blk->pred(i));
if (pb->has_uncommon_code()) {
worklist.push(pb);
}
@@ -1442,7 +1475,7 @@ void PhaseCFG::Estimate_Block_Frequency() {
Block* uct = worklist.pop();
uct->_freq = PROB_MIN;
for (uint i = 1; i < uct->num_preds(); i++) {
- Block *pb = _bbs[uct->pred(i)->_idx];
+ Block *pb = get_block_for_node(uct->pred(i));
if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
worklist.push(pb);
}
@@ -1451,8 +1484,8 @@ void PhaseCFG::Estimate_Block_Frequency() {
}
#ifdef ASSERT
- for (uint i = 0; i < _num_blocks; i++ ) {
- Block *b = _blocks[i];
+ for (uint i = 0; i < number_of_blocks(); i++) {
+ Block* b = get_block(i);
assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
}
#endif
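
estimate_block_frequency ultimately assigns each block a frequency derived from branch probabilities, with uncommon-trap paths clamped to PROB_MIN and loops handled separately by the CFGLoop tree (scale_freq, seen above). For a straight-line, loop-free region the propagation rule reduces to:

#include <utility>
#include <vector>

// freq(b) = sum over predecessors p of freq(p) * prob(p takes the edge to b).
double block_frequency(
    const std::vector<std::pair<double, double>>& in_edges) {
  double freq = 0.0;
  for (const auto& [pred_freq, branch_prob] : in_edges)
    freq += pred_freq * branch_prob;
  return freq;
}
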
@@ -1476,16 +1509,16 @@ void PhaseCFG::Estimate_Block_Frequency() {
CFGLoop* PhaseCFG::create_loop_tree() {
#ifdef ASSERT
- assert( _blocks[0] == _broot, "" );
- for (uint i = 0; i < _num_blocks; i++ ) {
- Block *b = _blocks[i];
+ assert(get_block(0) == get_root_block(), "first block should be root block");
+ for (uint i = 0; i < number_of_blocks(); i++) {
+ Block* block = get_block(i);
// Check that _loop field are clear...we could clear them if not.
- assert(b->_loop == NULL, "clear _loop expected");
+ assert(block->_loop == NULL, "clear _loop expected");
// Sanity check that the RPO numbering is reflected in the _blocks array.
// It doesn't have to be for the loop tree to be built, but if it is not,
// then the blocks have been reordered since dom graph building...which
// may question the RPO numbering
- assert(b->_rpo == i, "unexpected reverse post order number");
+ assert(block->_rpo == i, "unexpected reverse post order number");
}
#endif
@@ -1495,14 +1528,14 @@ CFGLoop* PhaseCFG::create_loop_tree() {
Block_List worklist;
// Assign blocks to loops
- for(uint i = _num_blocks - 1; i > 0; i-- ) { // skip Root block
- Block *b = _blocks[i];
+ for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block
+ Block* block = get_block(i);
- if (b->head()->is_Loop()) {
- Block* loop_head = b;
+ if (block->head()->is_Loop()) {
+ Block* loop_head = block;
assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
- Block* tail = _bbs[tail_n->_idx];
+ Block* tail = get_block_for_node(tail_n);
// Defensively filter out Loop nodes for non-single-entry loops.
// For all reasonable loops, the head occurs before the tail in RPO.
@@ -1517,13 +1550,13 @@ CFGLoop* PhaseCFG::create_loop_tree() {
loop_head->_loop = nloop;
// Add to nloop so push_pred() will skip over inner loops
nloop->add_member(loop_head);
- nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs);
+ nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
while (worklist.size() > 0) {
Block* member = worklist.pop();
if (member != loop_head) {
for (uint j = 1; j < member->num_preds(); j++) {
- nloop->push_pred(member, j, worklist, _bbs);
+ nloop->push_pred(member, j, worklist, this);
}
}
}
@@ -1533,23 +1566,23 @@ CFGLoop* PhaseCFG::create_loop_tree() {
// Create a member list for each loop consisting
// of both blocks and (immediate child) loops.
- for (uint i = 0; i < _num_blocks; i++) {
- Block *b = _blocks[i];
- CFGLoop* lp = b->_loop;
+ for (uint i = 0; i < number_of_blocks(); i++) {
+ Block* block = get_block(i);
+ CFGLoop* lp = block->_loop;
if (lp == NULL) {
// Not assigned to a loop. Add it to the method's pseudo loop.
- b->_loop = root_loop;
+ block->_loop = root_loop;
lp = root_loop;
}
- if (lp == root_loop || b != lp->head()) { // loop heads are already members
- lp->add_member(b);
+ if (lp == root_loop || block != lp->head()) { // loop heads are already members
+ lp->add_member(block);
}
if (lp != root_loop) {
if (lp->parent() == NULL) {
// Not a nested loop. Make it a child of the method's pseudo loop.
root_loop->add_nested_loop(lp);
}
- if (b == lp->head()) {
+ if (block == lp->head()) {
// Add nested loop to member list of parent loop.
lp->parent()->add_member(lp);
}
@@ -1560,9 +1593,9 @@ CFGLoop* PhaseCFG::create_loop_tree() {
}
//------------------------------push_pred--------------------------------------
-void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) {
+void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
Node* pred_n = blk->pred(i);
- Block* pred = node_to_blk[pred_n->_idx];
+ Block* pred = cfg->get_block_for_node(pred_n);
CFGLoop *pred_loop = pred->_loop;
if (pred_loop == NULL) {
// Filter out blocks for non-single-entry loops.
@@ -1583,7 +1616,7 @@ void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& no
Block* pred_head = pred_loop->head();
assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
assert(pred_head != head(), "loop head in only one loop");
- push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk);
+ push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
} else {
assert(pred_loop->_parent == this && _parent == NULL, "just checking");
}
@@ -1696,7 +1729,7 @@ void CFGLoop::compute_freq() {
// Determine the probability of reaching successor 'i' from the receiver block.
float Block::succ_prob(uint i) {
int eidx = end_idx();
- Node *n = _nodes[eidx]; // Get ending Node
+ Node *n = get_node(eidx); // Get ending Node
int op = n->Opcode();
if (n->is_Mach()) {
@@ -1731,7 +1764,7 @@ float Block::succ_prob(uint i) {
float prob = n->as_MachIf()->_prob;
assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
// If succ[i] is the FALSE branch, invert path info
- if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
+ if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
return 1.0f - prob; // not taken
} else {
return prob; // taken
@@ -1743,7 +1776,7 @@ float Block::succ_prob(uint i) {
return 1.0f/_num_succs;
case Op_Catch: {
- const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+ const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
if (ci->_con == CatchProjNode::fall_through_index) {
// Fall-thru path gets the lion's share.
return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
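
Worked example for the split above, assuming PROB_UNLIKELY_MAG(5) expands to 1e-5 as it does elsewhere in opto:

    // Catch with _num_succs == 2: one fall-through, one exception edge.
    float exception_edge = 1e-5f;            // each non-fall-through path
    float fall_through   = 1.0f - 2 * 1e-5f; // 0.99998f, the lion's share
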
@@ -1780,7 +1813,7 @@ float Block::succ_prob(uint i) {
// Return the number of fall-through candidates for a block
int Block::num_fall_throughs() {
int eidx = end_idx();
- Node *n = _nodes[eidx]; // Get ending Node
+ Node *n = get_node(eidx); // Get ending Node
int op = n->Opcode();
if (n->is_Mach()) {
@@ -1804,7 +1837,7 @@ int Block::num_fall_throughs() {
case Op_Catch: {
for (uint i = 0; i < _num_succs; i++) {
- const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+ const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
if (ci->_con == CatchProjNode::fall_through_index) {
return 1;
}
@@ -1832,14 +1865,14 @@ int Block::num_fall_throughs() {
// Return true if a specific successor could be fall-through target.
bool Block::succ_fall_through(uint i) {
int eidx = end_idx();
- Node *n = _nodes[eidx]; // Get ending Node
+ Node *n = get_node(eidx); // Get ending Node
int op = n->Opcode();
if (n->is_Mach()) {
if (n->is_MachNullCheck()) {
// In theory, either side can fall through; for simplicity's sake,
// let's say only the false branch can for now.
- return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse;
+ return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
}
op = n->as_Mach()->ideal_Opcode();
}
@@ -1853,7 +1886,7 @@ bool Block::succ_fall_through(uint i) {
return true;
case Op_Catch: {
- const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+ const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
return ci->_con == CatchProjNode::fall_through_index;
}
@@ -1877,7 +1910,7 @@ bool Block::succ_fall_through(uint i) {
// Update the probability of a two-branch to be uncommon
void Block::update_uncommon_branch(Block* ub) {
int eidx = end_idx();
- Node *n = _nodes[eidx]; // Get ending Node
+ Node *n = get_node(eidx); // Get ending Node
int op = n->as_Mach()->ideal_Opcode();
@@ -1893,7 +1926,7 @@ void Block::update_uncommon_branch(Block* ub) {
// If ub is the true path, make the probability small; otherwise
// ub is the false path, so make the probability large
- bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse);
+ bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
// Get existing probability
float p = n->as_MachIf()->_prob;
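
The recurring edit in the hunks above replaces raw Block_Array indexing (_bbs[n->_idx]) with accessors on PhaseCFG. A minimal sketch of the accessor shape this diff relies on; the bodies here are an illustrative guess, not the committed implementation:

    Block* PhaseCFG::get_block_for_node(const Node* node) const {
      return _node_to_block_mapping[node->_idx];
    }
    void PhaseCFG::map_node_to_block(const Node* node, Block* block) {
      _node_to_block_mapping.map(node->_idx, block);
    }
    void PhaseCFG::unmap_node_from_block(const Node* node) {
      _node_to_block_mapping.map(node->_idx, NULL);
    }

Callers likewise go through get_block(i) and number_of_blocks() instead of touching _blocks and _num_blocks, keeping the containers private to PhaseCFG.
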
diff --git a/src/share/vm/opto/generateOptoStub.cpp b/src/share/vm/opto/generateOptoStub.cpp
index bc1c0ccf0..b390d2147 100644
--- a/src/share/vm/opto/generateOptoStub.cpp
+++ b/src/share/vm/opto/generateOptoStub.cpp
@@ -61,6 +61,7 @@ void GraphKit::gen_stub(address C_function,
JVMState* jvms = new (C) JVMState(0);
jvms->set_bci(InvocationEntryBci);
jvms->set_monoff(max_map);
+ jvms->set_scloff(max_map);
jvms->set_endoff(max_map);
{
SafePointNode *map = new (C) SafePointNode( max_map, jvms );
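
Context for the added set_scloff: a JVMState lays out its debug info as consecutive sections, and every boundary must be initialized even when a section is empty, as it is here where all offsets equal max_map. A self-contained model of the invariant (simplified, not the HotSpot class):

    struct JVMStateOffsets {
      int locoff, stkoff, monoff, scloff, endoff;
      bool well_formed() const {
        // locals <= stack <= monitors <= scalar-replaced objects <= end;
        // an empty section simply has begin == end.
        return locoff <= stkoff && stkoff <= monoff
            && monoff <= scloff && scloff <= endoff;
      }
    };
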
diff --git a/src/share/vm/opto/graphKit.cpp b/src/share/vm/opto/graphKit.cpp
index e2285916c..dcdd104ee 100644
--- a/src/share/vm/opto/graphKit.cpp
+++ b/src/share/vm/opto/graphKit.cpp
@@ -333,6 +333,7 @@ void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* ph
assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes");
assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
+ assert(ex_jvms->scloff() == phi_map->_jvms->scloff(), "matching scalar replaced objects");
assert(ex_map->req() == phi_map->req(), "matching maps");
uint tos = ex_jvms->stkoff() + ex_jvms->sp();
Node* hidden_merge_mark = root();
@@ -409,7 +410,7 @@ void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* ph
while (dst->req() > orig_width) dst->del_req(dst->req()-1);
} else {
assert(dst->is_Phi(), "nobody else uses a hidden region");
- phi = (PhiNode*)dst;
+ phi = dst->as_Phi();
}
if (add_multiple && src->in(0) == ex_control) {
// Both are phis.
@@ -1438,7 +1439,12 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
} else {
ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
}
- return _gvn.transform(ld);
+ ld = _gvn.transform(ld);
+ if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
+ // Improve graph before escape analysis and boxing elimination.
+ record_for_igvn(ld);
+ }
+ return ld;
}
Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
@@ -1495,6 +1501,25 @@ void GraphKit::pre_barrier(bool do_load,
}
}
+bool GraphKit::can_move_pre_barrier() const {
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ switch (bs->kind()) {
+ case BarrierSet::G1SATBCT:
+ case BarrierSet::G1SATBCTLogging:
+ return true; // Can move it if no safepoint
+
+ case BarrierSet::CardTableModRef:
+ case BarrierSet::CardTableExtension:
+ case BarrierSet::ModRef:
+ return true; // There is no pre-barrier
+
+ case BarrierSet::Other:
+ default:
+ ShouldNotReachHere();
+ }
+ return false;
+}
+
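
A self-contained restatement of the decision table above; this mirrors the switch as an illustration and does not reproduce the HotSpot types:

    enum BarrierKind { G1SATBCT, G1SATBCTLogging,
                       CardTableModRef, CardTableExtension, ModRef, Other };

    bool movable_pre_barrier(BarrierKind k) {
      switch (k) {
      case G1SATBCT:
      case G1SATBCTLogging:
        return true;  // SATB barrier may move if no safepoint intervenes
      case CardTableModRef:
      case CardTableExtension:
      case ModRef:
        return true;  // these collectors emit no pre-barrier at all
      default:
        return false; // unknown kind: stay conservative
      }
    }

Both arms return true today; keeping them separate documents that the G1 case is conditional on safepoint placement while the card-table case is vacuously movable.
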
void GraphKit::post_barrier(Node* ctl,
Node* store,
Node* obj,
@@ -3144,7 +3169,7 @@ Node* GraphKit::new_instance(Node* klass_node,
set_all_memory(mem); // Create new memory state
AllocateNode* alloc
- = new (C) AllocateNode(C, AllocateNode::alloc_type(),
+ = new (C) AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
control(), mem, i_o(),
size, klass_node,
initial_slow_test);
@@ -3285,7 +3310,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
// Create the AllocateArrayNode and its result projections
AllocateArrayNode* alloc
- = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(),
+ = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
control(), mem, i_o(),
size, klass_node,
initial_slow_test,
@@ -3326,10 +3351,14 @@ AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
if (ptr == NULL) { // reduce dumb test in callers
return NULL;
}
- if (ptr->is_CheckCastPP()) { // strip a raw-to-oop cast
+ if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
ptr = ptr->in(1);
- if (ptr == NULL) return NULL;
+ if (ptr == NULL) return NULL;
}
+ // Return NULL for allocations with several casts:
+ // j.l.reflect.Array.newInstance(jobject, jint)
+ // Object.clone()
+ // to keep the more precise type from the last cast.
if (ptr->is_Proj()) {
Node* allo = ptr->in(0);
if (allo != NULL && allo->is_Allocate()) {
@@ -3374,19 +3403,6 @@ InitializeNode* AllocateNode::initialization() {
return NULL;
}
-// Trace Allocate -> Proj[Parm] -> MemBarStoreStore
-MemBarStoreStoreNode* AllocateNode::storestore() {
- ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
- if (rawoop == NULL) return NULL;
- for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
- Node* storestore = rawoop->fast_out(i);
- if (storestore->is_MemBarStoreStore()) {
- return storestore->as_MemBarStoreStore();
- }
- }
- return NULL;
-}
-
//----------------------------- loop predicates ---------------------------
//------------------------------add_predicate_impl----------------------------
@@ -3554,6 +3570,8 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
} else {
// In this case both val_type and alias_idx are unused.
assert(pre_val != NULL, "must be loaded already");
+ // Nothing to be done if pre_val is null.
+ if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
}
assert(bt == T_OBJECT, "or we shouldn't be here");
@@ -3598,7 +3616,7 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
if (do_load) {
// load original value
// alias_idx correct??
- pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
+ pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
}
// if (pre_val != NULL)
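
The early return added above follows from the shape of the SATB pre-barrier itself. A pseudo-C sketch, where marking_active and enqueue stand in for the real G1 runtime hooks:

    void satb_pre_barrier(void** addr, void* pre_val, bool do_load) {
      if (!marking_active()) return;          // only during concurrent marking
      if (do_load) pre_val = *addr;           // old value about to be clobbered
      if (pre_val != NULL) enqueue(pre_val);  // log it for SATB
    }

If pre_val is statically TypePtr::NULL_PTR the enqueue can never execute, so the whole barrier folds away.
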
@@ -3807,8 +3825,13 @@ Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
TypeAry::make(TypeInt::CHAR,TypeInt::POS),
ciTypeArrayKlass::make(T_CHAR), true, 0);
int value_field_idx = C->get_alias_index(value_field_type);
- return make_load(ctrl, basic_plus_adr(str, str, value_offset),
- value_type, T_OBJECT, value_field_idx);
+ Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
+ value_type, T_OBJECT, value_field_idx);
+ // String.value field is known to be @Stable.
+ if (UseImplicitStableValues) {
+ load = cast_array_to_stable(load, value_type);
+ }
+ return load;
}
void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
@@ -3826,9 +3849,6 @@ void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
false, NULL, 0);
const TypePtr* value_field_type = string_type->add_offset(value_offset);
- const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
- TypeAry::make(TypeInt::CHAR,TypeInt::POS),
- ciTypeArrayKlass::make(T_CHAR), true, 0);
int value_field_idx = C->get_alias_index(value_field_type);
store_to_memory(ctrl, basic_plus_adr(str, value_offset),
value, T_OBJECT, value_field_idx);
@@ -3843,3 +3863,9 @@ void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
store_to_memory(ctrl, basic_plus_adr(str, count_offset),
value, T_INT, count_field_idx);
}
+
+Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
+ // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
+ // assumption of CCP analysis.
+ return _gvn.transform(new(C) CastPPNode(ary, ary_type->cast_to_stable(true)));
+}
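
On what the new cast buys: CCP only moves types down the lattice, so array stability has to enter the graph as a node whose type already carries the property. A sketch of the intended folding, reusing names from this diff; the folding itself happens later, when the load's Value is computed:

    // 'load' is the String.value load made above, 'value_type' its array type.
    const TypeAryPtr* stable_type = value_type->cast_to_stable(true);
    Node* stable_ary = _gvn.transform(new (C) CastPPNode(load, stable_type));
    // A load from (stable_ary, constant index) over a constant base may now
    // fold to the element's current value: @Stable promises write-once
    // semantics, so observing the element once is conclusive.
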
diff --git a/src/share/vm/opto/graphKit.hpp b/src/share/vm/opto/graphKit.hpp
index 4d7d96178..1fd4e86d2 100644
--- a/src/share/vm/opto/graphKit.hpp
+++ b/src/share/vm/opto/graphKit.hpp
@@ -695,6 +695,10 @@ class GraphKit : public Phase {
void write_barrier_post(Node *store, Node* obj,
Node* adr, uint adr_idx, Node* val, bool use_precise);
+ // Allow reordering of pre-barrier with oop store and/or post-barrier.
+ // Used for load_store operations which loads old value.
+ bool can_move_pre_barrier() const;
+
// G1 pre/post barriers
void g1_write_barrier_pre(bool do_load,
Node* obj,
@@ -832,6 +836,9 @@ class GraphKit : public Phase {
// Insert a loop predicate into the graph
void add_predicate(int nargs = 0);
void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
+
+ // Produce new array node of stable type
+ Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type);
};
// Helper class to support building of control flow branches. Upon
diff --git a/src/share/vm/opto/idealGraphPrinter.cpp b/src/share/vm/opto/idealGraphPrinter.cpp
index e69090543..9f67a652e 100644
--- a/src/share/vm/opto/idealGraphPrinter.cpp
+++ b/src/share/vm/opto/idealGraphPrinter.cpp
@@ -413,10 +413,10 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
print_prop("debug_idx", node->_debug_idx);
#endif
- if(C->cfg() != NULL) {
- Block *block = C->cfg()->_bbs[node->_idx];
- if(block == NULL) {
- print_prop("block", C->cfg()->_blocks[0]->_pre_order);
+ if (C->cfg() != NULL) {
+ Block* block = C->cfg()->get_block_for_node(node);
+ if (block == NULL) {
+ print_prop("block", C->cfg()->get_block(0)->_pre_order);
} else {
print_prop("block", block->_pre_order);
}
@@ -637,10 +637,10 @@ void IdealGraphPrinter::walk_nodes(Node *start, bool edges, VectorSet* temp_set)
if (C->cfg() != NULL) {
// once we have a CFG there are some nodes that aren't really
// reachable but are in the CFG so add them here.
- for (uint i = 0; i < C->cfg()->_blocks.size(); i++) {
- Block *b = C->cfg()->_blocks[i];
- for (uint s = 0; s < b->_nodes.size(); s++) {
- nodeStack.push(b->_nodes[s]);
+ for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
+ Block* block = C->cfg()->get_block(i);
+ for (uint s = 0; s < block->number_of_nodes(); s++) {
+ nodeStack.push(block->get_node(s));
}
}
}
@@ -698,24 +698,24 @@ void IdealGraphPrinter::print(Compile* compile, const char *name, Node *node, in
tail(EDGES_ELEMENT);
if (C->cfg() != NULL) {
head(CONTROL_FLOW_ELEMENT);
- for (uint i = 0; i < C->cfg()->_blocks.size(); i++) {
- Block *b = C->cfg()->_blocks[i];
+ for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
+ Block* block = C->cfg()->get_block(i);
begin_head(BLOCK_ELEMENT);
- print_attr(BLOCK_NAME_PROPERTY, b->_pre_order);
+ print_attr(BLOCK_NAME_PROPERTY, block->_pre_order);
end_head();
head(SUCCESSORS_ELEMENT);
- for (uint s = 0; s < b->_num_succs; s++) {
+ for (uint s = 0; s < block->_num_succs; s++) {
begin_elem(SUCCESSOR_ELEMENT);
- print_attr(BLOCK_NAME_PROPERTY, b->_succs[s]->_pre_order);
+ print_attr(BLOCK_NAME_PROPERTY, block->_succs[s]->_pre_order);
end_elem();
}
tail(SUCCESSORS_ELEMENT);
head(NODES_ELEMENT);
- for (uint s = 0; s < b->_nodes.size(); s++) {
+ for (uint s = 0; s < block->number_of_nodes(); s++) {
begin_elem(NODE_ELEMENT);
- print_attr(NODE_ID_PROPERTY, get_node_id(b->_nodes[s]));
+ print_attr(NODE_ID_PROPERTY, get_node_id(block->get_node(s)));
end_elem();
}
tail(NODES_ELEMENT);
diff --git a/src/share/vm/opto/idealGraphPrinter.hpp b/src/share/vm/opto/idealGraphPrinter.hpp
index 7d1863f4a..f2892d5a9 100644
--- a/src/share/vm/opto/idealGraphPrinter.hpp
+++ b/src/share/vm/opto/idealGraphPrinter.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,9 +41,8 @@ class Node;
class InlineTree;
class ciMethod;
-class IdealGraphPrinter
-{
-private:
+class IdealGraphPrinter : public CHeapObj<mtCompiler> {
+ private:
static const char *INDENT;
static const char *TOP_ELEMENT;
@@ -121,7 +120,7 @@ private:
IdealGraphPrinter();
~IdealGraphPrinter();
-public:
+ public:
static void clean_up();
static IdealGraphPrinter *printer();
@@ -135,8 +134,6 @@ public:
void print_method(Compile* compile, const char *name, int level=1, bool clear_nodes = false);
void print(Compile* compile, const char *name, Node *root, int level=1, bool clear_nodes = false);
void print_xml(const char *name);
-
-
};
#endif
diff --git a/src/share/vm/opto/ifg.cpp b/src/share/vm/opto/ifg.cpp
index 96c0957cf..db8dbea84 100644
--- a/src/share/vm/opto/ifg.cpp
+++ b/src/share/vm/opto/ifg.cpp
@@ -37,12 +37,9 @@
#include "opto/memnode.hpp"
#include "opto/opcodes.hpp"
-//=============================================================================
-//------------------------------IFG--------------------------------------------
PhaseIFG::PhaseIFG( Arena *arena ) : Phase(Interference_Graph), _arena(arena) {
}
-//------------------------------init-------------------------------------------
void PhaseIFG::init( uint maxlrg ) {
_maxlrg = maxlrg;
_yanked = new (_arena) VectorSet(_arena);
@@ -59,7 +56,6 @@ void PhaseIFG::init( uint maxlrg ) {
}
}
-//------------------------------add--------------------------------------------
// Add edge between vertices a & b. These are sorted (triangular matrix),
// then the smaller number is inserted in the larger numbered array.
int PhaseIFG::add_edge( uint a, uint b ) {
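
A minimal model of the triangular storage described above (standard C++, illustrative only):

    #include <set>
    #include <vector>

    // The edge (a,b) is stored once, in the row of the larger-numbered
    // vertex, which halves the memory of a full adjacency structure.
    bool add_edge(std::vector<std::set<unsigned> >& adjs, unsigned a, unsigned b) {
      if (a < b) { unsigned t = a; a = b; b = t; } // sort so that a >= b
      return adjs[a].insert(b).second;             // true if newly inserted
    }

SquareUp() below then mirrors each row into the smaller-numbered side, so neighbors can be enumerated from either endpoint.
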
@@ -71,7 +67,6 @@ int PhaseIFG::add_edge( uint a, uint b ) {
return _adjs[a].insert( b );
}
-//------------------------------add_vector-------------------------------------
// Add an edge between 'a' and everything in the vector.
void PhaseIFG::add_vector( uint a, IndexSet *vec ) {
// IFG is triangular, so do the inserts where 'a' < 'b'.
@@ -86,7 +81,6 @@ void PhaseIFG::add_vector( uint a, IndexSet *vec ) {
}
}
-//------------------------------test-------------------------------------------
// Is there an edge between a and b?
int PhaseIFG::test_edge( uint a, uint b ) const {
// Sort a and b, so that a is larger
@@ -95,7 +89,6 @@ int PhaseIFG::test_edge( uint a, uint b ) const {
return _adjs[a].member(b);
}
-//------------------------------SquareUp---------------------------------------
// Convert triangular matrix to square matrix
void PhaseIFG::SquareUp() {
assert( !_is_square, "only on triangular" );
@@ -111,7 +104,6 @@ void PhaseIFG::SquareUp() {
_is_square = true;
}
-//------------------------------Compute_Effective_Degree-----------------------
// Compute effective degree in bulk
void PhaseIFG::Compute_Effective_Degree() {
assert( _is_square, "only on square" );
@@ -120,7 +112,6 @@ void PhaseIFG::Compute_Effective_Degree() {
lrgs(i).set_degree(effective_degree(i));
}
-//------------------------------test_edge_sq-----------------------------------
int PhaseIFG::test_edge_sq( uint a, uint b ) const {
assert( _is_square, "only on square" );
// Swap, so that 'a' has the lesser count. Then binary search is on
@@ -130,7 +121,6 @@ int PhaseIFG::test_edge_sq( uint a, uint b ) const {
return _adjs[a].member(b);
}
-//------------------------------Union------------------------------------------
// Union edges of B into A
void PhaseIFG::Union( uint a, uint b ) {
assert( _is_square, "only on square" );
@@ -146,7 +136,6 @@ void PhaseIFG::Union( uint a, uint b ) {
}
}
-//------------------------------remove_node------------------------------------
// Yank a Node and all connected edges from the IFG. Return a
// list of neighbors (edges) yanked.
IndexSet *PhaseIFG::remove_node( uint a ) {
@@ -165,7 +154,6 @@ IndexSet *PhaseIFG::remove_node( uint a ) {
return neighbors(a);
}
-//------------------------------re_insert--------------------------------------
// Re-insert a yanked Node.
void PhaseIFG::re_insert( uint a ) {
assert( _is_square, "only on square" );
@@ -180,7 +168,6 @@ void PhaseIFG::re_insert( uint a ) {
}
}
-//------------------------------compute_degree---------------------------------
// Compute the degree between 2 live ranges. If both live ranges are
// aligned-adjacent powers-of-2 then we use the MAX size. If either is
// mis-aligned (or for Fat-Projections, not-adjacent) then we have to
@@ -196,7 +183,6 @@ int LRG::compute_degree( LRG &l ) const {
return tmp;
}
-//------------------------------effective_degree-------------------------------
// Compute effective degree for this live range. If both live ranges are
// aligned-adjacent powers-of-2 then we use the MAX size. If either is
// mis-aligned (or for Fat-Projections, not-adjacent) then we have to
@@ -221,7 +207,6 @@ int PhaseIFG::effective_degree( uint lidx ) const {
#ifndef PRODUCT
-//------------------------------dump-------------------------------------------
void PhaseIFG::dump() const {
tty->print_cr("-- Interference Graph --%s--",
_is_square ? "square" : "triangular" );
@@ -260,7 +245,6 @@ void PhaseIFG::dump() const {
tty->print("\n");
}
-//------------------------------stats------------------------------------------
void PhaseIFG::stats() const {
ResourceMark rm;
int *h_cnt = NEW_RESOURCE_ARRAY(int,_maxlrg*2);
@@ -276,7 +260,6 @@ void PhaseIFG::stats() const {
tty->print_cr("");
}
-//------------------------------verify-----------------------------------------
void PhaseIFG::verify( const PhaseChaitin *pc ) const {
// IFG is square, sorted and no need for Find
for( uint i = 0; i < _maxlrg; i++ ) {
@@ -298,7 +281,6 @@ void PhaseIFG::verify( const PhaseChaitin *pc ) const {
}
#endif
-//------------------------------interfere_with_live----------------------------
// Interfere this register with everything currently live. Use the RegMasks
// to trim the set of possible interferences. Return a count of register-only
// interferences as an estimate of register pressure.
@@ -315,7 +297,6 @@ void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) {
_ifg->add_edge( r, l );
}
-//------------------------------build_ifg_virtual------------------------------
// Actually build the interference graph. Uses virtual registers only, no
// physical register masks. This allows me to be very aggressive when
// coalescing copies. Some of this aggressiveness will have to be undone
@@ -325,9 +306,9 @@ void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) {
void PhaseChaitin::build_ifg_virtual( ) {
// For all blocks (in any order) do...
- for( uint i=0; i<_cfg._num_blocks; i++ ) {
- Block *b = _cfg._blocks[i];
- IndexSet *liveout = _live->live(b);
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+ Block* block = _cfg.get_block(i);
+ IndexSet* liveout = _live->live(block);
// The IFG is built by a single reverse pass over each basic block.
// Starting with the known live-out set, we remove things that get
@@ -337,8 +318,8 @@ void PhaseChaitin::build_ifg_virtual( ) {
// The defined value interferes with everything currently live. The
// value is then removed from the live-ness set and its inputs are
// added to the live-ness set.
- for( uint j = b->end_idx() + 1; j > 1; j-- ) {
- Node *n = b->_nodes[j-1];
+ for (uint j = block->end_idx() + 1; j > 1; j--) {
+ Node* n = block->get_node(j - 1);
// Get value being defined
uint r = _lrg_map.live_range_id(n);
@@ -408,7 +389,6 @@ void PhaseChaitin::build_ifg_virtual( ) {
} // End of forall blocks
}
-//------------------------------count_int_pressure-----------------------------
uint PhaseChaitin::count_int_pressure( IndexSet *liveout ) {
IndexSetIterator elements(liveout);
uint lidx;
@@ -424,7 +404,6 @@ uint PhaseChaitin::count_int_pressure( IndexSet *liveout ) {
return cnt;
}
-//------------------------------count_float_pressure---------------------------
uint PhaseChaitin::count_float_pressure( IndexSet *liveout ) {
IndexSetIterator elements(liveout);
uint lidx;
@@ -438,7 +417,6 @@ uint PhaseChaitin::count_float_pressure( IndexSet *liveout ) {
return cnt;
}
-//------------------------------lower_pressure---------------------------------
// Adjust register pressure down by 1. Capture the last hi-to-lo transition.
static void lower_pressure( LRG *lrg, uint where, Block *b, uint *pressure, uint *hrp_index ) {
if (lrg->mask().is_UP() && lrg->mask_size()) {
@@ -460,40 +438,41 @@ static void lower_pressure( LRG *lrg, uint where, Block *b, uint *pressure, uint
}
}
-//------------------------------build_ifg_physical-----------------------------
// Build the interference graph using physical registers when available.
// That is, if 2 live ranges are simultaneously alive but in their acceptable
// register sets do not overlap, then they do not interfere.
uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
NOT_PRODUCT( Compile::TracePhase t3("buildIFG", &_t_buildIFGphysical, TimeCompiler); )
- uint spill_reg = LRG::SPILL_REG;
uint must_spill = 0;
// For all blocks (in any order) do...
- for( uint i = 0; i < _cfg._num_blocks; i++ ) {
- Block *b = _cfg._blocks[i];
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+ Block* block = _cfg.get_block(i);
// Clone (rather than smash in place) the liveout info, so it is alive
// for the "collect_gc_info" phase later.
- IndexSet liveout(_live->live(b));
- uint last_inst = b->end_idx();
+ IndexSet liveout(_live->live(block));
+ uint last_inst = block->end_idx();
// Compute first nonphi node index
uint first_inst;
- for( first_inst = 1; first_inst < last_inst; first_inst++ )
- if( !b->_nodes[first_inst]->is_Phi() )
+ for (first_inst = 1; first_inst < last_inst; first_inst++) {
+ if (!block->get_node(first_inst)->is_Phi()) {
break;
+ }
+ }
// Spills could be inserted before a CreateEx node, which should be the
// first instruction in the block after the Phis. Move the CreateEx up.
- for( uint insidx = first_inst; insidx < last_inst; insidx++ ) {
- Node *ex = b->_nodes[insidx];
- if( ex->is_SpillCopy() ) continue;
- if( insidx > first_inst && ex->is_Mach() &&
- ex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
+ for (uint insidx = first_inst; insidx < last_inst; insidx++) {
+ Node *ex = block->get_node(insidx);
+ if (ex->is_SpillCopy()) {
+ continue;
+ }
+ if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
// If the CreateEx isn't above all the MachSpillCopies
// then move it to the top.
- b->_nodes.remove(insidx);
- b->_nodes.insert(first_inst, ex);
+ block->remove_node(insidx);
+ block->insert_node(ex, first_inst);
}
// Stop once a CreateEx or any other node is found
break;
@@ -503,12 +482,12 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
uint pressure[2], hrp_index[2];
pressure[0] = pressure[1] = 0;
hrp_index[0] = hrp_index[1] = last_inst+1;
- b->_reg_pressure = b->_freg_pressure = 0;
+ block->_reg_pressure = block->_freg_pressure = 0;
// Liveout things are presumed live for the whole block. We accumulate
// 'area' accordingly. If they get killed in the block, we'll subtract
// the unused part of the block from the area.
int inst_count = last_inst - first_inst;
- double cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count);
+ double cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
assert(!(cost < 0.0), "negative spill cost" );
IndexSetIterator elements(&liveout);
uint lidx;
@@ -519,13 +498,15 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if (lrg.mask().is_UP() && lrg.mask_size()) {
if (lrg._is_float || lrg._is_vector) { // Count float pressure
pressure[1] += lrg.reg_pressure();
- if( pressure[1] > b->_freg_pressure )
- b->_freg_pressure = pressure[1];
+ if (pressure[1] > block->_freg_pressure) {
+ block->_freg_pressure = pressure[1];
+ }
// Count int pressure, but do not count the SP, flags
- } else if( lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
+ } else if (lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI])) {
pressure[0] += lrg.reg_pressure();
- if( pressure[0] > b->_reg_pressure )
- b->_reg_pressure = pressure[0];
+ if (pressure[0] > block->_reg_pressure) {
+ block->_reg_pressure = pressure[0];
+ }
}
}
}
@@ -541,8 +522,8 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
// value is then removed from the live-ness set and its inputs are added
// to the live-ness set.
uint j;
- for( j = last_inst + 1; j > 1; j-- ) {
- Node *n = b->_nodes[j - 1];
+ for (j = last_inst + 1; j > 1; j--) {
+ Node* n = block->get_node(j - 1);
// Get value being defined
uint r = _lrg_map.live_range_id(n);
@@ -551,7 +532,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if(r) {
// A DEF normally costs block frequency; rematerialized values are
// removed from the DEF sight, so LOWER costs here.
- lrgs(r)._cost += n->rematerialize() ? 0 : b->_freq;
+ lrgs(r)._cost += n->rematerialize() ? 0 : block->_freq;
// If it is not live, then this instruction is dead. Probably caused
// by spilling and rematerialization. Who cares why, yank this baby.
@@ -560,12 +541,12 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if( !n->is_Proj() ||
// Could also be a flags-projection of a dead ADD or such.
(_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
- b->_nodes.remove(j - 1);
+ block->remove_node(j - 1);
if (lrgs(r)._def == n) {
lrgs(r)._def = 0;
}
n->disconnect_inputs(NULL, C);
- _cfg._bbs.map(n->_idx,NULL);
+ _cfg.unmap_node_from_block(n);
n->replace_by(C->top());
// Since yanking a Node from block, high pressure moves up one
hrp_index[0]--;
@@ -580,21 +561,21 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
RegMask itmp = lrgs(r).mask();
itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
int iregs = itmp.Size();
- if( pressure[0]+iregs > b->_reg_pressure )
- b->_reg_pressure = pressure[0]+iregs;
- if( pressure[0] <= (uint)INTPRESSURE &&
- pressure[0]+iregs > (uint)INTPRESSURE ) {
- hrp_index[0] = j-1;
+ if (pressure[0]+iregs > block->_reg_pressure) {
+ block->_reg_pressure = pressure[0] + iregs;
+ }
+ if (pressure[0] <= (uint)INTPRESSURE && pressure[0] + iregs > (uint)INTPRESSURE) {
+ hrp_index[0] = j - 1;
}
// Count the float-only registers
RegMask ftmp = lrgs(r).mask();
ftmp.AND(*Matcher::idealreg2regmask[Op_RegD]);
int fregs = ftmp.Size();
- if( pressure[1]+fregs > b->_freg_pressure )
- b->_freg_pressure = pressure[1]+fregs;
- if( pressure[1] <= (uint)FLOATPRESSURE &&
- pressure[1]+fregs > (uint)FLOATPRESSURE ) {
- hrp_index[1] = j-1;
+ if (pressure[1] + fregs > block->_freg_pressure) {
+ block->_freg_pressure = pressure[1] + fregs;
+ }
+ if (pressure[1] <= (uint)FLOATPRESSURE && pressure[1] + fregs > (uint)FLOATPRESSURE) {
+ hrp_index[1] = j - 1;
}
}
@@ -607,7 +588,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if( n->is_SpillCopy()
&& lrgs(r).is_singledef() // MultiDef live range can still split
&& n->outcnt() == 1 // and use must be in this block
- && _cfg._bbs[n->unique_out()->_idx] == b ) {
+ && _cfg.get_block_for_node(n->unique_out()) == block) {
// All single-use MachSpillCopy(s) that immediately precede their
// use must color early. If a longer live range steals their
// color, the spill copy will split and may push another spill copy
@@ -617,14 +598,16 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
//
Node *single_use = n->unique_out();
- assert( b->find_node(single_use) >= j, "Use must be later in block");
+ assert(block->find_node(single_use) >= j, "Use must be later in block");
// Use can be earlier in block if it is a Phi, but then I should be a MultiDef
// Find first non SpillCopy 'm' that follows the current instruction
// (j - 1) is index for current instruction 'n'
Node *m = n;
- for( uint i = j; i <= last_inst && m->is_SpillCopy(); ++i ) { m = b->_nodes[i]; }
- if( m == single_use ) {
+ for (uint i = j; i <= last_inst && m->is_SpillCopy(); ++i) {
+ m = block->get_node(i);
+ }
+ if (m == single_use) {
lrgs(r)._area = 0.0;
}
}
@@ -633,7 +616,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if( liveout.remove(r) ) {
// Adjust register pressure.
// Capture last hi-to-lo pressure transition
- lower_pressure( &lrgs(r), j-1, b, pressure, hrp_index );
+ lower_pressure(&lrgs(r), j - 1, block, pressure, hrp_index);
assert( pressure[0] == count_int_pressure (&liveout), "" );
assert( pressure[1] == count_float_pressure(&liveout), "" );
}
@@ -646,7 +629,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if (liveout.remove(x)) {
lrgs(x)._area -= cost;
// Adjust register pressure.
- lower_pressure(&lrgs(x), j-1, b, pressure, hrp_index);
+ lower_pressure(&lrgs(x), j - 1, block, pressure, hrp_index);
assert( pressure[0] == count_int_pressure (&liveout), "" );
assert( pressure[1] == count_float_pressure(&liveout), "" );
}
@@ -718,7 +701,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
// Area remaining in the block
inst_count--;
- cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count);
+ cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
// Make all inputs live
if( !n->is_Phi() ) { // Phi function uses come from prior block
@@ -743,7 +726,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if (k < debug_start) {
// A USE costs twice block frequency (once for the Load, once
// for a Load-delay). Rematerialized uses only cost once.
- lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq + b->_freq));
+ lrg._cost += (def->rematerialize() ? block->_freq : (block->_freq + block->_freq));
}
// It is live now
if (liveout.insert(x)) {
@@ -753,12 +736,14 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if (lrg.mask().is_UP() && lrg.mask_size()) {
if (lrg._is_float || lrg._is_vector) {
pressure[1] += lrg.reg_pressure();
- if( pressure[1] > b->_freg_pressure )
- b->_freg_pressure = pressure[1];
+ if (pressure[1] > block->_freg_pressure) {
+ block->_freg_pressure = pressure[1];
+ }
} else if( lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
pressure[0] += lrg.reg_pressure();
- if( pressure[0] > b->_reg_pressure )
- b->_reg_pressure = pressure[0];
+ if (pressure[0] > block->_reg_pressure) {
+ block->_reg_pressure = pressure[0];
+ }
}
}
assert( pressure[0] == count_int_pressure (&liveout), "" );
@@ -772,44 +757,47 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
// If we run off the top of the block with high pressure and
// never see a hi-to-low pressure transition, just record that
// the whole block is high pressure.
- if( pressure[0] > (uint)INTPRESSURE ) {
+ if (pressure[0] > (uint)INTPRESSURE) {
hrp_index[0] = 0;
- if( pressure[0] > b->_reg_pressure )
- b->_reg_pressure = pressure[0];
+ if (pressure[0] > block->_reg_pressure) {
+ block->_reg_pressure = pressure[0];
+ }
}
- if( pressure[1] > (uint)FLOATPRESSURE ) {
+ if (pressure[1] > (uint)FLOATPRESSURE) {
hrp_index[1] = 0;
- if( pressure[1] > b->_freg_pressure )
- b->_freg_pressure = pressure[1];
+ if (pressure[1] > block->_freg_pressure) {
+ block->_freg_pressure = pressure[1];
+ }
}
// Compute the high pressure index; avoid landing in the middle of proj nodes
j = hrp_index[0];
- if( j < b->_nodes.size() && j < b->end_idx()+1 ) {
- Node *cur = b->_nodes[j];
- while( cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch() ) {
+ if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
+ Node* cur = block->get_node(j);
+ while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
j--;
- cur = b->_nodes[j];
+ cur = block->get_node(j);
}
}
- b->_ihrp_index = j;
+ block->_ihrp_index = j;
j = hrp_index[1];
- if( j < b->_nodes.size() && j < b->end_idx()+1 ) {
- Node *cur = b->_nodes[j];
- while( cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch() ) {
+ if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
+ Node* cur = block->get_node(j);
+ while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
j--;
- cur = b->_nodes[j];
+ cur = block->get_node(j);
}
}
- b->_fhrp_index = j;
+ block->_fhrp_index = j;
#ifndef PRODUCT
// Gather Register Pressure Statistics
if( PrintOptoStatistics ) {
- if( b->_reg_pressure > (uint)INTPRESSURE || b->_freg_pressure > (uint)FLOATPRESSURE )
+ if (block->_reg_pressure > (uint)INTPRESSURE || block->_freg_pressure > (uint)FLOATPRESSURE) {
_high_pressure++;
- else
+ } else {
_low_pressure++;
+ }
}
#endif
} // End of for all blocks
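
A note on the bookkeeping in this function: pressure[0] and pressure[1] track integer and float register pressure separately, and hrp_index[] remembers the last point where the block crossed from high pressure back down to the limit. An isolated sketch of that capture (illustrative; the real lower_pressure subtracts the live range's reg_pressure and tests against INTPRESSURE or FLOATPRESSURE):

    void on_live_range_death(uint* pressure, uint reg_pressure, uint limit,
                             uint where, uint* hrp_index) {
      if (*pressure > limit && *pressure - reg_pressure <= limit) {
        *hrp_index = where;  // last high-to-low transition seen so far
      }
      *pressure -= reg_pressure;
    }

Later split placement uses these indices to position spill code below the high-pressure region.
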
diff --git a/src/share/vm/opto/ifnode.cpp b/src/share/vm/opto/ifnode.cpp
index b506a03f5..399544648 100644
--- a/src/share/vm/opto/ifnode.cpp
+++ b/src/share/vm/opto/ifnode.cpp
@@ -673,7 +673,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node *val, Node* if_proj
// / Region
//
Node* IfNode::fold_compares(PhaseGVN* phase) {
- if (!EliminateAutoBox || Opcode() != Op_If) return NULL;
+ if (!phase->C->eliminate_boxing() || Opcode() != Op_If) return NULL;
Node* this_cmp = in(1)->in(1);
if (this_cmp != NULL && this_cmp->Opcode() == Op_CmpI &&
diff --git a/src/share/vm/opto/lcm.cpp b/src/share/vm/opto/lcm.cpp
index eb56a3e42..e8390719a 100644
--- a/src/share/vm/opto/lcm.cpp
+++ b/src/share/vm/opto/lcm.cpp
@@ -61,14 +61,14 @@
// The proj is the control projection for the not-null case.
// The val is the pointer being checked for nullness or
// decodeHeapOop_not_null node if it did not fold into address.
-void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
+void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons) {
// Assume that if a null check is needed for offset 0 then it is always needed.
// Intel Solaris doesn't support any null checks yet and no
// mechanism exists (yet) to set the switches at an os_cpu level
if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;
// Make sure the ptr-is-null path appears to be uncommon!
- float f = end()->as_MachIf()->_prob;
+ float f = block->end()->as_MachIf()->_prob;
if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
if( f > PROB_UNLIKELY_MAG(4) ) return;
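
The transform this function performs, in one conceptual picture (not actual IR):

    // before:                          after:
    //   if (ptr == NULL) goto trap;      val = *(ptr + off); // may fault
    //   val = *(ptr + off);              // the SEGV handler resumes at the
    //                                    // uncommon-trap path instead
    //
    // The gate above: with PROB_UNLIKELY_MAG(4) == 1e-4, the ptr-is-null
    // path must have probability at most 0.0001 before the explicit test
    // is traded for a faulting memory op, since taking a signal is far
    // more expensive than a predicted-not-taken branch.
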
@@ -78,13 +78,13 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// Get the successor block for if the test ptr is non-null
Block* not_null_block; // this one goes with the proj
Block* null_block;
- if (_nodes[_nodes.size()-1] == proj) {
- null_block = _succs[0];
- not_null_block = _succs[1];
+ if (block->get_node(block->number_of_nodes()-1) == proj) {
+ null_block = block->_succs[0];
+ not_null_block = block->_succs[1];
} else {
- assert(_nodes[_nodes.size()-2] == proj, "proj is one or the other");
- not_null_block = _succs[0];
- null_block = _succs[1];
+ assert(block->get_node(block->number_of_nodes()-2) == proj, "proj is one or the other");
+ not_null_block = block->_succs[0];
+ null_block = block->_succs[1];
}
while (null_block->is_Empty() == Block::empty_with_goto) {
null_block = null_block->_succs[0];
@@ -96,8 +96,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// detect failure of this optimization, as in 6366351.)
{
bool found_trap = false;
- for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) {
- Node* nn = null_block->_nodes[i1];
+ for (uint i1 = 0; i1 < null_block->number_of_nodes(); i1++) {
+ Node* nn = null_block->get_node(i1);
if (nn->is_MachCall() &&
nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
@@ -222,9 +222,10 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// cannot reason about it; it is probably not an implicit null exception
} else {
const TypePtr* tptr;
- if (UseCompressedOops && Universe::narrow_oop_shift() == 0) {
+ if (UseCompressedOops && (Universe::narrow_oop_shift() == 0 ||
+ Universe::narrow_klass_shift() == 0)) {
// 32-bits narrow oop can be the base of address expressions
- tptr = base->bottom_type()->make_ptr();
+ tptr = base->get_ptr_type();
} else {
// only regular oops are expected here
tptr = base->bottom_type()->is_ptr();
@@ -239,20 +240,20 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
}
// Check ctrl input to see if the null-check dominates the memory op
- Block *cb = cfg->_bbs[mach->_idx];
+ Block *cb = get_block_for_node(mach);
cb = cb->_idom; // Always hoist at least 1 block
if( !was_store ) { // Stores can be hoisted only one block
- while( cb->_dom_depth > (_dom_depth + 1))
+ while( cb->_dom_depth > (block->_dom_depth + 1))
cb = cb->_idom; // Hoist loads as far as we want
// The non-null-block should dominate the memory op, too. Live
// range spilling will insert a spill in the non-null-block if it
// needs to spill the memory op for an implicit null check.
- if (cb->_dom_depth == (_dom_depth + 1)) {
+ if (cb->_dom_depth == (block->_dom_depth + 1)) {
if (cb != not_null_block) continue;
cb = cb->_idom;
}
}
- if( cb != this ) continue;
+ if( cb != block ) continue;
// Found a memory user; see if it can be hoisted to check-block
uint vidx = 0; // Capture index of value into memop
@@ -264,8 +265,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
if( is_decoden ) continue;
}
// Block of memory-op input
- Block *inb = cfg->_bbs[mach->in(j)->_idx];
- Block *b = this; // Start from nul check
+ Block *inb = get_block_for_node(mach->in(j));
+ Block *b = block; // Start from null check
while( b != inb && b->_dom_depth > inb->_dom_depth )
b = b->_idom; // search upwards for input
// See if input dominates null check
@@ -274,28 +275,28 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
}
if( j > 0 )
continue;
- Block *mb = cfg->_bbs[mach->_idx];
+ Block *mb = get_block_for_node(mach);
// Hoisting stores requires more checks for the anti-dependence case.
// Give up hoisting if we have to move the store past any load.
if( was_store ) {
Block *b = mb; // Start searching here for a local load
// mach use (faulting) trying to hoist
// n might be blocker to hoisting
- while( b != this ) {
+ while( b != block ) {
uint k;
- for( k = 1; k < b->_nodes.size(); k++ ) {
- Node *n = b->_nodes[k];
+ for( k = 1; k < b->number_of_nodes(); k++ ) {
+ Node *n = b->get_node(k);
if( n->needs_anti_dependence_check() &&
n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
break; // Found anti-dependent load
}
- if( k < b->_nodes.size() )
+ if( k < b->number_of_nodes() )
break; // Found anti-dependent load
// Make sure control does not do a merge (would have to check allpaths)
if( b->num_preds() != 2 ) break;
- b = cfg->_bbs[b->pred(1)->_idx]; // Move up to predecessor block
+ b = get_block_for_node(b->pred(1)); // Move up to predecessor block
}
- if( b != this ) continue;
+ if( b != block ) continue;
}
// Make sure this memory op is not already being used for a NullCheck
@@ -305,15 +306,15 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// Found a candidate! Pick one with least dom depth - the highest
// in the dom tree should be closest to the null check.
- if( !best ||
- cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
+ if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) {
best = mach;
bidx = vidx;
-
}
}
// No candidate!
- if( !best ) return;
+ if (best == NULL) {
+ return;
+ }
// ---- Found an implicit null check
extern int implicit_null_checks;
@@ -321,46 +322,45 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
if( is_decoden ) {
// Check if we need to hoist decodeHeapOop_not_null first.
- Block *valb = cfg->_bbs[val->_idx];
- if( this != valb && this->_dom_depth < valb->_dom_depth ) {
+ Block *valb = get_block_for_node(val);
+ if( block != valb && block->_dom_depth < valb->_dom_depth ) {
// Hoist it up to the end of the test block.
valb->find_remove(val);
- this->add_inst(val);
- cfg->_bbs.map(val->_idx,this);
+ block->add_inst(val);
+ map_node_to_block(val, block);
// DecodeN on x86 may kill flags. Check for flag-killing projections
// that also need to be hoisted.
for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
Node* n = val->fast_out(j);
if( n->is_MachProj() ) {
- cfg->_bbs[n->_idx]->find_remove(n);
- this->add_inst(n);
- cfg->_bbs.map(n->_idx,this);
+ get_block_for_node(n)->find_remove(n);
+ block->add_inst(n);
+ map_node_to_block(n, block);
}
}
}
}
// Hoist the memory candidate up to the end of the test block.
- Block *old_block = cfg->_bbs[best->_idx];
+ Block *old_block = get_block_for_node(best);
old_block->find_remove(best);
- add_inst(best);
- cfg->_bbs.map(best->_idx,this);
+ block->add_inst(best);
+ map_node_to_block(best, block);
// Move the control dependence
- if (best->in(0) && best->in(0) == old_block->_nodes[0])
- best->set_req(0, _nodes[0]);
+ if (best->in(0) && best->in(0) == old_block->head())
+ best->set_req(0, block->head());
// Check for flag-killing projections that also need to be hoisted
// Should be DU safe because no edge updates.
for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
Node* n = best->fast_out(j);
if( n->is_MachProj() ) {
- cfg->_bbs[n->_idx]->find_remove(n);
- add_inst(n);
- cfg->_bbs.map(n->_idx,this);
+ get_block_for_node(n)->find_remove(n);
+ block->add_inst(n);
+ map_node_to_block(n, block);
}
}
- Compile *C = cfg->C;
// proj==Op_True --> ne test; proj==Op_False --> eq test.
// One of two graph shapes got matched:
// (IfTrue (If (Bool NE (CmpP ptr NULL))))
@@ -370,10 +370,10 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// We need to flip the projections to keep the same semantics.
if( proj->Opcode() == Op_IfTrue ) {
// Swap order of projections in basic block to swap branch targets
- Node *tmp1 = _nodes[end_idx()+1];
- Node *tmp2 = _nodes[end_idx()+2];
- _nodes.map(end_idx()+1, tmp2);
- _nodes.map(end_idx()+2, tmp1);
+ Node *tmp1 = block->get_node(block->end_idx()+1);
+ Node *tmp2 = block->get_node(block->end_idx()+2);
+ block->map_node(tmp2, block->end_idx()+1);
+ block->map_node(tmp1, block->end_idx()+2);
Node *tmp = new (C) Node(C->top()); // Use not NULL input
tmp1->replace_by(tmp);
tmp2->replace_by(tmp1);
@@ -386,8 +386,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// it as well.
Node *old_tst = proj->in(0);
MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
- _nodes.map(end_idx(),nul_chk);
- cfg->_bbs.map(nul_chk->_idx,this);
+ block->map_node(nul_chk, block->end_idx());
+ map_node_to_block(nul_chk, block);
// Redirect users of old_test to nul_chk
for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
old_tst->last_out(i2)->set_req(0, nul_chk);
@@ -395,8 +395,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
for (uint i3 = 0; i3 < old_tst->req(); i3++)
old_tst->set_req(i3, NULL);
- cfg->latency_from_uses(nul_chk);
- cfg->latency_from_uses(best);
+ latency_from_uses(nul_chk);
+ latency_from_uses(best);
}
@@ -410,7 +410,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// remaining cases (most), choose the instruction with the greatest latency
// (that is, the largest number of pseudo-cycles required to the end of the
// routine). If there is a tie, choose the instruction with the most inputs.
-Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
+Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
// If only a single entry on the stack, use it
uint cnt = worklist.size();
@@ -444,7 +444,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
}
// Final call in a block must be adjacent to 'catch'
- Node *e = end();
+ Node *e = block->end();
if( e->is_Catch() && e->in(0)->in(0) == n )
continue;
@@ -470,7 +470,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
Node* use = n->fast_out(j);
// The use is a conditional branch, make them adjacent
- if (use->is_MachIf() && cfg->_bbs[use->_idx]==this ) {
+ if (use->is_MachIf() && get_block_for_node(use) == block) {
found_machif = true;
break;
}
@@ -503,7 +503,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
n_choice = 1;
}
- uint n_latency = cfg->_node_latency->at_grow(n->_idx);
+ uint n_latency = get_latency_for_node(n);
uint n_score = n->req(); // Many inputs get high score to break ties
// Keep best latency found
@@ -531,13 +531,14 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
//------------------------------set_next_call----------------------------------
-void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) {
+void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
if( next_call.test_set(n->_idx) ) return;
for( uint i=0; i<n->len(); i++ ) {
Node *m = n->in(i);
if( !m ) continue; // must see all nodes in block that precede call
- if( bbs[m->_idx] == this )
- set_next_call( m, next_call, bbs );
+ if (get_block_for_node(m) == block) {
+ set_next_call(block, m, next_call);
+ }
}
}
@@ -547,24 +548,26 @@ void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) {
// next subroutine call get priority - basically it moves things NOT needed
// for the next call till after the call. This prevents me from trying to
// carry lots of stuff live across a call.
-void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs) {
+void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
// Find the next control-defining Node in this block
Node* call = NULL;
for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
Node* m = this_call->fast_out(i);
- if( bbs[m->_idx] == this && // Local-block user
+ if (get_block_for_node(m) == block && // Local-block user
m != this_call && // Not self-start node
- m->is_MachCall() )
+ m->is_MachCall()) {
call = m;
break;
+ }
}
if (call == NULL) return; // No next call (e.g., block end is near)
// Set next-call for all inputs to this call
- set_next_call(call, next_call, bbs);
+ set_next_call(block, call, next_call);
}
//------------------------------add_call_kills-------------------------------------
-void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
+// Helper function that adds caller-save registers to the MachProjNode
+static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
// Fill in the kill mask for the call
for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
if( !regs.Member(r) ) { // Not already defined by the call
@@ -580,7 +583,7 @@ void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_p
//------------------------------sched_call-------------------------------------
-uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
+uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
RegMask regs;
// Schedule all the users of the call right now. All the users are
@@ -593,18 +596,20 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_
ready_cnt.at_put(n->_idx, n_cnt);
assert( n_cnt == 0, "" );
// Schedule next to call
- _nodes.map(node_cnt++, n);
+ block->map_node(n, node_cnt++);
// Collect defined registers
regs.OR(n->out_RegMask());
// Check for scheduling the next control-definer
if( n->bottom_type() == Type::CONTROL )
// Warm up next pile of heuristic bits
- needed_for_next_call(n, next_call, bbs);
+ needed_for_next_call(block, n, next_call);
// Children of projections are now all ready
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
Node* m = n->fast_out(j); // Get user
- if( bbs[m->_idx] != this ) continue;
+ if (get_block_for_node(m) != block) {
+ continue;
+ }
if( m->is_Phi() ) continue;
int m_cnt = ready_cnt.at(m->_idx)-1;
ready_cnt.at_put(m->_idx, m_cnt);
@@ -616,14 +621,14 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_
// Act as if the call defines the Frame Pointer.
// Certainly the FP is alive and well after the call.
- regs.Insert(matcher.c_frame_pointer());
+ regs.Insert(_matcher.c_frame_pointer());
// Set all registers killed and not already defined by the call.
uint r_cnt = mcall->tf()->range()->cnt();
int op = mcall->ideal_Opcode();
- MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
- bbs.map(proj->_idx,this);
- _nodes.insert(node_cnt++, proj);
+ MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
+ map_node_to_block(proj, block);
+ block->insert_node(proj, node_cnt++);
// Select the right register save policy.
const char * save_policy;
@@ -632,13 +637,13 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_
case Op_CallLeaf:
case Op_CallLeafNoFP:
// Calling C code so use C calling convention
- save_policy = matcher._c_reg_save_policy;
+ save_policy = _matcher._c_reg_save_policy;
break;
case Op_CallStaticJava:
case Op_CallDynamicJava:
// Calling Java code so use Java calling convention
- save_policy = matcher._register_save_policy;
+ save_policy = _matcher._register_save_policy;
break;
default:
@@ -673,44 +678,46 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_
//------------------------------schedule_local---------------------------------
// Topological sort within a block. Someday become a real scheduler.
-bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) {
+bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) {
// Already "sorted" are the block start Node (as the first entry), and
// the block-ending Node and any trailing control projections. We leave
// these alone. PhiNodes and ParmNodes are made to follow the block start
// Node. Everything else gets topo-sorted.
#ifndef PRODUCT
- if (cfg->trace_opto_pipelining()) {
- tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order);
- for (uint i = 0;i < _nodes.size();i++) {
+ if (trace_opto_pipelining()) {
+ tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
+ for (uint i = 0;i < block->number_of_nodes(); i++) {
tty->print("# ");
- _nodes[i]->fast_dump();
+ block->get_node(i)->fast_dump();
}
tty->print_cr("#");
}
#endif
// RootNode is already sorted
- if( _nodes.size() == 1 ) return true;
+ if (block->number_of_nodes() == 1) {
+ return true;
+ }
// Move PhiNodes and ParmNodes from 1 to cnt up to the start
- uint node_cnt = end_idx();
+ uint node_cnt = block->end_idx();
uint phi_cnt = 1;
uint i;
for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
- Node *n = _nodes[i];
+ Node *n = block->get_node(i);
if( n->is_Phi() || // Found a PhiNode or ParmNode
- (n->is_Proj() && n->in(0) == head()) ) {
+ (n->is_Proj() && n->in(0) == block->head()) ) {
// Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
- _nodes.map(i,_nodes[phi_cnt]);
- _nodes.map(phi_cnt++,n); // swap Phi/Parm up front
+ block->map_node(block->get_node(phi_cnt), i);
+ block->map_node(n, phi_cnt++); // swap Phi/Parm up front
} else { // All others
// Count block-local inputs to 'n'
uint cnt = n->len(); // Input count
uint local = 0;
for( uint j=0; j<cnt; j++ ) {
Node *m = n->in(j);
- if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
+ if( m && get_block_for_node(m) == block && !m->is_top() )
local++; // One more block-local input
}
ready_cnt.at_put(n->_idx, local); // Count em up
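
The ready_cnt scheme above is Kahn-style list scheduling restricted to block-local inputs. A self-contained model (standard C++, illustrative; the real select() below breaks ties by latency and other heuristics rather than FIFO order):

    #include <queue>
    #include <vector>

    std::vector<int> topo_schedule(int n, const std::vector<std::vector<int> >& deps) {
      std::vector<int> ready_cnt(n, 0), order;
      std::vector<std::vector<int> > users(n);
      for (int i = 0; i < n; i++) {
        for (size_t j = 0; j < deps[i].size(); j++) {
          ready_cnt[i]++;                 // one more unscheduled local input
          users[deps[i][j]].push_back(i);
        }
      }
      std::queue<int> worklist;
      for (int i = 0; i < n; i++) {
        if (ready_cnt[i] == 0) worklist.push(i);
      }
      while (!worklist.empty()) {
        int x = worklist.front(); worklist.pop(); // select() would pick here
        order.push_back(x);
        for (size_t j = 0; j < users[x].size(); j++) {
          if (--ready_cnt[users[x][j]] == 0) worklist.push(users[x][j]);
        }
      }
      return order; // a topological order of the block's nodes
    }
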
@@ -722,7 +729,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
for (uint prec = n->req(); prec < n->len(); prec++) {
Node* oop_store = n->in(prec);
if (oop_store != NULL) {
- assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
+ assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
}
}
}
@@ -746,16 +753,16 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
}
}
}
- for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
- ready_cnt.at_put(_nodes[i2]->_idx, 0);
+ for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
+ ready_cnt.at_put(block->get_node(i2)->_idx, 0);
// All the prescheduled guys do not hold back internal nodes
uint i3;
for(i3 = 0; i3<phi_cnt; i3++ ) { // For all pre-scheduled
- Node *n = _nodes[i3]; // Get pre-scheduled
+ Node *n = block->get_node(i3); // Get pre-scheduled
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
Node* m = n->fast_out(j);
- if( cfg->_bbs[m->_idx] ==this ) { // Local-block user
+ if (get_block_for_node(m) == block) { // Local-block user
int m_cnt = ready_cnt.at(m->_idx)-1;
ready_cnt.at_put(m->_idx, m_cnt); // Fix ready count
}
@@ -766,7 +773,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
// Make a worklist
Node_List worklist;
for(uint i4=i3; i4<node_cnt; i4++ ) { // Put ready guys on worklist
- Node *m = _nodes[i4];
+ Node *m = block->get_node(i4);
if( !ready_cnt.at(m->_idx) ) { // Zero ready count?
if (m->is_iteratively_computed()) {
// Push induction variable increments last to allow other uses
@@ -788,15 +795,15 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
}
// Warm up the 'next_call' heuristic bits
- needed_for_next_call(_nodes[0], next_call, cfg->_bbs);
+ needed_for_next_call(block, block->head(), next_call);
#ifndef PRODUCT
- if (cfg->trace_opto_pipelining()) {
- for (uint j=0; j<_nodes.size(); j++) {
- Node *n = _nodes[j];
+ if (trace_opto_pipelining()) {
+ for (uint j=0; j< block->number_of_nodes(); j++) {
+ Node *n = block->get_node(j);
int idx = n->_idx;
tty->print("# ready cnt:%3d ", ready_cnt.at(idx));
- tty->print("latency:%3d ", cfg->_node_latency->at_grow(idx));
+ tty->print("latency:%3d ", get_latency_for_node(n));
tty->print("%4d: %s\n", idx, n->Name());
}
}
@@ -807,7 +814,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
while( worklist.size() ) { // Worklist is not ready
#ifndef PRODUCT
- if (cfg->trace_opto_pipelining()) {
+ if (trace_opto_pipelining()) {
tty->print("# ready list:");
for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
Node *n = worklist[i]; // Get Node on worklist
@@ -818,13 +825,13 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
#endif
// Select and pop a ready guy from worklist
- Node* n = select(cfg, worklist, ready_cnt, next_call, phi_cnt);
- _nodes.map(phi_cnt++,n); // Schedule him next
+ Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
+ block->map_node(n, phi_cnt++); // Schedule him next
#ifndef PRODUCT
- if (cfg->trace_opto_pipelining()) {
+ if (trace_opto_pipelining()) {
tty->print("# select %d: %s", n->_idx, n->Name());
- tty->print(", latency:%d", cfg->_node_latency->at_grow(n->_idx));
+ tty->print(", latency:%d", get_latency_for_node(n));
n->dump();
if (Verbose) {
tty->print("# ready list:");
@@ -839,26 +846,28 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
#endif
if( n->is_MachCall() ) {
MachCallNode *mcall = n->as_MachCall();
- phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
+ phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
continue;
}
if (n->is_Mach() && n->as_Mach()->has_call()) {
RegMask regs;
- regs.Insert(matcher.c_frame_pointer());
+ regs.Insert(_matcher.c_frame_pointer());
regs.OR(n->out_RegMask());
- MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
- cfg->_bbs.map(proj->_idx,this);
- _nodes.insert(phi_cnt++, proj);
+ MachProjNode *proj = new (C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
+ map_node_to_block(proj, block);
+ block->insert_node(proj, phi_cnt++);
- add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
+ add_call_kills(proj, regs, _matcher._c_reg_save_policy, false);
}
// Children are now all ready
for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
Node* m = n->fast_out(i5); // Get user
- if( cfg->_bbs[m->_idx] != this ) continue;
+ if (get_block_for_node(m) != block) {
+ continue;
+ }
if( m->is_Phi() ) continue;
if (m->_idx >= max_idx) { // new node, skip it
assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
@@ -871,9 +880,8 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
}
}
- if( phi_cnt != end_idx() ) {
+ if( phi_cnt != block->end_idx() ) {
// did not schedule all. Retry, Bailout, or Die
- Compile* C = matcher.C;
if (C->subsume_loads() == true && !C->failing()) {
// Retry with subsume_loads == false
// If this is the first failure, the sentinel string will "stick"
@@ -885,12 +893,12 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
}
#ifndef PRODUCT
- if (cfg->trace_opto_pipelining()) {
+ if (trace_opto_pipelining()) {
tty->print_cr("#");
tty->print_cr("# after schedule_local");
- for (uint i = 0;i < _nodes.size();i++) {
+ for (uint i = 0;i < block->number_of_nodes();i++) {
tty->print("# ");
- _nodes[i]->fast_dump();
+ block->get_node(i)->fast_dump();
}
tty->cr();
}
@@ -916,7 +924,7 @@ static void catch_cleanup_fix_all_inputs(Node *use, Node *old_def, Node *new_def
}
//------------------------------catch_cleanup_find_cloned_def------------------
-static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
+Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
assert( use_blk != def_blk, "Inter-block cleanup only");
// The use is some block below the Catch. Find and return the clone of the def
@@ -942,13 +950,14 @@ static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def
// PhiNode, the PhiNode uses from the def and IT's uses need fixup.
Node_Array inputs = new Node_List(Thread::current()->resource_area());
for(uint k = 1; k < use_blk->num_preds(); k++) {
- inputs.map(k, catch_cleanup_find_cloned_def(bbs[use_blk->pred(k)->_idx], def, def_blk, bbs, n_clone_idx));
+ Block* block = get_block_for_node(use_blk->pred(k));
+ inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, n_clone_idx));
}
// Check to see if the use_blk already has an identical phi inserted.
// If it exists, it will be at the first position since all uses of a
// def are processed together.
- Node *phi = use_blk->_nodes[1];
+ Node *phi = use_blk->get_node(1);
if( phi->is_Phi() ) {
fixup = phi;
for (uint k = 1; k < use_blk->num_preds(); k++) {
@@ -963,8 +972,8 @@ static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def
// If an existing PhiNode was not found, make a new one.
if (fixup == NULL) {
Node *new_phi = PhiNode::make(use_blk->head(), def);
- use_blk->_nodes.insert(1, new_phi);
- bbs.map(new_phi->_idx, use_blk);
+ use_blk->insert_node(new_phi, 1);
+ map_node_to_block(new_phi, use_blk);
for (uint k = 1; k < use_blk->num_preds(); k++) {
new_phi->set_req(k, inputs[k]);
}
@@ -973,7 +982,7 @@ static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def
} else {
// Found the use just below the Catch. Make it use the clone.
- fixup = use_blk->_nodes[n_clone_idx];
+ fixup = use_blk->get_node(n_clone_idx);
}
return fixup;
@@ -993,36 +1002,36 @@ static void catch_cleanup_intra_block(Node *use, Node *def, Block *blk, int beg,
for( uint k = 0; k < blk->_num_succs; k++ ) {
// Get clone in each successor block
Block *sb = blk->_succs[k];
- Node *clone = sb->_nodes[offset_idx+1];
+ Node *clone = sb->get_node(offset_idx+1);
assert( clone->Opcode() == use->Opcode(), "" );
// Make use-clone reference the def-clone
- catch_cleanup_fix_all_inputs(clone, def, sb->_nodes[n_clone_idx]);
+ catch_cleanup_fix_all_inputs(clone, def, sb->get_node(n_clone_idx));
}
}
//------------------------------catch_cleanup_inter_block---------------------
// Fix all input edges in use that reference "def". The use is in a different
// block than the def.
-static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
+void PhaseCFG::catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
if( !use_blk ) return; // Can happen if the use is a precedence edge
- Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, bbs, n_clone_idx);
+ Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, n_clone_idx);
catch_cleanup_fix_all_inputs(use, def, new_def);
}
//------------------------------call_catch_cleanup-----------------------------
// If we inserted any instructions between a Call and its CatchNode,
// clone the instructions on all paths below the Catch.
-void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {
+void PhaseCFG::call_catch_cleanup(Block* block) {
// End of region to clone
- uint end = end_idx();
- if( !_nodes[end]->is_Catch() ) return;
+ uint end = block->end_idx();
+ if( !block->get_node(end)->is_Catch() ) return;
// Start of region to clone
uint beg = end;
- while(!_nodes[beg-1]->is_MachProj() ||
- !_nodes[beg-1]->in(0)->is_MachCall() ) {
+ while(!block->get_node(beg-1)->is_MachProj() ||
+ !block->get_node(beg-1)->in(0)->is_MachCall() ) {
beg--;
assert(beg > 0,"Catch cleanup walking beyond block boundary");
}
@@ -1031,15 +1040,15 @@ void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {
// Clone along all Catch output paths. Clone area between the 'beg' and
// 'end' indices.
- for( uint i = 0; i < _num_succs; i++ ) {
- Block *sb = _succs[i];
+ for( uint i = 0; i < block->_num_succs; i++ ) {
+ Block *sb = block->_succs[i];
// Clone the entire area; ignoring the edge fixup for now.
for( uint j = end; j > beg; j-- ) {
// It is safe here to clone a node with anti_dependence
// since clones dominate on each path.
- Node *clone = _nodes[j-1]->clone();
- sb->_nodes.insert( 1, clone );
- bbs.map(clone->_idx,sb);
+ Node *clone = block->get_node(j-1)->clone();
+ sb->insert_node(clone, 1);
+ map_node_to_block(clone, sb);
}
}
@@ -1047,7 +1056,7 @@ void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {
// Fixup edges. Check the def-use info per cloned Node
for(uint i2 = beg; i2 < end; i2++ ) {
uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
- Node *n = _nodes[i2]; // Node that got cloned
+ Node *n = block->get_node(i2); // Node that got cloned
// Need DU safe iterator because of edge manipulation in calls.
Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
@@ -1056,18 +1065,19 @@ void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {
uint max = out->size();
for (uint j = 0; j < max; j++) {// For all users
Node *use = out->pop();
- Block *buse = bbs[use->_idx];
+ Block *buse = get_block_for_node(use);
if( use->is_Phi() ) {
for( uint k = 1; k < use->req(); k++ )
if( use->in(k) == n ) {
- Node *fixup = catch_cleanup_find_cloned_def(bbs[buse->pred(k)->_idx], n, this, bbs, n_clone_idx);
+ Block* b = get_block_for_node(buse->pred(k));
+ Node *fixup = catch_cleanup_find_cloned_def(b, n, block, n_clone_idx);
use->set_req(k, fixup);
}
} else {
- if (this == buse) {
- catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
+ if (block == buse) {
+ catch_cleanup_intra_block(use, n, block, beg, n_clone_idx);
} else {
- catch_cleanup_inter_block(use, buse, n, this, bbs, n_clone_idx);
+ catch_cleanup_inter_block(use, buse, n, block, n_clone_idx);
}
}
} // End for all users
@@ -1076,30 +1086,30 @@ void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {
// Remove the now-dead cloned ops
for(uint i3 = beg; i3 < end; i3++ ) {
- _nodes[beg]->disconnect_inputs(NULL, C);
- _nodes.remove(beg);
+ block->get_node(beg)->disconnect_inputs(NULL, C);
+ block->remove_node(beg);
}
// If the successor blocks have a CreateEx node, move it back to the top
- for(uint i4 = 0; i4 < _num_succs; i4++ ) {
- Block *sb = _succs[i4];
+ for(uint i4 = 0; i4 < block->_num_succs; i4++ ) {
+ Block *sb = block->_succs[i4];
uint new_cnt = end - beg;
// Remove any newly created, but dead, nodes.
for( uint j = new_cnt; j > 0; j-- ) {
- Node *n = sb->_nodes[j];
+ Node *n = sb->get_node(j);
if (n->outcnt() == 0 &&
(!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
n->disconnect_inputs(NULL, C);
- sb->_nodes.remove(j);
+ sb->remove_node(j);
new_cnt--;
}
}
// If any newly created nodes remain, move the CreateEx node to the top
if (new_cnt > 0) {
- Node *cex = sb->_nodes[1+new_cnt];
+ Node *cex = sb->get_node(1+new_cnt);
if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
- sb->_nodes.remove(1+new_cnt);
- sb->_nodes.insert(1,cex);
+ sb->remove_node(1+new_cnt);
+ sb->insert_node(cex, 1);
}
}
}
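Every hunk in this file follows the same refactoring: scheduling helpers move from Block onto PhaseCFG, the threaded Block_Array &bbs argument is replaced by the CFG's own node-to-block mapping, and raw _nodes indexing goes through named accessors. A toy model of the accessor shape these call sites assume (illustrative; the real declarations live in opto/block.hpp):

    #include <vector>

    struct Node { unsigned _idx; };

    struct Block {
        std::vector<Node*> _nodes;
        unsigned number_of_nodes() const             { return (unsigned)_nodes.size(); }
        Node*    get_node(unsigned i) const          { return _nodes[i]; }
        void     map_node(Node* n, unsigned i)       { _nodes[i] = n; }
        void     insert_node(Node* n, unsigned i)    { _nodes.insert(_nodes.begin() + i, n); }
        void     remove_node(unsigned i)             { _nodes.erase(_nodes.begin() + i); }
    };

    struct PhaseCFG {
        std::vector<Block*> _node_to_block;          // indexed by Node::_idx
        Block* get_block_for_node(const Node* n) const { return _node_to_block[n->_idx]; }
        void   map_node_to_block(Node* n, Block* b)    { _node_to_block[n->_idx] = b; }
    };

Hiding the mapping behind get_block_for_node/map_node_to_block is what lets the bbs parameter disappear from all of the signatures above.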
diff --git a/src/share/vm/opto/library_call.cpp b/src/share/vm/opto/library_call.cpp
index 1f4b58ebb..902ed5919 100644
--- a/src/share/vm/opto/library_call.cpp
+++ b/src/share/vm/opto/library_call.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"
+#include "trace/traceMacros.hpp"
class LibraryIntrinsic : public InlineCallGenerator {
// Extend the set of intrinsics known to the runtime:
@@ -212,6 +213,7 @@ class LibraryCallKit : public GraphKit {
void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
+ static bool klass_needs_init_guard(Node* kls);
bool inline_unsafe_allocate();
bool inline_unsafe_copyMemory();
bool inline_native_currentThread();
@@ -290,6 +292,9 @@ class LibraryCallKit : public GraphKit {
Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
bool inline_encodeISOArray();
+ bool inline_updateCRC32();
+ bool inline_updateBytesCRC32();
+ bool inline_updateByteBufferCRC32();
};
@@ -487,6 +492,12 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
is_predicted = true;
break;
+ case vmIntrinsics::_updateCRC32:
+ case vmIntrinsics::_updateBytesCRC32:
+ case vmIntrinsics::_updateByteBufferCRC32:
+ if (!UseCRC32Intrinsics) return NULL;
+ break;
+
default:
assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
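The operation these three intrinsics accelerate is the standard reflected CRC-32 (polynomial 0xEDB88320) used by java.util.zip.CRC32. A reference byte-at-a-time model, with an illustrative table name rather than the VM's:

    #include <stdint.h>

    static uint32_t crc_table[256];

    static void init_crc_table() {
        for (uint32_t n = 0; n < 256; n++) {
            uint32_t c = n;
            for (int k = 0; k < 8; k++)
                c = (c & 1) ? (0xEDB88320u ^ (c >> 1)) : (c >> 1);
            crc_table[n] = c;
        }
    }

    // Same contract as java.util.zip.CRC32.update(int crc, int b).
    static uint32_t crc32_update(uint32_t crc, uint8_t b) {
        uint32_t c = ~crc;                            // CRC state is stored inverted
        c = crc_table[(c ^ b) & 0xFF] ^ (c >> 8);
        return ~c;
    }

The intrinsified version replaces this table walk with hardware carry-less multiplication, which is why registration is gated on UseCRC32Intrinsics.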
@@ -532,7 +543,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
Compile* C = kit.C;
int nodes = C->unique();
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
char buf[1000];
const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
tty->print_cr("Intrinsic %s", str);
@@ -543,7 +554,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
// Try to inline the intrinsic.
if (kit.try_to_inline()) {
- if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ if (C->print_intrinsics() || C->print_inlining()) {
C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -559,7 +570,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
}
// The intrinsic bailed out
- if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ if (C->print_intrinsics() || C->print_inlining()) {
if (jvms->has_method()) {
// Not a root compile.
const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
@@ -581,7 +592,7 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
int nodes = C->unique();
#ifndef PRODUCT
assert(is_predicted(), "sanity");
- if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
char buf[1000];
const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
tty->print_cr("Predicate for intrinsic %s", str);
@@ -592,7 +603,7 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
Node* slow_ctl = kit.try_to_predicate();
if (!kit.failing()) {
- if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ if (C->print_intrinsics() || C->print_inlining()) {
C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -606,7 +617,7 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
}
// The intrinsic bailed out
- if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ if (C->print_intrinsics() || C->print_inlining()) {
if (jvms->has_method()) {
// Not a root compile.
const char* msg = "failed to generate predicate for intrinsic";
@@ -806,6 +817,13 @@ bool LibraryCallKit::try_to_inline() {
case vmIntrinsics::_encodeISOArray:
return inline_encodeISOArray();
+ case vmIntrinsics::_updateCRC32:
+ return inline_updateCRC32();
+ case vmIntrinsics::_updateBytesCRC32:
+ return inline_updateBytesCRC32();
+ case vmIntrinsics::_updateByteBufferCRC32:
+ return inline_updateByteBufferCRC32();
+
default:
// If you get here, it may be that someone has added a new intrinsic
// to the list in vmSymbols.hpp without implementing it here.
@@ -883,7 +901,7 @@ Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_
IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);
- Node* if_slow = _gvn.transform( new (C) IfTrueNode(iff) );
+ Node* if_slow = _gvn.transform(new (C) IfTrueNode(iff));
if (if_slow == top()) {
// The slow branch is never taken. No need to build this guard.
return NULL;
@@ -892,7 +910,7 @@ Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_
if (region != NULL)
region->add_req(if_slow);
- Node* if_fast = _gvn.transform( new (C) IfFalseNode(iff) );
+ Node* if_fast = _gvn.transform(new (C) IfFalseNode(iff));
set_control(if_fast);
return if_slow;
@@ -911,8 +929,8 @@ inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* re
return NULL; // already stopped
if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
return NULL; // index is already adequately typed
- Node* cmp_lt = _gvn.transform( new (C) CmpINode(index, intcon(0)) );
- Node* bol_lt = _gvn.transform( new (C) BoolNode(cmp_lt, BoolTest::lt) );
+ Node* cmp_lt = _gvn.transform(new (C) CmpINode(index, intcon(0)));
+ Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt));
Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
if (is_neg != NULL && pos_index != NULL) {
// Emulate effect of Parse::adjust_map_after_if.
@@ -929,9 +947,9 @@ inline Node* LibraryCallKit::generate_nonpositive_guard(Node* index, bool never_
return NULL; // already stopped
if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
return NULL; // index is already adequately typed
- Node* cmp_le = _gvn.transform( new (C) CmpINode(index, intcon(0)) );
+ Node* cmp_le = _gvn.transform(new (C) CmpINode(index, intcon(0)));
BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
- Node* bol_le = _gvn.transform( new (C) BoolNode(cmp_le, le_or_eq) );
+ Node* bol_le = _gvn.transform(new (C) BoolNode(cmp_le, le_or_eq));
Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN);
if (is_notp != NULL && pos_index != NULL) {
// Emulate effect of Parse::adjust_map_after_if.
@@ -967,9 +985,9 @@ inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
return NULL; // common case of whole-array copy
Node* last = subseq_length;
if (!zero_offset) // last += offset
- last = _gvn.transform( new (C) AddINode(last, offset));
- Node* cmp_lt = _gvn.transform( new (C) CmpUNode(array_length, last) );
- Node* bol_lt = _gvn.transform( new (C) BoolNode(cmp_lt, BoolTest::lt) );
+ last = _gvn.transform(new (C) AddINode(last, offset));
+ Node* cmp_lt = _gvn.transform(new (C) CmpUNode(array_length, last));
+ Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt));
Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
return is_over;
}
@@ -1150,8 +1168,8 @@ bool LibraryCallKit::inline_string_equals() {
Node* argument_cnt = load_String_length(no_ctrl, argument);
// Check for receiver count != argument count
- Node* cmp = _gvn.transform( new(C) CmpINode(receiver_cnt, argument_cnt) );
- Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::ne) );
+ Node* cmp = _gvn.transform(new(C) CmpINode(receiver_cnt, argument_cnt));
+ Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::ne));
Node* if_ne = generate_slow_guard(bol, NULL);
if (if_ne != NULL) {
phi->init_req(4, intcon(0));
@@ -1257,11 +1275,16 @@ Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_ar
Node* sourceOffset = load_String_offset(no_ctrl, string_object);
Node* sourceCount = load_String_length(no_ctrl, string_object);
- Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)) );
+ Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)));
jint target_length = target_array->length();
const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
+ // String.value field is known to be @Stable.
+ if (UseImplicitStableValues) {
+ target = cast_array_to_stable(target, target_type);
+ }
+
IdealKit kit(this, false, true);
#define __ kit.
Node* zero = __ ConI(0);
@@ -1364,8 +1387,8 @@ bool LibraryCallKit::inline_string_indexOf() {
Node* substr_cnt = load_String_length(no_ctrl, arg);
// Check for substr count > string count
- Node* cmp = _gvn.transform( new(C) CmpINode(substr_cnt, source_cnt) );
- Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::gt) );
+ Node* cmp = _gvn.transform(new(C) CmpINode(substr_cnt, source_cnt));
+ Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::gt));
Node* if_gt = generate_slow_guard(bol, NULL);
if (if_gt != NULL) {
result_phi->init_req(2, intcon(-1));
@@ -1374,8 +1397,8 @@ bool LibraryCallKit::inline_string_indexOf() {
if (!stopped()) {
// Check for substr count == 0
- cmp = _gvn.transform( new(C) CmpINode(substr_cnt, intcon(0)) );
- bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) );
+ cmp = _gvn.transform(new(C) CmpINode(substr_cnt, intcon(0)));
+ bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq));
Node* if_zero = generate_slow_guard(bol, NULL);
if (if_zero != NULL) {
result_phi->init_req(3, intcon(0));
@@ -1551,7 +1574,7 @@ bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
// Check PI/4 : abs(arg)
Node *cmp = _gvn.transform(new (C) CmpDNode(pi4,abs));
// Check: If PI/4 < abs(arg) then go slow
- Node *bol = _gvn.transform( new (C) BoolNode( cmp, BoolTest::lt ) );
+ Node *bol = _gvn.transform(new (C) BoolNode( cmp, BoolTest::lt ));
// Branch either way
IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
set_control(opt_iff(r,iff));
@@ -1616,8 +1639,8 @@ void LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFu
// to the runtime to properly handle corner cases
IfNode* iff = create_and_xform_if(control(), bolisnum, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
- Node* if_slow = _gvn.transform( new (C) IfFalseNode(iff) );
- Node* if_fast = _gvn.transform( new (C) IfTrueNode(iff) );
+ Node* if_slow = _gvn.transform(new (C) IfFalseNode(iff));
+ Node* if_fast = _gvn.transform(new (C) IfTrueNode(iff));
if (!if_slow->is_top()) {
RegionNode* result_region = new (C) RegionNode(3);
@@ -1703,42 +1726,42 @@ bool LibraryCallKit::inline_pow() {
// Check x:0
Node *cmp = _gvn.transform(new (C) CmpDNode(x, zeronode));
// Check: If (x<=0) then go complex path
- Node *bol1 = _gvn.transform( new (C) BoolNode( cmp, BoolTest::le ) );
+ Node *bol1 = _gvn.transform(new (C) BoolNode( cmp, BoolTest::le ));
// Branch either way
IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
// Fast path taken; set region slot 3
- Node *fast_taken = _gvn.transform( new (C) IfFalseNode(if1) );
+ Node *fast_taken = _gvn.transform(new (C) IfFalseNode(if1));
r->init_req(3,fast_taken); // Capture fast-control
// Fast path not-taken, i.e. slow path
- Node *complex_path = _gvn.transform( new (C) IfTrueNode(if1) );
+ Node *complex_path = _gvn.transform(new (C) IfTrueNode(if1));
// Set fast path result
- Node *fast_result = _gvn.transform( new (C) PowDNode(C, control(), x, y) );
+ Node *fast_result = _gvn.transform(new (C) PowDNode(C, control(), x, y));
phi->init_req(3, fast_result);
// Complex path
// Build the second if node (if y is long)
// Node for (long)y
- Node *longy = _gvn.transform( new (C) ConvD2LNode(y));
+ Node *longy = _gvn.transform(new (C) ConvD2LNode(y));
// Node for (double)((long) y)
- Node *doublelongy= _gvn.transform( new (C) ConvL2DNode(longy));
+ Node *doublelongy= _gvn.transform(new (C) ConvL2DNode(longy));
// Check (double)((long) y) : y
Node *cmplongy= _gvn.transform(new (C) CmpDNode(doublelongy, y));
// Check if (y isn't long) then go to slow path
- Node *bol2 = _gvn.transform( new (C) BoolNode( cmplongy, BoolTest::ne ) );
+ Node *bol2 = _gvn.transform(new (C) BoolNode( cmplongy, BoolTest::ne ));
// Branch either way
IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
- Node* ylong_path = _gvn.transform( new (C) IfFalseNode(if2));
+ Node* ylong_path = _gvn.transform(new (C) IfFalseNode(if2));
- Node *slow_path = _gvn.transform( new (C) IfTrueNode(if2) );
+ Node *slow_path = _gvn.transform(new (C) IfTrueNode(if2));
// Calculate DPow(abs(x), y)*(1 & (long)y)
// Node for constant 1
Node *conone = longcon(1);
// 1& (long)y
- Node *signnode= _gvn.transform( new (C) AndLNode(conone, longy) );
+ Node *signnode= _gvn.transform(new (C) AndLNode(conone, longy));
// A huge number is always even. Detect a huge number by checking
// if y + 1 == y and set integer to be tested for parity to 0.
@@ -1746,9 +1769,9 @@ bool LibraryCallKit::inline_pow() {
// (long)9.223372036854776E18 = max_jlong
// (double)(long)9.223372036854776E18 = 9.223372036854776E18
// max_jlong is odd but 9.223372036854776E18 is even
- Node* yplus1 = _gvn.transform( new (C) AddDNode(y, makecon(TypeD::make(1))));
+ Node* yplus1 = _gvn.transform(new (C) AddDNode(y, makecon(TypeD::make(1))));
Node *cmpyplus1= _gvn.transform(new (C) CmpDNode(yplus1, y));
- Node *bolyplus1 = _gvn.transform( new (C) BoolNode( cmpyplus1, BoolTest::eq ) );
+ Node *bolyplus1 = _gvn.transform(new (C) BoolNode( cmpyplus1, BoolTest::eq ));
Node* correctedsign = NULL;
if (ConditionalMoveLimit != 0) {
correctedsign = _gvn.transform( CMoveNode::make(C, NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG));
@@ -1756,8 +1779,8 @@ bool LibraryCallKit::inline_pow() {
IfNode *ifyplus1 = create_and_xform_if(ylong_path,bolyplus1, PROB_FAIR, COUNT_UNKNOWN);
RegionNode *r = new (C) RegionNode(3);
Node *phi = new (C) PhiNode(r, TypeLong::LONG);
- r->init_req(1, _gvn.transform( new (C) IfFalseNode(ifyplus1)));
- r->init_req(2, _gvn.transform( new (C) IfTrueNode(ifyplus1)));
+ r->init_req(1, _gvn.transform(new (C) IfFalseNode(ifyplus1)));
+ r->init_req(2, _gvn.transform(new (C) IfTrueNode(ifyplus1)));
phi->init_req(1, signnode);
phi->init_req(2, longcon(0));
correctedsign = _gvn.transform(phi);
@@ -1770,11 +1793,11 @@ bool LibraryCallKit::inline_pow() {
// Check (1&(long)y)==0?
Node *cmpeq1 = _gvn.transform(new (C) CmpLNode(correctedsign, conzero));
// Check if (1&(long)y)!=0?, if so the result is negative
- Node *bol3 = _gvn.transform( new (C) BoolNode( cmpeq1, BoolTest::ne ) );
+ Node *bol3 = _gvn.transform(new (C) BoolNode( cmpeq1, BoolTest::ne ));
// abs(x)
- Node *absx=_gvn.transform( new (C) AbsDNode(x));
+ Node *absx=_gvn.transform(new (C) AbsDNode(x));
// abs(x)^y
- Node *absxpowy = _gvn.transform( new (C) PowDNode(C, control(), absx, y) );
+ Node *absxpowy = _gvn.transform(new (C) PowDNode(C, control(), absx, y));
// -abs(x)^y
Node *negabsxpowy = _gvn.transform(new (C) NegDNode (absxpowy));
// (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
@@ -1785,8 +1808,8 @@ bool LibraryCallKit::inline_pow() {
IfNode *ifyeven = create_and_xform_if(ylong_path,bol3, PROB_FAIR, COUNT_UNKNOWN);
RegionNode *r = new (C) RegionNode(3);
Node *phi = new (C) PhiNode(r, Type::DOUBLE);
- r->init_req(1, _gvn.transform( new (C) IfFalseNode(ifyeven)));
- r->init_req(2, _gvn.transform( new (C) IfTrueNode(ifyeven)));
+ r->init_req(1, _gvn.transform(new (C) IfFalseNode(ifyeven)));
+ r->init_req(2, _gvn.transform(new (C) IfTrueNode(ifyeven)));
phi->init_req(1, absxpowy);
phi->init_req(2, negabsxpowy);
signresult = _gvn.transform(phi);
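The sign machinery above implements the real-domain rule for a non-positive base: when y is an exact integer, pow(x, y) equals pow(|x|, y) with the sign given by y's parity, and a double so large that y + 1 == y is necessarily an even integer. A scalar model of that fast path (simplified; the slow path still handles non-integral y and the other corner cases):

    #include <cmath>

    double pow_nonpositive_base(double x, double y) {
        long long odd;
        if (y + 1 == y) {                     // huge doubles are always even integers
            odd = 0;
        } else {
            long long ly = (long long)y;      // |y| < 2^53 here, so the cast is safe
            if ((double)ly != y)
                return std::nan("");          // y not an integer: result is complex
            odd = ly & 1;                     // the (1 & (long)y) parity test
        }
        double r = std::pow(std::fabs(x), y);
        return odd ? -r : r;
    }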
@@ -1919,7 +1942,7 @@ LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
int cmp_op = Op_CmpI;
Node* xkey = xvalue;
Node* ykey = yvalue;
- Node* ideal_cmpxy = _gvn.transform( new(C) CmpINode(xkey, ykey) );
+ Node* ideal_cmpxy = _gvn.transform(new(C) CmpINode(xkey, ykey));
if (ideal_cmpxy->is_Cmp()) {
// E.g., if we have CmpI(length - offset, count),
// it might idealize to CmpI(length, count + offset)
@@ -2012,7 +2035,7 @@ LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
default:
if (cmpxy == NULL)
cmpxy = ideal_cmpxy;
- best_bol = _gvn.transform( new(C) BoolNode(cmpxy, BoolTest::lt) );
+ best_bol = _gvn.transform(new(C) BoolNode(cmpxy, BoolTest::lt));
// and fall through:
case BoolTest::lt: // x < y
case BoolTest::le: // x <= y
@@ -2072,7 +2095,7 @@ LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset) {
return Type::AnyPtr;
} else if (base_type == TypePtr::NULL_PTR) {
// Since this is a NULL+long form, we have to switch to a rawptr.
- base = _gvn.transform( new (C) CastX2PNode(offset) );
+ base = _gvn.transform(new (C) CastX2PNode(offset));
offset = MakeConX(0);
return Type::RawPtr;
} else if (base_type->base() == Type::RawPtr) {
@@ -2276,7 +2299,7 @@ const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_
const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
#ifndef PRODUCT
- if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+ if (C->print_intrinsics() || C->print_inlining()) {
tty->print(" from base type: "); adr_type->dump();
tty->print(" sharpened value: "); tjp->dump();
}
@@ -2466,7 +2489,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
case T_ADDRESS:
// Repackage the long as a pointer.
val = ConvL2X(val);
- val = _gvn.transform( new (C) CastX2PNode(val) );
+ val = _gvn.transform(new (C) CastX2PNode(val));
break;
}
@@ -2738,10 +2761,28 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
newval = _gvn.makecon(TypePtr::NULL_PTR);
// Reference stores need a store barrier.
- pre_barrier(true /* do_load*/,
- control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
- NULL /* pre_val*/,
- T_OBJECT);
+ if (kind == LS_xchg) {
+ // If pre-barrier must execute before the oop store, old value will require do_load here.
+ if (!can_move_pre_barrier()) {
+ pre_barrier(true /* do_load*/,
+ control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
+ NULL /* pre_val*/,
+ T_OBJECT);
+ } // Else move pre_barrier to use load_store value, see below.
+ } else if (kind == LS_cmpxchg) {
+ // Same as for newval above:
+ if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
+ oldval = _gvn.makecon(TypePtr::NULL_PTR);
+ }
+ // The only known value which might get overwritten is oldval.
+ pre_barrier(false /* do_load */,
+ control(), NULL, NULL, max_juint, NULL, NULL,
+ oldval /* pre_val */,
+ T_OBJECT);
+ } else {
+ ShouldNotReachHere();
+ }
+
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
@@ -2774,19 +2815,30 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
// SCMemProjNodes represent the memory state of a LoadStore. Their
// main role is to prevent LoadStore nodes from being optimized away
// when their results aren't used.
- Node* proj = _gvn.transform( new (C) SCMemProjNode(load_store));
+ Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
set_memory(proj, alias_idx);
+ if (type == T_OBJECT && kind == LS_xchg) {
+#ifdef _LP64
+ if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+ load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
+ }
+#endif
+ if (can_move_pre_barrier()) {
+ // Don't need to load pre_val. The old value is returned by load_store.
+ // The pre_barrier can execute after the xchg as long as no safepoint
+ // gets inserted between them.
+ pre_barrier(false /* do_load */,
+ control(), NULL, NULL, max_juint, NULL, NULL,
+ load_store /* pre_val */,
+ T_OBJECT);
+ }
+ }
+
// Add the trailing membar surrounding the access
insert_mem_bar(Op_MemBarCPUOrder);
insert_mem_bar(Op_MemBarAcquire);
-#ifdef _LP64
- if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
- load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->bottom_type()->make_ptr()));
- }
-#endif
-
assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
set_result(load_store);
return true;
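The restructuring above is about where the G1 pre-barrier may run. For an exchange, the old value is produced by the operation itself, so when the collector allows moving the barrier no separate load is needed; for a compare-and-swap, the only value that can be overwritten is the expected one, so it can be recorded up front. A toy model with std::atomic, where satb_enqueue stands in for the pre-barrier (illustrative, not the VM's interface):

    #include <atomic>

    void satb_enqueue(void*) { /* records the old value in the real VM */ }

    void* xchg_with_barrier(std::atomic<void*>& field, void* new_value) {
        void* old_value = field.exchange(new_value);
        satb_enqueue(old_value);              // old value comes from the xchg itself
        return old_value;
    }

    bool cmpxchg_with_barrier(std::atomic<void*>& field, void* expected, void* new_value) {
        satb_enqueue(expected);               // only 'expected' can be overwritten
        return field.compare_exchange_strong(expected, new_value);
    }

The one constraint the comment above calls out: no safepoint may separate the xchg from its deferred barrier.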
@@ -2875,8 +2927,21 @@ bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
}
}
+bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
+ if (!kls->is_Con()) {
+ return true;
+ }
+ const TypeKlassPtr* klsptr = kls->bottom_type()->isa_klassptr();
+ if (klsptr == NULL) {
+ return true;
+ }
+ ciInstanceKlass* ik = klsptr->klass()->as_instance_klass();
+ // don't need a guard for a klass that is already initialized
+ return !ik->is_initialized();
+}
+
//----------------------------inline_unsafe_allocate---------------------------
-// public native Object sun.mics.Unsafe.allocateInstance(Class<?> cls);
+// public native Object sun.misc.Unsafe.allocateInstance(Class<?> cls);
bool LibraryCallKit::inline_unsafe_allocate() {
if (callee()->is_static()) return false; // caller must have the capability!
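klass_needs_init_guard is a purely compile-time question: only a constant klass that has already finished initialization lets the JIT drop the runtime _init_state check and pass test == NULL to new_instance. A toy decision function with stand-in types (the real one consults TypeKlassPtr and ciInstanceKlass as shown above):

    struct ToyKlass { bool is_compile_time_constant; bool is_initialized; };

    // Mirrors the shape of klass_needs_init_guard: stay conservative unless
    // the klass is a known constant whose <clinit> has already run.
    bool needs_init_guard(const ToyKlass* k) {
        if (k == nullptr || !k->is_compile_time_constant) return true;
        return !k->is_initialized;
    }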
@@ -2888,16 +2953,19 @@ bool LibraryCallKit::inline_unsafe_allocate() {
kls = null_check(kls);
if (stopped()) return true; // argument was like int.class
- // Note: The argument might still be an illegal value like
- // Serializable.class or Object[].class. The runtime will handle it.
- // But we must make an explicit check for initialization.
- Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
- // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
- // can generate code to load it as unsigned byte.
- Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN);
- Node* bits = intcon(InstanceKlass::fully_initialized);
- Node* test = _gvn.transform(new (C) SubINode(inst, bits));
- // The 'test' is non-zero if we need to take a slow path.
+ Node* test = NULL;
+ if (LibraryCallKit::klass_needs_init_guard(kls)) {
+ // Note: The argument might still be an illegal value like
+ // Serializable.class or Object[].class. The runtime will handle it.
+ // But we must make an explicit check for initialization.
+ Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
+ // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
+ // can generate code to load it as unsigned byte.
+ Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN);
+ Node* bits = intcon(InstanceKlass::fully_initialized);
+ test = _gvn.transform(new (C) SubINode(inst, bits));
+ // The 'test' is non-zero if we need to take a slow path.
+ }
Node* obj = new_instance(kls, test);
set_result(obj);
@@ -3009,8 +3077,8 @@ bool LibraryCallKit::inline_native_isInterrupted() {
Node* rec_thr = argument(0);
Node* tls_ptr = NULL;
Node* cur_thr = generate_current_thread(tls_ptr);
- Node* cmp_thr = _gvn.transform( new (C) CmpPNode(cur_thr, rec_thr) );
- Node* bol_thr = _gvn.transform( new (C) BoolNode(cmp_thr, BoolTest::ne) );
+ Node* cmp_thr = _gvn.transform(new (C) CmpPNode(cur_thr, rec_thr));
+ Node* bol_thr = _gvn.transform(new (C) BoolNode(cmp_thr, BoolTest::ne));
generate_slow_guard(bol_thr, slow_region);
@@ -3021,36 +3089,36 @@ bool LibraryCallKit::inline_native_isInterrupted() {
// Set the control input on the field _interrupted read to prevent it floating up.
Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT);
- Node* cmp_bit = _gvn.transform( new (C) CmpINode(int_bit, intcon(0)) );
- Node* bol_bit = _gvn.transform( new (C) BoolNode(cmp_bit, BoolTest::ne) );
+ Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0)));
+ Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne));
IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
// First fast path: if (!TLS._interrupted) return false;
- Node* false_bit = _gvn.transform( new (C) IfFalseNode(iff_bit) );
+ Node* false_bit = _gvn.transform(new (C) IfFalseNode(iff_bit));
result_rgn->init_req(no_int_result_path, false_bit);
result_val->init_req(no_int_result_path, intcon(0));
// drop through to next case
- set_control( _gvn.transform(new (C) IfTrueNode(iff_bit)) );
+ set_control( _gvn.transform(new (C) IfTrueNode(iff_bit)));
// (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path.
Node* clr_arg = argument(1);
- Node* cmp_arg = _gvn.transform( new (C) CmpINode(clr_arg, intcon(0)) );
- Node* bol_arg = _gvn.transform( new (C) BoolNode(cmp_arg, BoolTest::ne) );
+ Node* cmp_arg = _gvn.transform(new (C) CmpINode(clr_arg, intcon(0)));
+ Node* bol_arg = _gvn.transform(new (C) BoolNode(cmp_arg, BoolTest::ne));
IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN);
// Second fast path: ... else if (!clear_int) return true;
- Node* false_arg = _gvn.transform( new (C) IfFalseNode(iff_arg) );
+ Node* false_arg = _gvn.transform(new (C) IfFalseNode(iff_arg));
result_rgn->init_req(no_clear_result_path, false_arg);
result_val->init_req(no_clear_result_path, intcon(1));
// drop through to next case
- set_control( _gvn.transform(new (C) IfTrueNode(iff_arg)) );
+ set_control( _gvn.transform(new (C) IfTrueNode(iff_arg)));
// (d) Otherwise, go to the slow path.
slow_region->add_req(control());
- set_control( _gvn.transform(slow_region) );
+ set_control( _gvn.transform(slow_region));
if (stopped()) {
// There is no slow path.
@@ -3106,7 +3174,7 @@ Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
if (region == NULL) never_see_null = true;
Node* p = basic_plus_adr(mirror, offset);
const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
- Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type) );
+ Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
Node* null_ctl = top();
kls = null_check_oop(kls, &null_ctl, never_see_null);
if (region != NULL) {
@@ -3128,9 +3196,9 @@ Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask,
Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT);
Node* mask = intcon(modifier_mask);
Node* bits = intcon(modifier_bits);
- Node* mbit = _gvn.transform( new (C) AndINode(mods, mask) );
- Node* cmp = _gvn.transform( new (C) CmpINode(mbit, bits) );
- Node* bol = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ne) );
+ Node* mbit = _gvn.transform(new (C) AndINode(mods, mask));
+ Node* cmp = _gvn.transform(new (C) CmpINode(mbit, bits));
+ Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
return generate_fair_guard(bol, region);
}
Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
@@ -3192,7 +3260,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
if (mirror_con == NULL) return false; // cannot happen?
#ifndef PRODUCT
- if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+ if (C->print_intrinsics() || C->print_inlining()) {
ciType* k = mirror_con->java_mirror_type();
if (k) {
tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
@@ -3281,7 +3349,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
// If we fall through, it's a plain class. Get its _super.
p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
- kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL) );
+ kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
null_ctl = top();
kls = null_check_oop(kls, &null_ctl);
if (null_ctl != top()) {
@@ -3394,8 +3462,8 @@ bool LibraryCallKit::inline_native_subtype_check() {
set_control(region->in(_prim_0_path)); // go back to first null check
if (!stopped()) {
// Since superc is primitive, make a guard for the superc==subc case.
- Node* cmp_eq = _gvn.transform( new (C) CmpPNode(args[0], args[1]) );
- Node* bol_eq = _gvn.transform( new (C) BoolNode(cmp_eq, BoolTest::eq) );
+ Node* cmp_eq = _gvn.transform(new (C) CmpPNode(args[0], args[1]));
+ Node* bol_eq = _gvn.transform(new (C) BoolNode(cmp_eq, BoolTest::eq));
generate_guard(bol_eq, region, PROB_FAIR);
if (region->req() == PATH_LIMIT+1) {
// A guard was added. If the added guard is taken, superc==subc.
@@ -3460,11 +3528,11 @@ Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
? ((jint)Klass::_lh_array_tag_type_value
<< Klass::_lh_array_tag_shift)
: Klass::_lh_neutral_value);
- Node* cmp = _gvn.transform( new(C) CmpINode(layout_val, intcon(nval)) );
+ Node* cmp = _gvn.transform(new(C) CmpINode(layout_val, intcon(nval)));
BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
// invert the test if we are looking for a non-array
if (not_array) btest = BoolTest(btest).negate();
- Node* bol = _gvn.transform( new(C) BoolNode(cmp, btest) );
+ Node* bol = _gvn.transform(new(C) BoolNode(cmp, btest));
return generate_fair_guard(bol, region);
}
@@ -3524,7 +3592,7 @@ bool LibraryCallKit::inline_native_newArray() {
// Return the combined state.
set_i_o( _gvn.transform(result_io) );
- set_all_memory( _gvn.transform(result_mem) );
+ set_all_memory( _gvn.transform(result_mem));
C->set_has_split_ifs(true); // Has chance for split-if optimization
set_result(result_reg, result_val);
@@ -3666,6 +3734,8 @@ Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
RegionNode* slow_region) {
ciMethod* method = callee();
int vtable_index = method->vtable_index();
+ assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
+ err_msg_res("bad index %d", vtable_index));
// Get the Method* out of the appropriate vtable entry.
int entry_offset = (InstanceKlass::vtable_start_offset() +
vtable_index*vtableEntry::size()) * wordSize +
@@ -3677,8 +3747,8 @@ Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
Node* native_call = makecon(native_call_addr);
- Node* chk_native = _gvn.transform( new(C) CmpPNode(target_call, native_call) );
- Node* test_native = _gvn.transform( new(C) BoolNode(chk_native, BoolTest::ne) );
+ Node* chk_native = _gvn.transform(new(C) CmpPNode(target_call, native_call));
+ Node* test_native = _gvn.transform(new(C) BoolNode(chk_native, BoolTest::ne));
return generate_slow_guard(test_native, slow_region);
}
@@ -3703,7 +3773,7 @@ LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual
CallJavaNode* slow_call;
if (is_static) {
assert(!is_virtual, "");
- slow_call = new(C) CallStaticJavaNode(tf,
+ slow_call = new(C) CallStaticJavaNode(C, tf,
SharedRuntime::get_resolve_static_call_stub(),
method, bci());
} else if (is_virtual) {
@@ -3716,13 +3786,15 @@ LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual
// so the vtable index is fixed.
// No need to use the linkResolver to get it.
vtable_index = method->vtable_index();
+ assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
+ err_msg_res("bad index %d", vtable_index));
}
slow_call = new(C) CallDynamicJavaNode(tf,
SharedRuntime::get_resolve_virtual_call_stub(),
method, vtable_index, bci());
} else { // neither virtual nor static: opt_virtual
null_check_receiver();
- slow_call = new(C) CallStaticJavaNode(tf,
+ slow_call = new(C) CallStaticJavaNode(C, tf,
SharedRuntime::get_resolve_opt_virtual_call_stub(),
method, bci());
slow_call->set_optimized_virtual(true);
@@ -3799,10 +3871,10 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
// Test the header to see if it is unlocked.
Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
- Node *lmasked_header = _gvn.transform( new (C) AndXNode(header, lock_mask) );
+ Node *lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask));
Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value);
- Node *chk_unlocked = _gvn.transform( new (C) CmpXNode( lmasked_header, unlocked_val));
- Node *test_unlocked = _gvn.transform( new (C) BoolNode( chk_unlocked, BoolTest::ne) );
+ Node *chk_unlocked = _gvn.transform(new (C) CmpXNode( lmasked_header, unlocked_val));
+ Node *test_unlocked = _gvn.transform(new (C) BoolNode( chk_unlocked, BoolTest::ne));
generate_slow_guard(test_unlocked, slow_region);
@@ -3812,17 +3884,17 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
// vm: see markOop.hpp.
Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask);
Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift);
- Node *hshifted_header= _gvn.transform( new (C) URShiftXNode(header, hash_shift) );
+ Node *hshifted_header= _gvn.transform(new (C) URShiftXNode(header, hash_shift));
// This hack lets the hash bits live anywhere in the mark object now, as long
// as the shift drops the relevant bits into the low 32 bits. Note that
// Java spec says that HashCode is an int so there's no point in capturing
// an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
hshifted_header = ConvX2I(hshifted_header);
- Node *hash_val = _gvn.transform( new (C) AndINode(hshifted_header, hash_mask) );
+ Node *hash_val = _gvn.transform(new (C) AndINode(hshifted_header, hash_mask));
Node *no_hash_val = _gvn.intcon(markOopDesc::no_hash);
- Node *chk_assigned = _gvn.transform( new (C) CmpINode( hash_val, no_hash_val));
- Node *test_assigned = _gvn.transform( new (C) BoolNode( chk_assigned, BoolTest::eq) );
+ Node *chk_assigned = _gvn.transform(new (C) CmpINode( hash_val, no_hash_val));
+ Node *test_assigned = _gvn.transform(new (C) BoolNode( chk_assigned, BoolTest::eq));
generate_slow_guard(test_assigned, slow_region);
@@ -3853,7 +3925,7 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
// Return the combined state.
set_i_o( _gvn.transform(result_io) );
- set_all_memory( _gvn.transform(result_mem) );
+ set_all_memory( _gvn.transform(result_mem));
set_result(result_reg, result_val);
return true;
@@ -3880,14 +3952,14 @@ bool LibraryCallKit::inline_native_getClass() {
// caller sensitive methods.
bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
}
#endif
if (!jvms()->has_method()) {
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Bailing out because intrinsic was inlined at top level");
}
#endif
@@ -3911,7 +3983,7 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
// Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
if (!m->caller_sensitive()) {
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n);
}
#endif
@@ -3927,7 +3999,7 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
set_result(makecon(TypeInstPtr::make(caller_mirror)));
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
tty->print_cr(" JVM state at this point:");
for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
@@ -3943,7 +4015,7 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
}
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
tty->print_cr(" JVM state at this point:");
for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
@@ -3981,7 +4053,7 @@ bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
Node *opt_isnan = _gvn.transform(ifisnan);
assert( opt_isnan->is_If(), "Expect an IfNode");
IfNode *opt_ifisnan = (IfNode*)opt_isnan;
- Node *iftrue = _gvn.transform( new (C) IfTrueNode(opt_ifisnan) );
+ Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan));
set_control(iftrue);
@@ -4022,7 +4094,7 @@ bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
Node *opt_isnan = _gvn.transform(ifisnan);
assert( opt_isnan->is_If(), "Expect an IfNode");
IfNode *opt_ifisnan = (IfNode*)opt_isnan;
- Node *iftrue = _gvn.transform( new (C) IfTrueNode(opt_ifisnan) );
+ Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan));
set_control(iftrue);
@@ -4136,7 +4208,7 @@ void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, b
// 12 - 64-bit VM, compressed klass
// 16 - 64-bit VM, normal klass
if (base_off % BytesPerLong != 0) {
- assert(UseCompressedKlassPointers, "");
+ assert(UseCompressedClassPointers, "");
if (is_array) {
// Exclude length to copy by 8 bytes words.
base_off += sizeof(int);
@@ -4151,8 +4223,8 @@ void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, b
// Compute the length also, if needed:
Node* countx = size;
- countx = _gvn.transform( new (C) SubXNode(countx, MakeConX(base_off)) );
- countx = _gvn.transform( new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
+ countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
+ countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
bool disjoint_bases = true;
@@ -4356,9 +4428,9 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
}
// Return the combined state.
- set_control( _gvn.transform(result_reg) );
- set_i_o( _gvn.transform(result_i_o) );
- set_all_memory( _gvn.transform(result_mem) );
+ set_control( _gvn.transform(result_reg));
+ set_i_o( _gvn.transform(result_i_o));
+ set_all_memory( _gvn.transform(result_mem));
} // original reexecute is set back here
set_result(_gvn.transform(result_val));
@@ -4683,8 +4755,8 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
// are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
Node* dest_size = alloc->in(AllocateNode::AllocSize);
Node* dest_length = alloc->in(AllocateNode::ALength);
- Node* dest_tail = _gvn.transform( new(C) AddINode(dest_offset,
- copy_length) );
+ Node* dest_tail = _gvn.transform(new(C) AddINode(dest_offset,
+ copy_length));
// If there is a head section that needs zeroing, do it now.
if (find_int_con(dest_offset, -1) != 0) {
@@ -4700,8 +4772,8 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
// the copy to a more hardware-friendly word size of 64 bits.
Node* tail_ctl = NULL;
if (!stopped() && !dest_tail->eqv_uncast(dest_length)) {
- Node* cmp_lt = _gvn.transform( new(C) CmpINode(dest_tail, dest_length) );
- Node* bol_lt = _gvn.transform( new(C) BoolNode(cmp_lt, BoolTest::lt) );
+ Node* cmp_lt = _gvn.transform(new(C) CmpINode(dest_tail, dest_length));
+ Node* bol_lt = _gvn.transform(new(C) BoolNode(cmp_lt, BoolTest::lt));
tail_ctl = generate_slow_guard(bol_lt, NULL);
assert(tail_ctl != NULL || !stopped(), "must be an outcome");
}
@@ -4744,7 +4816,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
dest_size);
done_ctl->init_req(2, control());
done_mem->init_req(2, memory(adr_type));
- set_control( _gvn.transform(done_ctl) );
+ set_control( _gvn.transform(done_ctl));
set_memory( _gvn.transform(done_mem), adr_type );
}
}
@@ -4831,18 +4903,18 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
// Clean up after the checked call.
// The returned value is either 0 or -1^K,
// where K = number of partially transferred array elements.
- Node* cmp = _gvn.transform( new(C) CmpINode(checked_value, intcon(0)) );
- Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) );
+ Node* cmp = _gvn.transform(new(C) CmpINode(checked_value, intcon(0)));
+ Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq));
IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
// If it is 0, we are done, so transfer to the end.
- Node* checks_done = _gvn.transform( new(C) IfTrueNode(iff) );
+ Node* checks_done = _gvn.transform(new(C) IfTrueNode(iff));
result_region->init_req(checked_path, checks_done);
result_i_o ->init_req(checked_path, checked_i_o);
result_memory->init_req(checked_path, checked_mem);
// If it is not zero, merge into the slow call.
- set_control( _gvn.transform( new(C) IfFalseNode(iff) ));
+ set_control( _gvn.transform(new(C) IfFalseNode(iff) ));
RegionNode* slow_reg2 = new(C) RegionNode(3);
PhiNode* slow_i_o2 = new(C) PhiNode(slow_reg2, Type::ABIO);
PhiNode* slow_mem2 = new(C) PhiNode(slow_reg2, Type::MEMORY, adr_type);
@@ -4865,16 +4937,16 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
} else {
// We must continue the copy exactly where it failed, or else
// another thread might see the wrong number of writes to dest.
- Node* checked_offset = _gvn.transform( new(C) XorINode(checked_value, intcon(-1)) );
+ Node* checked_offset = _gvn.transform(new(C) XorINode(checked_value, intcon(-1)));
Node* slow_offset = new(C) PhiNode(slow_reg2, TypeInt::INT);
slow_offset->init_req(1, intcon(0));
slow_offset->init_req(2, checked_offset);
slow_offset = _gvn.transform(slow_offset);
// Adjust the arguments by the conditionally incoming offset.
- Node* src_off_plus = _gvn.transform( new(C) AddINode(src_offset, slow_offset) );
- Node* dest_off_plus = _gvn.transform( new(C) AddINode(dest_offset, slow_offset) );
- Node* length_minus = _gvn.transform( new(C) SubINode(copy_length, slow_offset) );
+ Node* src_off_plus = _gvn.transform(new(C) AddINode(src_offset, slow_offset));
+ Node* dest_off_plus = _gvn.transform(new(C) AddINode(dest_offset, slow_offset));
+ Node* length_minus = _gvn.transform(new(C) SubINode(copy_length, slow_offset));
// Tweak the node variables to adjust the code produced below:
src_offset = src_off_plus;
@@ -4913,7 +4985,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
}
// Finished; return the combined state.
- set_control( _gvn.transform(result_region) );
+ set_control( _gvn.transform(result_region));
set_i_o( _gvn.transform(result_i_o) );
set_memory( _gvn.transform(result_memory), adr_type );
@@ -5095,10 +5167,10 @@ LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
int end_round = (-1 << scale) & (BytesPerLong - 1);
Node* end = ConvI2X(slice_len);
if (scale != 0)
- end = _gvn.transform( new(C) LShiftXNode(end, intcon(scale) ));
+ end = _gvn.transform(new(C) LShiftXNode(end, intcon(scale)));
end_base += end_round;
- end = _gvn.transform( new(C) AddXNode(end, MakeConX(end_base)) );
- end = _gvn.transform( new(C) AndXNode(end, MakeConX(~end_round)) );
+ end = _gvn.transform(new(C) AddXNode(end, MakeConX(end_base)));
+ end = _gvn.transform(new(C) AndXNode(end, MakeConX(~end_round)));
mem = ClearArrayNode::clear_memory(control(), mem, dest,
start_con, end, &_gvn);
} else if (start_con < 0 && dest_size != top()) {
@@ -5107,8 +5179,8 @@ LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
Node* start = slice_idx;
start = ConvI2X(start);
if (scale != 0)
- start = _gvn.transform( new(C) LShiftXNode( start, intcon(scale) ));
- start = _gvn.transform( new(C) AddXNode(start, MakeConX(abase)) );
+ start = _gvn.transform(new(C) LShiftXNode(start, intcon(scale)));
+ start = _gvn.transform(new(C) AddXNode(start, MakeConX(abase)));
if ((bump_bit | clear_low) != 0) {
int to_clear = (bump_bit | clear_low);
// Align up mod 8, then store a jint zero unconditionally
@@ -5119,14 +5191,14 @@ LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
assert((abase & to_clear) == 0, "array base must be long-aligned");
} else {
// Bump 'start' up to (or past) the next jint boundary:
- start = _gvn.transform( new(C) AddXNode(start, MakeConX(bump_bit)) );
+ start = _gvn.transform(new(C) AddXNode(start, MakeConX(bump_bit)));
assert((abase & clear_low) == 0, "array base must be int-aligned");
}
// Round bumped 'start' down to jlong boundary in body of array.
- start = _gvn.transform( new(C) AndXNode(start, MakeConX(~to_clear)) );
+ start = _gvn.transform(new(C) AndXNode(start, MakeConX(~to_clear)));
if (bump_bit != 0) {
// Store a zero to the immediately preceding jint:
- Node* x1 = _gvn.transform( new(C) AddXNode(start, MakeConX(-bump_bit)) );
+ Node* x1 = _gvn.transform(new(C) AddXNode(start, MakeConX(-bump_bit)));
Node* p1 = basic_plus_adr(dest, x1);
mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT);
mem = _gvn.transform(mem);
@@ -5193,8 +5265,8 @@ LibraryCallKit::generate_block_arraycopy(const TypePtr* adr_type,
Node* sptr = basic_plus_adr(src, src_off);
Node* dptr = basic_plus_adr(dest, dest_off);
Node* countx = dest_size;
- countx = _gvn.transform( new (C) SubXNode(countx, MakeConX(dest_off)) );
- countx = _gvn.transform( new (C) URShiftXNode(countx, intcon(LogBytesPerLong)) );
+ countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(dest_off)));
+ countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong)));
bool disjoint_bases = true; // since alloc != NULL
generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases,
@@ -5359,6 +5431,117 @@ bool LibraryCallKit::inline_encodeISOArray() {
return true;
}
+/**
+ * Calculate CRC32 for byte.
+ * int java.util.zip.CRC32.update(int crc, int b)
+ */
+bool LibraryCallKit::inline_updateCRC32() {
+ assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
+ assert(callee()->signature()->size() == 2, "update has 2 parameters");
+ // no receiver since it is a static method
+ Node* crc = argument(0); // type: int
+ Node* b = argument(1); // type: int
+
+ /*
+ * int c = ~ crc;
+ * b = timesXtoThe32[(b ^ c) & 0xFF];
+ * b = b ^ (c >>> 8);
+ * crc = ~b;
+ */
+
+ Node* M1 = intcon(-1);
+ crc = _gvn.transform(new (C) XorINode(crc, M1));
+ Node* result = _gvn.transform(new (C) XorINode(crc, b));
+ result = _gvn.transform(new (C) AndINode(result, intcon(0xFF)));
+
+ Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
+ Node* offset = _gvn.transform(new (C) LShiftINode(result, intcon(0x2)));
+ Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
+ result = make_load(control(), adr, TypeInt::INT, T_INT);
+
+ crc = _gvn.transform(new (C) URShiftINode(crc, intcon(8)));
+ result = _gvn.transform(new (C) XorINode(crc, result));
+ result = _gvn.transform(new (C) XorINode(result, M1));
+ set_result(result);
+ return true;
+}
+
+/**
+ * Calculate CRC32 for byte[] array.
+ * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
+ */
+bool LibraryCallKit::inline_updateBytesCRC32() {
+ assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
+ assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
+ // no receiver since it is a static method
+ Node* crc = argument(0); // type: int
+ Node* src = argument(1); // type: oop
+ Node* offset = argument(2); // type: int
+ Node* length = argument(3); // type: int
+
+ const Type* src_type = src->Value(&_gvn);
+ const TypeAryPtr* top_src = src_type->isa_aryptr();
+ if (top_src == NULL || top_src->klass() == NULL) {
+ // failed array check
+ return false;
+ }
+
+ // Figure out the size and type of the elements we will be copying.
+ BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
+ if (src_elem != T_BYTE) {
+ return false;
+ }
+
+ // 'src_start' points to src array + scaled offset
+ Node* src_start = array_element_address(src, offset, src_elem);
+
+ // We assume that range check is done by caller.
+ // TODO: generate range check (offset+length < src.length) in debug VM.
+
+ // Call the stub.
+ address stubAddr = StubRoutines::updateBytesCRC32();
+ const char *stubName = "updateBytesCRC32";
+
+ Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
+ stubAddr, stubName, TypePtr::BOTTOM,
+ crc, src_start, length);
+ Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
+ set_result(result);
+ return true;
+}
+
+/**
+ * Calculate CRC32 for ByteBuffer.
+ * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+ */
+bool LibraryCallKit::inline_updateByteBufferCRC32() {
+ assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
+ assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
+ // no receiver since it is a static method
+ Node* crc = argument(0); // type: int
+ Node* src = argument(1); // type: long
+ Node* offset = argument(3); // type: int
+ Node* length = argument(4); // type: int
+
+ src = ConvL2X(src); // adjust Java long to machine word
+ Node* base = _gvn.transform(new (C) CastX2PNode(src));
+ offset = ConvI2X(offset);
+
+ // 'src_start' points to src array + scaled offset
+ Node* src_start = basic_plus_adr(top(), base, offset);
+
+ // Call the stub.
+ address stubAddr = StubRoutines::updateBytesCRC32();
+ const char *stubName = "updateBytesCRC32";
+
+ Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
+ stubAddr, stubName, TypePtr::BOTTOM,
+ crc, src_start, length);
+ Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
+ set_result(result);
+ return true;
+}
+
//----------------------------inline_reference_get----------------------------
// public T java.lang.ref.Reference.get();
bool LibraryCallKit::inline_reference_get() {
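For readers landing on these new intrinsics cold: the commented pseudocode in inline_updateCRC32() is the classic one-byte, table-driven CRC-32 update, with the crc held in non-inverted form at the java.util.zip.CRC32 API boundary. Below is a minimal, self-contained C++ sketch of that exact computation; the table initialization merely stands in for the stub table behind StubRoutines::crc_table_addr(), whose contents (the standard reflected CRC-32 table) are assumed here, not shown by this patch. The byte[] and ByteBuffer variants instead hand the whole loop to the updateBytesCRC32 stub, which is where the CLMUL-accelerated implementation lives.

#include <cstdint>
#include <cstdio>

// Standard reflected CRC-32 table (polynomial 0xEDB88320). This plays the
// role of the table behind StubRoutines::crc_table_addr(); the real VM
// table layout is an assumption, not something this patch shows.
static uint32_t crc_table[256];

static void init_crc_table() {
  for (uint32_t i = 0; i < 256; i++) {
    uint32_t c = i;
    for (int k = 0; k < 8; k++) {
      c = (c & 1) ? (0xEDB88320u ^ (c >> 1)) : (c >> 1);
    }
    crc_table[i] = c;
  }
}

// Mirrors the node sequence built by inline_updateCRC32():
//   c = ~crc; t = table[(b ^ c) & 0xFF]; t ^= c >>> 8; crc' = ~t;
static uint32_t crc32_update_byte(uint32_t crc, uint8_t b) {
  uint32_t c = ~crc;                       // XorINode(crc, -1)
  uint32_t t = crc_table[(c ^ b) & 0xFF];  // AndINode + table load
  t ^= (c >> 8);                           // URShiftINode + XorINode
  return ~t;                               // XorINode(result, -1)
}

int main() {
  init_crc_table();
  uint32_t crc = 0;
  const char* msg = "abc";
  for (const char* p = msg; *p != '\0'; p++) {
    crc = crc32_update_byte(crc, (uint8_t)*p);
  }
  printf("%08x\n", crc);  // prints 352441c2, the CRC-32 of "abc"
  return 0;
}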
diff --git a/src/share/vm/opto/live.cpp b/src/share/vm/opto/live.cpp
index 773dd1ea2..02bbb1b97 100644
--- a/src/share/vm/opto/live.cpp
+++ b/src/share/vm/opto/live.cpp
@@ -30,9 +30,6 @@
#include "opto/machnode.hpp"
-
-//=============================================================================
-//------------------------------PhaseLive--------------------------------------
// Compute live-in/live-out. We use a totally incremental algorithm. The LIVE
// problem is monotonic. The steady-state solution looks like this: pull a
// block from the worklist. It has a set of deltas - values which are newly
@@ -53,9 +50,9 @@ void PhaseLive::compute(uint maxlrg) {
// Init the sparse live arrays. This data is live on exit from here!
// The _live info is the live-out info.
- _live = (IndexSet*)_arena->Amalloc(sizeof(IndexSet)*_cfg._num_blocks);
+ _live = (IndexSet*)_arena->Amalloc(sizeof(IndexSet) * _cfg.number_of_blocks());
uint i;
- for( i=0; i<_cfg._num_blocks; i++ ) {
+ for (i = 0; i < _cfg.number_of_blocks(); i++) {
_live[i].initialize(_maxlrg);
}
@@ -65,14 +62,14 @@ void PhaseLive::compute(uint maxlrg) {
// Does the memory used by _defs and _deltas get reclaimed? Does it matter? TT
// Array of values defined locally in blocks
- _defs = NEW_RESOURCE_ARRAY(IndexSet,_cfg._num_blocks);
- for( i=0; i<_cfg._num_blocks; i++ ) {
+ _defs = NEW_RESOURCE_ARRAY(IndexSet, _cfg.number_of_blocks());
+ for (i = 0; i < _cfg.number_of_blocks(); i++) {
_defs[i].initialize(_maxlrg);
}
// Array of delta-set pointers, indexed by block pre_order-1.
- _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg._num_blocks);
- memset( _deltas, 0, sizeof(IndexSet*)* _cfg._num_blocks);
+ _deltas = NEW_RESOURCE_ARRAY(IndexSet*, _cfg.number_of_blocks());
+ memset(_deltas, 0, sizeof(IndexSet*) * _cfg.number_of_blocks());
_free_IndexSet = NULL;
@@ -80,31 +77,32 @@ void PhaseLive::compute(uint maxlrg) {
VectorSet first_pass(Thread::current()->resource_area());
// Outer loop: must compute local live-in sets and push into predecessors.
- uint iters = _cfg._num_blocks; // stat counters
- for( uint j=_cfg._num_blocks; j>0; j-- ) {
- Block *b = _cfg._blocks[j-1];
+ for (uint j = _cfg.number_of_blocks(); j > 0; j--) {
+ Block* block = _cfg.get_block(j - 1);
// Compute the local live-in set. Start with any new live-out bits.
- IndexSet *use = getset( b );
- IndexSet *def = &_defs[b->_pre_order-1];
+ IndexSet* use = getset(block);
+ IndexSet* def = &_defs[block->_pre_order-1];
DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
uint i;
- for( i=b->_nodes.size(); i>1; i-- ) {
- Node *n = b->_nodes[i-1];
- if( n->is_Phi() ) break;
+ for (i = block->number_of_nodes(); i > 1; i--) {
+ Node* n = block->get_node(i-1);
+ if (n->is_Phi()) {
+ break;
+ }
- uint r = _names[n->_idx];
+ uint r = _names.at(n->_idx);
assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block");
def->insert( r );
use->remove( r );
uint cnt = n->req();
- for( uint k=1; k<cnt; k++ ) {
+ for (uint k = 1; k < cnt; k++) {
Node *nk = n->in(k);
uint nkidx = nk->_idx;
- if( _cfg._bbs[nkidx] != b ) {
- uint u = _names[nkidx];
- use->insert( u );
- DEBUG_ONLY(def_outside->insert( u );)
+ if (_cfg.get_block_for_node(nk) != block) {
+ uint u = _names.at(nkidx);
+ use->insert(u);
+ DEBUG_ONLY(def_outside->insert(u);)
}
}
}
@@ -113,39 +111,38 @@ void PhaseLive::compute(uint maxlrg) {
_free_IndexSet = def_outside; // Drop onto free list
#endif
// Remove anything defined by Phis and the block start instruction
- for( uint k=i; k>0; k-- ) {
- uint r = _names[b->_nodes[k-1]->_idx];
- def->insert( r );
- use->remove( r );
+ for (uint k = i; k > 0; k--) {
+ uint r = _names.at(block->get_node(k - 1)->_idx);
+ def->insert(r);
+ use->remove(r);
}
// Push these live-in things to predecessors
- for( uint l=1; l<b->num_preds(); l++ ) {
- Block *p = _cfg._bbs[b->pred(l)->_idx];
- add_liveout( p, use, first_pass );
+ for (uint l = 1; l < block->num_preds(); l++) {
+ Block* p = _cfg.get_block_for_node(block->pred(l));
+ add_liveout(p, use, first_pass);
// PhiNode uses go in the live-out set of prior blocks.
- for( uint k=i; k>0; k-- )
- add_liveout( p, _names[b->_nodes[k-1]->in(l)->_idx], first_pass );
+ for (uint k = i; k > 0; k--) {
+ add_liveout(p, _names.at(block->get_node(k-1)->in(l)->_idx), first_pass);
+ }
}
- freeset( b );
- first_pass.set(b->_pre_order);
+ freeset(block);
+ first_pass.set(block->_pre_order);
// Inner loop: blocks that picked up new live-out values to be propagated
- while( _worklist->size() ) {
- // !!!!!
-// #ifdef ASSERT
- iters++;
-// #endif
- Block *b = _worklist->pop();
- IndexSet *delta = getset(b);
+ while (_worklist->size()) {
+ Block* block = _worklist->pop();
+ IndexSet *delta = getset(block);
assert( delta->count(), "missing delta set" );
// Add new-live-in to predecessors live-out sets
- for( uint l=1; l<b->num_preds(); l++ )
- add_liveout( _cfg._bbs[b->pred(l)->_idx], delta, first_pass );
+ for (uint l = 1; l < block->num_preds(); l++) {
+ Block* predecessor = _cfg.get_block_for_node(block->pred(l));
+ add_liveout(predecessor, delta, first_pass);
+ }
- freeset(b);
+ freeset(block);
} // End of while-worklist-not-empty
} // End of for-all-blocks-outer-loop
@@ -153,7 +150,7 @@ void PhaseLive::compute(uint maxlrg) {
// We explicitly clear all of the IndexSets which we are about to release.
// This allows us to recycle their internal memory into IndexSet's free list.
- for( i=0; i<_cfg._num_blocks; i++ ) {
+ for (i = 0; i < _cfg.number_of_blocks(); i++) {
_defs[i].clear();
if (_deltas[i]) {
// Is this always true?
@@ -169,13 +166,11 @@ void PhaseLive::compute(uint maxlrg) {
}
-//------------------------------stats------------------------------------------
#ifndef PRODUCT
void PhaseLive::stats(uint iters) const {
}
#endif
-//------------------------------getset-----------------------------------------
// Get an IndexSet for a block. Return existing one, if any. Make a new
// empty one if a prior one does not exist.
IndexSet *PhaseLive::getset( Block *p ) {
@@ -186,7 +181,6 @@ IndexSet *PhaseLive::getset( Block *p ) {
return delta; // Return set of new live-out items
}
-//------------------------------getfreeset-------------------------------------
// Pull from free list, or allocate. Internal allocation on the returned set
// is always from thread local storage.
IndexSet *PhaseLive::getfreeset( ) {
@@ -205,7 +199,6 @@ IndexSet *PhaseLive::getfreeset( ) {
return f;
}
-//------------------------------freeset----------------------------------------
// Free an IndexSet from a block.
void PhaseLive::freeset( const Block *p ) {
IndexSet *f = _deltas[p->_pre_order-1];
@@ -214,7 +207,6 @@ void PhaseLive::freeset( const Block *p ) {
_deltas[p->_pre_order-1] = NULL;
}
-//------------------------------add_liveout------------------------------------
// Add a live-out value to a given block's live-out set. If it is new, then
// also add it to the delta set and stick the block on the worklist.
void PhaseLive::add_liveout( Block *p, uint r, VectorSet &first_pass ) {
@@ -231,8 +223,6 @@ void PhaseLive::add_liveout( Block *p, uint r, VectorSet &first_pass ) {
}
}
-
-//------------------------------add_liveout------------------------------------
// Add a vector of live-out values to a given block's live-out set.
void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) {
IndexSet *live = &_live[p->_pre_order-1];
@@ -260,31 +250,31 @@ void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) {
}
#ifndef PRODUCT
-//------------------------------dump-------------------------------------------
// Dump the live-out set for a block
void PhaseLive::dump( const Block *b ) const {
tty->print("Block %d: ",b->_pre_order);
tty->print("LiveOut: "); _live[b->_pre_order-1].dump();
- uint cnt = b->_nodes.size();
+ uint cnt = b->number_of_nodes();
for( uint i=0; i<cnt; i++ ) {
- tty->print("L%d/", _names[b->_nodes[i]->_idx] );
- b->_nodes[i]->dump();
+ tty->print("L%d/", _names.at(b->get_node(i)->_idx));
+ b->get_node(i)->dump();
}
tty->print("\n");
}
-//------------------------------verify_base_ptrs-------------------------------
// Verify that base pointers and derived pointers are still sane.
void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
#ifdef ASSERT
Unique_Node_List worklist(a);
- for( uint i = 0; i < _cfg._num_blocks; i++ ) {
- Block *b = _cfg._blocks[i];
- for( uint j = b->end_idx() + 1; j > 1; j-- ) {
- Node *n = b->_nodes[j-1];
- if( n->is_Phi() ) break;
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+ Block* block = _cfg.get_block(i);
+ for (uint j = block->end_idx() + 1; j > 1; j--) {
+ Node* n = block->get_node(j-1);
+ if (n->is_Phi()) {
+ break;
+ }
// Found a safepoint?
- if( n->is_MachSafePoint() ) {
+ if (n->is_MachSafePoint()) {
MachSafePointNode *sfpt = n->as_MachSafePoint();
JVMState* jvms = sfpt->jvms();
if (jvms != NULL) {
@@ -331,7 +321,7 @@ void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
#ifdef _LP64
UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP ||
UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN ||
- UseCompressedKlassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
+ UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
#endif
check->as_Mach()->ideal_Opcode() == Op_LoadP ||
check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) {
@@ -356,7 +346,6 @@ void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
#endif
}
-//------------------------------verify-------------------------------------
// Verify that graphs and base pointers are still sane.
void PhaseChaitin::verify( ResourceArea *a, bool verify_ifg ) const {
#ifdef ASSERT
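The block comment at the head of PhaseLive::compute() (visible in the first hunk of this file) describes the algorithm the renamings above leave untouched: a monotonic, worklist-driven solution of the LIVE problem. As a reading aid only, here is a compact, self-contained C++ sketch of that fixed-point scheme; std::set stands in for IndexSet, plain indices for blocks and values, and every name is illustrative rather than HotSpot's.

#include <cstdio>
#include <deque>
#include <set>
#include <vector>

// One block of a toy CFG: locally defined values, locally used values, and
// predecessor indices, mirroring the per-block def/use/live-out sets that
// PhaseLive::compute() keeps.
struct Block {
  std::set<int> def, use;
  std::vector<int> preds;
  std::set<int> live_out;
};

// Monotonic worklist solution: push each block's live-in set,
// use(b) + (live_out(b) - def(b)), into every predecessor's live-out,
// re-queueing a predecessor whenever its live-out actually grows.
static void compute_liveness(std::vector<Block>& blocks) {
  std::deque<size_t> worklist;
  for (size_t b = 0; b < blocks.size(); b++) worklist.push_back(b);
  while (!worklist.empty()) {
    size_t b = worklist.front();
    worklist.pop_front();
    std::set<int> live_in = blocks[b].use;
    for (std::set<int>::iterator it = blocks[b].live_out.begin();
         it != blocks[b].live_out.end(); ++it) {
      if (!blocks[b].def.count(*it)) live_in.insert(*it);
    }
    for (size_t i = 0; i < blocks[b].preds.size(); i++) {
      Block& pred = blocks[blocks[b].preds[i]];
      bool grew = false;
      for (std::set<int>::iterator it = live_in.begin(); it != live_in.end(); ++it) {
        grew |= pred.live_out.insert(*it).second;  // true only for new deltas
      }
      if (grew) worklist.push_back((size_t)blocks[b].preds[i]);
    }
  }
}

int main() {
  // b0 -> b1 -> b2, where b2 uses value 7 defined in b0.
  std::vector<Block> blocks(3);
  blocks[0].def.insert(7);
  blocks[1].preds.push_back(0);
  blocks[2].preds.push_back(1);
  blocks[2].use.insert(7);
  compute_liveness(blocks);
  printf("7 live out of b0: %d\n", (int)blocks[0].live_out.count(7));  // 1
  return 0;
}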
diff --git a/src/share/vm/opto/live.hpp b/src/share/vm/opto/live.hpp
index c2ebe758c..e449bb3f3 100644
--- a/src/share/vm/opto/live.hpp
+++ b/src/share/vm/opto/live.hpp
@@ -40,27 +40,7 @@ class IndexSet;
//------------------------------LRG_List---------------------------------------
// Map Node indices to Live RanGe indices.
// Array lookup in the optimized case.
-class LRG_List : public ResourceObj {
- friend class VMStructs;
- uint _cnt, _max;
- uint* _lidxs;
- ReallocMark _nesting; // assertion check for reallocations
-public:
- LRG_List( uint max );
-
- uint lookup( uint nidx ) const {
- return _lidxs[nidx];
- }
- uint operator[] (uint nidx) const { return lookup(nidx); }
-
- void map( uint nidx, uint lidx ) {
- assert( nidx < _cnt, "oob" );
- _lidxs[nidx] = lidx;
- }
- void extend( uint nidx, uint lidx );
-
- uint Size() const { return _cnt; }
-};
+typedef GrowableArray<uint> LRG_List;
//------------------------------PhaseLive--------------------------------------
// Compute live-in/live-out
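The hand-rolled LRG_List above disappears in favour of GrowableArray<uint>; the live.cpp hunks already show the lookup side becoming _names.at(idx). Here is a toy equivalent of the mapping, showing how the old operations line up with the growable-array style; at_put_grow() is assumed to be GrowableArray's grow-on-write entry point, which this patch does not itself show, and the class below is a stand-in, not HotSpot's container.

#include <cassert>
#include <cstdio>
#include <vector>

// Toy stand-in for GrowableArray<uint>: lookup() -> at(),
// map() -> at_put(), extend() -> at_put_grow(), Size() -> length().
class ToyGrowableArray {
  std::vector<unsigned> _data;
public:
  unsigned at(int i) const {
    assert(i >= 0 && i < (int)_data.size());
    return _data[i];
  }
  void at_put(int i, unsigned v) {
    assert(i >= 0 && i < (int)_data.size());
    _data[i] = v;
  }
  void at_put_grow(int i, unsigned v, unsigned fill = 0) {
    if (i >= (int)_data.size()) _data.resize(i + 1, fill);  // like extend()
    _data[i] = v;
  }
  int length() const { return (int)_data.size(); }
};

int main() {
  ToyGrowableArray names;      // node idx -> live range idx
  names.at_put_grow(5, 42);    // old LRG_List::extend(5, 42)
  printf("%u %d\n", names.at(5), names.length());  // 42 6
  return 0;
}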
diff --git a/src/share/vm/opto/loopPredicate.cpp b/src/share/vm/opto/loopPredicate.cpp
index a9867a5ee..f29d9daab 100644
--- a/src/share/vm/opto/loopPredicate.cpp
+++ b/src/share/vm/opto/loopPredicate.cpp
@@ -821,8 +821,8 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
loop->dump_head();
}
#endif
- } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
- assert(proj->_con == predicate_proj->_con, "must match");
+ } else if ((cl != NULL) && (proj->_con == predicate_proj->_con) &&
+ loop->is_range_check_if(iff, this, invar)) {
// Range check for counted loops
const Node* cmp = bol->in(1)->as_Cmp();
diff --git a/src/share/vm/opto/loopTransform.cpp b/src/share/vm/opto/loopTransform.cpp
index c438cf9b8..a30fc80df 100644
--- a/src/share/vm/opto/loopTransform.cpp
+++ b/src/share/vm/opto/loopTransform.cpp
@@ -624,8 +624,6 @@ bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
}
-#define MAX_UNROLL 16 // maximum number of unrolls for main loop
-
//------------------------------policy_unroll----------------------------------
// Return TRUE or FALSE if the loop should be unrolled or not. Unroll if
// the loop is a CountedLoop and the body is small enough.
@@ -642,7 +640,7 @@ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;
int future_unroll_ct = cl->unrolled_count() * 2;
- if (future_unroll_ct > MAX_UNROLL) return false;
+ if (future_unroll_ct > LoopMaxUnroll) return false;
// Check for initial stride being a small enough constant
if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;
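The loopTransform.cpp hunk swaps the file-local MAX_UNROLL macro for LoopMaxUnroll, which appears to be a tunable VM flag with the same default of 16, so the unroll cap no longer requires a rebuild to change. A minimal restatement of the gate, using only the checks visible in the surrounding context lines (parameter names are illustrative):

#include <cstdio>
#include <cstdlib>

// Restates the policy_unroll() checks shown in this hunk: require a
// worthwhile trip count, cap the doubled unroll count at LoopMaxUnroll
// (previously the hard-coded MAX_UNROLL == 16), and bound the stride.
static bool policy_unroll(unsigned trip_count, int unrolled_count,
                          int stride_con, bool normal_loop, int LoopMaxUnroll) {
  if (trip_count <= (unsigned)(normal_loop ? 2 : 1)) return false;
  int future_unroll_ct = unrolled_count * 2;
  if (future_unroll_ct > LoopMaxUnroll) return false;
  if (abs(stride_con) > (1 << 2) * future_unroll_ct) return false;
  return true;
}

int main() {
  printf("%d\n", policy_unroll(100, 16, 1, true, 16));  // 0: 32 exceeds the cap
  printf("%d\n", policy_unroll(100, 4, 1, true, 16));   // 1: 8 is within the cap
  return 0;
}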
diff --git a/src/share/vm/opto/loopnode.cpp b/src/share/vm/opto/loopnode.cpp
index c323d02f8..ab05d186b 100644
--- a/src/share/vm/opto/loopnode.cpp
+++ b/src/share/vm/opto/loopnode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -440,7 +440,7 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
// ---- SUCCESS! Found A Trip-Counted Loop! -----
//
assert(x->Opcode() == Op_Loop, "regular loops only");
- C->print_method("Before CountedLoop", 3);
+ C->print_method(PHASE_BEFORE_CLOOPS, 3);
Node *hook = new (C) Node(6);
@@ -791,7 +791,7 @@ bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
}
#endif
- C->print_method("After CountedLoop", 3);
+ C->print_method(PHASE_AFTER_CLOOPS, 3);
return true;
}
@@ -2164,7 +2164,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts)
// Split shared headers and insert loop landing pads.
// Do not bother doing this on the Root loop of course.
if( !_verify_me && !_verify_only && _ltree_root->_child ) {
- C->print_method("Before beautify loops", 3);
+ C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3);
if( _ltree_root->_child->beautify_loops( this ) ) {
// Re-build loop tree!
_ltree_root->_child = NULL;
@@ -2178,7 +2178,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts)
// Reset loop nesting depth
_ltree_root->set_nest( 0 );
- C->print_method("After beautify loops", 3);
+ C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3);
}
}
diff --git a/src/share/vm/opto/loopnode.hpp b/src/share/vm/opto/loopnode.hpp
index c45ea8421..df3eb4c05 100644
--- a/src/share/vm/opto/loopnode.hpp
+++ b/src/share/vm/opto/loopnode.hpp
@@ -965,7 +965,7 @@ public:
// Has use internal to the vector set (ie. not in a phi at the loop head)
bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
// clone "n" for uses that are outside of loop
- void clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
+ int clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
// clone "n" for special uses that are in the not_peeled region
void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
diff --git a/src/share/vm/opto/loopopts.cpp b/src/share/vm/opto/loopopts.cpp
index 1db82d4ce..31b5d8a5b 100644
--- a/src/share/vm/opto/loopopts.cpp
+++ b/src/share/vm/opto/loopopts.cpp
@@ -1939,8 +1939,8 @@ bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoo
//------------------------------ clone_for_use_outside_loop -------------------------------------
// clone "n" for uses that are outside of loop
-void PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) {
-
+int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) {
+ int cloned = 0;
assert(worklist.size() == 0, "should be empty");
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
Node* use = n->fast_out(j);
@@ -1960,6 +1960,7 @@ void PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, N
// clone "n" and insert it between the inputs of "n" and the use outside the loop
Node* n_clone = n->clone();
_igvn.replace_input_of(use, j, n_clone);
+ cloned++;
Node* use_c;
if (!use->is_Phi()) {
use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
@@ -1977,6 +1978,7 @@ void PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, N
}
#endif
}
+ return cloned;
}
@@ -2495,6 +2497,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
// Evacuate nodes in peel region into the not_peeled region if possible
uint new_phi_cnt = 0;
+ uint cloned_for_outside_use = 0;
for (i = 0; i < peel_list.size();) {
Node* n = peel_list.at(i);
#if !defined(PRODUCT)
@@ -2513,8 +2516,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
// if not pinned and not a load (which maybe anti-dependent on a store)
// and not a CMove (Matcher expects only bool->cmove).
if ( n->in(0) == NULL && !n->is_Load() && !n->is_CMove() ) {
- clone_for_use_outside_loop( loop, n, worklist );
-
+ cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist );
sink_list.push(n);
peel >>= n->_idx; // delete n from peel set.
not_peel <<= n->_idx; // add n to not_peel set.
@@ -2551,6 +2553,12 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
// Inhibit more partial peeling on this loop
assert(!head->is_partial_peel_loop(), "not partial peeled");
head->mark_partial_peel_failed();
+ if (cloned_for_outside_use > 0) {
+ // Terminate this round of loop opts because
+ // the graph outside this loop was changed.
+ C->set_major_progress();
+ return true;
+ }
return false;
}
diff --git a/src/share/vm/opto/machnode.cpp b/src/share/vm/opto/machnode.cpp
index b9b014e05..0d6ddf9ae 100644
--- a/src/share/vm/opto/machnode.cpp
+++ b/src/share/vm/opto/machnode.cpp
@@ -349,11 +349,11 @@ const class TypePtr *MachNode::adr_type() const {
if (base == NodeSentinel) return TypePtr::BOTTOM;
const Type* t = base->bottom_type();
- if (UseCompressedOops && Universe::narrow_oop_shift() == 0) {
+ if (t->isa_narrowoop() && Universe::narrow_oop_shift() == 0) {
// 32-bit unscaled narrow oop can be the base of any address expression
t = t->make_ptr();
}
- if (UseCompressedKlassPointers && Universe::narrow_klass_shift() == 0) {
+ if (t->isa_narrowklass() && Universe::narrow_klass_shift() == 0) {
// 32-bit unscaled narrow klass can be the base of any address expression
t = t->make_ptr();
}
diff --git a/src/share/vm/opto/machnode.hpp b/src/share/vm/opto/machnode.hpp
index 5b630e45c..525135d94 100644
--- a/src/share/vm/opto/machnode.hpp
+++ b/src/share/vm/opto/machnode.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@ class State;
class MachOper : public ResourceObj {
public:
// Allocate right next to the MachNodes in the same arena
- void *operator new( size_t x, Compile* C ) { return C->node_arena()->Amalloc_D(x); }
+ void *operator new( size_t x, Compile* C ) throw() { return C->node_arena()->Amalloc_D(x); }
// Opcode
virtual uint opcode() const = 0;
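The lone MachOper change is the empty exception specification on the arena placement operator new. It is worth spelling out why: an allocation function that may throw is required never to return NULL, so a compiler is entitled to skip the null check before running the constructor; declaring the operator throw() makes a NULL return well-defined and forces that check. A toy arena illustrating the shape (nothing below is HotSpot's actual allocator):

#include <cstddef>
#include <cstdio>

// Toy bump-pointer arena standing in for Compile::node_arena(); Amalloc_D
// in the real code likewise hands out 8-byte-aligned chunks.
class Arena {
  char   _buf[4096];
  size_t _used;
public:
  Arena() : _used(0) {}
  void* alloc(size_t n) {
    n = (n + 7) & ~(size_t)7;                   // 8-byte align
    if (_used + n > sizeof(_buf)) return NULL;  // may legitimately fail
    void* p = _buf + _used;
    _used += n;
    return p;
  }
};

struct MachOperLike {
  // throw() tells the compiler this allocator may return NULL, so the
  // generated code must test the pointer before construction instead of
  // assuming success, which is the reason the patch adds it.
  void* operator new(size_t x, Arena* arena) throw() { return arena->alloc(x); }
  int _opcode;
};

int main() {
  Arena arena;
  MachOperLike* op = new (&arena) MachOperLike();  // allocated in the arena
  op->_opcode = 1;
  printf("%d\n", op->_opcode);
  return 0;
}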
diff --git a/src/share/vm/opto/macro.cpp b/src/share/vm/opto/macro.cpp
index aeb1b6226..06bf9e608 100644
--- a/src/share/vm/opto/macro.cpp
+++ b/src/share/vm/opto/macro.cpp
@@ -72,6 +72,8 @@ void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcal
int jvms_adj = new_dbg_start - old_dbg_start;
assert (new_dbg_start == newcall->req(), "argument count mismatch");
+ // SafePointScalarObject node could be referenced several times in debug info.
+ // Use Dict to record cloned nodes.
Dict* sosn_map = new Dict(cmpkey,hashkey);
for (uint i = old_dbg_start; i < oldcall->req(); i++) {
Node* old_in = oldcall->in(i);
@@ -79,8 +81,8 @@ void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcal
if (old_in != NULL && old_in->is_SafePointScalarObject()) {
SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
uint old_unique = C->unique();
- Node* new_in = old_sosn->clone(jvms_adj, sosn_map);
- if (old_unique != C->unique()) {
+ Node* new_in = old_sosn->clone(sosn_map);
+ if (old_unique != C->unique()) { // New node?
new_in->set_req(0, C->root()); // reset control edge
new_in = transform_later(new_in); // Register new node.
}
@@ -666,7 +668,7 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
alloc->dump();
else
res->dump();
- } else {
+ } else if (alloc->_is_scalar_replaceable) {
tty->print("NotScalar (%s)", fail_eliminate);
if (res == NULL)
alloc->dump();
@@ -725,7 +727,11 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
while (safepoints.length() > 0) {
SafePointNode* sfpt = safepoints.pop();
Node* mem = sfpt->memory();
- uint first_ind = sfpt->req();
+ assert(sfpt->jvms() != NULL, "missed JVMS");
+ // Fields of scalar objs are referenced only at the end
+ // of regular debuginfo at the last (youngest) JVMS.
+ // Record relative start index.
+ uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
SafePointScalarObjectNode* sobj = new (C) SafePointScalarObjectNode(res_type,
#ifdef ASSERT
alloc,
@@ -799,7 +805,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
for (int i = start; i < end; i++) {
if (sfpt_done->in(i)->is_SafePointScalarObject()) {
SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
- if (scobj->first_index() == sfpt_done->req() &&
+ if (scobj->first_index(jvms) == sfpt_done->req() &&
scobj->n_fields() == (uint)nfields) {
assert(scobj->alloc() == alloc, "sanity");
sfpt_done->set_req(i, res);
@@ -834,7 +840,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
if (field_val->is_EncodeP()) {
field_val = field_val->in(1);
} else {
- field_val = transform_later(new (C) DecodeNNode(field_val, field_val->bottom_type()->make_ptr()));
+ field_val = transform_later(new (C) DecodeNNode(field_val, field_val->get_ptr_type()));
}
}
sfpt->add_req(field_val);
@@ -845,18 +851,14 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
// to the allocated object with "sobj"
int start = jvms->debug_start();
int end = jvms->debug_end();
- for (int i = start; i < end; i++) {
- if (sfpt->in(i) == res) {
- sfpt->set_req(i, sobj);
- }
- }
+ sfpt->replace_edges_in_range(res, sobj, start, end);
safepoints_done.append_if_missing(sfpt); // keep it for rollback
}
return true;
}
// Process users of eliminated allocation.
-void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
+void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
Node* res = alloc->result_cast();
if (res != NULL) {
for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
@@ -899,6 +901,17 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
// Process other users of allocation's projections
//
if (_resproj != NULL && _resproj->outcnt() != 0) {
+ // First disconnect stores captured by Initialize node.
+ // If Initialize node is eliminated first in the following code,
+ // it will kill such stores and DUIterator_Last will assert.
+ for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax); j < jmax; j++) {
+ Node *use = _resproj->fast_out(j);
+ if (use->is_AddP()) {
+ // raw memory addresses used only by the initialization
+ _igvn.replace_node(use, C->top());
+ --j; --jmax;
+ }
+ }
for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) {
Node *use = _resproj->last_out(j);
uint oc1 = _resproj->outcnt();
@@ -923,9 +936,6 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
#endif
_igvn.replace_node(mem_proj, mem);
}
- } else if (use->is_AddP()) {
- // raw memory addresses used only by the initialization
- _igvn.replace_node(use, C->top());
} else {
assert(false, "only Initialize or AddP expected");
}
@@ -953,8 +963,18 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
}
bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
-
- if (!EliminateAllocations || !alloc->_is_scalar_replaceable) {
+ if (!EliminateAllocations || !alloc->_is_non_escaping) {
+ return false;
+ }
+ Node* klass = alloc->in(AllocateNode::KlassNode);
+ const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
+ Node* res = alloc->result_cast();
+ // Eliminate boxing allocations that are not used,
+ // regardless of their scalar-replaceable status.
+ bool boxing_alloc = C->eliminate_boxing() &&
+ tklass->klass()->is_instance_klass() &&
+ tklass->klass()->as_instance_klass()->is_box_klass();
+ if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != NULL))) {
return false;
}
@@ -965,14 +985,22 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
return false;
}
+ if (!alloc->_is_scalar_replaceable) {
+ assert(res == NULL, "sanity");
+ // We can only eliminate allocation if all debug info references
+ // are already replaced with SafePointScalarObject because
+ // we can't search for a field's value without instance_id.
+ if (safepoints.length() > 0) {
+ return false;
+ }
+ }
+
if (!scalar_replacement(alloc, safepoints)) {
return false;
}
CompileLog* log = C->log();
if (log != NULL) {
- Node* klass = alloc->in(AllocateNode::KlassNode);
- const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
log->head("eliminate_allocation type='%d'",
log->identify(tklass->klass()));
JVMState* p = alloc->jvms();
@@ -997,6 +1025,43 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
return true;
}
+bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
+ // EA should remove all uses of non-escaping boxing node.
+ if (!C->eliminate_boxing() || boxing->proj_out(TypeFunc::Parms) != NULL) {
+ return false;
+ }
+
+ extract_call_projections(boxing);
+
+ const TypeTuple* r = boxing->tf()->range();
+ assert(r->cnt() > TypeFunc::Parms, "sanity");
+ const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
+ assert(t != NULL, "sanity");
+
+ CompileLog* log = C->log();
+ if (log != NULL) {
+ log->head("eliminate_boxing type='%d'",
+ log->identify(t->klass()));
+ JVMState* p = boxing->jvms();
+ while (p != NULL) {
+ log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+ p = p->caller();
+ }
+ log->tail("eliminate_boxing");
+ }
+
+ process_users_of_allocation(boxing);
+
+#ifndef PRODUCT
+ if (PrintEliminateAllocations) {
+ tty->print("++++ Eliminated: %d ", boxing->_idx);
+ boxing->method()->print_short_name(tty);
+ tty->cr();
+ }
+#endif
+
+ return true;
+}
//---------------------------set_eden_pointers-------------------------
void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
@@ -2126,7 +2191,7 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
#ifdef _LP64
- if (UseCompressedKlassPointers && klass_node->is_DecodeNKlass()) {
+ if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
klass_node->in(1)->init_req(0, ctrl);
} else
@@ -2384,6 +2449,9 @@ void PhaseMacroExpand::eliminate_macro_nodes() {
case Node::Class_AllocateArray:
success = eliminate_allocate_node(n->as_Allocate());
break;
+ case Node::Class_CallStaticJava:
+ success = eliminate_boxing_node(n->as_CallStaticJava());
+ break;
case Node::Class_Lock:
case Node::Class_Unlock:
assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
@@ -2424,6 +2492,11 @@ bool PhaseMacroExpand::expand_macro_nodes() {
C->remove_macro_node(n);
_igvn._worklist.push(n);
success = true;
+ } else if (n->Opcode() == Op_CallStaticJava) {
+ // Remove it from macro list and put on IGVN worklist to optimize.
+ C->remove_macro_node(n);
+ _igvn._worklist.push(n);
+ success = true;
} else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
_igvn.replace_node(n, n->in(1));
success = true;
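The headline of the macro.cpp changes is eliminate_boxing_node(): a non-escaping boxing call (an Integer.valueOf-style method) can be removed outright once escape analysis has eliminated every use of its result, that is, once proj_out(TypeFunc::Parms) is gone, independent of scalar replaceability. Below is a toy rendering of just that gate; the is_boxing_method test is folded in for clarity, although in the patch that classification happens before dispatch, and every type here is a hand-rolled stand-in.

#include <cstddef>
#include <cstdio>

// Stand-ins for the node types involved; only the fields the check needs.
struct Proj { int outcnt; };

struct BoxingCall {
  Proj* result_proj;      // NULL once EA has removed all uses of the value
  bool  is_boxing_method;
};

// Mirrors the gate at the top of eliminate_boxing_node(): only boxing
// calls, only when autobox elimination is enabled, and only when no
// result projection survives (proj_out(TypeFunc::Parms) == NULL).
static bool can_eliminate_boxing(const BoxingCall* call, bool eliminate_boxing) {
  if (!eliminate_boxing || !call->is_boxing_method) return false;
  return call->result_proj == NULL;
}

int main() {
  BoxingCall dead_box = { NULL, true };   // valueOf whose result is unused
  Proj live = { 2 };
  BoxingCall live_box = { &live, true };  // result still has two uses
  printf("%d %d\n", can_eliminate_boxing(&dead_box, true),
                    can_eliminate_boxing(&live_box, true));  // 1 0
  return 0;
}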
diff --git a/src/share/vm/opto/macro.hpp b/src/share/vm/opto/macro.hpp
index ba35c497f..7a72316df 100644
--- a/src/share/vm/opto/macro.hpp
+++ b/src/share/vm/opto/macro.hpp
@@ -86,10 +86,11 @@ private:
Node *value_from_mem(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc);
Node *value_from_mem_phi(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level);
+ bool eliminate_boxing_node(CallStaticJavaNode *boxing);
bool eliminate_allocate_node(AllocateNode *alloc);
bool can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints);
bool scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints_done);
- void process_users_of_allocation(AllocateNode *alloc);
+ void process_users_of_allocation(CallNode *alloc);
void eliminate_card_mark(Node *cm);
void mark_eliminated_box(Node* box, Node* obj);
diff --git a/src/share/vm/opto/matcher.cpp b/src/share/vm/opto/matcher.cpp
index 4cba6510f..4d5ff16ff 100644
--- a/src/share/vm/opto/matcher.cpp
+++ b/src/share/vm/opto/matcher.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,8 +70,8 @@ const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize = _END_REMATERIALIZE;
//---------------------------Matcher-------------------------------------------
-Matcher::Matcher( Node_List &proj_list ) :
- PhaseTransform( Phase::Ins_Select ),
+Matcher::Matcher()
+: PhaseTransform( Phase::Ins_Select ),
#ifdef ASSERT
_old2new_map(C->comp_arena()),
_new2old_map(C->comp_arena()),
@@ -81,7 +81,7 @@ Matcher::Matcher( Node_List &proj_list ) :
_swallowed(swallowed),
_begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
_end_inst_chain_rule(_END_INST_CHAIN_RULE),
- _must_clone(must_clone), _proj_list(proj_list),
+ _must_clone(must_clone),
_register_save_policy(register_save_policy),
_c_reg_save_policy(c_reg_save_policy),
_register_save_type(register_save_type),
@@ -320,7 +320,7 @@ void Matcher::match( ) {
find_shared( C->root() );
find_shared( C->top() );
- C->print_method("Before Matching");
+ C->print_method(PHASE_BEFORE_MATCHING);
// Create new ideal node ConP #NULL even if it does exist in old space
// to avoid false sharing if the corresponding mach node is not used.
@@ -988,6 +988,8 @@ Node *Matcher::xform( Node *n, int max_stack ) {
mstack.push(n, Visit, NULL, -1); // set NULL as parent to indicate root
while (mstack.is_nonempty()) {
+ C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
+ if (C->failing()) return NULL;
n = mstack.node(); // Leave node on stack
Node_State nstate = mstack.state();
if (nstate == Visit) {
@@ -1285,16 +1287,6 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
}
- if (is_method_handle_invoke) {
- // Kill some extra stack space in case method handles want to do
- // a little in-place argument insertion.
- // FIXME: Is this still necessary?
- int regs_per_word = NOT_LP64(1) LP64_ONLY(2); // %%% make a global const!
- out_arg_limit_per_call += Method::extra_stack_entries() * regs_per_word;
- // Do not update mcall->_argsize because (a) the extra space is not
- // pushed as arguments and (b) _argsize is dead (not used anywhere).
- }
-
// Compute the max stack slot killed by any call. These will not be
// available for debug info, and will be used to adjust FIRST_STACK_mask
// after all call sites have been visited.
@@ -1315,8 +1307,9 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
proj->_rout.Insert(OptoReg::Name(i));
}
- if( proj->_rout.is_NotEmpty() )
- _proj_list.push(proj);
+ if (proj->_rout.is_NotEmpty()) {
+ push_projection(proj);
+ }
}
// Transfer the safepoint information from the call to the mcall
// Move the JVMState list
@@ -1696,14 +1689,15 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
}
// If the _leaf is an AddP, insert the base edge
- if( leaf->is_AddP() )
+ if (leaf->is_AddP()) {
mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
+ }
- uint num_proj = _proj_list.size();
+ uint number_of_projections_prior = number_of_projections();
// Perform any 1-to-many expansions required
- MachNode *ex = mach->Expand(s,_proj_list, mem);
- if( ex != mach ) {
+ MachNode *ex = mach->Expand(s, _projection_list, mem);
+ if (ex != mach) {
assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
if( ex->in(1)->is_Con() )
ex->in(1)->set_req(0, C->root());
@@ -1724,7 +1718,7 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
// generated belatedly during spill code generation.
if (_allocation_started) {
guarantee(ex == mach, "no expand rules during spill generation");
- guarantee(_proj_list.size() == num_proj, "no allocation during spill generation");
+ guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
}
if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
@@ -1861,7 +1855,7 @@ void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) { // binary tree
int newrule;
- if( i == 0 )
+ if (i == 0)
newrule = kid->_rule[_leftOp[rule]];
else
newrule = kid->_rule[_rightOp[rule]];
@@ -2316,26 +2310,26 @@ void Matcher::validate_null_checks( ) {
// atomic instruction acting as a store_load barrier without any
// intervening volatile load, and thus we don't need a barrier here.
// We retain the Node to act as a compiler ordering barrier.
-bool Matcher::post_store_load_barrier(const Node *vmb) {
- Compile *C = Compile::current();
- assert( vmb->is_MemBar(), "" );
- assert( vmb->Opcode() != Op_MemBarAcquire, "" );
- const MemBarNode *mem = (const MemBarNode*)vmb;
-
- // Get the Proj node, ctrl, that can be used to iterate forward
- Node *ctrl = NULL;
- DUIterator_Fast imax, i = mem->fast_outs(imax);
- while( true ) {
- ctrl = mem->fast_out(i); // Throw out-of-bounds if proj not found
- assert( ctrl->is_Proj(), "only projections here" );
- ProjNode *proj = (ProjNode*)ctrl;
- if( proj->_con == TypeFunc::Control &&
- !C->node_arena()->contains(ctrl) ) // Unmatched old-space only
+bool Matcher::post_store_load_barrier(const Node* vmb) {
+ Compile* C = Compile::current();
+ assert(vmb->is_MemBar(), "");
+ assert(vmb->Opcode() != Op_MemBarAcquire, "");
+ const MemBarNode* membar = vmb->as_MemBar();
+
+ // Get the Ideal Proj node, ctrl, that can be used to iterate forward
+ Node* ctrl = NULL;
+ for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
+ Node* p = membar->fast_out(i);
+ assert(p->is_Proj(), "only projections here");
+ if ((p->as_Proj()->_con == TypeFunc::Control) &&
+ !C->node_arena()->contains(p)) { // Unmatched old-space only
+ ctrl = p;
break;
- i++;
+ }
}
+ assert((ctrl != NULL), "missing control projection");
- for( DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++ ) {
+ for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
Node *x = ctrl->fast_out(j);
int xop = x->Opcode();
@@ -2347,37 +2341,36 @@ bool Matcher::post_store_load_barrier(const Node *vmb) {
// that a monitor exit operation contains a serializing instruction.
if (xop == Op_MemBarVolatile ||
- xop == Op_FastLock ||
xop == Op_CompareAndSwapL ||
xop == Op_CompareAndSwapP ||
xop == Op_CompareAndSwapN ||
- xop == Op_CompareAndSwapI)
+ xop == Op_CompareAndSwapI) {
+ return true;
+ }
+
+ // Op_FastLock previously appeared in the Op_* list above.
+ // With biased locking we're no longer guaranteed that a monitor
+ // enter operation contains a serializing instruction.
+ if ((xop == Op_FastLock) && !UseBiasedLocking) {
return true;
+ }
if (x->is_MemBar()) {
// We must retain this membar if there is an upcoming volatile
- // load, which will be preceded by acquire membar.
- if (xop == Op_MemBarAcquire)
+ // load, which will be followed by acquire membar.
+ if (xop == Op_MemBarAcquire) {
return false;
- // For other kinds of barriers, check by pretending we
- // are them, and seeing if we can be removed.
- else
- return post_store_load_barrier((const MemBarNode*)x);
+ } else {
+ // For other kinds of barriers, check by pretending we
+ // are them, and seeing if we can be removed.
+ return post_store_load_barrier(x->as_MemBar());
+ }
}
- // Delicate code to detect case of an upcoming fastlock block
- if( x->is_If() && x->req() > 1 &&
- !C->node_arena()->contains(x) ) { // Unmatched old-space only
- Node *iff = x;
- Node *bol = iff->in(1);
- // The iff might be some random subclass of If or bol might be Con-Top
- if (!bol->is_Bool()) return false;
- assert( bol->req() > 1, "" );
- return (bol->in(1)->Opcode() == Op_FastUnlock);
- }
// probably not necessary to check for these
- if (x->is_Call() || x->is_SafePoint() || x->is_block_proj())
+ if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
return false;
+ }
}
return false;
}
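The rewrite of Matcher::post_store_load_barrier() reads more easily against a stripped-down model: scan forward from the membar's control projection; an atomic RMW or another volatile-store barrier already orders store->load, FastLock now only qualifies when biased locking is off (the behavioural fix in this hunk, since a biased monitor enter may not execute a serializing instruction), and an upcoming acquire membar or a call means the barrier must stay. The toy below flattens the graph to a plain successor list and drops the recursion through other membars, so treat it as a shape sketch, not the real traversal:

#include <cstdio>
#include <vector>

// Opcode names follow the patch; the graph is flattened to one successor list.
enum Op { MemBarVolatile, CompareAndSwap, FastLock, MemBarAcquire, Call, Other };

struct Node {
  Op op;
  std::vector<Node*> out;
};

// Returns true when a trailing StoreLoad barrier after 'membar' is redundant.
static bool post_store_load_barrier(const Node* membar, bool use_biased_locking) {
  for (size_t i = 0; i < membar->out.size(); i++) {
    Op op = membar->out[i]->op;
    if (op == MemBarVolatile || op == CompareAndSwap) {
      return true;   // already acts as a store->load fence
    }
    if (op == FastLock && !use_biased_locking) {
      return true;   // monitor enter serializes only when unbiased
    }
    if (op == MemBarAcquire) {
      return false;  // an upcoming volatile load needs the barrier
    }
    if (op == Call) {
      return false;  // conservatively keep the barrier
    }
    // Other: keep scanning later successors.
  }
  return false;
}

int main() {
  Node cas = { CompareAndSwap, std::vector<Node*>() };
  Node membar = { MemBarVolatile, std::vector<Node*>(1, &cas) };
  printf("redundant: %d\n", post_store_load_barrier(&membar, false));  // 1
  return 0;
}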
diff --git a/src/share/vm/opto/matcher.hpp b/src/share/vm/opto/matcher.hpp
index 280b8ad88..8435b0f99 100644
--- a/src/share/vm/opto/matcher.hpp
+++ b/src/share/vm/opto/matcher.hpp
@@ -88,7 +88,7 @@ class Matcher : public PhaseTransform {
Node *transform( Node *dummy );
- Node_List &_proj_list; // For Machine nodes killing many values
+ Node_List _projection_list; // For Machine nodes killing many values
Node_Array _shared_nodes;
@@ -183,10 +183,30 @@ public:
void collect_null_checks( Node *proj, Node *orig_proj );
void validate_null_checks( );
- Matcher( Node_List &proj_list );
+ Matcher();
+
+ // Get a projection node at position pos
+ Node* get_projection(uint pos) {
+ return _projection_list[pos];
+ }
+
+ // Push a projection node onto the projection list
+ void push_projection(Node* node) {
+ _projection_list.push(node);
+ }
+
+ Node* pop_projection() {
+ return _projection_list.pop();
+ }
+
+ // Number of nodes in the projection list
+ uint number_of_projections() const {
+ return _projection_list.size();
+ }
// Select instructions for entire method
- void match( );
+ void match();
+
// Helper for match
OptoReg::Name warp_incoming_stk_arg( VMReg reg );
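The companion matcher.hpp change is mechanical but worth one picture: the Node_List reference previously threaded through the constructor becomes a private member reached only through small accessors, which is what lets ReduceInst() above compare number_of_projections() before and after expansion. A toy equivalent with std::vector and Node reduced to an int id (only the accessor names follow the patch):

#include <cstdio>
#include <vector>

// Toy Matcher owning its projection list; accessor names mirror the patch,
// everything else is illustrative.
class MatcherLike {
  std::vector<int> _projection_list;   // Node* in the real code
public:
  void push_projection(int node) { _projection_list.push_back(node); }
  int pop_projection() {
    int n = _projection_list.back();
    _projection_list.pop_back();
    return n;
  }
  int get_projection(unsigned pos) const { return _projection_list[pos]; }
  unsigned number_of_projections() const { return (unsigned)_projection_list.size(); }
};

int main() {
  MatcherLike m;
  unsigned before = m.number_of_projections();  // the spill-time guard pattern
  m.push_projection(7);                         // e.g. a call's kill projection
  printf("%u -> %u, top=%d\n", before, m.number_of_projections(), m.get_projection(0));
  return 0;
}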
diff --git a/src/share/vm/opto/memnode.cpp b/src/share/vm/opto/memnode.cpp
index e3158f640..24ce86690 100644
--- a/src/share/vm/opto/memnode.cpp
+++ b/src/share/vm/opto/memnode.cpp
@@ -114,11 +114,15 @@ extern void print_alias_types();
#endif
-Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) {
- const TypeOopPtr *tinst = t_adr->isa_oopptr();
- if (tinst == NULL || !tinst->is_known_instance_field())
+Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) {
+ assert((t_oop != NULL), "sanity");
+ bool is_instance = t_oop->is_known_instance_field();
+ bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() &&
+ (load != NULL) && load->is_Load() &&
+ (phase->is_IterGVN() != NULL);
+ if (!(is_instance || is_boxed_value_load))
return mchain; // don't try to optimize non-instance types
- uint instance_id = tinst->instance_id();
+ uint instance_id = t_oop->instance_id();
Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory);
Node *prev = NULL;
Node *result = mchain;
@@ -133,15 +137,24 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr,
break; // hit one of our sentinels
} else if (proj_in->is_Call()) {
CallNode *call = proj_in->as_Call();
- if (!call->may_modify(t_adr, phase)) {
+ if (!call->may_modify(t_oop, phase)) { // returns false for instances
result = call->in(TypeFunc::Memory);
}
} else if (proj_in->is_Initialize()) {
AllocateNode* alloc = proj_in->as_Initialize()->allocation();
// Stop if this is the initialization for the object instance which
// contains this memory slice, otherwise skip over it.
- if (alloc != NULL && alloc->_idx != instance_id) {
+ if ((alloc == NULL) || (alloc->_idx == instance_id)) {
+ break;
+ }
+ if (is_instance) {
result = proj_in->in(TypeFunc::Memory);
+ } else if (is_boxed_value_load) {
+ Node* klass = alloc->in(AllocateNode::KlassNode);
+ const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
+ if (tklass->klass_is_exact() && !tklass->klass()->equals(t_oop->klass())) {
+ result = proj_in->in(TypeFunc::Memory); // not related allocation
+ }
}
} else if (proj_in->is_MemBar()) {
result = proj_in->in(TypeFunc::Memory);
@@ -149,25 +162,26 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr,
assert(false, "unexpected projection");
}
} else if (result->is_ClearArray()) {
- if (!ClearArrayNode::step_through(&result, instance_id, phase)) {
+ if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
// Can not bypass initialization of the instance
// we are looking for.
break;
}
// Otherwise skip it (the call updated 'result' value).
} else if (result->is_MergeMem()) {
- result = step_through_mergemem(phase, result->as_MergeMem(), t_adr, NULL, tty);
+ result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty);
}
}
return result;
}
-Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) {
- const TypeOopPtr *t_oop = t_adr->isa_oopptr();
- bool is_instance = (t_oop != NULL) && t_oop->is_known_instance_field();
+Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) {
+ const TypeOopPtr* t_oop = t_adr->isa_oopptr();
+ if (t_oop == NULL)
+ return mchain; // don't try to optimize non-oop types
+ Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase);
+ bool is_instance = t_oop->is_known_instance_field();
PhaseIterGVN *igvn = phase->is_IterGVN();
- Node *result = mchain;
- result = optimize_simple_memory_chain(result, t_adr, phase);
if (is_instance && igvn != NULL && result->is_Phi()) {
PhiNode *mphi = result->as_Phi();
assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
@@ -394,7 +408,7 @@ bool MemNode::all_controls_dominate(Node* dom, Node* sub) {
// Or Region for the check in LoadNode::Ideal();
// 'sub' should have sub->in(0) != NULL.
assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() ||
- sub->is_Region(), "expecting only these nodes");
+ sub->is_Region() || sub->is_Call(), "expecting only these nodes");
// Get control edge of 'sub'.
Node* orig_sub = sub;
@@ -959,6 +973,19 @@ uint LoadNode::hash() const {
return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
}
+static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
+ if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
+ bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
+ bool is_stable_ary = FoldStableValues &&
+ (tp != NULL) && (tp->isa_aryptr() != NULL) &&
+ tp->isa_aryptr()->is_stable();
+
+ return (eliminate_boxing && non_volatile) || is_stable_ary;
+ }
+
+ return false;
+}
+
//---------------------------can_see_stored_value------------------------------
// This routine exists to make sure this set of tests is done the same
// everywhere. We need to make a coordinated change: first LoadNode::Ideal
@@ -968,13 +995,14 @@ uint LoadNode::hash() const {
// of aliasing.
Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
Node* ld_adr = in(MemNode::Address);
-
+ intptr_t ld_off = 0;
+ AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
- Compile::AliasType* atp = tp != NULL ? phase->C->alias_type(tp) : NULL;
- if (EliminateAutoBox && atp != NULL && atp->index() >= Compile::AliasIdxRaw &&
- atp->field() != NULL && !atp->field()->is_volatile()) {
+ Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
+ // This is more general than load from boxing objects.
+ if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
uint alias_idx = atp->index();
- bool final = atp->field()->is_final();
+ bool final = !atp->is_rewritable();
Node* result = NULL;
Node* current = st;
// Skip through chains of MemBarNodes checking the MergeMems for
@@ -994,7 +1022,7 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
Node* new_st = merge->memory_at(alias_idx);
if (new_st == merge->base_memory()) {
// Keep searching
- current = merge->base_memory();
+ current = new_st;
continue;
}
// Save the new memory state for the slice and fall through
@@ -1009,7 +1037,6 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
}
}
-
// Loop around twice in the case Load -> Initialize -> Store.
// (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
for (int trip = 0; trip <= 1; trip++) {
@@ -1021,9 +1048,7 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
intptr_t st_off = 0;
AllocateNode* alloc = AllocateNode::Ideal_allocation(st_adr, phase, st_off);
if (alloc == NULL) return NULL;
- intptr_t ld_off = 0;
- AllocateNode* allo2 = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
- if (alloc != allo2) return NULL;
+ if (alloc != ld_alloc) return NULL;
if (ld_off != st_off) return NULL;
// At this point we have proven something like this setup:
// A = Allocate(...)
@@ -1040,14 +1065,12 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
return st->in(MemNode::ValueIn);
}
- intptr_t offset = 0; // scratch
-
// A load from a freshly-created object always returns zero.
// (This can happen after LoadNode::Ideal resets the load's memory input
// to find_captured_store, which returned InitializeNode::zero_memory.)
if (st->is_Proj() && st->in(0)->is_Allocate() &&
- st->in(0) == AllocateNode::Ideal_allocation(ld_adr, phase, offset) &&
- offset >= st->in(0)->as_Allocate()->minimum_header_size()) {
+ (st->in(0) == ld_alloc) &&
+ (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
// return a zero value for the load's basic type
// (This is one of the few places where a generic PhaseTransform
// can create new nodes. Think of it as lazily manifesting
@@ -1059,15 +1082,27 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
if (st->is_Proj() && st->in(0)->is_Initialize()) {
InitializeNode* init = st->in(0)->as_Initialize();
AllocateNode* alloc = init->allocation();
- if (alloc != NULL &&
- alloc == AllocateNode::Ideal_allocation(ld_adr, phase, offset)) {
+ if ((alloc != NULL) && (alloc == ld_alloc)) {
// examine a captured store value
- st = init->find_captured_store(offset, memory_size(), phase);
+ st = init->find_captured_store(ld_off, memory_size(), phase);
if (st != NULL)
continue; // take one more trip around
}
}
+ // A boxed value loaded from the result of a valueOf() call is the call's input parameter.
+ if (this->is_Load() && ld_adr->is_AddP() &&
+ (tp != NULL) && tp->is_ptr_to_boxed_value()) {
+ intptr_t ignore = 0;
+ Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore);
+ if (base != NULL && base->is_Proj() &&
+ base->as_Proj()->_con == TypeFunc::Parms &&
+ base->in(0)->is_CallStaticJava() &&
+ base->in(0)->as_CallStaticJava()->is_boxing_method()) {
+ return base->in(0)->in(TypeFunc::Parms);
+ }
+ }
+
break;
}
@@ -1076,11 +1111,13 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
//----------------------is_instance_field_load_with_local_phi------------------
bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
- if( in(MemNode::Memory)->is_Phi() && in(MemNode::Memory)->in(0) == ctrl &&
- in(MemNode::Address)->is_AddP() ) {
- const TypeOopPtr* t_oop = in(MemNode::Address)->bottom_type()->isa_oopptr();
- // Only instances.
- if( t_oop != NULL && t_oop->is_known_instance_field() &&
+ if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
+ in(Address)->is_AddP() ) {
+ const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
+ // Only instances and boxed values.
+ if( t_oop != NULL &&
+ (t_oop->is_ptr_to_boxed_value() ||
+ t_oop->is_known_instance_field()) &&
t_oop->offset() != Type::OffsetBot &&
t_oop->offset() != Type::OffsetTop) {
return true;
@@ -1094,7 +1131,7 @@ bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
Node *LoadNode::Identity( PhaseTransform *phase ) {
// If the previous store-maker is the right kind of Store, and the store is
// to the same address, then we are equal to the value stored.
- Node* mem = in(MemNode::Memory);
+ Node* mem = in(Memory);
Node* value = can_see_stored_value(mem, phase);
if( value ) {
// byte, short & char stores truncate naturally.
@@ -1116,15 +1153,22 @@ Node *LoadNode::Identity( PhaseTransform *phase ) {
// instance's field to avoid infinite generation of phis in a loop.
Node *region = mem->in(0);
if (is_instance_field_load_with_local_phi(region)) {
- const TypePtr *addr_t = in(MemNode::Address)->bottom_type()->isa_ptr();
+ const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr();
int this_index = phase->C->get_alias_index(addr_t);
int this_offset = addr_t->offset();
- int this_id = addr_t->is_oopptr()->instance_id();
+ int this_iid = addr_t->instance_id();
+ if (!addr_t->is_known_instance() &&
+ addr_t->is_ptr_to_boxed_value()) {
+ // Use _idx of address base (could be Phi node) for boxed values.
+ intptr_t ignore = 0;
+ Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
+ this_iid = base->_idx;
+ }
const Type* this_type = bottom_type();
for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
Node* phi = region->fast_out(i);
if (phi->is_Phi() && phi != mem &&
- phi->as_Phi()->is_same_inst_field(this_type, this_id, this_index, this_offset)) {
+ phi->as_Phi()->is_same_inst_field(this_type, this_iid, this_index, this_offset)) {
return phi;
}
}
@@ -1133,170 +1177,106 @@ Node *LoadNode::Identity( PhaseTransform *phase ) {
return this;
}
-
-// Returns true if the AliasType refers to the field that holds the
-// cached box array. Currently only handles the IntegerCache case.
-static bool is_autobox_cache(Compile::AliasType* atp) {
- if (atp != NULL && atp->field() != NULL) {
- ciField* field = atp->field();
- ciSymbol* klass = field->holder()->name();
- if (field->name() == ciSymbol::cache_field_name() &&
- field->holder()->uses_default_loader() &&
- klass == ciSymbol::java_lang_Integer_IntegerCache()) {
- return true;
- }
- }
- return false;
-}
-
-// Fetch the base value in the autobox array
-static bool fetch_autobox_base(Compile::AliasType* atp, int& cache_offset) {
- if (atp != NULL && atp->field() != NULL) {
- ciField* field = atp->field();
- ciSymbol* klass = field->holder()->name();
- if (field->name() == ciSymbol::cache_field_name() &&
- field->holder()->uses_default_loader() &&
- klass == ciSymbol::java_lang_Integer_IntegerCache()) {
- assert(field->is_constant(), "what?");
- ciObjArray* array = field->constant_value().as_object()->as_obj_array();
- // Fetch the box object at the base of the array and get its value
- ciInstance* box = array->obj_at(0)->as_instance();
- ciInstanceKlass* ik = box->klass()->as_instance_klass();
- if (ik->nof_nonstatic_fields() == 1) {
- // This should be true nonstatic_field_at requires calling
- // nof_nonstatic_fields so check it anyway
- ciConstant c = box->field_value(ik->nonstatic_field_at(0));
- cache_offset = c.as_int();
- }
- return true;
- }
- }
- return false;
-}
-
-// Returns true if the AliasType refers to the value field of an
-// autobox object. Currently only handles Integer.
-static bool is_autobox_object(Compile::AliasType* atp) {
- if (atp != NULL && atp->field() != NULL) {
- ciField* field = atp->field();
- ciSymbol* klass = field->holder()->name();
- if (field->name() == ciSymbol::value_name() &&
- field->holder()->uses_default_loader() &&
- klass == ciSymbol::java_lang_Integer()) {
- return true;
- }
- }
- return false;
-}
-
-
// We're loading from an object which has autobox behaviour.
// If this object is the result of a valueOf call we'll have a phi
// merging a newly allocated object and a load from the cache.
// We want to replace this load with the original incoming
// argument to the valueOf call.
Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
- Node* base = in(Address)->in(AddPNode::Base);
- if (base->is_Phi() && base->req() == 3) {
- AllocateNode* allocation = NULL;
- int allocation_index = -1;
- int load_index = -1;
- for (uint i = 1; i < base->req(); i++) {
- allocation = AllocateNode::Ideal_allocation(base->in(i), phase);
- if (allocation != NULL) {
- allocation_index = i;
- load_index = 3 - allocation_index;
- break;
- }
- }
- bool has_load = ( allocation != NULL &&
- (base->in(load_index)->is_Load() ||
- base->in(load_index)->is_DecodeN() &&
- base->in(load_index)->in(1)->is_Load()) );
- if (has_load && in(Memory)->is_Phi() && in(Memory)->in(0) == base->in(0)) {
- // Push the loads from the phi that comes from valueOf up
- // through it to allow elimination of the loads and the recovery
- // of the original value.
- Node* mem_phi = in(Memory);
- Node* offset = in(Address)->in(AddPNode::Offset);
- Node* region = base->in(0);
-
- Node* in1 = clone();
- Node* in1_addr = in1->in(Address)->clone();
- in1_addr->set_req(AddPNode::Base, base->in(allocation_index));
- in1_addr->set_req(AddPNode::Address, base->in(allocation_index));
- in1_addr->set_req(AddPNode::Offset, offset);
- in1->set_req(0, region->in(allocation_index));
- in1->set_req(Address, in1_addr);
- in1->set_req(Memory, mem_phi->in(allocation_index));
-
- Node* in2 = clone();
- Node* in2_addr = in2->in(Address)->clone();
- in2_addr->set_req(AddPNode::Base, base->in(load_index));
- in2_addr->set_req(AddPNode::Address, base->in(load_index));
- in2_addr->set_req(AddPNode::Offset, offset);
- in2->set_req(0, region->in(load_index));
- in2->set_req(Address, in2_addr);
- in2->set_req(Memory, mem_phi->in(load_index));
-
- in1_addr = phase->transform(in1_addr);
- in1 = phase->transform(in1);
- in2_addr = phase->transform(in2_addr);
- in2 = phase->transform(in2);
-
- PhiNode* result = PhiNode::make_blank(region, this);
- result->set_req(allocation_index, in1);
- result->set_req(load_index, in2);
- return result;
- }
+ assert(phase->C->eliminate_boxing(), "sanity");
+ intptr_t ignore = 0;
+ Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
+ if ((base == NULL) || base->is_Phi()) {
+ // Push the loads from the phi that comes from valueOf up
+ // through it to allow elimination of the loads and the recovery
+ // of the original value. This is done in split_through_phi().
+ return NULL;
} else if (base->is_Load() ||
base->is_DecodeN() && base->in(1)->is_Load()) {
+ // Eliminate the load of boxed value for integer types from the cache
+ // array by deriving the value from the index into the array.
+ // Capture the offset of the load and then reverse the computation.
+
+ // Get LoadN node which loads a boxing object from 'cache' array.
if (base->is_DecodeN()) {
- // Get LoadN node which loads cached Integer object
base = base->in(1);
}
- // Eliminate the load of Integer.value for integers from the cache
- // array by deriving the value from the index into the array.
- // Capture the offset of the load and then reverse the computation.
- Node* load_base = base->in(Address)->in(AddPNode::Base);
- if (load_base->is_DecodeN()) {
- // Get LoadN node which loads IntegerCache.cache field
- load_base = load_base->in(1);
+ if (!base->in(Address)->is_AddP()) {
+ return NULL; // Complex address
}
- if (load_base != NULL) {
- Compile::AliasType* atp = phase->C->alias_type(load_base->adr_type());
- intptr_t cache_offset;
- int shift = -1;
- Node* cache = NULL;
- if (is_autobox_cache(atp)) {
- shift = exact_log2(type2aelembytes(T_OBJECT));
- cache = AddPNode::Ideal_base_and_offset(load_base->in(Address), phase, cache_offset);
- }
- if (cache != NULL && base->in(Address)->is_AddP()) {
+ AddPNode* address = base->in(Address)->as_AddP();
+ Node* cache_base = address->in(AddPNode::Base);
+ if ((cache_base != NULL) && cache_base->is_DecodeN()) {
+ // Get ConP node which is static 'cache' field.
+ cache_base = cache_base->in(1);
+ }
+ if ((cache_base != NULL) && cache_base->is_Con()) {
+ const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr();
+ if ((base_type != NULL) && base_type->is_autobox_cache()) {
Node* elements[4];
- int count = base->in(Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
- int cache_low;
- if (count > 0 && fetch_autobox_base(atp, cache_low)) {
- int offset = arrayOopDesc::base_offset_in_bytes(memory_type()) - (cache_low << shift);
- // Add up all the offsets making of the address of the load
- Node* result = elements[0];
- for (int i = 1; i < count; i++) {
- result = phase->transform(new (phase->C) AddXNode(result, elements[i]));
- }
- // Remove the constant offset from the address and then
- // remove the scaling of the offset to recover the original index.
- result = phase->transform(new (phase->C) AddXNode(result, phase->MakeConX(-offset)));
- if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
- // Peel the shift off directly but wrap it in a dummy node
- // since Ideal can't return existing nodes
- result = new (phase->C) RShiftXNode(result->in(1), phase->intcon(0));
- } else {
- result = new (phase->C) RShiftXNode(result, phase->intcon(shift));
- }
+ int shift = exact_log2(type2aelembytes(T_OBJECT));
+ int count = address->unpack_offsets(elements, ARRAY_SIZE(elements));
+ if ((count > 0) && elements[0]->is_Con() &&
+ ((count == 1) ||
+ (count == 2) && elements[1]->Opcode() == Op_LShiftX &&
+ elements[1]->in(2) == phase->intcon(shift))) {
+ ciObjArray* array = base_type->const_oop()->as_obj_array();
+ // Fetch the box object cache[0] at the base of the array and get its value
+ ciInstance* box = array->obj_at(0)->as_instance();
+ ciInstanceKlass* ik = box->klass()->as_instance_klass();
+ assert(ik->is_box_klass(), "sanity");
+ assert(ik->nof_nonstatic_fields() == 1, "change following code");
+ if (ik->nof_nonstatic_fields() == 1) {
+ // This should be true; nonstatic_field_at requires calling
+ // nof_nonstatic_fields, so check it anyway.
+ ciConstant c = box->field_value(ik->nonstatic_field_at(0));
+ BasicType bt = c.basic_type();
+ // Only integer types have boxing cache.
+ assert(bt == T_BOOLEAN || bt == T_CHAR ||
+ bt == T_BYTE || bt == T_SHORT ||
+ bt == T_INT || bt == T_LONG, err_msg_res("wrong type = %s", type2name(bt)));
+ jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int();
+ if (cache_low != (int)cache_low) {
+ return NULL; // should not happen since cache is array indexed by value
+ }
+ jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift);
+ if (offset != (int)offset) {
+ return NULL; // should not happen since cache is array indexed by value
+ }
+ // Add up all the offsets making up the address of the load
+ Node* result = elements[0];
+ for (int i = 1; i < count; i++) {
+ result = phase->transform(new (phase->C) AddXNode(result, elements[i]));
+ }
+ // Remove the constant offset from the address and then
+ // remove the scaling of the offset to recover the original index.
+ result = phase->transform(new (phase->C) AddXNode(result, phase->MakeConX(-(int)offset)));
+ if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
+ // Peel the shift off directly but wrap it in a dummy node
+ // since Ideal can't return existing nodes
+ result = new (phase->C) RShiftXNode(result->in(1), phase->intcon(0));
+ } else if (result->is_Add() && result->in(2)->is_Con() &&
+ result->in(1)->Opcode() == Op_LShiftX &&
+ result->in(1)->in(2) == phase->intcon(shift)) {
+ // We can't do general optimization: ((X<<Z) + Y) >> Z ==> X + (Y>>Z)
+ // but for boxing cache access we know that X<<Z will not overflow
+ // (there is a range check) so we do this optimization by hand here.
+ Node* add_con = new (phase->C) RShiftXNode(result->in(2), phase->intcon(shift));
+ result = new (phase->C) AddXNode(result->in(1)->in(1), phase->transform(add_con));
+ } else {
+ result = new (phase->C) RShiftXNode(result, phase->intcon(shift));
+ }
#ifdef _LP64
- result = new (phase->C) ConvL2INode(phase->transform(result));
+ if (bt != T_LONG) {
+ result = new (phase->C) ConvL2INode(phase->transform(result));
+ }
+#else
+ if (bt == T_LONG) {
+ result = new (phase->C) ConvI2LNode(phase->transform(result));
+ }
#endif
- return result;
+ return result;
+ }
}
}
}
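
The rewrite above reverses the cache-indexing arithmetic: the address of cache[i] is base + array_base_offset + (i << shift), and cache[0] boxes the value cache_low, so the boxed value behind byte offset off is ((off - array_base_offset) >> shift) + cache_low. The extra Add/LShiftX case applies ((X<<Z) + Y) >> Z ==> X + (Y>>Z) by hand, which is safe only because the range check guarantees X<<Z cannot overflow. A sketch of the recovery with assumed constants (16-byte object-array base offset, 8-byte uncompressed oops, cache_low == -128 as in java.lang.Integer.IntegerCache); none of these values come from this patch:

    #include <cstdint>

    // Sketch only, not HotSpot code; all three constants are assumptions.
    static intptr_t recover_boxed_value(intptr_t byte_off) {
      const int      shift     = 3;    // log2(oop size), assuming 8-byte references
      const int      base_off  = 16;   // assumed arrayOopDesc::base_offset_in_bytes(T_OBJECT)
      const intptr_t cache_low = -128; // assumed value boxed by cache[0]
      // element index = (byte_off - base_off) >> shift; boxed value = index + cache_low
      return ((byte_off - base_off) >> shift) + cache_low;
    }
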
@@ -1304,65 +1284,131 @@ Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
return NULL;
}
-//------------------------------split_through_phi------------------------------
-// Split instance field load through Phi.
-Node *LoadNode::split_through_phi(PhaseGVN *phase) {
- Node* mem = in(MemNode::Memory);
- Node* address = in(MemNode::Address);
- const TypePtr *addr_t = phase->type(address)->isa_ptr();
- const TypeOopPtr *t_oop = addr_t->isa_oopptr();
-
- assert(mem->is_Phi() && (t_oop != NULL) &&
- t_oop->is_known_instance_field(), "invalide conditions");
-
- Node *region = mem->in(0);
+static bool stable_phi(PhiNode* phi, PhaseGVN *phase) {
+ Node* region = phi->in(0);
if (region == NULL) {
- return NULL; // Wait stable graph
+ return false; // Wait stable graph
}
- uint cnt = mem->req();
+ uint cnt = phi->req();
for (uint i = 1; i < cnt; i++) {
Node* rc = region->in(i);
if (rc == NULL || phase->type(rc) == Type::TOP)
- return NULL; // Wait stable graph
- Node *in = mem->in(i);
- if (in == NULL) {
+ return false; // Wait stable graph
+ Node* in = phi->in(i);
+ if (in == NULL || phase->type(in) == Type::TOP)
+ return false; // Wait stable graph
+ }
+ return true;
+}
+//------------------------------split_through_phi------------------------------
+// Split instance or boxed field load through Phi.
+Node *LoadNode::split_through_phi(PhaseGVN *phase) {
+ Node* mem = in(Memory);
+ Node* address = in(Address);
+ const TypeOopPtr *t_oop = phase->type(address)->isa_oopptr();
+
+ assert((t_oop != NULL) &&
+ (t_oop->is_known_instance_field() ||
+ t_oop->is_ptr_to_boxed_value()), "invalide conditions");
+
+ Compile* C = phase->C;
+ intptr_t ignore = 0;
+ Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore);
+ bool base_is_phi = (base != NULL) && base->is_Phi();
+ bool load_boxed_values = t_oop->is_ptr_to_boxed_value() && C->aggressive_unboxing() &&
+ (base != NULL) && (base == address->in(AddPNode::Base)) &&
+ phase->type(base)->higher_equal(TypePtr::NOTNULL);
+
+ if (!((mem->is_Phi() || base_is_phi) &&
+ (load_boxed_values || t_oop->is_known_instance_field()))) {
+ return NULL; // memory is not Phi
+ }
+
+ if (mem->is_Phi()) {
+ if (!stable_phi(mem->as_Phi(), phase)) {
return NULL; // Wait stable graph
}
+ uint cnt = mem->req();
+ // Check for loop invariant memory.
+ if (cnt == 3) {
+ for (uint i = 1; i < cnt; i++) {
+ Node* in = mem->in(i);
+ Node* m = optimize_memory_chain(in, t_oop, this, phase);
+ if (m == mem) {
+ set_req(Memory, mem->in(cnt - i));
+ return this; // made change
+ }
+ }
+ }
}
- // Check for loop invariant.
- if (cnt == 3) {
- for (uint i = 1; i < cnt; i++) {
- Node *in = mem->in(i);
- Node* m = MemNode::optimize_memory_chain(in, addr_t, phase);
- if (m == mem) {
- set_req(MemNode::Memory, mem->in(cnt - i)); // Skip this phi.
- return this;
+ if (base_is_phi) {
+ if (!stable_phi(base->as_Phi(), phase)) {
+ return NULL; // Wait stable graph
+ }
+ uint cnt = base->req();
+ // Check for loop invariant memory.
+ if (cnt == 3) {
+ for (uint i = 1; i < cnt; i++) {
+ if (base->in(i) == base) {
+ return NULL; // Wait stable graph
+ }
}
}
}
+
+ bool load_boxed_phi = load_boxed_values && base_is_phi && (base->in(0) == mem->in(0));
+
// Split through Phi (see original code in loopopts.cpp).
- assert(phase->C->have_alias_type(addr_t), "instance should have alias type");
+ assert(C->have_alias_type(t_oop), "instance should have alias type");
// Do nothing here if Identity will find a value
// (to avoid infinite chain of value phis generation).
if (!phase->eqv(this, this->Identity(phase)))
return NULL;
- // Skip the split if the region dominates some control edge of the address.
- if (!MemNode::all_controls_dominate(address, region))
- return NULL;
+ // Select Region to split through.
+ Node* region;
+ if (!base_is_phi) {
+ assert(mem->is_Phi(), "sanity");
+ region = mem->in(0);
+ // Skip if the region dominates some control edge of the address.
+ if (!MemNode::all_controls_dominate(address, region))
+ return NULL;
+ } else if (!mem->is_Phi()) {
+ assert(base_is_phi, "sanity");
+ region = base->in(0);
+ // Skip if the region dominates some control edge of the memory.
+ if (!MemNode::all_controls_dominate(mem, region))
+ return NULL;
+ } else if (base->in(0) != mem->in(0)) {
+ assert(base_is_phi && mem->is_Phi(), "sanity");
+ if (MemNode::all_controls_dominate(mem, base->in(0))) {
+ region = base->in(0);
+ } else if (MemNode::all_controls_dominate(address, mem->in(0))) {
+ region = mem->in(0);
+ } else {
+ return NULL; // complex graph
+ }
+ } else {
+ assert(base->in(0) == mem->in(0), "sanity");
+ region = mem->in(0);
+ }
const Type* this_type = this->bottom_type();
- int this_index = phase->C->get_alias_index(addr_t);
- int this_offset = addr_t->offset();
- int this_iid = addr_t->is_oopptr()->instance_id();
- PhaseIterGVN *igvn = phase->is_IterGVN();
- Node *phi = new (igvn->C) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
+ int this_index = C->get_alias_index(t_oop);
+ int this_offset = t_oop->offset();
+ int this_iid = t_oop->instance_id();
+ if (!t_oop->is_known_instance() && load_boxed_values) {
+ // Use _idx of address base for boxed values.
+ this_iid = base->_idx;
+ }
+ PhaseIterGVN* igvn = phase->is_IterGVN();
+ Node* phi = new (C) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
for (uint i = 1; i < region->req(); i++) {
- Node *x;
+ Node* x;
Node* the_clone = NULL;
- if (region->in(i) == phase->C->top()) {
- x = phase->C->top(); // Dead path? Use a dead data op
+ if (region->in(i) == C->top()) {
+ x = C->top(); // Dead path? Use a dead data op
} else {
x = this->clone(); // Else clone up the data op
the_clone = x; // Remember for possible deletion.
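
The surrounding loop clones the load once per path of the selected region. The shape of the rewrite, shown for two paths as a plain C++ sketch (load() is a hypothetical stand-in for a memory access, not a HotSpot call): a load whose memory or address base is a Phi becomes a Phi of per-path loads, so each path can fold independently.

    static int load(int mem, int addr) { return mem + addr; }  // stand-in for a real load

    static int before_split(bool c, int m1, int m2, int a) {
      int mem = c ? m1 : m2;   // the memory Phi
      return load(mem, a);     // single load after the merge point
    }
    static int after_split(bool c, int m1, int m2, int a) {
      return c ? load(m1, a)   // per-path loads feeding the result Phi
               : load(m2, a);
    }
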
@@ -1372,10 +1418,16 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
} else {
x->set_req(0, NULL);
}
- for (uint j = 1; j < this->req(); j++) {
- Node *in = this->in(j);
- if (in->is_Phi() && in->in(0) == region)
- x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
+ if (mem->is_Phi() && (mem->in(0) == region)) {
+ x->set_req(Memory, mem->in(i)); // Use pre-Phi input for the clone.
+ }
+ if (address->is_Phi() && address->in(0) == region) {
+ x->set_req(Address, address->in(i)); // Use pre-Phi input for the clone
+ }
+ if (base_is_phi && (base->in(0) == region)) {
+ Node* base_x = base->in(i); // Clone address for loads from boxed objects.
+ Node* adr_x = phase->transform(new (C) AddPNode(base_x,base_x,address->in(AddPNode::Offset)));
+ x->set_req(Address, adr_x);
}
}
// Check for a 'win' on some paths
@@ -1405,7 +1457,7 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
if (y != x) {
x = y;
} else {
- y = igvn->hash_find(x);
+ y = igvn->hash_find_insert(x);
if (y) {
x = y;
} else {
@@ -1416,8 +1468,9 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
}
}
}
- if (x != the_clone && the_clone != NULL)
+ if (x != the_clone && the_clone != NULL) {
igvn->remove_dead_node(the_clone);
+ }
phi->set_req(i, x);
}
// Record Phi
@@ -1456,31 +1509,23 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// A method-invariant, non-null address (constant or 'this' argument).
set_req(MemNode::Control, NULL);
}
-
- if (EliminateAutoBox && can_reshape) {
- assert(!phase->type(base)->higher_equal(TypePtr::NULL_PTR), "the autobox pointer should be non-null");
- Compile::AliasType* atp = phase->C->alias_type(adr_type());
- if (is_autobox_object(atp)) {
- Node* result = eliminate_autobox(phase);
- if (result != NULL) return result;
- }
- }
}
Node* mem = in(MemNode::Memory);
const TypePtr *addr_t = phase->type(address)->isa_ptr();
- if (addr_t != NULL) {
+ if (can_reshape && (addr_t != NULL)) {
// try to optimize our memory input
- Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, phase);
+ Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase);
if (opt_mem != mem) {
set_req(MemNode::Memory, opt_mem);
if (phase->type( opt_mem ) == Type::TOP) return NULL;
return this;
}
const TypeOopPtr *t_oop = addr_t->isa_oopptr();
- if (can_reshape && opt_mem->is_Phi() &&
- (t_oop != NULL) && t_oop->is_known_instance_field()) {
+ if ((t_oop != NULL) &&
+ (t_oop->is_known_instance_field() ||
+ t_oop->is_ptr_to_boxed_value())) {
PhaseIterGVN *igvn = phase->is_IterGVN();
if (igvn != NULL && igvn->_worklist.member(opt_mem)) {
// Delay this transformation until memory Phi is processed.
@@ -1490,6 +1535,11 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Split instance field load through Phi.
Node* result = split_through_phi(phase);
if (result != NULL) return result;
+
+ if (t_oop->is_ptr_to_boxed_value()) {
+ Node* result = eliminate_autobox(phase);
+ if (result != NULL) return result;
+ }
}
}
@@ -1548,6 +1598,40 @@ LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
return NULL;
}
+// Try to constant-fold a stable array element.
+static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
+ assert(ary->is_stable(), "array should be stable");
+
+ if (ary->const_oop() != NULL) {
+ // Decode the results of GraphKit::array_element_address.
+ ciArray* aobj = ary->const_oop()->as_array();
+ ciConstant con = aobj->element_value_by_offset(off);
+
+ if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
+ const Type* con_type = Type::make_from_constant(con);
+ if (con_type != NULL) {
+ if (con_type->isa_aryptr()) {
+ // Join with the array element type, in case it is also stable.
+ int dim = ary->stable_dimension();
+ con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
+ }
+ if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
+ con_type = con_type->make_narrowoop();
+ }
+#ifndef PRODUCT
+ if (TraceIterativeGVN) {
+ tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
+ con_type->dump(); tty->cr();
+ }
+#endif //PRODUCT
+ return con_type;
+ }
+ }
+ }
+
+ return NULL;
+}
+
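
fold_stable_ary_elem() is what lets a load from a stable array collapse to a constant once the element is known. In miniature (a sketch, not the VM's notion of stability; stable_cache is a hypothetical array whose elements never change after initialization):

    static const int stable_cache[3] = {10, 20, 30};  // contents fixed once initialized

    static int load_elem(int i) { return stable_cache[i]; }
    static int folded()         { return 20; }  // load_elem(1) folds to the constant 20
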
//------------------------------Value-----------------------------------------
const Type *LoadNode::Value( PhaseTransform *phase ) const {
// Either input is TOP ==> the result is TOP
@@ -1562,8 +1646,31 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
Compile* C = phase->C;
// Try to guess loaded type from pointer type
- if (tp->base() == Type::AryPtr) {
- const Type *t = tp->is_aryptr()->elem();
+ if (tp->isa_aryptr()) {
+ const TypeAryPtr* ary = tp->is_aryptr();
+ const Type *t = ary->elem();
+
+ // Determine whether the reference is beyond the header or not, by comparing
+ // the offset against the offset of the start of the array's data.
+ // Different array types begin at slightly different offsets (12 vs. 16).
+ // We choose T_BYTE as an example base type that is least restrictive
+ // as to alignment, which will therefore produce the smallest
+ // possible base offset.
+ const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+ const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
+
+ // Try to constant-fold a stable array element.
+ if (FoldStableValues && ary->is_stable()) {
+ // Make sure the reference is not into the header
+ if (off_beyond_header && off != Type::OffsetBot) {
+ assert(adr->is_AddP() && adr->in(AddPNode::Offset)->is_Con(), "offset is a constant");
+ const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
+ if (con_type != NULL) {
+ return con_type;
+ }
+ }
+ }
+
// Don't do this for integer types. There is only potential profit if
// the element type t is lower than _type; that is, for int types, if _type is
// more restrictive than t. This only happens here if one is short and the other
@@ -1584,32 +1691,30 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
&& Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
// t might actually be lower than _type, if _type is a unique
// concrete subclass of abstract class t.
- // Make sure the reference is not into the header, by comparing
- // the offset against the offset of the start of the array's data.
- // Different array types begin at slightly different offsets (12 vs. 16).
- // We choose T_BYTE as an example base type that is least restrictive
- // as to alignment, which will therefore produce the smallest
- // possible base offset.
- const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
- if ((uint)off >= (uint)min_base_off) { // is the offset beyond the header?
+ if (off_beyond_header) { // is the offset beyond the header?
const Type* jt = t->join(_type);
// In any case, do not allow the join, per se, to empty out the type.
if (jt->empty() && !t->empty()) {
// This can happen if an interface-typed array narrows to a class type.
jt = _type;
}
-
- if (EliminateAutoBox && adr->is_AddP()) {
+#ifdef ASSERT
+ if (phase->C->eliminate_boxing() && adr->is_AddP()) {
// The pointers in the autobox arrays are always non-null
Node* base = adr->in(AddPNode::Base);
- if (base != NULL &&
- !phase->type(base)->higher_equal(TypePtr::NULL_PTR)) {
- Compile::AliasType* atp = C->alias_type(base->adr_type());
- if (is_autobox_cache(atp)) {
- return jt->join(TypePtr::NOTNULL)->is_ptr();
+ if ((base != NULL) && base->is_DecodeN()) {
+ // Get LoadN node which loads IntegerCache.cache field
+ base = base->in(1);
+ }
+ if ((base != NULL) && base->is_Con()) {
+ const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
+ if ((base_type != NULL) && base_type->is_autobox_cache()) {
+ // It could be a narrow oop.
+ assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
}
}
}
+#endif
return jt;
}
}
@@ -1649,6 +1754,10 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
// Optimizations for constant objects
ciObject* const_oop = tinst->const_oop();
if (const_oop != NULL) {
+ // For a constant boxed value, treat the target field as a compile-time constant.
+ if (tinst->is_ptr_to_boxed_value()) {
+ return tinst->get_const_boxed_value();
+ } else
// For constant CallSites treat the target field as a compile time constant.
if (const_oop->is_call_site()) {
ciCallSite* call_site = const_oop->as_call_site();
@@ -1770,7 +1879,8 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
// (Also allow a variable load from a fresh array to produce zero.)
const TypeOopPtr *tinst = tp->isa_oopptr();
bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
- if (ReduceFieldZeroing || is_instance) {
+ bool is_boxed_value = (tinst != NULL) && tinst->is_ptr_to_boxed_value();
+ if (ReduceFieldZeroing || is_instance || is_boxed_value) {
Node* value = can_see_stored_value(mem,phase);
if (value != NULL && value->is_Con()) {
assert(value->bottom_type()->higher_equal(_type),"sanity");
@@ -1932,7 +2042,7 @@ Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* a
assert(adr_type != NULL, "expecting TypeKlassPtr");
#ifdef _LP64
if (adr_type->is_ptr_to_narrowklass()) {
- assert(UseCompressedKlassPointers, "no compressed klasses");
+ assert(UseCompressedClassPointers, "no compressed klasses");
Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
}
@@ -2270,7 +2380,7 @@ StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, cons
val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
} else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
- (UseCompressedKlassPointers && val->bottom_type()->isa_klassptr() &&
+ (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
adr->bottom_type()->isa_rawptr())) {
val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
@@ -2891,28 +3001,60 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this;
// Don't bother trying to transform a dead node
- if (in(0) && in(0)->is_top()) return NULL;
+ if (in(0) && in(0)->is_top()) {
+ return NULL;
+ }
// Eliminate volatile MemBars for scalar replaced objects.
- if (can_reshape && req() == (Precedent+1) &&
- (Opcode() == Op_MemBarAcquire || Opcode() == Op_MemBarVolatile)) {
- // Volatile field loads and stores.
- Node* my_mem = in(MemBarNode::Precedent);
- if (my_mem != NULL && my_mem->is_Mem()) {
- const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
- // Check for scalar replaced object reference.
- if( t_oop != NULL && t_oop->is_known_instance_field() &&
- t_oop->offset() != Type::OffsetBot &&
- t_oop->offset() != Type::OffsetTop) {
- // Replace MemBar projections by its inputs.
- PhaseIterGVN* igvn = phase->is_IterGVN();
- igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
- igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
- // Must return either the original node (now dead) or a new node
- // (Do not return a top here, since that would break the uniqueness of top.)
- return new (phase->C) ConINode(TypeInt::ZERO);
+ if (can_reshape && req() == (Precedent+1)) {
+ bool eliminate = false;
+ int opc = Opcode();
+ if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
+ // Volatile field loads and stores.
+ Node* my_mem = in(MemBarNode::Precedent);
+ // The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge.
+ if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
+ // if the Precedent is a decodeN and its input (a Load) is used at more than one place,
+ // replace this Precedent (decodeN) with the Load instead.
+ if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) {
+ Node* load_node = my_mem->in(1);
+ set_req(MemBarNode::Precedent, load_node);
+ phase->is_IterGVN()->_worklist.push(my_mem);
+ my_mem = load_node;
+ } else {
+ assert(my_mem->unique_out() == this, "sanity");
+ del_req(Precedent);
+ phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later
+ my_mem = NULL;
+ }
+ }
+ if (my_mem != NULL && my_mem->is_Mem()) {
+ const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
+ // Check for scalar replaced object reference.
+ if( t_oop != NULL && t_oop->is_known_instance_field() &&
+ t_oop->offset() != Type::OffsetBot &&
+ t_oop->offset() != Type::OffsetTop) {
+ eliminate = true;
+ }
+ }
+ } else if (opc == Op_MemBarRelease) {
+ // Final field stores.
+ Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
+ if ((alloc != NULL) && alloc->is_Allocate() &&
+ alloc->as_Allocate()->_is_non_escaping) {
+ // The allocated object does not escape.
+ eliminate = true;
}
}
+ if (eliminate) {
+ // Replace MemBar projections by its inputs.
+ PhaseIterGVN* igvn = phase->is_IterGVN();
+ igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
+ igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
+ // Must return either the original node (now dead) or a new node
+ // (Do not return a top here, since that would break the uniqueness of top.)
+ return new (phase->C) ConINode(TypeInt::ZERO);
+ }
}
return NULL;
}
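
Both elimination cases rest on the same observation: a memory barrier only matters if another thread can observe the accesses it orders. For the new MemBarRelease case, once escape analysis proves the allocation thread-local, no other thread can ever load the object's final fields, so the release fence pairs with nothing and is dead. A plain C++ analogy (std::atomic_thread_fence stands in for the IR barrier; the escape-analysis proof is assumed, not computed):

    #include <atomic>

    struct Obj { int final_field; };

    static int thread_local_use() {
      Obj o{42};  // allocation that provably never escapes this thread
      // Orders o's initialization against a later publishing store, but with
      // no publication, removing the fence changes nothing observable.
      std::atomic_thread_fence(std::memory_order_release);
      return o.final_field;
    }
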
@@ -3124,9 +3266,7 @@ intptr_t InitializeNode::get_store_offset(Node* st, PhaseTransform* phase) {
// within the initialization without creating a vicious cycle, such as:
// { Foo p = new Foo(); p.next = p; }
// True for constants and parameters and small combinations thereof.
-bool InitializeNode::detect_init_independence(Node* n,
- bool st_is_pinned,
- int& count) {
+bool InitializeNode::detect_init_independence(Node* n, int& count) {
if (n == NULL) return true; // (can this really happen?)
if (n->is_Proj()) n = n->in(0);
if (n == this) return false; // found a cycle
@@ -3146,7 +3286,6 @@ bool InitializeNode::detect_init_independence(Node* n,
// a store is never pinned *before* the availability of its inputs.
if (!MemNode::all_controls_dominate(n, this))
return false; // failed to prove a good control
-
}
// Check data edges for possible dependencies on 'this'.
@@ -3156,7 +3295,7 @@ bool InitializeNode::detect_init_independence(Node* n,
if (m == NULL || m == n || m->is_top()) continue;
uint first_i = n->find_edge(m);
if (i != first_i) continue; // process duplicate edge just once
- if (!detect_init_independence(m, st_is_pinned, count)) {
+ if (!detect_init_independence(m, count)) {
return false;
}
}
@@ -3187,7 +3326,7 @@ intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase,
return FAIL; // wrong allocation! (store needs to float up)
Node* val = st->in(MemNode::ValueIn);
int complexity_count = 0;
- if (!detect_init_independence(val, true, complexity_count))
+ if (!detect_init_independence(val, complexity_count))
return FAIL; // stored value must be 'simple enough'
// The Store can be captured only if nothing after the allocation
@@ -4334,7 +4473,7 @@ static void verify_memory_slice(const MergeMemNode* m, int alias_idx, Node* n) {
}
}
#else // !ASSERT
-#define verify_memory_slice(m,i,n) (0) // PRODUCT version is no-op
+#define verify_memory_slice(m,i,n) (void)(0) // PRODUCT version is no-op
#endif
diff --git a/src/share/vm/opto/memnode.hpp b/src/share/vm/opto/memnode.hpp
index 567b2b1e2..73a143028 100644
--- a/src/share/vm/opto/memnode.hpp
+++ b/src/share/vm/opto/memnode.hpp
@@ -75,8 +75,8 @@ public:
PhaseTransform* phase);
static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
- static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
- static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
+ static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
+ static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
// This one should probably be a phase-specific function:
static bool all_controls_dominate(Node* dom, Node* sub);
@@ -1102,7 +1102,7 @@ public:
Node* make_raw_address(intptr_t offset, PhaseTransform* phase);
- bool detect_init_independence(Node* n, bool st_is_pinned, int& count);
+ bool detect_init_independence(Node* n, int& count);
void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
PhaseGVN* phase);
diff --git a/src/share/vm/opto/multnode.cpp b/src/share/vm/opto/multnode.cpp
index 280414106..dca8dbe70 100644
--- a/src/share/vm/opto/multnode.cpp
+++ b/src/share/vm/opto/multnode.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "opto/callnode.hpp"
#include "opto/matcher.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
@@ -73,13 +74,26 @@ bool ProjNode::is_CFG() const {
return (_con == TypeFunc::Control && def->is_CFG());
}
+const Type* ProjNode::proj_type(const Type* t) const {
+ if (t == Type::TOP) {
+ return Type::TOP;
+ }
+ if (t == Type::BOTTOM) {
+ return Type::BOTTOM;
+ }
+ t = t->is_tuple()->field_at(_con);
+ Node* n = in(0);
+ if ((_con == TypeFunc::Parms) &&
+ n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method()) {
+ // The result of autoboxing is always non-null on normal path.
+ t = t->join(TypePtr::NOTNULL);
+ }
+ return t;
+}
+
const Type *ProjNode::bottom_type() const {
- if (in(0) == NULL) return Type::TOP;
- const Type *tb = in(0)->bottom_type();
- if( tb == Type::TOP ) return Type::TOP;
- if( tb == Type::BOTTOM ) return Type::BOTTOM;
- const TypeTuple *t = tb->is_tuple();
- return t->field_at(_con);
+ if (in(0) == NULL) return Type::TOP;
+ return proj_type(in(0)->bottom_type());
}
const TypePtr *ProjNode::adr_type() const {
@@ -115,11 +129,8 @@ void ProjNode::check_con() const {
//------------------------------Value------------------------------------------
const Type *ProjNode::Value( PhaseTransform *phase ) const {
- if( !in(0) ) return Type::TOP;
- const Type *t = phase->type(in(0));
- if( t == Type::TOP ) return t;
- if( t == Type::BOTTOM ) return t;
- return t->is_tuple()->field_at(_con);
+ if (in(0) == NULL) return Type::TOP;
+ return proj_type(phase->type(in(0)));
}
//------------------------------out_RegMask------------------------------------
diff --git a/src/share/vm/opto/multnode.hpp b/src/share/vm/opto/multnode.hpp
index fba94e5b4..242e58f48 100644
--- a/src/share/vm/opto/multnode.hpp
+++ b/src/share/vm/opto/multnode.hpp
@@ -60,6 +60,7 @@ protected:
virtual uint cmp( const Node &n ) const;
virtual uint size_of() const;
void check_con() const; // Called from constructor.
+ const Type* proj_type(const Type* t) const;
public:
ProjNode( Node *src, uint con, bool io_use = false )
@@ -83,6 +84,7 @@ public:
virtual const Type *Value( PhaseTransform *phase ) const;
virtual uint ideal_reg() const;
virtual const RegMask &out_RegMask() const;
+
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
diff --git a/src/share/vm/opto/node.cpp b/src/share/vm/opto/node.cpp
index e1698b26b..1df5eb51a 100644
--- a/src/share/vm/opto/node.cpp
+++ b/src/share/vm/opto/node.cpp
@@ -67,7 +67,8 @@ void Node::verify_construction() {
}
Compile::set_debug_idx(new_debug_idx);
set_debug_idx( new_debug_idx );
- assert(Compile::current()->unique() < (UINT_MAX - 1), "Node limit exceeded UINT_MAX");
+ assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
+ assert(Compile::current()->live_nodes() < (uint)MaxNodeLimit, "Live Node limit exceeded");
if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
BREAKPOINT;
@@ -471,9 +472,9 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
//------------------------------clone------------------------------------------
// Clone a Node.
Node *Node::clone() const {
- Compile *compile = Compile::current();
+ Compile* C = Compile::current();
uint s = size_of(); // Size of inherited Node
- Node *n = (Node*)compile->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
+ Node *n = (Node*)C->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
// Set the new input pointer array
n->_in = (Node**)(((char*)n)+s);
@@ -492,18 +493,18 @@ Node *Node::clone() const {
if (x != NULL) x->add_out(n);
}
if (is_macro())
- compile->add_macro_node(n);
+ C->add_macro_node(n);
if (is_expensive())
- compile->add_expensive_node(n);
+ C->add_expensive_node(n);
- n->set_idx(compile->next_unique()); // Get new unique index as well
+ n->set_idx(C->next_unique()); // Get new unique index as well
debug_only( n->verify_construction() );
NOT_PRODUCT(nodes_created++);
// Do not patch over the debug_idx of a clone, because it makes it
// impossible to break on the clone's moment of creation.
//debug_only( n->set_debug_idx( debug_idx() ) );
- compile->copy_node_notes_to(n, (Node*) this);
+ C->copy_node_notes_to(n, (Node*) this);
// MachNode clone
uint nopnds;
@@ -518,13 +519,12 @@ Node *Node::clone() const {
(const void*)(&mthis->_opnds), 1));
mach->_opnds = to;
for ( uint i = 0; i < nopnds; ++i ) {
- to[i] = from[i]->clone(compile);
+ to[i] = from[i]->clone(C);
}
}
// cloning CallNode may need to clone JVMState
if (n->is_Call()) {
- CallNode *call = n->as_Call();
- call->clone_jvms();
+ n->as_Call()->clone_jvms(C);
}
return n; // Return the clone
}
@@ -773,6 +773,21 @@ void Node::del_req( uint idx ) {
_in[_cnt] = NULL; // NULL out emptied slot
}
+//------------------------------del_req_ordered--------------------------------
+// Delete the required edge and compact the edge array with preserved order
+void Node::del_req_ordered( uint idx ) {
+ assert( idx < _cnt, "oob");
+ assert( !VerifyHashTableKeys || _hash_lock == 0,
+ "remove node from hash table before modifying it");
+ // First remove corresponding def-use edge
+ Node *n = in(idx);
+ if (n != NULL) n->del_out((Node *)this);
+ if (idx < _cnt - 1) { // Not last edge ?
+ Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx-1)*sizeof(Node*)));
+ }
+ _in[--_cnt] = NULL; // NULL out emptied slot
+}
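
Unlike del_req(), which compacts by moving the last edge into the emptied slot and therefore reorders inputs, del_req_ordered() shifts the tail down one slot so relative order survives. The std::vector analogue of the two strategies, as a sketch:

    #include <cstddef>
    #include <vector>

    static void del_unordered(std::vector<int>& v, size_t i) {
      v[i] = v.back();          // move the last edge into the hole: O(1), reorders
      v.pop_back();
    }
    static void del_ordered(std::vector<int>& v, size_t i) {
      v.erase(v.begin() + i);   // shift the tail left: order preserved
    }
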
+
//------------------------------ins_req----------------------------------------
// Insert a new required input at the end
void Node::ins_req( uint idx, Node *n ) {
@@ -811,6 +826,21 @@ int Node::replace_edge(Node* old, Node* neww) {
return nrep;
}
+/**
+ * Replace input edges in the range [start, end) that point to the 'old' node.
+ */
+int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end) {
+ if (old == neww) return 0; // nothing to do
+ uint nrep = 0;
+ for (int i = start; i < end; i++) {
+ if (in(i) == old) {
+ set_req(i, neww);
+ nrep++;
+ }
+ }
+ return nrep;
+}
+
//-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'
@@ -1383,6 +1413,21 @@ const TypeLong* Node::find_long_type() const {
return NULL;
}
+
+/**
+ * Return a ptr type for nodes which should have it.
+ */
+const TypePtr* Node::get_ptr_type() const {
+ const TypePtr* tp = this->bottom_type()->make_ptr();
+#ifdef ASSERT
+ if (tp == NULL) {
+ this->dump(1);
+ assert((tp != NULL), "unexpected node type");
+ }
+#endif
+ return tp;
+}
+
// Get a double constant from a ConstNode.
// Returns the constant if it is a double ConstNode
jdouble Node::getd() const {
diff --git a/src/share/vm/opto/node.hpp b/src/share/vm/opto/node.hpp
index f8f2c24e8..1c695f566 100644
--- a/src/share/vm/opto/node.hpp
+++ b/src/share/vm/opto/node.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,6 @@ class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class Block;
-class Block_Array;
class BoolNode;
class BoxLockNode;
class CMoveNode;
@@ -212,7 +211,7 @@ public:
// New Operator that takes a Compile pointer, this will eventually
// be the "new" New operator.
- inline void* operator new( size_t x, Compile* C) {
+ inline void* operator new( size_t x, Compile* C) throw() {
Node* n = (Node*)C->node_arena()->Amalloc_D(x);
#ifdef ASSERT
n->_in = (Node**)n; // magic cookie for assertion check
@@ -385,6 +384,7 @@ protected:
void add_req( Node *n ); // Append a NEW required input
void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
void del_req( uint idx ); // Delete required edge & compact
+ void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
void ins_req( uint i, Node *n ); // Insert a NEW required input
void set_req( uint i, Node *n ) {
assert( is_not_dead(n), "can not use dead node");
@@ -410,6 +410,7 @@ protected:
// Find first occurrence of n among my edges:
int find_edge(Node* n);
int replace_edge(Node* old, Node* neww);
+ int replace_edges_in_range(Node* old, Node* neww, int start, int end);
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'
int disconnect_inputs(Node *n, Compile *c);
@@ -964,6 +965,8 @@ public:
}
const TypeLong* find_long_type() const;
+ const TypePtr* get_ptr_type() const;
+
// These guys are called by code generated by ADLC:
intptr_t get_ptr() const;
intptr_t get_narrowcon() const;
diff --git a/src/share/vm/opto/output.cpp b/src/share/vm/opto/output.cpp
index f5a1e08e1..6232c41f0 100644
--- a/src/share/vm/opto/output.cpp
+++ b/src/share/vm/opto/output.cpp
@@ -54,11 +54,10 @@ extern uint size_deopt_handler();
extern int emit_exception_handler(CodeBuffer &cbuf);
extern int emit_deopt_handler(CodeBuffer &cbuf);
-//------------------------------Output-----------------------------------------
// Convert Nodes to instruction bits and pass off to the VM
void Compile::Output() {
// RootNode goes
- assert( _cfg->_broot->_nodes.size() == 0, "" );
+ assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
// The number of new nodes (mostly MachNop) is proportional to
// the number of java calls and inner loops which are aligned.
@@ -68,17 +67,16 @@ void Compile::Output() {
return;
}
// Make sure I can find the Start Node
- Block_Array& bbs = _cfg->_bbs;
- Block *entry = _cfg->_blocks[1];
- Block *broot = _cfg->_broot;
+ Block *entry = _cfg->get_block(1);
+ Block *broot = _cfg->get_root_block();
- const StartNode *start = entry->_nodes[0]->as_Start();
+ const StartNode *start = entry->head()->as_Start();
// Replace StartNode with prolog
MachPrologNode *prolog = new (this) MachPrologNode();
- entry->_nodes.map( 0, prolog );
- bbs.map( prolog->_idx, entry );
- bbs.map( start->_idx, NULL ); // start is no longer in any block
+ entry->map_node(prolog, 0);
+ _cfg->map_node_to_block(prolog, entry);
+ _cfg->unmap_node_from_block(start); // start is no longer in any block
// Virtual methods need an unverified entry point
@@ -110,41 +108,44 @@ void Compile::Output() {
}
// Insert epilogs before every return
- for( uint i=0; i<_cfg->_num_blocks; i++ ) {
- Block *b = _cfg->_blocks[i];
- if( !b->is_connector() && b->non_connector_successor(0) == _cfg->_broot ) { // Found a program exit point?
- Node *m = b->end();
- if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) {
- MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
- b->add_inst( epilog );
- bbs.map(epilog->_idx, b);
- //_regalloc->set_bad(epilog->_idx); // Already initialized this way.
+ for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
+ Block* block = _cfg->get_block(i);
+ if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
+ Node* m = block->end();
+ if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
+ MachEpilogNode* epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
+ block->add_inst(epilog);
+ _cfg->map_node_to_block(epilog, block);
}
}
}
# ifdef ENABLE_ZAP_DEAD_LOCALS
- if ( ZapDeadCompiledLocals ) Insert_zap_nodes();
+ if (ZapDeadCompiledLocals) {
+ Insert_zap_nodes();
+ }
# endif
- uint* blk_starts = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks+1);
- blk_starts[0] = 0;
+ uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
+ blk_starts[0] = 0;
// Initialize code buffer and process short branches.
CodeBuffer* cb = init_buffer(blk_starts);
- if (cb == NULL || failing()) return;
+ if (cb == NULL || failing()) {
+ return;
+ }
ScheduleAndBundle();
#ifndef PRODUCT
if (trace_opto_output()) {
tty->print("\n---- After ScheduleAndBundle ----\n");
- for (uint i = 0; i < _cfg->_num_blocks; i++) {
+ for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
tty->print("\nBB#%03d:\n", i);
- Block *bb = _cfg->_blocks[i];
- for (uint j = 0; j < bb->_nodes.size(); j++) {
- Node *n = bb->_nodes[j];
+ Block* block = _cfg->get_block(i);
+ for (uint j = 0; j < block->number_of_nodes(); j++) {
+ Node* n = block->get_node(j);
OptoReg::Name reg = _regalloc->get_reg_first(n);
tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
n->dump();
@@ -153,11 +154,15 @@ void Compile::Output() {
}
#endif
- if (failing()) return;
+ if (failing()) {
+ return;
+ }
BuildOopMaps();
- if (failing()) return;
+ if (failing()) {
+ return;
+ }
fill_buffer(cb, blk_starts);
}
@@ -219,10 +224,10 @@ void Compile::Insert_zap_nodes() {
return; // no safepoints/oopmaps emitted for calls in stubs, so we don't care
// Insert call to zap runtime stub before every node with an oop map
- for( uint i=0; i<_cfg->_num_blocks; i++ ) {
- Block *b = _cfg->_blocks[i];
- for ( uint j = 0; j < b->_nodes.size(); ++j ) {
- Node *n = b->_nodes[j];
+ for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
+ Block *b = _cfg->get_block(i);
+ for ( uint j = 0; j < b->number_of_nodes(); ++j ) {
+ Node *n = b->get_node(j);
// Determining if we should insert a zap-a-lot node in output.
// We do that for all nodes that has oopmap info, except for calls
@@ -251,8 +256,8 @@ void Compile::Insert_zap_nodes() {
}
if (insert) {
Node *zap = call_zap_node(n->as_MachSafePoint(), i);
- b->_nodes.insert( j, zap );
- _cfg->_bbs.map( zap->_idx, b );
+ b->insert_node(zap, j);
+ _cfg->map_node_to_block(zap, b);
++j;
}
}
@@ -277,7 +282,6 @@ Node* Compile::call_zap_node(MachSafePointNode* node_to_check, int block_no) {
return _matcher->match_sfpt(ideal_node);
}
-//------------------------------is_node_getting_a_safepoint--------------------
bool Compile::is_node_getting_a_safepoint( Node* n) {
// This code duplicates the logic prior to the call of add_safepoint
// below in this file.
@@ -287,7 +291,6 @@ bool Compile::is_node_getting_a_safepoint( Node* n) {
# endif // ENABLE_ZAP_DEAD_LOCALS
-//------------------------------compute_loop_first_inst_sizes------------------
// Compute the size of first NumberOfLoopInstrToAlign instructions at the top
// of a loop. When aligning a loop we need to provide enough instructions
// in cpu's fetch buffer to feed decoders. The loop alignment could be
@@ -304,42 +307,39 @@ void Compile::compute_loop_first_inst_sizes() {
// or alignment padding is larger than MaxLoopPad. By default, MaxLoopPad
// is equal to OptoLoopAlignment-1 except on new Intel cpus, where it is
// equal to 11 bytes which is the largest address NOP instruction.
- if( MaxLoopPad < OptoLoopAlignment-1 ) {
- uint last_block = _cfg->_num_blocks-1;
- for( uint i=1; i <= last_block; i++ ) {
- Block *b = _cfg->_blocks[i];
+ if (MaxLoopPad < OptoLoopAlignment - 1) {
+ uint last_block = _cfg->number_of_blocks() - 1;
+ for (uint i = 1; i <= last_block; i++) {
+ Block* block = _cfg->get_block(i);
// Check the first loop's block which requires an alignment.
- if( b->loop_alignment() > (uint)relocInfo::addr_unit() ) {
+ if (block->loop_alignment() > (uint)relocInfo::addr_unit()) {
uint sum_size = 0;
uint inst_cnt = NumberOfLoopInstrToAlign;
- inst_cnt = b->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
+ inst_cnt = block->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
// Check subsequent fallthrough blocks if the loop's first
// block(s) does not have enough instructions.
- Block *nb = b;
- while( inst_cnt > 0 &&
- i < last_block &&
- !_cfg->_blocks[i+1]->has_loop_alignment() &&
- !nb->has_successor(b) ) {
+ Block *nb = block;
+ while(inst_cnt > 0 &&
+ i < last_block &&
+ !_cfg->get_block(i + 1)->has_loop_alignment() &&
+ !nb->has_successor(block)) {
i++;
- nb = _cfg->_blocks[i];
+ nb = _cfg->get_block(i);
inst_cnt = nb->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
} // while( inst_cnt > 0 && i < last_block )
- b->set_first_inst_size(sum_size);
+ block->set_first_inst_size(sum_size);
} // if( b->head()->is_Loop() )
} // for( i <= last_block )
} // if( MaxLoopPad < OptoLoopAlignment-1 )
}
-//----------------------shorten_branches---------------------------------------
// The architecture description provides short branch variants for some long
// branch instructions. Replace eligible long branches with short branches.
void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size) {
-
- // ------------------
// Compute size of each block, method size, and relocation information size
- uint nblocks = _cfg->_num_blocks;
+ uint nblocks = _cfg->number_of_blocks();
uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
uint* jmp_size = NEW_RESOURCE_ARRAY(uint,nblocks);
@@ -366,7 +366,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
uint last_avoid_back_to_back_adr = max_uint;
uint nop_size = (new (this) MachNopNode())->size(_regalloc);
for (uint i = 0; i < nblocks; i++) { // For all blocks
- Block *b = _cfg->_blocks[i];
+ Block* block = _cfg->get_block(i);
// During short branch replacement, we store the relative (to blk_starts)
// offset of jump in jmp_offset, rather than the absolute offset of jump.
@@ -379,10 +379,10 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
DEBUG_ONLY( jmp_rule[i] = 0; )
// Sum all instruction sizes to compute block size
- uint last_inst = b->_nodes.size();
+ uint last_inst = block->number_of_nodes();
uint blk_size = 0;
for (uint j = 0; j < last_inst; j++) {
- Node* nj = b->_nodes[j];
+ Node* nj = block->get_node(j);
// Handle machine instruction nodes
if (nj->is_Mach()) {
MachNode *mach = nj->as_Mach();
@@ -443,8 +443,8 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
// When the next block starts a loop, we may insert pad NOP
// instructions. Since we cannot know our future alignment,
// assume the worst.
- if (i< nblocks-1) {
- Block *nb = _cfg->_blocks[i+1];
+ if (i < nblocks - 1) {
+ Block* nb = _cfg->get_block(i + 1);
int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
if (max_loop_pad > 0) {
assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
@@ -475,26 +475,26 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
has_short_branch_candidate = false;
int adjust_block_start = 0;
for (uint i = 0; i < nblocks; i++) {
- Block *b = _cfg->_blocks[i];
+ Block* block = _cfg->get_block(i);
int idx = jmp_nidx[i];
- MachNode* mach = (idx == -1) ? NULL: b->_nodes[idx]->as_Mach();
+ MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
if (mach != NULL && mach->may_be_short_branch()) {
#ifdef ASSERT
assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
int j;
// Find the branch; ignore trailing NOPs.
- for (j = b->_nodes.size()-1; j>=0; j--) {
- Node* n = b->_nodes[j];
+ for (j = block->number_of_nodes()-1; j>=0; j--) {
+ Node* n = block->get_node(j);
if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
break;
}
- assert(j >= 0 && j == idx && b->_nodes[j] == (Node*)mach, "sanity");
+ assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
#endif
int br_size = jmp_size[i];
int br_offs = blk_starts[i] + jmp_offset[i];
// This requires the TRUE branch target be in succs[0]
- uint bnum = b->non_connector_successor(0)->_pre_order;
+ uint bnum = block->non_connector_successor(0)->_pre_order;
int offset = blk_starts[bnum] - br_offs;
if (bnum > i) { // adjust following block's offset
offset -= adjust_block_start;
@@ -522,7 +522,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
diff -= nop_size;
}
adjust_block_start += diff;
- b->_nodes.map(idx, replacement);
+ block->map_node(replacement, idx);
mach->subsume_by(replacement, C);
mach = replacement;
progress = true;
@@ -639,7 +639,7 @@ void Compile::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
Compile::set_sv_for_object_node(objs, sv);
- uint first_ind = spobj->first_index();
+ uint first_ind = spobj->first_index(sfpt->jvms());
for (uint i = 0; i < spobj->n_fields(); i++) {
Node* fld_node = sfpt->in(first_ind+i);
(void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
@@ -894,7 +894,7 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
// Loop over monitors and insert into array
- for(idx = 0; idx < num_mon; idx++) {
+ for (idx = 0; idx < num_mon; idx++) {
// Grab the node that defines this monitor
Node* box_node = sfn->monitor_box(jvms, idx);
Node* obj_node = sfn->monitor_obj(jvms, idx);
@@ -902,11 +902,11 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
// Create ScopeValue for object
ScopeValue *scval = NULL;
- if( obj_node->is_SafePointScalarObject() ) {
+ if (obj_node->is_SafePointScalarObject()) {
SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
scval = Compile::sv_for_node_id(objs, spobj->_idx);
if (scval == NULL) {
- const Type *t = obj_node->bottom_type();
+ const Type *t = spobj->bottom_type();
ciKlass* cik = t->is_oopptr()->klass();
assert(cik->is_instance_klass() ||
cik->is_array_klass(), "Not supported allocation.");
@@ -914,14 +914,14 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
Compile::set_sv_for_object_node(objs, sv);
- uint first_ind = spobj->first_index();
+ uint first_ind = spobj->first_index(youngest_jvms);
for (uint i = 0; i < spobj->n_fields(); i++) {
Node* fld_node = sfn->in(first_ind+i);
(void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
}
scval = sv;
}
- } else if( !obj_node->is_Con() ) {
+ } else if (!obj_node->is_Con()) {
OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node);
if( obj_node->bottom_type()->base() == Type::NarrowOop ) {
scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop );
@@ -929,7 +929,7 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
scval = new_loc_value( _regalloc, obj_reg, Location::oop );
}
} else {
- const TypePtr *tp = obj_node->bottom_type()->make_ptr();
+ const TypePtr *tp = obj_node->get_ptr_type();
scval = new ConstantOopWriteValue(tp->is_oopptr()->const_oop()->constant_encoding());
}
@@ -1085,11 +1085,11 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) {
if (has_mach_constant_base_node()) {
// Fill the constant table.
// Note: This must happen before shorten_branches.
- for (uint i = 0; i < _cfg->_num_blocks; i++) {
- Block* b = _cfg->_blocks[i];
+ for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
+ Block* b = _cfg->get_block(i);
- for (uint j = 0; j < b->_nodes.size(); j++) {
- Node* n = b->_nodes[j];
+ for (uint j = 0; j < b->number_of_nodes(); j++) {
+ Node* n = b->get_node(j);
// If the node is a MachConstantNode evaluate the constant
// value section.
@@ -1172,7 +1172,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// !!!!! This preserves old handling of oopmaps for now
debug_info()->set_oopmaps(_oop_map_set);
- uint nblocks = _cfg->_num_blocks;
+ uint nblocks = _cfg->number_of_blocks();
// Count and start of implicit null check instructions
uint inct_cnt = 0;
uint *inct_starts = NEW_RESOURCE_ARRAY(uint, nblocks+1);
@@ -1220,21 +1220,21 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// Now fill in the code buffer
Node *delay_slot = NULL;
- for (uint i=0; i < nblocks; i++) {
- Block *b = _cfg->_blocks[i];
-
- Node *head = b->head();
+ for (uint i = 0; i < nblocks; i++) {
+ Block* block = _cfg->get_block(i);
+ Node* head = block->head();
// If this block needs to start aligned (i.e., can be reached other
// than by falling-thru from the previous block), then force the
// start of a new bundle.
- if (Pipeline::requires_bundling() && starts_bundle(head))
+ if (Pipeline::requires_bundling() && starts_bundle(head)) {
cb->flush_bundle(true);
+ }
#ifdef ASSERT
- if (!b->is_connector()) {
+ if (!block->is_connector()) {
stringStream st;
- b->dump_head(&_cfg->_bbs, &st);
+ block->dump_head(_cfg, &st);
MacroAssembler(cb).block_comment(st.as_string());
}
jmp_target[i] = 0;
@@ -1245,16 +1245,16 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
int blk_offset = current_offset;
// Define the label at the beginning of the basic block
- MacroAssembler(cb).bind(blk_labels[b->_pre_order]);
+ MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
- uint last_inst = b->_nodes.size();
+ uint last_inst = block->number_of_nodes();
// Emit block normally, except for last instruction.
// Emit means "dump code bits into code buffer".
for (uint j = 0; j<last_inst; j++) {
// Get the node
- Node* n = b->_nodes[j];
+ Node* n = block->get_node(j);
// See if delay slots are supported
if (valid_bundle_info(n) &&
@@ -1308,9 +1308,9 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
int nops_cnt = padding / nop_size;
MachNode *nop = new (this) MachNopNode(nops_cnt);
- b->_nodes.insert(j++, nop);
+ block->insert_node(nop, j++);
last_inst++;
- _cfg->_bbs.map( nop->_idx, b );
+ _cfg->map_node_to_block(nop, block);
nop->emit(*cb, _regalloc);
cb->flush_bundle(true);
current_offset = cb->insts_size();
@@ -1324,7 +1324,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
mcall->method_set((intptr_t)mcall->entry_point());
// Save the return address
- call_returns[b->_pre_order] = current_offset + mcall->ret_addr_offset();
+ call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
if (mcall->is_MachCallLeaf()) {
is_mcall = false;
@@ -1361,7 +1361,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// If this is a branch, then fill in the label with the target BB's label
else if (mach->is_MachBranch()) {
// This requires the TRUE branch target be in succs[0]
- uint block_num = b->non_connector_successor(0)->_pre_order;
+ uint block_num = block->non_connector_successor(0)->_pre_order;
// Try to replace long branch if delay slot is not used,
// it is mostly for back branches since forward branch's
@@ -1394,8 +1394,8 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// Insert padding between avoid_back_to_back branches.
if (needs_padding && replacement->avoid_back_to_back()) {
MachNode *nop = new (this) MachNopNode();
- b->_nodes.insert(j++, nop);
- _cfg->_bbs.map(nop->_idx, b);
+ block->insert_node(nop, j++);
+ _cfg->map_node_to_block(nop, block);
last_inst++;
nop->emit(*cb, _regalloc);
cb->flush_bundle(true);
@@ -1407,7 +1407,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
jmp_size[i] = new_size;
jmp_rule[i] = mach->rule();
#endif
- b->_nodes.map(j, replacement);
+ block->map_node(replacement, j);
mach->subsume_by(replacement, C);
n = replacement;
mach = replacement;
@@ -1415,8 +1415,8 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
}
mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
} else if (mach->ideal_Opcode() == Op_Jump) {
- for (uint h = 0; h < b->_num_succs; h++) {
- Block* succs_block = b->_succs[h];
+ for (uint h = 0; h < block->_num_succs; h++) {
+ Block* succs_block = block->_succs[h];
for (uint j = 1; j < succs_block->num_preds(); j++) {
Node* jpn = succs_block->pred(j);
if (jpn->is_JumpProj() && jpn->in(0) == mach) {
@@ -1427,7 +1427,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
}
}
}
-
#ifdef ASSERT
// Check that oop-store precedes the card-mark
else if (mach->ideal_Opcode() == Op_StoreCM) {
@@ -1438,17 +1437,18 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
if (oop_store == NULL) continue;
count++;
uint i4;
- for( i4 = 0; i4 < last_inst; ++i4 ) {
- if( b->_nodes[i4] == oop_store ) break;
+ for (i4 = 0; i4 < last_inst; ++i4) {
+ if (block->get_node(i4) == oop_store) {
+ break;
+ }
}
// Note: This test can provide a false failure if other precedence
// edges have been added to the storeCMNode.
- assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
+ assert(i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
}
assert(count > 0, "storeCM expects at least one precedence edge");
}
#endif
-
else if (!n->is_Proj()) {
// Remember the beginning of the previous instruction, in case
// it's followed by a flag-kill and a null-check. Happens on
@@ -1544,12 +1544,12 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// If the next block is the top of a loop, pad this block out to align
// the loop top a little. Helps prevent pipe stalls at loop back branches.
if (i < nblocks-1) {
- Block *nb = _cfg->_blocks[i+1];
+ Block *nb = _cfg->get_block(i + 1);
int padding = nb->alignment_padding(current_offset);
if( padding > 0 ) {
MachNode *nop = new (this) MachNopNode(padding / nop_size);
- b->_nodes.insert( b->_nodes.size(), nop );
- _cfg->_bbs.map( nop->_idx, b );
+ block->insert_node(nop, block->number_of_nodes());
+ _cfg->map_node_to_block(nop, block);
nop->emit(*cb, _regalloc);
current_offset = cb->insts_size();
}
@@ -1589,8 +1589,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
}
#endif
- // ------------------
-
#ifndef PRODUCT
// Information on the size of the method, without the extraneous code
Scheduling::increment_method_size(cb->insts_size());
@@ -1651,52 +1649,55 @@ void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_start
_inc_table.set_size(cnt);
uint inct_cnt = 0;
- for( uint i=0; i<_cfg->_num_blocks; i++ ) {
- Block *b = _cfg->_blocks[i];
+ for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
+ Block* block = _cfg->get_block(i);
Node *n = NULL;
int j;
// Find the branch; ignore trailing NOPs.
- for( j = b->_nodes.size()-1; j>=0; j-- ) {
- n = b->_nodes[j];
- if( !n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con )
+ for (j = block->number_of_nodes() - 1; j >= 0; j--) {
+ n = block->get_node(j);
+ if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
break;
+ }
}
// If we didn't find anything, continue
- if( j < 0 ) continue;
+ if (j < 0) {
+ continue;
+ }
// Compute ExceptionHandlerTable subtable entry and add it
// (skip empty blocks)
- if( n->is_Catch() ) {
+ if (n->is_Catch()) {
// Get the offset of the return from the call
- uint call_return = call_returns[b->_pre_order];
+ uint call_return = call_returns[block->_pre_order];
#ifdef ASSERT
assert( call_return > 0, "no call seen for this basic block" );
- while( b->_nodes[--j]->is_MachProj() ) ;
- assert( b->_nodes[j]->is_MachCall(), "CatchProj must follow call" );
+ while (block->get_node(--j)->is_MachProj()) ;
+ assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
#endif
// last instruction is a CatchNode, find its CatchProjNodes
- int nof_succs = b->_num_succs;
+ int nof_succs = block->_num_succs;
// allocate space
GrowableArray<intptr_t> handler_bcis(nof_succs);
GrowableArray<intptr_t> handler_pcos(nof_succs);
// iterate through all successors
for (int j = 0; j < nof_succs; j++) {
- Block* s = b->_succs[j];
+ Block* s = block->_succs[j];
bool found_p = false;
- for( uint k = 1; k < s->num_preds(); k++ ) {
- Node *pk = s->pred(k);
- if( pk->is_CatchProj() && pk->in(0) == n ) {
+ for (uint k = 1; k < s->num_preds(); k++) {
+ Node* pk = s->pred(k);
+ if (pk->is_CatchProj() && pk->in(0) == n) {
const CatchProjNode* p = pk->as_CatchProj();
found_p = true;
// add the corresponding handler bci & pco information
- if( p->_con != CatchProjNode::fall_through_index ) {
+ if (p->_con != CatchProjNode::fall_through_index) {
// p leads to an exception handler (and is not fall through)
- assert(s == _cfg->_blocks[s->_pre_order],"bad numbering");
+ assert(s == _cfg->get_block(s->_pre_order), "bad numbering");
// no duplicates, please
- if( !handler_bcis.contains(p->handler_bci()) ) {
+ if (!handler_bcis.contains(p->handler_bci())) {
uint block_num = s->non_connector()->_pre_order;
handler_bcis.append(p->handler_bci());
handler_pcos.append(blk_labels[block_num].loc_pos());
@@ -1715,9 +1716,9 @@ void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_start
}
// Handle implicit null exception table updates
- if( n->is_MachNullCheck() ) {
- uint block_num = b->non_connector_successor(0)->_pre_order;
- _inc_table.append( inct_starts[inct_cnt++], blk_labels[block_num].loc_pos() );
+ if (n->is_MachNullCheck()) {
+ uint block_num = block->non_connector_successor(0)->_pre_order;
+ _inc_table.append(inct_starts[inct_cnt++], blk_labels[block_num].loc_pos());
continue;
}
} // End of for all blocks fill in exception table entries
@@ -1737,7 +1738,6 @@ uint Scheduling::_total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+
Scheduling::Scheduling(Arena *arena, Compile &compile)
: _arena(arena),
_cfg(compile.cfg()),
- _bbs(compile.cfg()->_bbs),
_regalloc(compile.regalloc()),
_reg_node(arena),
_bundle_instr_count(0),
@@ -1777,14 +1777,12 @@ Scheduling::Scheduling(Arena *arena, Compile &compile)
memset(_current_latency, 0, node_max * sizeof(unsigned short));
// Clear the bundling information
- memcpy(_bundle_use_elements,
- Pipeline_Use::elaborated_elements,
- sizeof(Pipeline_Use::elaborated_elements));
+ memcpy(_bundle_use_elements, Pipeline_Use::elaborated_elements, sizeof(Pipeline_Use::elaborated_elements));
// Get the last node
- Block *bb = _cfg->_blocks[_cfg->_blocks.size()-1];
+ Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
- _next_node = bb->_nodes[bb->_nodes.size()-1];
+ _next_node = block->get_node(block->number_of_nodes() - 1);
}
#ifndef PRODUCT
@@ -1834,7 +1832,6 @@ void Scheduling::step_and_clear() {
sizeof(Pipeline_Use::elaborated_elements));
}
-//------------------------------ScheduleAndBundle------------------------------
// Perform instruction scheduling and bundling over the sequence of
// instructions in backwards order.
void Compile::ScheduleAndBundle() {
@@ -1861,7 +1858,6 @@ void Compile::ScheduleAndBundle() {
scheduling.DoScheduling();
}
-//------------------------------ComputeLocalLatenciesForward-------------------
// Compute the latency of all the instructions. This is fairly simple,
// because we already have a legal ordering. Walk over the instructions
// from first to last, and compute the latency of the instruction based
@@ -1879,7 +1875,7 @@ void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
// Used to allow latency 0 to force an instruction to the beginning
// of the bb
uint latency = 1;
- Node *use = bb->_nodes[j];
+ Node *use = bb->get_node(j);
uint nlen = use->len();
// Walk over all the inputs
@@ -2031,7 +2027,6 @@ Node * Scheduling::ChooseNodeToBundle() {
return _available[0];
}
-//------------------------------AddNodeToAvailableList-------------------------
void Scheduling::AddNodeToAvailableList(Node *n) {
assert( !n->is_Proj(), "projections never directly made available" );
#ifndef PRODUCT
@@ -2077,7 +2072,6 @@ void Scheduling::AddNodeToAvailableList(Node *n) {
#endif
}
-//------------------------------DecrementUseCounts-----------------------------
void Scheduling::DecrementUseCounts(Node *n, const Block *bb) {
for ( uint i=0; i < n->len(); i++ ) {
Node *def = n->in(i);
@@ -2085,8 +2079,9 @@ void Scheduling::DecrementUseCounts(Node *n, const Block *bb) {
if( def->is_Proj() ) // If this is a machine projection, then
def = def->in(0); // propagate usage thru to the base instruction
- if( _bbs[def->_idx] != bb ) // Ignore if not block-local
+ if (_cfg->get_block_for_node(def) != bb) { // Ignore if not block-local
continue;
+ }
// Compute the latency
uint l = _bundle_cycle_number + n->latency(i);
@@ -2099,7 +2094,6 @@ void Scheduling::DecrementUseCounts(Node *n, const Block *bb) {
}
}
-//------------------------------AddNodeToBundle--------------------------------
void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
#ifndef PRODUCT
if (_cfg->C->trace_opto_output()) {
@@ -2292,7 +2286,7 @@ void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
(OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
// Push any trailing projections
- if( bb->_nodes[bb->_nodes.size()-1] != n ) {
+ if( bb->get_node(bb->number_of_nodes()-1) != n ) {
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node *foi = n->fast_out(i);
if( foi->is_Proj() )
@@ -2314,7 +2308,6 @@ void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
DecrementUseCounts(n,bb);
}
-//------------------------------ComputeUseCount--------------------------------
// This method sets the use count within a basic block. We will ignore all
// uses outside the current basic block. As we are doing a backwards walk,
// any node we reach that has a use count of 0 may be scheduled. This also
@@ -2336,21 +2329,21 @@ void Scheduling::ComputeUseCount(const Block *bb) {
_unconditional_delay_slot = NULL;
#ifdef ASSERT
- for( uint i=0; i < bb->_nodes.size(); i++ )
- assert( _uses[bb->_nodes[i]->_idx] == 0, "_use array not clean" );
+ for( uint i=0; i < bb->number_of_nodes(); i++ )
+ assert( _uses[bb->get_node(i)->_idx] == 0, "_use array not clean" );
#endif
// Force the _uses count to never go to zero for unschedulable pieces
// of the block
for( uint k = 0; k < _bb_start; k++ )
- _uses[bb->_nodes[k]->_idx] = 1;
- for( uint l = _bb_end; l < bb->_nodes.size(); l++ )
- _uses[bb->_nodes[l]->_idx] = 1;
+ _uses[bb->get_node(k)->_idx] = 1;
+ for( uint l = _bb_end; l < bb->number_of_nodes(); l++ )
+ _uses[bb->get_node(l)->_idx] = 1;
// Iterate backwards over the instructions in the block. Don't count the
// branch projections at end or the block header instructions.
for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
- Node *n = bb->_nodes[j];
+ Node *n = bb->get_node(j);
if( n->is_Proj() ) continue; // Projections handled another way
// Account for all uses
@@ -2358,9 +2351,10 @@ void Scheduling::ComputeUseCount(const Block *bb) {
Node *inp = n->in(k);
if (!inp) continue;
assert(inp != n, "no cycles allowed" );
- if( _bbs[inp->_idx] == bb ) { // Block-local use?
- if( inp->is_Proj() ) // Skip through Proj's
+ if (_cfg->get_block_for_node(inp) == bb) { // Block-local use?
+ if (inp->is_Proj()) { // Skip through Proj's
inp = inp->in(0);
+ }
++_uses[inp->_idx]; // Count 1 block-local use
}
}
@@ -2398,20 +2392,22 @@ void Scheduling::DoScheduling() {
Block *bb;
// Walk over all the basic blocks in reverse order
- for( int i=_cfg->_num_blocks-1; i >= 0; succ_bb = bb, i-- ) {
- bb = _cfg->_blocks[i];
+ for (int i = _cfg->number_of_blocks() - 1; i >= 0; succ_bb = bb, i--) {
+ bb = _cfg->get_block(i);
#ifndef PRODUCT
if (_cfg->C->trace_opto_output()) {
tty->print("# Schedule BB#%03d (initial)\n", i);
- for (uint j = 0; j < bb->_nodes.size(); j++)
- bb->_nodes[j]->dump();
+ for (uint j = 0; j < bb->number_of_nodes(); j++) {
+ bb->get_node(j)->dump();
+ }
}
#endif
// On the head node, skip processing
- if( bb == _cfg->_broot )
+ if (bb == _cfg->get_root_block()) {
continue;
+ }
// Skip empty, connector blocks
if (bb->is_connector())
@@ -2430,10 +2426,10 @@ void Scheduling::DoScheduling() {
}
// Leave untouched the starting instruction, any Phis, a CreateEx node
- // or Top. bb->_nodes[_bb_start] is the first schedulable instruction.
- _bb_end = bb->_nodes.size()-1;
+ // or Top. bb->get_node(_bb_start) is the first schedulable instruction.
+ _bb_end = bb->number_of_nodes()-1;
for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
- Node *n = bb->_nodes[_bb_start];
+ Node *n = bb->get_node(_bb_start);
// Things not matched, like PhiNodes and ProjNodes, don't get scheduled.
// Also, MachIdealNodes do not get scheduled
if( !n->is_Mach() ) continue; // Skip non-machine nodes
@@ -2453,19 +2449,19 @@ void Scheduling::DoScheduling() {
// in the block), because they have delay slots we can fill. Calls all
// have their delay slots filled in the template expansions, so we don't
// bother scheduling them.
- Node *last = bb->_nodes[_bb_end];
+ Node *last = bb->get_node(_bb_end);
// Ignore trailing NOPs.
while (_bb_end > 0 && last->is_Mach() &&
last->as_Mach()->ideal_Opcode() == Op_Con) {
- last = bb->_nodes[--_bb_end];
+ last = bb->get_node(--_bb_end);
}
assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
if( last->is_Catch() ||
// Exclude unreachable path case when Halt node is in a separate block.
(_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
// There must be a prior call. Skip it.
- while( !bb->_nodes[--_bb_end]->is_MachCall() ) {
- assert( bb->_nodes[_bb_end]->is_MachProj(), "skipping projections after expected call" );
+ while( !bb->get_node(--_bb_end)->is_MachCall() ) {
+ assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" );
}
} else if( last->is_MachNullCheck() ) {
// Backup so the last null-checked memory instruction is
@@ -2474,7 +2470,7 @@ void Scheduling::DoScheduling() {
Node *mem = last->in(1);
do {
_bb_end--;
- } while (mem != bb->_nodes[_bb_end]);
+ } while (mem != bb->get_node(_bb_end));
} else {
// Set _bb_end to point after last schedulable inst.
_bb_end++;
@@ -2503,7 +2499,7 @@ void Scheduling::DoScheduling() {
assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
#ifdef ASSERT
for( uint l = _bb_start; l < _bb_end; l++ ) {
- Node *n = bb->_nodes[l];
+ Node *n = bb->get_node(l);
uint m;
for( m = 0; m < _bb_end-_bb_start; m++ )
if( _scheduled[m] == n )
@@ -2514,14 +2510,14 @@ void Scheduling::DoScheduling() {
// Now copy the instructions (in reverse order) back to the block
for ( uint k = _bb_start; k < _bb_end; k++ )
- bb->_nodes.map(k, _scheduled[_bb_end-k-1]);
+ bb->map_node(_scheduled[_bb_end-k-1], k);
#ifndef PRODUCT
if (_cfg->C->trace_opto_output()) {
tty->print("# Schedule BB#%03d (final)\n", i);
uint current = 0;
- for (uint j = 0; j < bb->_nodes.size(); j++) {
- Node *n = bb->_nodes[j];
+ for (uint j = 0; j < bb->number_of_nodes(); j++) {
+ Node *n = bb->get_node(j);
if( valid_bundle_info(n) ) {
Bundle *bundle = node_bundling(n);
if (bundle->instr_count() > 0 || bundle->flags() > 0) {
@@ -2548,7 +2544,6 @@ void Scheduling::DoScheduling() {
} // end DoScheduling
-//------------------------------verify_good_schedule---------------------------
// Verify that no live-range used in the block is killed in the block by a
// wrong DEF. This doesn't verify live-ranges that span blocks.
@@ -2561,7 +2556,6 @@ static bool edge_from_to( Node *from, Node *to ) {
}
#ifdef ASSERT
-//------------------------------verify_do_def----------------------------------
void Scheduling::verify_do_def( Node *n, OptoReg::Name def, const char *msg ) {
// Check for bad kills
if( OptoReg::is_valid(def) ) { // Ignore stores & control flow
@@ -2577,7 +2571,6 @@ void Scheduling::verify_do_def( Node *n, OptoReg::Name def, const char *msg ) {
}
}
-//------------------------------verify_good_schedule---------------------------
void Scheduling::verify_good_schedule( Block *b, const char *msg ) {
// Zap to something reasonable for the verify code
@@ -2586,8 +2579,8 @@ void Scheduling::verify_good_schedule( Block *b, const char *msg ) {
// Walk over the block backwards. Check to make sure each DEF doesn't
// kill a live value (other than the one it's supposed to). Add each
// USE to the live set.
- for( uint i = b->_nodes.size()-1; i >= _bb_start; i-- ) {
- Node *n = b->_nodes[i];
+ for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) {
+ Node *n = b->get_node(i);
int n_op = n->Opcode();
if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
// Fat-proj kills a slew of registers
@@ -2637,13 +2630,12 @@ static void add_prec_edge_from_to( Node *from, Node *to ) {
from->add_prec(to);
}
-//------------------------------anti_do_def------------------------------------
void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ) {
if( !OptoReg::is_valid(def_reg) ) // Ignore stores & control flow
return;
Node *pinch = _reg_node[def_reg]; // Get pinch point
- if( !pinch || _bbs[pinch->_idx] != b || // No pinch-point yet?
+ if ((pinch == NULL) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet?
is_def ) { // Check for a true def (not a kill)
_reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
return;
@@ -2669,7 +2661,7 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is
_cfg->C->record_method_not_compilable("too many D-U pinch points");
return;
}
- _bbs.map(pinch->_idx,b); // Pretend it's valid in this block (lazy init)
+ _cfg->map_node_to_block(pinch, b); // Pretend it's valid in this block (lazy init)
_reg_node.map(def_reg,pinch); // Record pinch-point
//_regalloc->set_bad(pinch->_idx); // Already initialized this way.
if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
@@ -2707,20 +2699,19 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is
add_prec_edge_from_to(kill,pinch);
}
-//------------------------------anti_do_use------------------------------------
void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow
return;
Node *pinch = _reg_node[use_reg]; // Get pinch point
// Check for no later def_reg/kill in block
- if( pinch && _bbs[pinch->_idx] == b &&
+ if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
// Use has to be block-local as well
- _bbs[use->_idx] == b ) {
+ _cfg->get_block_for_node(use) == b) {
if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
pinch->req() == 1 ) { // pinch not yet in block?
pinch->del_req(0); // yank pointer to later-def, also set flag
// Insert the pinch-point in the block just after the last use
- b->_nodes.insert(b->find_node(use)+1,pinch);
+ b->insert_node(pinch, b->find_node(use) + 1);
_bb_end++; // Increase size scheduled region in block
}
@@ -2728,7 +2719,6 @@ void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
}
}
-//------------------------------ComputeRegisterAntidependences-----------------
// We insert antidependences between the reads and following write of
// allocated registers to prevent illegal code motion. Hopefully, the
// number of added references should be fairly small, especially as we
@@ -2773,10 +2763,10 @@ void Scheduling::ComputeRegisterAntidependencies(Block *b) {
// it being in the current block.
bool fat_proj_seen = false;
uint last_safept = _bb_end-1;
- Node* end_node = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL;
+ Node* end_node = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL;
Node* last_safept_node = end_node;
for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
- Node *n = b->_nodes[i];
+ Node *n = b->get_node(i);
int is_def = n->outcnt(); // def if some uses prior to adding precedence edges
if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
// Fat-proj kills a slew of registers
@@ -2825,7 +2815,7 @@ void Scheduling::ComputeRegisterAntidependencies(Block *b) {
// Do not allow defs of new derived values to float above GC
// points unless the base is definitely available at the GC point.
- Node *m = b->_nodes[i];
+ Node *m = b->get_node(i);
// Add precedence edge from following safepoint to use of derived pointer
if( last_safept_node != end_node &&
@@ -2842,11 +2832,11 @@ void Scheduling::ComputeRegisterAntidependencies(Block *b) {
if( n->jvms() ) { // Precedence edge from derived to safept
// Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
- if( b->_nodes[last_safept] != last_safept_node ) {
+ if( b->get_node(last_safept) != last_safept_node ) {
last_safept = b->find_node(last_safept_node);
}
for( uint j=last_safept; j > i; j-- ) {
- Node *mach = b->_nodes[j];
+ Node *mach = b->get_node(j);
if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
mach->add_prec( n );
}
@@ -2862,8 +2852,6 @@ void Scheduling::ComputeRegisterAntidependencies(Block *b) {
}
}
-//------------------------------garbage_collect_pinch_nodes-------------------------------
-
// Garbage collect pinch nodes for reuse by other blocks.
//
// The block scheduler's insertion of anti-dependence
@@ -2895,7 +2883,7 @@ void Scheduling::garbage_collect_pinch_nodes() {
int trace_cnt = 0;
for (uint k = 0; k < _reg_node.Size(); k++) {
Node* pinch = _reg_node[k];
- if (pinch != NULL && pinch->Opcode() == Op_Node &&
+ if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
+ // no precedence input edges
(pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
cleanup_pinch(pinch);
@@ -2938,7 +2926,6 @@ void Scheduling::cleanup_pinch( Node *pinch ) {
pinch->set_req(0, NULL);
}
-//------------------------------print_statistics-------------------------------
#ifndef PRODUCT
void Scheduling::dump_available() const {
diff --git a/src/share/vm/opto/output.hpp b/src/share/vm/opto/output.hpp
index 402c7cf96..8cf81de0d 100644
--- a/src/share/vm/opto/output.hpp
+++ b/src/share/vm/opto/output.hpp
@@ -99,9 +99,6 @@ private:
// List of nodes currently available for choosing for scheduling
Node_List _available;
- // Mapping from node (index) to basic block
- Block_Array& _bbs;
-
// For each instruction beginning a bundle, the number of following
// nodes to be bundled with it.
Bundle *_node_bundling_base;
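
This removal completes the recurring refactor in this patch: instead of caching a Block_Array& _bbs and indexing it with node _idx values, every client asks the CFG through get_block_for_node()/map_node_to_block()/unmap_node_from_block(). A minimal sketch of that encapsulation, with toy stand-ins for the HotSpot types:

    #include <vector>

    struct Node  { unsigned _idx; };
    struct Block {};

    // Before: clients indexed a public Block_Array directly (cfg._bbs[n->_idx]).
    // After: the map is private and every read/write goes through the CFG.
    class PhaseCFG {
      std::vector<Block*> _node_to_block;   // indexed by Node::_idx
    public:
      explicit PhaseCFG(size_t max_idx) : _node_to_block(max_idx, nullptr) {}
      Block* get_block_for_node(const Node* n) const { return _node_to_block[n->_idx]; }
      void   map_node_to_block(const Node* n, Block* b) { _node_to_block[n->_idx] = b; }
      void   unmap_node_from_block(const Node* n)       { _node_to_block[n->_idx] = nullptr; }
    };
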
diff --git a/src/share/vm/opto/parse.hpp b/src/share/vm/opto/parse.hpp
index c38f96b6b..ea01b0847 100644
--- a/src/share/vm/opto/parse.hpp
+++ b/src/share/vm/opto/parse.hpp
@@ -330,6 +330,7 @@ class Parse : public GraphKit {
bool _wrote_final; // Did we write a final field?
bool _count_invocations; // update and test invocation counter
bool _method_data_update; // update method data oop
+ Node* _alloc_with_final; // An allocation node with final field
// Variables which track Java semantics during bytecode parsing:
@@ -370,6 +371,11 @@ class Parse : public GraphKit {
void set_wrote_final(bool z) { _wrote_final = z; }
bool count_invocations() const { return _count_invocations; }
bool method_data_update() const { return _method_data_update; }
+ Node* alloc_with_final() const { return _alloc_with_final; }
+ void set_alloc_with_final(Node* n) {
+ assert((_alloc_with_final == NULL) || (_alloc_with_final == n), "different init objects?");
+ _alloc_with_final = n;
+ }
Block* block() const { return _block; }
ciBytecodeStream& iter() { return _iter; }
@@ -512,7 +518,7 @@ class Parse : public GraphKit {
// loading from a constant field or the constant pool
// returns false if push failed (non-perm field constants only, not ldcs)
- bool push_constant(ciConstant con, bool require_constant = false);
+ bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false, const Type* basic_type = NULL);
// implementation of object creation bytecodes
void emit_guard_for_new(ciInstanceKlass* klass);
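
The new _alloc_with_final field records at most one allocation per parse, and set_alloc_with_final() asserts that any repeated store names the same node. A toy version of that write-once latch (standalone sketch, not the Parse class itself):

    #include <cassert>
    #include <cstddef>

    // A second store is only legal while it names the same allocation,
    // which is what the "different init objects?" assert enforces.
    struct AllocLatch {
      void* _p;
      AllocLatch() : _p(NULL) {}
      void set(void* n) {
        assert(_p == NULL || _p == n);   // "different init objects?"
        _p = n;
      }
    };

    int main() {
      int alloc = 0;
      AllocLatch latch;
      latch.set(&alloc);
      latch.set(&alloc);   // same allocation again: allowed
      return 0;
    }
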
diff --git a/src/share/vm/opto/parse1.cpp b/src/share/vm/opto/parse1.cpp
index f0f7c8b0a..10d98b92f 100644
--- a/src/share/vm/opto/parse1.cpp
+++ b/src/share/vm/opto/parse1.cpp
@@ -390,6 +390,7 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
_expected_uses = expected_uses;
_depth = 1 + (caller->has_method() ? caller->depth() : 0);
_wrote_final = false;
+ _alloc_with_final = NULL;
_entry_bci = InvocationEntryBci;
_tf = NULL;
_block = NULL;
@@ -723,6 +724,8 @@ void Parse::build_exits() {
// Note: iophi and memphi are not transformed until do_exits.
Node* iophi = new (C) PhiNode(region, Type::ABIO);
Node* memphi = new (C) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
+ gvn().set_type_bottom(iophi);
+ gvn().set_type_bottom(memphi);
_exits.set_i_o(iophi);
_exits.set_all_memory(memphi);
@@ -738,6 +741,7 @@ void Parse::build_exits() {
}
int ret_size = type2size[ret_type->basic_type()];
Node* ret_phi = new (C) PhiNode(region, ret_type);
+ gvn().set_type_bottom(ret_phi);
_exits.ensure_stack(ret_size);
assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
@@ -917,7 +921,7 @@ void Parse::do_exits() {
// such unusual early publications. But no barrier is needed on
// exceptional returns, since they cannot publish normally.
//
- _exits.insert_mem_bar(Op_MemBarRelease);
+ _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
#ifndef PRODUCT
if (PrintOpto && (Verbose || WizardMode)) {
method()->print_name();
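
With alloc_with_final() threaded into insert_mem_bar(), the release barrier emitted on normal constructor exit gains a precedent edge to the tracked allocation (per the parse3.cpp comment below, so the edge survives to guide later elimination). As a standalone analogue of the barrier itself, a C++ release fence keeping "final field" stores ordered before publication:

    #include <atomic>

    int payload;                           // stands in for a final field
    std::atomic<int*> published(nullptr);

    void construct_and_publish() {
      payload = 42;                                        // final-field store in <init>
      std::atomic_thread_fence(std::memory_order_release); // analogue of the MemBarRelease at exit
      published.store(&payload, std::memory_order_relaxed);
    }
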
diff --git a/src/share/vm/opto/parse2.cpp b/src/share/vm/opto/parse2.cpp
index 73be6aae5..c41ca257e 100644
--- a/src/share/vm/opto/parse2.cpp
+++ b/src/share/vm/opto/parse2.cpp
@@ -987,7 +987,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
uncommon_trap(Deoptimization::Reason_unreached,
Deoptimization::Action_reinterpret,
NULL, "cold");
- if (EliminateAutoBox) {
+ if (C->eliminate_boxing()) {
// Mark the successor blocks as parsed
branch_block->next_path_num();
next_block->next_path_num();
@@ -1012,7 +1012,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
if (stopped()) { // Path is dead?
explicit_null_checks_elided++;
- if (EliminateAutoBox) {
+ if (C->eliminate_boxing()) {
// Mark the successor block as parsed
branch_block->next_path_num();
}
@@ -1032,7 +1032,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
if (stopped()) { // Path is dead?
explicit_null_checks_elided++;
- if (EliminateAutoBox) {
+ if (C->eliminate_boxing()) {
// Mark the successor block as parsed
next_block->next_path_num();
}
@@ -1069,7 +1069,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
uncommon_trap(Deoptimization::Reason_unreached,
Deoptimization::Action_reinterpret,
NULL, "cold");
- if (EliminateAutoBox) {
+ if (C->eliminate_boxing()) {
// Mark the successor blocks as parsed
branch_block->next_path_num();
next_block->next_path_num();
@@ -1135,7 +1135,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
set_control(taken_branch);
if (stopped()) {
- if (EliminateAutoBox) {
+ if (C->eliminate_boxing()) {
// Mark the successor block as parsed
branch_block->next_path_num();
}
@@ -1154,7 +1154,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
// Branch not taken.
if (stopped()) {
- if (EliminateAutoBox) {
+ if (C->eliminate_boxing()) {
// Mark the successor block as parsed
next_block->next_path_num();
}
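
Every EliminateAutoBox test in this file becomes C->eliminate_boxing(), a per-compilation predicate (the set_has_boxed_value() hook added in parseHelper.cpp below suggests it is derived per method). A toy sketch of the flag-behind-a-predicate pattern; the gating condition shown is invented for illustration, not the actual one:

    // Global -XX-style flag, as before.
    static bool EliminateAutoBox = true;

    class Compile {
      bool _eliminate_boxing;
    public:
      // Hypothetical gating: the flag plus a per-method property.
      explicit Compile(bool method_has_boxed_values)
        : _eliminate_boxing(EliminateAutoBox && method_has_boxed_values) {}
      bool eliminate_boxing() const { return _eliminate_boxing; }
    };
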
diff --git a/src/share/vm/opto/parse3.cpp b/src/share/vm/opto/parse3.cpp
index 9de92a2ae..8c545f3ec 100644
--- a/src/share/vm/opto/parse3.cpp
+++ b/src/share/vm/opto/parse3.cpp
@@ -147,14 +147,38 @@ void Parse::do_field_access(bool is_get, bool is_field) {
void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
// Does this field have a constant value? If so, just push the value.
if (field->is_constant()) {
- // final field
+ // final or stable field
+ const Type* stable_type = NULL;
+ if (FoldStableValues && field->is_stable()) {
+ stable_type = Type::get_const_type(field->type());
+ if (field->type()->is_array_klass()) {
+ int stable_dimension = field->type()->as_array_klass()->dimension();
+ stable_type = stable_type->is_aryptr()->cast_to_stable(true, stable_dimension);
+ }
+ }
if (field->is_static()) {
// final static field
- if (push_constant(field->constant_value()))
+ if (C->eliminate_boxing()) {
+ // The pointers in the autobox arrays are always non-null.
+ ciSymbol* klass_name = field->holder()->name();
+ if (field->name() == ciSymbol::cache_field_name() &&
+ field->holder()->uses_default_loader() &&
+ (klass_name == ciSymbol::java_lang_Character_CharacterCache() ||
+ klass_name == ciSymbol::java_lang_Byte_ByteCache() ||
+ klass_name == ciSymbol::java_lang_Short_ShortCache() ||
+ klass_name == ciSymbol::java_lang_Integer_IntegerCache() ||
+ klass_name == ciSymbol::java_lang_Long_LongCache())) {
+ bool require_const = true;
+ bool autobox_cache = true;
+ if (push_constant(field->constant_value(), require_const, autobox_cache)) {
+ return;
+ }
+ }
+ }
+ if (push_constant(field->constant_value(), false, false, stable_type))
return;
- }
- else {
- // final non-static field
+ } else {
+ // final or stable non-static field
// Treat final non-static fields of trusted classes (classes in
// java.lang.invoke and sun.invoke packages and subpackages) as
// compile time constants.
@@ -162,8 +186,12 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
ciObject* constant_oop = oop_ptr->const_oop();
ciConstant constant = field->constant_value_of(constant_oop);
- if (push_constant(constant, true))
- return;
+ if (FoldStableValues && field->is_stable() && constant.is_null_or_zero()) {
+ // fall through to field load; the field is not yet initialized
+ } else {
+ if (push_constant(constant, true, false, stable_type))
+ return;
+ }
}
}
}
@@ -277,66 +305,42 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
// If reference is volatile, prevent following volatiles ops from
// floating up before the volatile write.
if (is_vol) {
- // First place the specific membar for THIS volatile index. This first
- // membar is dependent on the store, keeping any other membars generated
- // below from floating up past the store.
- int adr_idx = C->get_alias_index(adr_type);
- insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx, store);
-
- // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
- // volatile alias indices. Skip this if the membar is redundant.
- if (adr_idx != Compile::AliasIdxBot) {
- insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot, store);
- }
-
- // Finally, place alias-index-specific membars for each volatile index
- // that isn't the adr_idx membar. Typically there's only 1 or 2.
- for( int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++ ) {
- if (i != adr_idx && C->alias_type(i)->is_volatile()) {
- insert_mem_bar_volatile(Op_MemBarVolatile, i, store);
- }
- }
+ insert_mem_bar(Op_MemBarVolatile); // Use fat membar
}
// If the field is final, the rules of Java say we are in <init> or <clinit>.
// Note the presence of writes to final non-static fields, so that we
// can insert a memory barrier later on to keep the writes from floating
// out of the constructor.
- if (is_field && field->is_final()) {
+ // Any method can write a @Stable field; insert memory barriers after those also.
+ if (is_field && (field->is_final() || field->is_stable())) {
set_wrote_final(true);
+ // Preserve allocation ptr to create precedent edge to it in membar
+ // generated on exit from constructor.
+ if (C->eliminate_boxing() &&
+ adr_type->isa_oopptr() && adr_type->is_oopptr()->is_ptr_to_boxed_value() &&
+ AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
+ set_alloc_with_final(obj);
+ }
}
}
-bool Parse::push_constant(ciConstant constant, bool require_constant) {
+
+bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_autobox_cache, const Type* stable_type) {
+ const Type* con_type = Type::make_from_constant(constant, require_constant, is_autobox_cache);
switch (constant.basic_type()) {
- case T_BOOLEAN: push( intcon(constant.as_boolean()) ); break;
- case T_INT: push( intcon(constant.as_int()) ); break;
- case T_CHAR: push( intcon(constant.as_char()) ); break;
- case T_BYTE: push( intcon(constant.as_byte()) ); break;
- case T_SHORT: push( intcon(constant.as_short()) ); break;
- case T_FLOAT: push( makecon(TypeF::make(constant.as_float())) ); break;
- case T_DOUBLE: push_pair( makecon(TypeD::make(constant.as_double())) ); break;
- case T_LONG: push_pair( longcon(constant.as_long()) ); break;
case T_ARRAY:
- case T_OBJECT: {
+ case T_OBJECT:
// cases:
// can_be_constant = (oop not scavengable || ScavengeRootsInCode != 0)
// should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
// An oop is not scavengable if it is in the perm gen.
- ciObject* oop_constant = constant.as_object();
- if (oop_constant->is_null_object()) {
- push( zerocon(T_OBJECT) );
- break;
- } else if (require_constant || oop_constant->should_be_constant()) {
- push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) );
- break;
- } else {
- // we cannot inline the oop, but we can use it later to narrow a type
- return false;
- }
- }
- case T_ILLEGAL: {
+ if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr())
+ con_type = con_type->join(stable_type);
+ break;
+
+ case T_ILLEGAL:
// Invalid ciConstant returned due to OutOfMemoryError in the CI
assert(C->env()->failing(), "otherwise should not see this");
// These always occur because of object types; we are going to
@@ -344,17 +348,16 @@ bool Parse::push_constant(ciConstant constant, bool require_constant) {
push( zerocon(T_OBJECT) );
return false;
}
- default:
- ShouldNotReachHere();
+
+ if (con_type == NULL)
+ // we cannot inline the oop, but we can use it later to narrow a type
return false;
- }
- // success
+ push_node(constant.basic_type(), makecon(con_type));
return true;
}
-
//=============================================================================
void Parse::do_anewarray() {
bool will_link;
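
push_constant() now derives a single con_type via Type::make_from_constant() and ends with push_node(constant.basic_type(), makecon(con_type)), replacing the per-type switch of push/push_pair calls. One entry point suffices because of the JVM's two-slot rule for category-2 values; a toy sketch of that rule (stack modeled as a vector of node ids):

    #include <vector>

    enum ToyBasicType { T_INT, T_LONG, T_DOUBLE, T_OBJECT };
    static int type2size(ToyBasicType t) { return (t == T_LONG || t == T_DOUBLE) ? 2 : 1; }

    // Category-2 values (long/double) occupy two expression-stack slots,
    // which is why the old code used push_pair() for T_LONG/T_DOUBLE and a
    // generic push_node(basic_type, node) can subsume both cases.
    void push_node(std::vector<int>& stack, ToyBasicType t, int node_id) {
      stack.push_back(node_id);
      if (type2size(t) == 2) {
        stack.push_back(-1);   // second half-slot (no independent payload)
      }
    }
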
diff --git a/src/share/vm/opto/parseHelper.cpp b/src/share/vm/opto/parseHelper.cpp
index e9486088d..5d0c2f7be 100644
--- a/src/share/vm/opto/parseHelper.cpp
+++ b/src/share/vm/opto/parseHelper.cpp
@@ -284,6 +284,11 @@ void Parse::do_new() {
klass == C->env()->StringBuffer_klass())) {
C->set_has_stringbuilder(true);
}
+
+ // Keep track of boxed values for EliminateAutoBox optimizations.
+ if (C->eliminate_boxing() && klass->is_box_klass()) {
+ C->set_has_boxed_value(true);
+ }
}
#ifndef PRODUCT
diff --git a/src/share/vm/opto/phase.cpp b/src/share/vm/opto/phase.cpp
index 0b88996d3..535930157 100644
--- a/src/share/vm/opto/phase.cpp
+++ b/src/share/vm/opto/phase.cpp
@@ -64,6 +64,7 @@ elapsedTimer Phase::_t_idealLoopVerify;
// Subtimers for _t_optimizer
elapsedTimer Phase::_t_iterGVN;
elapsedTimer Phase::_t_iterGVN2;
+elapsedTimer Phase::_t_incrInline;
// Subtimers for _t_registerAllocation
elapsedTimer Phase::_t_ctorChaitin;
@@ -110,6 +111,7 @@ void Phase::print_timers() {
tty->print_cr (" macroEliminate : %3.3f sec", Phase::_t_macroEliminate.seconds());
}
tty->print_cr (" iterGVN : %3.3f sec", Phase::_t_iterGVN.seconds());
+ tty->print_cr (" incrInline : %3.3f sec", Phase::_t_incrInline.seconds());
tty->print_cr (" idealLoop : %3.3f sec", Phase::_t_idealLoop.seconds());
tty->print_cr (" idealLoopVerify: %3.3f sec", Phase::_t_idealLoopVerify.seconds());
tty->print_cr (" ccp : %3.3f sec", Phase::_t_ccp.seconds());
diff --git a/src/share/vm/opto/phase.hpp b/src/share/vm/opto/phase.hpp
index 9faabf543..582a126db 100644
--- a/src/share/vm/opto/phase.hpp
+++ b/src/share/vm/opto/phase.hpp
@@ -100,6 +100,7 @@ protected:
// Subtimers for _t_optimizer
static elapsedTimer _t_iterGVN;
static elapsedTimer _t_iterGVN2;
+ static elapsedTimer _t_incrInline;
// Subtimers for _t_registerAllocation
static elapsedTimer _t_ctorChaitin;
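
The new _t_incrInline subtimer follows the existing elapsedTimer pattern: accumulate across many invocations, report once from print_timers(). A standalone std::chrono analogue of that pattern (not HotSpot's elapsedTimer API):

    #include <chrono>
    #include <cstdio>

    struct ToyElapsedTimer {
      std::chrono::steady_clock::duration total{};
      std::chrono::steady_clock::time_point t0;
      void start() { t0 = std::chrono::steady_clock::now(); }
      void stop()  { total += std::chrono::steady_clock::now() - t0; }
      double seconds() const { return std::chrono::duration<double>(total).count(); }
    };

    static ToyElapsedTimer t_incrInline;   // mirrors Phase::_t_incrInline

    void incremental_inline_step() {
      t_incrInline.start();
      /* ... incremental inlining work ... */
      t_incrInline.stop();
    }

    void print_timers() {
      std::printf("    incrInline     : %3.3f sec\n", t_incrInline.seconds());
    }
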
diff --git a/src/share/vm/opto/phaseX.cpp b/src/share/vm/opto/phaseX.cpp
index a8c979662..71c8d8afc 100644
--- a/src/share/vm/opto/phaseX.cpp
+++ b/src/share/vm/opto/phaseX.cpp
@@ -882,7 +882,7 @@ void PhaseIterGVN::optimize() {
return;
}
Node *n = _worklist.pop();
- if (++loop_count >= K * C->unique()) {
+ if (++loop_count >= K * C->live_nodes()) {
debug_only(n->dump(4);)
assert(false, "infinite loop in PhaseIterGVN::optimize");
C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize");
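
The infinite-loop guard now scales with C->live_nodes() rather than C->unique(), which also counts nodes that have since died, so the budget tracks the current graph size instead of the historical maximum. K here is HotSpot's 1024 constant from globalDefinitions.hpp. A toy restatement of the guard:

    const unsigned K = 1024;   // HotSpot's kilo constant

    // Give the IGVN worklist at most K passes per live node before
    // declaring an infinite loop and bailing out of the compile.
    bool within_budget(unsigned iterations, unsigned live_nodes) {
      return iterations < K * live_nodes;
    }
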
@@ -1643,15 +1643,15 @@ void PhasePeephole::do_transform() {
bool method_name_not_printed = true;
// Examine each basic block
- for( uint block_number = 1; block_number < _cfg._num_blocks; ++block_number ) {
- Block *block = _cfg._blocks[block_number];
+ for (uint block_number = 1; block_number < _cfg.number_of_blocks(); ++block_number) {
+ Block* block = _cfg.get_block(block_number);
bool block_not_printed = true;
// and each instruction within a block
- uint end_index = block->_nodes.size();
+ uint end_index = block->number_of_nodes();
// block->end_idx() not valid after PhaseRegAlloc
for( uint instruction_index = 1; instruction_index < end_index; ++instruction_index ) {
- Node *n = block->_nodes.at(instruction_index);
+ Node *n = block->get_node(instruction_index);
if( n->is_Mach() ) {
MachNode *m = n->as_Mach();
int deleted_count = 0;
@@ -1673,7 +1673,7 @@ void PhasePeephole::do_transform() {
}
// Print instructions being deleted
for( int i = (deleted_count - 1); i >= 0; --i ) {
- block->_nodes.at(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
+ block->get_node(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
}
tty->print_cr("replaced with");
// Print new instruction
@@ -1687,11 +1687,11 @@ void PhasePeephole::do_transform() {
// the node index to live range mappings.)
uint safe_instruction_index = (instruction_index - deleted_count);
for( ; (instruction_index > safe_instruction_index); --instruction_index ) {
- block->_nodes.remove( instruction_index );
+ block->remove_node( instruction_index );
}
// install new node after safe_instruction_index
- block->_nodes.insert( safe_instruction_index + 1, m2 );
- end_index = block->_nodes.size() - 1; // Recompute new block size
+ block->insert_node(m2, safe_instruction_index + 1);
+ end_index = block->number_of_nodes() - 1; // Recompute new block size
NOT_PRODUCT( inc_peepholes(); )
}
}
diff --git a/src/share/vm/opto/phasetype.hpp b/src/share/vm/opto/phasetype.hpp
new file mode 100644
index 000000000..ba769d410
--- /dev/null
+++ b/src/share/vm/opto/phasetype.hpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OPTO_PHASETYPE_HPP
+#define SHARE_VM_OPTO_PHASETYPE_HPP
+
+enum CompilerPhaseType {
+ PHASE_BEFORE_STRINGOPTS,
+ PHASE_AFTER_STRINGOPTS,
+ PHASE_BEFORE_REMOVEUSELESS,
+ PHASE_AFTER_PARSING,
+ PHASE_ITER_GVN1,
+ PHASE_PHASEIDEAL_BEFORE_EA,
+ PHASE_ITER_GVN_AFTER_EA,
+ PHASE_ITER_GVN_AFTER_ELIMINATION,
+ PHASE_PHASEIDEALLOOP1,
+ PHASE_PHASEIDEALLOOP2,
+ PHASE_PHASEIDEALLOOP3,
+ PHASE_CPP1,
+ PHASE_ITER_GVN2,
+ PHASE_PHASEIDEALLOOP_ITERATIONS,
+ PHASE_OPTIMIZE_FINISHED,
+ PHASE_GLOBAL_CODE_MOTION,
+ PHASE_FINAL_CODE,
+ PHASE_AFTER_EA,
+ PHASE_BEFORE_CLOOPS,
+ PHASE_AFTER_CLOOPS,
+ PHASE_BEFORE_BEAUTIFY_LOOPS,
+ PHASE_AFTER_BEAUTIFY_LOOPS,
+ PHASE_BEFORE_MATCHING,
+ PHASE_INCREMENTAL_INLINE,
+ PHASE_INCREMENTAL_BOXING_INLINE,
+ PHASE_END,
+ PHASE_FAILURE,
+
+ PHASE_NUM_TYPES
+};
+
+class CompilerPhaseTypeHelper {
+ public:
+ static const char* to_string(CompilerPhaseType cpt) {
+ switch (cpt) {
+ case PHASE_BEFORE_STRINGOPTS: return "Before StringOpts";
+ case PHASE_AFTER_STRINGOPTS: return "After StringOpts";
+ case PHASE_BEFORE_REMOVEUSELESS: return "Before RemoveUseless";
+ case PHASE_AFTER_PARSING: return "After Parsing";
+ case PHASE_ITER_GVN1: return "Iter GVN 1";
+ case PHASE_PHASEIDEAL_BEFORE_EA: return "PhaseIdealLoop before EA";
+ case PHASE_ITER_GVN_AFTER_EA: return "Iter GVN after EA";
+ case PHASE_ITER_GVN_AFTER_ELIMINATION: return "Iter GVN after eliminating allocations and locks";
+ case PHASE_PHASEIDEALLOOP1: return "PhaseIdealLoop 1";
+ case PHASE_PHASEIDEALLOOP2: return "PhaseIdealLoop 2";
+ case PHASE_PHASEIDEALLOOP3: return "PhaseIdealLoop 3";
+ case PHASE_CPP1: return "PhaseCPP 1";
+ case PHASE_ITER_GVN2: return "Iter GVN 2";
+ case PHASE_PHASEIDEALLOOP_ITERATIONS: return "PhaseIdealLoop iterations";
+ case PHASE_OPTIMIZE_FINISHED: return "Optimize finished";
+ case PHASE_GLOBAL_CODE_MOTION: return "Global code motion";
+ case PHASE_FINAL_CODE: return "Final Code";
+ case PHASE_AFTER_EA: return "After Escape Analysis";
+ case PHASE_BEFORE_CLOOPS: return "Before CountedLoop";
+ case PHASE_AFTER_CLOOPS: return "After CountedLoop";
+ case PHASE_BEFORE_BEAUTIFY_LOOPS: return "Before beautify loops";
+ case PHASE_AFTER_BEAUTIFY_LOOPS: return "After beautify loops";
+ case PHASE_BEFORE_MATCHING: return "Before Matching";
+ case PHASE_INCREMENTAL_INLINE: return "Incremental Inline";
+ case PHASE_INCREMENTAL_BOXING_INLINE: return "Incremental Boxing Inline";
+ case PHASE_END: return "End";
+ case PHASE_FAILURE: return "Failure";
+ default:
+ ShouldNotReachHere();
+ return NULL;
+ }
+ }
+};
+
+#endif //SHARE_VM_OPTO_PHASETYPE_HPP
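
A minimal standalone use of the new header, assuming it is on the include path; ShouldNotReachHere() comes from HotSpot's debug machinery, so it is stubbed here purely so the sketch compiles on its own:

    #include <cstdio>
    #define ShouldNotReachHere() ((void)0)   // stub for standalone use only
    #include "phasetype.hpp"

    int main() {
      // Print the human-readable name of every compiler phase.
      for (int i = 0; i < PHASE_NUM_TYPES; i++) {
        std::printf("%2d  %s\n", i, CompilerPhaseTypeHelper::to_string((CompilerPhaseType)i));
      }
      return 0;
    }
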
diff --git a/src/share/vm/opto/postaloc.cpp b/src/share/vm/opto/postaloc.cpp
index c1b3fdbd2..76de2ed16 100644
--- a/src/share/vm/opto/postaloc.cpp
+++ b/src/share/vm/opto/postaloc.cpp
@@ -78,11 +78,13 @@ bool PhaseChaitin::may_be_copy_of_callee( Node *def ) const {
// Helper function for yank_if_dead
int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) {
int blk_adjust=0;
- Block *oldb = _cfg._bbs[old->_idx];
+ Block *oldb = _cfg.get_block_for_node(old);
oldb->find_remove(old);
// Count 1 if deleting an instruction from the current block
- if( oldb == current_block ) blk_adjust++;
- _cfg._bbs.map(old->_idx,NULL);
+ if (oldb == current_block) {
+ blk_adjust++;
+ }
+ _cfg.unmap_node_from_block(old);
OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg();
if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available?
value->map(old_reg,NULL); // Yank from value/regnd maps
@@ -403,28 +405,29 @@ void PhaseChaitin::post_allocate_copy_removal() {
// Need per-basic-block Node_Lists: a Node_List that maps from register
// number to value-producing Node.
- Node_List **blk2value = NEW_RESOURCE_ARRAY( Node_List *, _cfg._num_blocks+1);
- memset( blk2value, 0, sizeof(Node_List*)*(_cfg._num_blocks+1) );
+ Node_List **blk2value = NEW_RESOURCE_ARRAY( Node_List *, _cfg.number_of_blocks() + 1);
+ memset(blk2value, 0, sizeof(Node_List*) * (_cfg.number_of_blocks() + 1));
// Need per-basic-block Node_Lists: a Node_List that maps from register
// number to register-defining Node.
- Node_List **blk2regnd = NEW_RESOURCE_ARRAY( Node_List *, _cfg._num_blocks+1);
- memset( blk2regnd, 0, sizeof(Node_List*)*(_cfg._num_blocks+1) );
+ Node_List **blk2regnd = NEW_RESOURCE_ARRAY( Node_List *, _cfg.number_of_blocks() + 1);
+ memset(blk2regnd, 0, sizeof(Node_List*) * (_cfg.number_of_blocks() + 1));
// We keep unused Node_Lists on a free_list to avoid wasting
// memory.
GrowableArray<Node_List*> free_list = GrowableArray<Node_List*>(16);
// For all blocks
- for( uint i = 0; i < _cfg._num_blocks; i++ ) {
+ for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
uint j;
- Block *b = _cfg._blocks[i];
+ Block* block = _cfg.get_block(i);
// Count of Phis in block
uint phi_dex;
- for( phi_dex = 1; phi_dex < b->_nodes.size(); phi_dex++ ) {
- Node *phi = b->_nodes[phi_dex];
- if( !phi->is_Phi() )
+ for (phi_dex = 1; phi_dex < block->number_of_nodes(); phi_dex++) {
+ Node* phi = block->get_node(phi_dex);
+ if (!phi->is_Phi()) {
break;
+ }
}
// If any predecessor has not been visited, we do not know the state
@@ -432,21 +435,23 @@ void PhaseChaitin::post_allocate_copy_removal() {
// along Phi input edges
bool missing_some_inputs = false;
Block *freed = NULL;
- for( j = 1; j < b->num_preds(); j++ ) {
- Block *pb = _cfg._bbs[b->pred(j)->_idx];
+ for (j = 1; j < block->num_preds(); j++) {
+ Block* pb = _cfg.get_block_for_node(block->pred(j));
// Remove copies along phi edges
- for( uint k=1; k<phi_dex; k++ )
- elide_copy( b->_nodes[k], j, b, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false );
- if( blk2value[pb->_pre_order] ) { // Have a mapping on this edge?
+ for (uint k = 1; k < phi_dex; k++) {
+ elide_copy(block->get_node(k), j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
+ }
+ if (blk2value[pb->_pre_order]) { // Have a mapping on this edge?
// See if this predecessor's mappings have been used by everybody
// who wants them. If so, free 'em.
uint k;
- for( k=0; k<pb->_num_succs; k++ ) {
- Block *pbsucc = pb->_succs[k];
- if( !blk2value[pbsucc->_pre_order] && pbsucc != b )
+ for (k = 0; k < pb->_num_succs; k++) {
+ Block* pbsucc = pb->_succs[k];
+ if (!blk2value[pbsucc->_pre_order] && pbsucc != block) {
break; // Found a future user
+ }
}
- if( k >= pb->_num_succs ) { // No more uses, free!
+ if (k >= pb->_num_succs) { // No more uses, free!
freed = pb; // Record last block freed
free_list.push(blk2value[pb->_pre_order]);
free_list.push(blk2regnd[pb->_pre_order]);
@@ -465,20 +470,20 @@ void PhaseChaitin::post_allocate_copy_removal() {
value.map(_max_reg,NULL);
regnd.map(_max_reg,NULL);
// Set mappings as OUR mappings
- blk2value[b->_pre_order] = &value;
- blk2regnd[b->_pre_order] = &regnd;
+ blk2value[block->_pre_order] = &value;
+ blk2regnd[block->_pre_order] = &regnd;
// Initialize value & regnd for this block
- if( missing_some_inputs ) {
+ if (missing_some_inputs) {
// Some predecessor has not yet been visited; zap map to empty
- for( uint k = 0; k < (uint)_max_reg; k++ ) {
+ for (uint k = 0; k < (uint)_max_reg; k++) {
value.map(k,NULL);
regnd.map(k,NULL);
}
} else {
if( !freed ) { // Didn't get a freebie prior block
// Must clone some data
- freed = _cfg._bbs[b->pred(1)->_idx];
+ freed = _cfg.get_block_for_node(block->pred(1));
Node_List &f_value = *blk2value[freed->_pre_order];
Node_List &f_regnd = *blk2regnd[freed->_pre_order];
for( uint k = 0; k < (uint)_max_reg; k++ ) {
@@ -487,9 +492,11 @@ void PhaseChaitin::post_allocate_copy_removal() {
}
}
// Merge all inputs together, setting to NULL any conflicts.
- for( j = 1; j < b->num_preds(); j++ ) {
- Block *pb = _cfg._bbs[b->pred(j)->_idx];
- if( pb == freed ) continue; // Did self already via freelist
+ for (j = 1; j < block->num_preds(); j++) {
+ Block* pb = _cfg.get_block_for_node(block->pred(j));
+ if (pb == freed) {
+ continue; // Did self already via freelist
+ }
Node_List &p_regnd = *blk2regnd[pb->_pre_order];
for( uint k = 0; k < (uint)_max_reg; k++ ) {
if( regnd[k] != p_regnd[k] ) { // Conflict on reaching defs?
@@ -501,9 +508,9 @@ void PhaseChaitin::post_allocate_copy_removal() {
}
// For all Phi's
- for( j = 1; j < phi_dex; j++ ) {
+ for (j = 1; j < phi_dex; j++) {
uint k;
- Node *phi = b->_nodes[j];
+ Node *phi = block->get_node(j);
uint pidx = _lrg_map.live_range_id(phi);
OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg();
@@ -514,9 +521,10 @@ void PhaseChaitin::post_allocate_copy_removal() {
if( phi != x && u != x ) // Found a different input
u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
}
- if( u != NodeSentinel ) { // Junk Phi. Remove
- b->_nodes.remove(j--); phi_dex--;
- _cfg._bbs.map(phi->_idx,NULL);
+ if (u != NodeSentinel) { // Junk Phi. Remove
+ block->remove_node(j--);
+ phi_dex--;
+ _cfg.unmap_node_from_block(phi);
phi->replace_by(u);
phi->disconnect_inputs(NULL, C);
continue;
@@ -544,13 +552,13 @@ void PhaseChaitin::post_allocate_copy_removal() {
}
// For all remaining instructions
- for( j = phi_dex; j < b->_nodes.size(); j++ ) {
- Node *n = b->_nodes[j];
+ for (j = phi_dex; j < block->number_of_nodes(); j++) {
+ Node* n = block->get_node(j);
- if( n->outcnt() == 0 && // Dead?
- n != C->top() && // (ignore TOP, it has no du info)
- !n->is_Proj() ) { // fat-proj kills
- j -= yank_if_dead(n,b,&value,&regnd);
+ if (n->outcnt() == 0 && // Dead?
+ n != C->top() && // (ignore TOP, it has no du info)
+ !n->is_Proj() ) { // fat-proj kills
+ j -= yank_if_dead(n, block, &value, &regnd);
continue;
}
@@ -595,8 +603,9 @@ void PhaseChaitin::post_allocate_copy_removal() {
const uint two_adr = n->is_Mach() ? n->as_Mach()->two_adr() : 0;
// Remove copies along input edges
- for( k = 1; k < n->req(); k++ )
- j -= elide_copy( n, k, b, value, regnd, two_adr!=k );
+ for (k = 1; k < n->req(); k++) {
+ j -= elide_copy(n, k, block, value, regnd, two_adr != k);
+ }
// Unallocated Nodes define no registers
uint lidx = _lrg_map.live_range_id(n);
@@ -627,8 +636,8 @@ void PhaseChaitin::post_allocate_copy_removal() {
// then 'n' is a useless copy. Do not update the register->node
// mapping so 'n' will go dead.
if( value[nreg] != val ) {
- if (eliminate_copy_of_constant(val, n, b, value, regnd, nreg, OptoReg::Bad)) {
- j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
+ if (eliminate_copy_of_constant(val, n, block, value, regnd, nreg, OptoReg::Bad)) {
+ j -= replace_and_yank_if_dead(n, nreg, block, value, regnd);
} else {
// Update the mapping: record new Node defined by the register
regnd.map(nreg,n);
@@ -637,8 +646,8 @@ void PhaseChaitin::post_allocate_copy_removal() {
value.map(nreg,val);
}
} else if( !may_be_copy_of_callee(n) ) {
- assert( n->is_Copy(), "" );
- j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
+ assert(n->is_Copy(), "");
+ j -= replace_and_yank_if_dead(n, nreg, block, value, regnd);
}
} else if (RegMask::is_vector(n_ideal_reg)) {
// If Node 'n' does not change the value mapped by the register,
@@ -657,7 +666,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
}
} else if (n->is_Copy()) {
// Note: vector can't be constant and can't be copy of callee.
- j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
+ j -= replace_and_yank_if_dead(n, nreg, block, value, regnd);
}
} else {
// If the value occupies a register pair, record same info
@@ -671,18 +680,18 @@ void PhaseChaitin::post_allocate_copy_removal() {
tmp.Remove(nreg);
nreg_lo = tmp.find_first_elem();
}
- if( value[nreg] != val || value[nreg_lo] != val ) {
- if (eliminate_copy_of_constant(val, n, b, value, regnd, nreg, nreg_lo)) {
- j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
+ if (value[nreg] != val || value[nreg_lo] != val) {
+ if (eliminate_copy_of_constant(val, n, block, value, regnd, nreg, nreg_lo)) {
+ j -= replace_and_yank_if_dead(n, nreg, block, value, regnd);
} else {
regnd.map(nreg , n );
regnd.map(nreg_lo, n );
value.map(nreg ,val);
value.map(nreg_lo,val);
}
- } else if( !may_be_copy_of_callee(n) ) {
- assert( n->is_Copy(), "" );
- j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
+ } else if (!may_be_copy_of_callee(n)) {
+ assert(n->is_Copy(), "");
+ j -= replace_and_yank_if_dead(n, nreg, block, value, regnd);
}
}
diff --git a/src/share/vm/opto/reg_split.cpp b/src/share/vm/opto/reg_split.cpp
index edd614987..2c4ad874f 100644
--- a/src/share/vm/opto/reg_split.cpp
+++ b/src/share/vm/opto/reg_split.cpp
@@ -51,6 +51,15 @@
static const char out_of_nodes[] = "out of nodes during split";
+static bool contains_no_live_range_input(const Node* def) {
+ for (uint i = 1; i < def->req(); ++i) {
+ if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) {
+ return false;
+ }
+ }
+ return true;
+}
+
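
The new helper returns true when a def has no register-carrying inputs (e.g., a load-immediate), so splitting or rematerializing it cannot stretch any input's live range; its call sites lie outside this hunk, so that reading is an inference. A toy re-statement of the predicate with stand-in types:

    #include <cassert>
    #include <cstddef>

    struct ToyRegMask { bool not_empty; bool is_NotEmpty() const { return not_empty; } };
    struct ToyNode {
      unsigned    nreq;
      ToyNode*    inputs[4];
      ToyRegMask  masks[4];
      unsigned          req() const          { return nreq; }
      ToyNode*          in(unsigned i) const { return inputs[i]; }
      const ToyRegMask& in_RegMask(unsigned i) const { return masks[i]; }
    };

    // Same logic as the diff: scan data inputs (slot 0 is control), fail on
    // any present input whose register mask is non-empty.
    static bool contains_no_live_range_input(const ToyNode* def) {
      for (unsigned i = 1; i < def->req(); ++i) {
        if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) {
          return false;
        }
      }
      return true;
    }

    int main() {
      ToyNode load_imm = {1, {NULL}, {{false}}};   // no data inputs at all
      assert(contains_no_live_range_input(&load_imm));
      return 0;
    }
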
//------------------------------get_spillcopy_wide-----------------------------
// Get a SpillCopy node with wide-enough masks. Use the 'wide-mask', the
// wide ideal-register spill-mask if possible. If the 'wide-mask' does
@@ -103,17 +112,17 @@ Node *PhaseChaitin::get_spillcopy_wide( Node *def, Node *use, uint uidx ) {
void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
// Skip intervening ProjNodes. Do not insert between a ProjNode and
// its definer.
- while( i < b->_nodes.size() &&
- (b->_nodes[i]->is_Proj() ||
- b->_nodes[i]->is_Phi() ) )
+ while( i < b->number_of_nodes() &&
+ (b->get_node(i)->is_Proj() ||
+ b->get_node(i)->is_Phi() ) )
i++;
// Do not insert between a call and its Catch
- if( b->_nodes[i]->is_Catch() ) {
+ if( b->get_node(i)->is_Catch() ) {
// Put the instruction at the top of the fall-thru block.
// Find the fall-thru projection
while( 1 ) {
- const CatchProjNode *cp = b->_nodes[++i]->as_CatchProj();
+ const CatchProjNode *cp = b->get_node(++i)->as_CatchProj();
if( cp->_con == CatchProjNode::fall_through_index )
break;
}
@@ -122,8 +131,8 @@ void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
i = 1; // Right at start of block
}
- b->_nodes.insert(i,spill); // Insert node in block
- _cfg._bbs.map(spill->_idx,b); // Update node->block mapping to reflect
+ b->insert_node(spill, i); // Insert node in block
+ _cfg.map_node_to_block(spill, b); // Update node->block mapping to reflect
// Adjust the point where we go hi-pressure
if( i <= b->_ihrp_index ) b->_ihrp_index++;
if( i <= b->_fhrp_index ) b->_fhrp_index++;
@@ -151,9 +160,9 @@ uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node **
// (The implicit_null_check function ensures the use is also dominated
// by the branch-not-taken block.)
Node *be = b->end();
- if( be->is_MachNullCheck() && be->in(1) == def && def == b->_nodes[loc] ) {
+ if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
// Spill goes in the branch-not-taken block
- b = b->_succs[b->_nodes[b->end_idx()+1]->Opcode() == Op_IfTrue];
+ b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue];
loc = 0; // Just past the Region
}
assert( loc >= 0, "must insert past block head" );
@@ -210,7 +219,7 @@ uint PhaseChaitin::split_USE( Node *def, Block *b, Node *use, uint useidx, uint
use->set_req(useidx, def);
} else {
// Block and index where the use occurs.
- Block *b = _cfg._bbs[use->_idx];
+ Block *b = _cfg.get_block_for_node(use);
// Put the clone just prior to use
int bindex = b->find_node(use);
// DEF is UP, so must copy it DOWN and hook in USE
@@ -261,7 +270,7 @@ uint PhaseChaitin::split_USE( Node *def, Block *b, Node *use, uint useidx, uint
int bindex;
// Phi input spill-copies belong at the end of the prior block
if( use->is_Phi() ) {
- b = _cfg._bbs[b->pred(useidx)->_idx];
+ b = _cfg.get_block_for_node(b->pred(useidx));
bindex = b->end_idx();
} else {
// Put the clone just prior to use
@@ -326,7 +335,7 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint
continue;
}
- Block *b_def = _cfg._bbs[def->_idx];
+ Block *b_def = _cfg.get_block_for_node(def);
int idx_def = b_def->find_node(def);
Node *in_spill = get_spillcopy_wide( in, def, i );
if( !in_spill ) return 0; // Bailed out
@@ -388,10 +397,15 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint
#endif
// See if the cloned def kills any flags, and copy those kills as well
uint i = insidx+1;
- if( clone_projs( b, i, def, spill, maxlrg) ) {
+ int found_projs = clone_projs( b, i, def, spill, maxlrg);
+ if (found_projs > 0) {
// Adjust the point where we go hi-pressure
- if( i <= b->_ihrp_index ) b->_ihrp_index++;
- if( i <= b->_fhrp_index ) b->_fhrp_index++;
+ if (i <= b->_ihrp_index) {
+ b->_ihrp_index += found_projs;
+ }
+ if (i <= b->_fhrp_index) {
+ b->_fhrp_index += found_projs;
+ }
}
return spill;
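
Reviewer note: clone_projs() can insert more than one kill projection, but the old code always bumped the high-pressure indices by one. A worked illustration of the fix (the value 2 is hypothetical):

    // Suppose the cloned def kills two flag registers, so clone_projs()
    // inserts two MachProj nodes at position i. Every index at or past i
    // must shift by 2, which '+= found_projs' does and the old '++' did not.
    int found_projs = 2;                    // hypothetical return value
    if (i <= b->_ihrp_index) b->_ihrp_index += found_projs;
    if (i <= b->_fhrp_index) b->_fhrp_index += found_projs;
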
@@ -436,7 +450,7 @@ bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
// Scan block for 1st use.
for( uint i = 1; i <= b->end_idx(); i++ ) {
- Node *n = b->_nodes[i];
+ Node *n = b->get_node(i);
// Ignore PHI use, these can be up or down
if (n->is_Phi()) {
continue;
@@ -520,13 +534,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// a Def is UP or DOWN. UP means that it should get a register (ie -
// it is always in LRP regions), and DOWN means that it is probably
// on the stack (ie - it crosses HRP regions).
- Node ***Reaches = NEW_SPLIT_ARRAY( Node**, _cfg._num_blocks+1 );
- bool **UP = NEW_SPLIT_ARRAY( bool*, _cfg._num_blocks+1 );
+ Node ***Reaches = NEW_SPLIT_ARRAY( Node**, _cfg.number_of_blocks() + 1);
+ bool **UP = NEW_SPLIT_ARRAY( bool*, _cfg.number_of_blocks() + 1);
Node **debug_defs = NEW_SPLIT_ARRAY( Node*, spill_cnt );
VectorSet **UP_entry= NEW_SPLIT_ARRAY( VectorSet*, spill_cnt );
// Initialize Reaches & UP
- for( bidx = 0; bidx < _cfg._num_blocks+1; bidx++ ) {
+ for (bidx = 0; bidx < _cfg.number_of_blocks() + 1; bidx++) {
Reaches[bidx] = NEW_SPLIT_ARRAY( Node*, spill_cnt );
UP[bidx] = NEW_SPLIT_ARRAY( bool, spill_cnt );
Node **Reachblock = Reaches[bidx];
@@ -546,13 +560,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
//----------PASS 1----------
//----------Propagation & Node Insertion Code----------
// Walk the Blocks in RPO for DEF & USE info
- for( bidx = 0; bidx < _cfg._num_blocks; bidx++ ) {
+ for( bidx = 0; bidx < _cfg.number_of_blocks(); bidx++ ) {
if (C->check_node_count(spill_cnt, out_of_nodes)) {
return 0;
}
- b = _cfg._blocks[bidx];
+ b = _cfg.get_block(bidx);
// Reaches & UP arrays for this block
Reachblock = Reaches[b->_pre_order];
UPblock = UP[b->_pre_order];
@@ -580,7 +594,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
UPblock[slidx] = true;
// Record following instruction in case 'n' rematerializes and
// kills flags
- Block *pred1 = _cfg._bbs[b->pred(1)->_idx];
+ Block *pred1 = _cfg.get_block_for_node(b->pred(1));
continue;
}
@@ -592,7 +606,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// Grab predecessor block header
n1 = b->pred(1);
// Grab the appropriate reaching def info for inpidx
- pred = _cfg._bbs[n1->_idx];
+ pred = _cfg.get_block_for_node(n1);
pidx = pred->_pre_order;
Node **Ltmp = Reaches[pidx];
bool *Utmp = UP[pidx];
@@ -607,7 +621,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// Grab predecessor block headers
n2 = b->pred(inpidx);
// Grab the appropriate reaching def info for inpidx
- pred = _cfg._bbs[n2->_idx];
+ pred = _cfg.get_block_for_node(n2);
pidx = pred->_pre_order;
Ltmp = Reaches[pidx];
Utmp = UP[pidx];
@@ -633,7 +647,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// check block for appropriate phinode & update edges
for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
- n1 = b->_nodes[insidx];
+ n1 = b->get_node(insidx);
// bail if this is not a phi
phi = n1->is_Phi() ? n1->as_Phi() : NULL;
if( phi == NULL ) {
@@ -692,7 +706,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// Grab predecessor block header
n1 = b->pred(1);
// Grab the appropriate reaching def info for k
- pred = _cfg._bbs[n1->_idx];
+ pred = _cfg.get_block_for_node(n1);
pidx = pred->_pre_order;
Node **Ltmp = Reaches[pidx];
bool *Utmp = UP[pidx];
@@ -733,7 +747,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
//----------Walk Instructions in the Block and Split----------
// For all non-phi instructions in the block
for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
- Node *n = b->_nodes[insidx];
+ Node *n = b->get_node(insidx);
// Find the defining Node's live range index
uint defidx = _lrg_map.find_id(n);
uint cnt = n->req();
@@ -762,7 +776,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
n->replace_by(u); // Then replace with unique input
n->disconnect_inputs(NULL, C);
- b->_nodes.remove(insidx);
+ b->remove_node(insidx);
insidx--;
b->_ihrp_index--;
b->_fhrp_index--;
@@ -775,12 +789,12 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
(b->_reg_pressure < (uint)INTPRESSURE) ||
b->_ihrp_index > 4000000 ||
b->_ihrp_index >= b->end_idx() ||
- !b->_nodes[b->_ihrp_index]->is_Proj(), "" );
+ !b->get_node(b->_ihrp_index)->is_Proj(), "" );
assert( insidx > b->_fhrp_index ||
(b->_freg_pressure < (uint)FLOATPRESSURE) ||
b->_fhrp_index > 4000000 ||
b->_fhrp_index >= b->end_idx() ||
- !b->_nodes[b->_fhrp_index]->is_Proj(), "" );
+ !b->get_node(b->_fhrp_index)->is_Proj(), "" );
// ********** Handle Crossing HRP Boundary **********
if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
@@ -805,7 +819,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// Insert point is just past last use or def in the block
int insert_point = insidx-1;
while( insert_point > 0 ) {
- Node *n = b->_nodes[insert_point];
+ Node *n = b->get_node(insert_point);
// Hit top of block? Quit going backwards
if (n->is_Phi()) {
break;
@@ -851,7 +865,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
}
} // end if LRG is UP
} // end for all spilling live ranges
- assert( b->_nodes[insidx] == n, "got insidx set incorrectly" );
+ assert( b->get_node(insidx) == n, "got insidx set incorrectly" );
} // end if crossing HRP Boundary
// If the LRG index is oob, then this is a new spillcopy, skip it.
@@ -864,7 +878,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
n->replace_by( n->in(copyidx) );
n->set_req( copyidx, NULL );
- b->_nodes.remove(insidx--);
+ b->remove_node(insidx--);
b->_ihrp_index--; // Adjust the point where we go hi-pressure
b->_fhrp_index--;
continue;
@@ -910,7 +924,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
return 0;
}
_lrg_map.extend(def->_idx, 0);
- _cfg._bbs.map(def->_idx,b);
+ _cfg.map_node_to_block(def, b);
n->set_req(inpidx, def);
continue;
}
@@ -918,10 +932,10 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// Rematerializable? Then clone def at use site instead
// of store/load
if( def->rematerialize() ) {
- int old_size = b->_nodes.size();
+ int old_size = b->number_of_nodes();
def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
if( !def ) return 0; // Bail out
- insidx += b->_nodes.size()-old_size;
+ insidx += b->number_of_nodes()-old_size;
}
MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
@@ -1282,7 +1296,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
for( insidx = 0; insidx < phis->size(); insidx++ ) {
Node *phi = phis->at(insidx);
assert(phi->is_Phi(),"This list must only contain Phi Nodes");
- Block *b = _cfg._bbs[phi->_idx];
+ Block *b = _cfg.get_block_for_node(phi);
// Grab the live range number
uint lidx = _lrg_map.find_id(phi);
uint slidx = lrg2reach[lidx];
@@ -1306,20 +1320,20 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// DEF has the wrong UP/DOWN value.
for( uint i = 1; i < b->num_preds(); i++ ) {
// Get predecessor block pre-order number
- Block *pred = _cfg._bbs[b->pred(i)->_idx];
+ Block *pred = _cfg.get_block_for_node(b->pred(i));
pidx = pred->_pre_order;
// Grab reaching def
Node *def = Reaches[pidx][slidx];
assert( def, "must have reaching def" );
// If input up/down sense and reg-pressure DISagree
- if( def->rematerialize() ) {
+ if (def->rematerialize() && contains_no_live_range_input(def)) {
// Place the rematerialized node above any MSCs created during
// phi node splitting. end_idx points at the insertion point
// so look at the node before it.
int insert = pred->end_idx();
while (insert >= 1 &&
- pred->_nodes[insert - 1]->is_SpillCopy() &&
- _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
+ pred->get_node(insert - 1)->is_SpillCopy() &&
+ _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
insert--;
}
def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
@@ -1385,10 +1399,10 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// DEBUG
#ifdef ASSERT
// Validate all live range index assignments
- for (bidx = 0; bidx < _cfg._num_blocks; bidx++) {
- b = _cfg._blocks[bidx];
+ for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) {
+ b = _cfg.get_block(bidx);
for (insidx = 0; insidx <= b->end_idx(); insidx++) {
- Node *n = b->_nodes[insidx];
+ Node *n = b->get_node(insidx);
uint defidx = _lrg_map.find(n);
assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
assert(defidx < maxlrg,"Bad live range index in Split");
diff --git a/src/share/vm/opto/runtime.cpp b/src/share/vm/opto/runtime.cpp
index 5742731e1..e2a3e69a2 100644
--- a/src/share/vm/opto/runtime.cpp
+++ b/src/share/vm/opto/runtime.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -129,17 +129,15 @@ ExceptionBlob* OptoRuntime::_exception_blob;
// This should be called in an assertion at the start of OptoRuntime routines
// which are entered from compiled code (all of them)
-#ifndef PRODUCT
+#ifdef ASSERT
static bool check_compiled_frame(JavaThread* thread) {
assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
-#ifdef ASSERT
RegisterMap map(thread, false);
frame caller = thread->last_frame().sender(&map);
assert(caller.is_compiled_frame(), "not being called from compiled-like code");
-#endif /* ASSERT */
return true;
}
-#endif
+#endif // ASSERT
#define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, save_arg_regs, return_pc) \
@@ -834,6 +832,28 @@ const TypeFunc* OptoRuntime::aescrypt_block_Type() {
return TypeFunc::make(domain, range);
}
+/**
+ * int updateBytesCRC32(int crc, byte* b, int len)
+ */
+const TypeFunc* OptoRuntime::updateBytesCRC32_Type() {
+ // create input type (domain)
+ int num_args = 3;
+ int argcnt = num_args;
+ const Type** fields = TypeTuple::fields(argcnt);
+ int argp = TypeFunc::Parms;
+ fields[argp++] = TypeInt::INT; // crc
+ fields[argp++] = TypePtr::NOTNULL; // src
+ fields[argp++] = TypeInt::INT; // len
+ assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
+ const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
+
+ // result type needed
+ fields = TypeTuple::fields(1);
+ fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
+ const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
+ return TypeFunc::make(domain, range);
+}
+
// for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning void
const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
// create input type (domain)
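
Reviewer note: a hedged sketch of how this TypeFunc is typically consumed on the intrinsic side; the call site is assumed to live in library_call.cpp and is not part of this hunk:

    // assumed consumer: emit a leaf runtime call to the CRC32 stub with the
    // (crc, src, len) -> crc signature declared above
    Node* call = make_runtime_call(RC_LEAF | RC_NO_FP,
                                   OptoRuntime::updateBytesCRC32_Type(),
                                   stubAddr, stubName, TypePtr::BOTTOM,
                                   crc, src_start, length);
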
diff --git a/src/share/vm/opto/runtime.hpp b/src/share/vm/opto/runtime.hpp
index 295b71237..b3f7ff4cb 100644
--- a/src/share/vm/opto/runtime.hpp
+++ b/src/share/vm/opto/runtime.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -284,6 +284,8 @@ private:
static const TypeFunc* aescrypt_block_Type();
static const TypeFunc* cipherBlockChaining_aescrypt_Type();
+ static const TypeFunc* updateBytesCRC32_Type();
+
// leaf on stack replacement interpreter accessor types
static const TypeFunc* osr_end_Type();
diff --git a/src/share/vm/opto/subnode.cpp b/src/share/vm/opto/subnode.cpp
index 70a64ba70..275b4d80d 100644
--- a/src/share/vm/opto/subnode.cpp
+++ b/src/share/vm/opto/subnode.cpp
@@ -863,10 +863,11 @@ const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
const TypePtr *r1 = t2->make_ptr();
// Undefined inputs make for an undefined result
- if( TypePtr::above_centerline(r0->_ptr) ||
- TypePtr::above_centerline(r1->_ptr) )
+ if ((r0 == NULL) || (r1 == NULL) ||
+ TypePtr::above_centerline(r0->_ptr) ||
+ TypePtr::above_centerline(r1->_ptr)) {
return Type::TOP;
-
+ }
if (r0 == r1 && r0->singleton()) {
// Equal pointer constants (klasses, nulls, etc.)
return TypeInt::CC_EQ;
diff --git a/src/share/vm/opto/type.cpp b/src/share/vm/opto/type.cpp
index 68f681342..e25c529a8 100644
--- a/src/share/vm/opto/type.cpp
+++ b/src/share/vm/opto/type.cpp
@@ -189,6 +189,38 @@ const Type* Type::get_typeflow_type(ciType* type) {
}
+//-----------------------make_from_constant------------------------------------
+const Type* Type::make_from_constant(ciConstant constant,
+ bool require_constant, bool is_autobox_cache) {
+ switch (constant.basic_type()) {
+ case T_BOOLEAN: return TypeInt::make(constant.as_boolean());
+ case T_CHAR: return TypeInt::make(constant.as_char());
+ case T_BYTE: return TypeInt::make(constant.as_byte());
+ case T_SHORT: return TypeInt::make(constant.as_short());
+ case T_INT: return TypeInt::make(constant.as_int());
+ case T_LONG: return TypeLong::make(constant.as_long());
+ case T_FLOAT: return TypeF::make(constant.as_float());
+ case T_DOUBLE: return TypeD::make(constant.as_double());
+ case T_ARRAY:
+ case T_OBJECT:
+ {
+ // cases:
+ // can_be_constant = (oop not scavengable || ScavengeRootsInCode != 0)
+ // should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
+ // An oop is not scavengable if it is in the perm gen.
+ ciObject* oop_constant = constant.as_object();
+ if (oop_constant->is_null_object()) {
+ return Type::get_zero_type(T_OBJECT);
+ } else if (require_constant || oop_constant->should_be_constant()) {
+ return TypeOopPtr::make_from_constant(oop_constant, require_constant, is_autobox_cache);
+ }
+ }
+ }
+ // Fall through to failure
+ return NULL;
+}
+
+
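Reviewer note: a hedged usage sketch for the new entry point; the caller shape is assumed (e.g. constant-folding a final field load), not shown in this patch:

    // assumed caller: fold a ciField's constant value into a compiler type
    ciConstant field_value = field->constant_value();
    const Type* con_type = Type::make_from_constant(field_value,
                                                    /*require_constant=*/true);
    if (con_type != NULL) {
      // use con_type as the type of the folded load
    }
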
//------------------------------make-------------------------------------------
// Create a simple Type, with default empty symbol sets. Then hashcons it
// and look for an existing copy in the type dictionary.
@@ -1824,12 +1856,12 @@ inline const TypeInt* normalize_array_size(const TypeInt* size) {
}
//------------------------------make-------------------------------------------
-const TypeAry *TypeAry::make( const Type *elem, const TypeInt *size) {
+const TypeAry* TypeAry::make(const Type* elem, const TypeInt* size, bool stable) {
if (UseCompressedOops && elem->isa_oopptr()) {
elem = elem->make_narrowoop();
}
size = normalize_array_size(size);
- return (TypeAry*)(new TypeAry(elem,size))->hashcons();
+ return (TypeAry*)(new TypeAry(elem,size,stable))->hashcons();
}
//------------------------------meet-------------------------------------------
@@ -1850,7 +1882,8 @@ const Type *TypeAry::xmeet( const Type *t ) const {
case Array: { // Meeting 2 arrays?
const TypeAry *a = t->is_ary();
return TypeAry::make(_elem->meet(a->_elem),
- _size->xmeet(a->_size)->is_int());
+ _size->xmeet(a->_size)->is_int(),
+ _stable & a->_stable);
}
case Top:
break;
@@ -1863,7 +1896,7 @@ const Type *TypeAry::xmeet( const Type *t ) const {
const Type *TypeAry::xdual() const {
const TypeInt* size_dual = _size->dual()->is_int();
size_dual = normalize_array_size(size_dual);
- return new TypeAry( _elem->dual(), size_dual);
+ return new TypeAry(_elem->dual(), size_dual, !_stable);
}
//------------------------------eq---------------------------------------------
@@ -1871,13 +1904,14 @@ const Type *TypeAry::xdual() const {
bool TypeAry::eq( const Type *t ) const {
const TypeAry *a = (const TypeAry*)t;
return _elem == a->_elem &&
+ _stable == a->_stable &&
_size == a->_size;
}
//------------------------------hash-------------------------------------------
// Type-specific hashing function.
int TypeAry::hash(void) const {
- return (intptr_t)_elem + (intptr_t)_size;
+ return (intptr_t)_elem + (intptr_t)_size + (_stable ? 43 : 0);
}
//----------------------interface_vs_oop---------------------------------------
@@ -1894,6 +1928,7 @@ bool TypeAry::interface_vs_oop(const Type *t) const {
//------------------------------dump2------------------------------------------
#ifndef PRODUCT
void TypeAry::dump2( Dict &d, uint depth, outputStream *st ) const {
+ if (_stable) st->print("stable:");
_elem->dump2(d, depth, st);
st->print("[");
_size->dump2(d, depth, st);
@@ -2372,11 +2407,16 @@ TypeOopPtr::TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int
_klass_is_exact(xk),
_is_ptr_to_narrowoop(false),
_is_ptr_to_narrowklass(false),
+ _is_ptr_to_boxed_value(false),
_instance_id(instance_id) {
+ if (Compile::current()->eliminate_boxing() && (t == InstPtr) &&
+ (offset > 0) && xk && (k != 0) && k->is_instance_klass()) {
+ _is_ptr_to_boxed_value = k->as_instance_klass()->is_boxed_value_offset(offset);
+ }
#ifdef _LP64
if (_offset != 0) {
if (_offset == oopDesc::klass_offset_in_bytes()) {
- _is_ptr_to_narrowklass = UseCompressedKlassPointers;
+ _is_ptr_to_narrowklass = UseCompressedClassPointers;
} else if (klass() == NULL) {
// Array with unknown body type
assert(this->isa_aryptr(), "only arrays without klass");
@@ -2613,44 +2653,50 @@ const TypeOopPtr* TypeOopPtr::make_from_klass_common(ciKlass *klass, bool klass_
//------------------------------make_from_constant-----------------------------
// Make a java pointer from an oop constant
-const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_constant) {
- assert(!o->is_null_object(), "null object not yet handled here.");
- ciKlass* klass = o->klass();
- if (klass->is_instance_klass()) {
- // Element is an instance
- if (require_constant) {
- if (!o->can_be_constant()) return NULL;
- } else if (!o->should_be_constant()) {
- return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, 0);
- }
- return TypeInstPtr::make(o);
- } else if (klass->is_obj_array_klass()) {
- // Element is an object array. Recursively call ourself.
- const Type *etype =
+const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o,
+ bool require_constant,
+ bool is_autobox_cache) {
+ assert(!o->is_null_object(), "null object not yet handled here.");
+ ciKlass* klass = o->klass();
+ if (klass->is_instance_klass()) {
+ // Element is an instance
+ if (require_constant) {
+ if (!o->can_be_constant()) return NULL;
+ } else if (!o->should_be_constant()) {
+ return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, 0);
+ }
+ return TypeInstPtr::make(o);
+ } else if (klass->is_obj_array_klass()) {
+ // Element is an object array. Recursively call ourself.
+ const TypeOopPtr *etype =
TypeOopPtr::make_from_klass_raw(klass->as_obj_array_klass()->element_klass());
- const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length()));
- // We used to pass NotNull in here, asserting that the sub-arrays
- // are all not-null. This is not true in generally, as code can
- // slam NULLs down in the subarrays.
- if (require_constant) {
- if (!o->can_be_constant()) return NULL;
- } else if (!o->should_be_constant()) {
- return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
- }
- const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
+ if (is_autobox_cache) {
+ // The pointers in the autobox arrays are always non-null.
+ etype = etype->cast_to_ptr_type(TypePtr::NotNull)->is_oopptr();
+ }
+ const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length()));
+ // We used to pass NotNull in here, asserting that the sub-arrays
+ // are all not-null. This is not true in general, as code can
+ // slam NULLs down in the subarrays.
+ if (require_constant) {
+ if (!o->can_be_constant()) return NULL;
+ } else if (!o->should_be_constant()) {
+ return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
+ }
+ const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0, InstanceBot, is_autobox_cache);
return arr;
- } else if (klass->is_type_array_klass()) {
- // Element is an typeArray
+ } else if (klass->is_type_array_klass()) {
+ // Element is a typeArray
const Type* etype =
(Type*)get_const_basic_type(klass->as_type_array_klass()->element_type());
- const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length()));
- // We used to pass NotNull in here, asserting that the array pointer
- // is not-null. That was not true in general.
- if (require_constant) {
- if (!o->can_be_constant()) return NULL;
- } else if (!o->should_be_constant()) {
- return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
- }
+ const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length()));
+ // We used to pass NotNull in here, asserting that the array pointer
+ // is not-null. That was not true in general.
+ if (require_constant) {
+ if (!o->can_be_constant()) return NULL;
+ } else if (!o->should_be_constant()) {
+ return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
+ }
const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0);
return arr;
}
@@ -2856,6 +2902,28 @@ const TypeInstPtr *TypeInstPtr::make(PTR ptr,
return result;
}
+/**
+ * Create constant type for a constant boxed value
+ */
+const Type* TypeInstPtr::get_const_boxed_value() const {
+ assert(is_ptr_to_boxed_value(), "should be called only for boxed value");
+ assert((const_oop() != NULL), "should be called only for constant object");
+ ciConstant constant = const_oop()->as_instance()->field_value_by_offset(offset());
+ BasicType bt = constant.basic_type();
+ switch (bt) {
+ case T_BOOLEAN: return TypeInt::make(constant.as_boolean());
+ case T_INT: return TypeInt::make(constant.as_int());
+ case T_CHAR: return TypeInt::make(constant.as_char());
+ case T_BYTE: return TypeInt::make(constant.as_byte());
+ case T_SHORT: return TypeInt::make(constant.as_short());
+ case T_FLOAT: return TypeF::make(constant.as_float());
+ case T_DOUBLE: return TypeD::make(constant.as_double());
+ case T_LONG: return TypeLong::make(constant.as_long());
+ default: break;
+ }
+ fatal(err_msg_res("Invalid boxed value type '%s'", type2name(bt)));
+ return NULL;
+}
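
Reviewer note: a hedged sketch of the intended consumer in the load-folding path; the caller shape is assumed:

    // assumed caller: a load from the value field of a constant box
    // folds to the primitive constant type
    if (tinst->is_ptr_to_boxed_value() && tinst->const_oop() != NULL) {
      const Type* con = tinst->get_const_boxed_value();
      // ... replace the load's type with con
    }
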
//------------------------------cast_to_ptr_type-------------------------------
const Type *TypeInstPtr::cast_to_ptr_type(PTR ptr) const {
@@ -3330,18 +3398,18 @@ const TypeAryPtr *TypeAryPtr::make( PTR ptr, const TypeAry *ary, ciKlass* k, boo
if (!xk) xk = ary->ary_must_be_exact();
assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed");
if (!UseExactTypes) xk = (ptr == Constant);
- return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id))->hashcons();
+ return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id, false))->hashcons();
}
//------------------------------make-------------------------------------------
-const TypeAryPtr *TypeAryPtr::make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id ) {
+const TypeAryPtr *TypeAryPtr::make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, bool is_autobox_cache) {
assert(!(k == NULL && ary->_elem->isa_int()),
"integral arrays must be pre-equipped with a class");
assert( (ptr==Constant && o) || (ptr!=Constant && !o), "" );
if (!xk) xk = (o != NULL) || ary->ary_must_be_exact();
assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed");
if (!UseExactTypes) xk = (ptr == Constant);
- return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id))->hashcons();
+ return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id, is_autobox_cache))->hashcons();
}
//------------------------------cast_to_ptr_type-------------------------------
@@ -3397,8 +3465,20 @@ const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size) const {
jint max_hi = max_array_length(elem()->basic_type());
//if (index_not_size) --max_hi; // type of a valid array index, FTR
bool chg = false;
- if (lo < min_lo) { lo = min_lo; chg = true; }
- if (hi > max_hi) { hi = max_hi; chg = true; }
+ if (lo < min_lo) {
+ lo = min_lo;
+ if (size->is_con()) {
+ hi = lo;
+ }
+ chg = true;
+ }
+ if (hi > max_hi) {
+ hi = max_hi;
+ if (size->is_con()) {
+ lo = hi;
+ }
+ chg = true;
+ }
// Negative length arrays will produce weird intermediate dead fast-path code
if (lo > hi)
return TypeInt::ZERO;
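
Reviewer note: the new is_con() branches keep a constant size a singleton after clamping; a worked illustration with hypothetical values:

    // before: size = [max_hi+1, max_hi+1] narrowed to [lo, max_hi],
    //         a range, silently losing constant-ness
    // after : hi = max_hi and then lo = hi, giving [max_hi, max_hi],
    //         still a singleton constant
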
@@ -3412,10 +3492,38 @@ const TypeAryPtr* TypeAryPtr::cast_to_size(const TypeInt* new_size) const {
assert(new_size != NULL, "");
new_size = narrow_size_type(new_size);
if (new_size == size()) return this;
- const TypeAry* new_ary = TypeAry::make(elem(), new_size);
+ const TypeAry* new_ary = TypeAry::make(elem(), new_size, is_stable());
+ return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id);
+}
+
+
+//------------------------------cast_to_stable---------------------------------
+const TypeAryPtr* TypeAryPtr::cast_to_stable(bool stable, int stable_dimension) const {
+ if (stable_dimension <= 0 || (stable_dimension == 1 && stable == this->is_stable()))
+ return this;
+
+ const Type* elem = this->elem();
+ const TypePtr* elem_ptr = elem->make_ptr();
+
+ if (stable_dimension > 1 && elem_ptr != NULL && elem_ptr->isa_aryptr()) {
+ // If this is widened from a narrow oop, TypeAry::make will re-narrow it.
+ elem = elem_ptr = elem_ptr->is_aryptr()->cast_to_stable(stable, stable_dimension - 1);
+ }
+
+ const TypeAry* new_ary = TypeAry::make(elem, size(), stable);
+
return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id);
}
+//-----------------------------stable_dimension--------------------------------
+int TypeAryPtr::stable_dimension() const {
+ if (!is_stable()) return 0;
+ int dim = 1;
+ const TypePtr* elem_ptr = elem()->make_ptr();
+ if (elem_ptr != NULL && elem_ptr->isa_aryptr())
+ dim += elem_ptr->is_aryptr()->stable_dimension();
+ return dim;
+}
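
Reviewer note: a hedged illustration of the multi-dimensional case; the starting type t is assumed:

    // for a field typed e.g. String[][] marked @Stable, both dimensions
    // are flagged stable and the count round-trips
    const TypeAryPtr* t2 = t->cast_to_stable(true, 2);
    assert(t2->is_stable(), "outer dimension stable");
    assert(t2->stable_dimension() == 2, "both dimensions counted");
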
//------------------------------eq---------------------------------------------
// Structural equality check for Type representations
@@ -3525,7 +3633,7 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const {
// Something like byte[int+] meets char[int+].
// This must fall to bottom, not (int[-128..65535])[int+].
instance_id = InstanceBot;
- tary = TypeAry::make(Type::BOTTOM, tary->_size);
+ tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
}
} else // Non integral arrays.
// Must fall to bottom if exact klasses in upper lattice
@@ -3539,7 +3647,7 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const {
(tap ->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) ||
// 'this' is exact and super or unrelated:
(this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) {
- tary = TypeAry::make(Type::BOTTOM, tary->_size);
+ tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
return make( NotNull, NULL, tary, lazy_klass, false, off, InstanceBot );
}
@@ -3630,7 +3738,7 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const {
//------------------------------xdual------------------------------------------
// Dual: compute field-by-field dual
const Type *TypeAryPtr::xdual() const {
- return new TypeAryPtr( dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id() );
+ return new TypeAryPtr( dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id(), is_autobox_cache() );
}
//----------------------interface_vs_oop---------------------------------------
diff --git a/src/share/vm/opto/type.hpp b/src/share/vm/opto/type.hpp
index 7868b2f78..d48325918 100644
--- a/src/share/vm/opto/type.hpp
+++ b/src/share/vm/opto/type.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -169,7 +169,7 @@ protected:
public:
- inline void* operator new( size_t x ) {
+ inline void* operator new( size_t x ) throw() {
Compile* compile = Compile::current();
compile->set_type_last_size(x);
void *temp = compile->type_arena()->Amalloc_D(x);
@@ -234,6 +234,9 @@ public:
bool is_ptr_to_narrowoop() const;
bool is_ptr_to_narrowklass() const;
+ bool is_ptr_to_boxing_obj() const;
+
+
// Convenience access
float getf() const;
double getd() const;
@@ -369,6 +372,10 @@ public:
// Mapping from CI type system to compiler type:
static const Type* get_typeflow_type(ciType* type);
+ static const Type* make_from_constant(ciConstant constant,
+ bool require_constant = false,
+ bool is_autobox_cache = false);
+
private:
// support arrays
static const BasicType _basic_type[];
@@ -585,8 +592,8 @@ public:
//------------------------------TypeAry----------------------------------------
// Class of Array Types
class TypeAry : public Type {
- TypeAry( const Type *elem, const TypeInt *size) : Type(Array),
- _elem(elem), _size(size) {}
+ TypeAry(const Type* elem, const TypeInt* size, bool stable) : Type(Array),
+ _elem(elem), _size(size), _stable(stable) {}
public:
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
@@ -596,10 +603,11 @@ public:
private:
const Type *_elem; // Element type of array
const TypeInt *_size; // Elements in array
+ const bool _stable; // Are elements @Stable?
friend class TypeAryPtr;
public:
- static const TypeAry *make( const Type *elem, const TypeInt *size);
+ static const TypeAry* make(const Type* elem, const TypeInt* size, bool stable = false);
virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now.
@@ -794,6 +802,7 @@ protected:
bool _klass_is_exact;
bool _is_ptr_to_narrowoop;
bool _is_ptr_to_narrowklass;
+ bool _is_ptr_to_boxed_value;
// If not InstanceTop or InstanceBot, indicates that this is
// a particular instance of this type which is distinct.
@@ -826,7 +835,9 @@ public:
// If the object cannot be rendered as a constant,
// may return a non-singleton type.
// If require_constant, produce a NULL if a singleton is not possible.
- static const TypeOopPtr* make_from_constant(ciObject* o, bool require_constant = false);
+ static const TypeOopPtr* make_from_constant(ciObject* o,
+ bool require_constant = false,
+ bool not_null_elements = false);
// Make a generic (unclassed) pointer to an oop.
static const TypeOopPtr* make(PTR ptr, int offset, int instance_id);
@@ -839,7 +850,7 @@ public:
// compressed oop references.
bool is_ptr_to_narrowoop_nv() const { return _is_ptr_to_narrowoop; }
bool is_ptr_to_narrowklass_nv() const { return _is_ptr_to_narrowklass; }
-
+ bool is_ptr_to_boxed_value() const { return _is_ptr_to_boxed_value; }
bool is_known_instance() const { return _instance_id > 0; }
int instance_id() const { return _instance_id; }
bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; }
@@ -912,6 +923,9 @@ class TypeInstPtr : public TypeOopPtr {
// Make a pointer to an oop.
static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot );
+ /** Create constant type for a constant boxed value */
+ const Type* get_const_boxed_value() const;
+
// If this is a java.lang.Class constant, return the type for it or NULL.
// Pass to Type::get_const_type to turn it to a type, which will usually
// be a TypeInstPtr, but may also be a TypeInt::INT for int.class, etc.
@@ -943,7 +957,12 @@ class TypeInstPtr : public TypeOopPtr {
//------------------------------TypeAryPtr-------------------------------------
// Class of Java array pointers
class TypeAryPtr : public TypeOopPtr {
- TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id ) : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id), _ary(ary) {
+ TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk,
+ int offset, int instance_id, bool is_autobox_cache )
+ : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id),
+ _ary(ary),
+ _is_autobox_cache(is_autobox_cache)
+ {
#ifdef ASSERT
if (k != NULL) {
// Verify that specified klass and TypeAryPtr::klass() follow the same rules.
@@ -964,6 +983,7 @@ class TypeAryPtr : public TypeOopPtr {
virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing
const TypeAry *_ary; // Array we point into
+ const bool _is_autobox_cache;
ciKlass* compute_klass(DEBUG_ONLY(bool verify = false)) const;
@@ -973,10 +993,13 @@ public:
const TypeAry* ary() const { return _ary; }
const Type* elem() const { return _ary->_elem; }
const TypeInt* size() const { return _ary->_size; }
+ bool is_stable() const { return _ary->_stable; }
+
+ bool is_autobox_cache() const { return _is_autobox_cache; }
static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot);
// Constant pointer to array
- static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot);
+ static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, bool is_autobox_cache = false);
// Return a 'ptr' version of this type
virtual const Type *cast_to_ptr_type(PTR ptr) const;
@@ -994,6 +1017,9 @@ public:
virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now.
+ const TypeAryPtr* cast_to_stable(bool stable, int stable_dimension = 1) const;
+ int stable_dimension() const;
+
// Convenience common pre-built types.
static const TypeAryPtr *RANGE;
static const TypeAryPtr *OOPS;
@@ -1504,6 +1530,13 @@ inline bool Type::is_floatingpoint() const {
return false;
}
+inline bool Type::is_ptr_to_boxing_obj() const {
+ const TypeInstPtr* tp = isa_instptr();
+ return (tp != NULL) && (tp->offset() == 0) &&
+ tp->klass()->is_instance_klass() &&
+ tp->klass()->as_instance_klass()->is_box_klass();
+}
+
// ===============================================================
// Things that need to be 64-bits in the 64-bit build but
diff --git a/src/share/vm/precompiled/precompiled.hpp b/src/share/vm/precompiled/precompiled.hpp
index 118fe9c1b..d4be22e6c 100644
--- a/src/share/vm/precompiled/precompiled.hpp
+++ b/src/share/vm/precompiled/precompiled.hpp
@@ -26,7 +26,6 @@
// or if the user passes USE_PRECOMPILED_HEADER=0 to the makefiles.
#ifndef DONT_USE_PRECOMPILED_HEADER
-
# include "asm/assembler.hpp"
# include "asm/assembler.inline.hpp"
# include "asm/codeBuffer.hpp"
diff --git a/src/share/vm/prims/forte.cpp b/src/share/vm/prims/forte.cpp
index 33419f298..4b1fbebc1 100644
--- a/src/share/vm/prims/forte.cpp
+++ b/src/share/vm/prims/forte.cpp
@@ -31,10 +31,24 @@
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "prims/forte.hpp"
+#include "runtime/javaCalls.hpp"
#include "runtime/thread.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
+// call frame copied from old .h file and renamed
+typedef struct {
+ jint lineno; // line number in the source file
+ jmethodID method_id; // method executed in this frame
+} ASGCT_CallFrame;
+
+// call trace copied from old .h file and renamed
+typedef struct {
+ JNIEnv *env_id; // Env where trace was recorded
+ jint num_frames; // number of frames in this trace
+ ASGCT_CallFrame *frames; // frames
+} ASGCT_CallTrace;
+
// These names match the names reported by the forte quality kit
enum {
ticks_no_Java_frame = 0,
@@ -50,6 +64,8 @@ enum {
ticks_safepoint = -10
};
+#if INCLUDE_JVMTI
+
//-------------------------------------------------------
// Native interfaces for use by Forte tools.
@@ -293,10 +309,14 @@ static bool find_initial_Java_frame(JavaThread* thread,
for (loop_count = 0; loop_count < loop_max; loop_count++) {
- if (candidate.is_first_frame()) {
+ if (candidate.is_entry_frame()) {
+ // jcw is NULL if the java call wrapper couldn't be found
+ JavaCallWrapper *jcw = candidate.entry_frame_call_wrapper_if_safe(thread);
// If initial frame is frame from StubGenerator and there is no
// previous anchor, there are no java frames associated with a method
- return false;
+ if (jcw == NULL || jcw->is_first_frame()) {
+ return false;
+ }
}
if (candidate.is_interpreted_frame()) {
@@ -360,20 +380,6 @@ static bool find_initial_Java_frame(JavaThread* thread,
}
-
-// call frame copied from old .h file and renamed
-typedef struct {
- jint lineno; // line number in the source file
- jmethodID method_id; // method executed in this frame
-} ASGCT_CallFrame;
-
-// call trace copied from old .h file and renamed
-typedef struct {
- JNIEnv *env_id; // Env where trace was recorded
- jint num_frames; // number of frames in this trace
- ASGCT_CallFrame *frames; // frames
-} ASGCT_CallTrace;
-
static void forte_fill_call_trace_given_top(JavaThread* thd,
ASGCT_CallTrace* trace,
int depth,
@@ -618,7 +624,7 @@ void collector_func_load(char* name,
void* null_argument_3);
#pragma weak collector_func_load
#define collector_func_load(x0,x1,x2,x3,x4,x5,x6) \
- ( collector_func_load ? collector_func_load(x0,x1,x2,x3,x4,x5,x6),0 : 0 )
+ ( collector_func_load ? collector_func_load(x0,x1,x2,x3,x4,x5,x6),(void)0 : (void)0 )
#endif // __APPLE__
#endif // !_WINDOWS
@@ -634,3 +640,12 @@ void Forte::register_stub(const char* name, address start, address end) {
pointer_delta(end, start, sizeof(jbyte)), 0, NULL);
#endif // !_WINDOWS && !IA64
}
+
+#else // INCLUDE_JVMTI
+extern "C" {
+ JNIEXPORT
+ void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext) {
+ trace->num_frames = ticks_no_class_load; // -1
+ }
+}
+#endif // INCLUDE_JVMTI
diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp
index 5f37992d0..cbd7860a0 100644
--- a/src/share/vm/prims/jni.cpp
+++ b/src/share/vm/prims/jni.cpp
@@ -74,7 +74,6 @@
#include "runtime/vm_operations.hpp"
#include "services/runtimeService.hpp"
#include "trace/tracing.hpp"
-#include "trace/traceEventTypes.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
@@ -880,7 +879,7 @@ JNI_ENTRY(jint, jni_PushLocalFrame(JNIEnv *env, jint capacity))
env, capacity);
#endif /* USDT2 */
//%note jni_11
- if (capacity < 0 && capacity > MAX_REASONABLE_LOCAL_CAPACITY) {
+ if (capacity < 0 || capacity > MAX_REASONABLE_LOCAL_CAPACITY) {
#ifndef USDT2
DTRACE_PROBE1(hotspot_jni, PushLocalFrame__return, JNI_ERR);
#else /* USDT2 */
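
Reviewer note: the old && condition is unsatisfiable (a capacity cannot be both negative and above the maximum), so negative capacities slipped through; in sketch form:

    // buggy:  (capacity < 0 && capacity > MAX)  -- never true, -1 passed
    // fixed:  (capacity < 0 || capacity > MAX)  -- -1 now returns JNI_ERR
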
@@ -1337,6 +1336,7 @@ static void jni_invoke_nonstatic(JNIEnv *env, JavaValue* result, jobject receive
if (call_type == JNI_VIRTUAL) {
// jni_GetMethodID makes sure class is linked and initialized
// so m should have a valid vtable index.
+ assert(!m->has_itable_index(), "");
int vtbl_index = m->vtable_index();
if (vtbl_index != Method::nonvirtual_vtable_index) {
Klass* k = h_recv->klass();
@@ -1356,12 +1356,7 @@ static void jni_invoke_nonstatic(JNIEnv *env, JavaValue* result, jobject receive
// interface call
KlassHandle h_holder(THREAD, holder);
- int itbl_index = m->cached_itable_index();
- if (itbl_index == -1) {
- itbl_index = klassItable::compute_itable_index(m);
- m->set_cached_itable_index(itbl_index);
- // the above may have grabbed a lock, 'm' and anything non-handlized can't be used again
- }
+ int itbl_index = m->itable_index();
Klass* k = h_recv->klass();
selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK);
}
@@ -3235,19 +3230,22 @@ JNI_QUICK_ENTRY(const jchar*, jni_GetStringChars(
HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(
env, string, (uintptr_t *) isCopy);
#endif /* USDT2 */
- //%note jni_5
- if (isCopy != NULL) {
- *isCopy = JNI_TRUE;
- }
oop s = JNIHandles::resolve_non_null(string);
int s_len = java_lang_String::length(s);
typeArrayOop s_value = java_lang_String::value(s);
int s_offset = java_lang_String::offset(s);
- jchar* buf = NEW_C_HEAP_ARRAY(jchar, s_len + 1, mtInternal); // add one for zero termination
- if (s_len > 0) {
- memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
+ jchar* buf = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal); // add one for zero termination
+ /* JNI Specification states return NULL on OOM */
+ if (buf != NULL) {
+ if (s_len > 0) {
+ memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
+ }
+ buf[s_len] = 0;
+ //%note jni_5
+ if (isCopy != NULL) {
+ *isCopy = JNI_TRUE;
+ }
}
- buf[s_len] = 0;
#ifndef USDT2
DTRACE_PROBE1(hotspot_jni, GetStringChars__return, buf);
#else /* USDT2 */
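
Reviewer note: the same return-NULL-on-OOM shape recurs in the GetStringUTFChars and Get<Type>ArrayElements hunks below; the common pattern, in sketch form (T and len stand in for the concrete types):

    // allocate-or-NULL, fill and report the copy only on success
    T* buf = NEW_C_HEAP_ARRAY_RETURN_NULL(T, len + 1, mtInternal);
    if (buf != NULL) {
      /* copy the payload into buf */
      if (isCopy != NULL) *isCopy = JNI_TRUE;
    }
    return buf;  // NULL signals OOM per the JNI specification
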
@@ -3336,9 +3334,14 @@ JNI_ENTRY(const char*, jni_GetStringUTFChars(JNIEnv *env, jstring string, jboole
#endif /* USDT2 */
oop java_string = JNIHandles::resolve_non_null(string);
size_t length = java_lang_String::utf8_length(java_string);
- char* result = AllocateHeap(length + 1, mtInternal);
- java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
- if (isCopy != NULL) *isCopy = JNI_TRUE;
+ /* JNI Specification states return NULL on OOM */
+ char* result = AllocateHeap(length + 1, mtInternal, 0, AllocFailStrategy::RETURN_NULL);
+ if (result != NULL) {
+ java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
+ if (isCopy != NULL) {
+ *isCopy = JNI_TRUE;
+ }
+ }
#ifndef USDT2
DTRACE_PROBE1(hotspot_jni, GetStringUTFChars__return, result);
#else /* USDT2 */
@@ -3592,11 +3595,16 @@ JNI_QUICK_ENTRY(ElementType*, \
* Avoid asserts in typeArrayOop. */ \
result = (ElementType*)get_bad_address(); \
} else { \
- result = NEW_C_HEAP_ARRAY(ElementType, len, mtInternal); \
- /* copy the array to the c chunk */ \
- memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \
+ /* JNI Specification states return NULL on OOM */ \
+ result = NEW_C_HEAP_ARRAY_RETURN_NULL(ElementType, len, mtInternal); \
+ if (result != NULL) { \
+ /* copy the array to the c chunk */ \
+ memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \
+ if (isCopy) { \
+ *isCopy = JNI_TRUE; \
+ } \
+ } \
} \
- if (isCopy) *isCopy = JNI_TRUE; \
DTRACE_PROBE1(hotspot_jni, Get##Result##ArrayElements__return, result);\
return result; \
JNI_END
@@ -3629,11 +3637,16 @@ JNI_QUICK_ENTRY(ElementType*, \
* Avoid asserts in typeArrayOop. */ \
result = (ElementType*)get_bad_address(); \
} else { \
- result = NEW_C_HEAP_ARRAY(ElementType, len, mtInternal); \
- /* copy the array to the c chunk */ \
- memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \
+ /* JNI Specification states return NULL on OOM */ \
+ result = NEW_C_HEAP_ARRAY_RETURN_NULL(ElementType, len, mtInternal); \
+ if (result != NULL) { \
+ /* copy the array to the c chunk */ \
+ memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \
+ if (isCopy) { \
+ *isCopy = JNI_TRUE; \
+ } \
+ } \
} \
- if (isCopy) *isCopy = JNI_TRUE; \
ReturnProbe; \
return result; \
JNI_END
@@ -5014,8 +5027,13 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
#ifndef PRODUCT
+#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_interface/collectedHeap.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/heapRegionRemSet.hpp"
+#endif
#include "utilities/quickSort.hpp"
+#include "utilities/ostream.hpp"
#if INCLUDE_VM_STRUCTS
#include "runtime/vmStructs.hpp"
#endif
@@ -5024,17 +5042,36 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
tty->print_cr("Running test: " #unit_test_function_call); \
unit_test_function_call
+// Forward declaration
+void TestReservedSpace_test();
+void TestReserveMemorySpecial_test();
+void TestVirtualSpace_test();
+void TestMetaspaceAux_test();
+#if INCLUDE_ALL_GCS
+void TestG1BiasedArray_test();
+#endif
+
void execute_internal_vm_tests() {
if (ExecuteInternalVMTests) {
tty->print_cr("Running internal VM tests");
+ run_unit_test(TestReservedSpace_test());
+ run_unit_test(TestReserveMemorySpecial_test());
+ run_unit_test(TestVirtualSpace_test());
+ run_unit_test(TestMetaspaceAux_test());
run_unit_test(GlobalDefinitions::test_globals());
+ run_unit_test(GCTimerAllTest::all());
run_unit_test(arrayOopDesc::test_max_array_length());
run_unit_test(CollectedHeap::test_is_in());
run_unit_test(QuickSort::test_quick_sort());
run_unit_test(AltHashing::test_alt_hash());
+ run_unit_test(test_loggc_filename());
#if INCLUDE_VM_STRUCTS
run_unit_test(VMStructs::test());
#endif
+#if INCLUDE_ALL_GCS
+ run_unit_test(TestG1BiasedArray_test());
+ run_unit_test(HeapRegionRemSet::test_prt());
+#endif
tty->print_cr("All internal VM tests passed");
}
}
@@ -5090,7 +5127,7 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, v
// function used to determine this will always return false. Atomic::xchg
// does not have this problem.
if (Atomic::xchg(1, &vm_created) == 1) {
- return JNI_ERR; // already created, or create attempt in progress
+ return JNI_EEXIST; // already created, or create attempt in progress
}
if (Atomic::xchg(0, &safe_to_recreate_vm) == 0) {
return JNI_ERR; // someone tried and failed and retry not allowed.
@@ -5125,13 +5162,27 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, v
JvmtiExport::post_thread_start(thread);
}
- EVENT_BEGIN(TraceEventThreadStart, event);
- EVENT_COMMIT(event,
- EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
+ EventThreadStart event;
+ if (event.should_commit()) {
+ event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
+ event.commit();
+ }
+
+#ifndef PRODUCT
+ #ifndef TARGET_OS_FAMILY_windows
+ #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
+ #endif
// Check if we should compile all classes on bootclasspath
- NOT_PRODUCT(if (CompileTheWorld) ClassLoader::compile_the_world();)
- NOT_PRODUCT(if (ReplayCompiles) ciReplay::replay(thread);)
+ if (CompileTheWorld) ClassLoader::compile_the_world();
+ if (ReplayCompiles) ciReplay::replay(thread);
+
+ // Some platforms (like Win*) need a wrapper around these test
+ // functions in order to properly handle error conditions.
+ CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(test_error_handler);
+ CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(execute_internal_vm_tests);
+#endif
+
// Since this is not a JVM_ENTRY we have to set the thread state manually before leaving.
ThreadStateTransition::transition_and_fence(thread, _thread_in_vm, _thread_in_native);
} else {
@@ -5148,8 +5199,6 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, v
OrderAccess::release_store(&vm_created, 0);
}
- NOT_PRODUCT(test_error_handler(ErrorHandlerTest));
- NOT_PRODUCT(execute_internal_vm_tests());
return result;
}
@@ -5328,9 +5377,11 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae
JvmtiExport::post_thread_start(thread);
}
- EVENT_BEGIN(TraceEventThreadStart, event);
- EVENT_COMMIT(event,
- EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
+ EventThreadStart event;
+ if (event.should_commit()) {
+ event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
+ event.commit();
+ }
*(JNIEnv**)penv = thread->jni_environment();
diff --git a/src/share/vm/prims/jniCheck.cpp b/src/share/vm/prims/jniCheck.cpp
index 32692669b..b45da91e7 100644
--- a/src/share/vm/prims/jniCheck.cpp
+++ b/src/share/vm/prims/jniCheck.cpp
@@ -129,6 +129,7 @@ static const char * fatal_wrong_class_or_method = "Wrong object class or methodI
static const char * fatal_non_weak_method = "non-weak methodID passed to JNI call";
static const char * fatal_unknown_array_object = "Unknown array object passed to JNI array operations";
static const char * fatal_object_array_expected = "Object array expected but not received for JNI array operation";
+static const char * fatal_prim_type_array_expected = "Primitive type array expected but not received for JNI array operation";
static const char * fatal_non_array = "Non-array passed to JNI array operations";
static const char * fatal_element_type_mismatch = "Array element type mismatch in JNI";
static const char * fatal_should_be_static = "Non-static field ID passed to JNI";
@@ -281,30 +282,49 @@ checkString(JavaThread* thr, jstring js)
ReportJNIFatalError(thr, fatal_non_string);
}
-static inline void
-checkArray(JavaThread* thr, jarray jArray, int elementType)
+static inline arrayOop
+check_is_array(JavaThread* thr, jarray jArray)
{
ASSERT_OOPS_ALLOWED;
arrayOop aOop;
aOop = (arrayOop)jniCheck::validate_object(thr, jArray);
- if (aOop == NULL || !aOop->is_array())
+ if (aOop == NULL || !aOop->is_array()) {
ReportJNIFatalError(thr, fatal_non_array);
+ }
+ return aOop;
+}
- if (elementType != -1) {
- if (aOop->is_typeArray()) {
- BasicType array_type = TypeArrayKlass::cast(aOop->klass())->element_type();
- if (array_type != elementType)
- ReportJNIFatalError(thr, fatal_element_type_mismatch);
- } else if (aOop->is_objArray()) {
- if ( T_OBJECT != elementType)
- ReportJNIFatalError(thr, fatal_object_array_expected);
- } else {
- ReportJNIFatalError(thr, fatal_unknown_array_object);
- }
+static inline arrayOop
+check_is_primitive_array(JavaThread* thr, jarray jArray) {
+ arrayOop aOop = check_is_array(thr, jArray);
+
+ if (!aOop->is_typeArray()) {
+ ReportJNIFatalError(thr, fatal_prim_type_array_expected);
+ }
+ return aOop;
+}
+
+static inline void
+check_primitive_array_type(JavaThread* thr, jarray jArray, BasicType elementType)
+{
+ BasicType array_type;
+ arrayOop aOop;
+
+ aOop = check_is_primitive_array(thr, jArray);
+ array_type = TypeArrayKlass::cast(aOop->klass())->element_type();
+ if (array_type != elementType) {
+ ReportJNIFatalError(thr, fatal_element_type_mismatch);
}
}
+static inline void
+check_is_obj_array(JavaThread* thr, jarray jArray) {
+ arrayOop aOop = check_is_array(thr, jArray);
+ if (!aOop->is_objArray()) {
+ ReportJNIFatalError(thr, fatal_object_array_expected);
+ }
+}
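
Reviewer note: the old checkArray(thr, array, elementType) calls map onto the new helpers as the call-site hunks below show:

    //  checkArray(thr, array, -1)         -> check_is_array             (GetArrayLength)
    //  checkArray(thr, array, T_OBJECT)   -> check_is_obj_array         (object element access)
    //  checkArray(thr, array, ElementTag) -> check_primitive_array_type (typed element access)
    //  checkArray(thr, array, -1)         -> check_is_primitive_array   (primitive-critical paths)
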
oop jniCheck::validate_handle(JavaThread* thr, jobject obj) {
if (JNIHandles::is_frame_handle(thr, obj) ||
@@ -1420,7 +1440,7 @@ JNI_ENTRY_CHECKED(jsize,
jarray array))
functionEnter(thr);
IN_VM(
- checkArray(thr, array, -1);
+ check_is_array(thr, array);
)
jsize result = UNCHECKED()->GetArrayLength(env,array);
functionExit(env);
@@ -1444,7 +1464,7 @@ JNI_ENTRY_CHECKED(jobject,
jsize index))
functionEnter(thr);
IN_VM(
- checkArray(thr, array, T_OBJECT);
+ check_is_obj_array(thr, array);
)
jobject result = UNCHECKED()->GetObjectArrayElement(env,array,index);
functionExit(env);
@@ -1458,7 +1478,7 @@ JNI_ENTRY_CHECKED(void,
jobject val))
functionEnter(thr);
IN_VM(
- checkArray(thr, array, T_OBJECT);
+ check_is_obj_array(thr, array);
)
UNCHECKED()->SetObjectArrayElement(env,array,index,val);
functionExit(env);
@@ -1490,7 +1510,7 @@ JNI_ENTRY_CHECKED(ElementType *, \
jboolean *isCopy)) \
functionEnter(thr); \
IN_VM( \
- checkArray(thr, array, ElementTag); \
+ check_primitive_array_type(thr, array, ElementTag); \
) \
ElementType *result = UNCHECKED()->Get##Result##ArrayElements(env, \
array, \
@@ -1516,7 +1536,7 @@ JNI_ENTRY_CHECKED(void, \
jint mode)) \
functionEnterExceptionAllowed(thr); \
IN_VM( \
- checkArray(thr, array, ElementTag); \
+ check_primitive_array_type(thr, array, ElementTag); \
ASSERT_OOPS_ALLOWED; \
typeArrayOop a = typeArrayOop(JNIHandles::resolve_non_null(array)); \
/* cannot check validity of copy, unless every request is logged by
@@ -1546,7 +1566,7 @@ JNI_ENTRY_CHECKED(void, \
ElementType *buf)) \
functionEnter(thr); \
IN_VM( \
- checkArray(thr, array, ElementTag); \
+ check_primitive_array_type(thr, array, ElementTag); \
) \
UNCHECKED()->Get##Result##ArrayRegion(env,array,start,len,buf); \
functionExit(env); \
@@ -1570,7 +1590,7 @@ JNI_ENTRY_CHECKED(void, \
const ElementType *buf)) \
functionEnter(thr); \
IN_VM( \
- checkArray(thr, array, ElementTag); \
+ check_primitive_array_type(thr, array, ElementTag); \
) \
UNCHECKED()->Set##Result##ArrayRegion(env,array,start,len,buf); \
functionExit(env); \
@@ -1672,7 +1692,7 @@ JNI_ENTRY_CHECKED(void *,
jboolean *isCopy))
functionEnterCritical(thr);
IN_VM(
- checkArray(thr, array, -1);
+ check_is_primitive_array(thr, array);
)
void *result = UNCHECKED()->GetPrimitiveArrayCritical(env, array, isCopy);
functionExit(env);
@@ -1686,7 +1706,7 @@ JNI_ENTRY_CHECKED(void,
jint mode))
functionEnterCriticalExceptionAllowed(thr);
IN_VM(
- checkArray(thr, array, -1);
+ check_is_primitive_array(thr, array);
)
/* The Hotspot JNI code does not use the parameters, so just check the
* array parameter as a minor sanity check
diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp
index 5c31ea1e5..d4fa736bd 100644
--- a/src/share/vm/prims/jvm.cpp
+++ b/src/share/vm/prims/jvm.cpp
@@ -59,6 +59,7 @@
#include "services/attachListener.hpp"
#include "services/management.hpp"
#include "services/threadService.hpp"
+#include "trace/tracing.hpp"
#include "utilities/copy.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/dtrace.hpp"
@@ -1072,11 +1073,7 @@ JVM_ENTRY(jobjectArray, JVM_GetClassSigners(JNIEnv *env, jclass cls))
return NULL;
}
- Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
- objArrayOop signers = NULL;
- if (k->oop_is_instance()) {
- signers = InstanceKlass::cast(k)->signers();
- }
+ objArrayOop signers = java_lang_Class::signers(JNIHandles::resolve_non_null(cls));
// If there are no signers set in the class, or if the class
// is an array, return NULL.
@@ -1102,7 +1099,7 @@ JVM_ENTRY(void, JVM_SetClassSigners(JNIEnv *env, jclass cls, jobjectArray signer
// be called with an array. Only the bootstrap loader creates arrays.
Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
if (k->oop_is_instance()) {
- InstanceKlass::cast(k)->set_signers(objArrayOop(JNIHandles::resolve(signers)));
+ java_lang_Class::set_signers(k->java_mirror(), objArrayOop(JNIHandles::resolve(signers)));
}
}
JVM_END
@@ -1119,31 +1116,61 @@ JVM_ENTRY(jobject, JVM_GetProtectionDomain(JNIEnv *env, jclass cls))
return NULL;
}
- Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls));
- return (jobject) JNIHandles::make_local(env, k->protection_domain());
+ oop pd = java_lang_Class::protection_domain(JNIHandles::resolve(cls));
+ return (jobject) JNIHandles::make_local(env, pd);
JVM_END
-// Obsolete since 1.2 (Class.setProtectionDomain removed), although
-// still defined in core libraries as of 1.5.
-JVM_ENTRY(void, JVM_SetProtectionDomain(JNIEnv *env, jclass cls, jobject protection_domain))
- JVMWrapper("JVM_SetProtectionDomain");
- if (JNIHandles::resolve(cls) == NULL) {
- THROW(vmSymbols::java_lang_NullPointerException());
- }
- if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {
- // Call is ignored for primitive types
- Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls));
+static bool is_authorized(Handle context, instanceKlassHandle klass, TRAPS) {
+ // If there is a security manager and protection domain, check the access
+ // in the protection domain, otherwise it is authorized.
+ if (java_lang_System::has_security_manager()) {
- // cls won't be an array, as this called only from ClassLoader.defineClass
- if (k->oop_is_instance()) {
- oop pd = JNIHandles::resolve(protection_domain);
- assert(pd == NULL || pd->is_oop(), "just checking");
- InstanceKlass::cast(k)->set_protection_domain(pd);
+ // For bootstrapping: if the protection_domain_implies method is not yet
+ // available in the JDK, allow this context to revert to the older behavior.
+ // In this case the isAuthorized field in AccessControlContext is also not
+ // present.
+ if (Universe::protection_domain_implies_method() == NULL) {
+ return true;
+ }
+
+ // Whitelist certain access control contexts
+ if (java_security_AccessControlContext::is_authorized(context)) {
+ return true;
+ }
+
+ oop prot = klass->protection_domain();
+ if (prot != NULL) {
+ // Call pd.implies(new SecurityPermission("createAccessControlContext"))
+ // in the new wrapper.
+ methodHandle m(THREAD, Universe::protection_domain_implies_method());
+ Handle h_prot(THREAD, prot);
+ JavaValue result(T_BOOLEAN);
+ JavaCallArguments args(h_prot);
+ JavaCalls::call(&result, m, &args, CHECK_false);
+ return (result.get_jboolean() != 0);
}
}
-JVM_END
+ return true;
+}
+// Create an AccessControlContext with a protection domain with null codesource
+// and null permissions - which gives no permissions.
+oop create_dummy_access_control_context(TRAPS) {
+ InstanceKlass* pd_klass = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass());
+ // new ProtectionDomain(null,null);
+ oop null_protection_domain = pd_klass->allocate_instance(CHECK_NULL);
+ Handle null_pd(THREAD, null_protection_domain);
+
+ // new ProtectionDomain[] {pd};
+ objArrayOop context = oopFactory::new_objArray(pd_klass, 1, CHECK_NULL);
+ context->obj_at_put(0, null_pd());
+
+ // new AccessControlContext(new ProtectionDomain[] {pd})
+ objArrayHandle h_context(THREAD, context);
+ oop result = java_security_AccessControlContext::create(h_context, false, Handle(), CHECK_NULL);
+ return result;
+}
JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, jobject context, jboolean wrapException))
JVMWrapper("JVM_DoPrivileged");
@@ -1152,8 +1179,29 @@ JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, job
THROW_MSG_0(vmSymbols::java_lang_NullPointerException(), "Null action");
}
- // Stack allocated list of privileged stack elements
- PrivilegedElement pi;
+ // Compute the frame initiating the do privileged operation and set up the privileged stack
+ vframeStream vfst(thread);
+ vfst.security_get_caller_frame(1);
+
+ if (vfst.at_end()) {
+ THROW_MSG_0(vmSymbols::java_lang_InternalError(), "no caller?");
+ }
+
+ Method* method = vfst.method();
+ instanceKlassHandle klass (THREAD, method->method_holder());
+
+ // If a context was supplied, check that it is authorized; if not, replace
+ // it with a no-permission context.
+ Handle h_context;
+ if (context != NULL) {
+ h_context = Handle(THREAD, JNIHandles::resolve(context));
+ bool authorized = is_authorized(h_context, klass, CHECK_NULL);
+ if (!authorized) {
+ // Create an unprivileged access control object and call its run function
+ // instead.
+ oop noprivs = create_dummy_access_control_context(CHECK_NULL);
+ h_context = Handle(THREAD, noprivs);
+ }
+ }
// Check that action object understands "Object run()"
Handle object (THREAD, JNIHandles::resolve(action));
@@ -1167,12 +1215,10 @@ JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, job
THROW_MSG_0(vmSymbols::java_lang_InternalError(), "No run method");
}
- // Compute the frame initiating the do privileged operation and setup the privileged stack
- vframeStream vfst(thread);
- vfst.security_get_caller_frame(1);
-
+ // Stack allocated list of privileged stack elements
+ PrivilegedElement pi;
if (!vfst.at_end()) {
- pi.initialize(&vfst, JNIHandles::resolve(context), thread->privileged_stack_top(), CHECK_NULL);
+ pi.initialize(&vfst, h_context(), thread->privileged_stack_top(), CHECK_NULL);
thread->set_privileged_stack_top(&pi);
}
@@ -1710,7 +1756,7 @@ JVM_ENTRY(jobjectArray, JVM_GetMethodParameters(JNIEnv *env, jobject method))
for (int i = 0; i < num_params; i++) {
MethodParametersElement* params = mh->method_parameters_start();
// For a 0 index, give a NULL symbol
- Symbol* const sym = 0 != params[i].name_cp_index ?
+ Symbol* sym = 0 != params[i].name_cp_index ?
mh->constants()->symbol_at(params[i].name_cp_index) : NULL;
int flags = params[i].flags;
oop param = Reflection::new_parameter(reflected_method, i, sym,
@@ -1778,7 +1824,7 @@ JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredFields(JNIEnv *env, jclass ofClass,
}
if (!publicOnly || fs.access_flags().is_public()) {
- fd.initialize(k(), fs.index());
+ fd.reinitialize(k(), fs.index());
oop field = Reflection::new_field(&fd, UseNewReflection, CHECK_NULL);
result->obj_at_put(out_idx, field);
++out_idx;
@@ -1789,16 +1835,27 @@ JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredFields(JNIEnv *env, jclass ofClass,
}
JVM_END
-JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass, jboolean publicOnly))
-{
- JVMWrapper("JVM_GetClassDeclaredMethods");
+static bool select_method(methodHandle method, bool want_constructor) {
+ if (want_constructor) {
+ return (method->is_initializer() && !method->is_static());
+ } else {
+ return (!method->is_initializer() && !method->is_overpass());
+ }
+}
+
+static jobjectArray get_class_declared_methods_helper(
+ JNIEnv *env,
+ jclass ofClass, jboolean publicOnly,
+ bool want_constructor,
+ Klass* klass, TRAPS) {
+
JvmtiVMObjectAllocEventCollector oam;
// Exclude primitive types and array types
if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
|| java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) {
// Return empty array
- oop res = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), 0, CHECK_NULL);
+ oop res = oopFactory::new_objArray(klass, 0, CHECK_NULL);
return (jobjectArray) JNIHandles::make_local(env, res);
}
@@ -1809,87 +1866,67 @@ JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass,
Array<Method*>* methods = k->methods();
int methods_length = methods->length();
+
+ // Save original method_idnum in case of redefinition, which can change
+ // the idnum of obsolete methods. The new method will have the same idnum
+ // but if we refresh the methods array, the counts will be wrong.
+ ResourceMark rm(THREAD);
+ GrowableArray<int>* idnums = new GrowableArray<int>(methods_length);
int num_methods = 0;
- int i;
- for (i = 0; i < methods_length; i++) {
+ for (int i = 0; i < methods_length; i++) {
methodHandle method(THREAD, methods->at(i));
- if (!method->is_initializer() && !method->is_overpass()) {
+ if (select_method(method, want_constructor)) {
if (!publicOnly || method->is_public()) {
+ idnums->push(method->method_idnum());
++num_methods;
}
}
}
// Allocate result
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), num_methods, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(klass, num_methods, CHECK_NULL);
objArrayHandle result (THREAD, r);
- int out_idx = 0;
- for (i = 0; i < methods_length; i++) {
- methodHandle method(THREAD, methods->at(i));
- if (!method->is_initializer() && !method->is_overpass()) {
- if (!publicOnly || method->is_public()) {
- oop m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL);
- result->obj_at_put(out_idx, m);
- ++out_idx;
+ // Now store the methods that we selected above, but look them up by their
+ // idnum in case of redefinition. The methods can be redefined at any
+ // safepoint, e.g. above when allocating the oop array and below when
+ // creating the reflect objects.
+ for (int i = 0; i < num_methods; i++) {
+ methodHandle method(THREAD, k->method_with_idnum(idnums->at(i)));
+ if (method.is_null()) {
+ // The method may have been deleted; this API appears to tolerate NULL.
+ // Otherwise we should probably put in a method that throws NoSuchMethodError.
+ result->obj_at_put(i, NULL);
+ } else {
+ oop m;
+ if (want_constructor) {
+ m = Reflection::new_constructor(method, CHECK_NULL);
+ } else {
+ m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL);
}
+ result->obj_at_put(i, m);
}
}
- assert(out_idx == num_methods, "just checking");
+
return (jobjectArray) JNIHandles::make_local(env, result());
}
+
+JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass, jboolean publicOnly))
+{
+ JVMWrapper("JVM_GetClassDeclaredMethods");
+ return get_class_declared_methods_helper(env, ofClass, publicOnly,
+ /*want_constructor*/ false,
+ SystemDictionary::reflect_Method_klass(), THREAD);
+}
JVM_END
JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredConstructors(JNIEnv *env, jclass ofClass, jboolean publicOnly))
{
JVMWrapper("JVM_GetClassDeclaredConstructors");
- JvmtiVMObjectAllocEventCollector oam;
-
- // Exclude primitive types and array types
- if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
- || java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) {
- // Return empty array
- oop res = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), 0 , CHECK_NULL);
- return (jobjectArray) JNIHandles::make_local(env, res);
- }
-
- instanceKlassHandle k(THREAD, java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass)));
-
- // Ensure class is linked
- k->link_class(CHECK_NULL);
-
- Array<Method*>* methods = k->methods();
- int methods_length = methods->length();
- int num_constructors = 0;
-
- int i;
- for (i = 0; i < methods_length; i++) {
- methodHandle method(THREAD, methods->at(i));
- if (method->is_initializer() && !method->is_static()) {
- if (!publicOnly || method->is_public()) {
- ++num_constructors;
- }
- }
- }
-
- // Allocate result
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), num_constructors, CHECK_NULL);
- objArrayHandle result(THREAD, r);
-
- int out_idx = 0;
- for (i = 0; i < methods_length; i++) {
- methodHandle method(THREAD, methods->at(i));
- if (method->is_initializer() && !method->is_static()) {
- if (!publicOnly || method->is_public()) {
- oop m = Reflection::new_constructor(method, CHECK_NULL);
- result->obj_at_put(out_idx, m);
- ++out_idx;
- }
- }
- }
- assert(out_idx == num_constructors, "just checking");
- return (jobjectArray) JNIHandles::make_local(env, result());
+ return get_class_declared_methods_helper(env, ofClass, publicOnly,
+ /*want_constructor*/ true,
+ SystemDictionary::reflect_Constructor_klass(), THREAD);
}
JVM_END
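
Sketch (stand-alone model, not VM code): the refactor above folds two nearly identical scan loops into one helper driven by a predicate. std::vector stands in for the oop array, and overpass filtering is omitted.

#include <vector>

struct Method { bool is_initializer; bool is_static; bool is_public; };

// Mirrors the select_method() predicate introduced above.
static bool select_method(const Method& m, bool want_constructor) {
  return want_constructor ? (m.is_initializer && !m.is_static)
                          : !m.is_initializer;
}

// One helper serves both "declared methods" and "declared constructors".
static std::vector<const Method*>
declared(const std::vector<Method>& methods, bool public_only, bool want_ctor) {
  std::vector<const Method*> out;
  for (const Method& m : methods) {
    if (select_method(m, want_ctor) && (!public_only || m.is_public)) {
      out.push_back(&m);
    }
  }
  return out;
}
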
@@ -3003,6 +3040,8 @@ JVM_ENTRY(void, JVM_Sleep(JNIEnv* env, jclass threadClass, jlong millis))
millis);
#endif /* USDT2 */
+ EventThreadSleep event;
+
if (millis == 0) {
// When ConvertSleepToYield is on, this matches the classic VM implementation of
// JVM_Sleep. Critical for similar threading behaviour (Win32)
@@ -3023,6 +3062,10 @@ JVM_ENTRY(void, JVM_Sleep(JNIEnv* env, jclass threadClass, jlong millis))
// An asynchronous exception (e.g., ThreadDeathException) could have been thrown on
// us while we were sleeping. We do not overwrite those.
if (!HAS_PENDING_EXCEPTION) {
+ if (event.should_commit()) {
+ event.set_time(millis);
+ event.commit();
+ }
#ifndef USDT2
HS_DTRACE_PROBE1(hotspot, thread__sleep__end,1);
#else /* USDT2 */
@@ -3036,6 +3079,10 @@ JVM_ENTRY(void, JVM_Sleep(JNIEnv* env, jclass threadClass, jlong millis))
}
thread->osthread()->set_state(old_state);
}
+ if (event.should_commit()) {
+ event.set_time(millis);
+ event.commit();
+ }
#ifndef USDT2
HS_DTRACE_PROBE1(hotspot, thread__sleep__end,0);
#else /* USDT2 */
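
Sketch (stand-alone model, not the tracing framework): the EventThreadSleep instrumentation above follows the usual guard-then-commit event pattern. The ModelEvent type below is invented to show the shape; the real event class comes from trace/tracing.hpp.

#include <chrono>
#include <cstdio>

struct ModelEvent {
  std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
  long long requested_ms = 0;
  static bool enabled;                 // toggled by the tracing framework
  bool should_commit() const { return enabled; }
  void set_time(long long ms) { requested_ms = ms; }
  void commit() const {
    auto elapsed = std::chrono::steady_clock::now() - start;
    std::printf("ThreadSleep: requested=%lldms elapsed=%lldms\n", requested_ms,
        (long long)std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count());
  }
};
bool ModelEvent::enabled = true;

// Usage mirrors the hunks above: construct the event early, then commit on
// both the normal and the interrupted exit paths.
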
@@ -3234,24 +3281,10 @@ JVM_ENTRY(jobject, JVM_CurrentClassLoader(JNIEnv *env))
JVM_END
-// Utility object for collecting method holders walking down the stack
-class KlassLink: public ResourceObj {
- public:
- KlassHandle klass;
- KlassLink* next;
-
- KlassLink(KlassHandle k) { klass = k; next = NULL; }
-};
-
-
JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env))
JVMWrapper("JVM_GetClassContext");
ResourceMark rm(THREAD);
JvmtiVMObjectAllocEventCollector oam;
- // Collect linked list of (handles to) method holders
- KlassLink* first = NULL;
- KlassLink* last = NULL;
- int depth = 0;
vframeStream vfst(thread);
if (SystemDictionary::reflect_CallerSensitive_klass() != NULL) {
@@ -3265,32 +3298,23 @@ JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env))
}
// Collect method holders
+ GrowableArray<KlassHandle>* klass_array = new GrowableArray<KlassHandle>();
for (; !vfst.at_end(); vfst.security_next()) {
Method* m = vfst.method();
// Native frames are not returned
if (!m->is_ignored_by_security_stack_walk() && !m->is_native()) {
Klass* holder = m->method_holder();
assert(holder->is_klass(), "just checking");
- depth++;
- KlassLink* l = new KlassLink(KlassHandle(thread, holder));
- if (first == NULL) {
- first = last = l;
- } else {
- last->next = l;
- last = l;
- }
+ klass_array->append(holder);
}
}
// Create result array of type [Ljava/lang/Class;
- objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), depth, CHECK_NULL);
+ objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), klass_array->length(), CHECK_NULL);
// Fill in mirrors corresponding to method holders
- int index = 0;
- while (first != NULL) {
- result->obj_at_put(index++, first->klass()->java_mirror());
- first = first->next;
+ for (int i = 0; i < klass_array->length(); i++) {
+ result->obj_at_put(i, klass_array->at(i)->java_mirror());
}
- assert(index == depth, "just checking");
return (jobjectArray) JNIHandles::make_local(env, result);
JVM_END
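
Sketch (stand-alone model): JVM_GetClassContext above replaces the hand-rolled KlassLink linked list and manual depth counter with an appended-to growable array. std::vector stands in for HotSpot's resource-allocated GrowableArray.

#include <vector>

struct Klass { const char* name; };

static std::vector<const Klass*>
collect_holders(const Klass* const* frames, int n) {
  std::vector<const Klass*> holders;
  for (int i = 0; i < n; i++) {
    holders.push_back(frames[i]);   // mirrors klass_array->append(holder)
  }
  return holders;                   // length() replaces the manual depth count
}
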
diff --git a/src/share/vm/prims/jvm.h b/src/share/vm/prims/jvm.h
index 486b13531..6248f4d79 100644
--- a/src/share/vm/prims/jvm.h
+++ b/src/share/vm/prims/jvm.h
@@ -471,9 +471,6 @@ JVM_SetClassSigners(JNIEnv *env, jclass cls, jobjectArray signers);
JNIEXPORT jobject JNICALL
JVM_GetProtectionDomain(JNIEnv *env, jclass cls);
-JNIEXPORT void JNICALL
-JVM_SetProtectionDomain(JNIEnv *env, jclass cls, jobject protection_domain);
-
JNIEXPORT jboolean JNICALL
JVM_IsArrayClass(JNIEnv *env, jclass cls);
diff --git a/src/share/vm/prims/jvmti.xml b/src/share/vm/prims/jvmti.xml
index dbd6735aa..98a0a0640 100644
--- a/src/share/vm/prims/jvmti.xml
+++ b/src/share/vm/prims/jvmti.xml
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="ISO-8859-1"?>
<?xml-stylesheet type="text/xsl" href="jvmti.xsl"?>
<!--
- Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
This code is free software; you can redistribute it and/or modify it
@@ -358,7 +358,7 @@
<specification label="JVM(TM) Tool Interface"
majorversion="1"
minorversion="2"
- microversion="2">
+ microversion="3">
<title subtitle="Version">
<tm>JVM</tm> Tool Interface
</title>
@@ -431,12 +431,48 @@
On the <tm>Solaris</tm> Operating Environment, an agent library is a shared
object (<code>.so</code> file).
<p/>
+
An agent may be started at VM startup by specifying the agent library
name using a <internallink id="starting">command line option</internallink>.
Some implementations may support a mechanism to <internallink id="onattach">
start agents</internallink> in the live <functionlink id="GetPhase">phase</functionlink>.
The details of how this is initiated are implementation specific.
</intro>
+
+ <intro id="entry point" label="Statically Linked Agents (since version 1.2.3)">
+
+ A native JVMTI Agent may be <i>statically linked</i> with the VM.
+ The manner in which the library and VM image are combined is
+ implementation-dependent.
+ An agent L whose image has been combined with the VM is defined as
+ <i>statically linked</i> if and only if the agent exports a function
+ called Agent_OnLoad_L.
+<p/>
+ If a <i>statically linked</i> agent L exports a function called
+ Agent_OnLoad_L and a function called Agent_OnLoad, the Agent_OnLoad
+ function will be ignored.
+ If an agent L is <i>statically linked</i>, an Agent_OnLoad_L
+ function will be invoked with the same arguments and expected return
+ value as specified for the Agent_OnLoad function.
+ An agent L that is <i>statically linked</i> will prohibit an agent of
+ the same name from being loaded dynamically.
+<p/>
+ The VM will invoke the Agent_OnUnload_L function of the agent, if such
+ a function is exported, at the same point during VM execution as it would
+ have called the dynamic entry point Agent_OnUnload. A statically linked
+ agent cannot be unloaded. The Agent_OnUnload_L function will still be
+ called to do any other agent shutdown-related tasks.
+ If a <i>statically linked</i> agent L exports a function called
+ Agent_OnUnload_L and a function called Agent_OnUnload, the Agent_OnUnload
+ function will be ignored.
+<p/>
+ If an agent L is <i>statically linked</i>, an Agent_OnAttach_L function
+ will be invoked with the same arguments and expected return value as
+ specified for the Agent_OnAttach function.
+ If a <i>statically linked</i> agent L exports a function called
+ Agent_OnAttach_L and a function called Agent_OnAttach, the Agent_OnAttach
+ function will be ignored.
+</intro>
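
A concrete illustration of the naming scheme specified above (hypothetical agent whose library basename is foo; options handling is elided):

#include <jni.h>
#include <jvmti.h>

// For a statically linked agent named foo, the VM resolves these exports
// instead of loading libfoo.so / foo.dll.
extern "C" JNIEXPORT jint JNICALL
Agent_OnLoad_foo(JavaVM* vm, char* options, void* reserved) {
  jvmtiEnv* jvmti = NULL;
  if (vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_2) != JNI_OK) {
    return JNI_ERR;   // any nonzero return terminates the VM
  }
  return JNI_OK;
}

extern "C" JNIEXPORT void JNICALL
Agent_OnUnload_foo(JavaVM* vm) {
  // Called at the usual unload point; the image itself is never unloaded.
}
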
<intro id="starting" label="Agent Command Line Options">
The term "command-line option" is used below to
@@ -455,7 +491,7 @@
<dd>
The name following <code>-agentlib:</code> is the name of the
library to load. Lookup of the library, both its full name and location,
- proceeds in a platform-specific manner.
+ proceeds in a platform-specific manner.
Typically, the <i>&lt;agent-lib-name&gt;</i> is expanded to an
operating system specific file name.
The <i>&lt;options&gt;</i> will be passed to the agent on start-up.
@@ -463,7 +499,11 @@
<code>-agentlib:foo=opt1,opt2</code> is specified, the VM will attempt to
load the shared library <code>foo.dll</code> from the system <code>PATH</code>
under <tm>Windows</tm> or <code>libfoo.so</code> from the
- <code>LD_LIBRARY_PATH</code> under the <tm>Solaris</tm> operating environment.
+ <code>LD_LIBRARY_PATH</code> under the <tm>Solaris</tm> operating
+ environment.
+ If the agent library is statically linked into the executable
+ then no actual loading takes place.
+ <p/>
</dd>
<dt><code>-agentpath:</code><i>&lt;path-to-agent&gt;</i><code>=</code><i>&lt;options&gt;</i></dt>
<dd>
@@ -473,11 +513,20 @@
The <i>&lt;options&gt;</i> will be passed to the agent on start-up.
For example, if the option
<code>-agentpath:c:\myLibs\foo.dll=opt1,opt2</code> is specified, the VM will attempt to
- load the shared library <code>c:\myLibs\foo.dll</code>.
+ load the shared library <code>c:\myLibs\foo.dll</code>. If the agent
+ library is statically linked into the executable
+ then no actual loading takes place.
+ <p/>
</dd>
</dl>
- The start-up routine <internallink id="onload"><code>Agent_OnLoad</code></internallink>
- in the library will be invoked.
+ For a dynamic shared library agent, the start-up routine
+ <internallink id="onload"><code>Agent_OnLoad</code></internallink>
+ in the library will be invoked. If the agent library is statically linked
+ into the executable then the system will attempt to invoke the
+ <code>Agent_OnLoad_&lt;agent-lib-name&gt;</code> entry point where
+ &lt;agent-lib-name&gt; is the basename of the
+ agent. In the above example <code>-agentpath:c:\myLibs\foo.dll=opt1,opt2</code>,
+ the system will attempt to find and call the <code>Agent_OnLoad_foo</code> start-up routine.
<p/>
Libraries loaded with <code>-agentlib:</code> or <code>-agentpath:</code>
will be searched for JNI native method implementations to facilitate the
@@ -502,11 +551,13 @@
If the agent is started in the <code>OnLoad</code>
<functionlink id="GetPhase">phase</functionlink> the function
<internallink id="onload"><code>Agent_OnLoad</code></internallink>
- will be invoked.
+ or <internallink id="onload"><code>Agent_OnLoad_L</code></internallink>
+ for statically linked agents will be invoked.
If the agent is started in the live
<functionlink id="GetPhase">phase</functionlink> the function
<internallink id="onattach"><code>Agent_OnAttach</code></internallink>
- will be invoked.
+ or <internallink id="onattach"><code>Agent_OnAttach_L</code></internallink>
+ for statically linked agents will be invoked.
Exactly one call to a start-up function is made per agent.
</intro>
@@ -516,6 +567,11 @@
<example>
JNIEXPORT jint JNICALL
Agent_OnLoad(JavaVM *vm, char *options, void *reserved)</example>
+ Or for a statically linked agent named 'L':
+ <example>
+JNIEXPORT jint JNICALL
+Agent_OnLoad_L(JavaVM *vm, char *options, void *reserved)</example>
+
The VM will start the agent by calling this function.
It will be called early enough in VM initialization that:
<ul>
@@ -531,7 +587,8 @@ Agent_OnLoad(JavaVM *vm, char *options, void *reserved)</example>
<li>no objects have been created</li>
</ul>
<p/>
- The VM will call the <code>Agent_OnLoad</code> function with
+ The VM will call the <code>Agent_OnLoad</code> or
+ <code>Agent_OnLoad_&lt;agent-lib-name&gt;</code> function with
<i>&lt;options&gt;</i> as the second argument -
that is, using the command-line option examples,
<code>"opt1,opt2"</code> will be passed to the <code>char *options</code>
@@ -540,7 +597,8 @@ Agent_OnLoad(JavaVM *vm, char *options, void *reserved)</example>
<internallink id="mUTF">modified UTF-8</internallink> string.
If <i>=&lt;options&gt;</i> is not specified,
a zero length string is passed to <code>options</code>.
- The lifespan of the <code>options</code> string is the <code>Agent_OnLoad</code>
+ The lifespan of the <code>options</code> string is the
+ <code>Agent_OnLoad</code> or <code>Agent_OnLoad_&lt;agent-lib-name&gt;</code>
call. If needed beyond this time the string or parts of the string must
be copied.
The period between when <code>Agent_OnLoad</code> is called and when it
@@ -570,7 +628,8 @@ Agent_OnLoad(JavaVM *vm, char *options, void *reserved)</example>
their functionality.
</rationale>
<p/>
- The return value from <code>Agent_OnLoad</code> is used to indicate an error.
+ The return value from <code>Agent_OnLoad</code> or
+ <code>Agent_OnLoad_&lt;agent-lib-name&gt;</code> is used to indicate an error.
Any value other than zero indicates an error and causes termination of the VM.
</intro>
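
The options lifespan rule above is a common source of agent bugs; a minimal sketch of the standard fix (copy the string before returning):

#include <jni.h>
#include <cstdlib>
#include <cstring>

static char* g_options = NULL;   // the agent's private copy

extern "C" JNIEXPORT jint JNICALL
Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
  // 'options' is only guaranteed valid during this call; a zero-length
  // string (never NULL per the spec) is passed when no options are given.
  const char* src = (options != NULL) ? options : "";   // defensive anyway
  size_t n = std::strlen(src) + 1;
  g_options = static_cast<char*>(std::malloc(n));
  if (g_options == NULL) return JNI_ERR;   // nonzero aborts VM startup
  std::memcpy(g_options, src, n);
  return JNI_OK;
}
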
@@ -587,6 +646,11 @@ Agent_OnLoad(JavaVM *vm, char *options, void *reserved)</example>
<example>
JNIEXPORT jint JNICALL
Agent_OnAttach(JavaVM* vm, char *options, void *reserved)</example>
+ Or for a statically linked agent named 'L':
+ <example>
+JNIEXPORT jint JNICALL
+Agent_OnAttach_L(JavaVM* vm, char *options, void *reserved)</example>
+
<p/>
The VM will start the agent by calling this function.
It will be called in the context of a thread
@@ -596,13 +660,14 @@ Agent_OnAttach(JavaVM* vm, char *options, void *reserved)</example>
</internallink> string.
If startup options were not provided, a zero length string is passed to
<code>options</code>. The lifespan of the <code>options</code> string is the
- <code>Agent_OnAttach</code> call. If needed beyond this time the string or parts of
- the string must be copied.
+ <code>Agent_OnAttach</code> or <code>Agent_OnAttach_&lt;agent-lib-name&gt;</code> call.
+ If needed beyond this time the string or parts of the string must be copied.
<p/>
Note that some <internallink id="capability">capabilities</internallink>
may not be available in the live phase.
<p/>
- The <code>Agent_OnAttach</code> function initializes the agent and returns a value
+ The <code>Agent_OnAttach</code> or
+ <code>Agent_OnAttach_&lt;agent-lib-name&gt;</code> function initializes the agent and returns a value
to the VM to indicate if an error occurred. Any value other than zero indicates an error.
An error does not cause the VM to terminate. Instead the VM ignores the error, or takes
some implementation specific action -- for example it might print an error to standard error,
@@ -615,8 +680,14 @@ Agent_OnAttach(JavaVM* vm, char *options, void *reserved)</example>
<example>
JNIEXPORT void JNICALL
Agent_OnUnload(JavaVM *vm)</example>
+ Or for a statically linked agent named 'L':
+ <example>
+JNIEXPORT void JNICALL
+Agent_OnUnload_L(JavaVM *vm)</example>
+
This function will be called by the VM when the library is about to be unloaded.
- The library will be unloaded and this function will be called if some platform specific
+ The library will be unloaded (unless it is statically linked into the
+ executable) and this function will be called if some platform specific
mechanism causes the unload (an unload mechanism is not specified in this document)
or the library is (in effect) unloaded by the termination of the VM whether through
normal termination or VM failure, including start-up failure.
@@ -625,8 +696,9 @@ Agent_OnUnload(JavaVM *vm)</example>
<eventlink id="VMDeath">VM Death event</eventlink>: for the VM Death event
to be sent, the VM must have run at least to the point of initialization and a valid
<jvmti/> environment must exist which has set a callback for VMDeath
- and enabled the event
- None of these are required for <code>Agent_OnUnload</code> and this function
+ and enabled the event.
+ None of these are required for <code>Agent_OnUnload</code> or
+ <code>Agent_OnUnload_&lt;agent-lib-name&gt;</code> and this function
is also called if the library is unloaded for other reasons.
In the case that a VM Death event is sent, it will be sent before this
function is called (assuming this function is called due to VM termination).
@@ -1897,7 +1969,7 @@ jvmtiEnv *jvmti;
</description>
</param>
<param id="monitor_info_ptr">
- <allocbuf outcount="owned_monitor_depth_count_ptr">
+ <allocbuf outcount="monitor_info_count_ptr">
<struct>jvmtiMonitorStackDepthInfo</struct>
</allocbuf>
<description>
@@ -10701,10 +10773,14 @@ myInit() {
<constants id="jvmtiPhase" label="Phases of execution" kind="enum">
<constant id="JVMTI_PHASE_ONLOAD" num="1">
<code>OnLoad</code> phase: while in the
- <internallink id="onload"><code>Agent_OnLoad</code></internallink> function.
+ <internallink id="onload"><code>Agent_OnLoad</code></internallink>
+ or, for statically linked agents, the <internallink id="onload">
+ <code>Agent_OnLoad_&lt;agent-lib-name&gt;
+ </code></internallink> function.
</constant>
<constant id="JVMTI_PHASE_PRIMORDIAL" num="2">
- Primordial phase: between return from <code>Agent_OnLoad</code> and the
+ Primordial phase: between return from <code>Agent_OnLoad</code>
+ or <code>Agent_OnLoad_&lt;agent-lib-name&gt;</code> and the
<code>VMStart</code> event.
</constant>
<constant id="JVMTI_PHASE_START" num="6">
@@ -14261,6 +14337,9 @@ typedef void (JNICALL *jvmtiEventVMInit)
<change date="11 October 2012" version="1.2.2">
Fixed the "HTTP" and "Missing Anchor" errors reported by the LinkCheck tool.
</change>
+ <change date="19 June 2013" version="1.2.3">
+ Added support for statically linked agents.
+ </change>
</changehistory>
</specification>
diff --git a/src/share/vm/prims/jvmtiEnvBase.hpp b/src/share/vm/prims/jvmtiEnvBase.hpp
index 929dcf222..265154683 100644
--- a/src/share/vm/prims/jvmtiEnvBase.hpp
+++ b/src/share/vm/prims/jvmtiEnvBase.hpp
@@ -406,7 +406,11 @@ public:
VMOp_Type type() const { return VMOp_GetCurrentContendedMonitor; }
jvmtiError result() { return _result; }
void doit() {
- _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,_java_thread,_owned_monitor_ptr);
+ _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+ if (Threads::includes(_java_thread) && !_java_thread->is_exiting() &&
+ _java_thread->threadObj() != NULL) {
+ _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,_java_thread,_owned_monitor_ptr);
+ }
}
};
diff --git a/src/share/vm/prims/jvmtiExport.cpp b/src/share/vm/prims/jvmtiExport.cpp
index 7a0dfa37c..8b0d5e133 100644
--- a/src/share/vm/prims/jvmtiExport.cpp
+++ b/src/share/vm/prims/jvmtiExport.cpp
@@ -41,6 +41,7 @@
#include "prims/jvmtiRawMonitor.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "prims/jvmtiThreadState.inline.hpp"
+#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/arguments.hpp"
#include "runtime/handles.hpp"
#include "runtime/interfaceSupport.hpp"
@@ -516,8 +517,7 @@ class JvmtiClassFileLoadHookPoster : public StackObj {
jint _curr_len;
unsigned char * _curr_data;
JvmtiEnv * _curr_env;
- jint * _cached_length_ptr;
- unsigned char ** _cached_data_ptr;
+ JvmtiCachedClassFileData ** _cached_class_file_ptr;
JvmtiThreadState * _state;
KlassHandle * _h_class_being_redefined;
JvmtiClassLoadKind _load_kind;
@@ -526,8 +526,7 @@ class JvmtiClassFileLoadHookPoster : public StackObj {
inline JvmtiClassFileLoadHookPoster(Symbol* h_name, Handle class_loader,
Handle h_protection_domain,
unsigned char **data_ptr, unsigned char **end_ptr,
- unsigned char **cached_data_ptr,
- jint *cached_length_ptr) {
+ JvmtiCachedClassFileData **cache_ptr) {
_h_name = h_name;
_class_loader = class_loader;
_h_protection_domain = h_protection_domain;
@@ -537,8 +536,7 @@ class JvmtiClassFileLoadHookPoster : public StackObj {
_curr_len = *end_ptr - *data_ptr;
_curr_data = *data_ptr;
_curr_env = NULL;
- _cached_length_ptr = cached_length_ptr;
- _cached_data_ptr = cached_data_ptr;
+ _cached_class_file_ptr = cache_ptr;
_state = _thread->jvmti_thread_state();
if (_state != NULL) {
@@ -615,12 +613,20 @@ class JvmtiClassFileLoadHookPoster : public StackObj {
}
if (new_data != NULL) {
// this agent has modified class data.
- if (caching_needed && *_cached_data_ptr == NULL) {
+ if (caching_needed && *_cached_class_file_ptr == NULL) {
// data has been changed by the new retransformable agent
// and it hasn't already been cached, cache it
- *_cached_data_ptr = (unsigned char *)os::malloc(_curr_len, mtInternal);
- memcpy(*_cached_data_ptr, _curr_data, _curr_len);
- *_cached_length_ptr = _curr_len;
+ JvmtiCachedClassFileData *p;
+ p = (JvmtiCachedClassFileData *)os::malloc(
+ offset_of(JvmtiCachedClassFileData, data) + _curr_len, mtInternal);
+ if (p == NULL) {
+ vm_exit_out_of_memory(offset_of(JvmtiCachedClassFileData, data) + _curr_len,
+ OOM_MALLOC_ERROR,
+ "unable to allocate cached copy of original class bytes");
+ }
+ p->length = _curr_len;
+ memcpy(p->data, _curr_data, _curr_len);
+ *_cached_class_file_ptr = p;
}
if (_curr_data != *_data_ptr) {
@@ -659,13 +665,11 @@ void JvmtiExport::post_class_file_load_hook(Symbol* h_name,
Handle h_protection_domain,
unsigned char **data_ptr,
unsigned char **end_ptr,
- unsigned char **cached_data_ptr,
- jint *cached_length_ptr) {
+ JvmtiCachedClassFileData **cache_ptr) {
JvmtiClassFileLoadHookPoster poster(h_name, class_loader,
h_protection_domain,
data_ptr, end_ptr,
- cached_data_ptr,
- cached_length_ptr);
+ cache_ptr);
poster.post();
}
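
Sketch (stand-alone model): the caching change above merges the old (data pointer, length) pair into a single allocation whose trailing array holds the class bytes. The struct below is invented to show the layout; the real JvmtiCachedClassFileData lives in the JVMTI implementation headers.

#include <cstddef>
#include <cstdlib>
#include <cstring>

struct CachedClassFileData {
  int length;
  unsigned char data[1];   // really 'length' bytes, allocated below
};

static CachedClassFileData* cache_bytes(const unsigned char* src, int len) {
  // One malloc sized with offsetof, as in the hunk above (which uses the
  // HotSpot offset_of macro and exits the VM on allocation failure).
  size_t sz = offsetof(CachedClassFileData, data) + (size_t)len;
  CachedClassFileData* p = static_cast<CachedClassFileData*>(std::malloc(sz));
  if (p == NULL) return NULL;
  p->length = len;
  std::memcpy(p->data, src, (size_t)len);
  return p;
}
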
@@ -1625,15 +1629,19 @@ void JvmtiExport::post_raw_field_modification(JavaThread *thread, Method* method
}
}
+ assert(sig_type != '[', "array should have sig_type == 'L'");
+ bool handle_created = false;
+
// convert oop to JNI handle.
- if (sig_type == 'L' || sig_type == '[') {
+ if (sig_type == 'L') {
+ handle_created = true;
value->l = (jobject)JNIHandles::make_local(thread, (oop)value->l);
}
post_field_modification(thread, method, location, field_klass, object, field, sig_type, value);
// Destroy the JNI handle allocated above.
- if (sig_type == 'L') {
+ if (handle_created) {
JNIHandles::destroy_local(value->l);
}
}
@@ -2187,6 +2195,8 @@ jint JvmtiExport::load_agent_library(AttachOperation* op, outputStream* st) {
char buffer[JVM_MAXPATHLEN];
void* library = NULL;
jint result = JNI_ERR;
+ const char *on_attach_symbols[] = AGENT_ONATTACH_SYMBOLS;
+ size_t num_symbol_entries = ARRAY_SIZE(on_attach_symbols);
// get agent name and options
const char* agent = op->arg(0);
@@ -2196,43 +2206,48 @@ jint JvmtiExport::load_agent_library(AttachOperation* op, outputStream* st) {
// The abs parameter should be "true" or "false"
bool is_absolute_path = (absParam != NULL) && (strcmp(absParam,"true")==0);
+ // Initially marked as invalid. It will be set to valid if we can find the agent.
+ AgentLibrary *agent_lib = new AgentLibrary(agent, options, is_absolute_path, NULL);
- // If the path is absolute we attempt to load the library. Otherwise we try to
- // load it from the standard dll directory.
+ // Check for a statically linked agent. If none is found and the path is
+ // absolute, we attempt to load the library directly. Otherwise we try to
+ // load it from the standard dll directory.
- if (is_absolute_path) {
- library = os::dll_load(agent, ebuf, sizeof ebuf);
- } else {
- // Try to load the agent from the standard dll directory
- if (os::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
- agent)) {
- library = os::dll_load(buffer, ebuf, sizeof ebuf);
- }
- if (library == NULL) {
- // not found - try local path
- char ns[1] = {0};
- if (os::dll_build_name(buffer, sizeof(buffer), ns, agent)) {
+ if (!os::find_builtin_agent(agent_lib, on_attach_symbols, num_symbol_entries)) {
+ if (is_absolute_path) {
+ library = os::dll_load(agent, ebuf, sizeof ebuf);
+ } else {
+ // Try to load the agent from the standard dll directory
+ if (os::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
+ agent)) {
library = os::dll_load(buffer, ebuf, sizeof ebuf);
}
+ if (library == NULL) {
+ // not found - try local path
+ char ns[1] = {0};
+ if (os::dll_build_name(buffer, sizeof(buffer), ns, agent)) {
+ library = os::dll_load(buffer, ebuf, sizeof ebuf);
+ }
+ }
+ }
+ if (library != NULL) {
+ agent_lib->set_os_lib(library);
+ agent_lib->set_valid();
}
}
-
// If the library was loaded then we attempt to invoke the Agent_OnAttach
// function
- if (library != NULL) {
-
+ if (agent_lib->valid()) {
// Lookup the Agent_OnAttach function
OnAttachEntry_t on_attach_entry = NULL;
- const char *on_attach_symbols[] = AGENT_ONATTACH_SYMBOLS;
- for (uint symbol_index = 0; symbol_index < ARRAY_SIZE(on_attach_symbols); symbol_index++) {
- on_attach_entry =
- CAST_TO_FN_PTR(OnAttachEntry_t, os::dll_lookup(library, on_attach_symbols[symbol_index]));
- if (on_attach_entry != NULL) break;
- }
-
+ on_attach_entry = CAST_TO_FN_PTR(OnAttachEntry_t,
+ os::find_agent_function(agent_lib, false, on_attach_symbols, num_symbol_entries));
if (on_attach_entry == NULL) {
// Agent_OnAttach missing - unload library
- os::dll_unload(library);
+ if (!agent_lib->is_static_lib()) {
+ os::dll_unload(library);
+ }
+ delete agent_lib;
} else {
// Invoke the Agent_OnAttach function
JavaThread* THREAD = JavaThread::current();
@@ -2252,7 +2267,9 @@ jint JvmtiExport::load_agent_library(AttachOperation* op, outputStream* st) {
// If OnAttach returns JNI_OK then we add it to the list of
// agent libraries so that we can call Agent_OnUnload later.
if (result == JNI_OK) {
- Arguments::add_loaded_agent(agent, (char*)options, is_absolute_path, library);
+ Arguments::add_loaded_agent(agent_lib);
+ } else {
+ delete agent_lib;
}
// Agent_OnAttach executed so completion status is JNI_OK
diff --git a/src/share/vm/prims/jvmtiExport.hpp b/src/share/vm/prims/jvmtiExport.hpp
index a1e0e0bd4..dc52a32f8 100644
--- a/src/share/vm/prims/jvmtiExport.hpp
+++ b/src/share/vm/prims/jvmtiExport.hpp
@@ -323,8 +323,7 @@ class JvmtiExport : public AllStatic {
static void post_class_file_load_hook(Symbol* h_name, Handle class_loader,
Handle h_protection_domain,
unsigned char **data_ptr, unsigned char **end_ptr,
- unsigned char **cached_data_ptr,
- jint *cached_length_ptr) NOT_JVMTI_RETURN;
+ JvmtiCachedClassFileData **cache_ptr) NOT_JVMTI_RETURN;
static void post_native_method_bind(Method* method, address* function_ptr) NOT_JVMTI_RETURN;
static void post_compiled_method_load(nmethod *nm) NOT_JVMTI_RETURN;
static void post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end) NOT_JVMTI_RETURN;
diff --git a/src/share/vm/prims/jvmtiGen.java b/src/share/vm/prims/jvmtiGen.java
index 74191ed60..f2cdbe9ae 100644
--- a/src/share/vm/prims/jvmtiGen.java
+++ b/src/share/vm/prims/jvmtiGen.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,6 @@ import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
import org.w3c.dom.Document;
import org.w3c.dom.DOMException;
-
// For write operation
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
@@ -129,6 +128,7 @@ public class jvmtiGen
factory.setNamespaceAware(true);
factory.setValidating(true);
+ factory.setXIncludeAware(true);
try {
File datafile = new File(inFileName);
diff --git a/src/share/vm/prims/jvmtiImpl.cpp b/src/share/vm/prims/jvmtiImpl.cpp
index c569b8fdc..0fcd1ba94 100644
--- a/src/share/vm/prims/jvmtiImpl.cpp
+++ b/src/share/vm/prims/jvmtiImpl.cpp
@@ -273,59 +273,49 @@ void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
// add/remove breakpoint to/from versions of the method that
// are EMCP. Directly or transitively obsolete methods are
- // not saved in the PreviousVersionInfo.
+ // not saved in the PreviousVersionNodes.
Thread *thread = Thread::current();
instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder());
Symbol* m_name = _method->name();
Symbol* m_signature = _method->signature();
- {
- ResourceMark rm(thread);
- // PreviousVersionInfo objects returned via PreviousVersionWalker
- // contain a GrowableArray of handles. We have to clean up the
- // GrowableArray _after_ the PreviousVersionWalker destructor
- // has destroyed the handles.
- {
- // search previous versions if they exist
- PreviousVersionWalker pvw((InstanceKlass *)ikh());
- for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
- pv_info != NULL; pv_info = pvw.next_previous_version()) {
- GrowableArray<methodHandle>* methods =
- pv_info->prev_EMCP_method_handles();
-
- if (methods == NULL) {
- // We have run into a PreviousVersion generation where
- // all methods were made obsolete during that generation's
- // RedefineClasses() operation. At the time of that
- // operation, all EMCP methods were flushed so we don't
- // have to go back any further.
- //
- // A NULL methods array is different than an empty methods
- // array. We cannot infer any optimizations about older
- // generations from an empty methods array for the current
- // generation.
- break;
- }
+ // search previous versions if they exist
+ PreviousVersionWalker pvw(thread, (InstanceKlass *)ikh());
+ for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+ pv_node != NULL; pv_node = pvw.next_previous_version()) {
+ GrowableArray<Method*>* methods = pv_node->prev_EMCP_methods();
+
+ if (methods == NULL) {
+ // We have run into a PreviousVersion generation where
+ // all methods were made obsolete during that generation's
+ // RedefineClasses() operation. At the time of that
+ // operation, all EMCP methods were flushed so we don't
+ // have to go back any further.
+ //
+ // A NULL methods array is different than an empty methods
+ // array. We cannot infer any optimizations about older
+ // generations from an empty methods array for the current
+ // generation.
+ break;
+ }
- for (int i = methods->length() - 1; i >= 0; i--) {
- methodHandle method = methods->at(i);
- // obsolete methods that are running are not deleted from
- // previous version array, but they are skipped here.
- if (!method->is_obsolete() &&
- method->name() == m_name &&
- method->signature() == m_signature) {
- RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
- meth_act == &Method::set_breakpoint ? "sett" : "clear",
- method->name()->as_C_string(),
- method->signature()->as_C_string()));
-
- ((Method*)method()->*meth_act)(_bci);
- break;
- }
- }
+ for (int i = methods->length() - 1; i >= 0; i--) {
+ Method* method = methods->at(i);
+ // obsolete methods that are running are not deleted from
+ // previous version array, but they are skipped here.
+ if (!method->is_obsolete() &&
+ method->name() == m_name &&
+ method->signature() == m_signature) {
+ RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
+ meth_act == &Method::set_breakpoint ? "sett" : "clear",
+ method->name()->as_C_string(),
+ method->signature()->as_C_string()));
+
+ (method->*meth_act)(_bci);
+ break;
}
- } // pvw is cleaned up
- } // rm is cleaned up
+ }
+ }
}
void JvmtiBreakpoint::set() {
@@ -360,19 +350,14 @@ void VM_ChangeBreakpoints::doit() {
case CLEAR_BREAKPOINT:
_breakpoints->clear_at_safepoint(*_bp);
break;
- case CLEAR_ALL_BREAKPOINT:
- _breakpoints->clearall_at_safepoint();
- break;
default:
assert(false, "Unknown operation");
}
}
void VM_ChangeBreakpoints::oops_do(OopClosure* f) {
- // This operation keeps breakpoints alive
- if (_breakpoints != NULL) {
- _breakpoints->oops_do(f);
- }
+ // The JvmtiBreakpoints in _breakpoints will be visited via
+ // JvmtiExport::oops_do.
if (_bp != NULL) {
_bp->oops_do(f);
}
@@ -433,23 +418,13 @@ void JvmtiBreakpoints::clear_at_safepoint(JvmtiBreakpoint& bp) {
}
}
-void JvmtiBreakpoints::clearall_at_safepoint() {
- assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-
- int len = _bps.length();
- for (int i=0; i<len; i++) {
- _bps.at(i).clear();
- }
- _bps.clear();
-}
-
int JvmtiBreakpoints::length() { return _bps.length(); }
int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
if ( _bps.find(bp) != -1) {
return JVMTI_ERROR_DUPLICATE;
}
- VM_ChangeBreakpoints set_breakpoint(this,VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
+ VM_ChangeBreakpoints set_breakpoint(VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
VMThread::execute(&set_breakpoint);
return JVMTI_ERROR_NONE;
}
@@ -459,7 +434,7 @@ int JvmtiBreakpoints::clear(JvmtiBreakpoint& bp) {
return JVMTI_ERROR_NOT_FOUND;
}
- VM_ChangeBreakpoints clear_breakpoint(this,VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
+ VM_ChangeBreakpoints clear_breakpoint(VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
VMThread::execute(&clear_breakpoint);
return JVMTI_ERROR_NONE;
}
@@ -490,11 +465,6 @@ void JvmtiBreakpoints::clearall_in_class_at_safepoint(Klass* klass) {
}
}
-void JvmtiBreakpoints::clearall() {
- VM_ChangeBreakpoints clearall_breakpoint(this,VM_ChangeBreakpoints::CLEAR_ALL_BREAKPOINT);
- VMThread::execute(&clearall_breakpoint);
-}
-
//
// class JvmtiCurrentBreakpoints
//
diff --git a/src/share/vm/prims/jvmtiImpl.hpp b/src/share/vm/prims/jvmtiImpl.hpp
index 204bd83f1..c7beb1762 100644
--- a/src/share/vm/prims/jvmtiImpl.hpp
+++ b/src/share/vm/prims/jvmtiImpl.hpp
@@ -204,47 +204,6 @@ public:
///////////////////////////////////////////////////////////////
//
-// class VM_ChangeBreakpoints
-// Used by : JvmtiBreakpoints
-// Used by JVMTI methods: none directly.
-// Note: A Helper class.
-//
-// VM_ChangeBreakpoints implements a VM_Operation for ALL modifications to the JvmtiBreakpoints class.
-//
-
-class VM_ChangeBreakpoints : public VM_Operation {
-private:
- JvmtiBreakpoints* _breakpoints;
- int _operation;
- JvmtiBreakpoint* _bp;
-
-public:
- enum { SET_BREAKPOINT=0, CLEAR_BREAKPOINT=1, CLEAR_ALL_BREAKPOINT=2 };
-
- VM_ChangeBreakpoints(JvmtiBreakpoints* breakpoints, int operation) {
- _breakpoints = breakpoints;
- _bp = NULL;
- _operation = operation;
- assert(breakpoints != NULL, "breakpoints != NULL");
- assert(operation == CLEAR_ALL_BREAKPOINT, "unknown breakpoint operation");
- }
- VM_ChangeBreakpoints(JvmtiBreakpoints* breakpoints, int operation, JvmtiBreakpoint *bp) {
- _breakpoints = breakpoints;
- _bp = bp;
- _operation = operation;
- assert(breakpoints != NULL, "breakpoints != NULL");
- assert(bp != NULL, "bp != NULL");
- assert(operation == SET_BREAKPOINT || operation == CLEAR_BREAKPOINT , "unknown breakpoint operation");
- }
-
- VMOp_Type type() const { return VMOp_ChangeBreakpoints; }
- void doit();
- void oops_do(OopClosure* f);
-};
-
-
-///////////////////////////////////////////////////////////////
-//
// class JvmtiBreakpoints
// Used by : JvmtiCurrentBreakpoints
// Used by JVMTI methods: none directly
@@ -271,7 +230,6 @@ private:
friend class VM_ChangeBreakpoints;
void set_at_safepoint(JvmtiBreakpoint& bp);
void clear_at_safepoint(JvmtiBreakpoint& bp);
- void clearall_at_safepoint();
static void do_element(GrowableElement *e);
@@ -286,7 +244,6 @@ public:
int set(JvmtiBreakpoint& bp);
int clear(JvmtiBreakpoint& bp);
void clearall_in_class_at_safepoint(Klass* klass);
- void clearall();
void gc_epilogue();
};
@@ -344,6 +301,40 @@ bool JvmtiCurrentBreakpoints::is_breakpoint(address bcp) {
return false;
}
+
+///////////////////////////////////////////////////////////////
+//
+// class VM_ChangeBreakpoints
+// Used by : JvmtiBreakpoints
+// Used by JVMTI methods: none directly.
+// Note: A Helper class.
+//
+// VM_ChangeBreakpoints implements a VM_Operation for ALL modifications to the JvmtiBreakpoints class.
+//
+
+class VM_ChangeBreakpoints : public VM_Operation {
+private:
+ JvmtiBreakpoints* _breakpoints;
+ int _operation;
+ JvmtiBreakpoint* _bp;
+
+public:
+ enum { SET_BREAKPOINT=0, CLEAR_BREAKPOINT=1 };
+
+ VM_ChangeBreakpoints(int operation, JvmtiBreakpoint *bp) {
+ JvmtiBreakpoints& current_bps = JvmtiCurrentBreakpoints::get_jvmti_breakpoints();
+ _breakpoints = &current_bps;
+ _bp = bp;
+ _operation = operation;
+ assert(bp != NULL, "bp != NULL");
+ }
+
+ VMOp_Type type() const { return VMOp_ChangeBreakpoints; }
+ void doit();
+ void oops_do(OopClosure* f);
+};
+
+
///////////////////////////////////////////////////////////////
// The get/set local operations must only be done by the VM thread
// because the interpreter version needs to access oop maps, which can
diff --git a/src/share/vm/prims/jvmtiRedefineClasses.cpp b/src/share/vm/prims/jvmtiRedefineClasses.cpp
index 4e8e8c0e0..34254d6b1 100644
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp
@@ -160,7 +160,8 @@ void VM_RedefineClasses::doit() {
if (RC_TRACE_ENABLED(0x00004000)) {
#endif
RC_TRACE_WITH_THREAD(0x00004000, thread, ("calling check_class"));
- SystemDictionary::classes_do(check_class, thread);
+ CheckClass check_class(thread);
+ ClassLoaderDataGraph::classes_do(&check_class);
#ifdef PRODUCT
}
#endif
@@ -1071,8 +1072,17 @@ jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) {
}
res = merge_cp_and_rewrite(the_class, scratch_class, THREAD);
- if (res != JVMTI_ERROR_NONE) {
- return res;
+ if (HAS_PENDING_EXCEPTION) {
+ Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
+ // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
+ RC_TRACE_WITH_THREAD(0x00000002, THREAD,
+ ("merge_cp_and_rewrite exception: '%s'", ex_name->as_C_string()));
+ CLEAR_PENDING_EXCEPTION;
+ if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) {
+ return JVMTI_ERROR_OUT_OF_MEMORY;
+ } else {
+ return JVMTI_ERROR_INTERNAL;
+ }
}
if (VerifyMergedCPBytecodes) {
@@ -1104,6 +1114,9 @@ jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) {
}
if (HAS_PENDING_EXCEPTION) {
Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
+ // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
+ RC_TRACE_WITH_THREAD(0x00000002, THREAD,
+ ("Rewriter::rewrite or link_methods exception: '%s'", ex_name->as_C_string()));
CLEAR_PENDING_EXCEPTION;
if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) {
return JVMTI_ERROR_OUT_OF_MEMORY;
@@ -1348,12 +1361,11 @@ bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp,
CHECK_0);
}
- finalize_operands_merge(*merge_cp_p, THREAD);
-
RC_TRACE_WITH_THREAD(0x00020000, THREAD,
("after pass 1b: merge_cp_len=%d, scratch_i=%d, index_map_len=%d",
*merge_cp_length_p, scratch_i, _index_map_count));
}
+ finalize_operands_merge(*merge_cp_p, THREAD);
return true;
} // end merge_constant_pools()
@@ -1395,8 +1407,8 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
ClassLoaderData* loader_data = the_class->class_loader_data();
ConstantPool* merge_cp_oop =
ConstantPool::allocate(loader_data,
- merge_cp_length,
- THREAD);
+ merge_cp_length,
+ CHECK_(JVMTI_ERROR_OUT_OF_MEMORY));
MergeCPCleaner cp_cleaner(loader_data, merge_cp_oop);
HandleMark hm(THREAD); // make sure handles are cleared before
@@ -1472,7 +1484,8 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
// Replace the new constant pool with a shrunken copy of the
// merged constant pool
- set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length, THREAD);
+ set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length,
+ CHECK_(JVMTI_ERROR_OUT_OF_MEMORY));
// The new constant pool replaces scratch_cp so have cleaner clean it up.
// It can't be cleaned up while there are handles to it.
cp_cleaner.add_scratch_cp(scratch_cp());
@@ -1502,7 +1515,8 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
// merged constant pool so now the rewritten bytecodes have
// valid references; the previous new constant pool will get
// GCed.
- set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length, THREAD);
+ set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length,
+ CHECK_(JVMTI_ERROR_OUT_OF_MEMORY));
// The new constant pool replaces scratch_cp so have cleaner clean it up.
// It can't be cleaned up while there are handles to it.
cp_cleaner.add_scratch_cp(scratch_cp());
@@ -1554,6 +1568,24 @@ bool VM_RedefineClasses::rewrite_cp_refs(instanceKlassHandle scratch_class,
return false;
}
+ // rewrite source file name index:
+ u2 source_file_name_idx = scratch_class->source_file_name_index();
+ if (source_file_name_idx != 0) {
+ u2 new_source_file_name_idx = find_new_index(source_file_name_idx);
+ if (new_source_file_name_idx != 0) {
+ scratch_class->set_source_file_name_index(new_source_file_name_idx);
+ }
+ }
+
+ // rewrite class generic signature index:
+ u2 generic_signature_index = scratch_class->generic_signature_index();
+ if (generic_signature_index != 0) {
+ u2 new_generic_signature_index = find_new_index(generic_signature_index);
+ if (new_generic_signature_index != 0) {
+ scratch_class->set_generic_signature_index(new_generic_signature_index);
+ }
+ }
+
return true;
} // end rewrite_cp_refs()
@@ -1572,11 +1604,23 @@ bool VM_RedefineClasses::rewrite_cp_refs_in_methods(
for (int i = methods->length() - 1; i >= 0; i--) {
methodHandle method(THREAD, methods->at(i));
methodHandle new_method;
- rewrite_cp_refs_in_method(method, &new_method, CHECK_false);
+ rewrite_cp_refs_in_method(method, &new_method, THREAD);
if (!new_method.is_null()) {
+ // The method has been replaced, so save the new method version even in
+ // the case of an exception. The original method is on the deallocation
+ // list.
methods->at_put(i, new_method());
}
+ if (HAS_PENDING_EXCEPTION) {
+ Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
+ // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
+ RC_TRACE_WITH_THREAD(0x00000002, THREAD,
+ ("rewrite_cp_refs_in_method exception: '%s'", ex_name->as_C_string()));
+ // Need to clear pending exception here as the super caller sets
+ // the JVMTI_ERROR_INTERNAL if the returned value is false.
+ CLEAR_PENDING_EXCEPTION;
+ return false;
+ }
}
return true;
@@ -1656,10 +1700,7 @@ void VM_RedefineClasses::rewrite_cp_refs_in_method(methodHandle method,
Pause_No_Safepoint_Verifier pnsv(&nsv);
// ldc is 2 bytes and ldc_w is 3 bytes
- m = rc.insert_space_at(bci, 3, inst_buffer, THREAD);
- if (m.is_null() || HAS_PENDING_EXCEPTION) {
- guarantee(false, "insert_space_at() failed");
- }
+ m = rc.insert_space_at(bci, 3, inst_buffer, CHECK);
}
// return the new method so that the caller can update
@@ -1723,7 +1764,10 @@ void VM_RedefineClasses::rewrite_cp_refs_in_method(methodHandle method,
for (int i = 0; i < len; i++) {
const u2 cp_index = elem[i].name_cp_index;
- elem[i].name_cp_index = find_new_index(cp_index);
+ const u2 new_cp_index = find_new_index(cp_index);
+ if (new_cp_index != 0) {
+ elem[i].name_cp_index = new_cp_index;
+ }
}
}
} // end rewrite_cp_refs_in_method()
@@ -2466,8 +2510,8 @@ void VM_RedefineClasses::set_new_constant_pool(
// scratch_cp is a merged constant pool and has enough space for a
// worst case merge situation. We want to associate the minimum
// sized constant pool with the klass to save space.
- constantPoolHandle smaller_cp(THREAD,
- ConstantPool::allocate(loader_data, scratch_cp_length, THREAD));
+ ConstantPool* cp = ConstantPool::allocate(loader_data, scratch_cp_length, CHECK);
+ constantPoolHandle smaller_cp(THREAD, cp);
// preserve version() value in the smaller copy
int version = scratch_cp->version();
@@ -2479,6 +2523,11 @@ void VM_RedefineClasses::set_new_constant_pool(
smaller_cp->set_pool_holder(scratch_class());
scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ // Exception is handled in the caller
+ loader_data->add_to_deallocate_list(smaller_cp());
+ return;
+ }
scratch_cp = smaller_cp;
// attach new constant pool to klass
@@ -2653,29 +2702,35 @@ void VM_RedefineClasses::set_new_constant_pool(
} // end set_new_constant_pool()
-void VM_RedefineClasses::adjust_array_vtable(Klass* k_oop) {
- ArrayKlass* ak = ArrayKlass::cast(k_oop);
- bool trace_name_printed = false;
- ak->vtable()->adjust_method_entries(_matching_old_methods,
- _matching_new_methods,
- _matching_methods_length,
- &trace_name_printed);
-}
-
// Unevolving classes may point to methods of the_class directly
// from their constant pool caches, itables, and/or vtables. We
-// use the SystemDictionary::classes_do() facility and this helper
+// use the ClassLoaderDataGraph::classes_do() facility and this helper
// to fix up these pointers.
-//
-// Note: We currently don't support updating the vtable in
-// arrayKlassOops. See Open Issues in jvmtiRedefineClasses.hpp.
-void VM_RedefineClasses::adjust_cpool_cache_and_vtable(Klass* k_oop,
- ClassLoaderData* initiating_loader,
- TRAPS) {
- Klass *k = k_oop;
- if (k->oop_is_instance()) {
- HandleMark hm(THREAD);
- InstanceKlass *ik = (InstanceKlass *) k;
+
+// Adjust cpools and vtables closure
+void VM_RedefineClasses::AdjustCpoolCacheAndVtable::do_klass(Klass* k) {
+
+ // This is a very busy routine. We don't want too much tracing
+ // printed out.
+ bool trace_name_printed = false;
+
+ // Very noisy: only enable this call if you are trying to determine
+ // that a specific class gets found by this routine.
+ // RC_TRACE macro has an embedded ResourceMark
+ // RC_TRACE_WITH_THREAD(0x00100000, THREAD,
+ // ("adjust check: name=%s", k->external_name()));
+ // trace_name_printed = true;
+
+ // If the class being redefined is java.lang.Object, we need to fix all
+ // array class vtables also
+ if (k->oop_is_array() && _the_class_oop == SystemDictionary::Object_klass()) {
+ k->vtable()->adjust_method_entries(_matching_old_methods,
+ _matching_new_methods,
+ _matching_methods_length,
+ &trace_name_printed);
+ } else if (k->oop_is_instance()) {
+ HandleMark hm(_thread);
+ InstanceKlass *ik = InstanceKlass::cast(k);
// HotSpot specific optimization! HotSpot does not currently
// support delegation from the bootstrap class loader to a
@@ -2695,23 +2750,6 @@ void VM_RedefineClasses::adjust_cpool_cache_and_vtable(Klass* k_oop,
return;
}
- // If the class being redefined is java.lang.Object, we need to fix all
- // array class vtables also
- if (_the_class_oop == SystemDictionary::Object_klass()) {
- ik->array_klasses_do(adjust_array_vtable);
- }
-
- // This is a very busy routine. We don't want too much tracing
- // printed out.
- bool trace_name_printed = false;
-
- // Very noisy: only enable this call if you are trying to determine
- // that a specific class gets found by this routine.
- // RC_TRACE macro has an embedded ResourceMark
- // RC_TRACE_WITH_THREAD(0x00100000, THREAD,
- // ("adjust check: name=%s", ik->external_name()));
- // trace_name_printed = true;
-
// Fix the vtable embedded in the_class and subclasses of the_class,
// if one exists. We discard scratch_class and we don't keep an
// InstanceKlass around to hold obsolete methods so we don't have
@@ -2719,7 +2757,7 @@ void VM_RedefineClasses::adjust_cpool_cache_and_vtable(Klass* k_oop,
// holds the Method*s for virtual (but not final) methods.
if (ik->vtable_length() > 0 && ik->is_subtype_of(_the_class_oop)) {
// ik->vtable() creates a wrapper object; rm cleans it up
- ResourceMark rm(THREAD);
+ ResourceMark rm(_thread);
ik->vtable()->adjust_method_entries(_matching_old_methods,
_matching_new_methods,
_matching_methods_length,
@@ -2735,7 +2773,7 @@ void VM_RedefineClasses::adjust_cpool_cache_and_vtable(Klass* k_oop,
if (ik->itable_length() > 0 && (_the_class_oop->is_interface()
|| ik->is_subclass_of(_the_class_oop))) {
// ik->itable() creates a wrapper object; rm cleans it up
- ResourceMark rm(THREAD);
+ ResourceMark rm(_thread);
ik->itable()->adjust_method_entries(_matching_old_methods,
_matching_new_methods,
_matching_methods_length,
@@ -2758,7 +2796,7 @@ void VM_RedefineClasses::adjust_cpool_cache_and_vtable(Klass* k_oop,
constantPoolHandle other_cp;
ConstantPoolCache* cp_cache;
- if (k_oop != _the_class_oop) {
+ if (ik != _the_class_oop) {
// this klass' constant pool cache may need adjustment
other_cp = constantPoolHandle(ik->constants());
cp_cache = other_cp->cache();
@@ -2769,28 +2807,20 @@ void VM_RedefineClasses::adjust_cpool_cache_and_vtable(Klass* k_oop,
&trace_name_printed);
}
}
- {
- ResourceMark rm(THREAD);
- // PreviousVersionInfo objects returned via PreviousVersionWalker
- // contain a GrowableArray of handles. We have to clean up the
- // GrowableArray _after_ the PreviousVersionWalker destructor
- // has destroyed the handles.
- {
- // the previous versions' constant pool caches may need adjustment
- PreviousVersionWalker pvw(ik);
- for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
- pv_info != NULL; pv_info = pvw.next_previous_version()) {
- other_cp = pv_info->prev_constant_pool_handle();
- cp_cache = other_cp->cache();
- if (cp_cache != NULL) {
- cp_cache->adjust_method_entries(_matching_old_methods,
- _matching_new_methods,
- _matching_methods_length,
- &trace_name_printed);
- }
- }
- } // pvw is cleaned up
- } // rm is cleaned up
+
+ // the previous versions' constant pool caches may need adjustment
+ PreviousVersionWalker pvw(_thread, ik);
+ for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+ pv_node != NULL; pv_node = pvw.next_previous_version()) {
+ other_cp = pv_node->prev_constant_pool();
+ cp_cache = other_cp->cache();
+ if (cp_cache != NULL) {
+ cp_cache->adjust_method_entries(_matching_old_methods,
+ _matching_new_methods,
+ _matching_methods_length,
+ &trace_name_printed);
+ }
+ }
}
}
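The move from a static classes_do() callback to a KlassClosure is structural, not just a rename: the thread no longer travels through TRAPS, it lives on the closure, and ClassLoaderDataGraph::classes_do() hands every loaded class to do_klass(), including array classes, which is why the Object special case can now be handled inline instead of via the deleted adjust_array_vtable(). A self-contained sketch of the shape (Klass and the graph here are stand-ins):

    #include <vector>

    struct Klass { };                          // stand-in for HotSpot's Klass

    struct KlassClosure {
      virtual void do_klass(Klass* k) = 0;
      virtual ~KlassClosure() { }
    };

    struct ClassLoaderDataGraph {              // stand-in for the real graph
      static std::vector<Klass*>& classes() {
        static std::vector<Klass*> v;
        return v;
      }
      static void classes_do(KlassClosure* cl) {
        for (size_t i = 0; i < classes().size(); i++) cl->do_klass(classes()[i]);
      }
    };

    struct AdjustCpoolCacheAndVtable : KlassClosure {
      // _thread and the matching-method arrays would be fields here
      virtual void do_klass(Klass* k) { (void)k; /* fix vtable/itable/cp cache */ }
    };

    int main() {
      AdjustCpoolCacheAndVtable adjust;
      ClassLoaderDataGraph::classes_do(&adjust); // mirrors the call in the patch
      return 0;
    }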
@@ -2904,10 +2934,9 @@ void VM_RedefineClasses::check_methods_and_mark_as_obsolete(
// obsolete methods need a unique idnum
u2 num = InstanceKlass::cast(_the_class_oop)->next_method_idnum();
if (num != ConstMethod::UNSET_IDNUM) {
-// u2 old_num = old_method->method_idnum();
old_method->set_method_idnum(num);
-// TO DO: attach obsolete annotations to obsolete method's new idnum
}
+
// With tracing we try not to "yack" too much. The position of
// this trace assumes there are fewer obsolete methods than
// EMCP methods.
@@ -2920,7 +2949,7 @@ void VM_RedefineClasses::check_methods_and_mark_as_obsolete(
for (int i = 0; i < _deleted_methods_length; ++i) {
Method* old_method = _deleted_methods[i];
- assert(old_method->vtable_index() < 0,
+ assert(!old_method->has_vtable_index(),
"cannot delete methods with vtable entries");;
// Mark all deleted methods as old and obsolete
@@ -3208,7 +3237,7 @@ void VM_RedefineClasses::swap_annotations(instanceKlassHandle the_class,
// parts of the_class
// - adjusting constant pool caches and vtables in other classes
// that refer to methods in the_class. These adjustments use the
-// SystemDictionary::classes_do() facility which only allows
+// ClassLoaderDataGraph::classes_do() facility which only allows
// a helper method to be specified. The interesting parameters
// that we would like to pass to the helper method are saved in
// static global fields in the VM operation.
@@ -3228,15 +3257,6 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints();
jvmti_breakpoints.clearall_in_class_at_safepoint(the_class_oop);
- if (the_class_oop == Universe::reflect_invoke_cache()->klass()) {
- // We are redefining java.lang.reflect.Method. Method.invoke() is
- // cached and users of the cache care about each active version of
- // the method so we have to track this previous version.
- // Do this before methods get switched
- Universe::reflect_invoke_cache()->add_previous_version(
- the_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum()));
- }
-
// Deoptimize all compiled code that depends on this class
flush_dependent_code(the_class, THREAD);
@@ -3353,9 +3373,7 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
// should get cleared in the_class too.
if (the_class->get_cached_class_file_bytes() == 0) {
// the_class doesn't have a cache yet so copy it
- the_class->set_cached_class_file(
- scratch_class->get_cached_class_file_bytes(),
- scratch_class->get_cached_class_file_len());
+ the_class->set_cached_class_file(scratch_class->get_cached_class_file());
}
#ifndef PRODUCT
else {
@@ -3366,6 +3384,10 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
}
#endif
+  // NULL out the cached class file in the scratch class so the bytes are not
+  // deleted twice. The class being redefined always owns these bytes.
+ scratch_class->set_cached_class_file(NULL);
+
// Replace inner_classes
Array<u2>* old_inner_classes = the_class->inner_classes();
the_class->set_inner_classes(scratch_class->inner_classes());
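The NULL-out a few lines up is the crux of the new cached-class-file handling: once the redefined class has taken the buffer, the scratch class must forget its pointer or both would free the same bytes. The hand-off in isolation (the types are stand-ins):

    struct CachedBytes { };                    // stand-in for JvmtiCachedClassFileData

    struct KlassLike {
      CachedBytes* _cached;
      KlassLike() : _cached(0) { }
      CachedBytes* get_cached_class_file()       { return _cached; }
      void set_cached_class_file(CachedBytes* c) { _cached = c; }
    };

    static void hand_off(KlassLike& the_class, KlassLike& scratch_class) {
      the_class.set_cached_class_file(scratch_class.get_cached_class_file());
      scratch_class.set_cached_class_file(0);  // forget; avoids a double free
    }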
@@ -3388,7 +3410,8 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
// Leave arrays of jmethodIDs and itable index cache unchanged
// Copy the "source file name" attribute from new class version
- the_class->set_source_file_name(scratch_class->source_file_name());
+ the_class->set_source_file_name_index(
+ scratch_class->source_file_name_index());
// Copy the "source debug extension" attribute from new class version
the_class->set_source_debug_extension(
@@ -3438,7 +3461,8 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
// Adjust constantpool caches and vtables for all classes
// that reference methods of the evolved class.
- SystemDictionary::classes_do(adjust_cpool_cache_and_vtable, THREAD);
+ AdjustCpoolCacheAndVtable adjust_cpool_cache_and_vtable(THREAD);
+ ClassLoaderDataGraph::classes_do(&adjust_cpool_cache_and_vtable);
// JSR-292 support
MemberNameTable* mnt = the_class->member_names();
@@ -3499,34 +3523,33 @@ void VM_RedefineClasses::increment_class_counter(InstanceKlass *ik, TRAPS) {
}
}
-void VM_RedefineClasses::check_class(Klass* k_oop,
- ClassLoaderData* initiating_loader,
- TRAPS) {
- Klass *k = k_oop;
- if (k->oop_is_instance()) {
- HandleMark hm(THREAD);
- InstanceKlass *ik = (InstanceKlass *) k;
- bool no_old_methods = true; // be optimistic
- ResourceMark rm(THREAD);
+void VM_RedefineClasses::CheckClass::do_klass(Klass* k) {
+ bool no_old_methods = true; // be optimistic
- // a vtable should never contain old or obsolete methods
- if (ik->vtable_length() > 0 &&
- !ik->vtable()->check_no_old_or_obsolete_entries()) {
- if (RC_TRACE_ENABLED(0x00004000)) {
- RC_TRACE_WITH_THREAD(0x00004000, THREAD,
- ("klassVtable::check_no_old_or_obsolete_entries failure"
- " -- OLD or OBSOLETE method found -- class: %s",
- ik->signature_name()));
- ik->vtable()->dump_vtable();
- }
- no_old_methods = false;
+ // Both array and instance classes have vtables.
+ // a vtable should never contain old or obsolete methods
+ ResourceMark rm(_thread);
+ if (k->vtable_length() > 0 &&
+ !k->vtable()->check_no_old_or_obsolete_entries()) {
+ if (RC_TRACE_ENABLED(0x00004000)) {
+ RC_TRACE_WITH_THREAD(0x00004000, _thread,
+ ("klassVtable::check_no_old_or_obsolete_entries failure"
+ " -- OLD or OBSOLETE method found -- class: %s",
+ k->signature_name()));
+ k->vtable()->dump_vtable();
}
+ no_old_methods = false;
+ }
+
+ if (k->oop_is_instance()) {
+ HandleMark hm(_thread);
+ InstanceKlass *ik = InstanceKlass::cast(k);
// an itable should never contain old or obsolete methods
if (ik->itable_length() > 0 &&
!ik->itable()->check_no_old_or_obsolete_entries()) {
if (RC_TRACE_ENABLED(0x00004000)) {
- RC_TRACE_WITH_THREAD(0x00004000, THREAD,
+ RC_TRACE_WITH_THREAD(0x00004000, _thread,
("klassItable::check_no_old_or_obsolete_entries failure"
" -- OLD or OBSOLETE method found -- class: %s",
ik->signature_name()));
@@ -3540,7 +3563,7 @@ void VM_RedefineClasses::check_class(Klass* k_oop,
ik->constants()->cache() != NULL &&
!ik->constants()->cache()->check_no_old_or_obsolete_entries()) {
if (RC_TRACE_ENABLED(0x00004000)) {
- RC_TRACE_WITH_THREAD(0x00004000, THREAD,
+ RC_TRACE_WITH_THREAD(0x00004000, _thread,
("cp-cache::check_no_old_or_obsolete_entries failure"
" -- OLD or OBSOLETE method found -- class: %s",
ik->signature_name()));
@@ -3548,19 +3571,21 @@ void VM_RedefineClasses::check_class(Klass* k_oop,
}
no_old_methods = false;
}
+ }
- if (!no_old_methods) {
- if (RC_TRACE_ENABLED(0x00004000)) {
- dump_methods();
- } else {
- tty->print_cr("INFO: use the '-XX:TraceRedefineClasses=16384' option "
- "to see more info about the following guarantee() failure.");
- }
- guarantee(false, "OLD and/or OBSOLETE method(s) found");
+  // Print diagnostics and fail the guarantee if old methods were found.
+ if (!no_old_methods) {
+ if (RC_TRACE_ENABLED(0x00004000)) {
+ dump_methods();
+ } else {
+ tty->print_cr("INFO: use the '-XX:TraceRedefineClasses=16384' option "
+ "to see more info about the following guarantee() failure.");
}
+ guarantee(false, "OLD and/or OBSOLETE method(s) found");
}
}
+
void VM_RedefineClasses::dump_methods() {
int j;
RC_TRACE(0x00004000, ("_old_methods --"));
diff --git a/src/share/vm/prims/jvmtiRedefineClasses.hpp b/src/share/vm/prims/jvmtiRedefineClasses.hpp
index ffe9a7ed8..97aa8143e 100644
--- a/src/share/vm/prims/jvmtiRedefineClasses.hpp
+++ b/src/share/vm/prims/jvmtiRedefineClasses.hpp
@@ -87,7 +87,7 @@
// parts of the_class
// - adjusting constant pool caches and vtables in other classes
// that refer to methods in the_class. These adjustments use the
-// SystemDictionary::classes_do() facility which only allows
+// ClassLoaderDataGraph::classes_do() facility which only allows
// a helper method to be specified. The interesting parameters
// that we would like to pass to the helper method are saved in
// static global fields in the VM operation.
@@ -331,10 +331,15 @@
// coordinate a cleanup of these constants with Runtime.
//
+struct JvmtiCachedClassFileData {
+ jint length;
+ unsigned char data[1];
+};
+
class VM_RedefineClasses: public VM_Operation {
private:
- // These static fields are needed by SystemDictionary::classes_do()
- // facility and the adjust_cpool_cache_and_vtable() helper:
+ // These static fields are needed by ClassLoaderDataGraph::classes_do()
+ // facility and the AdjustCpoolCacheAndVtable helper:
static Array<Method*>* _old_methods;
static Array<Method*>* _new_methods;
static Method** _matching_old_methods;
@@ -408,13 +413,6 @@ class VM_RedefineClasses: public VM_Operation {
int * emcp_method_count_p);
void transfer_old_native_function_registrations(instanceKlassHandle the_class);
- // Unevolving classes may point to methods of the_class directly
- // from their constant pool caches, itables, and/or vtables. We
- // use the SystemDictionary::classes_do() facility and this helper
- // to fix up these pointers.
- static void adjust_cpool_cache_and_vtable(Klass* k_oop, ClassLoaderData* initiating_loader, TRAPS);
- static void adjust_array_vtable(Klass* k_oop);
-
// Install the redefinition of a class
void redefine_single_class(jclass the_jclass,
Klass* scratch_class_oop, TRAPS);
@@ -480,10 +478,27 @@ class VM_RedefineClasses: public VM_Operation {
void flush_dependent_code(instanceKlassHandle k_h, TRAPS);
- static void check_class(Klass* k_oop, ClassLoaderData* initiating_loader,
- TRAPS);
static void dump_methods();
+ // Check that there are no old or obsolete methods
+ class CheckClass : public KlassClosure {
+ Thread* _thread;
+ public:
+ CheckClass(Thread* t) : _thread(t) {}
+ void do_klass(Klass* k);
+ };
+
+ // Unevolving classes may point to methods of the_class directly
+ // from their constant pool caches, itables, and/or vtables. We
+ // use the ClassLoaderDataGraph::classes_do() facility and this helper
+ // to fix up these pointers.
+ class AdjustCpoolCacheAndVtable : public KlassClosure {
+ Thread* _thread;
+ public:
+ AdjustCpoolCacheAndVtable(Thread* t) : _thread(t) {}
+ void do_klass(Klass* k);
+ };
+
public:
VM_RedefineClasses(jint class_count,
const jvmtiClassDefinition *class_defs,
@@ -499,5 +514,12 @@ class VM_RedefineClasses: public VM_Operation {
// Modifiable test must be shared between IsModifiableClass query
// and redefine implementation
static bool is_modifiable_class(oop klass_mirror);
+
+ static jint get_cached_class_file_len(JvmtiCachedClassFileData *cache) {
+ return cache == NULL ? 0 : cache->length;
+ }
+ static unsigned char * get_cached_class_file_bytes(JvmtiCachedClassFileData *cache) {
+ return cache == NULL ? NULL : cache->data;
+ }
};
#endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP
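JvmtiCachedClassFileData uses the classic trailing-array idiom: data is declared with one element, but the real allocation reserves length payload bytes directly behind the header so one block holds both, and the two static accessors stay null-safe for callers that have no cache at all. A hedged sketch of how such a blob would be sized and filled (make_cache is illustrative, not a HotSpot function):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    struct JvmtiCachedClassFileData {
      int length;                 // jint in HotSpot
      unsigned char data[1];      // payload actually continues past the struct
    };

    // Illustrative helper: allocate header + payload in one block.
    static JvmtiCachedClassFileData* make_cache(const unsigned char* bytes, int len) {
      size_t sz = offsetof(JvmtiCachedClassFileData, data) + (size_t)len;
      JvmtiCachedClassFileData* c = (JvmtiCachedClassFileData*)malloc(sz);
      if (c != NULL) {
        c->length = len;
        memcpy(c->data, bytes, (size_t)len);
      }
      return c;
    }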
diff --git a/src/share/vm/prims/jvmtiTagMap.cpp b/src/share/vm/prims/jvmtiTagMap.cpp
index 99e4af5aa..eb68b4296 100644
--- a/src/share/vm/prims/jvmtiTagMap.cpp
+++ b/src/share/vm/prims/jvmtiTagMap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2857,7 +2857,7 @@ inline bool VM_HeapWalkOperation::iterate_over_class(oop java_class) {
// references from the constant pool
{
- ConstantPool* const pool = ik->constants();
+ ConstantPool* pool = ik->constants();
for (int i = 1; i < pool->length(); i++) {
constantTag tag = pool->tag_at(i).value();
if (tag.is_string() || tag.is_klass()) {
diff --git a/src/share/vm/prims/methodHandles.cpp b/src/share/vm/prims/methodHandles.cpp
index 341049852..88b82b358 100644
--- a/src/share/vm/prims/methodHandles.cpp
+++ b/src/share/vm/prims/methodHandles.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -127,25 +127,37 @@ Handle MethodHandles::new_MemberName(TRAPS) {
}
oop MethodHandles::init_MemberName(Handle mname, Handle target) {
+ // This method is used from java.lang.invoke.MemberName constructors.
+ // It fills in the new MemberName from a java.lang.reflect.Member.
Thread* thread = Thread::current();
oop target_oop = target();
Klass* target_klass = target_oop->klass();
if (target_klass == SystemDictionary::reflect_Field_klass()) {
oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder()
int slot = java_lang_reflect_Field::slot(target_oop); // fd.index()
- int mods = java_lang_reflect_Field::modifiers(target_oop);
- oop type = java_lang_reflect_Field::type(target_oop);
- oop name = java_lang_reflect_Field::name(target_oop);
KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
- intptr_t offset = InstanceKlass::cast(k())->field_offset(slot);
- return init_field_MemberName(mname, k, accessFlags_from(mods), type, name, offset);
+ if (!k.is_null() && k->oop_is_instance()) {
+ fieldDescriptor fd(InstanceKlass::cast(k()), slot);
+ oop mname2 = init_field_MemberName(mname, fd);
+ if (mname2 != NULL) {
+ // Since we have the reified name and type handy, add them to the result.
+ if (java_lang_invoke_MemberName::name(mname2) == NULL)
+ java_lang_invoke_MemberName::set_name(mname2, java_lang_reflect_Field::name(target_oop));
+ if (java_lang_invoke_MemberName::type(mname2) == NULL)
+ java_lang_invoke_MemberName::set_type(mname2, java_lang_reflect_Field::type(target_oop));
+ }
+ return mname2;
+ }
} else if (target_klass == SystemDictionary::reflect_Method_klass()) {
oop clazz = java_lang_reflect_Method::clazz(target_oop);
int slot = java_lang_reflect_Method::slot(target_oop);
KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
if (!k.is_null() && k->oop_is_instance()) {
Method* m = InstanceKlass::cast(k())->method_with_idnum(slot);
- return init_method_MemberName(mname, m, true, k);
+ if (m == NULL || is_signature_polymorphic(m->intrinsic_id()))
+ return NULL; // do not resolve unless there is a concrete signature
+ CallInfo info(m, k());
+ return init_method_MemberName(mname, info);
}
} else if (target_klass == SystemDictionary::reflect_Constructor_klass()) {
oop clazz = java_lang_reflect_Constructor::clazz(target_oop);
@@ -153,69 +165,50 @@ oop MethodHandles::init_MemberName(Handle mname, Handle target) {
KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
if (!k.is_null() && k->oop_is_instance()) {
Method* m = InstanceKlass::cast(k())->method_with_idnum(slot);
- return init_method_MemberName(mname, m, false, k);
- }
- } else if (target_klass == SystemDictionary::MemberName_klass()) {
- // Note: This only works if the MemberName has already been resolved.
- oop clazz = java_lang_invoke_MemberName::clazz(target_oop);
- int flags = java_lang_invoke_MemberName::flags(target_oop);
-    Metadata* vmtarget = java_lang_invoke_MemberName::vmtarget(target_oop);
- intptr_t vmindex = java_lang_invoke_MemberName::vmindex(target_oop);
- KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
- int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK;
- if (vmtarget == NULL) return NULL; // not resolved
- if ((flags & IS_FIELD) != 0) {
- assert(vmtarget->is_klass(), "field vmtarget is Klass*");
- int basic_mods = (ref_kind_is_static(ref_kind) ? JVM_ACC_STATIC : 0);
- // FIXME: how does k (receiver_limit) contribute?
- KlassHandle k_vmtarget(thread, (Klass*)vmtarget);
- return init_field_MemberName(mname, k_vmtarget, accessFlags_from(basic_mods), NULL, NULL, vmindex);
- } else if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) != 0) {
- assert(vmtarget->is_method(), "method or constructor vmtarget is Method*");
- return init_method_MemberName(mname, (Method*)vmtarget, ref_kind_does_dispatch(ref_kind), k);
- } else {
- return NULL;
+ if (m == NULL) return NULL;
+ CallInfo info(m, k());
+ return init_method_MemberName(mname, info);
}
}
return NULL;
}
-oop MethodHandles::init_method_MemberName(Handle mname, Method* m, bool do_dispatch,
- KlassHandle receiver_limit_h) {
- Klass* receiver_limit = receiver_limit_h();
- AccessFlags mods = m->access_flags();
- int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS );
- int vmindex = Method::nonvirtual_vtable_index; // implies never any dispatch
- Klass* mklass = m->method_holder();
- if (receiver_limit == NULL)
- receiver_limit = mklass;
- if (m->is_initializer()) {
- flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
- } else if (mods.is_static()) {
- flags |= IS_METHOD | (JVM_REF_invokeStatic << REFERENCE_KIND_SHIFT);
- // Get vindex from itable if method holder is an interface.
- if (m->method_holder()->is_interface()) {
- vmindex = klassItable::compute_itable_index(m);
- }
- } else if (receiver_limit != mklass &&
- !receiver_limit->is_subtype_of(mklass)) {
- return NULL; // bad receiver limit
- } else if (receiver_limit->is_interface() &&
- mklass->is_interface()) {
+oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info) {
+ assert(info.resolved_appendix().is_null(), "only normal methods here");
+ KlassHandle receiver_limit = info.resolved_klass();
+ methodHandle m = info.resolved_method();
+ int flags = (jushort)( m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS );
+ int vmindex = Method::invalid_vtable_index;
+
+ switch (info.call_kind()) {
+ case CallInfo::itable_call:
+ vmindex = info.itable_index();
+ // More importantly, the itable index only works with the method holder.
+ receiver_limit = m->method_holder();
+ assert(receiver_limit->verify_itable_index(vmindex), "");
flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT);
- receiver_limit = mklass; // ignore passed-in limit; interfaces are interconvertible
- vmindex = klassItable::compute_itable_index(m);
- } else if (mklass != receiver_limit && mklass->is_interface()) {
- flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
- // it is a miranda method, so m->vtable_index is not what we want
- ResourceMark rm;
- klassVtable* vt = InstanceKlass::cast(receiver_limit)->vtable();
- vmindex = vt->index_of_miranda(m->name(), m->signature());
- } else if (!do_dispatch || m->can_be_statically_bound()) {
- flags |= IS_METHOD | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
- } else {
+ break;
+
+ case CallInfo::vtable_call:
+ vmindex = info.vtable_index();
flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
- vmindex = m->vtable_index();
+ assert(receiver_limit->is_subtype_of(m->method_holder()), "virtual call must be type-safe");
+ break;
+
+ case CallInfo::direct_call:
+ vmindex = Method::nonvirtual_vtable_index;
+ if (m->is_static()) {
+ flags |= IS_METHOD | (JVM_REF_invokeStatic << REFERENCE_KIND_SHIFT);
+ } else if (m->is_initializer()) {
+ flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
+ assert(receiver_limit == m->method_holder(), "constructor call must be exactly typed");
+ } else {
+ flags |= IS_METHOD | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
+ assert(receiver_limit->is_subtype_of(m->method_holder()), "special call must be type-safe");
+ }
+ break;
+
+ default: assert(false, "bad CallInfo"); return NULL;
}
// @CallerSensitive annotation detected
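The switch above packs everything MethodHandleNatives needs into one flags word: the low 16 bits keep the recognized modifier bits, one kind bit (IS_METHOD, IS_CONSTRUCTOR, IS_FIELD) says what the MemberName is, and the byte at REFERENCE_KIND_SHIFT says how it is invoked. Decoding, with constants mirroring java.lang.invoke.MethodHandleNatives.Constants in JDK 8 (treat the concrete values as illustrative):

    enum {
      IS_METHOD            = 0x00010000,
      IS_CONSTRUCTOR       = 0x00020000,
      IS_FIELD             = 0x00040000,
      REFERENCE_KIND_SHIFT = 24,
      REFERENCE_KIND_MASK  = 0x0F000000 >> REFERENCE_KIND_SHIFT
    };

    static int  ref_kind(int flags)  { return (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK; }
    static bool is_method(int flags) { return (flags & IS_METHOD) != 0; }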
@@ -225,7 +218,7 @@ oop MethodHandles::init_method_MemberName(Handle mname, Method* m, bool do_dispa
oop mname_oop = mname();
java_lang_invoke_MemberName::set_flags( mname_oop, flags);
- java_lang_invoke_MemberName::set_vmtarget(mname_oop, m);
+ java_lang_invoke_MemberName::set_vmtarget(mname_oop, m());
java_lang_invoke_MemberName::set_vmindex( mname_oop, vmindex); // vtable/itable index
java_lang_invoke_MemberName::set_clazz( mname_oop, receiver_limit->java_mirror());
// Note: name and type can be lazily computed by resolve_MemberName,
@@ -236,48 +229,24 @@ oop MethodHandles::init_method_MemberName(Handle mname, Method* m, bool do_dispa
// This is done eagerly, since it is readily available without
// constructing any new objects.
// TO DO: maybe intern mname_oop
- m->method_holder()->add_member_name(mname);
- return mname();
-}
+ m->method_holder()->add_member_name(m->method_idnum(), mname);
-Handle MethodHandles::init_method_MemberName(Handle mname, CallInfo& info, TRAPS) {
- Handle empty;
- if (info.resolved_appendix().not_null()) {
- // The resolved MemberName must not be accompanied by an appendix argument,
- // since there is no way to bind this value into the MemberName.
- // Caller is responsible to prevent this from happening.
- THROW_MSG_(vmSymbols::java_lang_InternalError(), "appendix", empty);
- }
- methodHandle m = info.resolved_method();
- KlassHandle defc = info.resolved_klass();
- int vmindex = -1;
- if (defc->is_interface() && m->method_holder()->is_interface()) {
- // LinkResolver does not report itable indexes! (fix this?)
- vmindex = klassItable::compute_itable_index(m());
- } else if (m->can_be_statically_bound()) {
- // LinkResolver reports vtable index even for final methods!
- vmindex = Method::nonvirtual_vtable_index;
- } else {
- vmindex = info.vtable_index();
- }
- oop res = init_method_MemberName(mname, m(), (vmindex >= 0), defc());
- assert(res == NULL || (java_lang_invoke_MemberName::vmindex(res) == vmindex), "");
- return Handle(THREAD, res);
+ return mname();
}
-oop MethodHandles::init_field_MemberName(Handle mname, KlassHandle field_holder,
- AccessFlags mods, oop type, oop name,
- intptr_t offset, bool is_setter) {
- int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS );
- flags |= IS_FIELD | ((mods.is_static() ? JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT);
+oop MethodHandles::init_field_MemberName(Handle mname, fieldDescriptor& fd, bool is_setter) {
+ int flags = (jushort)( fd.access_flags().as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS );
+ flags |= IS_FIELD | ((fd.is_static() ? JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT);
if (is_setter) flags += ((JVM_REF_putField - JVM_REF_getField) << REFERENCE_KIND_SHIFT);
- Metadata* vmtarget = field_holder();
- int vmindex = offset; // determines the field uniquely when combined with static bit
+ Metadata* vmtarget = fd.field_holder();
+ int vmindex = fd.offset(); // determines the field uniquely when combined with static bit
oop mname_oop = mname();
java_lang_invoke_MemberName::set_flags(mname_oop, flags);
java_lang_invoke_MemberName::set_vmtarget(mname_oop, vmtarget);
java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex);
- java_lang_invoke_MemberName::set_clazz(mname_oop, field_holder->java_mirror());
+ java_lang_invoke_MemberName::set_clazz(mname_oop, fd.field_holder()->java_mirror());
+ oop type = field_signature_type_or_null(fd.signature());
+ oop name = field_name_or_null(fd.name());
if (name != NULL)
java_lang_invoke_MemberName::set_name(mname_oop, name);
if (type != NULL)
@@ -290,23 +259,9 @@ oop MethodHandles::init_field_MemberName(Handle mname, KlassHandle field_holder,
// Although the fieldDescriptor::_index would also identify the field,
// we do not use it, because it is harder to decode.
// TO DO: maybe intern mname_oop
- InstanceKlass::cast(field_holder())->add_member_name(mname);
return mname();
}
-Handle MethodHandles::init_field_MemberName(Handle mname, FieldAccessInfo& info, TRAPS) {
- return Handle();
-#if 0 // FIXME
- KlassHandle field_holder = info.klass();
- intptr_t field_offset = info.field_offset();
- return init_field_MemberName(mname_oop, field_holder(),
- info.access_flags(),
- type, name,
- field_offset, false /*is_setter*/);
-#endif
-}
-
-
// JVM 2.9 Special Methods:
// A method is signature polymorphic if and only if all of the following conditions hold :
// * It is declared in the java.lang.invoke.MethodHandle class.
@@ -562,12 +517,12 @@ static oop object_java_mirror() {
return SystemDictionary::Object_klass()->java_mirror();
}
-static oop field_name_or_null(Symbol* s) {
+oop MethodHandles::field_name_or_null(Symbol* s) {
if (s == NULL) return NULL;
return StringTable::lookup(s);
}
-static oop field_signature_type_or_null(Symbol* s) {
+oop MethodHandles::field_signature_type_or_null(Symbol* s) {
if (s == NULL) return NULL;
BasicType bt = FieldType::basic_type(s);
if (is_java_primitive(bt)) {
@@ -665,11 +620,9 @@ Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) {
case IS_METHOD:
{
CallInfo result;
- bool do_dispatch = true; // default, neutral setting
{
assert(!HAS_PENDING_EXCEPTION, "");
if (ref_kind == JVM_REF_invokeStatic) {
- //do_dispatch = false; // no need, since statics are never dispatched
LinkResolver::resolve_static_call(result,
defc, name, type, KlassHandle(), false, false, THREAD);
} else if (ref_kind == JVM_REF_invokeInterface) {
@@ -680,7 +633,6 @@ Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) {
LinkResolver::resolve_handle_call(result,
defc, name, type, KlassHandle(), THREAD);
} else if (ref_kind == JVM_REF_invokeSpecial) {
- do_dispatch = false; // force non-virtual linkage
LinkResolver::resolve_special_call(result,
defc, name, type, KlassHandle(), false, THREAD);
} else if (ref_kind == JVM_REF_invokeVirtual) {
@@ -693,7 +645,14 @@ Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) {
return empty;
}
}
- return init_method_MemberName(mname, result, THREAD);
+ if (result.resolved_appendix().not_null()) {
+ // The resolved MemberName must not be accompanied by an appendix argument,
+ // since there is no way to bind this value into the MemberName.
+ // Caller is responsible to prevent this from happening.
+ THROW_MSG_(vmSymbols::java_lang_InternalError(), "appendix", empty);
+ }
+ oop mname2 = init_method_MemberName(mname, result);
+ return Handle(THREAD, mname2);
}
case IS_CONSTRUCTOR:
{
@@ -711,22 +670,21 @@ Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) {
}
}
assert(result.is_statically_bound(), "");
- return init_method_MemberName(mname, result, THREAD);
+ oop mname2 = init_method_MemberName(mname, result);
+ return Handle(THREAD, mname2);
}
case IS_FIELD:
{
- // This is taken from LinkResolver::resolve_field, sans access checks.
- fieldDescriptor fd; // find_field initializes fd if found
- KlassHandle sel_klass(THREAD, InstanceKlass::cast(defc())->find_field(name, type, &fd));
- // check if field exists; i.e., if a klass containing the field def has been selected
- if (sel_klass.is_null()) return empty; // should not happen
- oop type = field_signature_type_or_null(fd.signature());
- oop name = field_name_or_null(fd.name());
- bool is_setter = (ref_kind_is_valid(ref_kind) && ref_kind_is_setter(ref_kind));
- mname = Handle(THREAD,
- init_field_MemberName(mname, sel_klass,
- fd.access_flags(), type, name, fd.offset(), is_setter));
- return mname;
+      fieldDescriptor result; // resolve_field initializes result if found
+ {
+ assert(!HAS_PENDING_EXCEPTION, "");
+ LinkResolver::resolve_field(result, defc, name, type, KlassHandle(), Bytecodes::_nop, false, false, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ return empty;
+ }
+ }
+ oop mname2 = init_field_MemberName(mname, result, ref_kind_is_setter(ref_kind));
+ return Handle(THREAD, mname2);
}
default:
THROW_MSG_(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format", empty);
@@ -785,7 +743,6 @@ void MethodHandles::expand_MemberName(Handle mname, int suppress, TRAPS) {
}
case IS_FIELD:
{
- // This is taken from LinkResolver::resolve_field, sans access checks.
assert(vmtarget->is_klass(), "field vmtarget is Klass*");
if (!((Klass*) vmtarget)->oop_is_instance()) break;
instanceKlassHandle defc(THREAD, (Klass*) vmtarget);
@@ -864,11 +821,7 @@ int MethodHandles::find_MemberNames(KlassHandle k,
Handle result(thread, results->obj_at(rfill++));
if (!java_lang_invoke_MemberName::is_instance(result()))
return -99; // caller bug!
- oop type = field_signature_type_or_null(st.signature());
- oop name = field_name_or_null(st.name());
- oop saved = MethodHandles::init_field_MemberName(result, st.klass(),
- st.access_flags(), type, name,
- st.offset());
+ oop saved = MethodHandles::init_field_MemberName(result, st.field_descriptor());
if (saved != result())
results->obj_at_put(rfill-1, saved); // show saved instance to user
} else if (++overflow >= overflow_limit) {
@@ -918,7 +871,8 @@ int MethodHandles::find_MemberNames(KlassHandle k,
Handle result(thread, results->obj_at(rfill++));
if (!java_lang_invoke_MemberName::is_instance(result()))
return -99; // caller bug!
- oop saved = MethodHandles::init_method_MemberName(result, m, true, NULL);
+ CallInfo info(m);
+ oop saved = MethodHandles::init_method_MemberName(result, info);
if (saved != result())
results->obj_at_put(rfill-1, saved); // show saved instance to user
} else if (++overflow >= overflow_limit) {
@@ -935,7 +889,8 @@ int MethodHandles::find_MemberNames(KlassHandle k,
// MemberNameTable
//
-MemberNameTable::MemberNameTable() : GrowableArray<jweak>(10, true) {
+MemberNameTable::MemberNameTable(int methods_cnt)
+ : GrowableArray<jweak>(methods_cnt, true) {
assert_locked_or_safepoint(MemberNameTable_lock);
}
@@ -949,29 +904,18 @@ MemberNameTable::~MemberNameTable() {
}
}
-// Return entry index if found, return -1 otherwise.
-int MemberNameTable::find_member_name(oop mem_name) {
+void MemberNameTable::add_member_name(int index, jweak mem_name_wref) {
assert_locked_or_safepoint(MemberNameTable_lock);
- int len = this->length();
-
- for (int idx = 0; idx < len; idx++) {
- jweak ref = this->at(idx);
- oop entry = JNIHandles::resolve(ref);
- if (entry == mem_name) {
- return idx;
- }
- }
- return -1;
+ this->at_put_grow(index, mem_name_wref);
}
-void MemberNameTable::add_member_name(jweak mem_name_wref) {
+// Return a member name oop or NULL.
+oop MemberNameTable::get_member_name(int index) {
assert_locked_or_safepoint(MemberNameTable_lock);
- oop mem_name = JNIHandles::resolve(mem_name_wref);
- // Each member name may appear just once: add only if not found
- if (find_member_name(mem_name) == -1) {
- this->append(mem_name_wref);
- }
+ jweak ref = this->at(index);
+ oop mem_name = JNIHandles::resolve(ref);
+ return mem_name;
}
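With an index parameter the table stops being an append-only list guarded by a linear duplicate scan: the index is the method's idnum, each method owns one slot, add is at_put_grow() and get is a plain at(). The growth behaviour in miniature:

    #include <vector>

    // Miniature of GrowableArray::at_put_grow as used above: index by method
    // idnum, growing with a fill value so unset slots read back as null.
    template <typename T>
    struct Grow {
      std::vector<T> v;
      void at_put_grow(int i, T x, T fill = T()) {
        if ((int)v.size() <= i) v.resize(i + 1, fill);
        v[i] = x;
      }
      T at(int i) const { return v[i]; }
    };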
#if INCLUDE_JVMTI
@@ -1139,7 +1083,12 @@ JVM_ENTRY(jobject, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh,
if (VerifyMethodHandles && caller_jh != NULL &&
java_lang_invoke_MemberName::clazz(mname()) != NULL) {
Klass* reference_klass = java_lang_Class::as_Klass(java_lang_invoke_MemberName::clazz(mname()));
- if (reference_klass != NULL) {
+ if (reference_klass != NULL && reference_klass->oop_is_objArray()) {
+ reference_klass = ObjArrayKlass::cast(reference_klass)->bottom_klass();
+ }
+
+ // Reflection::verify_class_access can only handle instance classes.
+ if (reference_klass != NULL && reference_klass->oop_is_instance()) {
// Emulate LinkResolver::check_klass_accessability.
Klass* caller = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(caller_jh));
if (!Reflection::verify_class_access(caller,
@@ -1224,7 +1173,8 @@ JVM_ENTRY(jobject, MHN_getMemberVMInfo(JNIEnv *env, jobject igcls, jobject mname
x = ((Klass*) vmtarget)->java_mirror();
} else if (vmtarget->is_method()) {
Handle mname2 = MethodHandles::new_MemberName(CHECK_NULL);
- x = MethodHandles::init_method_MemberName(mname2, (Method*)vmtarget, false, NULL);
+ CallInfo info((Method*)vmtarget);
+ x = MethodHandles::init_method_MemberName(mname2, info);
}
result->obj_at_put(1, x);
return JNIHandles::make_local(env, result());
@@ -1298,6 +1248,28 @@ JVM_ENTRY(void, MHN_setCallSiteTargetVolatile(JNIEnv* env, jobject igcls, jobjec
}
JVM_END
+/**
+ * Throws a java/lang/UnsupportedOperationException unconditionally.
+ * This is required by the specification of MethodHandle.invoke if
+ * invoked directly.
+ */
+JVM_ENTRY(jobject, MH_invoke_UOE(JNIEnv* env, jobject mh, jobjectArray args)) {
+ THROW_MSG_NULL(vmSymbols::java_lang_UnsupportedOperationException(), "MethodHandle.invoke cannot be invoked reflectively");
+ return NULL;
+}
+JVM_END
+
+/**
+ * Throws a java/lang/UnsupportedOperationException unconditionally.
+ * This is required by the specification of MethodHandle.invokeExact if
+ * invoked directly.
+ */
+JVM_ENTRY(jobject, MH_invokeExact_UOE(JNIEnv* env, jobject mh, jobjectArray args)) {
+ THROW_MSG_NULL(vmSymbols::java_lang_UnsupportedOperationException(), "MethodHandle.invokeExact cannot be invoked reflectively");
+ return NULL;
+}
+JVM_END
+
/// JVM_RegisterMethodHandleMethods
#undef CS // Solaris builds complain
@@ -1317,7 +1289,7 @@ JVM_END
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
// These are the native methods on java.lang.invoke.MethodHandleNatives.
-static JNINativeMethod required_methods_JDK8[] = {
+static JNINativeMethod MHN_methods[] = {
{CC"init", CC"("MEM""OBJ")V", FN_PTR(MHN_init_Mem)},
{CC"expand", CC"("MEM")V", FN_PTR(MHN_expand_Mem)},
{CC"resolve", CC"("MEM""CLS")"MEM, FN_PTR(MHN_resolve_Mem)},
@@ -1335,8 +1307,28 @@ static JNINativeMethod required_methods_JDK8[] = {
{CC"getMemberVMInfo", CC"("MEM")"OBJ, FN_PTR(MHN_getMemberVMInfo)}
};
-// This one function is exported, used by NativeLookup.
+static JNINativeMethod MH_methods[] = {
+ // UnsupportedOperationException throwers
+ {CC"invoke", CC"(["OBJ")"OBJ, FN_PTR(MH_invoke_UOE)},
+ {CC"invokeExact", CC"(["OBJ")"OBJ, FN_PTR(MH_invokeExact_UOE)}
+};
+/**
+ * Helper method to register native methods.
+ */
+static bool register_natives(JNIEnv* env, jclass clazz, const JNINativeMethod* methods, jint nMethods) {
+ int status = env->RegisterNatives(clazz, methods, nMethods);
+ if (status != JNI_OK || env->ExceptionOccurred()) {
+ warning("JSR 292 method handle code is mismatched to this JVM. Disabling support.");
+ env->ExceptionClear();
+ return false;
+ }
+ return true;
+}
+
+/**
+ * This one function is exported, used by NativeLookup.
+ */
JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) {
if (!EnableInvokeDynamic) {
warning("JSR 292 is disabled in this JVM. Use -XX:+UnlockDiagnosticVMOptions -XX:+EnableInvokeDynamic to enable.");
@@ -1354,16 +1346,14 @@ JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class))
MH_class = (jclass) JNIHandles::make_local(env, mirror);
}
- int status;
-
if (enable_MH) {
ThreadToNativeFromVM ttnfv(thread);
- status = env->RegisterNatives(MHN_class, required_methods_JDK8, sizeof(required_methods_JDK8)/sizeof(JNINativeMethod));
- if (status != JNI_OK || env->ExceptionOccurred()) {
- warning("JSR 292 method handle code is mismatched to this JVM. Disabling support.");
- enable_MH = false;
- env->ExceptionClear();
+ if (enable_MH) {
+ enable_MH = register_natives(env, MHN_class, MHN_methods, sizeof(MHN_methods)/sizeof(JNINativeMethod));
+ }
+ if (enable_MH) {
+ enable_MH = register_natives(env, MH_class, MH_methods, sizeof(MH_methods)/sizeof(JNINativeMethod));
}
}
diff --git a/src/share/vm/prims/methodHandles.hpp b/src/share/vm/prims/methodHandles.hpp
index a687f53e5..49a02ae49 100644
--- a/src/share/vm/prims/methodHandles.hpp
+++ b/src/share/vm/prims/methodHandles.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,19 +49,18 @@ class MethodHandles: AllStatic {
// Adapters.
static MethodHandlesAdapterBlob* _adapter_code;
+ // utility functions for reifying names and types
+ static oop field_name_or_null(Symbol* s);
+ static oop field_signature_type_or_null(Symbol* s);
+
public:
// working with member names
static Handle resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type
static void expand_MemberName(Handle mname, int suppress, TRAPS); // expand defc/name/type if missing
static Handle new_MemberName(TRAPS); // must be followed by init_MemberName
static oop init_MemberName(Handle mname_h, Handle target_h); // compute vmtarget/vmindex from target
- static oop init_method_MemberName(Handle mname_h, Method* m, bool do_dispatch,
- KlassHandle receiver_limit_h);
- static oop init_field_MemberName(Handle mname_h, KlassHandle field_holder_h,
- AccessFlags mods, oop type, oop name,
- intptr_t offset, bool is_setter = false);
- static Handle init_method_MemberName(Handle mname_h, CallInfo& info, TRAPS);
- static Handle init_field_MemberName(Handle mname_h, FieldAccessInfo& info, TRAPS);
+ static oop init_field_MemberName(Handle mname_h, fieldDescriptor& fd, bool is_setter = false);
+ static oop init_method_MemberName(Handle mname_h, CallInfo& info);
static int method_ref_kind(Method* m, bool do_dispatch_if_possible = true);
static int find_MemberNames(KlassHandle k, Symbol* name, Symbol* sig,
int mflags, KlassHandle caller,
@@ -222,7 +221,6 @@ public:
}
};
-
//------------------------------------------------------------------------------
// MethodHandlesAdapterGenerator
//
@@ -236,13 +234,13 @@ public:
//------------------------------------------------------------------------------
// MemberNameTable
//
+
class MemberNameTable : public GrowableArray<jweak> {
public:
- MemberNameTable();
+ MemberNameTable(int methods_cnt);
~MemberNameTable();
- void add_member_name(jweak mem_name_ref);
- private:
- int find_member_name(oop mem_name);
+ void add_member_name(int index, jweak mem_name_ref);
+ oop get_member_name(int index);
#if INCLUDE_JVMTI
public:
diff --git a/src/share/vm/prims/nativeLookup.cpp b/src/share/vm/prims/nativeLookup.cpp
index 6162ae850..990600eea 100644
--- a/src/share/vm/prims/nativeLookup.cpp
+++ b/src/share/vm/prims/nativeLookup.cpp
@@ -383,10 +383,7 @@ address NativeLookup::lookup_base(methodHandle method, bool& in_base_library, TR
address NativeLookup::lookup(methodHandle method, bool& in_base_library, TRAPS) {
if (!method->has_native_function()) {
- address entry =
- method->intrinsic_id() == vmIntrinsics::_invokeGeneric ?
- SharedRuntime::native_method_throw_unsupported_operation_exception_entry() :
- lookup_base(method, in_base_library, CHECK_NULL);
+ address entry = lookup_base(method, in_base_library, CHECK_NULL);
method->set_native_function(entry,
Method::native_bind_event_is_interesting);
// -verbose:jni printing
diff --git a/src/share/vm/prims/unsafe.cpp b/src/share/vm/prims/unsafe.cpp
index c51156134..e37b0c80d 100644
--- a/src/share/vm/prims/unsafe.cpp
+++ b/src/share/vm/prims/unsafe.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
#include "runtime/reflection.hpp"
#include "runtime/synchronizer.hpp"
#include "services/threadService.hpp"
+#include "trace/tracing.hpp"
#include "utilities/copy.hpp"
#include "utilities/dtrace.hpp"
@@ -115,8 +116,6 @@ inline jint invocation_key_to_method_slot(jint key) {
inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) {
jlong byte_offset = field_offset_to_byte_offset(field_offset);
- // Don't allow unsafe to be used to read or write the header word of oops
- assert(p == NULL || field_offset >= oopDesc::header_size(), "offset must be outside of header");
#ifdef ASSERT
if (p != NULL) {
assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
@@ -1206,6 +1205,7 @@ UNSAFE_END
UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time))
UnsafeWrapper("Unsafe_Park");
+ EventThreadPark event;
#ifndef USDT2
HS_DTRACE_PROBE3(hotspot, thread__park__begin, thread->parker(), (int) isAbsolute, time);
#else /* USDT2 */
@@ -1220,6 +1220,13 @@ UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute,
HOTSPOT_THREAD_PARK_END(
(uintptr_t) thread->parker());
#endif /* USDT2 */
+ if (event.should_commit()) {
+ oop obj = thread->current_park_blocker();
+ event.set_klass(obj ? obj->klass() : NULL);
+ event.set_timeout(time);
+ event.set_address(obj ? (TYPE_ADDRESS) (uintptr_t) obj : 0);
+ event.commit();
+ }
UNSAFE_END
UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread))
diff --git a/src/share/vm/prims/whitebox.cpp b/src/share/vm/prims/whitebox.cpp
index 7742c0361..6f6a2000a 100644
--- a/src/share/vm/prims/whitebox.cpp
+++ b/src/share/vm/prims/whitebox.cpp
@@ -33,10 +33,12 @@
#include "prims/whitebox.hpp"
#include "prims/wbtestmethods/parserTests.hpp"
+#include "runtime/arguments.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
+#include "utilities/exceptions.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/concurrentMark.hpp"
@@ -93,6 +95,11 @@ WB_ENTRY(jboolean, WB_IsClassAlive(JNIEnv* env, jobject target, jstring name))
return closure.found();
WB_END
+WB_ENTRY(jlong, WB_GetCompressedOopsMaxHeapSize(JNIEnv* env, jobject o)) {
+ return (jlong)Arguments::max_heap_for_compressed_oops();
+}
+WB_END
+
WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
CollectorPolicy * p = Universe::heap()->collector_policy();
gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
@@ -127,7 +134,7 @@ WB_ENTRY(jint, WB_G1RegionSize(JNIEnv* env, jobject o))
WB_END
#endif // INCLUDE_ALL_GCS
-#ifdef INCLUDE_NMT
+#if INCLUDE_NMT
// Alloc memory using the test memory type so that we can use that to see if
// NMT picks it up correctly
WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
@@ -158,7 +165,7 @@ WB_END
WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
- os::commit_memory((char *)(uintptr_t)addr, size);
+ os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem);
MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest);
WB_END
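The #ifdef-to-#if change above is a genuine bug fix, not style: HotSpot feature macros such as INCLUDE_NMT are always defined, to either 0 or 1, so #ifdef takes the branch even with the feature off, while #if respects the value:

    #define INCLUDE_NMT 0

    #ifdef INCLUDE_NMT
    /* compiled even though the feature is off -- the old, buggy guard */
    #endif

    #if INCLUDE_NMT
    /* compiled only when INCLUDE_NMT is 1 -- the corrected guard */
    #endif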
@@ -180,6 +187,10 @@ WB_ENTRY(jboolean, WB_NMTWaitForDataMerge(JNIEnv* env))
return MemTracker::wbtest_wait_for_data_merge();
WB_END
+WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
+ return MemTracker::tracking_level() == MemTracker::NMT_detail;
+WB_END
+
#endif // INCLUDE_NMT
static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@@ -195,12 +206,22 @@ WB_ENTRY(void, WB_DeoptimizeAll(JNIEnv* env, jobject o))
VMThread::execute(&op);
WB_END
-WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method))
+WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
MutexLockerEx mu(Compile_lock);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
int result = 0;
- nmethod* code = mh->code();
+ nmethod* code;
+ if (is_osr) {
+ int bci = InvocationEntryBci;
+ while ((code = mh->lookup_osr_nmethod_for(bci, CompLevel_none, false)) != NULL) {
+ code->mark_for_deoptimization();
+ ++result;
+ bci = code->osr_entry_bci() + 1;
+ }
+ } else {
+ code = mh->code();
+ }
if (code != NULL) {
code->mark_for_deoptimization();
++result;
@@ -213,22 +234,26 @@ WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method))
return result;
WB_END
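A method can have several OSR nmethods alive at once, one per on-stack-replacement entry bci, which is why the deoptimization above loops: each hit is marked and the query resumes just past its entry bci so the same nmethod cannot come back twice. A toy model (the ordered-map lookup below is an assumption standing in for Method::lookup_osr_nmethod_for, not its exact contract):

    #include <map>

    struct OsrNmethod { int entry_bci; bool marked; };

    // Assumed lookup: first OSR version at or above the requested bci.
    static OsrNmethod* lookup_osr_from(std::map<int, OsrNmethod>& osr, int bci) {
      std::map<int, OsrNmethod>::iterator it = osr.lower_bound(bci);
      return it == osr.end() ? 0 : &it->second;
    }

    static int deoptimize_all_osr(std::map<int, OsrNmethod>& osr) {
      int marked = 0;
      int bci = 0;                              // stands in for InvocationEntryBci
      OsrNmethod* m;
      while ((m = lookup_osr_from(osr, bci)) != 0) {
        m->marked = true;                       // mark_for_deoptimization()
        ++marked;
        bci = m->entry_bci + 1;                 // resume past this entry
      }
      return marked;
    }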
-WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method))
+WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
MutexLockerEx mu(Compile_lock);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
- nmethod* code = mh->code();
+ nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
if (code == NULL) {
return JNI_FALSE;
}
return (code->is_alive() && !code->is_marked_for_deoptimization());
WB_END
-WB_ENTRY(jboolean, WB_IsMethodCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level))
+WB_ENTRY(jboolean, WB_IsMethodCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level, jboolean is_osr))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
MutexLockerEx mu(Compile_lock);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
- return CompilationPolicy::can_be_compiled(mh, comp_level);
+ if (is_osr) {
+ return CompilationPolicy::can_be_osr_compiled(mh, comp_level);
+ } else {
+ return CompilationPolicy::can_be_compiled(mh, comp_level);
+ }
WB_END
WB_ENTRY(jboolean, WB_IsMethodQueuedForCompilation(JNIEnv* env, jobject o, jobject method))
@@ -238,18 +263,28 @@ WB_ENTRY(jboolean, WB_IsMethodQueuedForCompilation(JNIEnv* env, jobject o, jobje
return mh->queued_for_compilation();
WB_END
-WB_ENTRY(jint, WB_GetMethodCompilationLevel(JNIEnv* env, jobject o, jobject method))
+WB_ENTRY(jint, WB_GetMethodCompilationLevel(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
- nmethod* code = mh->code();
+ nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
return (code != NULL ? code->comp_level() : CompLevel_none);
WB_END
+WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level, jboolean is_osr))
+ jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+ methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
+ if (is_osr) {
+ mh->set_not_osr_compilable(comp_level, true /* report */, "WhiteBox");
+ } else {
+ mh->set_not_compilable(comp_level, true /* report */, "WhiteBox");
+ }
+WB_END
-WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level))
+WB_ENTRY(jint, WB_GetMethodEntryBci(JNIEnv* env, jobject o, jobject method))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
- mh->set_not_compilable(comp_level, true /* report */, "WhiteBox");
+ nmethod* code = mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false);
+ return (code != NULL && code->is_osr_method() ? code->osr_entry_bci() : InvocationEntryBci);
WB_END
WB_ENTRY(jboolean, WB_TestSetDontInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value))
@@ -260,12 +295,15 @@ WB_ENTRY(jboolean, WB_TestSetDontInlineMethod(JNIEnv* env, jobject o, jobject me
return result;
WB_END
-WB_ENTRY(jint, WB_GetCompileQueuesSize(JNIEnv* env, jobject o))
- return CompileBroker::queue_size(CompLevel_full_optimization) /* C2 */ +
- CompileBroker::queue_size(CompLevel_full_profile) /* C1 */;
+WB_ENTRY(jint, WB_GetCompileQueueSize(JNIEnv* env, jobject o, jint comp_level))
+ if (comp_level == CompLevel_any) {
+ return CompileBroker::queue_size(CompLevel_full_optimization) /* C2 */ +
+ CompileBroker::queue_size(CompLevel_full_profile) /* C1 */;
+ } else {
+ return CompileBroker::queue_size(comp_level);
+ }
WB_END
-
WB_ENTRY(jboolean, WB_TestSetForceInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
@@ -274,10 +312,10 @@ WB_ENTRY(jboolean, WB_TestSetForceInlineMethod(JNIEnv* env, jobject o, jobject m
return result;
WB_END
-WB_ENTRY(jboolean, WB_EnqueueMethodForCompilation(JNIEnv* env, jobject o, jobject method, jint comp_level))
+WB_ENTRY(jboolean, WB_EnqueueMethodForCompilation(JNIEnv* env, jobject o, jobject method, jint comp_level, jint bci))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
- nmethod* nm = CompileBroker::compile_method(mh, InvocationEntryBci, comp_level, mh, mh->invocation_count(), "WhiteBox", THREAD);
+ nmethod* nm = CompileBroker::compile_method(mh, bci, comp_level, mh, mh->invocation_count(), "WhiteBox", THREAD);
MutexLockerEx mu(Compile_lock);
return (mh->queued_for_compilation() || nm != NULL);
WB_END
@@ -323,15 +361,24 @@ WB_ENTRY(jboolean, WB_IsInStringTable(JNIEnv* env, jobject o, jstring javaString
return (StringTable::lookup(name, len) != NULL);
WB_END
-
WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o))
Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(true);
Universe::heap()->collect(GCCause::_last_ditch_collection);
WB_END
-WB_ENTRY(jlong, WB_ReserveMemory(JNIEnv* env, jobject o, jlong size))
- return (jlong)os::reserve_memory(size, NULL, 0);
+WB_ENTRY(void, WB_ReadReservedMemory(JNIEnv* env, jobject o))
+ // static+volatile in order to force the read to happen
+ // (not be eliminated by the compiler)
+ static char c;
+ static volatile char* p;
+
+ p = os::reserve_memory(os::vm_allocation_granularity(), NULL, 0);
+ if (p == NULL) {
+ THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Failed to reserve memory");
+ }
+
+ c = *p;
WB_END
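readReservedMemory gives crash tests a direct trigger: os::reserve_memory() returns address space that is reserved but not committed, so the volatile read is expected to fault, and static + volatile keep the compiler from eliding the access. A POSIX analogue (Linux mmap is an assumption of this sketch; it terminates with SIGSEGV by design):

    #include <sys/mman.h>

    int main() {
      // Reserve-without-commit analogue: PROT_NONE pages may not be read.
      volatile char* p = (volatile char*) mmap(0, 4096, PROT_NONE,
                                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == (volatile char*) MAP_FAILED) return 1;
      static volatile char c;
      c = *p;                                   // expected to fault (SIGSEGV)
      return (int) c;
    }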
//Some convenience methods to deal with objects from java
@@ -395,6 +442,8 @@ static JNINativeMethod methods[] = {
CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
(void*) &WB_ParseCommandLine
},
+ {CC"getCompressedOopsMaxHeapSize", CC"()J",
+ (void*)&WB_GetCompressedOopsMaxHeapSize},
{CC"printHeapSizes", CC"()V", (void*)&WB_PrintHeapSizes },
#if INCLUDE_ALL_GCS
{CC"g1InConcurrentMark", CC"()Z", (void*)&WB_G1InConcurrentMark},
@@ -402,7 +451,7 @@ static JNINativeMethod methods[] = {
{CC"g1NumFreeRegions", CC"()J", (void*)&WB_G1NumFreeRegions },
{CC"g1RegionSize", CC"()I", (void*)&WB_G1RegionSize },
#endif // INCLUDE_ALL_GCS
-#ifdef INCLUDE_NMT
+#if INCLUDE_NMT
{CC"NMTMalloc", CC"(J)J", (void*)&WB_NMTMalloc },
{CC"NMTFree", CC"(J)V", (void*)&WB_NMTFree },
{CC"NMTReserveMemory", CC"(J)J", (void*)&WB_NMTReserveMemory },
@@ -410,34 +459,36 @@ static JNINativeMethod methods[] = {
{CC"NMTUncommitMemory", CC"(JJ)V", (void*)&WB_NMTUncommitMemory },
{CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
{CC"NMTWaitForDataMerge", CC"()Z", (void*)&WB_NMTWaitForDataMerge},
+ {CC"NMTIsDetailSupported",CC"()Z", (void*)&WB_NMTIsDetailSupported},
#endif // INCLUDE_NMT
{CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll },
- {CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;)I",
+ {CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;Z)I",
(void*)&WB_DeoptimizeMethod },
- {CC"isMethodCompiled", CC"(Ljava/lang/reflect/Executable;)Z",
+ {CC"isMethodCompiled", CC"(Ljava/lang/reflect/Executable;Z)Z",
(void*)&WB_IsMethodCompiled },
- {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Executable;I)Z",
+ {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Executable;IZ)Z",
(void*)&WB_IsMethodCompilable},
{CC"isMethodQueuedForCompilation",
CC"(Ljava/lang/reflect/Executable;)Z", (void*)&WB_IsMethodQueuedForCompilation},
{CC"makeMethodNotCompilable",
- CC"(Ljava/lang/reflect/Executable;I)V", (void*)&WB_MakeMethodNotCompilable},
+ CC"(Ljava/lang/reflect/Executable;IZ)V", (void*)&WB_MakeMethodNotCompilable},
{CC"testSetDontInlineMethod",
CC"(Ljava/lang/reflect/Executable;Z)Z", (void*)&WB_TestSetDontInlineMethod},
{CC"getMethodCompilationLevel",
- CC"(Ljava/lang/reflect/Executable;)I", (void*)&WB_GetMethodCompilationLevel},
- {CC"getCompileQueuesSize",
- CC"()I", (void*)&WB_GetCompileQueuesSize},
+ CC"(Ljava/lang/reflect/Executable;Z)I", (void*)&WB_GetMethodCompilationLevel},
+ {CC"getMethodEntryBci",
+ CC"(Ljava/lang/reflect/Executable;)I", (void*)&WB_GetMethodEntryBci},
+ {CC"getCompileQueueSize",
+ CC"(I)I", (void*)&WB_GetCompileQueueSize},
{CC"testSetForceInlineMethod",
CC"(Ljava/lang/reflect/Executable;Z)Z", (void*)&WB_TestSetForceInlineMethod},
{CC"enqueueMethodForCompilation",
- CC"(Ljava/lang/reflect/Executable;I)Z", (void*)&WB_EnqueueMethodForCompilation},
+ CC"(Ljava/lang/reflect/Executable;II)Z", (void*)&WB_EnqueueMethodForCompilation},
{CC"clearMethodState",
CC"(Ljava/lang/reflect/Executable;)V", (void*)&WB_ClearMethodState},
{CC"isInStringTable", CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable },
{CC"fullGC", CC"()V", (void*)&WB_FullGC },
-
- {CC"reserveMemory", CC"(J)J", (void*)&WB_ReserveMemory },
+ {CC"readReservedMemory", CC"()V", (void*)&WB_ReadReservedMemory },
};
#undef CC
diff --git a/src/share/vm/runtime/advancedThresholdPolicy.cpp b/src/share/vm/runtime/advancedThresholdPolicy.cpp
index e7558fda3..10cb8d3cc 100644
--- a/src/share/vm/runtime/advancedThresholdPolicy.cpp
+++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp
@@ -68,7 +68,7 @@ void AdvancedThresholdPolicy::initialize() {
}
#endif
-
+ set_increase_threshold_at_ratio();
set_start_time(os::javaTimeMillis());
}
@@ -205,6 +205,17 @@ double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k)
double queue_size = CompileBroker::queue_size(level);
int comp_count = compiler_count(level);
double k = queue_size / (feedback_k * comp_count) + 1;
+
+ // Increase C1 compile threshold when the code cache is filled more
+ // than specified by IncreaseFirstTierCompileThresholdAt percentage.
+ // The main intention is to keep enough free space for C2 compiled code
+ // to achieve peak performance if the code cache is under stress.
+ if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
+ double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
+ if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
+ k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
+ }
+ }
return k;
}
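The new branch only engages in full tiered mode and only for the lower tiers. Assuming reverse_free_ratio() is roughly capacity over free space, it grows as the code cache fills, and once it crosses the configured point the C1 threshold scale k inflates exponentially, throttling C1 output so C2 code still has room. The scaling in isolation:

    #include <cmath>

    // k is multiplied by e^(reverse_free_ratio - threshold_ratio) once the
    // code cache is fuller than the configured point.
    static double scale_k(double k, double reverse_free_ratio, double threshold_ratio) {
      if (reverse_free_ratio > threshold_ratio) {
        k *= std::exp(reverse_free_ratio - threshold_ratio);
      }
      return k;
    }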
diff --git a/src/share/vm/runtime/advancedThresholdPolicy.hpp b/src/share/vm/runtime/advancedThresholdPolicy.hpp
index 90a1ca174..4ab765358 100644
--- a/src/share/vm/runtime/advancedThresholdPolicy.hpp
+++ b/src/share/vm/runtime/advancedThresholdPolicy.hpp
@@ -201,9 +201,12 @@ class AdvancedThresholdPolicy : public SimpleThresholdPolicy {
// Is method profiled enough?
bool is_method_profiled(Method* method);
+ double _increase_threshold_at_ratio;
+
protected:
void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level);
+ void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); }
void set_start_time(jlong t) { _start_time = t; }
jlong start_time() const { return _start_time; }
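
Taken together, a minimal standalone sketch of the effect (purely
illustrative: the flag value is assumed to be 50, and reverse_free_ratio is
taken here as code-cache capacity over free space):

  #include <cmath>
  #include <cstdio>

  int main() {
    const double increase_at_pct = 50;                      // assumed IncreaseFirstTierCompileThresholdAt
    // As in set_increase_threshold_at_ratio(): 100 / (100 - 50) = 2.0.
    const double increase_threshold_at_ratio = 100 / (100 - increase_at_pct);
    const double sample_ratios[] = { 1.5, 2.0, 4.0, 8.0 };  // sample reverse free ratios
    for (double r : sample_ratios) {
      double k = 1.0;                                       // baseline scale from queue feedback
      if (r > increase_threshold_at_ratio) {
        k *= std::exp(r - increase_threshold_at_ratio);     // amplification as in threshold_scale()
      }
      std::printf("reverse_free_ratio=%.1f -> k=%.2f\n", r, k);
    }
    return 0;
  }

The exponential keeps C1 thresholds near their defaults while the cache has
room and raises them sharply as it fills, preserving space for C2 output.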
diff --git a/src/share/vm/runtime/aprofiler.cpp b/src/share/vm/runtime/aprofiler.cpp
deleted file mode 100644
index e71bfb587..000000000
--- a/src/share/vm/runtime/aprofiler.cpp
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "gc_interface/collectedHeap.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/space.hpp"
-#include "oops/oop.inline.hpp"
-#include "oops/oop.inline2.hpp"
-#include "runtime/aprofiler.hpp"
-
-
-bool AllocationProfiler::_active = false;
-GrowableArray<Klass*>* AllocationProfiler::_print_array = NULL;
-
-
-class AllocProfClosure : public ObjectClosure {
- public:
- void do_object(oop obj) {
- Klass* k = obj->klass();
- k->set_alloc_count(k->alloc_count() + 1);
- k->set_alloc_size(k->alloc_size() + obj->size());
- }
-};
-
-
-void AllocationProfiler::iterate_since_last_gc() {
- if (is_active()) {
- AllocProfClosure blk;
- GenCollectedHeap* heap = GenCollectedHeap::heap();
- heap->object_iterate_since_last_GC(&blk);
- }
-}
-
-
-void AllocationProfiler::engage() {
- _active = true;
-}
-
-
-void AllocationProfiler::disengage() {
- _active = false;
-}
-
-
-void AllocationProfiler::add_class_to_array(Klass* k) {
- _print_array->append(k);
-}
-
-
-void AllocationProfiler::add_classes_to_array(Klass* k) {
- // Iterate over klass and all array klasses for klass
- k->with_array_klasses_do(&AllocationProfiler::add_class_to_array);
-}
-
-
-int AllocationProfiler::compare_classes(Klass** k1, Klass** k2) {
- // Sort by total allocation size
- return (*k2)->alloc_size() - (*k1)->alloc_size();
-}
-
-
-int AllocationProfiler::average(size_t alloc_size, int alloc_count) {
- return (int) ((double) (alloc_size * BytesPerWord) / MAX2(alloc_count, 1) + 0.5);
-}
-
-
-void AllocationProfiler::sort_and_print_array(size_t cutoff) {
- _print_array->sort(&AllocationProfiler::compare_classes);
- tty->print_cr("________________Size"
- "__Instances"
- "__Average"
- "__Class________________");
- size_t total_alloc_size = 0;
- int total_alloc_count = 0;
- for (int index = 0; index < _print_array->length(); index++) {
- Klass* k = _print_array->at(index);
- size_t alloc_size = k->alloc_size();
- if (alloc_size > cutoff) {
- int alloc_count = k->alloc_count();
-#ifdef PRODUCT
- const char* name = k->external_name();
-#else
- const char* name = k->internal_name();
-#endif
- tty->print_cr("%20u %10u %8u %s",
- alloc_size * BytesPerWord,
- alloc_count,
- average(alloc_size, alloc_count),
- name);
- total_alloc_size += alloc_size;
- total_alloc_count += alloc_count;
- }
- k->set_alloc_count(0);
- k->set_alloc_size(0);
- }
- tty->print_cr("%20u %10u %8u --total--",
- total_alloc_size * BytesPerWord,
- total_alloc_count,
- average(total_alloc_size, total_alloc_count));
- tty->cr();
-}
-
-
-void AllocationProfiler::print(size_t cutoff) {
- ResourceMark rm;
- assert(!is_active(), "AllocationProfiler cannot be active while printing profile");
-
- tty->cr();
- tty->print_cr("Allocation profile (sizes in bytes, cutoff = " SIZE_FORMAT " bytes):", cutoff * BytesPerWord);
- tty->cr();
-
- // Print regular instance klasses and basic type array klasses
- _print_array = new GrowableArray<Klass*>(SystemDictionary::number_of_classes()*2);
- SystemDictionary::classes_do(&add_classes_to_array);
- Universe::basic_type_classes_do(&add_classes_to_array);
- sort_and_print_array(cutoff);
-
- // This used to print metadata in the permgen but since there isn't a permgen
- // anymore, it is not yet implemented.
-}
diff --git a/src/share/vm/runtime/aprofiler.hpp b/src/share/vm/runtime/aprofiler.hpp
deleted file mode 100644
index ba4dd7aa5..000000000
--- a/src/share/vm/runtime/aprofiler.hpp
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_APROFILER_HPP
-#define SHARE_VM_RUNTIME_APROFILER_HPP
-
-#include "memory/allocation.hpp"
-#include "memory/universe.hpp"
-#include "oops/klass.hpp"
-#include "utilities/top.hpp"
-
-// A simple allocation profiler for Java. The profiler collects and prints
-// the number and total size of instances allocated per class, including
-// array classes.
-//
-// The profiler is currently global for all threads. It can be changed to a
-// per threads profiler by keeping a more elaborate data structure and calling
-// iterate_since_last_scavenge at thread switches.
-
-
-class AllocationProfiler: AllStatic {
- friend class GenCollectedHeap;
- friend class G1CollectedHeap;
- friend class MarkSweep;
- private:
- static bool _active; // tells whether profiler is active
- static GrowableArray<Klass*>* _print_array; // temporary array for printing
-
- // Utility printing functions
- static void add_class_to_array(Klass* k);
- static void add_classes_to_array(Klass* k);
- static int compare_classes(Klass** k1, Klass** k2);
- static int average(size_t alloc_size, int alloc_count);
- static void sort_and_print_array(size_t cutoff);
-
- // Call for collecting allocation information. Called at scavenge, mark-sweep and disengage.
- static void iterate_since_last_gc();
-
- public:
- // Start profiler
- static void engage();
- // Stop profiler
- static void disengage();
- // Tells whether profiler is active
- static bool is_active() { return _active; }
- // Print profile
- static void print(size_t cutoff); // Cutoff in total allocation size (in words)
-};
-
-#endif // SHARE_VM_RUNTIME_APROFILER_HPP
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index bf149fcd0..10aaededf 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -28,6 +28,7 @@
#include "compiler/compilerOracle.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableRS.hpp"
+#include "memory/genCollectedHeap.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
@@ -54,12 +55,36 @@
#endif
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS
// Note: This is a special bug reporting site for the JVM
#define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp"
#define DEFAULT_JAVA_LAUNCHER "generic"
+// Disable options not supported in this release, with a warning if they
+// were explicitly requested on the command-line
+#define UNSUPPORTED_OPTION(opt, description) \
+do { \
+ if (opt) { \
+ if (FLAG_IS_CMDLINE(opt)) { \
+ warning(description " is disabled in this release."); \
+ } \
+ FLAG_SET_DEFAULT(opt, false); \
+ } \
+} while(0)
+
+#define UNSUPPORTED_GC_OPTION(gc) \
+do { \
+ if (gc) { \
+ if (FLAG_IS_CMDLINE(gc)) { \
+ warning(#gc " is not supported in this VM. Using Serial GC."); \
+ } \
+ FLAG_SET_DEFAULT(gc, false); \
+ } \
+} while(0)
+
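
For illustration: the first macro is used verbatim further down in this patch
(UNSUPPORTED_OPTION(TieredCompilation, "TieredCompilation") in
finalize_vm_init_args), and a build without a given collector might use the
second along these lines (an assumed example, not part of this change):

  UNSUPPORTED_GC_OPTION(UseParallelGC);  // hypothetical; falls back to Serial GC with a warning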
char** Arguments::_jvm_flags_array = NULL;
int Arguments::_num_jvm_flags = 0;
char** Arguments::_jvm_args_array = NULL;
@@ -68,7 +93,7 @@ char* Arguments::_java_command = NULL;
SystemProperty* Arguments::_system_properties = NULL;
const char* Arguments::_gc_log_filename = NULL;
bool Arguments::_has_profile = false;
-bool Arguments::_has_alloc_profile = false;
+size_t Arguments::_conservative_max_heap_alignment = 0;
uintx Arguments::_min_heap_size = 0;
Arguments::Mode Arguments::_mode = _mixed;
bool Arguments::_java_compiler = false;
@@ -261,6 +286,10 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "PrintRevisitStats", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseVectoredExceptions", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseSplitVerifier", JDK_Version::jdk(8), JDK_Version::jdk(9) },
+ { "UseISM", JDK_Version::jdk(8), JDK_Version::jdk(9) },
+ { "UsePermISM", JDK_Version::jdk(8), JDK_Version::jdk(9) },
+ { "UseMPSS", JDK_Version::jdk(8), JDK_Version::jdk(9) },
+ { "UseStringCache", JDK_Version::jdk(8), JDK_Version::jdk(9) },
#ifdef PRODUCT
{ "DesiredMethodLimit",
JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
@@ -849,7 +878,7 @@ bool Arguments::process_argument(const char* arg,
arg_len = equal_sign - argname;
}
- Flag* found_flag = Flag::find_flag((char*)argname, arg_len, true);
+ Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true);
if (found_flag != NULL) {
char locked_message_buf[BUFLEN];
found_flag->get_locked_message(locked_message_buf, BUFLEN);
@@ -870,6 +899,14 @@ bool Arguments::process_argument(const char* arg,
} else {
jio_fprintf(defaultStream::error_stream(),
"Unrecognized VM option '%s'\n", argname);
+ Flag* fuzzy_matched = Flag::fuzzy_match((const char*)argname, arg_len, true);
+ if (fuzzy_matched != NULL) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Did you mean '%s%s%s'?\n",
+ (fuzzy_matched->is_bool()) ? "(+/-)" : "",
+ fuzzy_matched->name,
+ (fuzzy_matched->is_bool()) ? "" : "=<value>");
+ }
}
// allow for commandline "commenting out" options like -XX:#+Verbose
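
As a hypothetical example of the new suggestion: a mistyped boolean flag such
as -XX:+PrintGCDetials would now produce output along these lines, provided
Flag::fuzzy_match finds a close enough candidate:

  Unrecognized VM option 'PrintGCDetials'
  Did you mean '(+/-)PrintGCDetails'?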
@@ -1063,6 +1100,7 @@ void Arguments::set_mode_flags(Mode mode) {
}
}
+#if defined(COMPILER2) || defined(_LP64) || !INCLUDE_CDS
// Conflict: required to use shared spaces (-Xshare:on), but
// incompatible command line options were chosen.
@@ -1075,6 +1113,7 @@ static void no_shared_spaces() {
FLAG_SET_DEFAULT(UseSharedSpaces, false);
}
}
+#endif
void Arguments::set_tiered_flags() {
// With tiered, set default policy to AdvancedThresholdPolicy, which is 3.
@@ -1089,6 +1128,10 @@ void Arguments::set_tiered_flags() {
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 5);
}
+ if (!UseInterpreter) { // -Xcomp
+ Tier3InvokeNotifyFreqLog = 0;
+ Tier4InvocationThreshold = 0;
+ }
}
#if INCLUDE_ALL_GCS
@@ -1354,12 +1397,17 @@ bool verify_object_alignment() {
return true;
}
-inline uintx max_heap_for_compressed_oops() {
+uintx Arguments::max_heap_for_compressed_oops() {
// Avoid sign flip.
- if (OopEncodingHeapMax < ClassMetaspaceSize + os::vm_page_size()) {
- return 0;
- }
- LP64_ONLY(return OopEncodingHeapMax - ClassMetaspaceSize - os::vm_page_size());
+ assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
+ // We need to fit both the NULL page and the heap into the memory budget, while
+ // keeping alignment constraints of the heap. To guarantee the latter, as the
+ // NULL page is located before the heap, we pad the NULL page to the conservative
+ // maximum alignment that the GC may ever impose upon the heap.
+ size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
+ Arguments::conservative_max_heap_alignment());
+
+ LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
NOT_LP64(ShouldNotReachHere(); return 0);
}
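
A worked example with illustrative numbers: assume a 4 KB page size, a
conservative maximum heap alignment of 32 MB, and the usual 32 GB
OopEncodingHeapMax for 8-byte object alignment (4 G narrow-oop values << 3):

  displacement_due_to_null_page = align_size_up_(4 KB, 32 MB) = 32 MB
  max heap for compressed oops  = 32 GB - 32 MB

Padding the NULL page up to the full alignment is what makes the bound
conservative: wherever the GC later aligns the heap start, the heap end
still fits inside the encodable range.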
@@ -1404,13 +1452,59 @@ void Arguments::set_use_compressed_oops() {
if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
warning("Max heap size too large for Compressed Oops");
FLAG_SET_DEFAULT(UseCompressedOops, false);
- FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
+ FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
}
}
#endif // _LP64
#endif // ZERO
}
+
+// NOTE: set_use_compressed_klass_ptrs() must be called after calling
+// set_use_compressed_oops().
+void Arguments::set_use_compressed_klass_ptrs() {
+#ifndef ZERO
+#ifdef _LP64
+ // UseCompressedOops must be on for UseCompressedClassPointers to be on.
+ if (!UseCompressedOops) {
+ if (UseCompressedClassPointers) {
+ warning("UseCompressedClassPointers requires UseCompressedOops");
+ }
+ FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
+ } else {
+ // Turn on UseCompressedClassPointers too
+ if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
+ FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
+ }
+ // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
+ if (UseCompressedClassPointers) {
+ if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) {
+ warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers");
+ FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
+ }
+ }
+ }
+#endif // _LP64
+#endif // !ZERO
+}
+
+void Arguments::set_conservative_max_heap_alignment() {
+ // The conservative maximum required alignment for the heap is the maximum of
+ // the alignments imposed by several sources: any requirements from the heap
+ // itself, the collector policy and the maximum page size we may run the VM
+ // with.
+ size_t heap_alignment = GenCollectedHeap::conservative_max_heap_alignment();
+#if INCLUDE_ALL_GCS
+ if (UseParallelGC) {
+ heap_alignment = ParallelScavengeHeap::conservative_max_heap_alignment();
+ } else if (UseG1GC) {
+ heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
+ }
+#endif // INCLUDE_ALL_GCS
+ _conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(),
+ CollectorPolicy::compute_max_alignment());
+}
+
void Arguments::set_ergonomics_flags() {
if (os::is_server_class_machine()) {
@@ -1428,46 +1522,29 @@ void Arguments::set_ergonomics_flags() {
FLAG_SET_ERGO(bool, UseParallelGC, true);
}
}
- // Shared spaces work fine with other GCs but causes bytecode rewriting
- // to be disabled, which hurts interpreter performance and decreases
- // server performance. On server class machines, keep the default
- // off unless it is asked for. Future work: either add bytecode rewriting
- // at link time, or rewrite bytecodes in non-shared methods.
- if (!DumpSharedSpaces && !RequireSharedSpaces) {
- no_shared_spaces();
- }
}
+#ifdef COMPILER2
+ // Shared spaces work fine with other GCs but cause bytecode rewriting
+ // to be disabled, which hurts interpreter performance and decreases
+ // server performance. When -server is specified, keep the default off
+ // unless it is asked for. Future work: either add bytecode rewriting
+ // at link time, or rewrite bytecodes in non-shared methods.
+ if (!DumpSharedSpaces && !RequireSharedSpaces &&
+ (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
+ no_shared_spaces();
+ }
+#endif
+
+ set_conservative_max_heap_alignment();
#ifndef ZERO
#ifdef _LP64
set_use_compressed_oops();
- // UseCompressedOops must be on for UseCompressedKlassPointers to be on.
- if (!UseCompressedOops) {
- if (UseCompressedKlassPointers) {
- warning("UseCompressedKlassPointers requires UseCompressedOops");
- }
- FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
- } else {
- // Turn on UseCompressedKlassPointers too
- if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
- FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
- }
- // Set the ClassMetaspaceSize to something that will not need to be
- // expanded, since it cannot be expanded.
- if (UseCompressedKlassPointers) {
- if (ClassMetaspaceSize > KlassEncodingMetaspaceMax) {
- warning("Class metaspace size is too large for UseCompressedKlassPointers");
- FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
- } else if (FLAG_IS_DEFAULT(ClassMetaspaceSize)) {
- // 100,000 classes seems like a good size, so 100M assumes around 1K
- // per klass. The vtable and oopMap is embedded so we don't have a fixed
- // size per klass. Eventually, this will be parameterized because it
- // would also be useful to determine the optimal size of the
- // systemDictionary.
- FLAG_SET_ERGO(uintx, ClassMetaspaceSize, 100*M);
- }
- }
- }
+
+ // set_use_compressed_klass_ptrs() must be called after calling
+ // set_use_compressed_oops().
+ set_use_compressed_klass_ptrs();
+
// Also checks that certain machines are slower with compressed oops
// in vm_version initialization code.
#endif // _LP64
@@ -1669,6 +1746,20 @@ void Arguments::set_bytecode_flags() {
// Aggressive optimization flags -XX:+AggressiveOpts
void Arguments::set_aggressive_opts_flags() {
#ifdef COMPILER2
+ if (AggressiveUnboxing) {
+ if (FLAG_IS_DEFAULT(EliminateAutoBox)) {
+ FLAG_SET_DEFAULT(EliminateAutoBox, true);
+ } else if (!EliminateAutoBox) {
+ // warning("AggressiveUnboxing is disabled because EliminateAutoBox is disabled");
+ AggressiveUnboxing = false;
+ }
+ if (FLAG_IS_DEFAULT(DoEscapeAnalysis)) {
+ FLAG_SET_DEFAULT(DoEscapeAnalysis, true);
+ } else if (!DoEscapeAnalysis) {
+ // warning("AggressiveUnboxing is disabled because DoEscapeAnalysis is disabled");
+ AggressiveUnboxing = false;
+ }
+ }
if (AggressiveOpts || !FLAG_IS_DEFAULT(AutoBoxCacheMax)) {
if (FLAG_IS_DEFAULT(EliminateAutoBox)) {
FLAG_SET_DEFAULT(EliminateAutoBox, true);
@@ -1782,7 +1873,7 @@ void check_gclog_consistency() {
(NumberOfGCLogFiles == 0) ||
(GCLogFileSize == 0)) {
jio_fprintf(defaultStream::output_stream(),
- "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>\n"
+ "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>[k|K|m|M|g|G]\n"
"where num_of_file > 0 and num_of_size > 0\n"
"GC log rotation is turned off\n");
UseGCLogFileRotation = false;
@@ -1796,6 +1887,51 @@ void check_gclog_consistency() {
}
}
+// This function is called for -Xloggc:<filename>; it checks whether a given
+// file name (or string) conforms to the following specification:
+// a valid string contains only the characters [A-Z][a-z][0-9].-_ and %p/%t,
+// where %p and %t may each appear at most once. Only the file name is
+// restricted, not the path.
+bool is_filename_valid(const char *file_name) {
+ const char* p = file_name;
+ char file_sep = os::file_separator()[0];
+ const char* cp;
+ // skip prefix path
+ for (cp = file_name; *cp != '\0'; cp++) {
+ if (*cp == '/' || *cp == file_sep) {
+ p = cp + 1;
+ }
+ }
+
+ int count_p = 0;
+ int count_t = 0;
+ while (*p != '\0') {
+ if ((*p >= '0' && *p <= '9') ||
+ (*p >= 'A' && *p <= 'Z') ||
+ (*p >= 'a' && *p <= 'z') ||
+ *p == '-' ||
+ *p == '_' ||
+ *p == '.') {
+ p++;
+ continue;
+ }
+ if (*p == '%') {
+ if (*(p + 1) == 'p') {
+ p += 2;
+ count_p++;
+ continue;
+ }
+ if (*(p + 1) == 't') {
+ p += 2;
+ count_t++;
+ continue;
+ }
+ }
+ return false;
+ }
+ return count_p < 2 && count_t < 2;
+}
+
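
A few hypothetical inputs and what the check above returns:

  is_filename_valid("gc.log")          -> true
  is_filename_valid("gc_%p_%t.log")    -> true   (%p and %t each appear once)
  is_filename_valid("/tmp/gc_%p.log")  -> true   (the directory prefix is skipped)
  is_filename_valid("gc_%p_%p.log")    -> false  (%p appears twice)
  is_filename_valid("gc log")          -> false  (space is not an allowed character)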
// Check consistency of GC selection
bool Arguments::check_gc_consistency() {
check_gclog_consistency();
@@ -1818,8 +1954,13 @@ bool Arguments::check_gc_consistency() {
"please refer to the release notes for the combinations "
"allowed\n");
status = false;
+ } else if (ReservedCodeCacheSize > 2*G) {
+ // Code cache size larger than MAXINT is not supported.
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
+ (2*G)/M);
+ status = false;
}
-
return status;
}
@@ -1846,6 +1987,10 @@ void Arguments::check_deprecated_gc_flags() {
warning("Using MaxGCMinorPauseMillis as minor pause goal is deprecated"
"and will likely be removed in future release");
}
+ if (FLAG_IS_CMDLINE(DefaultMaxRAMFraction)) {
+ warning("DefaultMaxRAMFraction is deprecated and will likely be removed in a future release. "
+ "Use MaxRAMFraction instead.");
+ }
}
// Check stack pages settings
@@ -1867,21 +2012,6 @@ bool Arguments::check_vm_args_consistency() {
// Note: Needs platform-dependent factoring.
bool status = true;
-#if ( (defined(COMPILER2) && defined(SPARC)))
- // NOTE: The call to VM_Version_init depends on the fact that VM_Version_init
- // on sparc doesn't require generation of a stub as is the case on, e.g.,
- // x86. Normally, VM_Version_init must be called from init_globals in
- // init.cpp, which is called by the initial java thread *after* arguments
- // have been parsed. VM_Version_init gets called twice on sparc.
- extern void VM_Version_init();
- VM_Version_init();
- if (!VM_Version::has_v9()) {
- jio_fprintf(defaultStream::error_stream(),
- "V8 Machine detected, Server requires V9\n");
- status = false;
- }
-#endif /* COMPILER2 && SPARC */
-
// Allow both -XX:-UseStackBanging and -XX:-UseBoundThreads in non-product
// builds so the cost of stack banging can be measured.
#if (defined(PRODUCT) && defined(SOLARIS))
@@ -1901,7 +2031,7 @@ bool Arguments::check_vm_args_consistency() {
status = false;
}
- status = status && verify_percentage(AdaptiveSizePolicyWeight,
+ status = status && verify_interval(AdaptiveSizePolicyWeight, 0, 100,
"AdaptiveSizePolicyWeight");
status = status && verify_percentage(ThresholdTolerance, "ThresholdTolerance");
status = status && verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio");
@@ -1961,28 +2091,9 @@ bool Arguments::check_vm_args_consistency() {
FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
}
- status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
-
status = status && check_gc_consistency();
status = status && check_stack_pages();
- if (_has_alloc_profile) {
- if (UseParallelGC || UseParallelOldGC) {
- jio_fprintf(defaultStream::error_stream(),
- "error: invalid argument combination.\n"
- "Allocation profiling (-Xaprof) cannot be used together with "
- "Parallel GC (-XX:+UseParallelGC or -XX:+UseParallelOldGC).\n");
- status = false;
- }
- if (UseConcMarkSweepGC) {
- jio_fprintf(defaultStream::error_stream(),
- "error: invalid argument combination.\n"
- "Allocation profiling (-Xaprof) cannot be used together with "
- "the CMS collector (-XX:+UseConcMarkSweepGC).\n");
- status = false;
- }
- }
-
if (CMSIncrementalMode) {
if (!UseConcMarkSweepGC) {
jio_fprintf(defaultStream::error_stream(),
@@ -2056,6 +2167,52 @@ bool Arguments::check_vm_args_consistency() {
status = status && verify_interval(G1ConcRSLogCacheSize, 0, 31,
"G1ConcRSLogCacheSize");
}
+ if (UseConcMarkSweepGC) {
+ status = status && verify_min_value(CMSOldPLABNumRefills, 1, "CMSOldPLABNumRefills");
+ status = status && verify_min_value(CMSOldPLABToleranceFactor, 1, "CMSOldPLABToleranceFactor");
+ status = status && verify_min_value(CMSOldPLABMax, 1, "CMSOldPLABMax");
+ status = status && verify_interval(CMSOldPLABMin, 1, CMSOldPLABMax, "CMSOldPLABMin");
+
+ status = status && verify_min_value(CMSYoungGenPerWorker, 1, "CMSYoungGenPerWorker");
+
+ status = status && verify_min_value(CMSSamplingGrain, 1, "CMSSamplingGrain");
+ status = status && verify_interval(CMS_SweepWeight, 0, 100, "CMS_SweepWeight");
+ status = status && verify_interval(CMS_FLSWeight, 0, 100, "CMS_FLSWeight");
+
+ status = status && verify_interval(FLSCoalescePolicy, 0, 4, "FLSCoalescePolicy");
+
+ status = status && verify_min_value(CMSRescanMultiple, 1, "CMSRescanMultiple");
+ status = status && verify_min_value(CMSConcMarkMultiple, 1, "CMSConcMarkMultiple");
+
+ status = status && verify_interval(CMSPrecleanIter, 0, 9, "CMSPrecleanIter");
+ status = status && verify_min_value(CMSPrecleanDenominator, 1, "CMSPrecleanDenominator");
+ status = status && verify_interval(CMSPrecleanNumerator, 0, CMSPrecleanDenominator - 1, "CMSPrecleanNumerator");
+
+ status = status && verify_percentage(CMSBootstrapOccupancy, "CMSBootstrapOccupancy");
+
+ status = status && verify_min_value(CMSPrecleanThreshold, 100, "CMSPrecleanThreshold");
+
+ status = status && verify_percentage(CMSScheduleRemarkEdenPenetration, "CMSScheduleRemarkEdenPenetration");
+ status = status && verify_min_value(CMSScheduleRemarkSamplingRatio, 1, "CMSScheduleRemarkSamplingRatio");
+ status = status && verify_min_value(CMSBitMapYieldQuantum, 1, "CMSBitMapYieldQuantum");
+ status = status && verify_percentage(CMSTriggerRatio, "CMSTriggerRatio");
+ status = status && verify_percentage(CMSIsTooFullPercentage, "CMSIsTooFullPercentage");
+ }
+
+ if (UseParallelGC || UseParallelOldGC) {
+ status = status && verify_interval(ParallelOldDeadWoodLimiterMean, 0, 100, "ParallelOldDeadWoodLimiterMean");
+ status = status && verify_interval(ParallelOldDeadWoodLimiterStdDev, 0, 100, "ParallelOldDeadWoodLimiterStdDev");
+
+ status = status && verify_percentage(YoungGenerationSizeIncrement, "YoungGenerationSizeIncrement");
+ status = status && verify_percentage(TenuredGenerationSizeIncrement, "TenuredGenerationSizeIncrement");
+
+ status = status && verify_min_value(YoungGenerationSizeSupplementDecay, 1, "YoungGenerationSizeSupplementDecay");
+ status = status && verify_min_value(TenuredGenerationSizeSupplementDecay, 1, "TenuredGenerationSizeSupplementDecay");
+
+ status = status && verify_min_value(ParGCCardsPerStrideChunk, 1, "ParGCCardsPerStrideChunk");
+
+ status = status && verify_min_value(ParallelOldGCSplitInterval, 0, "ParallelOldGCSplitInterval");
+ }
#endif // INCLUDE_ALL_GCS
status = status && verify_interval(RefDiscoveryPolicy,
@@ -2070,12 +2227,47 @@ bool Arguments::check_vm_args_consistency() {
status = status && verify_object_alignment();
- status = status && verify_min_value(ClassMetaspaceSize, 1*M,
- "ClassMetaspaceSize");
+ status = status && verify_interval(CompressedClassSpaceSize, 1*M, 3*G,
+ "CompressedClassSpaceSize");
status = status && verify_interval(MarkStackSizeMax,
1, (max_jint - 1), "MarkStackSizeMax");
+ status = status && verify_interval(NUMAChunkResizeWeight, 0, 100, "NUMAChunkResizeWeight");
+
+ status = status && verify_min_value(LogEventsBufferEntries, 1, "LogEventsBufferEntries");
+
+ status = status && verify_min_value(HeapSizePerGCThread, (uintx) os::vm_page_size(), "HeapSizePerGCThread");
+
+ status = status && verify_min_value(GCTaskTimeStampEntries, 1, "GCTaskTimeStampEntries");
+ status = status && verify_percentage(ParallelGCBufferWastePct, "ParallelGCBufferWastePct");
+ status = status && verify_interval(TargetPLABWastePct, 1, 100, "TargetPLABWastePct");
+
+ status = status && verify_min_value(ParGCStridesPerThread, 1, "ParGCStridesPerThread");
+
+ status = status && verify_min_value(MinRAMFraction, 1, "MinRAMFraction");
+ status = status && verify_min_value(InitialRAMFraction, 1, "InitialRAMFraction");
+ status = status && verify_min_value(MaxRAMFraction, 1, "MaxRAMFraction");
+ status = status && verify_min_value(DefaultMaxRAMFraction, 1, "DefaultMaxRAMFraction");
+
+ status = status && verify_interval(AdaptiveTimeWeight, 0, 100, "AdaptiveTimeWeight");
+ status = status && verify_min_value(AdaptiveSizeDecrementScaleFactor, 1, "AdaptiveSizeDecrementScaleFactor");
+
+ status = status && verify_interval(TLABAllocationWeight, 0, 100, "TLABAllocationWeight");
+ status = status && verify_min_value(MinTLABSize, 1, "MinTLABSize");
+ status = status && verify_min_value(TLABRefillWasteFraction, 1, "TLABRefillWasteFraction");
+
+ status = status && verify_percentage(YoungGenerationSizeSupplement, "YoungGenerationSizeSupplement");
+ status = status && verify_percentage(TenuredGenerationSizeSupplement, "TenuredGenerationSizeSupplement");
+
+ // the "age" field in the oop header is 4 bits; do not want to pull in markOop.hpp
+ // just for that, so hardcode here.
+ status = status && verify_interval(MaxTenuringThreshold, 0, 15, "MaxTenuringThreshold");
+ status = status && verify_interval(InitialTenuringThreshold, 0, MaxTenuringThreshold, "InitialTenuringThreshold");
+ status = status && verify_percentage(TargetSurvivorRatio, "TargetSurvivorRatio");
+ status = status && verify_percentage(MarkSweepDeadRatio, "MarkSweepDeadRatio");
+
+ status = status && verify_min_value(MarkSweepAlwaysCompactCount, 1, "MarkSweepAlwaysCompactCount");
#ifdef SPARC
if (UseConcMarkSweepGC || UseG1GC) {
// Issue a stern warning if the user has explicitly set
@@ -2100,6 +2292,51 @@ bool Arguments::check_vm_args_consistency() {
#endif
}
+ // Need to limit the extent of the padding to a reasonable size.
+ // 8K is well beyond any reasonable HW cache line size, even with
+ // aggressive prefetching, while still leaving room for segregating
+ // data among distinct pages.
+ if (ContendedPaddingWidth < 0 || ContendedPaddingWidth > 8192) {
+ jio_fprintf(defaultStream::error_stream(),
+ "ContendedPaddingWidth=" INTX_FORMAT " must be in between %d and %d\n",
+ ContendedPaddingWidth, 0, 8192);
+ status = false;
+ }
+
+ // Enforce that the padding does not break existing field alignments.
+ // It is sufficient to check against the largest type size.
+ if ((ContendedPaddingWidth % BytesPerLong) != 0) {
+ jio_fprintf(defaultStream::error_stream(),
+ "ContendedPaddingWidth=" INTX_FORMAT " must be a multiple of %d\n",
+ ContendedPaddingWidth, BytesPerLong);
+ status = false;
+ }
+
+ // Check lower bounds of the code cache
+ // Template Interpreter code is approximately 3X larger in debug builds.
+ uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
+ if (InitialCodeCacheSize < (uintx)os::vm_page_size()) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid InitialCodeCacheSize=%dK. Must be at least %dK.\n", InitialCodeCacheSize/K,
+ os::vm_page_size()/K);
+ status = false;
+ } else if (ReservedCodeCacheSize < InitialCodeCacheSize) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid ReservedCodeCacheSize: %dK. Must be at least InitialCodeCacheSize=%dK.\n",
+ ReservedCodeCacheSize/K, InitialCodeCacheSize/K);
+ status = false;
+ } else if (ReservedCodeCacheSize < min_code_cache_size) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid ReservedCodeCacheSize=%dK. Must be at least %uK.\n", ReservedCodeCacheSize/K,
+ min_code_cache_size/K);
+ status = false;
+ } else if (ReservedCodeCacheSize > 2*G) {
+ // Code cache size larger than MAXINT is not supported.
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
+ (2*G)/M);
+ status = false;
+ }
return status;
}
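
Concretely, on a VM where BytesPerLong is 8, the new padding checks accept or
reject settings as follows (values chosen for illustration):

  -XX:ContendedPaddingWidth=128    // accepted: within [0, 8192] and a multiple of 8
  -XX:ContendedPaddingWidth=100    // rejected: not a multiple of 8
  -XX:ContendedPaddingWidth=16384  // rejected: exceeds the 8192 upper bound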
@@ -2206,21 +2443,6 @@ jint Arguments::parse_vm_init_args(const JavaVMInitArgs* args) {
return result;
}
- if (AggressiveOpts) {
- // Insert alt-rt.jar between user-specified bootclasspath
- // prefix and the default bootclasspath. os::set_boot_path()
- // uses meta_index_dir as the default bootclasspath directory.
- const char* altclasses_jar = "alt-rt.jar";
- size_t altclasses_path_len = strlen(get_meta_index_dir()) + 1 +
- strlen(altclasses_jar);
- char* altclasses_path = NEW_C_HEAP_ARRAY(char, altclasses_path_len, mtInternal);
- strcpy(altclasses_path, get_meta_index_dir());
- strcat(altclasses_path, altclasses_jar);
- scp.add_suffix_to_prefix(altclasses_path);
- scp_assembly_required = true;
- FREE_C_HEAP_ARRAY(char, altclasses_path, mtInternal);
- }
-
// Parse _JAVA_OPTIONS environment variable (if present) (mimics classic VM)
result = parse_java_options_environment_variable(&scp, &scp_assembly_required);
if (result != JNI_OK) {
@@ -2498,20 +2720,37 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
// -Xoss
} else if (match_option(option, "-Xoss", &tail)) {
// HotSpot does not have separate native and Java stacks, ignore silently for compatibility
- // -Xmaxjitcodesize
+ } else if (match_option(option, "-XX:CodeCacheExpansionSize=", &tail)) {
+ julong long_CodeCacheExpansionSize = 0;
+ ArgsRange errcode = parse_memory_size(tail, &long_CodeCacheExpansionSize, os::vm_page_size());
+ if (errcode != arg_in_range) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid argument: %s. Must be at least %luK.\n", option->optionString,
+ os::vm_page_size()/K);
+ return JNI_EINVAL;
+ }
+ FLAG_SET_CMDLINE(uintx, CodeCacheExpansionSize, (uintx)long_CodeCacheExpansionSize);
} else if (match_option(option, "-Xmaxjitcodesize", &tail) ||
match_option(option, "-XX:ReservedCodeCacheSize=", &tail)) {
julong long_ReservedCodeCacheSize = 0;
- ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize,
- (size_t)InitialCodeCacheSize);
+
+ ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize, 1);
if (errcode != arg_in_range) {
jio_fprintf(defaultStream::error_stream(),
- "Invalid maximum code cache size: %s. Should be greater than InitialCodeCacheSize=%dK\n",
- option->optionString, InitialCodeCacheSize/K);
- describe_range_error(errcode);
+ "Invalid maximum code cache size: %s.\n", option->optionString);
return JNI_EINVAL;
}
FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize);
+ //-XX:IncreaseFirstTierCompileThresholdAt=
+ } else if (match_option(option, "-XX:IncreaseFirstTierCompileThresholdAt=", &tail)) {
+ uintx uint_IncreaseFirstTierCompileThresholdAt = 0;
+ if (!parse_uintx(tail, &uint_IncreaseFirstTierCompileThresholdAt, 0) || uint_IncreaseFirstTierCompileThresholdAt > 99) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Invalid value for IncreaseFirstTierCompileThresholdAt: %s. Should be between 0 and 99.\n",
+ option->optionString);
+ return JNI_EINVAL;
+ }
+ FLAG_SET_CMDLINE(uintx, IncreaseFirstTierCompileThresholdAt, (uintx)uint_IncreaseFirstTierCompileThresholdAt);
// -green
} else if (match_option(option, "-green", &tail)) {
jio_fprintf(defaultStream::error_stream(),
@@ -2542,9 +2781,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
"Flat profiling is not supported in this VM.\n");
return JNI_ERR;
#endif // INCLUDE_FPROF
- // -Xaprof
- } else if (match_option(option, "-Xaprof", &tail)) {
- _has_alloc_profile = true;
// -Xconcurrentio
} else if (match_option(option, "-Xconcurrentio", &tail)) {
FLAG_SET_CMDLINE(bool, UseLWPSynchronization, true);
@@ -2634,6 +2870,13 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
// ostream_init_log(), when called will use this filename
// to initialize a fileStream.
_gc_log_filename = strdup(tail);
+ if (!is_filename_valid(_gc_log_filename)) {
+ jio_fprintf(defaultStream::output_stream(),
+ "Invalid file name for use with -Xloggc: Filename can only contain the "
+ "characters [A-Z][a-z][0-9]-_.%%[p|t] but it has been %s\n"
+ "Note %%p or %%t can only be used once\n", _gc_log_filename);
+ return JNI_EINVAL;
+ }
FLAG_SET_CMDLINE(bool, PrintGC, true);
FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true);
@@ -2799,13 +3042,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
FLAG_SET_CMDLINE(bool, UseTLAB, true);
} else if (match_option(option, "-XX:-UseTLE", &tail)) {
FLAG_SET_CMDLINE(bool, UseTLAB, false);
-SOLARIS_ONLY(
- } else if (match_option(option, "-XX:+UsePermISM", &tail)) {
- warning("-XX:+UsePermISM is obsolete.");
- FLAG_SET_CMDLINE(bool, UseISM, true);
- } else if (match_option(option, "-XX:-UsePermISM", &tail)) {
- FLAG_SET_CMDLINE(bool, UseISM, false);
-)
} else if (match_option(option, "-XX:+DisplayVMOutputToStderr", &tail)) {
FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, false);
FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, true);
@@ -2965,6 +3201,11 @@ jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_req
set_mode_flags(_int);
}
+ // Fix up InitialTenuringThreshold if only MaxTenuringThreshold is set
+ if (FLAG_IS_DEFAULT(InitialTenuringThreshold) && (InitialTenuringThreshold > MaxTenuringThreshold)) {
+ FLAG_SET_ERGO(uintx, InitialTenuringThreshold, MaxTenuringThreshold);
+ }
+
#ifndef COMPILER2
// Don't degrade server performance for footprint
if (FLAG_IS_DEFAULT(UseLargePages) &&
@@ -2973,18 +3214,19 @@ jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_req
// Note that large pages are enabled/disabled for both the
// Java heap and the code cache.
FLAG_SET_DEFAULT(UseLargePages, false);
- SOLARIS_ONLY(FLAG_SET_DEFAULT(UseMPSS, false));
- SOLARIS_ONLY(FLAG_SET_DEFAULT(UseISM, false));
}
- // Tiered compilation is undefined with C1.
- TieredCompilation = false;
#else
if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
}
#endif
+#ifndef TIERED
+ // Tiered compilation is undefined.
+ UNSUPPORTED_OPTION(TieredCompilation, "TieredCompilation");
+#endif
+
// If we are running in a headless jre, force java.awt.headless property
// to be true unless the property has already been set.
// Also allow the OS environment variable JAVA_AWT_HEADLESS to set headless state.
@@ -3097,68 +3339,25 @@ jint Arguments::parse_options_environment_variable(const char* name, SysClassPat
}
void Arguments::set_shared_spaces_flags() {
- const bool must_share = DumpSharedSpaces || RequireSharedSpaces;
- const bool might_share = must_share || UseSharedSpaces;
-
- // CompressedOops cannot be used with CDS. The offsets of oopmaps and
- // static fields are incorrect in the archive. With some more clever
- // initialization, this restriction can probably be lifted.
- // ??? UseLargePages might be okay now
- const bool cannot_share = UseCompressedOops ||
- (UseLargePages && FLAG_IS_CMDLINE(UseLargePages));
- if (cannot_share) {
- if (must_share) {
- warning("disabling large pages %s"
- "because of %s", "" LP64_ONLY("and compressed oops "),
- DumpSharedSpaces ? "-Xshare:dump" : "-Xshare:on");
- FLAG_SET_CMDLINE(bool, UseLargePages, false);
- LP64_ONLY(FLAG_SET_CMDLINE(bool, UseCompressedOops, false));
- LP64_ONLY(FLAG_SET_CMDLINE(bool, UseCompressedKlassPointers, false));
- } else {
- // Prefer compressed oops and large pages to class data sharing
- if (UseSharedSpaces && Verbose) {
- warning("turning off use of shared archive because of large pages%s",
- "" LP64_ONLY(" and/or compressed oops"));
- }
- no_shared_spaces();
- }
- } else if (UseLargePages && might_share) {
- // Disable large pages to allow shared spaces. This is sub-optimal, since
- // there may not even be a shared archive to use.
- FLAG_SET_DEFAULT(UseLargePages, false);
- }
-
if (DumpSharedSpaces) {
if (RequireSharedSpaces) {
warning("cannot dump shared archive while using shared archive");
}
UseSharedSpaces = false;
+#ifdef _LP64
+ if (!UseCompressedOops || !UseCompressedClassPointers) {
+ vm_exit_during_initialization(
+ "Cannot dump shared archive when UseCompressedOops or UseCompressedClassPointers is off.", NULL);
+ }
+ } else {
+ // UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.
+ if (!UseCompressedOops || !UseCompressedClassPointers) {
+ no_shared_spaces();
+ }
+#endif
}
}
-// Disable options not supported in this release, with a warning if they
-// were explicitly requested on the command-line
-#define UNSUPPORTED_OPTION(opt, description) \
-do { \
- if (opt) { \
- if (FLAG_IS_CMDLINE(opt)) { \
- warning(description " is disabled in this release."); \
- } \
- FLAG_SET_DEFAULT(opt, false); \
- } \
-} while(0)
-
-
-#define UNSUPPORTED_GC_OPTION(gc) \
-do { \
- if (gc) { \
- if (FLAG_IS_CMDLINE(gc)) { \
- warning(#gc " is not supported in this VM. Using Serial GC."); \
- } \
- FLAG_SET_DEFAULT(gc, false); \
- } \
-} while(0)
-
#if !INCLUDE_ALL_GCS
static void force_serial_gc() {
FLAG_SET_DEFAULT(UseSerialGC, true);
@@ -3171,25 +3370,64 @@ static void force_serial_gc() {
}
#endif // INCLUDE_ALL_GCS
+// Sharing support
+// Construct the path to the archive
+static char* get_shared_archive_path() {
+ char *shared_archive_path;
+ if (SharedArchiveFile == NULL) {
+ char jvm_path[JVM_MAXPATHLEN];
+ os::jvm_path(jvm_path, sizeof(jvm_path));
+ char *end = strrchr(jvm_path, *os::file_separator());
+ if (end != NULL) *end = '\0';
+ size_t jvm_path_len = strlen(jvm_path);
+ size_t file_sep_len = strlen(os::file_separator());
+ shared_archive_path = NEW_C_HEAP_ARRAY(char, jvm_path_len +
+ file_sep_len + 20, mtInternal);
+ if (shared_archive_path != NULL) {
+ strncpy(shared_archive_path, jvm_path, jvm_path_len + 1);
+ strncat(shared_archive_path, os::file_separator(), file_sep_len);
+ strncat(shared_archive_path, "classes.jsa", 11);
+ }
+ } else {
+ shared_archive_path = NEW_C_HEAP_ARRAY(char, strlen(SharedArchiveFile) + 1, mtInternal);
+ if (shared_archive_path != NULL) {
+ strncpy(shared_archive_path, SharedArchiveFile, strlen(SharedArchiveFile) + 1);
+ }
+ }
+ return shared_archive_path;
+}
+
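
Assuming a typical layout (the path is purely illustrative), with
SharedArchiveFile unset this yields the archive next to the JVM library:

  /usr/lib/jvm/jdk8/jre/lib/amd64/server/classes.jsa

os::jvm_path() returns the path of the JVM library itself; the strrchr call
strips the file name and "classes.jsa" is appended in its place.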
+#ifndef PRODUCT
+// Determine whether LogVMOutput should be implicitly turned on.
+static bool use_vm_log() {
+ if (LogCompilation || !FLAG_IS_DEFAULT(LogFile) ||
+ PrintCompilation || PrintInlining || PrintDependencies || PrintNativeNMethods ||
+ PrintDebugInfo || PrintRelocations || PrintNMethods || PrintExceptionHandlers ||
+ PrintAssembly || TraceDeoptimization || TraceDependencies ||
+ (VerifyDependencies && FLAG_IS_CMDLINE(VerifyDependencies))) {
+ return true;
+ }
+
+#ifdef COMPILER1
+ if (PrintC1Statistics) {
+ return true;
+ }
+#endif // COMPILER1
+
+#ifdef COMPILER2
+ if (PrintOptoAssembly || PrintOptoStatistics) {
+ return true;
+ }
+#endif // COMPILER2
+
+ return false;
+}
+#endif // PRODUCT
+
// Parse entry point called from JNI_CreateJavaVM
jint Arguments::parse(const JavaVMInitArgs* args) {
- // Sharing support
- // Construct the path to the archive
- char jvm_path[JVM_MAXPATHLEN];
- os::jvm_path(jvm_path, sizeof(jvm_path));
- char *end = strrchr(jvm_path, *os::file_separator());
- if (end != NULL) *end = '\0';
- char *shared_archive_path = NEW_C_HEAP_ARRAY(char, strlen(jvm_path) +
- strlen(os::file_separator()) + 20, mtInternal);
- if (shared_archive_path == NULL) return JNI_ENOMEM;
- strcpy(shared_archive_path, jvm_path);
- strcat(shared_archive_path, os::file_separator());
- strcat(shared_archive_path, "classes");
- strcat(shared_archive_path, ".jsa");
- SharedArchivePath = shared_archive_path;
-
// Remaining part of option string
const char* tail;
@@ -3280,6 +3518,12 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
return result;
}
+ // Call get_shared_archive_path() here, after the SharedArchiveFile option (if any) has been parsed.
+ SharedArchivePath = get_shared_archive_path();
+ if (SharedArchivePath == NULL) {
+ return JNI_ENOMEM;
+ }
+
// Delay warning until here so that we've had a chance to process
// the -XX:-PrintWarnings flag
if (needs_hotspotrc_warning) {
@@ -3360,6 +3604,11 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
no_shared_spaces();
#endif // INCLUDE_CDS
+ return JNI_OK;
+}
+
+jint Arguments::apply_ergo() {
+
// Set flags based on ergonomics.
set_ergonomics_flags();
@@ -3435,7 +3684,7 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
FLAG_SET_DEFAULT(ProfileInterpreter, false);
FLAG_SET_DEFAULT(UseBiasedLocking, false);
LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
- LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedKlassPointers, false));
+ LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedClassPointers, false));
#endif // CC_INTERP
#ifdef COMPILER2
@@ -3464,6 +3713,10 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
DebugNonSafepoints = true;
}
+ if (FLAG_IS_CMDLINE(CompressedClassSpaceSize) && !UseCompressedClassPointers) {
+ warning("Setting CompressedClassSpaceSize has no effect when compressed class pointers are not used");
+ }
+
#ifndef PRODUCT
if (CompileTheWorld) {
// Force NmethodSweeper to sweep whole CodeCache each time.
@@ -3471,7 +3724,13 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
NmethodSweepFraction = 1;
}
}
-#endif
+
+ if (!LogVMOutput && FLAG_IS_DEFAULT(LogVMOutput)) {
+ if (use_vm_log()) {
+ LogVMOutput = true;
+ }
+ }
+#endif // PRODUCT
if (PrintCommandLineFlags) {
CommandLineFlags::printSetFlags(tty);
diff --git a/src/share/vm/runtime/arguments.hpp b/src/share/vm/runtime/arguments.hpp
index 0a4350ee3..4b2a821a5 100644
--- a/src/share/vm/runtime/arguments.hpp
+++ b/src/share/vm/runtime/arguments.hpp
@@ -118,11 +118,21 @@ class SystemProperty: public CHeapObj<mtInternal> {
// For use by -agentlib, -agentpath and -Xrun
class AgentLibrary : public CHeapObj<mtInternal> {
friend class AgentLibraryList;
+public:
+ // Is this library valid or not? Don't rely on os_lib == NULL: a statically
+ // linked lib could have a handle of RTLD_DEFAULT, which is 0 on some platforms.
+ enum AgentState {
+ agent_invalid = 0,
+ agent_valid = 1
+ };
+
private:
char* _name;
char* _options;
void* _os_lib;
bool _is_absolute_path;
+ bool _is_static_lib;
+ AgentState _state;
AgentLibrary* _next;
public:
@@ -133,6 +143,11 @@ class AgentLibrary : public CHeapObj<mtInternal> {
void* os_lib() const { return _os_lib; }
void set_os_lib(void* os_lib) { _os_lib = os_lib; }
AgentLibrary* next() const { return _next; }
+ bool is_static_lib() const { return _is_static_lib; }
+ void set_static_lib(bool is_static_lib) { _is_static_lib = is_static_lib; }
+ bool valid() { return (_state == agent_valid); }
+ void set_valid() { _state = agent_valid; }
+ void set_invalid() { _state = agent_invalid; }
// Constructor
AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib) {
@@ -147,6 +162,8 @@ class AgentLibrary : public CHeapObj<mtInternal> {
_is_absolute_path = is_absolute_path;
_os_lib = os_lib;
_next = NULL;
+ _state = agent_invalid;
+ _is_static_lib = false;
}
};
@@ -262,8 +279,10 @@ class Arguments : AllStatic {
// Option flags
static bool _has_profile;
- static bool _has_alloc_profile;
static const char* _gc_log_filename;
+ // Value of the conservative maximum heap alignment needed
+ static size_t _conservative_max_heap_alignment;
+
static uintx _min_heap_size;
// -Xrun arguments
@@ -277,6 +296,8 @@ class Arguments : AllStatic {
{ _agentList.add(new AgentLibrary(name, options, absolute_path, NULL)); }
// Late-binding agents not started via arguments
+ static void add_loaded_agent(AgentLibrary *agentLib)
+ { _agentList.add(agentLib); }
static void add_loaded_agent(const char* name, char* options, bool absolute_path, void* os_lib)
{ _agentList.add(new AgentLibrary(name, options, absolute_path, os_lib)); }
@@ -309,7 +330,9 @@ class Arguments : AllStatic {
// Garbage-First (UseG1GC)
static void set_g1_gc_flags();
// GC ergonomics
+ static void set_conservative_max_heap_alignment();
static void set_use_compressed_oops();
+ static void set_use_compressed_klass_ptrs();
static void set_ergonomics_flags();
static void set_shared_spaces_flags();
// limits the given memory size by the maximum amount of memory this process is
@@ -411,8 +434,10 @@ class Arguments : AllStatic {
static char* SharedArchivePath;
public:
- // Parses the arguments
+ // Parses the arguments, first phase
static jint parse(const JavaVMInitArgs* args);
+ // Apply ergonomics
+ static jint apply_ergo();
// Adjusts the arguments after the OS has adjusted the arguments
static jint adjust_after_os();
// Check for consistency in the selection of the garbage collector.
@@ -426,6 +451,10 @@ class Arguments : AllStatic {
// Used by os_solaris
static bool process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized);
+ static size_t conservative_max_heap_alignment() { return _conservative_max_heap_alignment; }
+ // Return the maximum size a heap with compressed oops can take
+ static size_t max_heap_for_compressed_oops();
+
// return a char* array containing all options
static char** jvm_flags_array() { return _jvm_flags_array; }
static char** jvm_args_array() { return _jvm_args_array; }
@@ -462,9 +491,8 @@ class Arguments : AllStatic {
// -Xloggc:<file>, if not specified will be NULL
static const char* gc_log_filename() { return _gc_log_filename; }
- // -Xprof/-Xaprof
+ // -Xprof
static bool has_profile() { return _has_profile; }
- static bool has_alloc_profile() { return _has_alloc_profile; }
// -Xms, -Xmx
static uintx min_heap_size() { return _min_heap_size; }
diff --git a/src/share/vm/runtime/atomic.cpp b/src/share/vm/runtime/atomic.cpp
index dbb66b2f6..87c80ad33 100644
--- a/src/share/vm/runtime/atomic.cpp
+++ b/src/share/vm/runtime/atomic.cpp
@@ -80,3 +80,32 @@ jlong Atomic::add(jlong add_value, volatile jlong* dest) {
}
return old;
}
+
+void Atomic::inc(volatile short* dest) {
+ // Most platforms do not support atomic increment on a 2-byte value. However,
+ // if the value occupies the most significant 16 bits of an aligned 32-bit
+ // word, then we can do this with an atomic add of 0x10000 to the 32-bit word.
+ //
+ // The least significant parts of this 32-bit word will never be affected, even
+ // in case of overflow/underflow.
+ //
+ // Use the ATOMIC_SHORT_PAIR macro to get the desired alignment.
+#ifdef VM_LITTLE_ENDIAN
+ assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
+ (void)Atomic::add(0x10000, (volatile int*)(dest-1));
+#else
+ assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
+ (void)Atomic::add(0x10000, (volatile int*)(dest));
+#endif
+}
+
+void Atomic::dec(volatile short* dest) {
+#ifdef VM_LITTLE_ENDIAN
+ assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
+ (void)Atomic::add(-0x10000, (volatile int*)(dest-1));
+#else
+ assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
+ (void)Atomic::add(-0x10000, (volatile int*)(dest));
+#endif
+}
+
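
A worked example of the add-0x10000 trick: if the aligned 32-bit word holds
0x0005ABCD, the upper 16-bit half (the atomic short) is 0x0005 and the lower
half 0xABCD. Adding 0x10000 yields 0x0006ABCD: the upper half becomes 0x0006,
the lower half is untouched, and any carry out of bit 31 simply wraps out of
the word, so the neighbouring short can never be corrupted.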
diff --git a/src/share/vm/runtime/atomic.hpp b/src/share/vm/runtime/atomic.hpp
index ae40def88..3f35c3de2 100644
--- a/src/share/vm/runtime/atomic.hpp
+++ b/src/share/vm/runtime/atomic.hpp
@@ -64,11 +64,13 @@ class Atomic : AllStatic {
// Atomically increment location
inline static void inc (volatile jint* dest);
+ static void inc (volatile jshort* dest);
inline static void inc_ptr(volatile intptr_t* dest);
inline static void inc_ptr(volatile void* dest);
// Atomically decrement a location
inline static void dec (volatile jint* dest);
+ static void dec (volatile jshort* dest);
inline static void dec_ptr(volatile intptr_t* dest);
inline static void dec_ptr(volatile void* dest);
@@ -95,4 +97,24 @@ class Atomic : AllStatic {
inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value);
};
+// To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially
+// aligned, such that (*dest) occupies the upper 16 bits of an aligned 32-bit word. The best way to
+// achieve this is to place your short value next to another short value that doesn't need atomic ops.
+//
+// Example
+// ATOMIC_SHORT_PAIR(
+// volatile short _refcount, // needs atomic operation
+// unsigned short _length // number of UTF8 characters in the symbol (does not need atomic op)
+// );
+
+#ifdef VM_LITTLE_ENDIAN
+#define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \
+ non_atomic_decl; \
+ atomic_decl
+#else
+#define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \
+ atomic_decl ; \
+ non_atomic_decl
+#endif
+
#endif // SHARE_VM_RUNTIME_ATOMIC_HPP
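
A minimal usage sketch following the Symbol-style example in the comment
above (class and member names assumed for illustration; requires atomic.hpp):

  class RefCounted {
    // On little-endian the macro emits _length first, so _refcount lands in
    // the upper 16 bits of the aligned 32-bit word; on big-endian the order
    // flips to achieve the same placement.
    ATOMIC_SHORT_PAIR(
      volatile short _refcount,   // updated via Atomic::inc/dec
      unsigned short _length      // plain field, no atomic ops needed
    );
   public:
    void inc_refcount() { Atomic::inc(&_refcount); }
    void dec_refcount() { Atomic::dec(&_refcount); }
  };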
diff --git a/src/share/vm/runtime/compilationPolicy.cpp b/src/share/vm/runtime/compilationPolicy.cpp
index 8fff9586b..23fbc87f7 100644
--- a/src/share/vm/runtime/compilationPolicy.cpp
+++ b/src/share/vm/runtime/compilationPolicy.cpp
@@ -138,6 +138,23 @@ bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) {
return false;
}
+// Returns true if m is allowed to be osr compiled
+bool CompilationPolicy::can_be_osr_compiled(methodHandle m, int comp_level) {
+ bool result = false;
+ if (comp_level == CompLevel_all) {
+ if (TieredCompilation) {
+ // enough to be osr compilable at any level for tiered
+ result = !m->is_not_osr_compilable(CompLevel_simple) || !m->is_not_osr_compilable(CompLevel_full_optimization);
+ } else {
+ // must be osr compilable at the available level for non-tiered
+ result = !m->is_not_osr_compilable(CompLevel_highest_tier);
+ }
+ } else if (is_compile(comp_level)) {
+ result = !m->is_not_osr_compilable(comp_level);
+ }
+ return (result && can_be_compiled(m, comp_level));
+}
+
bool CompilationPolicy::is_compilation_enabled() {
// NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
return !delay_compilation_during_startup() && CompileBroker::should_compile_new_jobs();
@@ -458,7 +475,7 @@ void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThr
const int hot_count = m->backedge_count();
const char* comment = "backedge_count";
- if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m, comp_level)) {
+ if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread);
NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
}
@@ -514,7 +531,7 @@ void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, Java
const int hot_count = m->backedge_count();
const char* comment = "backedge_count";
- if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m, comp_level)) {
+ if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread);
NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
}
diff --git a/src/share/vm/runtime/compilationPolicy.hpp b/src/share/vm/runtime/compilationPolicy.hpp
index 6d90e049e..3bba54e5d 100644
--- a/src/share/vm/runtime/compilationPolicy.hpp
+++ b/src/share/vm/runtime/compilationPolicy.hpp
@@ -52,6 +52,8 @@ public:
static bool must_be_compiled(methodHandle m, int comp_level = CompLevel_all);
// m is allowed to be compiled
static bool can_be_compiled(methodHandle m, int comp_level = CompLevel_all);
+ // m is allowed to be osr compiled
+ static bool can_be_osr_compiled(methodHandle m, int comp_level = CompLevel_all);
static bool is_compilation_enabled();
static void set_policy(CompilationPolicy* policy) { _policy = policy; }
static CompilationPolicy* policy() { return _policy; }
diff --git a/src/share/vm/runtime/deoptimization.cpp b/src/share/vm/runtime/deoptimization.cpp
index ab56d07bf..d76f6c1cd 100644
--- a/src/share/vm/runtime/deoptimization.cpp
+++ b/src/share/vm/runtime/deoptimization.cpp
@@ -641,18 +641,22 @@ JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_m
// at an uncommon trap for an invoke (where the compiler
// generates debug info before the invoke has executed)
Bytecodes::Code cur_code = str.next();
- if (cur_code == Bytecodes::_invokevirtual ||
- cur_code == Bytecodes::_invokespecial ||
- cur_code == Bytecodes::_invokestatic ||
- cur_code == Bytecodes::_invokeinterface) {
+ if (cur_code == Bytecodes::_invokevirtual ||
+ cur_code == Bytecodes::_invokespecial ||
+ cur_code == Bytecodes::_invokestatic ||
+ cur_code == Bytecodes::_invokeinterface ||
+ cur_code == Bytecodes::_invokedynamic) {
Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
Symbol* signature = invoke.signature();
ArgumentSizeComputer asc(signature);
cur_invoke_parameter_size = asc.size();
- if (cur_code != Bytecodes::_invokestatic) {
+ if (invoke.has_receiver()) {
// Add in receiver
++cur_invoke_parameter_size;
}
+ if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
+ callee_size_of_parameters++;
+ }
}
if (str.bci() < max_bci) {
Bytecodes::Code bc = str.next();
@@ -667,6 +671,7 @@ JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_m
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokeinterface:
+ case Bytecodes::_invokedynamic:
case Bytecodes::_athrow:
break;
default: {
@@ -1752,7 +1757,7 @@ int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
else return trap_state & ~DS_RECOMPILE_BIT;
}
//---------------------------format_trap_state---------------------------------
-// This is used for debugging and diagnostics, including hotspot.log output.
+// This is used for debugging and diagnostics, including LogFile output.
const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
int trap_state) {
DeoptReason reason = trap_state_reason(trap_state);
@@ -1829,7 +1834,7 @@ const char* Deoptimization::trap_action_name(int action) {
return buf;
}
-// This is used for debugging and diagnostics, including hotspot.log output.
+// This is used for debugging and diagnostics, including LogFile output.
const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
int trap_request) {
jint unloaded_class_index = trap_request_index(trap_request);
diff --git a/src/share/vm/runtime/fieldDescriptor.cpp b/src/share/vm/runtime/fieldDescriptor.cpp
index 23d679494..79e3ddcd8 100644
--- a/src/share/vm/runtime/fieldDescriptor.cpp
+++ b/src/share/vm/runtime/fieldDescriptor.cpp
@@ -97,18 +97,32 @@ oop fieldDescriptor::string_initial_value(TRAPS) const {
return constants()->uncached_string_at(initial_value_index(), CHECK_0);
}
-void fieldDescriptor::initialize(InstanceKlass* ik, int index) {
- _cp = ik->constants();
+void fieldDescriptor::reinitialize(InstanceKlass* ik, int index) {
+ if (_cp.is_null() || field_holder() != ik) {
+ _cp = constantPoolHandle(Thread::current(), ik->constants());
+ // _cp should now reference ik's constant pool; i.e., ik is now field_holder.
+ assert(field_holder() == ik, "must be already initialized to this class");
+ }
FieldInfo* f = ik->field(index);
assert(!f->is_internal(), "regular Java fields only");
_access_flags = accessFlags_from(f->access_flags());
guarantee(f->name_index() != 0 && f->signature_index() != 0, "bad constant pool index for fieldDescriptor");
_index = index;
+ verify();
}
#ifndef PRODUCT
+void fieldDescriptor::verify() const {
+ if (_cp.is_null()) {
+ assert(_index == badInt, "constructor must be called"); // see constructor
+ } else {
+ assert(_index >= 0, "good index");
+ assert(_index < field_holder()->java_fields_count(), "oob");
+ }
+}
+
void fieldDescriptor::print_on(outputStream* st) const {
access_flags().print_on(st);
name()->print_value_on(st);
diff --git a/src/share/vm/runtime/fieldDescriptor.hpp b/src/share/vm/runtime/fieldDescriptor.hpp
index 12b75cab1..9c3101b38 100644
--- a/src/share/vm/runtime/fieldDescriptor.hpp
+++ b/src/share/vm/runtime/fieldDescriptor.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,13 @@ class fieldDescriptor VALUE_OBJ_CLASS_SPEC {
}
public:
+ fieldDescriptor() {
+ DEBUG_ONLY(_index = badInt);
+ }
+ fieldDescriptor(InstanceKlass* ik, int index) {
+ DEBUG_ONLY(_index = badInt);
+ reinitialize(ik, index);
+ }
Symbol* name() const {
return field()->name(_cp);
}
@@ -112,12 +119,13 @@ class fieldDescriptor VALUE_OBJ_CLASS_SPEC {
}
// Initialization
- void initialize(InstanceKlass* ik, int index);
+ void reinitialize(InstanceKlass* ik, int index);
// Print
void print() { print_on(tty); }
void print_on(outputStream* st) const PRODUCT_RETURN;
void print_on_for(outputStream* st, oop obj) PRODUCT_RETURN;
+ void verify() const PRODUCT_RETURN;
};
#endif // SHARE_VM_RUNTIME_FIELDDESCRIPTOR_HPP
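
The renamed reinitialize() keeps the cached constant-pool handle when the descriptor is re-pointed at another field of the same holder. A standalone model of that caching idea (all types are stand-ins, not the VM's):

    #include <cassert>
    #include <cstddef>
    struct Klass { int id; };
    struct Descriptor {
      const Klass* holder;   // models the cached _cp / field_holder()
      int index;
      Descriptor() : holder(NULL), index(-1) {}
      void reinitialize(const Klass* k, int i) {
        if (holder != k) holder = k;  // refresh only when the holder changes
        index = i;
      }
    };
    int main() {
      Klass k = { 1 };
      Descriptor d;
      d.reinitialize(&k, 3);  // first use: acquires the holder
      d.reinitialize(&k, 5);  // same holder: cheap path, index only
      assert(d.holder == &k && d.index == 5);
      return 0;
    }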
diff --git a/src/share/vm/runtime/fprofiler.cpp b/src/share/vm/runtime/fprofiler.cpp
index 111c4db5a..3ddf1a191 100644
--- a/src/share/vm/runtime/fprofiler.cpp
+++ b/src/share/vm/runtime/fprofiler.cpp
@@ -264,7 +264,7 @@ class ProfilerNode {
public:
- void* operator new(size_t size, ThreadProfiler* tp);
+ void* operator new(size_t size, ThreadProfiler* tp) throw();
void operator delete(void* p);
ProfilerNode() {
@@ -373,7 +373,7 @@ class ProfilerNode {
}
};
-void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp){
+void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp) throw() {
void* result = (void*) tp->area_top;
tp->area_top += size;
@@ -925,6 +925,8 @@ void FlatProfiler::record_thread_ticks() {
FlatProfiler::interval_print();
FlatProfiler::interval_reset();
}
+
+ FREE_C_HEAP_ARRAY(JavaThread *, threadsList, mtInternal);
} else {
// Couldn't get the threads lock, just record that rather than blocking
FlatProfiler::threads_lock_ticks += 1;
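
The added throw() specifications are not cosmetic: an operator new that can return NULL must be declared non-throwing, or a conforming compiler may assume the result is never NULL and omit the null check before running the constructor. A compilable illustration of the same arena-style allocator shape (names here are hypothetical):

    #include <cstddef>
    #include <cstdio>
    struct Arena { char buf[1024]; size_t top; Arena() : top(0) {} };
    struct Node {
      int payload;
      // May fail and return NULL, hence throw() (i.e., non-throwing).
      void* operator new(size_t size, Arena* a) throw() {
        if (a->top + size > sizeof(a->buf)) return NULL;
        void* p = a->buf + a->top;
        a->top += size;
        return p;
      }
      // Matching placement delete, used only if a constructor throws.
      void operator delete(void* p, Arena* a) {}
    };
    int main() {
      Arena arena;
      Node* n = new (&arena) Node;  // compiler keeps the null check
      printf("allocated at %p\n", (void*) n);
      return 0;
    }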
diff --git a/src/share/vm/runtime/frame.cpp b/src/share/vm/runtime/frame.cpp
index 96b2b6221..a01ae677b 100644
--- a/src/share/vm/runtime/frame.cpp
+++ b/src/share/vm/runtime/frame.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "compiler/abstractCompiler.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
@@ -224,9 +225,20 @@ bool frame::is_first_java_frame() const {
bool frame::entry_frame_is_first() const {
- return entry_frame_call_wrapper()->anchor()->last_Java_sp() == NULL;
+ return entry_frame_call_wrapper()->is_first_frame();
}
+JavaCallWrapper* frame::entry_frame_call_wrapper_if_safe(JavaThread* thread) const {
+ JavaCallWrapper** jcw = entry_frame_call_wrapper_addr();
+ address addr = (address) jcw;
+
+ // addr must be within the usable part of the stack
+ if (thread->is_in_usable_stack(addr)) {
+ return *jcw;
+ }
+
+ return NULL;
+}
bool frame::should_be_deoptimized() const {
if (_deopt_state == is_deoptimized ||
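
entry_frame_call_wrapper_if_safe() follows the usual crash-reporter discipline: prove the slot's address lies inside the thread's usable stack before dereferencing it. The same pattern in standalone form (bounds and types are modeled, not the VM's):

    #include <cstdint>
    #include <cstdio>
    #include <cstddef>
    struct Thread { uintptr_t stack_lo, stack_hi; };
    static bool is_in_usable_stack(const Thread& t, uintptr_t a) {
      return a >= t.stack_lo && a < t.stack_hi;
    }
    // Dereference the slot only after validating the slot address itself.
    static int* load_if_safe(const Thread& t, int** slot) {
      return is_in_usable_stack(t, (uintptr_t) slot) ? *slot : NULL;
    }
    int main() {
      Thread t = { 0x1000, 0x2000 };  // modeled stack bounds
      int v = 7;
      int* value_slot = &v;
      // &value_slot is a real address, but outside the modeled bounds:
      printf("%s\n", load_if_safe(t, &value_slot) ? "loaded" : "rejected");
      return 0;
    }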
@@ -390,7 +402,6 @@ void frame::interpreter_frame_set_locals(intptr_t* locs) {
Method* frame::interpreter_frame_method() const {
assert(is_interpreted_frame(), "interpreted frame expected");
Method* m = *interpreter_frame_method_addr();
- assert(m->is_metadata(), "bad Method* in interpreter frame");
assert(m->is_method(), "not a Method*");
return m;
}
@@ -552,7 +563,7 @@ void frame::print_value_on(outputStream* st, JavaThread *thread) const {
st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), unextended_sp());
if (sp() != NULL)
- st->print(", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), pc());
+ st->print(", fp=" INTPTR_FORMAT ", real_fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), real_fp(), pc());
if (StubRoutines::contains(pc())) {
st->print_cr(")");
@@ -644,7 +655,7 @@ void frame::interpreter_frame_print_on(outputStream* st) const {
// Return whether the frame is in the VM or os indicating a Hotspot problem.
// Otherwise, it's likely a bug in the native library that the Java code calls,
// hopefully indicating where to submit bugs.
-static void print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
+void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
// C/C++ frame
bool in_vm = os::address_is_in_vm(pc);
st->print(in_vm ? "V" : "C");
@@ -713,10 +724,14 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
} else if (_cb->is_buffer_blob()) {
st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
} else if (_cb->is_nmethod()) {
- Method* m = ((nmethod *)_cb)->method();
+ nmethod* nm = (nmethod*)_cb;
+ Method* m = nm->method();
if (m != NULL) {
m->name_and_sig_as_C_string(buf, buflen);
- st->print("J %s", buf);
+ st->print("J %d%s %s %s (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+0x%x]",
+ nm->compile_id(), (nm->is_osr_method() ? "%" : ""),
+ ((nm->compiler() != NULL) ? nm->compiler()->name() : ""),
+ buf, m->code_size(), _pc, _cb->code_begin(), _pc - _cb->code_begin());
} else {
st->print("J " PTR_FORMAT, pc());
}
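
With the richer format string, a compiled Java frame in the error report now carries the compile id, an OSR marker, the compiler name, and pc offsets within the nmethod. An illustrative line (every value below is made up):

    J 523% C2 com.example.Foo.bar(I)V (42 bytes) @ 0x00007f5d2c0123a8 [0x00007f5d2c012000+0x3a8]

Here 523 is the compile id, the trailing '%' flags an OSR method, C2 is the compiler, and the bracketed part gives the code start plus the pc offset.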
@@ -1011,6 +1026,7 @@ class CompiledArgumentOopFinder: public SignatureInfo {
OopClosure* _f;
int _offset; // the current offset, incremented with each argument
bool _has_receiver; // true if the callee has a receiver
+ bool _has_appendix; // true if the call has an appendix
frame _fr;
RegisterMap* _reg_map;
int _arg_size;
@@ -1030,19 +1046,20 @@ class CompiledArgumentOopFinder: public SignatureInfo {
}
public:
- CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, OopClosure* f, frame fr, const RegisterMap* reg_map)
+ CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
: SignatureInfo(signature) {
// initialize CompiledArgumentOopFinder
_f = f;
_offset = 0;
_has_receiver = has_receiver;
+ _has_appendix = has_appendix;
_fr = fr;
_reg_map = (RegisterMap*)reg_map;
- _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
+ _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0);
int arg_size;
- _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, &arg_size);
+ _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size);
assert(arg_size == _arg_size, "wrong arg size");
}
@@ -1052,12 +1069,16 @@ class CompiledArgumentOopFinder: public SignatureInfo {
_offset++;
}
iterate_parameters();
+ if (_has_appendix) {
+ handle_oop_offset();
+ _offset++;
+ }
}
};
-void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f) {
+void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f) {
ResourceMark rm;
- CompiledArgumentOopFinder finder(signature, has_receiver, f, *this, reg_map);
+ CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
finder.oops_do();
}
diff --git a/src/share/vm/runtime/frame.hpp b/src/share/vm/runtime/frame.hpp
index 196f4035b..ec08629fb 100644
--- a/src/share/vm/runtime/frame.hpp
+++ b/src/share/vm/runtime/frame.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -137,6 +137,7 @@ class frame VALUE_OBJ_CLASS_SPEC {
bool is_interpreted_frame() const;
bool is_java_frame() const;
bool is_entry_frame() const; // Java frame called from C?
+ bool is_stub_frame() const;
bool is_ignored_frame() const;
bool is_native_frame() const;
bool is_runtime_frame() const;
@@ -355,7 +356,9 @@ class frame VALUE_OBJ_CLASS_SPEC {
public:
// Entry frames
- JavaCallWrapper* entry_frame_call_wrapper() const;
+ JavaCallWrapper* entry_frame_call_wrapper() const { return *entry_frame_call_wrapper_addr(); }
+ JavaCallWrapper* entry_frame_call_wrapper_if_safe(JavaThread* thread) const;
+ JavaCallWrapper** entry_frame_call_wrapper_addr() const;
intptr_t* entry_frame_argument_at(int offset) const;
// tells whether there is another chunk of Delta stack above
@@ -406,6 +409,7 @@ class frame VALUE_OBJ_CLASS_SPEC {
void print_on(outputStream* st) const;
void interpreter_frame_print_on(outputStream* st) const;
void print_on_error(outputStream* st, char* buf, int buflen, bool verbose = false) const;
+ static void print_C_frame(outputStream* st, char* buf, int buflen, address pc);
// Add annotated descriptions of memory locations belonging to this frame to values
void describe(FrameValues& values, int frame_no);
@@ -414,7 +418,7 @@ class frame VALUE_OBJ_CLASS_SPEC {
oop* oopmapreg_to_location(VMReg reg, const RegisterMap* regmap) const;
// Oops-do's
- void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f);
+ void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f);
void oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true);
private:
diff --git a/src/share/vm/runtime/frame.inline.hpp b/src/share/vm/runtime/frame.inline.hpp
index c14f473db..10aeaa6a4 100644
--- a/src/share/vm/runtime/frame.inline.hpp
+++ b/src/share/vm/runtime/frame.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -82,6 +82,10 @@ inline bool frame::is_entry_frame() const {
return StubRoutines::returns_to_call_stub(pc());
}
+inline bool frame::is_stub_frame() const {
+ return StubRoutines::is_stub_code(pc()) || (_cb != NULL && _cb->is_adapter_blob());
+}
+
inline bool frame::is_first_frame() const {
return is_entry_frame() && entry_frame_is_first();
}
diff --git a/src/share/vm/runtime/globals.cpp b/src/share/vm/runtime/globals.cpp
index 9d0b938fa..280e15976 100644
--- a/src/share/vm/runtime/globals.cpp
+++ b/src/share/vm/runtime/globals.cpp
@@ -73,12 +73,6 @@ bool Flag::is_unlocked() const {
strcmp(kind, "{C2 diagnostic}") == 0 ||
strcmp(kind, "{ARCH diagnostic}") == 0 ||
strcmp(kind, "{Shark diagnostic}") == 0) {
- if (strcmp(name, "EnableInvokeDynamic") == 0 && UnlockExperimentalVMOptions && !UnlockDiagnosticVMOptions) {
- // transitional logic to allow tests to run until they are changed
- static int warned;
- if (++warned == 1) warning("Use -XX:+UnlockDiagnosticVMOptions before EnableInvokeDynamic flag");
- return true;
- }
return UnlockDiagnosticVMOptions;
} else if (strcmp(kind, "{experimental}") == 0 ||
strcmp(kind, "{C2 experimental}") == 0 ||
@@ -211,6 +205,7 @@ void Flag::print_as_flag(outputStream* st) {
#define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 product}", DEFAULT },
#define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 pd product}", DEFAULT },
+#define C1_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 diagnostic}", DEFAULT },
#ifdef PRODUCT
#define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
#define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc) /* flag is constant */
@@ -266,7 +261,7 @@ static Flag flagTable[] = {
G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
+ C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_DIAGNOSTIC_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
#endif
#ifdef COMPILER2
C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
@@ -282,14 +277,14 @@ static Flag flagTable[] = {
Flag* Flag::flags = flagTable;
size_t Flag::numFlags = (sizeof(flagTable) / sizeof(Flag));
-inline bool str_equal(const char* s, char* q, size_t len) {
+inline bool str_equal(const char* s, const char* q, size_t len) {
// s is null terminated, q is not!
if (strlen(s) != (unsigned int) len) return false;
return strncmp(s, q, len) == 0;
}
// Search the flag table for a named flag
-Flag* Flag::find_flag(char* name, size_t length, bool allow_locked) {
+Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked) {
for (Flag* current = &flagTable[0]; current->name != NULL; current++) {
if (str_equal(current->name, name, length)) {
// Found a matching entry. Report locked flags only if allowed.
@@ -307,6 +302,52 @@ Flag* Flag::find_flag(char* name, size_t length, bool allow_locked) {
return NULL;
}
+// Compute string similarity based on Dice's coefficient
+static float str_similar(const char* str1, const char* str2, size_t len2) {
+ int len1 = (int) strlen(str1);
+ int total = len1 + (int) len2;
+
+ int hit = 0;
+
+ for (int i = 0; i < len1 - 1; ++i) {
+ for (int j = 0; j < (int) len2 - 1; ++j) {
+ if ((str1[i] == str2[j]) && (str1[i+1] == str2[j+1])) {
+ ++hit;
+ break;
+ }
+ }
+ }
+
+ return 2.0f * (float) hit / (float) total;
+}
+
+Flag* Flag::fuzzy_match(const char* name, size_t length, bool allow_locked) {
+ float VMOptionsFuzzyMatchSimilarity = 0.7f;
+ Flag* match = NULL;
+ float score;
+ float max_score = -1;
+
+ for (Flag* current = &flagTable[0]; current->name != NULL; current++) {
+ score = str_similar(current->name, name, length);
+ if (score > max_score) {
+ max_score = score;
+ match = current;
+ }
+ }
+
+ if (!(match->is_unlocked() || match->is_unlocker())) {
+ if (!allow_locked) {
+ return NULL;
+ }
+ }
+
+ if (max_score < VMOptionsFuzzyMatchSimilarity) {
+ return NULL;
+ }
+
+ return match;
+}
+
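
The matcher above is easy to try outside the VM: the loop counts matching character bigrams (the break caps each str1 bigram at one hit) and scores 2*hits/(len1+len2). A standalone copy with an example typo, against the 0.7 threshold used above:

    #include <cstdio>
    #include <cstring>
    static float str_similar(const char* s1, const char* s2) {
      int len1 = (int) strlen(s1);
      int len2 = (int) strlen(s2);
      int hit = 0;
      for (int i = 0; i < len1 - 1; ++i) {
        for (int j = 0; j < len2 - 1; ++j) {
          if (s1[i] == s2[j] && s1[i+1] == s2[j+1]) { ++hit; break; }
        }
      }
      return 2.0f * (float) hit / (float) (len1 + len2);
    }
    int main() {
      // One dropped letter scores 2*15/33, about 0.91 -- above the threshold:
      printf("%.2f\n", str_similar("UseCompressedOops", "UseCompresedOops"));
      return 0;
    }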
// Returns the address of the index'th element
static Flag* address_of_flag(CommandLineFlagWithType flag) {
assert((size_t)flag < Flag::numFlags, "bad command line flag index");
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index cf16d45fb..5b1a40e1a 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -187,6 +187,7 @@ define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
define_pd_global(intx, CodeCacheMinBlockLength, 1);
+define_pd_global(intx, CodeCacheMinimumUseSpace, 200*K);
define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(4*M));
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
@@ -232,7 +233,8 @@ struct Flag {
// number of flags
static size_t numFlags;
- static Flag* find_flag(char* name, size_t length, bool allow_locked = false);
+ static Flag* find_flag(const char* name, size_t length, bool allow_locked = false);
+ static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
bool is_bool() const { return strcmp(type, "bool") == 0; }
bool get_bool() const { return *((bool*) addr); }
@@ -298,12 +300,12 @@ class CounterSetting {
};
-class IntFlagSetting {
- intx val;
- intx* flag;
+class UIntFlagSetting {
+ uintx val;
+ uintx* flag;
public:
- IntFlagSetting(intx& fl, intx newValue) { flag = &fl; val = fl; fl = newValue; }
- ~IntFlagSetting() { *flag = val; }
+ UIntFlagSetting(uintx& fl, uintx newValue) { flag = &fl; val = fl; fl = newValue; }
+ ~UIntFlagSetting() { *flag = val; }
};
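
UIntFlagSetting is a scope guard: save the flag on construction, restore it in the destructor, so an override cannot leak past the block even on early return. Usage in standalone form (my_uintx is a stand-in for the VM's uintx):

    #include <cassert>
    typedef unsigned long my_uintx;
    struct UIntFlagSetting {
      my_uintx  val;
      my_uintx* flag;
      UIntFlagSetting(my_uintx& fl, my_uintx newValue) { flag = &fl; val = fl; fl = newValue; }
      ~UIntFlagSetting() { *flag = val; }
    };
    static my_uintx SomeFlag = 10;
    int main() {
      {
        UIntFlagSetting guard(SomeFlag, 99);  // override for this scope only
        assert(SomeFlag == 99);
      }
      assert(SomeFlag == 10);                 // restored by the destructor
      return 0;
    }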
@@ -453,8 +455,8 @@ class CommandLineFlags {
"Use 32-bit object references in 64-bit VM " \
"lp64_product means flag is always constant in 32 bit VM") \
\
- lp64_product(bool, UseCompressedKlassPointers, false, \
- "Use 32-bit klass pointers in 64-bit VM " \
+ lp64_product(bool, UseCompressedClassPointers, false, \
+ "Use 32-bit class pointers in 64-bit VM " \
"lp64_product means flag is always constant in 32 bit VM") \
\
notproduct(bool, CheckCompressedOops, true, \
@@ -525,12 +527,12 @@ class CommandLineFlags {
product(bool, ForceNUMA, false, \
"Force NUMA optimizations on single-node/UMA systems") \
\
- product(intx, NUMAChunkResizeWeight, 20, \
- "Percentage (0-100) used to weight the current sample when " \
+ product(uintx, NUMAChunkResizeWeight, 20, \
+ "Percentage (0-100) used to weigh the current sample when " \
"computing exponentially decaying average for " \
"AdaptiveNUMAChunkSizing") \
\
- product(intx, NUMASpaceResizeRate, 1*G, \
+ product(uintx, NUMASpaceResizeRate, 1*G, \
"Do not reallocate more that this amount per collection") \
\
product(bool, UseAdaptiveNUMAChunkSizing, true, \
@@ -539,7 +541,7 @@ class CommandLineFlags {
product(bool, NUMAStats, false, \
"Print NUMA stats in detailed heap information") \
\
- product(intx, NUMAPageScanRate, 256, \
+ product(uintx, NUMAPageScanRate, 256, \
"Maximum number of pages to include in the page scan procedure") \
\
product_pd(bool, NeedsDeoptSuspend, \
@@ -656,6 +658,9 @@ class CommandLineFlags {
product(bool, UseAESIntrinsics, false, \
"use intrinsics for AES versions of crypto") \
\
+ product(bool, UseCRC32Intrinsics, false, \
+ "use intrinsics for java.util.zip.CRC32") \
+ \
develop(bool, TraceCallFixup, false, \
"traces all call fixups") \
\
@@ -727,7 +732,7 @@ class CommandLineFlags {
diagnostic(bool, LogEvents, true, \
"Enable the various ring buffer event logs") \
\
- diagnostic(intx, LogEventsBufferEntries, 10, \
+ diagnostic(uintx, LogEventsBufferEntries, 10, \
"Enable the various ring buffer event logs") \
\
product(bool, BytecodeVerificationRemote, true, \
@@ -887,7 +892,7 @@ class CommandLineFlags {
"stay alive at the expense of JVM performance") \
\
diagnostic(bool, LogCompilation, false, \
- "Log compilation activity in detail to hotspot.log or LogFile") \
+ "Log compilation activity in detail to LogFile") \
\
product(bool, PrintCompilation, false, \
"Print compilations") \
@@ -1171,9 +1176,6 @@ class CommandLineFlags {
product(bool, CompactFields, true, \
"Allocate nonstatic fields in gaps between previous fields") \
\
- notproduct(bool, PrintCompactFieldsSavings, false, \
- "Print how many words were saved with CompactFields") \
- \
AARCH64_ONLY(product_pd(bool, UseBiasedLocking, \
"Enable biased locking in JVM")) \
NOT_AARCH64(product(bool, UseBiasedLocking, true, \
@@ -1446,16 +1448,17 @@ class CommandLineFlags {
product(bool, ParallelGCVerbose, false, \
"Verbose output for parallel GC.") \
\
- product(intx, ParallelGCBufferWastePct, 10, \
- "wasted fraction of parallel allocation buffer.") \
+ product(uintx, ParallelGCBufferWastePct, 10, \
+ "Wasted fraction of parallel allocation buffer.") \
\
diagnostic(bool, ParallelGCRetainPLAB, false, \
"Retain parallel allocation buffers across scavenges; " \
" -- disabled because this currently conflicts with " \
" parallel card scanning under certain conditions ") \
\
- product(intx, TargetPLABWastePct, 10, \
- "target wasted space in last buffer as pct of overall allocation")\
+ product(uintx, TargetPLABWastePct, 10, \
+ "Target wasted space in last buffer as percent of overall " \
+ "allocation") \
\
product(uintx, PLABWeight, 75, \
"Percentage (0-100) used to weight the current sample when" \
@@ -1533,7 +1536,7 @@ class CommandLineFlags {
product(bool, AlwaysPreTouch, false, \
"It forces all freshly committed pages to be pre-touched.") \
\
- product_pd(intx, CMSYoungGenPerWorker, \
+ product_pd(uintx, CMSYoungGenPerWorker, \
"The maximum size of young gen chosen by default per GC worker " \
"thread available") \
\
@@ -1700,6 +1703,9 @@ class CommandLineFlags {
product(bool, CMSAbortSemantics, false, \
"Whether abort-on-overflow semantics is implemented") \
\
+ product(bool, CMSParallelInitialMarkEnabled, true, \
+ "Use the parallel initial mark.") \
+ \
product(bool, CMSParallelRemarkEnabled, true, \
"Whether parallel remark enabled (only if ParNewGC)") \
\
@@ -1711,6 +1717,14 @@ class CommandLineFlags {
"Whether to always record survivor space PLAB bdries" \
" (effective only if CMSParallelSurvivorRemarkEnabled)") \
\
+ product(bool, CMSEdenChunksRecordAlways, true, \
+ "Whether to always record eden chunks used for " \
+ "the parallel initial mark or remark of eden" ) \
+ \
+ product(bool, CMSPrintEdenSurvivorChunks, false, \
+ "Print the eden and the survivor chunks used for the parallel " \
+ "initial mark or remark of the eden/survivor spaces") \
+ \
product(bool, CMSConcurrentMTEnabled, true, \
"Whether multi-threaded concurrent work enabled (if ParNewGC)") \
\
@@ -1851,7 +1865,7 @@ class CommandLineFlags {
product(bool, UseCMSInitiatingOccupancyOnly, false, \
"Only use occupancy as a crierion for starting a CMS collection") \
\
- product(intx, CMSIsTooFullPercentage, 98, \
+ product(uintx, CMSIsTooFullPercentage, 98, \
"An absolute ceiling above which CMS will always consider the " \
"unloading of classes when class unloading is enabled") \
\
@@ -1890,7 +1904,7 @@ class CommandLineFlags {
develop(uintx, PromotionFailureALotInterval, 5, \
"Total collections between promotion failures alot") \
\
- experimental(intx, WorkStealingSleepMillis, 1, \
+ experimental(uintx, WorkStealingSleepMillis, 1, \
"Sleep time when sleep is used for yields") \
\
experimental(uintx, WorkStealingYieldsBeforeSleep, 5000, \
@@ -1933,6 +1947,9 @@ class CommandLineFlags {
notproduct(bool, ExecuteInternalVMTests, false, \
"Enable execution of internal VM tests.") \
\
+ notproduct(bool, VerboseInternalVMTests, false, \
+ "Turn on logging for internal VM tests.") \
+ \
product_pd(bool, UseTLAB, "Use thread-local object allocation") \
\
product_pd(bool, ResizeTLAB, \
@@ -2034,7 +2051,7 @@ class CommandLineFlags {
"Number of collections before the adaptive sizing is started") \
\
product(uintx, AdaptiveSizePolicyOutputInterval, 0, \
- "Collecton interval for printing information; zero => never") \
+ "Collection interval for printing information; zero means never") \
\
product(bool, UseAdaptiveSizePolicyFootprintGoal, true, \
"Use adaptive minimum footprint as a goal") \
@@ -2327,6 +2344,10 @@ class CommandLineFlags {
"Print diagnostic message when GC is stalled" \
"by JNI critical section") \
\
+ experimental(double, ObjectCountCutOffPercent, 0.5, \
+ "The percentage of the used heap that the instances of a class " \
+ "must occupy for the class to generate a trace event.") \
+ \
/* GC log rotation setting */ \
\
product(bool, UseGCLogFileRotation, false, \
@@ -2491,16 +2512,17 @@ class CommandLineFlags {
"Print all VM flags with default values and descriptions and exit")\
\
diagnostic(bool, SerializeVMOutput, true, \
- "Use a mutex to serialize output to tty and hotspot.log") \
+ "Use a mutex to serialize output to tty and LogFile") \
\
diagnostic(bool, DisplayVMOutput, true, \
"Display all VM output on the tty, independently of LogVMOutput") \
\
- diagnostic(bool, LogVMOutput, trueInDebug, \
- "Save VM output to hotspot.log, or to LogFile") \
+ diagnostic(bool, LogVMOutput, false, \
+ "Save VM output to LogFile") \
\
diagnostic(ccstr, LogFile, NULL, \
- "If LogVMOutput is on, save VM output to this file [hotspot.log]") \
+ "If LogVMOutput or LogCompilation is on, save VM output to " \
+ "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)") \
\
product(ccstr, ErrorFile, NULL, \
"If an error occurs, save the error data to this file " \
@@ -2518,6 +2540,9 @@ class CommandLineFlags {
product(bool, PrintStringTableStatistics, false, \
"print statistics about the StringTable and SymbolTable") \
\
+ diagnostic(bool, VerifyStringTableAtExit, false, \
+ "verify StringTable contents at exit") \
+ \
notproduct(bool, PrintSymbolTableSizeHistogram, false, \
"print histogram of the symbol table") \
\
@@ -2596,9 +2621,6 @@ class CommandLineFlags {
product(bool, AggressiveOpts, false, \
"Enable aggressive optimizations - see arguments.cpp") \
\
- product(bool, UseStringCache, false, \
- "Enable String cache capabilities on String.java") \
- \
/* statistics */ \
develop(bool, CountCompiledCalls, false, \
"counts method invocations") \
@@ -3035,9 +3057,9 @@ class CommandLineFlags {
product(uintx, MaxMetaspaceSize, max_uintx, \
"Maximum size of Metaspaces (in bytes)") \
\
- product(uintx, ClassMetaspaceSize, 2*M, \
- "Maximum size of InstanceKlass area in Metaspace used for " \
- "UseCompressedKlassPointers") \
+ product(uintx, CompressedClassSpaceSize, 1*G, \
+ "Maximum size of class area in Metaspace when compressed " \
+ "class pointers are used") \
\
product(uintx, MinHeapFreeRatio, 40, \
"Min percentage of heap free after GC to avoid expansion") \
@@ -3063,7 +3085,7 @@ class CommandLineFlags {
product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \
"Max expansion of Metaspace without full GC (in bytes)") \
\
- product(intx, QueuedAllocationWarningCount, 0, \
+ product(uintx, QueuedAllocationWarningCount, 0, \
"Number of times an allocation that queues behind a GC " \
"will retry before printing a warning") \
\
@@ -3091,7 +3113,7 @@ class CommandLineFlags {
"either completely full or completely empty. Par compact also" \
"has a smaller default value; see arguments.cpp.") \
\
- product(intx, MarkSweepAlwaysCompactCount, 4, \
+ product(uintx, MarkSweepAlwaysCompactCount, 4, \
"How often should we fully compact the heap (ignoring the dead " \
"space parameters)") \
\
@@ -3172,6 +3194,9 @@ class CommandLineFlags {
product_pd(uintx, InitialCodeCacheSize, \
"Initial code cache size (in bytes)") \
\
+ develop_pd(uintx, CodeCacheMinimumUseSpace, \
+ "Minimum code cache size (in bytes) required to start VM.") \
+ \
product_pd(uintx, ReservedCodeCacheSize, \
"Reserved code cache size (in bytes) - maximum code cache size") \
\
@@ -3452,6 +3477,10 @@ class CommandLineFlags {
"Start profiling in interpreter if the counters exceed tier 3" \
"thresholds by the specified percentage") \
\
+ product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \
+ "Increase the compile threshold for C1 compilation if the code" \
+ "cache is filled by the specified percentage.") \
+ \
product(intx, TieredRateUpdateMinTime, 1, \
"Minimum rate sampling interval (in milliseconds)") \
\
@@ -3503,6 +3532,8 @@ class CommandLineFlags {
"Temporary flag for transition to AbstractMethodError wrapped " \
"in InvocationTargetException. See 6531596") \
\
+ develop(bool, VerifyLambdaBytecodes, false, \
+ "Force verification of jdk 8 lambda metafactory bytecodes.") \
\
develop(intx, FastSuperclassLimit, 8, \
"Depth of hardwired instanceof accelerator array") \
@@ -3636,6 +3667,9 @@ class CommandLineFlags {
experimental(bool, TrustFinalNonStaticFields, false, \
"trust final non-static declarations for constant folding") \
\
+ experimental(bool, FoldStableValues, false, \
+ "Private flag to control optimizations for stable variables") \
+ \
develop(bool, TraceInvokeDynamic, false, \
"trace internal invoke dynamic operations") \
\
@@ -3674,9 +3708,6 @@ class CommandLineFlags {
develop(bool, TraceDefaultMethods, false, \
"Trace the default method processing steps") \
\
- develop(bool, ParseAllGenericSignatures, false, \
- "Parse all generic signatures while classloading") \
- \
develop(bool, VerifyGenericSignatures, false, \
"Abort VM on erroneous or inconsistent generic signatures") \
\
@@ -3694,10 +3725,19 @@ class CommandLineFlags {
product(bool , AllowNonVirtualCalls, false, \
"Obey the ACC_SUPER flag and allow invokenonvirtual calls") \
\
+ diagnostic(ccstr, SharedArchiveFile, NULL, \
+ "Override the default location of the CDS archive file") \
+ \
experimental(uintx, ArrayAllocatorMallocLimit, \
SOLARIS_ONLY(64*K) NOT_SOLARIS(max_uintx), \
"Allocation less than this value will be allocated " \
- "using malloc. Larger allocations will use mmap.")
+ "using malloc. Larger allocations will use mmap.") \
+ \
+ product(bool, EnableTracing, false, \
+ "Enable event-based tracing") \
+ product(bool, UseLockedTracing, false, \
+ "Use locked-tracing when doing event-based tracing")
+
/*
* Macros for factoring of globals
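
Taken together, the logging edits above retire the fixed hotspot.log name: output goes to LogFile, defaulting to hotspot_pid%p.log with %p expanded to the pid. A hypothetical invocation (the file name is illustrative; LogFile and LogCompilation are diagnostic flags, hence the unlock):

    java -XX:+UnlockDiagnosticVMOptions -XX:+LogCompilation \
         -XX:LogFile=comp_%p.log -version

Each VM instance then writes its own comp_<pid>.log.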
diff --git a/src/share/vm/runtime/globals_extension.hpp b/src/share/vm/runtime/globals_extension.hpp
index 00d06fe27..bc4fd4a74 100644
--- a/src/share/vm/runtime/globals_extension.hpp
+++ b/src/share/vm/runtime/globals_extension.hpp
@@ -57,6 +57,7 @@
#define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
#define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
+#define C1_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
#ifdef PRODUCT
#define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
#define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc) /* flag is constant */
@@ -99,7 +100,7 @@ typedef enum {
G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
+ C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_DIAGNOSTIC_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
#endif
#ifdef COMPILER2
C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
@@ -131,6 +132,7 @@ typedef enum {
#define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
#define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#define C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
#ifdef PRODUCT
#define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
#define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) /* flag is constant */
@@ -204,6 +206,7 @@ typedef enum {
C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
C1_PRODUCT_FLAG_MEMBER_WITH_TYPE,
C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+ C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
#endif
#ifdef COMPILER2
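
The new C1_DIAGNOSTIC_* expanders plug into HotSpot's X-macro flag tables: the flag list is written once and expanded by each consumer with its own per-kind macros. A self-contained miniature of the pattern (all names hypothetical):

    #include <cstdio>
    // One list of flags; callers pass one expander per flag kind.
    #define MY_FLAGS(product, diagnostic) \
      product(int, MaxWidgets, 8, "upper bound on widgets") \
      diagnostic(bool, TraceWidgets, false, "log widget churn")
    // Expansion 1: define the flag variables.
    #define DEF_FLAG(type, name, value, doc) type name = value;
    MY_FLAGS(DEF_FLAG, DEF_FLAG)
    // Expansion 2: build a descriptor table over the same list.
    struct FlagDesc { const char* name; const char* doc; };
    #define TBL_FLAG(type, name, value, doc) { #name, doc },
    static FlagDesc table[] = { MY_FLAGS(TBL_FLAG, TBL_FLAG) };
    int main() {
      printf("%s: %s\n", table[1].name, table[1].doc);  // TraceWidgets: ...
      return 0;
    }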
diff --git a/src/share/vm/runtime/handles.cpp b/src/share/vm/runtime/handles.cpp
index a62ff177d..1b4e9faec 100644
--- a/src/share/vm/runtime/handles.cpp
+++ b/src/share/vm/runtime/handles.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -179,6 +179,22 @@ HandleMark::~HandleMark() {
_thread->set_last_handle_mark(previous_handle_mark());
}
+void* HandleMark::operator new(size_t size) throw() {
+ return AllocateHeap(size, mtThread);
+}
+
+void* HandleMark::operator new [] (size_t size) throw() {
+ return AllocateHeap(size, mtThread);
+}
+
+void HandleMark::operator delete(void* p) {
+ FreeHeap(p, mtThread);
+}
+
+void HandleMark::operator delete[](void* p) {
+ FreeHeap(p, mtThread);
+}
+
#ifdef ASSERT
NoHandleMark::NoHandleMark() {
diff --git a/src/share/vm/runtime/handles.hpp b/src/share/vm/runtime/handles.hpp
index 8c643d7c2..c2ce4b38c 100644
--- a/src/share/vm/runtime/handles.hpp
+++ b/src/share/vm/runtime/handles.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -136,7 +136,7 @@ DEF_HANDLE(typeArray , is_typeArray )
// Specific Handles for different oop types
#define DEF_METADATA_HANDLE(name, type) \
class name##Handle; \
- class name##Handle { \
+ class name##Handle : public StackObj { \
type* _value; \
Thread* _thread; \
protected: \
@@ -175,7 +175,7 @@ DEF_METADATA_HANDLE(constantPool, ConstantPool)
// Writing this class explicitly, since DEF_METADATA_HANDLE(klass) doesn't
// provide the necessary Klass* <-> Klass* conversions. This Klass
// could be removed when we don't have the Klass* typedef anymore.
-class KlassHandle {
+class KlassHandle : public StackObj {
Klass* _value;
protected:
Klass* obj() const { return _value; }
@@ -227,7 +227,7 @@ class HandleArea: public Arena {
HandleArea* _prev; // link to outer (older) area
public:
// Constructor
- HandleArea(HandleArea* prev) {
+ HandleArea(HandleArea* prev) : Arena(Chunk::tiny_size) {
debug_only(_handle_mark_nesting = 0);
debug_only(_no_handle_mark_nesting = 0);
_prev = prev;
@@ -281,7 +281,7 @@ class HandleArea: public Arena {
// across the HandleMark boundary.
// The base class of HandleMark should have been StackObj but we also heap allocate
-// a HandleMark when a thread is created.
+// a HandleMark when a thread is created. The operator new is for this special case.
class HandleMark {
private:
@@ -308,6 +308,11 @@ class HandleMark {
void push();
// called in the destructor of HandleMarkCleaner
void pop_and_restore();
+ // overloaded operators
+ void* operator new(size_t size) throw();
+ void* operator new [](size_t size) throw();
+ void operator delete(void* p);
+ void operator delete[](void* p);
};
//------------------------------------------------------------------------------------------------------------------------
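
HandleMark is normally a stack object, but one copy is heap-allocated per thread at thread creation, so the class routes that allocation through the VM heap with the mtThread tag. The class-scoped operator pattern in standalone form (printf/malloc stand in for the NMT-tracked AllocateHeap/FreeHeap):

    #include <cstdio>
    #include <cstdlib>
    #include <cstddef>
    struct Mark {
      int depth;
      void* operator new(size_t size) throw() {   // stands in for AllocateHeap
        printf("alloc %u bytes\n", (unsigned) size);
        return malloc(size);
      }
      void operator delete(void* p) {              // stands in for FreeHeap
        printf("free\n");
        free(p);
      }
    };
    int main() {
      Mark* m = new Mark();  // routed through the class operators
      delete m;
      return 0;
    }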
diff --git a/src/share/vm/runtime/handles.inline.hpp b/src/share/vm/runtime/handles.inline.hpp
index 9530b127a..5a0f3f773 100644
--- a/src/share/vm/runtime/handles.inline.hpp
+++ b/src/share/vm/runtime/handles.inline.hpp
@@ -79,6 +79,7 @@ inline name##Handle::name##Handle(const name##Handle &h) { \
} else { \
_thread = Thread::current(); \
} \
+ assert (_thread->is_in_stack((address)this), "not on stack?"); \
_thread->metadata_handles()->push((Metadata*)_value); \
} else { \
_thread = NULL; \
@@ -95,6 +96,7 @@ inline name##Handle& name##Handle::operator=(const name##Handle &s) { \
} else { \
_thread = Thread::current(); \
} \
+ assert (_thread->is_in_stack((address)this), "not on stack?"); \
_thread->metadata_handles()->push((Metadata*)_value); \
} else { \
_thread = NULL; \
diff --git a/src/share/vm/runtime/init.cpp b/src/share/vm/runtime/init.cpp
index 62f295c7e..4533c7e81 100644
--- a/src/share/vm/runtime/init.cpp
+++ b/src/share/vm/runtime/init.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -95,7 +95,6 @@ jint init_globals() {
management_init();
bytecodes_init();
classLoader_init();
- Metaspace::global_initialize(); // must be before codeCache
codeCache_init();
VM_Version_init();
os_init_globals();
diff --git a/src/share/vm/runtime/interfaceSupport.hpp b/src/share/vm/runtime/interfaceSupport.hpp
index 0c48e5181..4d2ca5137 100644
--- a/src/share/vm/runtime/interfaceSupport.hpp
+++ b/src/share/vm/runtime/interfaceSupport.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@ class HandleMarkCleaner: public StackObj {
}
private:
- inline void* operator new(size_t size, void* ptr) {
+ inline void* operator new(size_t size, void* ptr) throw() {
return ptr;
}
};
@@ -471,16 +471,6 @@ class RuntimeHistogramElement : public HistogramElement {
VM_ENTRY_BASE(result_type, header, thread) \
debug_only(VMEntryWrapper __vew;)
-// Another special case for nmethod_entry_point so the nmethod that the
-// interpreter is about to branch to doesn't get flushed before as we
-// branch to it's interpreter_entry_point. Skip stress testing here too.
-// Also we don't allow async exceptions because it is just too painful.
-#define IRT_ENTRY_FOR_NMETHOD(result_type, header) \
- result_type header { \
- nmethodLocker _nmlock(nm); \
- ThreadInVMfromJavaNoAsyncException __tiv(thread); \
- VM_ENTRY_BASE(result_type, header, thread)
-
#define IRT_END }
diff --git a/src/share/vm/runtime/java.cpp b/src/share/vm/runtime/java.cpp
index 7f8987c45..6b55352ce 100644
--- a/src/share/vm/runtime/java.cpp
+++ b/src/share/vm/runtime/java.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,6 @@
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
-#include "runtime/aprofiler.hpp"
#include "runtime/arguments.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
@@ -60,7 +59,6 @@
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
#include "trace/tracing.hpp"
-#include "trace/traceEventTypes.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/histogram.hpp"
@@ -513,16 +511,6 @@ void before_exit(JavaThread * thread) {
}
}
-
- if (Arguments::has_alloc_profile()) {
- HandleMark hm;
- // Do one last collection to enumerate all the objects
- // allocated since the last one.
- Universe::heap()->collect(GCCause::_allocation_profiler);
- AllocationProfiler::disengage();
- AllocationProfiler::print(0);
- }
-
if (PrintBytecodeHistogram) {
BytecodeHistogram::print();
}
@@ -531,9 +519,12 @@ void before_exit(JavaThread * thread) {
JvmtiExport::post_thread_end(thread);
}
- EVENT_BEGIN(TraceEventThreadEnd, event);
- EVENT_COMMIT(event,
- EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
+
+ EventThreadEnd event;
+ if (event.should_commit()) {
+ event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
+ event.commit();
+ }
// Always call even when there are not JVMTI environments yet, since environments
// may be attached late and JVMTI must track phases of VM execution
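
The EVENT_BEGIN/EVENT_COMMIT macros give way to typed event objects with a uniform lifecycle: construct early, check should_commit(), set fields, commit. A standalone model of that lifecycle (the real EventThreadEnd is generated by the tracing framework; everything here is a stand-in):

    #include <cstdio>
    struct EventThreadEnd {
      long tid;
      EventThreadEnd() : tid(0) {}                 // real events timestamp here
      bool should_commit() const { return true; }  // models the enabled check
      void set_javalangthread(long t) { tid = t; }
      void commit() { printf("ThreadEnd tid=%ld\n", tid); }
    };
    int main() {
      EventThreadEnd event;             // construct at the operation site
      if (event.should_commit()) {      // cheap: is this event enabled?
        event.set_javalangthread(42);   // fill the payload
        event.commit();                 // hand off to the trace buffer
      }
      return 0;
    }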
@@ -556,6 +547,19 @@ void before_exit(JavaThread * thread) {
// it will run into trouble when system destroys static variables.
MemTracker::shutdown(MemTracker::NMT_normal);
+ if (VerifyStringTableAtExit) {
+ int fail_cnt = 0;
+ {
+ MutexLocker ml(StringTable_lock);
+ fail_cnt = StringTable::verify_and_compare_entries();
+ }
+
+ if (fail_cnt != 0) {
+ tty->print_cr("ERROR: fail_cnt=%d", fail_cnt);
+ guarantee(fail_cnt == 0, "unexpected StringTable verification failures");
+ }
+ }
+
#undef BEFORE_EXIT_NOT_RUN
#undef BEFORE_EXIT_RUNNING
#undef BEFORE_EXIT_DONE
diff --git a/src/share/vm/runtime/javaCalls.hpp b/src/share/vm/runtime/javaCalls.hpp
index 339154ef8..8d96aee34 100644
--- a/src/share/vm/runtime/javaCalls.hpp
+++ b/src/share/vm/runtime/javaCalls.hpp
@@ -83,6 +83,8 @@ class JavaCallWrapper: StackObj {
oop receiver() { return _receiver; }
void oops_do(OopClosure* f);
+ bool is_first_frame() const { return _anchor.last_Java_sp() == NULL; }
+
};
diff --git a/src/share/vm/runtime/jniHandles.cpp b/src/share/vm/runtime/jniHandles.cpp
index 4dc83d304..d518dfa93 100644
--- a/src/share/vm/runtime/jniHandles.cpp
+++ b/src/share/vm/runtime/jniHandles.cpp
@@ -188,7 +188,6 @@ long JNIHandles::weak_global_handle_memory_usage() {
class AlwaysAliveClosure: public BoolObjectClosure {
public:
bool do_object_b(oop obj) { return true; }
- void do_object(oop obj) { assert(false, "Don't call"); }
};
class CountHandleClosure: public OopClosure {
diff --git a/src/share/vm/runtime/mutex.cpp b/src/share/vm/runtime/mutex.cpp
index 10d91fa37..7adc19f7a 100644
--- a/src/share/vm/runtime/mutex.cpp
+++ b/src/share/vm/runtime/mutex.cpp
@@ -1370,6 +1370,10 @@ void Monitor::check_prelock_state(Thread *thread) {
debug_only(if (rank() != Mutex::special) \
thread->check_for_valid_safepoint_state(false);)
}
+ if (thread->is_Watcher_thread()) {
+ assert(!WatcherThread::watcher_thread()->has_crash_protection(),
+ "locking not allowed when crash protection is set");
+ }
}
void Monitor::check_block_state(Thread *thread) {
diff --git a/src/share/vm/runtime/mutexLocker.cpp b/src/share/vm/runtime/mutexLocker.cpp
index c386ae8f4..19f98cc2e 100644
--- a/src/share/vm/runtime/mutexLocker.cpp
+++ b/src/share/vm/runtime/mutexLocker.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,6 @@ Mutex* InlineCacheBuffer_lock = NULL;
Mutex* VMStatistic_lock = NULL;
Mutex* JNIGlobalHandle_lock = NULL;
Mutex* JNIHandleBlockFreeList_lock = NULL;
-Mutex* JNICachedItableIndex_lock = NULL;
Mutex* MemberNameTable_lock = NULL;
Mutex* JmethodIdCreation_lock = NULL;
Mutex* JfieldIdCreation_lock = NULL;
@@ -124,13 +123,15 @@ Monitor* GCTaskManager_lock = NULL;
Mutex* Management_lock = NULL;
Monitor* Service_lock = NULL;
-Mutex* Stacktrace_lock = NULL;
+Monitor* PeriodicTask_lock = NULL;
-Monitor* JfrQuery_lock = NULL;
+#ifdef INCLUDE_TRACE
+Mutex* JfrStacktrace_lock = NULL;
Monitor* JfrMsg_lock = NULL;
Mutex* JfrBuffer_lock = NULL;
Mutex* JfrStream_lock = NULL;
-Monitor* PeriodicTask_lock = NULL;
+Mutex* JfrThreadGroups_lock = NULL;
+#endif
#define MAX_NUM_MUTEX 128
static Monitor * _mutex_array[MAX_NUM_MUTEX];
@@ -206,7 +207,6 @@ void mutex_init() {
def(Patching_lock , Mutex , special, true ); // used for safepointing and code patching.
def(ObjAllocPost_lock , Monitor, special, false);
def(Service_lock , Monitor, special, true ); // used for service thread operations
- def(Stacktrace_lock , Mutex, special, true ); // used for JFR stacktrace database
def(JmethodIdCreation_lock , Mutex , leaf, true ); // used for creating jmethodIDs.
def(SystemDictionary_lock , Monitor, leaf, true ); // lookups done by VM thread
@@ -252,7 +252,6 @@ void mutex_init() {
}
def(Heap_lock , Monitor, nonleaf+1, false);
def(JfieldIdCreation_lock , Mutex , nonleaf+1, true ); // jfieldID, Used in VM_Operation
- def(JNICachedItableIndex_lock , Mutex , nonleaf+1, false); // Used to cache an itable index during JNI invoke
def(MemberNameTable_lock , Mutex , nonleaf+1, false); // Used to protect MemberNameTable
def(CompiledIC_lock , Mutex , nonleaf+2, false); // locks VtableStubs_lock, InlineCacheBuffer_lock
@@ -270,14 +269,18 @@ void mutex_init() {
def(MethodCompileQueue_lock , Monitor, nonleaf+4, true );
def(Debug2_lock , Mutex , nonleaf+4, true );
def(Debug3_lock , Mutex , nonleaf+4, true );
- def(ProfileVM_lock , Monitor, nonleaf+4, false); // used for profiling of the VMThread
+ def(ProfileVM_lock , Monitor, special, false); // used for profiling of the VMThread
def(CompileThread_lock , Monitor, nonleaf+5, false );
-
- def(JfrQuery_lock , Monitor, nonleaf, true); // JFR locks, keep these in consecutive order
- def(JfrMsg_lock , Monitor, nonleaf+2, true);
- def(JfrBuffer_lock , Mutex, nonleaf+3, true);
- def(JfrStream_lock , Mutex, nonleaf+4, true);
def(PeriodicTask_lock , Monitor, nonleaf+5, true);
+
+#ifdef INCLUDE_TRACE
+ def(JfrMsg_lock , Monitor, leaf, true);
+ def(JfrBuffer_lock , Mutex, nonleaf+1, true);
+ def(JfrThreadGroups_lock , Mutex, nonleaf+1, true);
+ def(JfrStream_lock , Mutex, nonleaf+2, true);
+ def(JfrStacktrace_lock , Mutex, special, true );
+#endif
+
}
GCMutexLocker::GCMutexLocker(Monitor * mutex) {
diff --git a/src/share/vm/runtime/mutexLocker.hpp b/src/share/vm/runtime/mutexLocker.hpp
index 7a2e240bd..361febdcd 100644
--- a/src/share/vm/runtime/mutexLocker.hpp
+++ b/src/share/vm/runtime/mutexLocker.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,6 @@ extern Mutex* InlineCacheBuffer_lock; // a lock used to guard the Inl
extern Mutex* VMStatistic_lock; // a lock used to guard statistics count increment
extern Mutex* JNIGlobalHandle_lock; // a lock on creating JNI global handles
extern Mutex* JNIHandleBlockFreeList_lock; // a lock on the JNI handle block free list
-extern Mutex* JNICachedItableIndex_lock; // a lock on caching an itable index during JNI invoke
extern Mutex* MemberNameTable_lock; // a lock on the MemberNameTable updates
extern Mutex* JmethodIdCreation_lock; // a lock on creating JNI method identifiers
extern Mutex* JfieldIdCreation_lock; // a lock on creating JNI static field identifiers
@@ -137,13 +136,15 @@ extern Mutex* HotCardCache_lock; // protects the hot card cache
extern Mutex* Management_lock; // a lock used to serialize JVM management
extern Monitor* Service_lock; // a lock used for service thread operation
-extern Mutex* Stacktrace_lock; // used to guard access to the stacktrace table
+extern Monitor* PeriodicTask_lock; // protects the periodic task structure
-extern Monitor* JfrQuery_lock; // protects JFR use
+#ifdef INCLUDE_TRACE
+extern Mutex* JfrStacktrace_lock; // used to guard access to the JFR stacktrace table
extern Monitor* JfrMsg_lock; // protects JFR messaging
extern Mutex* JfrBuffer_lock; // protects JFR buffer operations
extern Mutex* JfrStream_lock; // protects JFR stream access
-extern Monitor* PeriodicTask_lock; // protects the periodic task structure
+extern Mutex* JfrThreadGroups_lock; // protects JFR access to Thread Groups
+#endif
// A MutexLocker provides mutual exclusion with respect to a given mutex
// for the scope which contains the locker. The lock is an OS lock, not
diff --git a/src/share/vm/runtime/objectMonitor.cpp b/src/share/vm/runtime/objectMonitor.cpp
index 27b6243a5..523887502 100644
--- a/src/share/vm/runtime/objectMonitor.cpp
+++ b/src/share/vm/runtime/objectMonitor.cpp
@@ -36,7 +36,10 @@
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "services/threadService.hpp"
+#include "trace/tracing.hpp"
+#include "trace/traceMacros.hpp"
#include "utilities/dtrace.hpp"
+#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
@@ -371,6 +374,8 @@ void ATTR ObjectMonitor::enter(TRAPS) {
// Ensure the object-monitor relationship remains stable while there's contention.
Atomic::inc_ptr(&_count);
+ EventJavaMonitorEnter event;
+
{ // Change java thread status to indicate blocked on monitor enter.
JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
@@ -402,7 +407,7 @@ void ATTR ObjectMonitor::enter(TRAPS) {
//
_recursions = 0 ;
_succ = NULL ;
- exit (Self) ;
+ exit (false, Self) ;
jt->java_suspend_self();
}
@@ -435,6 +440,14 @@ void ATTR ObjectMonitor::enter(TRAPS) {
if (JvmtiExport::should_post_monitor_contended_entered()) {
JvmtiExport::post_monitor_contended_entered(jt, this);
}
+
+ if (event.should_commit()) {
+ event.set_klass(((oop)this->object())->klass());
+ event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
+ event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
+ event.commit();
+ }
+
if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
}
@@ -917,7 +930,7 @@ void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
// Both impinge on OS scalability. Given that, at most one thread parked on
// a monitor will use a timer.
-void ATTR ObjectMonitor::exit(TRAPS) {
+void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
Thread * Self = THREAD ;
if (THREAD != _owner) {
if (THREAD->is_lock_owned((address) _owner)) {
@@ -954,6 +967,14 @@ void ATTR ObjectMonitor::exit(TRAPS) {
_Responsible = NULL ;
}
+#if INCLUDE_TRACE
+ // get the owner's thread id for the MonitorEnter event
+ // if it is enabled and the thread isn't suspended
+ if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
+ _previous_owner_tid = SharedRuntime::get_java_tid(Self);
+ }
+#endif
+
for (;;) {
assert (THREAD == _owner, "invariant") ;
@@ -1343,7 +1364,7 @@ intptr_t ObjectMonitor::complete_exit(TRAPS) {
guarantee(Self == _owner, "complete_exit not owner");
intptr_t save = _recursions; // record the old recursion count
_recursions = 0; // set the recursion level to be 0
- exit (Self) ; // exit the monitor
+ exit (true, Self) ; // exit the monitor
guarantee (_owner != Self, "invariant");
return save;
}
@@ -1397,6 +1418,20 @@ static int Adjust (volatile int * adr, int dx) {
for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
return v ;
}
+
+// helper method for posting a monitor wait event
+void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
+ jlong notifier_tid,
+ jlong timeout,
+ bool timedout) {
+ event->set_klass(((oop)this->object())->klass());
+ event->set_timeout((TYPE_ULONG)timeout);
+ event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
+ event->set_notifier((TYPE_OSTHREAD)notifier_tid);
+ event->set_timedOut((TYPE_BOOLEAN)timedout);
+ event->commit();
+}
+
// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
//
@@ -1412,6 +1447,8 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
// Throw IMSX or IEX.
CHECK_OWNER();
+ EventJavaMonitorWait event;
+
// check for a pending interrupt
if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
// post monitor waited event. Note that this is past-tense, we are done waiting.
@@ -1420,10 +1457,14 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
// wait was not timed out due to thread interrupt.
JvmtiExport::post_monitor_waited(jt, this, false);
}
+ if (event.should_commit()) {
+ post_monitor_wait_event(&event, 0, millis, false);
+ }
TEVENT (Wait - Throw IEX) ;
THROW(vmSymbols::java_lang_InterruptedException());
return ;
}
+
TEVENT (Wait) ;
assert (Self->_Stalled == 0, "invariant") ;
@@ -1455,7 +1496,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
intptr_t save = _recursions; // record the old recursion count
_waiters++; // increment the number of waiters
_recursions = 0; // set the recursion level to be 1
- exit (Self) ; // exit the monitor
+ exit (true, Self) ; // exit the monitor
guarantee (_owner != Self, "invariant") ;
// As soon as the ObjectMonitor's ownership is dropped in the exit()
@@ -1555,6 +1596,11 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
if (JvmtiExport::should_post_monitor_waited()) {
JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
}
+
+ if (event.should_commit()) {
+ post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
+ }
+
OrderAccess::fence() ;
assert (Self->_Stalled != 0, "invariant") ;
@@ -1634,6 +1680,8 @@ void ObjectMonitor::notify(TRAPS) {
iterator->TState = ObjectWaiter::TS_ENTER ;
}
iterator->_notified = 1 ;
+ Thread * Self = THREAD;
+ iterator->_notifier_tid = Self->osthread()->thread_id();
ObjectWaiter * List = _EntryList ;
if (List != NULL) {
@@ -1758,6 +1806,8 @@ void ObjectMonitor::notifyAll(TRAPS) {
guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
guarantee (iterator->_notified == 0, "invariant") ;
iterator->_notified = 1 ;
+ Thread * Self = THREAD;
+ iterator->_notifier_tid = Self->osthread()->thread_id();
if (Policy != 4) {
iterator->TState = ObjectWaiter::TS_ENTER ;
}
diff --git a/src/share/vm/runtime/objectMonitor.hpp b/src/share/vm/runtime/objectMonitor.hpp
index e4236f490..10b3609c0 100644
--- a/src/share/vm/runtime/objectMonitor.hpp
+++ b/src/share/vm/runtime/objectMonitor.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
#include "runtime/park.hpp"
#include "runtime/perfData.hpp"
-
// ObjectWaiter serves as a "proxy" or surrogate thread.
// TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
// ParkEvent instead. Beware, however, that the JVMTI code
@@ -43,6 +42,7 @@ class ObjectWaiter : public StackObj {
ObjectWaiter * volatile _next;
ObjectWaiter * volatile _prev;
Thread* _thread;
+ jlong _notifier_tid;
ParkEvent * _event;
volatile int _notified ;
volatile TStates TState ;
@@ -55,6 +55,9 @@ class ObjectWaiter : public StackObj {
void wait_reenter_end(ObjectMonitor *mon);
};
+// forward declaration to avoid include tracing.hpp
+class EventJavaMonitorWait;
+
// WARNING:
// This is a very sensitive and fragile class. DO NOT make any
// change unless you are fully aware of the underlying semantics.
@@ -151,6 +154,7 @@ class ObjectMonitor {
_SpinFreq = 0 ;
_SpinClock = 0 ;
OwnerIsThread = 0 ;
+ _previous_owner_tid = 0;
}
~ObjectMonitor() {
@@ -192,7 +196,7 @@ public:
bool try_enter (TRAPS) ;
void enter(TRAPS);
- void exit(TRAPS);
+ void exit(bool not_suspended, TRAPS);
void wait(jlong millis, bool interruptable, TRAPS);
void notify(TRAPS);
void notifyAll(TRAPS);
@@ -218,6 +222,10 @@ public:
void ctAsserts () ;
void ExitEpilog (Thread * Self, ObjectWaiter * Wakee) ;
bool ExitSuspendEquivalent (JavaThread * Self) ;
+ void post_monitor_wait_event(EventJavaMonitorWait * event,
+ jlong notifier_tid,
+ jlong timeout,
+ bool timedout);
private:
friend class ObjectSynchronizer;
@@ -240,6 +248,7 @@ public:
protected: // protected for jvmtiRawMonitor
void * volatile _owner; // pointer to owning thread OR BasicLock
+ volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor
volatile intptr_t _recursions; // recursion count, 0 for first entry
private:
int OwnerIsThread ; // _owner is (Thread *) vs SP/BasicLock
@@ -303,6 +312,18 @@ public:
public:
static int Knob_Verbose;
static int Knob_SpinLimit;
+ void* operator new (size_t size) throw() {
+ return AllocateHeap(size, mtInternal);
+ }
+ void* operator new[] (size_t size) throw() {
+ return operator new (size);
+ }
+ void operator delete(void* p) {
+ FreeHeap(p, mtInternal);
+ }
+ void operator delete[] (void *p) {
+ operator delete(p);
+ }
};
#undef TEVENT
diff --git a/src/share/vm/runtime/os.cpp b/src/share/vm/runtime/os.cpp
index e9c5b2612..e2d930a1d 100644
--- a/src/share/vm/runtime/os.cpp
+++ b/src/share/vm/runtime/os.cpp
@@ -265,8 +265,7 @@ static void signal_thread_entry(JavaThread* thread, TRAPS) {
VMThread::execute(&op1);
Universe::print_heap_at_SIGBREAK();
if (PrintClassHistogram) {
- VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */,
- true /* need_prologue */);
+ VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */);
VMThread::execute(&op1);
}
if (JvmtiExport::should_post_data_dump()) {
@@ -315,6 +314,11 @@ static void signal_thread_entry(JavaThread* thread, TRAPS) {
}
}
+void os::init_before_ergo() {
+ // We need to initialize large page support here because ergonomics makes
+ // decisions based on large page support and the calculated large page size.
+ large_page_init();
+}
void os::signal_init() {
if (!ReduceSignalUsage) {
@@ -444,6 +448,68 @@ void* os::native_java_library() {
return _native_java_library;
}
+/*
+ * Support for finding Agent_On(Un)Load/Attach<_lib_name> if it exists.
+ * If check_lib == true then we are looking for an
+ * Agent_OnLoad_lib_name or Agent_OnAttach_lib_name function to determine if
+ * this library is statically linked into the image.
+ * If check_lib == false then we will look for the appropriate symbol in the
+ * executable if agent_lib->is_static_lib() == true or in the shared library
+ * referenced by 'handle'.
+ */
+void* os::find_agent_function(AgentLibrary *agent_lib, bool check_lib,
+ const char *syms[], size_t syms_len) {
+ assert(agent_lib != NULL, "sanity check");
+ const char *lib_name;
+ void *handle = agent_lib->os_lib();
+ void *entryName = NULL;
+ char *agent_function_name;
+ size_t i;
+
+ // If checking, use the agent name; otherwise test is_static_lib() to
+ // see how to process this lookup.
+ lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : NULL);
+ for (i = 0; i < syms_len; i++) {
+ agent_function_name = build_agent_function_name(syms[i], lib_name, agent_lib->is_absolute_path());
+ if (agent_function_name == NULL) {
+ break;
+ }
+ entryName = dll_lookup(handle, agent_function_name);
+ FREE_C_HEAP_ARRAY(char, agent_function_name, mtThread);
+ if (entryName != NULL) {
+ break;
+ }
+ }
+ return entryName;
+}
+
+// See if the passed-in agent is statically linked into the VM image.
+bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
+ size_t syms_len) {
+ void *ret;
+ void *proc_handle;
+ void *save_handle;
+
+ assert(agent_lib != NULL, "sanity check");
+ if (agent_lib->name() == NULL) {
+ return false;
+ }
+ proc_handle = get_default_process_handle();
+ // Check for Agent_OnLoad/Attach_lib_name function
+ save_handle = agent_lib->os_lib();
+ // We want to look in this process' symbol table.
+ agent_lib->set_os_lib(proc_handle);
+ ret = find_agent_function(agent_lib, true, syms, syms_len);
+ if (ret != NULL) {
+ // Found an entry point like Agent_OnLoad_lib_name, so we have a static agent
+ agent_lib->set_valid();
+ agent_lib->set_static_lib(true);
+ return true;
+ }
+ agent_lib->set_os_lib(save_handle);
+ return false;
+}
+
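As a hedged illustration of the new agent lookup (the library name, symbol table, and the AgentLibrary constructor arguments below are assumptions, not part of the patch), a statically linked JVMTI agent exporting Agent_OnLoad_myagent would be discovered roughly like this:

    // Sketch only: "myagent" and on_load_symbols are hypothetical.
    const char* on_load_symbols[] = { "Agent_OnLoad" };
    AgentLibrary* lib = new AgentLibrary("myagent", NULL /* options */,
                                         false /* is_absolute_path */, NULL);
    if (os::find_builtin_agent(lib, on_load_symbols,
                               sizeof(on_load_symbols) / sizeof(char*))) {
      // lib is now marked valid and static; its entry point can be
      // resolved with os::find_agent_function(lib, false, ...).
    }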
// --------------------- heap allocation utilities ---------------------
char *os::strdup(const char *str, MEMFLAGS flags) {
@@ -596,6 +662,22 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
+#ifdef ASSERT
+ // Check for the WatcherThread and crash_protection first,
+ // since os::malloc can be called when the libjvm.{dll,so} is
+ // first loaded and we don't have a thread yet.
+ // Only try to find the thread after we see that the watcher thread
+ // exists and has crash protection.
+ WatcherThread *wt = WatcherThread::watcher_thread();
+ if (wt != NULL && wt->has_crash_protection()) {
+ Thread* thread = ThreadLocalStorage::get_thread_slow();
+ if (thread == wt) {
+ assert(!wt->has_crash_protection(),
+ "Can't malloc with crash protection from WatcherThread");
+ }
+ }
+#endif
+
if (size == 0) {
// return a valid pointer if size is zero
// if NULL is returned, the calling functions assume out of memory.
@@ -648,10 +730,13 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller
#ifndef ASSERT
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
+ MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
void* ptr = ::realloc(memblock, size);
if (ptr != NULL) {
- MemTracker::record_realloc((address)memblock, (address)ptr, size, memflags,
+ tkr.record((address)memblock, (address)ptr, size, memflags,
caller == 0 ? CALLER_PC : caller);
+ } else {
+ tkr.discard();
}
return ptr;
#else
@@ -1406,53 +1491,20 @@ bool os::is_server_class_machine() {
return result;
}
-// Read file line by line, if line is longer than bsize,
-// skip rest of line.
-int os::get_line_chars(int fd, char* buf, const size_t bsize){
- size_t sz, i = 0;
-
- // read until EOF, EOL or buf is full
- while ((sz = (int) read(fd, &buf[i], 1)) == 1 && i < (bsize-2) && buf[i] != '\n') {
- ++i;
- }
-
- if (buf[i] == '\n') {
- // EOL reached so ignore EOL character and return
-
- buf[i] = 0;
- return (int) i;
- }
-
- buf[i+1] = 0;
-
- if (sz != 1) {
- // EOF reached. if we read chars before EOF return them and
- // return EOF on next call otherwise return EOF
-
- return (i == 0) ? -1 : (int) i;
- }
-
- // line is longer than size of buf, skip to EOL
- char ch;
- while (read(fd, &ch, 1) == 1 && ch != '\n') {
- // Do nothing
- }
-
- // return initial part of line that fits in buf.
- // If we reached EOF, it will be returned on next call.
-
- return (int) i;
+void os::SuspendedThreadTask::run() {
+ assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this");
+ internal_do_task();
+ _done = true;
}
bool os::create_stack_guard_pages(char* addr, size_t bytes) {
return os::pd_create_stack_guard_pages(addr, bytes);
}
-
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
char* result = pd_reserve_memory(bytes, addr, alignment_hint);
if (result != NULL) {
- MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+ MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
}
return result;
@@ -1462,7 +1514,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
MEMFLAGS flags) {
char* result = pd_reserve_memory(bytes, addr, alignment_hint);
if (result != NULL) {
- MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+ MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_type((address)result, flags);
}
@@ -1472,7 +1524,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
char* result = pd_attempt_reserve_memory_at(bytes, addr);
if (result != NULL) {
- MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+ MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
}
return result;
}
@@ -1499,18 +1551,36 @@ bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
return res;
}
+void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable,
+ const char* mesg) {
+ pd_commit_memory_or_exit(addr, bytes, executable, mesg);
+ MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
+}
+
+void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
+ bool executable, const char* mesg) {
+ os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
+ MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
+}
+
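A hypothetical call site for the new or-exit variants (the message text is illustrative); on failure the VM terminates via vm_exit_out_of_memory() instead of returning an error:

    char* base = os::reserve_memory(bytes, NULL, 0);
    os::commit_memory_or_exit(base, bytes, !ExecMem,
                              "committing reserved region");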
bool os::uncommit_memory(char* addr, size_t bytes) {
+ MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
bool res = pd_uncommit_memory(addr, bytes);
if (res) {
- MemTracker::record_virtual_memory_uncommit((address)addr, bytes);
+ tkr.record((address)addr, bytes);
+ } else {
+ tkr.discard();
}
return res;
}
bool os::release_memory(char* addr, size_t bytes) {
+ MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
bool res = pd_release_memory(addr, bytes);
if (res) {
- MemTracker::record_virtual_memory_release((address)addr, bytes);
+ tkr.record((address)addr, bytes);
+ } else {
+ tkr.discard();
}
return res;
}
@@ -1521,8 +1591,7 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset,
bool allow_exec) {
char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
if (result != NULL) {
- MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
- MemTracker::record_virtual_memory_commit((address)result, bytes, CALLER_PC);
+ MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC);
}
return result;
}
@@ -1535,10 +1604,12 @@ char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
}
bool os::unmap_memory(char *addr, size_t bytes) {
+ MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
bool result = pd_unmap_memory(addr, bytes);
if (result) {
- MemTracker::record_virtual_memory_uncommit((address)addr, bytes);
- MemTracker::record_virtual_memory_release((address)addr, bytes);
+ tkr.record((address)addr, bytes);
+ } else {
+ tkr.discard();
}
return result;
}
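The realloc, uncommit, release, and unmap changes above all follow the same two-phase tracking idiom; a minimal sketch, with do_operation standing in as a placeholder for the platform-dependent call:

    MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    if (do_operation(addr, bytes)) {     // placeholder for the pd_* call
      tkr.record((address)addr, bytes);  // success: commit the tracking record
    } else {
      tkr.discard();                     // failure: drop the tracker
    }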
@@ -1551,3 +1622,19 @@ void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
pd_realign_memory(addr, bytes, alignment_hint);
}
+#ifndef TARGET_OS_FAMILY_windows
+/* Try to switch state from "from" to "to"; returns "to" on success,
+ * otherwise the state observed at the time of the attempt.
+ */
+os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from,
+ os::SuspendResume::State to)
+{
+ os::SuspendResume::State result =
+ (os::SuspendResume::State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from);
+ if (result == from) {
+ // success
+ return to;
+ }
+ return result;
+}
+#endif
diff --git a/src/share/vm/runtime/os.hpp b/src/share/vm/runtime/os.hpp
index 110c407a1..491e9cc29 100644
--- a/src/share/vm/runtime/os.hpp
+++ b/src/share/vm/runtime/os.hpp
@@ -32,17 +32,22 @@
#include "utilities/top.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "jvm_linux.h"
+# include <setjmp.h>
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "jvm_solaris.h"
+# include <setjmp.h>
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "jvm_windows.h"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "jvm_bsd.h"
+# include <setjmp.h>
#endif
+class AgentLibrary;
+
// os defines the interface to operating system; this includes traditional
// OS services (time, I/O) as well as other functionality with system-
// dependent code.
@@ -78,10 +83,16 @@ enum ThreadPriority { // JLS 20.20.1-3
CriticalPriority = 11 // Critical thread priority
};
+// Executable parameter flag for os::commit_memory() and
+// os::commit_memory_or_exit().
+const bool ExecMem = true;
+
// Typedef for structured exception handling support
typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
class os: AllStatic {
+ friend class VMStructs;
+
public:
enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
@@ -104,9 +115,16 @@ class os: AllStatic {
static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr);
static void pd_split_reserved_memory(char *base, size_t size,
size_t split, bool realloc);
- static bool pd_commit_memory(char* addr, size_t bytes, bool executable = false);
+ static bool pd_commit_memory(char* addr, size_t bytes, bool executable);
static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
- bool executable = false);
+ bool executable);
+ // Same as pd_commit_memory(), except that it either succeeds or calls
+ // vm_exit_out_of_memory() with the specified mesg.
+ static void pd_commit_memory_or_exit(char* addr, size_t bytes,
+ bool executable, const char* mesg);
+ static void pd_commit_memory_or_exit(char* addr, size_t size,
+ size_t alignment_hint,
+ bool executable, const char* mesg);
static bool pd_uncommit_memory(char* addr, size_t bytes);
static bool pd_release_memory(char* addr, size_t bytes);
@@ -123,7 +141,10 @@ class os: AllStatic {
public:
static void init(void); // Called before command line parsing
+ static void init_before_ergo(void); // Called after command line parsing
+ // before VM ergonomics processing.
static jint init_2(void); // Called after command line parsing
+ // and VM ergonomics processing
static void init_globals(void) { // Called from init_globals() in init.cpp
init_globals_ext();
}
@@ -238,6 +259,11 @@ class os: AllStatic {
static size_t page_size_for_region(size_t region_min_size,
size_t region_max_size,
uint min_pages);
+ // Return the largest page size that can be used
+ static size_t max_page_size() {
+ // The _page_sizes array is sorted in descending order.
+ return _page_sizes[0];
+ }
// Methods for tracing page sizes returned by the above method; enabled by
// TracePageSizes. The region_{min,max}_size parameters should be the values
@@ -261,9 +287,16 @@ class os: AllStatic {
static char* attempt_reserve_memory_at(size_t bytes, char* addr);
static void split_reserved_memory(char *base, size_t size,
size_t split, bool realloc);
- static bool commit_memory(char* addr, size_t bytes, bool executable = false);
+ static bool commit_memory(char* addr, size_t bytes, bool executable);
static bool commit_memory(char* addr, size_t size, size_t alignment_hint,
- bool executable = false);
+ bool executable);
+ // Same as commit_memory(), except that it either succeeds or calls
+ // vm_exit_out_of_memory() with the specified mesg.
+ static void commit_memory_or_exit(char* addr, size_t bytes,
+ bool executable, const char* mesg);
+ static void commit_memory_or_exit(char* addr, size_t size,
+ size_t alignment_hint,
+ bool executable, const char* mesg);
static bool uncommit_memory(char* addr, size_t bytes);
static bool release_memory(char* addr, size_t bytes);
@@ -307,8 +340,8 @@ class os: AllStatic {
static char* non_memory_address_word();
// reserve, commit and pin the entire memory region
- static char* reserve_memory_special(size_t size, char* addr = NULL,
- bool executable = false);
+ static char* reserve_memory_special(size_t size, size_t alignment,
+ char* addr, bool executable);
static bool release_memory_special(char* addr, size_t bytes);
static void large_page_init();
static size_t large_page_size();
@@ -489,16 +522,16 @@ class os: AllStatic {
// Symbol lookup, find nearest function name; basically it implements
// dladdr() for all platforms. Name of the nearest function is copied
- // to buf. Distance from its base address is returned as offset.
+ // to buf. Distance from its base address is optionally returned as offset.
// If function name is not found, buf[0] is set to '\0' and offset is
- // set to -1.
+ // set to -1 (if offset is non-NULL).
static bool dll_address_to_function_name(address addr, char* buf,
int buflen, int* offset);
// Locate DLL/DSO. On success, full path of the library is copied to
- // buf, and offset is set to be the distance between addr and the
- // library's base address. On failure, buf[0] is set to '\0' and
- // offset is set to -1.
+ // buf, and offset is optionally set to be the distance between addr
+ // and the library's base address. On failure, buf[0] is set to '\0'
+ // and offset is set to -1 (if offset is non-NULL).
static bool dll_address_to_library_name(address addr, char* buf,
int buflen, int* offset);
@@ -516,6 +549,17 @@ class os: AllStatic {
// Unload library
static void dll_unload(void *lib);
+ // Return the handle of this process
+ static void* get_default_process_handle();
+
+ // Check for a statically linked agent library
+ static bool find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
+ size_t syms_len);
+
+ // Find agent entry point
+ static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib,
+ const char *syms[], size_t syms_len);
+
// Print out system information; these functions are called by the fatal error handler.
// Output format may be different on different platforms.
static void print_os_info(outputStream* st);
@@ -704,14 +748,14 @@ class os: AllStatic {
// Hook for os specific jvm options that we don't want to abort on seeing
static bool obsolete_option(const JavaVMOption *option);
- // Read file line by line. If line is longer than bsize,
- // rest of line is skipped. Returns number of bytes read or -1 on EOF
- static int get_line_chars(int fd, char *buf, const size_t bsize);
-
// Extensions
#include "runtime/os_ext.hpp"
public:
+ class CrashProtectionCallback : public StackObj {
+ public:
+ virtual void call() = 0;
+ };
// Platform dependent stuff
#ifdef TARGET_OS_FAMILY_linux
@@ -764,6 +808,14 @@ class os: AllStatic {
#endif
public:
+#ifndef PLATFORM_PRINT_NATIVE_STACK
+ // No platform-specific code for printing the native stack.
+ static bool platform_print_native_stack(outputStream* st, void* context,
+ char *buf, int buf_size) {
+ return false;
+ }
+#endif
+
// debugging support (mostly used by debug.cpp but also fatal error handler)
static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address
@@ -784,6 +836,109 @@ class os: AllStatic {
// ResumeThread call)
static void pause();
+ // Builds a platform-dependent Agent_OnLoad_<libname> function name,
+ // which is used to find statically linked-in agents.
+ static char* build_agent_function_name(const char *sym, const char *cname,
+ bool is_absolute_path);
+
+ class SuspendedThreadTaskContext {
+ public:
+ SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
+ Thread* thread() const { return _thread; }
+ void* ucontext() const { return _ucontext; }
+ private:
+ Thread* _thread;
+ void* _ucontext;
+ };
+
+ class SuspendedThreadTask {
+ public:
+ SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
+ virtual ~SuspendedThreadTask() {}
+ void run();
+ bool is_done() { return _done; }
+ virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
+ protected:
+ private:
+ void internal_do_task();
+ Thread* _thread;
+ bool _done;
+ };
+
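A hypothetical subclass sketch (not part of the patch) showing how a sampling client could use SuspendedThreadTask; the target thread is stopped for the duration of do_task():

    class DumpContextTask : public os::SuspendedThreadTask {
     public:
      DumpContextTask(Thread* t) : os::SuspendedThreadTask(t) {}
      virtual void do_task(const os::SuspendedThreadTaskContext& context) {
        // The saved register state of the stopped thread is reachable
        // through context.ucontext().
        tty->print_cr("thread " PTR_FORMAT " suspended, ucontext " PTR_FORMAT,
                      context.thread(), context.ucontext());
      }
    };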
+#ifndef TARGET_OS_FAMILY_windows
+ // Suspend/resume support
+ // Protocol:
+ //
+ // a thread starts in SR_RUNNING
+ //
+ // SR_RUNNING can go to
+ // * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
+ // SR_SUSPEND_REQUEST can go to
+ // * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
+ // * SR_SUSPENDED if the stopped thread receives the signal and switches state
+ // SR_SUSPENDED can go to
+ // * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume
+ // SR_WAKEUP_REQUEST can go to
+ // * SR_RUNNING when the stopped thread receives the signal
+ // * SR_WAKEUP_REQUEST on timeout (resend the signal and try again)
+ class SuspendResume {
+ public:
+ enum State {
+ SR_RUNNING,
+ SR_SUSPEND_REQUEST,
+ SR_SUSPENDED,
+ SR_WAKEUP_REQUEST
+ };
+
+ private:
+ volatile State _state;
+
+ private:
+ /* Try to switch state from "from" to "to"; returns "to" on success,
+ * otherwise the state observed at the time of the attempt.
+ */
+ State switch_state(State from, State to);
+
+ public:
+ SuspendResume() : _state(SR_RUNNING) { }
+
+ State state() const { return _state; }
+
+ State request_suspend() {
+ return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
+ }
+
+ State cancel_suspend() {
+ return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
+ }
+
+ State suspended() {
+ return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
+ }
+
+ State request_wakeup() {
+ return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
+ }
+
+ State running() {
+ return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
+ }
+
+ bool is_running() const {
+ return _state == SR_RUNNING;
+ }
+
+ bool is_suspend_request() const {
+ return _state == SR_SUSPEND_REQUEST;
+ }
+
+ bool is_suspended() const {
+ return _state == SR_SUSPENDED;
+ }
+ };
+#endif
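A hedged suspender-side sketch of the protocol above (signal delivery and timeout handling are elided; the polling loop is schematic):

    os::SuspendResume sr;
    if (sr.request_suspend() == os::SuspendResume::SR_SUSPEND_REQUEST) {
      // deliver the suspend signal to the target thread, then poll:
      while (!sr.is_suspended()) {
        // on timeout: sr.cancel_suspend() and give up
      }
      // examine the stopped thread, then let it continue:
      sr.request_wakeup();
    }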
+
+
protected:
static long _rand_seed; // seed for random number generator
static int _processor_count; // number of processors
@@ -795,6 +950,7 @@ class os: AllStatic {
char pathSep);
static bool set_boot_path(char fileSep, char pathSep);
static char** split_path(const char* path, int* n);
+
};
// Note that "PAUSE" is almost always used with synchronization
@@ -802,8 +958,6 @@ class os: AllStatic {
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.
-extern "C" int SpinPause () ;
-extern "C" int SafeFetch32 (int * adr, int errValue) ;
-extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) ;
+extern "C" int SpinPause();
#endif // SHARE_VM_RUNTIME_OS_HPP
diff --git a/src/share/vm/runtime/park.cpp b/src/share/vm/runtime/park.cpp
index 6fb0224ff..6380570ef 100644
--- a/src/share/vm/runtime/park.cpp
+++ b/src/share/vm/runtime/park.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -140,7 +140,7 @@ void ParkEvent::Release (ParkEvent * ev) {
// well as bank access imbalance on Niagara-like platforms,
// although Niagara's hash function should help.
-void * ParkEvent::operator new (size_t sz) {
+void * ParkEvent::operator new (size_t sz) throw() {
return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
}
diff --git a/src/share/vm/runtime/park.hpp b/src/share/vm/runtime/park.hpp
index 4b72bb609..504cb1a85 100644
--- a/src/share/vm/runtime/park.hpp
+++ b/src/share/vm/runtime/park.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -166,7 +166,7 @@ class ParkEvent : public os::PlatformEvent {
// aligned on 256-byte address boundaries. This ensures that the least
// significant byte of a ParkEvent address is always 0.
- void * operator new (size_t sz) ;
+ void * operator new (size_t sz) throw();
void operator delete (void * a) ;
public:
diff --git a/src/share/vm/runtime/perfData.cpp b/src/share/vm/runtime/perfData.cpp
index 777ea27f9..60b71fde1 100644
--- a/src/share/vm/runtime/perfData.cpp
+++ b/src/share/vm/runtime/perfData.cpp
@@ -323,6 +323,10 @@ void PerfDataManager::add_item(PerfData* p, bool sampled) {
}
}
+PerfData* PerfDataManager::find_by_name(const char* name) {
+ return _all->find_by_name(name);
+}
+
PerfDataList* PerfDataManager::all() {
MutexLocker ml(PerfDataManager_lock);
diff --git a/src/share/vm/runtime/perfData.hpp b/src/share/vm/runtime/perfData.hpp
index 07dc9c956..94996df1a 100644
--- a/src/share/vm/runtime/perfData.hpp
+++ b/src/share/vm/runtime/perfData.hpp
@@ -693,6 +693,9 @@ class PerfDataManager : AllStatic {
// the given name.
static bool exists(const char* name) { return _all->contains(name); }
+ // method to search for an instrumentation object by name
+ static PerfData* find_by_name(const char* name);
+
// method to map a CounterNS enumeration to a namespace string
static const char* ns_to_string(CounterNS ns) {
return _name_spaces[ns];
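A hypothetical lookup through the new accessor (the counter name is illustrative):

    PerfData* p = PerfDataManager::find_by_name("sun.ci.totalCompiles");
    if (p != NULL) {
      // sample or inspect the counter
    }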
diff --git a/src/share/vm/runtime/reflection.cpp b/src/share/vm/runtime/reflection.cpp
index d84143b66..fbc65ad50 100644
--- a/src/share/vm/runtime/reflection.cpp
+++ b/src/share/vm/runtime/reflection.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,8 +44,6 @@
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
-#define JAVA_1_5_VERSION 49
-
static void trace_class_resolution(Klass* to_class) {
ResourceMark rm;
int line_number = -1;
@@ -375,7 +373,7 @@ arrayOop Reflection::reflect_new_multi_array(oop element_mirror, typeArrayOop di
}
}
klass = klass->array_klass(dim, CHECK_NULL);
- oop obj = ArrayKlass::cast(klass)->multi_allocate(len, dimensions, THREAD);
+ oop obj = ArrayKlass::cast(klass)->multi_allocate(len, dimensions, CHECK_NULL);
// obj may be NULL if one of the dimensions is 0
assert(obj == NULL || obj->is_array(), "just checking");
return arrayOop(obj);
@@ -461,7 +459,7 @@ bool Reflection::verify_class_access(Klass* current_class, Klass* new_class, boo
// doesn't have a classloader.
if ((current_class == NULL) ||
(current_class == new_class) ||
- (InstanceKlass::cast(new_class)->is_public()) ||
+ (new_class->is_public()) ||
is_same_class_package(current_class, new_class)) {
return true;
}
@@ -508,9 +506,11 @@ bool Reflection::can_relax_access_check_for(
under_host_klass(accessee_ik, accessor))
return true;
- if (RelaxAccessControlCheck ||
- (accessor_ik->major_version() < JAVA_1_5_VERSION &&
- accessee_ik->major_version() < JAVA_1_5_VERSION)) {
+ if ((RelaxAccessControlCheck &&
+ accessor_ik->major_version() < Verifier::NO_RELAX_ACCESS_CTRL_CHECK_VERSION &&
+ accessee_ik->major_version() < Verifier::NO_RELAX_ACCESS_CTRL_CHECK_VERSION) ||
+ (accessor_ik->major_version() < Verifier::STRICTER_ACCESS_CTRL_CHECK_VERSION &&
+ accessee_ik->major_version() < Verifier::STRICTER_ACCESS_CTRL_CHECK_VERSION)) {
return classloader_only &&
Verifier::relax_verify_for(accessor_ik->class_loader()) &&
accessor_ik->protection_domain() == accessee_ik->protection_domain() &&
@@ -818,6 +818,10 @@ oop Reflection::new_constructor(methodHandle method, TRAPS) {
typeArrayOop an_oop = Annotations::make_java_array(method->parameter_annotations(), CHECK_NULL);
java_lang_reflect_Constructor::set_parameter_annotations(ch(), an_oop);
}
+ if (java_lang_reflect_Constructor::has_type_annotations_field()) {
+ typeArrayOop an_oop = Annotations::make_java_array(method->type_annotations(), CHECK_NULL);
+ java_lang_reflect_Constructor::set_type_annotations(ch(), an_oop);
+ }
return ch();
}
@@ -949,7 +953,8 @@ oop Reflection::invoke(instanceKlassHandle klass, methodHandle reflected_method,
}
} else {
// if the method can be overridden, we resolve using the vtable index.
- int index = reflected_method->vtable_index();
+ assert(!reflected_method->has_itable_index(), "");
+ int index = reflected_method->vtable_index();
method = reflected_method;
if (index != Method::nonvirtual_vtable_index) {
// target_klass might be an arrayKlassOop but all vtables start at
diff --git a/src/share/vm/runtime/reflectionUtils.hpp b/src/share/vm/runtime/reflectionUtils.hpp
index 7641fa769..71a500976 100644
--- a/src/share/vm/runtime/reflectionUtils.hpp
+++ b/src/share/vm/runtime/reflectionUtils.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -109,6 +109,8 @@ class FieldStream : public KlassStream {
private:
int length() const { return _klass->java_fields_count(); }
+ fieldDescriptor _fd_buf;
+
public:
FieldStream(instanceKlassHandle klass, bool local_only, bool classes_only)
: KlassStream(klass, local_only, classes_only) {
@@ -134,12 +136,18 @@ class FieldStream : public KlassStream {
int offset() const {
return _klass->field_offset( index() );
}
+ // bridge to a heavier API:
+ fieldDescriptor& field_descriptor() const {
+ fieldDescriptor& field = const_cast<fieldDescriptor&>(_fd_buf);
+ field.reinitialize(_klass(), _index);
+ return field;
+ }
};
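A minimal sketch of the new bridge in use, assuming an instanceKlassHandle ik:

    for (FieldStream fs(ik, false /* local_only */, false /* classes_only */);
         !fs.eos(); fs.next()) {
      fieldDescriptor& fd = fs.field_descriptor();
      // fd now describes the current field without a separate lookup
    }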
-class FilteredField {
+class FilteredField : public CHeapObj<mtInternal> {
private:
Klass* _klass;
- int _field_offset;
+ int _field_offset;
public:
FilteredField(Klass* klass, int field_offset) {
diff --git a/src/share/vm/runtime/sharedRuntime.cpp b/src/share/vm/runtime/sharedRuntime.cpp
index f11d2b178..cf221d89e 100644
--- a/src/share/vm/runtime/sharedRuntime.cpp
+++ b/src/share/vm/runtime/sharedRuntime.cpp
@@ -822,8 +822,11 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
// 3. Implicit null exception in nmethod
if (!cb->is_nmethod()) {
- guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(),
- "exception happened outside interpreter, nmethods and vtable stubs (1)");
+ bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
+ if (!is_in_blob) {
+ cb->print();
+ fatal(err_msg("exception happened outside interpreter, nmethods and vtable stubs at pc " INTPTR_FORMAT, pc));
+ }
Events::log_exception(thread, "NullPointerException in code blob at " INTPTR_FORMAT, pc);
// There is no handler here, so we will simply unwind.
return StubRoutines::throw_NullPointerException_at_call_entry();
@@ -892,15 +895,23 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
}
-JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...))
-{
- THROW(vmSymbols::java_lang_UnsatisfiedLinkError());
-}
-JNI_END
-
-JNI_ENTRY(void, throw_unsupported_operation_exception(JNIEnv* env, ...))
+/**
+ * Throws a java/lang/UnsatisfiedLinkError. The address of this method is
+ * installed in the native function entry of all native Java methods before
+ * they get linked to their actual native methods.
+ *
+ * \note
+ * This method actually never gets called! The reason is that
+ * the interpreter's native entries call NativeLookup::lookup(), which
+ * throws the exception when the lookup fails. The exception is then
+ * caught and forwarded on the return from the NativeLookup::lookup()
+ * call, before the call to the native function. This might change in the future.
+ */
+JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
{
- THROW(vmSymbols::java_lang_UnsupportedOperationException());
+ // We return a bad value here to make sure that the exception is
+ // forwarded before we look at the return value.
+ THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badJNIHandle);
}
JNI_END
@@ -908,10 +919,6 @@ address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
}
-address SharedRuntime::native_method_throw_unsupported_operation_exception_entry() {
- return CAST_FROM_FN_PTR(address, &throw_unsupported_operation_exception);
-}
-
#ifndef PRODUCT
JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
@@ -1053,7 +1060,8 @@ Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
// Find receiver for non-static call
if (bc != Bytecodes::_invokestatic &&
- bc != Bytecodes::_invokedynamic) {
+ bc != Bytecodes::_invokedynamic &&
+ bc != Bytecodes::_invokehandle) {
// This register map must be updated since we need to find the receiver for
// compiled frames. The receiver might be in a register.
RegisterMap reg_map2(thread);
@@ -1080,7 +1088,7 @@ Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
#ifdef ASSERT
// Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
- if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
+ if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic && bc != Bytecodes::_invokehandle) {
assert(receiver.not_null(), "should have thrown exception");
KlassHandle receiver_klass(THREAD, receiver->klass());
Klass* rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
@@ -1242,9 +1250,9 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
#endif
if (is_virtual) {
- assert(receiver.not_null(), "sanity check");
+ assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
bool static_bound = call_info.resolved_method()->can_be_statically_bound();
- KlassHandle h_klass(THREAD, receiver->klass());
+ KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
is_optimized, static_bound, virtual_call_info,
CHECK_(methodHandle()));
@@ -1507,8 +1515,11 @@ methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
info, CHECK_(methodHandle()));
inline_cache->set_to_monomorphic(info);
} else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
- // Change to megamorphic
- inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
+ // Potential change to megamorphic
+ bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
+ if (!successful) {
+ inline_cache->set_to_clean();
+ }
} else {
// Either clean or megamorphic
}
@@ -2749,12 +2760,12 @@ VMReg SharedRuntime::name_for_receiver() {
return regs.first();
}
-VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, int* arg_size) {
+VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) {
// This method returns a data structure allocated as a
// ResourceObject, so do not put any ResourceMarks in here.
char *s = sig->as_C_string();
int len = (int)strlen(s);
- *s++; len--; // Skip opening paren
+ s++; len--; // Skip opening paren
char *t = s+len;
while( *(--t) != ')' ) ; // Find close paren
@@ -2793,6 +2804,11 @@ VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver,
default : ShouldNotReachHere();
}
}
+
+ if (has_appendix) {
+ sig_bt[cnt++] = T_OBJECT;
+ }
+
assert( cnt < 256, "grow table size" );
int comp_args_on_stack;
diff --git a/src/share/vm/runtime/sharedRuntime.hpp b/src/share/vm/runtime/sharedRuntime.hpp
index e6867645a..a7f0d92a1 100644
--- a/src/share/vm/runtime/sharedRuntime.hpp
+++ b/src/share/vm/runtime/sharedRuntime.hpp
@@ -410,7 +410,7 @@ class SharedRuntime: AllStatic {
// Convert a sig into a calling convention register layout
// and find interesting things about it.
- static VMRegPair* find_callee_arguments(Symbol* sig, bool has_receiver, int *arg_size);
+ static VMRegPair* find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int *arg_size);
static VMReg name_for_receiver();
// "Top of Stack" slots that may be unused by the calling convention but must
diff --git a/src/share/vm/runtime/stubRoutines.cpp b/src/share/vm/runtime/stubRoutines.cpp
index 0482b4698..4cb3a44f7 100644
--- a/src/share/vm/runtime/stubRoutines.cpp
+++ b/src/share/vm/runtime/stubRoutines.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -125,6 +125,9 @@ address StubRoutines::_aescrypt_decryptBlock = NULL;
address StubRoutines::_cipherBlockChaining_encryptAESCrypt = NULL;
address StubRoutines::_cipherBlockChaining_decryptAESCrypt = NULL;
+address StubRoutines::_updateBytesCRC32 = NULL;
+address StubRoutines::_crc_table_adr = NULL;
+
double (* StubRoutines::_intrinsic_log )(double) = NULL;
double (* StubRoutines::_intrinsic_log10 )(double) = NULL;
double (* StubRoutines::_intrinsic_exp )(double) = NULL;
@@ -133,6 +136,13 @@ double (* StubRoutines::_intrinsic_sin )(double) = NULL;
double (* StubRoutines::_intrinsic_cos )(double) = NULL;
double (* StubRoutines::_intrinsic_tan )(double) = NULL;
+address StubRoutines::_safefetch32_entry = NULL;
+address StubRoutines::_safefetch32_fault_pc = NULL;
+address StubRoutines::_safefetch32_continuation_pc = NULL;
+address StubRoutines::_safefetchN_entry = NULL;
+address StubRoutines::_safefetchN_fault_pc = NULL;
+address StubRoutines::_safefetchN_continuation_pc = NULL;
+
// Initialization
//
// Note: to break cycle with universe initialization, stubs are generated in two phases.
diff --git a/src/share/vm/runtime/stubRoutines.hpp b/src/share/vm/runtime/stubRoutines.hpp
index e33284459..c115a3bc0 100644
--- a/src/share/vm/runtime/stubRoutines.hpp
+++ b/src/share/vm/runtime/stubRoutines.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -210,6 +210,9 @@ class StubRoutines: AllStatic {
static address _cipherBlockChaining_encryptAESCrypt;
static address _cipherBlockChaining_decryptAESCrypt;
+ static address _updateBytesCRC32;
+ static address _crc_table_adr;
+
// These are versions of the java.lang.Math methods which perform
// the same operations as the intrinsic version. They are used for
// constant folding in the compiler to ensure equivalence. If the
@@ -224,11 +227,21 @@ class StubRoutines: AllStatic {
static double (*_intrinsic_cos)(double);
static double (*_intrinsic_tan)(double);
+ // Safefetch stubs.
+ static address _safefetch32_entry;
+ static address _safefetch32_fault_pc;
+ static address _safefetch32_continuation_pc;
+ static address _safefetchN_entry;
+ static address _safefetchN_fault_pc;
+ static address _safefetchN_continuation_pc;
+
public:
// Initialization/Testing
static void initialize1(); // must happen before universe::genesis
static void initialize2(); // must happen after universe::genesis
+ static bool is_stub_code(address addr) { return contains(addr); }
+
static bool contains(address addr) {
return
(_code1 != NULL && _code1->blob_contains(addr)) ||
@@ -346,6 +359,9 @@ class StubRoutines: AllStatic {
static address cipherBlockChaining_encryptAESCrypt() { return _cipherBlockChaining_encryptAESCrypt; }
static address cipherBlockChaining_decryptAESCrypt() { return _cipherBlockChaining_decryptAESCrypt; }
+ static address updateBytesCRC32() { return _updateBytesCRC32; }
+ static address crc_table_addr() { return _crc_table_adr; }
+
static address select_fill_function(BasicType t, bool aligned, const char* &name);
static address zero_aligned_words() { return _zero_aligned_words; }
@@ -380,6 +396,34 @@ class StubRoutines: AllStatic {
}
//
+ // Safefetch stub support
+ //
+
+ typedef int (*SafeFetch32Stub)(int* adr, int errValue);
+ typedef intptr_t (*SafeFetchNStub) (intptr_t* adr, intptr_t errValue);
+
+ static SafeFetch32Stub SafeFetch32_stub() { return CAST_TO_FN_PTR(SafeFetch32Stub, _safefetch32_entry); }
+ static SafeFetchNStub SafeFetchN_stub() { return CAST_TO_FN_PTR(SafeFetchNStub, _safefetchN_entry); }
+
+ static bool is_safefetch_fault(address pc) {
+ return pc != NULL &&
+ (pc == _safefetch32_fault_pc ||
+ pc == _safefetchN_fault_pc);
+ }
+
+ static address continuation_for_safefetch_fault(address pc) {
+ assert(_safefetch32_continuation_pc != NULL &&
+ _safefetchN_continuation_pc != NULL,
+ "not initialized");
+
+ if (pc == _safefetch32_fault_pc) return _safefetch32_continuation_pc;
+ if (pc == _safefetchN_fault_pc) return _safefetchN_continuation_pc;
+
+ ShouldNotReachHere();
+ return NULL;
+ }
+
+ //
// Default versions of the above arraycopy functions for platforms which do
// not have specialized versions
//
@@ -398,4 +442,15 @@ class StubRoutines: AllStatic {
static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count);
};
+// SafeFetch makes it possible to load a value from a location that is not
+// known to be valid. If the load causes a fault, the error value is returned.
+inline int SafeFetch32(int* adr, int errValue) {
+ assert(StubRoutines::SafeFetch32_stub(), "stub not yet generated");
+ return StubRoutines::SafeFetch32_stub()(adr, errValue);
+}
+inline intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) {
+ assert(StubRoutines::SafeFetchN_stub(), "stub not yet generated");
+ return StubRoutines::SafeFetchN_stub()(adr, errValue);
+}
+
#endif // SHARE_VM_RUNTIME_STUBROUTINES_HPP
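A hedged example of the stub-backed SafeFetch32 in code that must not fault, such as error reporting (maybe_bad_ptr is a placeholder):

    // Probe a possibly-invalid address; the sentinel comes back on a fault.
    int value = SafeFetch32(maybe_bad_ptr, -1);
    if (value == -1) {
      // address was unreadable (or genuinely held -1)
    }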
diff --git a/src/share/vm/runtime/sweeper.cpp b/src/share/vm/runtime/sweeper.cpp
index 2921b2544..37315aec3 100644
--- a/src/share/vm/runtime/sweeper.cpp
+++ b/src/share/vm/runtime/sweeper.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vm_operations.hpp"
+#include "trace/tracing.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
@@ -130,6 +131,9 @@ void NMethodSweeper::record_sweep(nmethod* nm, int line) {
long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
int NMethodSweeper::_seen = 0 ; // No. of nmethods we have currently processed in current pass of CodeCache
+int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
+int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
+int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep
volatile int NMethodSweeper::_invocations = 0; // No. of invocations left until this pass is complete
volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
@@ -143,6 +147,15 @@ int NMethodSweeper::_highest_marked = 0;
int NMethodSweeper::_dead_compile_ids = 0;
long NMethodSweeper::_last_flush_traversal_id = 0;
+int NMethodSweeper::_number_of_flushes = 0; // Total number of full traversals caused by a full code cache
+int NMethodSweeper::_total_nof_methods_reclaimed = 0;
+jlong NMethodSweeper::_total_time_sweeping = 0;
+jlong NMethodSweeper::_total_time_this_sweep = 0;
+jlong NMethodSweeper::_peak_sweep_time = 0;
+jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
+jlong NMethodSweeper::_total_disconnect_time = 0;
+jlong NMethodSweeper::_peak_disconnect_time = 0;
+
class MarkActivationClosure: public CodeBlobClosure {
public:
virtual void do_code_blob(CodeBlob* cb) {
@@ -176,6 +189,8 @@ void NMethodSweeper::scan_stacks() {
_invocations = NmethodSweepFraction;
_current = CodeCache::first_nmethod();
_traversals += 1;
+ _total_time_this_sweep = 0;
+
if (PrintMethodFlushing) {
tty->print_cr("### Sweep: stack traversal %d", _traversals);
}
@@ -229,12 +244,13 @@ void NMethodSweeper::possibly_sweep() {
}
void NMethodSweeper::sweep_code_cache() {
-#ifdef ASSERT
- jlong sweep_start;
- if (PrintMethodFlushing) {
- sweep_start = os::javaTimeMillis();
- }
-#endif
+
+ jlong sweep_start_counter = os::elapsed_counter();
+
+ _flushed_count = 0;
+ _zombified_count = 0;
+ _marked_count = 0;
+
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
}
@@ -253,6 +269,7 @@ void NMethodSweeper::sweep_code_cache() {
// the number of nmethods changes during the sweep, so the final
// stage must iterate until there are no more nmethods.
int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
+ int swept_count = 0;
assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
assert(!CodeCache_lock->owned_by_self(), "just checking");
@@ -262,6 +279,7 @@ void NMethodSweeper::sweep_code_cache() {
// The last invocation iterates until there are no more nmethods
for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
+ swept_count++;
if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
@@ -302,14 +320,34 @@ void NMethodSweeper::sweep_code_cache() {
}
}
+ jlong sweep_end_counter = os::elapsed_counter();
+ jlong sweep_time = sweep_end_counter - sweep_start_counter;
+ _total_time_sweeping += sweep_time;
+ _total_time_this_sweep += sweep_time;
+ _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
+ _total_nof_methods_reclaimed += _flushed_count;
+
+ EventSweepCodeCache event(UNTIMED);
+ if (event.should_commit()) {
+ event.set_starttime(sweep_start_counter);
+ event.set_endtime(sweep_end_counter);
+ event.set_sweepIndex(_traversals);
+ event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
+ event.set_sweptCount(swept_count);
+ event.set_flushedCount(_flushed_count);
+ event.set_markedCount(_marked_count);
+ event.set_zombifiedCount(_zombified_count);
+ event.commit();
+ }
+
#ifdef ASSERT
if(PrintMethodFlushing) {
- jlong sweep_end = os::javaTimeMillis();
- tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
+ tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
}
#endif
if (_invocations == 1) {
+ _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
log_sweep("finished");
}
@@ -388,12 +426,14 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
}
release_nmethod(nm);
+ _flushed_count++;
} else {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
}
nm->mark_for_reclamation();
_resweep = true;
+ _marked_count++;
SWEEP(nm);
}
} else if (nm->is_not_entrant()) {
@@ -405,6 +445,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
}
nm->make_zombie();
_resweep = true;
+ _zombified_count++;
SWEEP(nm);
} else {
// Still alive, clean up its inline caches
@@ -420,13 +461,16 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
// Unloaded code, just make it a zombie
if (PrintMethodFlushing && Verbose)
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
+
if (nm->is_osr_method()) {
SWEEP(nm);
// No inline caches will ever point to osr methods, so we can just remove it
release_nmethod(nm);
+ _flushed_count++;
} else {
nm->make_zombie();
_resweep = true;
+ _zombified_count++;
SWEEP(nm);
}
} else {
@@ -484,7 +528,7 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
// If there was a race in detecting full code cache, only run
// one vm op for it or keep the compiler shut off
- debug_only(jlong start = os::javaTimeMillis();)
+ jlong disconnect_start_counter = os::elapsed_counter();
// Traverse the code cache trying to dump the oldest nmethods
int curr_max_comp_id = CompileBroker::get_compilation_id();
@@ -541,13 +585,28 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
_last_full_flush_time = os::javaTimeMillis();
}
+ jlong disconnect_end_counter = os::elapsed_counter();
+ jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
+ _total_disconnect_time += disconnect_time;
+ _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
+
+ EventCleanCodeCache event(UNTIMED);
+ if (event.should_commit()) {
+ event.set_starttime(disconnect_start_counter);
+ event.set_endtime(disconnect_end_counter);
+ event.set_disconnectedCount(disconnected);
+ event.set_madeNonEntrantCount(made_not_entrant);
+ event.commit();
+ }
+ _number_of_flushes++;
+
// After two more traversals the sweeper will get rid of unrestored nmethods
_last_flush_traversal_id = _traversals;
_resweep = true;
#ifdef ASSERT
- jlong end = os::javaTimeMillis();
+
if(PrintMethodFlushing && Verbose) {
- tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
+ tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
}
#endif
}
diff --git a/src/share/vm/runtime/sweeper.hpp b/src/share/vm/runtime/sweeper.hpp
index ff63029f1..da4a13adc 100644
--- a/src/share/vm/runtime/sweeper.hpp
+++ b/src/share/vm/runtime/sweeper.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,9 +31,12 @@
//
class NMethodSweeper : public AllStatic {
- static long _traversals; // Stack traversal count
- static nmethod* _current; // Current nmethod
- static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache
+ static long _traversals; // Stack scan count, also sweep ID.
+ static nmethod* _current; // Current nmethod
+ static int _seen; // Nof. nmethods we have currently processed in current pass of CodeCache
+ static int _flushed_count; // Nof. nmethods flushed in current sweep
+ static int _zombified_count; // Nof. nmethods made zombie in current sweep
+ static int _marked_count; // Nof. nmethods marked for reclaim in current sweep
static volatile int _invocations; // No. of invocations left until this pass is complete
static volatile int _sweep_started; // Flag to control conc sweeper
@@ -53,6 +56,16 @@ class NMethodSweeper : public AllStatic {
static int _highest_marked; // highest compile id dumped at last emergency unloading
static int _dead_compile_ids; // number of compile ids that were not in the cache last flush
+ // Stat counters
+ static int _number_of_flushes; // Total number of full traversals caused by a full code cache
+ static int _total_nof_methods_reclaimed; // Accumulated nof methods flushed
+ static jlong _total_time_sweeping; // Accumulated time sweeping
+ static jlong _total_time_this_sweep; // Total time this sweep
+ static jlong _peak_sweep_time; // Peak time for a full sweep
+ static jlong _peak_sweep_fraction_time; // Peak time sweeping one fraction
+ static jlong _total_disconnect_time; // Total time cleaning code mem
+ static jlong _peak_disconnect_time; // Peak time cleaning code mem
+
static void process_nmethod(nmethod *nm);
static void release_nmethod(nmethod* nm);
@@ -60,9 +73,17 @@ class NMethodSweeper : public AllStatic {
static bool sweep_in_progress();
public:
- static long traversal_count() { return _traversals; }
+ static long traversal_count() { return _traversals; }
+ static int number_of_flushes() { return _number_of_flushes; }
+ static int total_nof_methods_reclaimed() { return _total_nof_methods_reclaimed; }
+ static jlong total_time_sweeping() { return _total_time_sweeping; }
+ static jlong peak_sweep_time() { return _peak_sweep_time; }
+ static jlong peak_sweep_fraction_time() { return _peak_sweep_fraction_time; }
+ static jlong total_disconnect_time() { return _total_disconnect_time; }
+ static jlong peak_disconnect_time() { return _peak_disconnect_time; }
#ifdef ASSERT
+ static bool is_sweeping(nmethod* which) { return _current == which; }
// Keep track of sweeper activity in the ring buffer
static void record_sweep(nmethod* nm, int line);
static void report_events(int id, address entry);
diff --git a/src/share/vm/runtime/synchronizer.cpp b/src/share/vm/runtime/synchronizer.cpp
index ede9affb9..015dd757b 100644
--- a/src/share/vm/runtime/synchronizer.cpp
+++ b/src/share/vm/runtime/synchronizer.cpp
@@ -213,7 +213,7 @@ void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
}
}
- ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
+ ObjectSynchronizer::inflate(THREAD, object)->exit (true, THREAD) ;
}
// -----------------------------------------------------------------------------
@@ -343,7 +343,7 @@ void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
// If this thread has locked the object, exit the monitor. Note: can't use
// monitor->check(CHECK); must exit even if an exception is pending.
if (monitor->check(THREAD)) {
- monitor->exit(THREAD);
+ monitor->exit(true, THREAD);
}
}
diff --git a/src/share/vm/runtime/task.cpp b/src/share/vm/runtime/task.cpp
index 9d2286f2d..ef57dcd68 100644
--- a/src/share/vm/runtime/task.cpp
+++ b/src/share/vm/runtime/task.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -114,9 +114,11 @@ PeriodicTask::~PeriodicTask() {
disenroll();
}
+/* enroll could be called from a JavaThread, so we have to check for
+ * a safepoint when taking the lock to avoid deadlocking */
void PeriodicTask::enroll() {
MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
- NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+ NULL : PeriodicTask_lock);
if (_num_tasks == PeriodicTask::max_tasks) {
fatal("Overflow in PeriodicTask table");
@@ -131,9 +133,11 @@ void PeriodicTask::enroll() {
}
}
+/* disenroll could be called from a JavaThread, so we have to check for
+ * a safepoint when taking the lock to avoid deadlocking */
void PeriodicTask::disenroll() {
MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
- NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+ NULL : PeriodicTask_lock);
int index;
for(index = 0; index < _num_tasks && _tasks[index] != this; index++)
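A hypothetical task sketch; with this change, enroll() called from a JavaThread may now block at a safepoint while acquiring PeriodicTask_lock:

    class HeartbeatTask : public PeriodicTask {
     public:
      HeartbeatTask() : PeriodicTask(1000 /* interval in ms */) {}
      virtual void task() { tty->print_cr("heartbeat"); }
    };
    // from a JavaThread:
    //   HeartbeatTask* t = new HeartbeatTask();
    //   t->enroll();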
diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp
index 7c3e256e2..a0f0bdba7 100644
--- a/src/share/vm/runtime/thread.cpp
+++ b/src/share/vm/runtime/thread.cpp
@@ -45,7 +45,6 @@
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/privilegedStack.hpp"
-#include "runtime/aprofiler.hpp"
#include "runtime/arguments.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/deoptimization.hpp"
@@ -77,7 +76,8 @@
#include "services/management.hpp"
#include "services/memTracker.hpp"
#include "services/threadService.hpp"
-#include "trace/traceEventTypes.hpp"
+#include "trace/tracing.hpp"
+#include "trace/traceMacros.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
@@ -218,8 +218,9 @@ Thread::Thread() {
// allocated data structures
set_osthread(NULL);
set_resource_area(new (mtThread)ResourceArea());
+ DEBUG_ONLY(_current_resource_mark = NULL;)
set_handle_area(new (mtThread) HandleArea(NULL));
- set_metadata_handles(new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(300, true));
+ set_metadata_handles(new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(30, true));
set_active_handles(NULL);
set_free_handle_block(NULL);
set_last_handle_mark(NULL);
@@ -238,7 +239,6 @@ Thread::Thread() {
CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
_jvmti_env_iteration_count = 0;
set_allocated_bytes(0);
- set_trace_buffer(NULL);
_vm_operation_started_count = 0;
_vm_operation_completed_count = 0;
_current_pending_monitor = NULL;
@@ -333,6 +333,8 @@ Thread::~Thread() {
// Reclaim the objectmonitors from the omFreeList of the moribund thread.
ObjectSynchronizer::omFlush (this) ;
+ EVENT_THREAD_DESTRUCT(this);
+
// stack_base can be NULL if the thread is never started or exits before
// record_stack_base_and_size() is called. Although we would like to ensure
// that all started threads do call record_stack_base_and_size(), there is
@@ -954,6 +956,14 @@ bool Thread::is_in_stack(address adr) const {
}
+bool Thread::is_in_usable_stack(address adr) const {
+ size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
+ size_t usable_stack_size = _stack_size - stack_guard_size;
+
+ return ((adr < stack_base()) && (adr >= stack_base() - usable_stack_size));
+}
+
+
// We had to move these methods here, because vm threads get into ObjectSynchronizer::enter
// However, there is a note in JavaThread::is_lock_owned() about the VM threads not being
// used for compilation in the future. If that change is made, the need for these methods
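
A standalone arithmetic sketch of the usable-stack test added above. All the constants are illustrative stand-ins (HotSpot reads the real values from StackYellowPages, StackRedPages and os::vm_page_size()); stacks are assumed to grow downward from stack_base().

    #include <cstdint>
    #include <cstdio>

    int main() {
      const size_t    page       = 4096;        // os::vm_page_size() stand-in
      const size_t    yellow     = 2, red = 1;  // guard page counts (example values)
      const size_t    stack_size = 512 * 1024;
      const uintptr_t stack_base = 0x7fff0000;  // highest stack address

      size_t guard  = (yellow + red) * page;    // protected low-end pages
      size_t usable = stack_size - guard;

      uintptr_t adr = stack_base - 64 * 1024;   // candidate address
      bool in_usable = (adr < stack_base) && (adr >= stack_base - usable);
      printf("guard=%zu usable=%zu in_usable=%d\n", guard, usable, (int) in_usable);
      return 0;
    }
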
@@ -1218,7 +1228,7 @@ WatcherThread* WatcherThread::_watcher_thread = NULL;
bool WatcherThread::_startable = false;
volatile bool WatcherThread::_should_terminate = false;
-WatcherThread::WatcherThread() : Thread() {
+WatcherThread::WatcherThread() : Thread(), _crash_protection(NULL) {
assert(watcher_thread() == NULL, "we can only allocate one WatcherThread");
if (os::create_thread(this, os::watcher_thread)) {
_watcher_thread = this;
@@ -1659,9 +1669,11 @@ void JavaThread::run() {
JvmtiExport::post_thread_start(this);
}
- EVENT_BEGIN(TraceEventThreadStart, event);
- EVENT_COMMIT(event,
- EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj())));
+ EventThreadStart event;
+ if (event.should_commit()) {
+ event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
+ event.commit();
+ }
// We call another function to do the rest so we are sure that the stack addresses used
// from there will be lower than the stack base just computed
@@ -1791,9 +1803,11 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
// Called before the java thread exit since we want to read info
// from java_lang_Thread object
- EVENT_BEGIN(TraceEventThreadEnd, event);
- EVENT_COMMIT(event,
- EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj())));
+ EventThreadEnd event;
+ if (event.should_commit()) {
+ event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
+ event.commit();
+ }
// Call after last event on thread
EVENT_THREAD_EXIT(this);
@@ -3317,6 +3331,11 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
jint parse_result = Arguments::parse(args);
if (parse_result != JNI_OK) return parse_result;
+ os::init_before_ergo();
+
+ jint ergo_result = Arguments::apply_ergo();
+ if (ergo_result != JNI_OK) return ergo_result;
+
if (PauseAtStartup) {
os::pause();
}
@@ -3478,44 +3497,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
initialize_class(vmSymbols::java_lang_String(), CHECK_0);
- if (AggressiveOpts) {
- {
- // Forcibly initialize java/util/HashMap and mutate the private
- // static final "frontCacheEnabled" field before we start creating instances
-#ifdef ASSERT
- Klass* tmp_k = SystemDictionary::find(vmSymbols::java_util_HashMap(), Handle(), Handle(), CHECK_0);
- assert(tmp_k == NULL, "java/util/HashMap should not be loaded yet");
-#endif
- Klass* k_o = SystemDictionary::resolve_or_null(vmSymbols::java_util_HashMap(), Handle(), Handle(), CHECK_0);
- KlassHandle k = KlassHandle(THREAD, k_o);
- guarantee(k.not_null(), "Must find java/util/HashMap");
- instanceKlassHandle ik = instanceKlassHandle(THREAD, k());
- ik->initialize(CHECK_0);
- fieldDescriptor fd;
- // Possible we might not find this field; if so, don't break
- if (ik->find_local_field(vmSymbols::frontCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) {
- k()->java_mirror()->bool_field_put(fd.offset(), true);
- }
- }
-
- if (UseStringCache) {
- // Forcibly initialize java/lang/StringValue and mutate the private
- // static final "stringCacheEnabled" field before we start creating instances
- Klass* k_o = SystemDictionary::resolve_or_null(vmSymbols::java_lang_StringValue(), Handle(), Handle(), CHECK_0);
- // Possible that StringValue isn't present: if so, silently don't break
- if (k_o != NULL) {
- KlassHandle k = KlassHandle(THREAD, k_o);
- instanceKlassHandle ik = instanceKlassHandle(THREAD, k());
- ik->initialize(CHECK_0);
- fieldDescriptor fd;
- // Possible we might not find this field: if so, silently don't break
- if (ik->find_local_field(vmSymbols::stringCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) {
- k()->java_mirror()->bool_field_put(fd.offset(), true);
- }
- }
- }
- }
-
// Initialize java_lang.System (needed before creating the thread)
initialize_class(vmSymbols::java_lang_System(), CHECK_0);
initialize_class(vmSymbols::java_lang_ThreadGroup(), CHECK_0);
@@ -3633,6 +3614,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// Start Attach Listener if +StartAttachListener or it can't be started lazily
if (!DisableAttachMechanism) {
+ AttachListener::vm_start();
if (StartAttachListener || AttachListener::init_at_startup()) {
AttachListener::init();
}
@@ -3648,8 +3630,8 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// Notify JVMTI agents that VM initialization is complete - nop if no agents.
JvmtiExport::post_vm_initialized();
- if (!TRACE_START()) {
- vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
+ if (TRACE_START() != JNI_OK) {
+ vm_exit_during_initialization("Failed to start tracing backend.");
}
if (CleanChunkPoolAsync) {
@@ -3661,6 +3643,16 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
CompileBroker::compilation_init();
#endif
+ if (EnableInvokeDynamic) {
+ // Pre-initialize some JSR292 core classes to avoid deadlock during class loading.
+ // It is done after compilers are initialized, because otherwise compilations of
+ // signature polymorphic MH intrinsics can be missed
+ // (see SystemDictionary::find_method_handle_intrinsic).
+ initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK_0);
+ initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK_0);
+ initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK_0);
+ }
+
#if INCLUDE_MANAGEMENT
Management::initialize(THREAD);
#endif // INCLUDE_MANAGEMENT
@@ -3673,7 +3665,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
}
if (Arguments::has_profile()) FlatProfiler::engage(main_thread, true);
- if (Arguments::has_alloc_profile()) AllocationProfiler::engage();
if (MemProfiling) MemProfiler::engage();
StatSampler::engage();
if (CheckJNICalls) JniPeriodicChecker::engage();
@@ -3722,15 +3713,18 @@ extern "C" {
// num_symbol_entries must be passed-in since only the caller knows the number of symbols in the array.
static OnLoadEntry_t lookup_on_load(AgentLibrary* agent, const char *on_load_symbols[], size_t num_symbol_entries) {
OnLoadEntry_t on_load_entry = NULL;
- void *library = agent->os_lib(); // check if we have looked it up before
+ void *library = NULL;
- if (library == NULL) {
+ if (!agent->valid()) {
char buffer[JVM_MAXPATHLEN];
char ebuf[1024];
const char *name = agent->name();
const char *msg = "Could not find agent library ";
- if (agent->is_absolute_path()) {
+ // First check to see if agent is statically linked into executable
+ if (os::find_builtin_agent(agent, on_load_symbols, num_symbol_entries)) {
+ library = agent->os_lib();
+ } else if (agent->is_absolute_path()) {
library = os::dll_load(name, ebuf, sizeof ebuf);
if (library == NULL) {
const char *sub_msg = " in absolute path, with error: ";
@@ -3764,13 +3758,15 @@ static OnLoadEntry_t lookup_on_load(AgentLibrary* agent, const char *on_load_sym
}
}
agent->set_os_lib(library);
+ agent->set_valid();
}
// Find the OnLoad function.
- for (size_t symbol_index = 0; symbol_index < num_symbol_entries; symbol_index++) {
- on_load_entry = CAST_TO_FN_PTR(OnLoadEntry_t, os::dll_lookup(library, on_load_symbols[symbol_index]));
- if (on_load_entry != NULL) break;
- }
+ on_load_entry =
+ CAST_TO_FN_PTR(OnLoadEntry_t, os::find_agent_function(agent,
+ false,
+ on_load_symbols,
+ num_symbol_entries));
return on_load_entry;
}
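
A standalone sketch (POSIX dlsym, illustrative only) of the multi-name symbol probe that os::find_agent_function centralizes: try each candidate entry point name in order and return the first hit. The decorated names used for statically linked agents (e.g. "Agent_OnLoad_<libname>") are an assumption based on the JVMTI static-linking convention, not something shown in this patch.

    #include <dlfcn.h>
    #include <cstddef>

    typedef void* (*entry_t)();

    entry_t find_first_symbol(void* library, const char* names[], size_t count) {
      for (size_t i = 0; i < count; i++) {
        void* sym = dlsym(library, names[i]);   // NULL if the name is absent
        if (sym != NULL) {
          return (entry_t) sym;
        }
      }
      return NULL;
    }
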
@@ -3845,22 +3841,23 @@ extern "C" {
void Threads::shutdown_vm_agents() {
// Send any Agent_OnUnload notifications
const char *on_unload_symbols[] = AGENT_ONUNLOAD_SYMBOLS;
+ size_t num_symbol_entries = ARRAY_SIZE(on_unload_symbols);
extern struct JavaVM_ main_vm;
for (AgentLibrary* agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
// Find the Agent_OnUnload function.
- for (uint symbol_index = 0; symbol_index < ARRAY_SIZE(on_unload_symbols); symbol_index++) {
- Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t,
- os::dll_lookup(agent->os_lib(), on_unload_symbols[symbol_index]));
-
- // Invoke the Agent_OnUnload function
- if (unload_entry != NULL) {
- JavaThread* thread = JavaThread::current();
- ThreadToNativeFromVM ttn(thread);
- HandleMark hm(thread);
- (*unload_entry)(&main_vm);
- break;
- }
+ Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t,
+ os::find_agent_function(agent,
+ false,
+ on_unload_symbols,
+ num_symbol_entries));
+
+ // Invoke the Agent_OnUnload function
+ if (unload_entry != NULL) {
+ JavaThread* thread = JavaThread::current();
+ ThreadToNativeFromVM ttn(thread);
+ HandleMark hm(thread);
+ (*unload_entry)(&main_vm);
}
}
}
diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp
index c9a932fc8..003b8b698 100644
--- a/src/share/vm/runtime/thread.hpp
+++ b/src/share/vm/runtime/thread.hpp
@@ -47,7 +47,8 @@
#include "services/memRecorder.hpp"
#endif // INCLUDE_NMT
-#include "trace/tracing.hpp"
+#include "trace/traceBackend.hpp"
+#include "trace/traceMacros.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/top.hpp"
#if INCLUDE_ALL_GCS
@@ -85,6 +86,8 @@ class GCTaskQueue;
class ThreadClosure;
class IdealGraphPrinter;
+DEBUG_ONLY(class ResourceMark;)
+
class WorkerThread;
// Class hierarchy
@@ -110,8 +113,9 @@ class Thread: public ThreadShadow {
// Support for forcing alignment of thread objects for biased locking
void* _real_malloc_address;
public:
- void* operator new(size_t size) { return allocate(size, true); }
- void* operator new(size_t size, const std::nothrow_t& nothrow_constant) { return allocate(size, false); }
+ void* operator new(size_t size) throw() { return allocate(size, true); }
+ void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
+ return allocate(size, false); }
void operator delete(void* p);
protected:
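
A minimal sketch of why the empty exception specification added above matters: a plain operator new is assumed by the compiler never to return NULL, so null checks after the call can legally be optimized away; declaring it throw() keeps the NULL-returning failure path well defined. The types here are simplified stand-ins for the Thread hierarchy, not HotSpot code.

    #include <cstdlib>
    #include <new>

    class Example {
     public:
      void* operator new(size_t size) throw() { return allocate(size); }
      void* operator new(size_t size, const std::nothrow_t&) throw() {
        return allocate(size);
      }
      void operator delete(void* p) { free(p); }
     private:
      static void* allocate(size_t size) { return malloc(size); }  // may be NULL
    };
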
@@ -258,7 +262,7 @@ class Thread: public ThreadShadow {
jlong _allocated_bytes; // Cumulative number of bytes allocated on
// the Java heap
- TRACE_BUFFER _trace_buffer; // Thread-local buffer for tracing
+ TRACE_DATA _trace_data; // Thread-local data for tracing
int _vm_operation_started_count; // VM_Operation support
int _vm_operation_completed_count; // VM_Operation support
@@ -449,8 +453,7 @@ class Thread: public ThreadShadow {
return allocated_bytes;
}
- TRACE_BUFFER trace_buffer() { return _trace_buffer; }
- void set_trace_buffer(TRACE_BUFFER buf) { _trace_buffer = buf; }
+ TRACE_DATA* trace_data() { return &_trace_data; }
// VM operation support
int vm_operation_ticket() { return ++_vm_operation_started_count; }
@@ -519,6 +522,9 @@ public:
// Check if address is in the stack of the thread (not just for locks).
// Warning: the method can only be used on the running thread
bool is_in_stack(address adr) const;
+ // Check if address is in the usable part of the stack (excludes protected
+ // guard pages)
+ bool is_in_usable_stack(address adr) const;
// Sets this thread as starting thread. Returns failure if thread
// creation fails due to lack of memory, too many threads etc.
@@ -531,6 +537,8 @@ public:
// Thread local resource area for temporary allocation within the VM
ResourceArea* _resource_area;
+ DEBUG_ONLY(ResourceMark* _current_resource_mark;)
+
// Thread local handle area for allocation of handles within the VM
HandleArea* _handle_area;
GrowableArray<Metadata*>* _metadata_handles;
@@ -585,6 +593,8 @@ public:
// Deadlock detection
bool allow_allocation() { return _allow_allocation_count == 0; }
+ ResourceMark* current_resource_mark() { return _current_resource_mark; }
+ void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
#endif
void check_for_valid_safepoint_state(bool potential_vm_operation) PRODUCT_RETURN;
@@ -638,9 +648,6 @@ public:
jint _hashStateZ ;
void * _schedctl ;
- intptr_t _ScratchA, _ScratchB ; // Scratch locations for fast-path sync code
- static ByteSize ScratchA_offset() { return byte_offset_of(Thread, _ScratchA ); }
- static ByteSize ScratchB_offset() { return byte_offset_of(Thread, _ScratchB ); }
volatile jint rng [4] ; // RNG for spin loop
@@ -727,6 +734,8 @@ class WatcherThread: public Thread {
static bool _startable;
volatile static bool _should_terminate; // updated without holding lock
+
+ os::WatcherThreadCrashProtection* _crash_protection;
public:
enum SomeConstants {
delay_interval = 10 // interrupt delay in milliseconds
@@ -754,6 +763,14 @@ class WatcherThread: public Thread {
// Otherwise the first task to enroll will trigger the start
static void make_startable();
+ void set_crash_protection(os::WatcherThreadCrashProtection* crash_protection) {
+ assert(Thread::current()->is_Watcher_thread(), "Can only be set by WatcherThread");
+ _crash_protection = crash_protection;
+ }
+
+ bool has_crash_protection() const { return _crash_protection != NULL; }
+ os::WatcherThreadCrashProtection* crash_protection() const { return _crash_protection; }
+
private:
int sleep() const;
};
diff --git a/src/share/vm/runtime/timer.cpp b/src/share/vm/runtime/timer.cpp
index 838262650..12c32660b 100644
--- a/src/share/vm/runtime/timer.cpp
+++ b/src/share/vm/runtime/timer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,11 @@
# include "os_bsd.inline.hpp"
#endif
+double TimeHelper::counter_to_seconds(jlong counter) {
+ double count = (double) counter;
+ double freq = (double) os::elapsed_frequency();
+ return count/freq;
+}
void elapsedTimer::add(elapsedTimer t) {
_counter += t._counter;
@@ -59,9 +64,7 @@ void elapsedTimer::stop() {
}
double elapsedTimer::seconds() const {
- double count = (double) _counter;
- double freq = (double) os::elapsed_frequency();
- return count/freq;
+ return TimeHelper::counter_to_seconds(_counter);
}
jlong elapsedTimer::milliseconds() const {
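
A worked example of the tick-to-seconds conversion now centralized in TimeHelper::counter_to_seconds. The 1 GHz frequency is a made-up example value; HotSpot obtains the real rate from os::elapsed_frequency().

    #include <cstdio>

    static double counter_to_seconds(long long counter, long long freq) {
      return (double) counter / (double) freq;
    }

    int main() {
      long long freq    = 1000000000LL;  // 1 GHz tick rate (assumed)
      long long counter = 2500000000LL;  // 2.5e9 elapsed ticks
      printf("%.3f secs\n", counter_to_seconds(counter, freq));  // 2.500 secs
      return 0;
    }
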
@@ -90,9 +93,7 @@ void TimeStamp::update() {
double TimeStamp::seconds() const {
assert(is_updated(), "must not be clear");
jlong new_count = os::elapsed_counter();
- double count = (double) new_count - _counter;
- double freq = (double) os::elapsed_frequency();
- return count/freq;
+ return TimeHelper::counter_to_seconds(new_count - _counter);
}
jlong TimeStamp::milliseconds() const {
@@ -110,19 +111,15 @@ jlong TimeStamp::ticks_since_update() const {
}
TraceTime::TraceTime(const char* title,
- bool doit,
- bool print_cr,
- outputStream* logfile) {
+ bool doit) {
_active = doit;
_verbose = true;
- _print_cr = print_cr;
- _logfile = (logfile != NULL) ? logfile : tty;
if (_active) {
_accum = NULL;
- _logfile->stamp(PrintGCTimeStamps);
- _logfile->print("[%s", title);
- _logfile->flush();
+ tty->stamp(PrintGCTimeStamps);
+ tty->print("[%s", title);
+ tty->flush();
_t.start();
}
}
@@ -130,17 +127,14 @@ TraceTime::TraceTime(const char* title,
TraceTime::TraceTime(const char* title,
elapsedTimer* accumulator,
bool doit,
- bool verbose,
- outputStream* logfile) {
+ bool verbose) {
_active = doit;
_verbose = verbose;
- _print_cr = true;
- _logfile = (logfile != NULL) ? logfile : tty;
if (_active) {
if (_verbose) {
- _logfile->stamp(PrintGCTimeStamps);
- _logfile->print("[%s", title);
- _logfile->flush();
+ tty->stamp(PrintGCTimeStamps);
+ tty->print("[%s", title);
+ tty->flush();
}
_accum = accumulator;
_t.start();
@@ -152,12 +146,8 @@ TraceTime::~TraceTime() {
_t.stop();
if (_accum!=NULL) _accum->add(_t);
if (_verbose) {
- if (_print_cr) {
- _logfile->print_cr(", %3.7f secs]", _t.seconds());
- } else {
- _logfile->print(", %3.7f secs]", _t.seconds());
- }
- _logfile->flush();
+ tty->print_cr(", %3.7f secs]", _t.seconds());
+ tty->flush();
}
}
}
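
A standalone RAII sketch mirroring the slimmed-down TraceTime contract: print "[title" on entry and ", <secs> secs]" on destruction, always to a single stream (the patch removes the per-instance logfile and print_cr options). This is an illustration built on <chrono>, not the HotSpot implementation.

    #include <chrono>
    #include <cstdio>

    class ScopedTrace {
     public:
      explicit ScopedTrace(const char* title, bool active = true) : _active(active) {
        if (_active) {
          printf("[%s", title);
          fflush(stdout);
          _start = std::chrono::steady_clock::now();
        }
      }
      ~ScopedTrace() {
        if (_active) {
          std::chrono::duration<double> secs =
              std::chrono::steady_clock::now() - _start;
          printf(", %3.7f secs]\n", secs.count());
        }
      }
     private:
      bool _active;
      std::chrono::steady_clock::time_point _start;
    };

    int main() {
      ScopedTrace t("example phase");  // prints "[example phase, 0.0000xxx secs]"
      return 0;
    }
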
diff --git a/src/share/vm/runtime/timer.hpp b/src/share/vm/runtime/timer.hpp
index 388a821c1..7e694d5bd 100644
--- a/src/share/vm/runtime/timer.hpp
+++ b/src/share/vm/runtime/timer.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -82,21 +82,16 @@ class TraceTime: public StackObj {
private:
bool _active; // do timing
bool _verbose; // report every timing
- bool _print_cr; // add a CR to the end of the timer report
elapsedTimer _t; // timer
elapsedTimer* _accum; // accumulator
- outputStream* _logfile; // output log file
public:
- // Constuctors
+ // Constructors
TraceTime(const char* title,
- bool doit = true,
- bool print_cr = true,
- outputStream *logfile = NULL);
+ bool doit = true);
TraceTime(const char* title,
elapsedTimer* accumulator,
bool doit = true,
- bool verbose = false,
- outputStream *logfile = NULL );
+ bool verbose = false);
~TraceTime();
// Accessors
@@ -125,4 +120,9 @@ class TraceCPUTime: public StackObj {
~TraceCPUTime();
};
+class TimeHelper {
+ public:
+ static double counter_to_seconds(jlong counter);
+};
+
#endif // SHARE_VM_RUNTIME_TIMER_HPP
diff --git a/src/share/vm/runtime/unhandledOops.hpp b/src/share/vm/runtime/unhandledOops.hpp
index 97fd85431..5f65d1536 100644
--- a/src/share/vm/runtime/unhandledOops.hpp
+++ b/src/share/vm/runtime/unhandledOops.hpp
@@ -48,7 +48,7 @@
class oop;
class Thread;
-class UnhandledOopEntry {
+class UnhandledOopEntry : public CHeapObj<mtThread> {
friend class UnhandledOops;
private:
oop* _oop_ptr;
@@ -62,7 +62,7 @@ class UnhandledOopEntry {
};
-class UnhandledOops {
+class UnhandledOops : public CHeapObj<mtThread> {
friend class Thread;
private:
Thread* _thread;
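
A standalone sketch of the CHeapObj idea behind these two hunks: a base class whose operator new and delete route through a tagged allocator, so every instance is attributed to a memory category (mtThread above). The tag enum and allocator are simplified stand-ins for HotSpot's native memory tracking machinery.

    #include <cstdio>
    #include <cstdlib>

    enum MemTag { mtThread, mtInternal };

    template <MemTag TAG>
    class TaggedHeapObj {
     public:
      void* operator new(size_t size) throw() {
        printf("alloc %zu bytes, tag=%d\n", size, (int) TAG);  // NMT would record this
        return malloc(size);
      }
      void operator delete(void* p) { free(p); }
    };

    class UnhandledOopEntrySketch : public TaggedHeapObj<mtThread> {
      void* _oop_ptr;  // payload stand-in
    };

    int main() {
      delete new UnhandledOopEntrySketch();  // tagged allocation + free
      return 0;
    }
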
diff --git a/src/share/vm/runtime/virtualspace.cpp b/src/share/vm/runtime/virtualspace.cpp
index ba68e887e..98ee76350 100644
--- a/src/share/vm/runtime/virtualspace.cpp
+++ b/src/share/vm/runtime/virtualspace.cpp
@@ -42,8 +42,19 @@
// ReservedSpace
+
+// Dummy constructor
+ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
+ _alignment(0), _special(false), _executable(false) {
+}
+
ReservedSpace::ReservedSpace(size_t size) {
- initialize(size, 0, false, NULL, 0, false);
+ size_t page_size = os::page_size_for_region(size, size, 1);
+ bool large_pages = page_size != (size_t)os::vm_page_size();
+ // Don't force the alignment to be large page aligned,
+ // since that will waste memory.
+ size_t alignment = os::vm_allocation_granularity();
+ initialize(size, alignment, large_pages, NULL, 0, false);
}
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
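
An arithmetic sketch of the decision the new single-argument constructor makes: opt into large pages when the requested size covers at least one of them, but keep the smaller allocation granularity as the alignment so no memory is wasted on large-page alignment padding. The page sizes are typical Linux/x86 values assumed for illustration, and the size comparison approximates os::page_size_for_region().

    #include <cstdio>

    int main() {
      const size_t small_page  = 4 * 1024;         // os::vm_page_size() stand-in
      const size_t large_page  = 2 * 1024 * 1024;  // os::large_page_size() stand-in
      const size_t granularity = 4 * 1024;         // os::vm_allocation_granularity()

      size_t size = 10 * 1024 * 1024;              // requested reservation
      size_t page_size   = (size >= large_page) ? large_page : small_page;
      bool   large_pages = page_size != small_page;  // true here
      size_t alignment   = granularity;  // not large_page: avoids padding waste

      printf("page=%zu large=%d align=%zu\n", page_size, (int) large_pages, alignment);
      return 0;
    }
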
@@ -129,16 +140,18 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
if (special) {
- base = os::reserve_memory_special(size, requested_address, executable);
+ base = os::reserve_memory_special(size, alignment, requested_address, executable);
if (base != NULL) {
if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
// OS ignored requested address. Try different address.
return;
}
- // Check alignment constraints
+ // Check alignment constraints.
assert((uintptr_t) base % alignment == 0,
- "Large pages returned a non-aligned address");
+ err_msg("Large pages returned a non-aligned address, base: "
+ PTR_FORMAT " alignment: " PTR_FORMAT,
+ base, (void*)(uintptr_t)alignment));
_special = true;
} else {
// failed; try to reserve regular memory below
@@ -440,6 +453,42 @@ size_t VirtualSpace::uncommitted_size() const {
return reserved_size() - committed_size();
}
+size_t VirtualSpace::actual_committed_size() const {
+ // Special VirtualSpaces commit all reserved space up front.
+ if (special()) {
+ return reserved_size();
+ }
+
+ size_t committed_low = pointer_delta(_lower_high, _low_boundary, sizeof(char));
+ size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary, sizeof(char));
+ size_t committed_high = pointer_delta(_upper_high, _middle_high_boundary, sizeof(char));
+
+#ifdef ASSERT
+ size_t lower = pointer_delta(_lower_high_boundary, _low_boundary, sizeof(char));
+ size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary, sizeof(char));
+ size_t upper = pointer_delta(_upper_high_boundary, _middle_high_boundary, sizeof(char));
+
+ if (committed_high > 0) {
+ assert(committed_low == lower, "Must be");
+ assert(committed_middle == middle, "Must be");
+ }
+
+ if (committed_middle > 0) {
+ assert(committed_low == lower, "Must be");
+ }
+ if (committed_middle < middle) {
+ assert(committed_high == 0, "Must be");
+ }
+
+ if (committed_low < lower) {
+ assert(committed_high == 0, "Must be");
+ assert(committed_middle == 0, "Must be");
+ }
+#endif
+
+ return committed_low + committed_middle + committed_high;
+}
+
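
A standalone sketch of the three-region accounting in actual_committed_size(): a VirtualSpace tracks a lower, middle and upper region (with different alignment requirements), and the committed total is the sum of how far each region's high-water mark has advanced past its base. Byte offsets stand in for the real boundary pointers and pointer_delta().

    #include <cstdio>

    int main() {
      // Region base offsets within the reservation (illustrative values).
      const size_t low_boundary         = 0;
      const size_t lower_high_boundary  = 64 * 1024;   // lower/middle split
      const size_t middle_high_boundary = 960 * 1024;  // middle/upper split

      // Current high-water marks after some expand_by() calls.
      size_t lower_high  = 64 * 1024;    // lower region fully committed
      size_t middle_high = 512 * 1024;   // middle region partially committed
      size_t upper_high  = middle_high_boundary;  // upper region untouched

      size_t committed = (lower_high  - low_boundary)
                       + (middle_high - lower_high_boundary)
                       + (upper_high  - middle_high_boundary);
      printf("actual committed = %zu bytes\n", committed);  // 64K + 448K + 0
      return 0;
    }
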
bool VirtualSpace::contains(const void* p) const {
return low() <= (const char*) p && (const char*) p < high();
@@ -533,11 +582,13 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
lower_high() + lower_needs <= lower_high_boundary(),
"must not expand beyond region");
if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
- debug_only(warning("os::commit_memory failed"));
+ debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
+ ", lower_needs=" SIZE_FORMAT ", %d) failed",
+ lower_high(), lower_needs, _executable);)
return false;
} else {
_lower_high += lower_needs;
- }
+ }
}
if (middle_needs > 0) {
assert(lower_high_boundary() <= middle_high() &&
@@ -545,7 +596,10 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
"must not expand beyond region");
if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
_executable)) {
- debug_only(warning("os::commit_memory failed"));
+ debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
+ ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
+ ", %d) failed", middle_high(), middle_needs,
+ middle_alignment(), _executable);)
return false;
}
_middle_high += middle_needs;
@@ -555,7 +609,9 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
upper_high() + upper_needs <= upper_high_boundary(),
"must not expand beyond region");
if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
- debug_only(warning("os::commit_memory failed"));
+ debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
+ ", upper_needs=" SIZE_FORMAT ", %d) failed",
+ upper_high(), upper_needs, _executable);)
return false;
} else {
_upper_high += upper_needs;
@@ -698,14 +754,304 @@ void VirtualSpace::check_for_contiguity() {
assert(high() <= upper_high(), "upper high");
}
+void VirtualSpace::print_on(outputStream* out) {
+ out->print ("Virtual space:");
+ if (special()) out->print(" (pinned in memory)");
+ out->cr();
+ out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
+ out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
+ out->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
+ out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
+}
+
void VirtualSpace::print() {
- tty->print ("Virtual space:");
- if (special()) tty->print(" (pinned in memory)");
- tty->cr();
- tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
- tty->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
- tty->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
- tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
+ print_on(tty);
}
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+ do {\
+ if (VerboseInternalVMTests) { \
+ tty->print_cr(__VA_ARGS__); \
+ tty->flush(); \
+ }\
+ } while (false)
+
+class TestReservedSpace : AllStatic {
+ public:
+ static void small_page_write(void* addr, size_t size) {
+ size_t page_size = os::vm_page_size();
+
+ char* end = (char*)addr + size;
+ for (char* p = (char*)addr; p < end; p += page_size) {
+ *p = 1;
+ }
+ }
+
+ static void release_memory_for_test(ReservedSpace rs) {
+ if (rs.special()) {
+ guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
+ } else {
+ guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
+ }
+ }
+
+ static void test_reserved_space1(size_t size, size_t alignment) {
+ test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
+
+ assert(is_size_aligned(size, alignment), "Incorrect input parameters");
+
+ ReservedSpace rs(size, // size
+ alignment, // alignment
+ UseLargePages, // large
+ NULL, // requested_address
+ 0); // noaccess_prefix
+
+ test_log(" rs.special() == %d", rs.special());
+
+ assert(rs.base() != NULL, "Must be");
+ assert(rs.size() == size, "Must be");
+
+ assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
+ assert(is_size_aligned(rs.size(), alignment), "aligned request sizes should always give aligned reserved sizes");
+
+ if (rs.special()) {
+ small_page_write(rs.base(), size);
+ }
+
+ release_memory_for_test(rs);
+ }
+
+ static void test_reserved_space2(size_t size) {
+ test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
+
+ assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+
+ ReservedSpace rs(size);
+
+ test_log(" rs.special() == %d", rs.special());
+
+ assert(rs.base() != NULL, "Must be");
+ assert(rs.size() == size, "Must be");
+
+ if (rs.special()) {
+ small_page_write(rs.base(), size);
+ }
+
+ release_memory_for_test(rs);
+ }
+
+ static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
+ test_log("test_reserved_space3(%p, %p, %d)",
+ (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
+
+ assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+ assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
+
+ bool large = maybe_large && UseLargePages && size >= os::large_page_size();
+
+ ReservedSpace rs(size, alignment, large, false);
+
+ test_log(" rs.special() == %d", rs.special());
+
+ assert(rs.base() != NULL, "Must be");
+ assert(rs.size() == size, "Must be");
+
+ if (rs.special()) {
+ small_page_write(rs.base(), size);
+ }
+
+ release_memory_for_test(rs);
+ }
+
+
+ static void test_reserved_space1() {
+ size_t size = 2 * 1024 * 1024;
+ size_t ag = os::vm_allocation_granularity();
+
+ test_reserved_space1(size, ag);
+ test_reserved_space1(size * 2, ag);
+ test_reserved_space1(size * 10, ag);
+ }
+
+ static void test_reserved_space2() {
+ size_t size = 2 * 1024 * 1024;
+ size_t ag = os::vm_allocation_granularity();
+
+ test_reserved_space2(size * 1);
+ test_reserved_space2(size * 2);
+ test_reserved_space2(size * 10);
+ test_reserved_space2(ag);
+ test_reserved_space2(size - ag);
+ test_reserved_space2(size);
+ test_reserved_space2(size + ag);
+ test_reserved_space2(size * 2);
+ test_reserved_space2(size * 2 - ag);
+ test_reserved_space2(size * 2 + ag);
+ test_reserved_space2(size * 3);
+ test_reserved_space2(size * 3 - ag);
+ test_reserved_space2(size * 3 + ag);
+ test_reserved_space2(size * 10);
+ test_reserved_space2(size * 10 + size / 2);
+ }
+
+ static void test_reserved_space3() {
+ size_t ag = os::vm_allocation_granularity();
+
+ test_reserved_space3(ag, ag , false);
+ test_reserved_space3(ag * 2, ag , false);
+ test_reserved_space3(ag * 3, ag , false);
+ test_reserved_space3(ag * 2, ag * 2, false);
+ test_reserved_space3(ag * 4, ag * 2, false);
+ test_reserved_space3(ag * 8, ag * 2, false);
+ test_reserved_space3(ag * 4, ag * 4, false);
+ test_reserved_space3(ag * 8, ag * 4, false);
+ test_reserved_space3(ag * 16, ag * 4, false);
+
+ if (UseLargePages) {
+ size_t lp = os::large_page_size();
+
+ // Without large pages
+ test_reserved_space3(lp, ag * 4, false);
+ test_reserved_space3(lp * 2, ag * 4, false);
+ test_reserved_space3(lp * 4, ag * 4, false);
+ test_reserved_space3(lp, lp , false);
+ test_reserved_space3(lp * 2, lp , false);
+ test_reserved_space3(lp * 3, lp , false);
+ test_reserved_space3(lp * 2, lp * 2, false);
+ test_reserved_space3(lp * 4, lp * 2, false);
+ test_reserved_space3(lp * 8, lp * 2, false);
+
+ // With large pages
+ test_reserved_space3(lp, ag * 4 , true);
+ test_reserved_space3(lp * 2, ag * 4, true);
+ test_reserved_space3(lp * 4, ag * 4, true);
+ test_reserved_space3(lp, lp , true);
+ test_reserved_space3(lp * 2, lp , true);
+ test_reserved_space3(lp * 3, lp , true);
+ test_reserved_space3(lp * 2, lp * 2, true);
+ test_reserved_space3(lp * 4, lp * 2, true);
+ test_reserved_space3(lp * 8, lp * 2, true);
+ }
+ }
+
+ static void test_reserved_space() {
+ test_reserved_space1();
+ test_reserved_space2();
+ test_reserved_space3();
+ }
+};
+
+void TestReservedSpace_test() {
+ TestReservedSpace::test_reserved_space();
+}
+
+#define assert_equals(actual, expected) \
+ assert(actual == expected, \
+ err_msg("Got " SIZE_FORMAT " expected " \
+ SIZE_FORMAT, actual, expected));
+
+#define assert_ge(value1, value2) \
+ assert(value1 >= value2, \
+ err_msg("'" #value1 "': " SIZE_FORMAT " '" \
+ #value2 "': " SIZE_FORMAT, value1, value2));
+
+#define assert_lt(value1, value2) \
+ assert(value1 < value2, \
+ err_msg("'" #value1 "': " SIZE_FORMAT " '" \
+ #value2 "': " SIZE_FORMAT, value1, value2));
+
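
A standalone sketch of the stringizing trick behind the assert_equals/assert_ge/assert_lt helpers above: "#value" turns a macro argument into its source text, so a failure message can show both the expression and the number it evaluated to. The macro name and output format here are illustrative, not HotSpot's.

    #include <cstdio>
    #include <cstdlib>

    #define check_ge(value1, value2)                                  \
      do {                                                            \
        if (!((value1) >= (value2))) {                                \
          fprintf(stderr, "'" #value1 "': %zu '" #value2 "': %zu\n",  \
                  (size_t)(value1), (size_t)(value2));                \
          abort();                                                    \
        }                                                             \
      } while (0)

    int main() {
      size_t committed = 8192, requested = 4096;
      check_ge(committed, requested);  // passes; a failure would print both sides
      return 0;
    }
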
+
+class TestVirtualSpace : AllStatic {
+ public:
+ static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size) {
+ size_t granularity = os::vm_allocation_granularity();
+ size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
+
+ ReservedSpace reserved(reserve_size_aligned);
+
+ assert(reserved.is_reserved(), "Must be");
+
+ VirtualSpace vs;
+ bool initialized = vs.initialize(reserved, 0);
+ assert(initialized, "Failed to initialize VirtualSpace");
+
+ vs.expand_by(commit_size, false);
+
+ if (vs.special()) {
+ assert_equals(vs.actual_committed_size(), reserve_size_aligned);
+ } else {
+ assert_ge(vs.actual_committed_size(), commit_size);
+ // Approximate the commit granularity.
+ size_t commit_granularity = UseLargePages ? os::large_page_size() : os::vm_page_size();
+ assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
+ }
+
+ reserved.release();
+ }
+
+ static void test_virtual_space_actual_committed_space_one_large_page() {
+ if (!UseLargePages) {
+ return;
+ }
+
+ size_t large_page_size = os::large_page_size();
+
+ ReservedSpace reserved(large_page_size, large_page_size, true, false);
+
+ assert(reserved.is_reserved(), "Must be");
+
+ VirtualSpace vs;
+ bool initialized = vs.initialize(reserved, 0);
+ assert(initialized, "Failed to initialize VirtualSpace");
+
+ vs.expand_by(large_page_size, false);
+
+ assert_equals(vs.actual_committed_size(), large_page_size);
+
+ reserved.release();
+ }
+
+ static void test_virtual_space_actual_committed_space() {
+ test_virtual_space_actual_committed_space(4 * K, 0);
+ test_virtual_space_actual_committed_space(4 * K, 4 * K);
+ test_virtual_space_actual_committed_space(8 * K, 0);
+ test_virtual_space_actual_committed_space(8 * K, 4 * K);
+ test_virtual_space_actual_committed_space(8 * K, 8 * K);
+ test_virtual_space_actual_committed_space(12 * K, 0);
+ test_virtual_space_actual_committed_space(12 * K, 4 * K);
+ test_virtual_space_actual_committed_space(12 * K, 8 * K);
+ test_virtual_space_actual_committed_space(12 * K, 12 * K);
+ test_virtual_space_actual_committed_space(64 * K, 0);
+ test_virtual_space_actual_committed_space(64 * K, 32 * K);
+ test_virtual_space_actual_committed_space(64 * K, 64 * K);
+ test_virtual_space_actual_committed_space(2 * M, 0);
+ test_virtual_space_actual_committed_space(2 * M, 4 * K);
+ test_virtual_space_actual_committed_space(2 * M, 64 * K);
+ test_virtual_space_actual_committed_space(2 * M, 1 * M);
+ test_virtual_space_actual_committed_space(2 * M, 2 * M);
+ test_virtual_space_actual_committed_space(10 * M, 0);
+ test_virtual_space_actual_committed_space(10 * M, 4 * K);
+ test_virtual_space_actual_committed_space(10 * M, 8 * K);
+ test_virtual_space_actual_committed_space(10 * M, 1 * M);
+ test_virtual_space_actual_committed_space(10 * M, 2 * M);
+ test_virtual_space_actual_committed_space(10 * M, 5 * M);
+ test_virtual_space_actual_committed_space(10 * M, 10 * M);
+ }
+
+ static void test_virtual_space() {
+ test_virtual_space_actual_committed_space();
+ test_virtual_space_actual_committed_space_one_large_page();
+ }
+};
+
+void TestVirtualSpace_test() {
+ TestVirtualSpace::test_virtual_space();
+}
+
+#endif // PRODUCT
+
#endif
diff --git a/src/share/vm/runtime/virtualspace.hpp b/src/share/vm/runtime/virtualspace.hpp
index 0a959e900..02b14734a 100644
--- a/src/share/vm/runtime/virtualspace.hpp
+++ b/src/share/vm/runtime/virtualspace.hpp
@@ -53,6 +53,7 @@ class ReservedSpace VALUE_OBJ_CLASS_SPEC {
public:
// Constructor
+ ReservedSpace();
ReservedSpace(size_t size);
ReservedSpace(size_t size, size_t alignment, bool large,
char* requested_address = NULL,
@@ -182,11 +183,16 @@ class VirtualSpace VALUE_OBJ_CLASS_SPEC {
// Destruction
~VirtualSpace();
- // Testers (all sizes are byte sizes)
- size_t committed_size() const;
- size_t reserved_size() const;
+ // Reserved memory
+ size_t reserved_size() const;
+ // Actually committed OS memory
+ size_t actual_committed_size() const;
+ // Memory used/expanded in this virtual space
+ size_t committed_size() const;
+ // Memory left to use/expand in this virtual space
size_t uncommitted_size() const;
- bool contains(const void* p) const;
+
+ bool contains(const void* p) const;
// Operations
// returns true on success, false otherwise
@@ -197,7 +203,8 @@ class VirtualSpace VALUE_OBJ_CLASS_SPEC {
void check_for_contiguity() PRODUCT_RETURN;
// Debugging
- void print() PRODUCT_RETURN;
+ void print_on(outputStream* out) PRODUCT_RETURN;
+ void print();
};
#endif // SHARE_VM_RUNTIME_VIRTUALSPACE_HPP
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
index f1e3a9cdc..99afd7beb 100644
--- a/src/share/vm/runtime/vmStructs.cpp
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -60,6 +60,7 @@
#include "memory/generationSpec.hpp"
#include "memory/heap.hpp"
#include "memory/metablock.hpp"
+#include "memory/referenceType.hpp"
#include "memory/space.hpp"
#include "memory/tenuredGeneration.hpp"
#include "memory/universe.hpp"
@@ -271,7 +272,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
unchecked_c2_static_field) \
\
/******************************************************************/ \
- /* OopDesc and Klass hierarchies (NOTE: MethodData* incomplete) */ \
+ /* OopDesc and Klass hierarchies (NOTE: MethodData* incomplete) */ \
/******************************************************************/ \
\
volatile_nonstatic_field(oopDesc, _mark, markOop) \
@@ -282,30 +283,27 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
volatile_nonstatic_field(ArrayKlass, _higher_dimension, Klass*) \
volatile_nonstatic_field(ArrayKlass, _lower_dimension, Klass*) \
nonstatic_field(ArrayKlass, _vtable_len, int) \
- nonstatic_field(ArrayKlass, _alloc_size, juint) \
nonstatic_field(ArrayKlass, _component_mirror, oop) \
- nonstatic_field(CompiledICHolder, _holder_method, Method*) \
+ nonstatic_field(CompiledICHolder, _holder_method, Method*) \
nonstatic_field(CompiledICHolder, _holder_klass, Klass*) \
nonstatic_field(ConstantPool, _tags, Array<u1>*) \
- nonstatic_field(ConstantPool, _cache, ConstantPoolCache*) \
+ nonstatic_field(ConstantPool, _cache, ConstantPoolCache*) \
nonstatic_field(ConstantPool, _pool_holder, InstanceKlass*) \
nonstatic_field(ConstantPool, _operands, Array<u2>*) \
nonstatic_field(ConstantPool, _length, int) \
nonstatic_field(ConstantPool, _resolved_references, jobject) \
nonstatic_field(ConstantPool, _reference_map, Array<u2>*) \
nonstatic_field(ConstantPoolCache, _length, int) \
- nonstatic_field(ConstantPoolCache, _constant_pool, ConstantPool*) \
+ nonstatic_field(ConstantPoolCache, _constant_pool, ConstantPool*) \
nonstatic_field(InstanceKlass, _array_klasses, Klass*) \
- nonstatic_field(InstanceKlass, _methods, Array<Method*>*) \
+ nonstatic_field(InstanceKlass, _methods, Array<Method*>*) \
nonstatic_field(InstanceKlass, _local_interfaces, Array<Klass*>*) \
nonstatic_field(InstanceKlass, _transitive_interfaces, Array<Klass*>*) \
nonstatic_field(InstanceKlass, _fields, Array<u2>*) \
nonstatic_field(InstanceKlass, _java_fields_count, u2) \
- nonstatic_field(InstanceKlass, _constants, ConstantPool*) \
+ nonstatic_field(InstanceKlass, _constants, ConstantPool*) \
nonstatic_field(InstanceKlass, _class_loader_data, ClassLoaderData*) \
- nonstatic_field(InstanceKlass, _protection_domain, oop) \
- nonstatic_field(InstanceKlass, _signers, objArrayOop) \
- nonstatic_field(InstanceKlass, _source_file_name, Symbol*) \
+ nonstatic_field(InstanceKlass, _source_file_name_index, u2) \
nonstatic_field(InstanceKlass, _source_debug_extension, char*) \
nonstatic_field(InstanceKlass, _inner_classes, Array<jushort>*) \
nonstatic_field(InstanceKlass, _nonstatic_field_size, int) \
@@ -324,9 +322,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(InstanceKlass, _jni_ids, JNIid*) \
nonstatic_field(InstanceKlass, _osr_nmethods_head, nmethod*) \
nonstatic_field(InstanceKlass, _breakpoints, BreakpointInfo*) \
- nonstatic_field(InstanceKlass, _generic_signature, Symbol*) \
+ nonstatic_field(InstanceKlass, _generic_signature_index, u2) \
nonstatic_field(InstanceKlass, _methods_jmethod_ids, jmethodID*) \
- nonstatic_field(InstanceKlass, _methods_cached_itable_indices, int*) \
volatile_nonstatic_field(InstanceKlass, _idnum_allocated_count, u2) \
nonstatic_field(InstanceKlass, _annotations, Annotations*) \
nonstatic_field(InstanceKlass, _dependencies, nmethodBucket*) \
@@ -341,23 +338,29 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(Klass, _java_mirror, oop) \
nonstatic_field(Klass, _modifier_flags, jint) \
nonstatic_field(Klass, _super, Klass*) \
+ nonstatic_field(Klass, _subklass, Klass*) \
nonstatic_field(Klass, _layout_helper, jint) \
nonstatic_field(Klass, _name, Symbol*) \
nonstatic_field(Klass, _access_flags, AccessFlags) \
- nonstatic_field(Klass, _subklass, Klass*) \
+ nonstatic_field(Klass, _prototype_header, markOop) \
nonstatic_field(Klass, _next_sibling, Klass*) \
- nonstatic_field(Klass, _alloc_count, juint) \
+ nonstatic_field(vtableEntry, _method, Method*) \
nonstatic_field(MethodData, _size, int) \
- nonstatic_field(MethodData, _method, Method*) \
+ nonstatic_field(MethodData, _method, Method*) \
nonstatic_field(MethodData, _data_size, int) \
nonstatic_field(MethodData, _data[0], intptr_t) \
nonstatic_field(MethodData, _nof_decompiles, uint) \
nonstatic_field(MethodData, _nof_overflow_recompiles, uint) \
nonstatic_field(MethodData, _nof_overflow_traps, uint) \
+ nonstatic_field(MethodData, _trap_hist._array[0], u1) \
nonstatic_field(MethodData, _eflags, intx) \
nonstatic_field(MethodData, _arg_local, intx) \
nonstatic_field(MethodData, _arg_stack, intx) \
nonstatic_field(MethodData, _arg_returned, intx) \
+ nonstatic_field(DataLayout, _header._struct._tag, u1) \
+ nonstatic_field(DataLayout, _header._struct._flags, u1) \
+ nonstatic_field(DataLayout, _header._struct._bci, u2) \
+ nonstatic_field(DataLayout, _cells[0], intptr_t) \
nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \
nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \
nonstatic_field(MethodCounters, _number_of_breakpoints, u2) \
@@ -369,6 +372,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(Method, _access_flags, AccessFlags) \
nonstatic_field(Method, _vtable_index, int) \
nonstatic_field(Method, _method_size, u2) \
+ nonstatic_field(Method, _intrinsic_id, u1) \
nonproduct_nonstatic_field(Method, _compiled_invocation_count, int) \
volatile_nonstatic_field(Method, _code, nmethod*) \
nonstatic_field(Method, _i2i_entry, address) \
@@ -389,7 +393,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(ConstMethod, _size_of_parameters, u2) \
nonstatic_field(ObjArrayKlass, _element_klass, Klass*) \
nonstatic_field(ObjArrayKlass, _bottom_klass, Klass*) \
- volatile_nonstatic_field(Symbol, _refcount, int) \
+ volatile_nonstatic_field(Symbol, _refcount, short) \
nonstatic_field(Symbol, _identity_hash, int) \
nonstatic_field(Symbol, _length, unsigned short) \
unchecked_nonstatic_field(Symbol, _body, sizeof(jbyte)) /* NOTE: no type */ \
@@ -447,10 +451,6 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
static_field(Universe, _main_thread_group, oop) \
static_field(Universe, _system_thread_group, oop) \
static_field(Universe, _the_empty_class_klass_array, objArrayOop) \
- static_field(Universe, _out_of_memory_error_java_heap, oop) \
- static_field(Universe, _out_of_memory_error_perm_gen, oop) \
- static_field(Universe, _out_of_memory_error_array_size, oop) \
- static_field(Universe, _out_of_memory_error_gc_overhead_limit, oop) \
static_field(Universe, _null_ptr_exception_instance, oop) \
static_field(Universe, _arithmetic_exception_instance, oop) \
static_field(Universe, _vm_exception, oop) \
@@ -459,12 +459,19 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
static_field(Universe, _bootstrapping, bool) \
static_field(Universe, _fully_initialized, bool) \
static_field(Universe, _verify_count, int) \
+ static_field(Universe, _non_oop_bits, intptr_t) \
static_field(Universe, _narrow_oop._base, address) \
static_field(Universe, _narrow_oop._shift, int) \
static_field(Universe, _narrow_oop._use_implicit_null_checks, bool) \
static_field(Universe, _narrow_klass._base, address) \
static_field(Universe, _narrow_klass._shift, int) \
\
+ /******/ \
+ /* os */ \
+ /******/ \
+ \
+ static_field(os, _polling_page, address) \
+ \
/**********************************************************************************/ \
/* Generation and Space hierarchies */ \
/**********************************************************************************/ \
@@ -472,6 +479,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
unchecked_nonstatic_field(ageTable, sizes, sizeof(ageTable::sizes)) \
\
nonstatic_field(BarrierSet, _max_covered_regions, int) \
+ nonstatic_field(BarrierSet, _kind, BarrierSet::Name) \
nonstatic_field(BlockOffsetTable, _bottom, HeapWord*) \
nonstatic_field(BlockOffsetTable, _end, HeapWord*) \
\
@@ -511,6 +519,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(CollectedHeap, _barrier_set, BarrierSet*) \
nonstatic_field(CollectedHeap, _defer_initial_card_mark, bool) \
nonstatic_field(CollectedHeap, _is_gc_active, bool) \
+ nonstatic_field(CollectedHeap, _total_collections, unsigned int) \
nonstatic_field(CompactibleSpace, _compaction_top, HeapWord*) \
nonstatic_field(CompactibleSpace, _first_dead, HeapWord*) \
nonstatic_field(CompactibleSpace, _end_of_live, HeapWord*) \
@@ -521,7 +530,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(ContiguousSpace, _saved_mark_word, HeapWord*) \
\
nonstatic_field(DefNewGeneration, _next_gen, Generation*) \
- nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
+ nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
nonstatic_field(DefNewGeneration, _age_table, ageTable) \
nonstatic_field(DefNewGeneration, _eden_space, EdenSpace*) \
nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
@@ -568,6 +577,11 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(ThreadLocalAllocBuffer, _desired_size, size_t) \
nonstatic_field(ThreadLocalAllocBuffer, _refill_waste_limit, size_t) \
static_field(ThreadLocalAllocBuffer, _target_refills, unsigned) \
+ nonstatic_field(ThreadLocalAllocBuffer, _number_of_refills, unsigned) \
+ nonstatic_field(ThreadLocalAllocBuffer, _fast_refill_waste, unsigned) \
+ nonstatic_field(ThreadLocalAllocBuffer, _slow_refill_waste, unsigned) \
+ nonstatic_field(ThreadLocalAllocBuffer, _gc_waste, unsigned) \
+ nonstatic_field(ThreadLocalAllocBuffer, _slow_allocations, unsigned) \
nonstatic_field(VirtualSpace, _low_boundary, char*) \
nonstatic_field(VirtualSpace, _high_boundary, char*) \
nonstatic_field(VirtualSpace, _low, char*) \
@@ -729,6 +743,13 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
\
static_field(ClassLoaderDataGraph, _head, ClassLoaderData*) \
\
+ /**********/ \
+ /* Arrays */ \
+ /**********/ \
+ \
+ nonstatic_field(Array<Klass*>, _length, int) \
+ nonstatic_field(Array<Klass*>, _data[0], Klass*) \
+ \
/*******************/ \
/* GrowableArrays */ \
/*******************/ \
@@ -736,7 +757,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(GenericGrowableArray, _len, int) \
nonstatic_field(GenericGrowableArray, _max, int) \
nonstatic_field(GenericGrowableArray, _arena, Arena*) \
- nonstatic_field(GrowableArray<int>, _data, int*) \
+ nonstatic_field(GrowableArray<int>, _data, int*) \
\
/********************************/ \
/* CodeCache (NOTE: incomplete) */ \
@@ -779,7 +800,20 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
/* StubRoutines (NOTE: incomplete) */ \
/***********************************/ \
\
+ static_field(StubRoutines, _verify_oop_count, jint) \
static_field(StubRoutines, _call_stub_return_address, address) \
+ static_field(StubRoutines, _aescrypt_encryptBlock, address) \
+ static_field(StubRoutines, _aescrypt_decryptBlock, address) \
+ static_field(StubRoutines, _cipherBlockChaining_encryptAESCrypt, address) \
+ static_field(StubRoutines, _cipherBlockChaining_decryptAESCrypt, address) \
+ static_field(StubRoutines, _updateBytesCRC32, address) \
+ static_field(StubRoutines, _crc_table_adr, address) \
+ \
+ /*****************/ \
+ /* SharedRuntime */ \
+ /*****************/ \
+ \
+ static_field(SharedRuntime, _ic_miss_blob, RuntimeStub*) \
\
/***************************************/ \
/* PcDesc and other compiled code info */ \
@@ -869,6 +903,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
volatile_nonstatic_field(Thread, _suspend_flags, uint32_t) \
nonstatic_field(Thread, _active_handles, JNIHandleBlock*) \
nonstatic_field(Thread, _tlab, ThreadLocalAllocBuffer) \
+ nonstatic_field(Thread, _allocated_bytes, jlong) \
nonstatic_field(Thread, _current_pending_monitor, ObjectMonitor*) \
nonstatic_field(Thread, _current_pending_monitor_is_from_java, bool) \
nonstatic_field(Thread, _current_waiting_monitor, ObjectMonitor*) \
@@ -882,6 +917,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(JavaThread, _pending_async_exception, oop) \
volatile_nonstatic_field(JavaThread, _exception_oop, oop) \
volatile_nonstatic_field(JavaThread, _exception_pc, address) \
+ volatile_nonstatic_field(JavaThread, _is_method_handle_return, int) \
nonstatic_field(JavaThread, _is_compiling, bool) \
nonstatic_field(JavaThread, _special_runtime_exit_condition, JavaThread::AsyncRequests) \
nonstatic_field(JavaThread, _saved_exception_pc, address) \
@@ -891,6 +927,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
nonstatic_field(JavaThread, _stack_size, size_t) \
nonstatic_field(JavaThread, _vframe_array_head, vframeArray*) \
nonstatic_field(JavaThread, _vframe_array_last, vframeArray*) \
+ nonstatic_field(JavaThread, _satb_mark_queue, ObjPtrQueue) \
+ nonstatic_field(JavaThread, _dirty_card_queue, DirtyCardQueue) \
nonstatic_field(Thread, _resource_area, ResourceArea*) \
nonstatic_field(CompilerThread, _env, ciEnv*) \
\
@@ -1066,6 +1104,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
c2_nonstatic_field(Compile, _save_argument_registers, const bool) \
c2_nonstatic_field(Compile, _subsume_loads, const bool) \
c2_nonstatic_field(Compile, _do_escape_analysis, const bool) \
+ c2_nonstatic_field(Compile, _eliminate_boxing, const bool) \
c2_nonstatic_field(Compile, _ilt, InlineTree*) \
\
c2_nonstatic_field(InlineTree, _caller_jvms, JVMState*) \
@@ -1111,10 +1150,10 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
\
c2_nonstatic_field(MachCallRuntimeNode, _name, const char*) \
\
- c2_nonstatic_field(PhaseCFG, _num_blocks, uint) \
+ c2_nonstatic_field(PhaseCFG, _number_of_blocks, uint) \
c2_nonstatic_field(PhaseCFG, _blocks, Block_List) \
- c2_nonstatic_field(PhaseCFG, _bbs, Block_Array) \
- c2_nonstatic_field(PhaseCFG, _broot, Block*) \
+ c2_nonstatic_field(PhaseCFG, _node_to_block_mapping, Block_Array) \
+ c2_nonstatic_field(PhaseCFG, _root_block, Block*) \
\
c2_nonstatic_field(PhaseRegAlloc, _node_regs, OptoRegPair*) \
c2_nonstatic_field(PhaseRegAlloc, _node_regs_max_index, uint) \
@@ -1202,7 +1241,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
unchecked_nonstatic_field(Array<int>, _data, sizeof(int)) \
unchecked_nonstatic_field(Array<u1>, _data, sizeof(u1)) \
unchecked_nonstatic_field(Array<u2>, _data, sizeof(u2)) \
- unchecked_nonstatic_field(Array<Method*>, _data, sizeof(Method*)) \
+ unchecked_nonstatic_field(Array<Method*>, _data, sizeof(Method*)) \
unchecked_nonstatic_field(Array<Klass*>, _data, sizeof(Klass*)) \
\
/*********************************/ \
@@ -1218,7 +1257,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
/* Miscellaneous fields */ \
/************************/ \
\
- nonstatic_field(CompileTask, _method, Method*) \
+ nonstatic_field(CompileTask, _method, Method*) \
nonstatic_field(CompileTask, _osr_bci, int) \
nonstatic_field(CompileTask, _comp_level, int) \
nonstatic_field(CompileTask, _compile_id, uint) \
@@ -1232,7 +1271,11 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
\
nonstatic_field(vframeArrayElement, _frame, frame) \
nonstatic_field(vframeArrayElement, _bci, int) \
- nonstatic_field(vframeArrayElement, _method, Method*) \
+ nonstatic_field(vframeArrayElement, _method, Method*) \
+ \
+ nonstatic_field(PtrQueue, _active, bool) \
+ nonstatic_field(PtrQueue, _buf, void**) \
+ nonstatic_field(PtrQueue, _index, size_t) \
\
nonstatic_field(AccessFlags, _flags, jint) \
nonstatic_field(elapsedTimer, _counter, jlong) \
@@ -1378,7 +1421,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
/* MetadataOopDesc hierarchy (NOTE: some missing) */ \
/**************************************************/ \
\
- declare_toplevel_type(CompiledICHolder) \
+ declare_toplevel_type(CompiledICHolder) \
declare_toplevel_type(MetaspaceObj) \
declare_type(Metadata, MetaspaceObj) \
declare_type(Klass, Metadata) \
@@ -1389,17 +1432,20 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_type(InstanceClassLoaderKlass, InstanceKlass) \
declare_type(InstanceMirrorKlass, InstanceKlass) \
declare_type(InstanceRefKlass, InstanceKlass) \
- declare_type(ConstantPool, Metadata) \
- declare_type(ConstantPoolCache, MetaspaceObj) \
- declare_type(MethodData, Metadata) \
- declare_type(Method, Metadata) \
- declare_type(MethodCounters, MetaspaceObj) \
- declare_type(ConstMethod, MetaspaceObj) \
+ declare_type(ConstantPool, Metadata) \
+ declare_type(ConstantPoolCache, MetaspaceObj) \
+ declare_type(MethodData, Metadata) \
+ declare_type(Method, Metadata) \
+ declare_type(MethodCounters, MetaspaceObj) \
+ declare_type(ConstMethod, MetaspaceObj) \
+ \
+ declare_toplevel_type(vtableEntry) \
\
declare_toplevel_type(Symbol) \
declare_toplevel_type(Symbol*) \
declare_toplevel_type(volatile Metadata*) \
\
+ declare_toplevel_type(DataLayout) \
declare_toplevel_type(nmethodBucket) \
\
/********/ \
@@ -1447,6 +1493,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_type(ModRefBarrierSet, BarrierSet) \
declare_type(CardTableModRefBS, ModRefBarrierSet) \
declare_type(CardTableModRefBSForCTRS, CardTableModRefBS) \
+ declare_toplevel_type(BarrierSet::Name) \
declare_toplevel_type(GenRemSet) \
declare_type(CardTableRS, GenRemSet) \
declare_toplevel_type(BlockOffsetSharedArray) \
@@ -1465,6 +1512,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_toplevel_type(ThreadLocalAllocBuffer) \
declare_toplevel_type(VirtualSpace) \
declare_toplevel_type(WaterMark) \
+ declare_toplevel_type(ObjPtrQueue) \
+ declare_toplevel_type(DirtyCardQueue) \
\
/* Pointers to Garbage Collection types */ \
\
@@ -2083,6 +2132,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_toplevel_type(StubQueue*) \
declare_toplevel_type(Thread*) \
declare_toplevel_type(Universe) \
+ declare_toplevel_type(os) \
declare_toplevel_type(vframeArray) \
declare_toplevel_type(vframeArrayElement) \
declare_toplevel_type(Annotations*) \
@@ -2091,6 +2141,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
/* Miscellaneous types */ \
/***************/ \
\
+ declare_toplevel_type(PtrQueue) \
+ \
/* freelist */ \
declare_toplevel_type(FreeChunk*) \
declare_toplevel_type(Metablock*) \
@@ -2121,6 +2173,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
/* Useful globals */ \
/******************/ \
\
+ declare_preprocessor_constant("ASSERT", DEBUG_ONLY(1) NOT_DEBUG(0)) \
\
/**************/ \
/* Stack bias */ \
@@ -2137,6 +2190,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_constant(BytesPerWord) \
declare_constant(BytesPerLong) \
\
+ declare_constant(LogKlassAlignmentInBytes) \
+ \
/********************************************/ \
/* Generation and Space Hierarchy Constants */ \
/********************************************/ \
@@ -2145,6 +2200,9 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
\
declare_constant(BarrierSet::ModRef) \
declare_constant(BarrierSet::CardTableModRef) \
+ declare_constant(BarrierSet::CardTableExtension) \
+ declare_constant(BarrierSet::G1SATBCT) \
+ declare_constant(BarrierSet::G1SATBCTLogging) \
declare_constant(BarrierSet::Other) \
\
declare_constant(BlockOffsetSharedArray::LogN) \
@@ -2263,8 +2321,11 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_constant(Klass::_primary_super_limit) \
declare_constant(Klass::_lh_instance_slow_path_bit) \
declare_constant(Klass::_lh_log2_element_size_shift) \
+ declare_constant(Klass::_lh_log2_element_size_mask) \
declare_constant(Klass::_lh_element_type_shift) \
+ declare_constant(Klass::_lh_element_type_mask) \
declare_constant(Klass::_lh_header_size_shift) \
+ declare_constant(Klass::_lh_header_size_mask) \
declare_constant(Klass::_lh_array_tag_shift) \
declare_constant(Klass::_lh_array_tag_type_value) \
declare_constant(Klass::_lh_array_tag_obj_value) \
@@ -2283,6 +2344,12 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_constant(ConstMethod::_has_default_annotations) \
declare_constant(ConstMethod::_has_type_annotations) \
\
+ /**************/ \
+ /* DataLayout */ \
+ /**************/ \
+ \
+ declare_constant(DataLayout::cell_size) \
+ \
/*************************************/ \
/* InstanceKlass enum */ \
/*************************************/ \
@@ -2417,6 +2484,13 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_constant(Deoptimization::Reason_LIMIT) \
declare_constant(Deoptimization::Reason_RECORDED_LIMIT) \
\
+ declare_constant(Deoptimization::Action_none) \
+ declare_constant(Deoptimization::Action_maybe_recompile) \
+ declare_constant(Deoptimization::Action_reinterpret) \
+ declare_constant(Deoptimization::Action_make_not_entrant) \
+ declare_constant(Deoptimization::Action_make_not_compilable) \
+ declare_constant(Deoptimization::Action_LIMIT) \
+ \
/*********************/ \
/* Matcher (C2 only) */ \
/*********************/ \
@@ -2483,6 +2557,16 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_constant(vmSymbols::FIRST_SID) \
declare_constant(vmSymbols::SID_LIMIT) \
\
+ /****************/ \
+ /* vmIntrinsics */ \
+ /****************/ \
+ \
+ declare_constant(vmIntrinsics::_invokeBasic) \
+ declare_constant(vmIntrinsics::_linkToVirtual) \
+ declare_constant(vmIntrinsics::_linkToStatic) \
+ declare_constant(vmIntrinsics::_linkToSpecial) \
+ declare_constant(vmIntrinsics::_linkToInterface) \
+ \
/********************************/ \
/* Calling convention constants */ \
/********************************/ \
@@ -2530,6 +2614,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
declare_constant(markOopDesc::biased_lock_bit_in_place) \
declare_constant(markOopDesc::age_mask) \
declare_constant(markOopDesc::age_mask_in_place) \
+ declare_constant(markOopDesc::epoch_mask) \
+ declare_constant(markOopDesc::epoch_mask_in_place) \
declare_constant(markOopDesc::hash_mask) \
declare_constant(markOopDesc::hash_mask_in_place) \
declare_constant(markOopDesc::biased_lock_alignment) \
@@ -3128,15 +3214,15 @@ static int recursiveFindType(VMTypeEntry* origtypes, const char* typeName, bool
// Search for the base type by peeling off const and *
size_t len = strlen(typeName);
if (typeName[len-1] == '*') {
- char * s = new char[len];
+ char * s = NEW_C_HEAP_ARRAY(char, len, mtInternal);
strncpy(s, typeName, len - 1);
s[len-1] = '\0';
// tty->print_cr("checking \"%s\" for \"%s\"", s, typeName);
if (recursiveFindType(origtypes, s, true) == 1) {
- delete [] s;
+ FREE_C_HEAP_ARRAY(char, s, mtInternal);
return 1;
}
- delete [] s;
+ FREE_C_HEAP_ARRAY(char, s, mtInternal);
}
const char* start = NULL;
if (strstr(typeName, "GrowableArray<") == typeName) {
@@ -3147,15 +3233,15 @@ static int recursiveFindType(VMTypeEntry* origtypes, const char* typeName, bool
if (start != NULL) {
const char * end = strrchr(typeName, '>');
int len = end - start + 1;
- char * s = new char[len];
+ char * s = NEW_C_HEAP_ARRAY(char, len, mtInternal);
strncpy(s, start, len - 1);
s[len-1] = '\0';
// tty->print_cr("checking \"%s\" for \"%s\"", s, typeName);
if (recursiveFindType(origtypes, s, true) == 1) {
- delete [] s;
+ FREE_C_HEAP_ARRAY(char, s, mtInternal);
return 1;
}
- delete [] s;
+ FREE_C_HEAP_ARRAY(char, s, mtInternal);
}
if (strstr(typeName, "const ") == typeName) {
const char * s = typeName + strlen("const ");
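The change in this hunk swaps raw new[]/delete[] for the C-heap macros so the
temporary type-name buffers become visible to Native Memory Tracking. A minimal
sketch of the pairing, using only the names the hunk itself uses:

    // NMT-aware allocation pair (sketch): the macros route through os::malloc
    // and os::free with a memory-type tag so NMT can attribute the memory.
    char* s = NEW_C_HEAP_ARRAY(char, len, mtInternal);
    strncpy(s, typeName, len - 1);
    s[len - 1] = '\0';
    // ... use s ...
    FREE_C_HEAP_ARRAY(char, s, mtInternal);  // free with the matching type tag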
diff --git a/src/share/vm/runtime/vmThread.cpp b/src/share/vm/runtime/vmThread.cpp
index 8c321721e..bdb508208 100644
--- a/src/share/vm/runtime/vmThread.cpp
+++ b/src/share/vm/runtime/vmThread.cpp
@@ -35,6 +35,7 @@
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/runtimeService.hpp"
+#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
@@ -365,7 +366,23 @@ void VMThread::evaluate_operation(VM_Operation* op) {
(char *) op->name(), strlen(op->name()),
op->evaluation_mode());
#endif /* USDT2 */
+
+ EventExecuteVMOperation event;
+
op->evaluate();
+
+ if (event.should_commit()) {
+ bool is_concurrent = op->evaluate_concurrently();
+ event.set_operation(op->type());
+ event.set_safepoint(op->evaluate_at_safepoint());
+ event.set_blocking(!is_concurrent);
+ // Only write caller thread information for non-concurrent vm operations.
+ // For concurrent vm operations, the thread id is set to 0, indicating the thread is unknown.
+ // This is because the caller thread could have exited already.
+ event.set_caller(is_concurrent ? 0 : op->calling_thread()->osthread()->thread_id());
+ event.commit();
+ }
+
#ifndef USDT2
HS_DTRACE_PROBE3(hotspot, vmops__end, op->name(), strlen(op->name()),
op->evaluation_mode());
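This hunk brackets op->evaluate() with the standard tracing-event idiom:
construct the event before the work (capturing the start time), then populate
and commit it afterwards only if should_commit() reports that recording is
enabled. A minimal sketch of the idiom, with EventFoo, do_traced_work() and
set_bar() as hypothetical stand-ins for a generated event type and its payload:

    EventFoo event;                // hypothetical event type; captures start time
    do_traced_work();              // the operation being measured (placeholder)
    if (event.should_commit()) {   // cheap guard: skip field setup when disabled
      event.set_bar(value);        // hypothetical payload setter
      event.commit();              // stamps the end time and emits the event
    }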
@@ -601,7 +618,7 @@ void VMThread::execute(VM_Operation* op) {
{
VMOperationQueue_lock->lock_without_safepoint_check();
bool ok = _vm_queue->add(op);
- op->set_timestamp(os::javaTimeMillis());
+ op->set_timestamp(os::javaTimeMillis());
VMOperationQueue_lock->notify();
VMOperationQueue_lock->unlock();
// VM_Operation got skipped
diff --git a/src/share/vm/runtime/vm_operations.cpp b/src/share/vm/runtime/vm_operations.cpp
index 53ea0bd98..5166cfdaa 100644
--- a/src/share/vm/runtime/vm_operations.cpp
+++ b/src/share/vm/runtime/vm_operations.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
#include "runtime/thread.inline.hpp"
#include "runtime/vm_operations.hpp"
#include "services/threadService.hpp"
+#include "trace/tracing.hpp"
#define VM_OP_NAME_INITIALIZE(name) #name,
@@ -62,19 +63,21 @@ void VM_Operation::evaluate() {
}
}
+const char* VM_Operation::mode_to_string(Mode mode) {
+ switch(mode) {
+ case _safepoint : return "safepoint";
+ case _no_safepoint : return "no safepoint";
+ case _concurrent : return "concurrent";
+ case _async_safepoint: return "async safepoint";
+ default : return "unknown";
+ }
+}
// Called by fatal error handler.
void VM_Operation::print_on_error(outputStream* st) const {
st->print("VM_Operation (" PTR_FORMAT "): ", this);
st->print("%s", name());
- const char* mode;
- switch(evaluation_mode()) {
- case _safepoint : mode = "safepoint"; break;
- case _no_safepoint : mode = "no safepoint"; break;
- case _concurrent : mode = "concurrent"; break;
- case _async_safepoint: mode = "async safepoint"; break;
- default : mode = "unknown"; break;
- }
+ const char* mode = mode_to_string(evaluation_mode());
st->print(", mode: %s", mode);
if (calling_thread()) {
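Hoisting the switch into a static mode_to_string() helper lets other call
sites, such as the new tracing code, translate a Mode without duplicating the
mapping. A sketch of a hypothetical caller:

    tty->print_cr("mode: %s", VM_Operation::mode_to_string(VM_Operation::_safepoint));
    // prints: mode: safepoint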
diff --git a/src/share/vm/runtime/vm_operations.hpp b/src/share/vm/runtime/vm_operations.hpp
index 9d79b2c0d..b6555b457 100644
--- a/src/share/vm/runtime/vm_operations.hpp
+++ b/src/share/vm/runtime/vm_operations.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -178,6 +178,8 @@ class VM_Operation: public CHeapObj<mtInternal> {
evaluation_mode() == _async_safepoint;
}
+ static const char* mode_to_string(Mode mode);
+
// Debugging
void print_on_error(outputStream* st) const;
const char* name() const { return _names[type()]; }
diff --git a/src/share/vm/runtime/vm_version.cpp b/src/share/vm/runtime/vm_version.cpp
index 1cae197e1..7642d1fb9 100644
--- a/src/share/vm/runtime/vm_version.cpp
+++ b/src/share/vm/runtime/vm_version.cpp
@@ -235,6 +235,8 @@ const char* Abstract_VM_Version::internal_vm_info_string() {
#define HOTSPOT_BUILD_COMPILER "Workshop 5.9"
#elif __SUNPRO_CC == 0x5100
#define HOTSPOT_BUILD_COMPILER "Sun Studio 12u1"
+ #elif __SUNPRO_CC == 0x5120
+ #define HOTSPOT_BUILD_COMPILER "Sun Studio 12u3"
#else
#define HOTSPOT_BUILD_COMPILER "unknown Workshop:" XSTR(__SUNPRO_CC)
#endif
diff --git a/src/share/vm/runtime/vm_version.hpp b/src/share/vm/runtime/vm_version.hpp
index c901b260d..f03b77ca3 100644
--- a/src/share/vm/runtime/vm_version.hpp
+++ b/src/share/vm/runtime/vm_version.hpp
@@ -78,7 +78,13 @@ class Abstract_VM_Version: AllStatic {
static const char* jre_release_version();
// does HW support an 8-byte compare-exchange operation?
- static bool supports_cx8() {return _supports_cx8;}
+ static bool supports_cx8() {
+#ifdef SUPPORTS_NATIVE_CX8
+ return true;
+#else
+ return _supports_cx8;
+#endif
+ }
// does HW support atomic get-and-set or atomic get-and-add? Used
// to guide intrinsification decisions for Unsafe atomic ops
static bool supports_atomic_getset4() {return _supports_atomic_getset4;}
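When SUPPORTS_NATIVE_CX8 is defined at build time, supports_cx8() collapses to
a compile-time constant true, letting the compiler fold away the fallback
paths that callers keep behind the check. A sketch of such a guarded caller,
where counter, new_val, old_val and Counter_lock are hypothetical:

    if (VM_Version::supports_cx8()) {
      Atomic::cmpxchg(new_val, &counter, old_val);   // lock-free 8-byte CAS
    } else {
      MutexLocker ml(Counter_lock);                  // lock-based fallback
      counter = new_val;
    }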
diff --git a/src/share/vm/services/attachListener.cpp b/src/share/vm/services/attachListener.cpp
index 564b20f4b..e30c3eeff 100644
--- a/src/share/vm/services/attachListener.cpp
+++ b/src/share/vm/services/attachListener.cpp
@@ -227,7 +227,7 @@ static jint heap_inspection(AttachOperation* op, outputStream* out) {
}
live_objects_only = strcmp(arg0, "-live") == 0;
}
- VM_GC_HeapInspection heapop(out, live_objects_only /* request full gc */, true /* need_prologue */);
+ VM_GC_HeapInspection heapop(out, live_objects_only /* request full gc */);
VMThread::execute(&heapop);
return JNI_OK;
}
@@ -470,7 +470,17 @@ void AttachListener::init() {
vmSymbols::threadgroup_string_void_signature(),
thread_group,
string,
- CHECK);
+ THREAD);
+
+ if (HAS_PENDING_EXCEPTION) {
+ tty->print_cr("Exception in VM (AttachListener::init) : ");
+ java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+ tty->cr();
+
+ CLEAR_PENDING_EXCEPTION;
+
+ return;
+ }
KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
JavaCalls::call_special(&result,
@@ -479,7 +489,17 @@ void AttachListener::init() {
vmSymbols::add_method_name(),
vmSymbols::thread_void_signature(),
thread_oop, // ARG 1
- CHECK);
+ THREAD);
+
+ if (HAS_PENDING_EXCEPTION) {
+ tty->print_cr("Exception in VM (AttachListener::init) : ");
+ java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+ tty->cr();
+
+ CLEAR_PENDING_EXCEPTION;
+
+ return;
+ }
{ MutexLocker mu(Threads_lock);
JavaThread* listener_thread = new JavaThread(&attach_listener_thread_entry);
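Background for the two hunks above: in the TRAPS convention, CHECK returns
from the enclosing function as soon as an exception is pending, which made
AttachListener::init() unwind silently on failure. Passing THREAD keeps
control at the call site so the exception can be reported and cleared.
Roughly:

    // Simplified semantics of the two spellings (TRAPS convention):
    //   call(..., CHECK);    // expands to: call(..., THREAD);
    //                        //             if (HAS_PENDING_EXCEPTION) return;
    //   call(..., THREAD);   // leaves any pending exception for the caller to
    //                        // inspect, print, and CLEAR_PENDING_EXCEPTION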
diff --git a/src/share/vm/services/attachListener.hpp b/src/share/vm/services/attachListener.hpp
index 2e7cff395..6995a0f23 100644
--- a/src/share/vm/services/attachListener.hpp
+++ b/src/share/vm/services/attachListener.hpp
@@ -50,6 +50,7 @@ struct AttachOperationFunctionInfo {
class AttachListener: AllStatic {
public:
+ static void vm_start() NOT_SERVICES_RETURN;
static void init() NOT_SERVICES_RETURN;
static void abort() NOT_SERVICES_RETURN;
diff --git a/src/share/vm/services/diagnosticArgument.cpp b/src/share/vm/services/diagnosticArgument.cpp
index 022687db4..51126f063 100644
--- a/src/share/vm/services/diagnosticArgument.cpp
+++ b/src/share/vm/services/diagnosticArgument.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
#include "runtime/thread.hpp"
#include "services/diagnosticArgument.hpp"
@@ -60,7 +61,7 @@ void GenDCmdArgument::to_string(MemorySizeArgument m, char* buf, size_t len) {
}
void GenDCmdArgument::to_string(char* c, char* buf, size_t len) {
- jio_snprintf(buf, len, "%s", c);
+ jio_snprintf(buf, len, "%s", (c != NULL) ? c : "");
}
void GenDCmdArgument::to_string(StringArrayArgument* f, char* buf, size_t len) {
@@ -86,9 +87,18 @@ void GenDCmdArgument::to_string(StringArrayArgument* f, char* buf, size_t len) {
template <> void DCmdArgument<jlong>::parse_value(const char* str,
size_t len, TRAPS) {
- if (str == NULL || sscanf(str, JLONG_FORMAT, &_value) != 1) {
- THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
- "Integer parsing error in diagnostic command arguments\n");
+ int scanned = -1;
+ if (str == NULL
+ || sscanf(str, JLONG_FORMAT"%n", &_value, &scanned) != 1
+ || (size_t)scanned != len)
+ {
+ ResourceMark rm;
+
+ char* buf = NEW_RESOURCE_ARRAY(char, len + 1);
+ strncpy(buf, str, len);
+ buf[len] = '\0';
+ Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IllegalArgumentException(),
+ "Integer parsing error in command argument '%s'. Could not parse: %s.", _name, buf);
}
}
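The rewritten check appends %n to the scan format so sscanf reports how many
characters it consumed, and the parse is accepted only when that count equals
the argument length; input such as "123abc" is now rejected instead of
silently parsing as 123. A standalone sketch of the technique:

    #include <stdio.h>

    // Returns 1 only when the whole [str, str+len) range is a valid integer.
    static int parse_long_strict(const char* str, size_t len, long long* out) {
      int consumed = -1;
      return str != NULL
          && sscanf(str, "%lld%n", out, &consumed) == 1
          && (size_t)consumed == len;   // rejects partial matches like "123abc"
    }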
@@ -96,7 +106,7 @@ template <> void DCmdArgument<jlong>::init_value(TRAPS) {
if (has_default()) {
this->parse_value(_default_string, strlen(_default_string), THREAD);
if (HAS_PENDING_EXCEPTION) {
- fatal("Default string must be parsable");
+ fatal("Default string must be parseable");
}
} else {
set_value(0);
@@ -116,8 +126,13 @@ template <> void DCmdArgument<bool>::parse_value(const char* str,
} else if (len == strlen("false") && strncasecmp(str, "false", len) == 0) {
set_value(false);
} else {
- THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
- "Boolean parsing error in diagnostic command arguments");
+ ResourceMark rm;
+
+ char* buf = NEW_RESOURCE_ARRAY(char, len + 1);
+ strncpy(buf, str, len);
+ buf[len] = '\0';
+ Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IllegalArgumentException(),
+ "Boolean parsing error in command argument '%s'. Could not parse: %s.", _name, buf);
}
}
}
@@ -168,7 +183,7 @@ template <> void DCmdArgument<NanoTimeArgument>::parse_value(const char* str,
size_t len, TRAPS) {
if (str == NULL) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
- "Integer parsing error nanotime value: syntax error");
+ "Integer parsing error nanotime value: syntax error, value is null");
}
int argc = sscanf(str, JLONG_FORMAT, &_value._time);
@@ -232,7 +247,7 @@ template <> void DCmdArgument<NanoTimeArgument>::init_value(TRAPS) {
} else {
_value._time = 0;
_value._nanotime = 0;
- strcmp(_value._unit, "ns");
+ strcpy(_value._unit, "ns");
}
}
diff --git a/src/share/vm/services/diagnosticCommand.cpp b/src/share/vm/services/diagnosticCommand.cpp
index 5deaae0d4..79c922a85 100644
--- a/src/share/vm/services/diagnosticCommand.cpp
+++ b/src/share/vm/services/diagnosticCommand.cpp
@@ -320,8 +320,7 @@ ClassHistogramDCmd::ClassHistogramDCmd(outputStream* output, bool heap) :
void ClassHistogramDCmd::execute(DCmdSource source, TRAPS) {
VM_GC_HeapInspection heapop(output(),
- !_all.value() /* request full gc if false */,
- true /* need_prologue */);
+ !_all.value() /* request full gc if false */);
VMThread::execute(&heapop);
}
@@ -361,8 +360,7 @@ void ClassStatsDCmd::execute(DCmdSource source, TRAPS) {
}
VM_GC_HeapInspection heapop(output(),
- true, /* request_full_gc */
- true /* need_prologue */);
+ true /* request_full_gc */);
heapop.set_csv_format(_csv.value());
heapop.set_print_help(_help.value());
heapop.set_print_class_stats(true);
diff --git a/src/share/vm/services/gcNotifier.cpp b/src/share/vm/services/gcNotifier.cpp
index e6106e29c..102507233 100644
--- a/src/share/vm/services/gcNotifier.cpp
+++ b/src/share/vm/services/gcNotifier.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -209,11 +209,11 @@ void GCNotifier::sendNotificationInternal(TRAPS) {
GCNotificationRequest *request = getRequest();
if (request != NULL) {
NotificationMark nm(request);
- Handle objGcInfo = createGcInfo(request->gcManager, request->gcStatInfo, THREAD);
+ Handle objGcInfo = createGcInfo(request->gcManager, request->gcStatInfo, CHECK);
- Handle objName = java_lang_String::create_from_platform_dependent_str(request->gcManager->name(), CHECK);
- Handle objAction = java_lang_String::create_from_platform_dependent_str(request->gcAction, CHECK);
- Handle objCause = java_lang_String::create_from_platform_dependent_str(request->gcCause, CHECK);
+ Handle objName = java_lang_String::create_from_str(request->gcManager->name(), CHECK);
+ Handle objAction = java_lang_String::create_from_str(request->gcAction, CHECK);
+ Handle objCause = java_lang_String::create_from_str(request->gcCause, CHECK);
Klass* k = Management::sun_management_GarbageCollectorImpl_klass(CHECK);
instanceKlassHandle gc_mbean_klass(THREAD, k);
diff --git a/src/share/vm/services/management.cpp b/src/share/vm/services/management.cpp
index 872d19829..5cc0cc4ff 100644
--- a/src/share/vm/services/management.cpp
+++ b/src/share/vm/services/management.cpp
@@ -876,8 +876,6 @@ JVM_ENTRY(jobject, jmm_GetMemoryUsage(JNIEnv* env, jboolean heap))
total_used += u.used();
total_committed += u.committed();
- // if any one of the memory pool has undefined init_size or max_size,
- // set it to -1
if (u.init_size() == (size_t)-1) {
has_undefined_init_size = true;
}
@@ -894,11 +892,14 @@ JVM_ENTRY(jobject, jmm_GetMemoryUsage(JNIEnv* env, jboolean heap))
}
}
- // In our current implementation, we make sure that all non-heap
- // pools have defined init and max sizes. Heap pools do not matter,
- // as we never use total_init and total_max for them.
- assert(heap || !has_undefined_init_size, "Undefined init size");
- assert(heap || !has_undefined_max_size, "Undefined max size");
+ // if any one of the memory pool has undefined init_size or max_size,
+ // set it to -1
+ if (has_undefined_init_size) {
+ total_init = (size_t)-1;
+ }
+ if (has_undefined_max_size) {
+ total_max = (size_t)-1;
+ }
MemoryUsage usage((heap ? InitialHeapSize : total_init),
total_used,
@@ -1837,13 +1838,13 @@ class ThreadTimesClosure: public ThreadClosure {
private:
objArrayHandle _names_strings;
char **_names_chars;
- typeArrayOop _times;
+ typeArrayHandle _times;
int _names_len;
int _times_len;
int _count;
public:
- ThreadTimesClosure(objArrayHandle names, typeArrayOop times);
+ ThreadTimesClosure(objArrayHandle names, typeArrayHandle times);
~ThreadTimesClosure();
virtual void do_thread(Thread* thread);
void do_unlocked();
@@ -1851,9 +1852,9 @@ class ThreadTimesClosure: public ThreadClosure {
};
ThreadTimesClosure::ThreadTimesClosure(objArrayHandle names,
- typeArrayOop times) {
+ typeArrayHandle times) {
assert(names() != NULL, "names was NULL");
- assert(times != NULL, "times was NULL");
+ assert(times() != NULL, "times was NULL");
_names_strings = names;
_names_len = names->length();
_names_chars = NEW_C_HEAP_ARRAY(char*, _names_len, mtInternal);
@@ -1931,7 +1932,7 @@ JVM_ENTRY(jint, jmm_GetInternalThreadTimes(JNIEnv *env,
typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(times));
typeArrayHandle times_ah(THREAD, ta);
- ThreadTimesClosure ttc(names_ah, times_ah());
+ ThreadTimesClosure ttc(names_ah, times_ah);
{
MutexLockerEx ml(Threads_lock);
Threads::threads_do(&ttc);
diff --git a/src/share/vm/services/memBaseline.cpp b/src/share/vm/services/memBaseline.cpp
index b090e95ac..62e51873c 100644
--- a/src/share/vm/services/memBaseline.cpp
+++ b/src/share/vm/services/memBaseline.cpp
@@ -41,6 +41,7 @@ MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
{mtOther, "Other"},
{mtSymbol, "Symbol"},
{mtNMT, "Memory Tracking"},
+ {mtTracing, "Tracing"},
{mtChunk, "Pooled Free Chunks"},
{mtClassShared,"Shared spaces for classes"},
{mtTest, "Test"},
@@ -129,7 +130,7 @@ bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records)
if (malloc_ptr->is_arena_record()) {
// see if arena memory record present
MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
- if (next_malloc_ptr->is_arena_memory_record()) {
+ if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
"Arena records do not match");
size = next_malloc_ptr->size();
@@ -485,7 +486,7 @@ int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
- assert(delta != 0, "dup pointer");
+ assert(p1 == p2 || delta != 0, "dup pointer");
return delta;
}
diff --git a/src/share/vm/services/memPtr.cpp b/src/share/vm/services/memPtr.cpp
index 3e124e2bd..bc460517c 100644
--- a/src/share/vm/services/memPtr.cpp
+++ b/src/share/vm/services/memPtr.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,9 +34,9 @@ jint SequenceGenerator::next() {
jint seq = Atomic::add(1, &_seq_number);
if (seq < 0) {
MemTracker::shutdown(MemTracker::NMT_sequence_overflow);
+ } else {
+ NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
}
- assert(seq > 0, "counter overflow");
- NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
return seq;
}
diff --git a/src/share/vm/services/memPtr.hpp b/src/share/vm/services/memPtr.hpp
index d98ca80c4..c54f0934a 100644
--- a/src/share/vm/services/memPtr.hpp
+++ b/src/share/vm/services/memPtr.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -457,9 +457,8 @@ class SeqMemPointerRecord : public MemPointerRecord {
public:
SeqMemPointerRecord(): _seq(0){ }
- SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size)
- : MemPointerRecord(addr, flags, size) {
- _seq = SequenceGenerator::next();
+ SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq)
+ : MemPointerRecord(addr, flags, size), _seq(seq) {
}
SeqMemPointerRecord(const SeqMemPointerRecord& copy_from)
@@ -488,8 +487,8 @@ class SeqMemPointerRecordEx : public MemPointerRecordEx {
SeqMemPointerRecordEx(): _seq(0) { }
SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size,
- address pc): MemPointerRecordEx(addr, flags, size, pc) {
- _seq = SequenceGenerator::next();
+ jint seq, address pc):
+ MemPointerRecordEx(addr, flags, size, pc), _seq(seq) {
}
SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
diff --git a/src/share/vm/services/memRecorder.cpp b/src/share/vm/services/memRecorder.cpp
index 776ad223c..afe7bd245 100644
--- a/src/share/vm/services/memRecorder.cpp
+++ b/src/share/vm/services/memRecorder.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -69,10 +69,11 @@ MemRecorder::MemRecorder() {
if (_pointer_records != NULL) {
// recode itself
+ address pc = CURRENT_PC;
record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
- sizeof(MemRecorder), CALLER_PC);
+ sizeof(MemRecorder), SequenceGenerator::next(), pc);
record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
- _pointer_records->instance_size(),CURRENT_PC);
+ _pointer_records->instance_size(), SequenceGenerator::next(), pc);
}
}
@@ -116,7 +117,8 @@ int MemRecorder::sort_record_fn(const void* e1, const void* e2) {
}
}
-bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, address pc) {
+bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, jint seq, address pc) {
+ assert(seq > 0, "No sequence number");
#ifdef ASSERT
if (MemPointerRecord::is_virtual_memory_record(flags)) {
assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record");
@@ -133,11 +135,11 @@ bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, address pc) {
#endif
if (MemTracker::track_callsite()) {
- SeqMemPointerRecordEx ap(p, flags, size, pc);
+ SeqMemPointerRecordEx ap(p, flags, size, seq, pc);
debug_only(check_dup_seq(ap.seq());)
return _pointer_records->append(&ap);
} else {
- SeqMemPointerRecord ap(p, flags, size);
+ SeqMemPointerRecord ap(p, flags, size, seq);
debug_only(check_dup_seq(ap.seq());)
return _pointer_records->append(&ap);
}
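Threading an explicit seq parameter through record() decouples a record's
position in the NMT ordering from the moment it is written: a caller can
reserve the sequence number before a fallible operation and pass it in
afterwards, so the record sorts as if written at reservation time. A
hypothetical sketch, where do_release() stands in for such an operation:

    jint seq = SequenceGenerator::next();          // reserve the ordering slot first
    if (do_release(addr)) {                        // hypothetical fallible operation
      recorder->record(addr, flags, 0, seq, pc);   // sorts at reservation time
    }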
diff --git a/src/share/vm/services/memRecorder.hpp b/src/share/vm/services/memRecorder.hpp
index 9d6bf2b7e..4329dad02 100644
--- a/src/share/vm/services/memRecorder.hpp
+++ b/src/share/vm/services/memRecorder.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,13 +53,13 @@ template <class E, int SIZE> class FixedSizeMemPointerArray :
}
}
- void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
+ void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
// the instance is part of memRecorder, needs to be tagged with 'otNMTRecorder'
// to avoid recursion
return os::malloc(size, (mtNMT | otNMTRecorder));
}
- void* operator new(size_t size) {
+ void* operator new(size_t size) throw() {
assert(false, "use nothrow version");
return NULL;
}
@@ -220,7 +220,7 @@ class MemRecorder : public CHeapObj<mtNMT|otNMTRecorder> {
~MemRecorder();
// record a memory operation
- bool record(address addr, MEMFLAGS flags, size_t size, address caller_pc = 0);
+ bool record(address addr, MEMFLAGS flags, size_t size, jint seq, address caller_pc = 0);
// linked list support
inline void set_next(MemRecorder* rec) {
diff --git a/src/share/vm/services/memReporter.cpp b/src/share/vm/services/memReporter.cpp
index 0311675f3..9d0f45ada 100644
--- a/src/share/vm/services/memReporter.cpp
+++ b/src/share/vm/services/memReporter.cpp
@@ -188,30 +188,51 @@ void BaselineReporter::diff_callsites(const MemBaseline& cur, const MemBaseline&
(MallocCallsitePointer*)prev_malloc_itr.current();
while (cur_malloc_callsite != NULL || prev_malloc_callsite != NULL) {
- if (prev_malloc_callsite == NULL ||
- cur_malloc_callsite->addr() < prev_malloc_callsite->addr()) {
+ if (prev_malloc_callsite == NULL) {
+ assert(cur_malloc_callsite != NULL, "sanity check");
+ // this is a new callsite
_outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
amount_in_current_scale(cur_malloc_callsite->amount()),
cur_malloc_callsite->count(),
diff_in_current_scale(cur_malloc_callsite->amount(), 0),
diff(cur_malloc_callsite->count(), 0));
cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
- } else if (prev_malloc_callsite == NULL ||
- cur_malloc_callsite->addr() > prev_malloc_callsite->addr()) {
- _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
- amount_in_current_scale(prev_malloc_callsite->amount()),
- prev_malloc_callsite->count(),
+ } else if (cur_malloc_callsite == NULL) {
+ assert(prev_malloc_callsite != NULL, "Sanity check");
+ // this callsite is already gone
+ _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(),
+ 0, 0,
diff_in_current_scale(0, prev_malloc_callsite->amount()),
diff(0, prev_malloc_callsite->count()));
prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
- } else { // the same callsite
- _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
- amount_in_current_scale(cur_malloc_callsite->amount()),
- cur_malloc_callsite->count(),
- diff_in_current_scale(cur_malloc_callsite->amount(), prev_malloc_callsite->amount()),
- diff(cur_malloc_callsite->count(), prev_malloc_callsite->count()));
- cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
- prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
+ } else {
+ assert(cur_malloc_callsite != NULL, "Sanity check");
+ assert(prev_malloc_callsite != NULL, "Sanity check");
+ if (cur_malloc_callsite->addr() < prev_malloc_callsite->addr()) {
+ // this is a new callsite
+ _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
+ amount_in_current_scale(cur_malloc_callsite->amount()),
+ cur_malloc_callsite->count(),
+ diff_in_current_scale(cur_malloc_callsite->amount(), 0),
+ diff(cur_malloc_callsite->count(), 0));
+ cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
+ } else if (cur_malloc_callsite->addr() > prev_malloc_callsite->addr()) {
+ // this callsite is already gone
+ _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(),
+ 0, 0,
+ diff_in_current_scale(0, prev_malloc_callsite->amount()),
+ diff(0, prev_malloc_callsite->count()));
+ prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
+ } else {
+ // the same callsite
+ _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
+ amount_in_current_scale(cur_malloc_callsite->amount()),
+ cur_malloc_callsite->count(),
+ diff_in_current_scale(cur_malloc_callsite->amount(), prev_malloc_callsite->amount()),
+ diff(cur_malloc_callsite->count(), prev_malloc_callsite->count()));
+ cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
+ prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
+ }
}
}
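The restructured loop is a standard two-pointer merge over two address-sorted
callsite lists, and spelling out the NULL cases fixes the old short-circuit
bug: once the current list was exhausted first, the test
"prev == NULL || cur->addr() < prev->addr()" dereferenced the NULL cur
pointer, and the middle branch also reported prev's amounts under cur's
address. The generic shape of the merge, as a minimal illustration:

    // Two-pointer merge-join over sorted ranges (illustration only):
    template <typename It, typename FNew, typename FGone, typename FBoth>
    void merge_join(It c, It ce, It p, It pe,
                    FNew on_new, FGone on_gone, FBoth on_both) {
      while (c != ce || p != pe) {
        if      (p == pe)  on_new(*c++);                // only in current: new
        else if (c == ce)  on_gone(*p++);               // only in previous: gone
        else if (*c < *p)  on_new(*c++);                // present now, absent before
        else if (*p < *c)  on_gone(*p++);               // present before, absent now
        else             { on_both(*c, *p); ++c; ++p; } // same key in both lists
      }
    }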
@@ -222,6 +243,7 @@ void BaselineReporter::diff_callsites(const MemBaseline& cur, const MemBaseline&
VMCallsitePointer* prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.current();
while (cur_vm_callsite != NULL || prev_vm_callsite != NULL) {
if (prev_vm_callsite == NULL || cur_vm_callsite->addr() < prev_vm_callsite->addr()) {
+ // this is a new callsite
_outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(),
amount_in_current_scale(cur_vm_callsite->reserved_amount()),
amount_in_current_scale(cur_vm_callsite->committed_amount()),
@@ -229,9 +251,10 @@ void BaselineReporter::diff_callsites(const MemBaseline& cur, const MemBaseline&
diff_in_current_scale(cur_vm_callsite->committed_amount(), 0));
cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.next();
} else if (cur_vm_callsite == NULL || cur_vm_callsite->addr() > prev_vm_callsite->addr()) {
+ // this callsite is already gone
_outputer.diff_virtual_memory_callsite(prev_vm_callsite->addr(),
- amount_in_current_scale(prev_vm_callsite->reserved_amount()),
- amount_in_current_scale(prev_vm_callsite->committed_amount()),
+ amount_in_current_scale(0),
+ amount_in_current_scale(0),
diff_in_current_scale(0, prev_vm_callsite->reserved_amount()),
diff_in_current_scale(0, prev_vm_callsite->committed_amount()));
prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next();
diff --git a/src/share/vm/services/memTrackWorker.cpp b/src/share/vm/services/memTrackWorker.cpp
index 3e9bcd2f6..e1382dd1a 100644
--- a/src/share/vm/services/memTrackWorker.cpp
+++ b/src/share/vm/services/memTrackWorker.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,12 +63,12 @@ MemTrackWorker::~MemTrackWorker() {
}
}
-void* MemTrackWorker::operator new(size_t size) {
+void* MemTrackWorker::operator new(size_t size) throw() {
assert(false, "use nothrow version");
return NULL;
}
-void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) {
+void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
return allocate(size, false, mtNMT);
}
diff --git a/src/share/vm/services/memTrackWorker.hpp b/src/share/vm/services/memTrackWorker.hpp
index 5d49ae193..ee45244e3 100644
--- a/src/share/vm/services/memTrackWorker.hpp
+++ b/src/share/vm/services/memTrackWorker.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -90,8 +90,8 @@ class MemTrackWorker : public NamedThread {
public:
MemTrackWorker(MemSnapshot* snapshot);
~MemTrackWorker();
- _NOINLINE_ void* operator new(size_t size);
- _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant);
+ _NOINLINE_ void* operator new(size_t size) throw();
+ _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw();
void start();
void run();
diff --git a/src/share/vm/services/memTracker.cpp b/src/share/vm/services/memTracker.cpp
index a89de2f40..a2961ee99 100644
--- a/src/share/vm/services/memTracker.cpp
+++ b/src/share/vm/services/memTracker.cpp
@@ -34,6 +34,7 @@
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
#include "utilities/decoder.hpp"
+#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"
bool NMT_track_callsite = false;
@@ -68,6 +69,7 @@ int MemTracker::_thread_count = 255;
volatile jint MemTracker::_pooled_recorder_count = 0;
volatile unsigned long MemTracker::_processing_generation = 0;
volatile bool MemTracker::_worker_thread_idle = false;
+volatile jint MemTracker::_pending_op_count = 0;
volatile bool MemTracker::_slowdown_calling_thread = false;
debug_only(intx MemTracker::_main_thread_tid = 0;)
NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;)
@@ -77,7 +79,15 @@ void MemTracker::init_tracking_options(const char* option_line) {
if (strcmp(option_line, "=summary") == 0) {
_tracking_level = NMT_summary;
} else if (strcmp(option_line, "=detail") == 0) {
- _tracking_level = NMT_detail;
+ // detail relies on a stack-walking ability that may not
+ // be available depending on platform and/or compiler flags
+#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+ _tracking_level = NMT_detail;
+#else
+ jio_fprintf(defaultStream::error_stream(),
+ "NMT detail is not supported on this platform. Using NMT summary instead.\n");
+ _tracking_level = NMT_summary;
+#endif
} else if (strcmp(option_line, "=off") != 0) {
vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
}
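For reference, the accepted spellings are exactly those listed in the error
message; with this change, asking for detail where native stack walking is
unavailable degrades to summary with a warning instead of failing later:

    java -XX:NativeMemoryTracking=off      # no tracking
    java -XX:NativeMemoryTracking=summary  # aggregate accounting only
    java -XX:NativeMemoryTracking=detail   # adds callsite tracking; falls back
                                           # to summary on unsupported platforms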
@@ -328,92 +338,14 @@ void MemTracker::release_thread_recorder(MemRecorder* rec) {
Atomic::inc(&_pooled_recorder_count);
}
-/*
- * This is the most important method in whole nmt implementation.
- *
- * Create a memory record.
- * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM
- * still in single thread mode.
- * 2. For all threads other than JavaThread, ThreadCritical is needed
- * to write to recorders to global recorder.
- * 3. For JavaThreads that are not longer visible by safepoint, also
- * need to take ThreadCritical and records are written to global
- * recorders, since these threads are NOT walked by Threads.do_thread().
- * 4. JavaThreads that are running in native state, have to transition
- * to VM state before writing to per-thread recorders.
- * 5. JavaThreads that are running in VM state do not need any lock and
- * records are written to per-thread recorders.
- * 6. For a thread has yet to attach VM 'Thread', they need to take
- * ThreadCritical to write to global recorder.
- *
- * Important note:
- * NO LOCK should be taken inside ThreadCritical lock !!!
- */
-void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
- size_t size, address pc, Thread* thread) {
- assert(addr != NULL, "Sanity check");
- if (!shutdown_in_progress()) {
- // single thread, we just write records direct to global recorder,'
- // with any lock
- if (_state == NMT_bootstrapping_single_thread) {
- assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
- thread = NULL;
- } else {
- if (thread == NULL) {
- // don't use Thread::current(), since it is possible that
- // the calling thread has yet to attach to VM 'Thread',
- // which will result assertion failure
- thread = ThreadLocalStorage::thread();
- }
- }
-
- if (thread != NULL) {
- // slow down all calling threads except NMT worker thread, so it
- // can catch up.
- if (_slowdown_calling_thread && thread != _worker_thread) {
- os::yield_all();
- }
-
- if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
- JavaThread* java_thread = (JavaThread*)thread;
- JavaThreadState state = java_thread->thread_state();
- if (SafepointSynchronize::safepoint_safe(java_thread, state)) {
- // JavaThreads that are safepoint safe, can run through safepoint,
- // so ThreadCritical is needed to ensure no threads at safepoint create
- // new records while the records are being gathered and the sequence number is changing
- ThreadCritical tc;
- create_record_in_recorder(addr, flags, size, pc, java_thread);
- } else {
- create_record_in_recorder(addr, flags, size, pc, java_thread);
- }
- } else {
- // other threads, such as worker and watcher threads, etc. need to
- // take ThreadCritical to write to global recorder
- ThreadCritical tc;
- create_record_in_recorder(addr, flags, size, pc, NULL);
- }
- } else {
- if (_state == NMT_bootstrapping_single_thread) {
- // single thread, no lock needed
- create_record_in_recorder(addr, flags, size, pc, NULL);
- } else {
- // for thread has yet to attach VM 'Thread', we can not use VM mutex.
- // use native thread critical instead
- ThreadCritical tc;
- create_record_in_recorder(addr, flags, size, pc, NULL);
- }
- }
- }
-}
-
// write a record to proper recorder. No lock can be taken from this method
// down.
-void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags,
- size_t size, address pc, JavaThread* thread) {
+void MemTracker::write_tracking_record(address addr, MEMFLAGS flags,
+ size_t size, jint seq, address pc, JavaThread* thread) {
MemRecorder* rc = get_thread_recorder(thread);
if (rc != NULL) {
- rc->record(addr, flags, size, pc);
+ rc->record(addr, flags, size, seq, pc);
}
}
@@ -453,6 +385,7 @@ void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
#define SAFE_SEQUENCE_THRESHOLD 30
#define HIGH_GENERATION_THRESHOLD 60
#define MAX_RECORDER_THREAD_RATIO 30
+#define MAX_RECORDER_PER_THREAD 100
void MemTracker::sync() {
assert(_tracking_level > NMT_off, "NMT is not enabled");
@@ -478,39 +411,48 @@ void MemTracker::sync() {
return;
}
}
- _sync_point_skip_count = 0;
{
// This method is running at safepoint, with ThreadCritical lock,
// it should guarantee that NMT is fully sync-ed.
ThreadCritical tc;
- SequenceGenerator::reset();
+ // We can NOT execute NMT sync-point if there are pending tracking ops.
+ if (_pending_op_count == 0) {
+ SequenceGenerator::reset();
+ _sync_point_skip_count = 0;
- // walk all JavaThreads to collect recorders
- SyncThreadRecorderClosure stc;
- Threads::threads_do(&stc);
+ // walk all JavaThreads to collect recorders
+ SyncThreadRecorderClosure stc;
+ Threads::threads_do(&stc);
- _thread_count = stc.get_thread_count();
- MemRecorder* pending_recorders = get_pending_recorders();
+ _thread_count = stc.get_thread_count();
+ MemRecorder* pending_recorders = get_pending_recorders();
- if (_global_recorder != NULL) {
- _global_recorder->set_next(pending_recorders);
- pending_recorders = _global_recorder;
- _global_recorder = NULL;
- }
+ if (_global_recorder != NULL) {
+ _global_recorder->set_next(pending_recorders);
+ pending_recorders = _global_recorder;
+ _global_recorder = NULL;
+ }
- // see if NMT has too many outstanding recorder instances, it usually
- // means that worker thread is lagging behind in processing them.
- if (!AutoShutdownNMT) {
- _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
- }
+ // see if NMT has too many outstanding recorder instances; it usually
+ // means that the worker thread is lagging behind in processing them.
+ if (!AutoShutdownNMT) {
+ _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
+ } else {
+ // If auto shutdown is on, enforce MAX_RECORDER_PER_THREAD threshold to prevent OOM
+ if (MemRecorder::_instance_count >= _thread_count * MAX_RECORDER_PER_THREAD) {
+ shutdown(NMT_out_of_memory);
+ }
+ }
- // check _worker_thread with lock to avoid racing condition
- if (_worker_thread != NULL) {
- _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
+ // check _worker_thread with lock to avoid racing condition
+ if (_worker_thread != NULL) {
+ _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
+ }
+ assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
+ } else {
+ _sync_point_skip_count ++;
}
-
- assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
}
}
@@ -699,3 +641,243 @@ void MemTracker::print_tracker_stats(outputStream* st) {
}
#endif
+
+// Tracker Implementation
+
+/*
+ * Create a tracker.
+ * This is a fairly complicated constructor, as it has to make two important decisions:
+ * 1) Does it need to take the ThreadCritical lock to write the tracking record?
+ * 2) Does it need to pre-reserve a sequence number for the tracking record?
+ *
+ * The rules to determine if ThreadCritical is needed:
+ * 1. When NMT is in single-threaded bootstrapping mode, no lock is needed as the VM
+ * is still in single-thread mode.
+ * 2. For all threads other than JavaThreads, ThreadCritical is needed
+ * to write records to the global recorder.
+ * 3. JavaThreads that are no longer visible to safepoints also need to take
+ * ThreadCritical, and their records are written to the global recorder,
+ * since these threads are NOT walked by Threads::threads_do().
+ * 4. JavaThreads that are running in safepoint-safe states do not stop
+ * for safepoints, so the ThreadCritical lock should be taken to write
+ * memory records.
+ * 5. JavaThreads that are running in VM state do not need any lock and
+ * their records are written to per-thread recorders.
+ * 6. A thread that has yet to attach a VM 'Thread' needs to take
+ * ThreadCritical to write to the global recorder.
+ *
+ * The memory operations that need pre-reserved sequence numbers:
+ * memory operations that "release" memory blocks and that can fail
+ * need to pre-reserve a sequence number. They are realloc, uncommit
+ * and release.
+ *
+ * The reason for pre-reserving a sequence number is to prevent a race condition:
+ * Thread 1                           Thread 2
+ * <release>
+ *                                    <allocate>
+ *                                    <write allocate record>
+ * <write release record>
+ * if Thread 2 happens to obtain the memory address Thread 1 just released,
+ * then NMT can mistakenly report the memory is free.
+ *
+ * Notably, free() does not need a pre-reserved sequence number, because the call
+ * cannot fail, so we can always write the "release" record before the memory is
+ * actually freed.
+ *
+ * For realloc, uncommit and release, the following coding pattern should be used:
+ *
+ * MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
+ * ptr = ::realloc(...);
+ * if (ptr == NULL) {
+ * tkr.discard();
+ * } else {
+ * tkr.record(...);
+ * }
+ *
+ * MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
+ * if (uncommit(...)) {
+ * tkr.record(...);
+ * } else {
+ * tkr.discard();
+ * }
+ *
+ * MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+ * if (release(...)) {
+ * tkr.record(...);
+ * } else {
+ * tkr.discard();
+ * }
+ *
+ * Since a pre-reserved sequence number is only valid for the generation in which
+ * it is acquired, when there is a pending Tracker holding a reserved sequence
+ * number, the NMT sync-point has to be skipped to prevent it from advancing the
+ * generation. This is done by incrementing and decrementing
+ * MemTracker::_pending_op_count; when MemTracker::_pending_op_count > 0, the NMT
+ * sync-point is skipped. Not every pre-reservation of a sequence number increments
+ * the pending op count. For JavaThreads that honor safepoints, a safepoint cannot
+ * occur during the memory operation, so the pre-reserved sequence number won't
+ * cross the generation boundary.
+ */
+MemTracker::Tracker::Tracker(MemoryOperation op, Thread* thr) {
+ _op = NoOp;
+ _seq = 0;
+ if (MemTracker::is_on()) {
+ _java_thread = NULL;
+ _op = op;
+
+ // figure out if ThreadCritical lock is needed to write this operation
+ // to MemTracker
+ if (MemTracker::is_single_threaded_bootstrap()) {
+ thr = NULL;
+ } else if (thr == NULL) {
+ // don't use Thread::current(), since it is possible that
+ // the calling thread has yet to attach to VM 'Thread',
+ // which would result in an assertion failure
+ thr = ThreadLocalStorage::thread();
+ }
+
+ if (thr != NULL) {
+ // Check NMT load
+ MemTracker::check_NMT_load(thr);
+
+ if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) {
+ _java_thread = (JavaThread*)thr;
+ JavaThreadState state = _java_thread->thread_state();
+ // JavaThreads that are safepoint-safe can run through safepoints,
+ // so ThreadCritical is needed to ensure no threads at safepoint create
+ // new records while the records are being gathered and the sequence number is changing
+ _need_thread_critical_lock =
+ SafepointSynchronize::safepoint_safe(_java_thread, state);
+ } else {
+ _need_thread_critical_lock = true;
+ }
+ } else {
+ _need_thread_critical_lock
+ = !MemTracker::is_single_threaded_bootstrap();
+ }
+
+ // see if we need to pre-reserve sequence number for this operation
+ if (_op == Realloc || _op == Uncommit || _op == Release) {
+ if (_need_thread_critical_lock) {
+ ThreadCritical tc;
+ MemTracker::inc_pending_op_count();
+ _seq = SequenceGenerator::next();
+ } else {
+ // for the threads that honor safepoints, no safepoint can occur
+ // during the lifespan of tracker, so we don't need to increase
+ // pending op count.
+ _seq = SequenceGenerator::next();
+ }
+ }
+ }
+}
+
+void MemTracker::Tracker::discard() {
+ if (MemTracker::is_on() && _seq != 0) {
+ if (_need_thread_critical_lock) {
+ ThreadCritical tc;
+ MemTracker::dec_pending_op_count();
+ }
+ _seq = 0;
+ }
+}
+
+
+void MemTracker::Tracker::record(address old_addr, address new_addr, size_t size,
+ MEMFLAGS flags, address pc) {
+ assert(old_addr != NULL && new_addr != NULL, "Sanity check");
+ assert(_op == Realloc || _op == NoOp, "Wrong call");
+ if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
+ assert(_seq > 0, "Need pre-reserve sequence number");
+ if (_need_thread_critical_lock) {
+ ThreadCritical tc;
+ // free old address, use pre-reserved sequence number
+ MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
+ 0, _seq, pc, _java_thread);
+ MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
+ size, SequenceGenerator::next(), pc, _java_thread);
+ // decrement MemTracker pending_op_count
+ MemTracker::dec_pending_op_count();
+ } else {
+ // free old address, use pre-reserved sequence number
+ MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
+ 0, _seq, pc, _java_thread);
+ MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
+ size, SequenceGenerator::next(), pc, _java_thread);
+ }
+ _seq = 0;
+ }
+}
+
+void MemTracker::Tracker::record(address addr, size_t size, MEMFLAGS flags, address pc) {
+ // OOM already?
+ if (addr == NULL) return;
+
+ if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
+ bool pre_reserved_seq = (_seq != 0);
+ address pc = CALLER_CALLER_PC;
+ MEMFLAGS orig_flags = flags;
+
+ // OR in the tagging flags appropriate to the operation
+ switch(_op) {
+ case Malloc:
+ flags |= MemPointerRecord::malloc_tag();
+ break;
+ case Free:
+ flags = MemPointerRecord::free_tag();
+ break;
+ case Realloc:
+ fatal("Use the other Tracker::record()");
+ break;
+ case Reserve:
+ case ReserveAndCommit:
+ flags |= MemPointerRecord::virtual_memory_reserve_tag();
+ break;
+ case Commit:
+ flags = MemPointerRecord::virtual_memory_commit_tag();
+ break;
+ case Type:
+ flags |= MemPointerRecord::virtual_memory_type_tag();
+ break;
+ case Uncommit:
+ assert(pre_reserved_seq, "Need pre-reserve sequence number");
+ flags = MemPointerRecord::virtual_memory_uncommit_tag();
+ break;
+ case Release:
+ assert(pre_reserved_seq, "Need pre-reserve sequence number");
+ flags = MemPointerRecord::virtual_memory_release_tag();
+ break;
+ case ArenaSize:
+ // A bit of a hack here: add a small positive offset to the arena
+ // address for its size record, so the size record is sorted
+ // right after the arena record.
+ flags = MemPointerRecord::arena_size_tag();
+ addr += sizeof(void*);
+ break;
+ case StackRelease:
+ flags = MemPointerRecord::virtual_memory_release_tag();
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+
+ // write memory tracking record
+ if (_need_thread_critical_lock) {
+ ThreadCritical tc;
+ if (_seq == 0) _seq = SequenceGenerator::next();
+ MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
+ if (_op == ReserveAndCommit) {
+ MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
+ size, SequenceGenerator::next(), pc, _java_thread);
+ }
+ if (pre_reserved_seq) MemTracker::dec_pending_op_count();
+ } else {
+ if (_seq == 0) _seq = SequenceGenerator::next();
+ MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
+ if (_op == ReserveAndCommit) {
+ MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
+ size, SequenceGenerator::next(), pc, _java_thread);
+ }
+ }
+ _seq = 0;
+ }
+}
+
diff --git a/src/share/vm/services/memTracker.hpp b/src/share/vm/services/memTracker.hpp
index a7d067552..1072e5d6a 100644
--- a/src/share/vm/services/memTracker.hpp
+++ b/src/share/vm/services/memTracker.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,18 @@ class MemTracker : AllStatic {
NMT_sequence_overflow // overflow the sequence number
};
+ class Tracker {
+ public:
+ void discard() { }
+
+ void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL) { }
+ void record(address old_addr, address new_addr, size_t size,
+ MEMFLAGS flags, address pc = NULL) { }
+ };
+
+ private:
+ static Tracker _tkr;
+
public:
static inline void init_tracking_options(const char* option_line) { }
@@ -68,19 +80,20 @@ class MemTracker : AllStatic {
static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
address pc = 0, Thread* thread = NULL) { }
static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { }
- static inline void record_realloc(address old_addr, address new_addr, size_t size,
- MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
static inline void record_arena_size(address addr, size_t size) { }
static inline void record_virtual_memory_reserve(address addr, size_t size,
- address pc = 0, Thread* thread = NULL) { }
+ MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
+ static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
+ MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
static inline void record_virtual_memory_commit(address addr, size_t size,
address pc = 0, Thread* thread = NULL) { }
- static inline void record_virtual_memory_uncommit(address addr, size_t size,
- Thread* thread = NULL) { }
static inline void record_virtual_memory_release(address addr, size_t size,
Thread* thread = NULL) { }
static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
Thread* thread = NULL) { }
+ static inline Tracker get_realloc_tracker() { return _tkr; }
+ static inline Tracker get_virtual_memory_uncommit_tracker() { return _tkr; }
+ static inline Tracker get_virtual_memory_release_tracker() { return _tkr; }
static inline bool baseline() { return false; }
static inline bool has_baseline() { return false; }
@@ -165,6 +178,45 @@ class MemTracker : AllStatic {
};
public:
+ class Tracker : public StackObj {
+ friend class MemTracker;
+ public:
+ enum MemoryOperation {
+ NoOp, // no op
+ Malloc, // malloc
+ Realloc, // realloc
+ Free, // free
+ Reserve, // virtual memory reserve
+ Commit, // virtual memory commit
+ ReserveAndCommit, // virtual memory reserve and commit
+ StackAlloc = ReserveAndCommit, // allocate thread stack
+ Type, // assign virtual memory type
+ Uncommit, // virtual memory uncommit
+ Release, // virtual memory release
+ ArenaSize, // set arena size
+ StackRelease // release thread stack
+ };
+
+
+ protected:
+ Tracker(MemoryOperation op, Thread* thr = NULL);
+
+ public:
+ void discard();
+
+ void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL);
+ void record(address old_addr, address new_addr, size_t size,
+ MEMFLAGS flags, address pc = NULL);
+
+ private:
+ bool _need_thread_critical_lock;
+ JavaThread* _java_thread;
+ MemoryOperation _op; // memory operation
+ jint _seq; // reserved sequence number
+ };
+
+
+ public:
// native memory tracking level
enum NMTLevel {
NMT_off, // native memory tracking is off
@@ -276,109 +328,81 @@ class MemTracker : AllStatic {
// record a 'malloc' call
static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
address pc = 0, Thread* thread = NULL) {
- if (is_on() && NMT_CAN_TRACK(flags)) {
- assert(size > 0, "Sanity check");
- create_memory_record(addr, (flags|MemPointerRecord::malloc_tag()), size, pc, thread);
- }
+ Tracker tkr(Tracker::Malloc, thread);
+ tkr.record(addr, size, flags, pc);
}
// record a 'free' call
static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
- if (is_on() && NMT_CAN_TRACK(flags)) {
- create_memory_record(addr, MemPointerRecord::free_tag(), 0, 0, thread);
- }
- }
- // record a 'realloc' call
- static inline void record_realloc(address old_addr, address new_addr, size_t size,
- MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
- if (is_on() && NMT_CAN_TRACK(flags)) {
- assert(size > 0, "Sanity check");
- record_free(old_addr, flags, thread);
- record_malloc(new_addr, size, flags, pc, thread);
- }
+ Tracker tkr(Tracker::Free, thread);
+ tkr.record(addr, 0, flags, DEBUG_CALLER_PC);
}
- // record arena memory size
static inline void record_arena_size(address addr, size_t size) {
- // we add a positive offset to arena address, so we can have arena memory record
- // sorted after arena record
- if (is_on() && !UseMallocOnly) {
- assert(addr != NULL, "Sanity check");
- create_memory_record((addr + sizeof(void*)), MemPointerRecord::arena_size_tag(), size,
- DEBUG_CALLER_PC, NULL);
- }
+ Tracker tkr(Tracker::ArenaSize);
+ tkr.record(addr, size);
}
// record a virtual memory 'reserve' call
static inline void record_virtual_memory_reserve(address addr, size_t size,
- address pc = 0, Thread* thread = NULL) {
- if (is_on()) {
- assert(size > 0, "Sanity check");
- create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag(),
- size, pc, thread);
- }
+ MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
+ assert(size > 0, "Sanity check");
+ Tracker tkr(Tracker::Reserve, thread);
+ tkr.record(addr, size, flags, pc);
}
static inline void record_thread_stack(address addr, size_t size, Thread* thr,
address pc = 0) {
- if (is_on()) {
- assert(size > 0 && thr != NULL, "Sanity check");
- create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag() | mtThreadStack,
- size, pc, thr);
- create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag() | mtThreadStack,
- size, pc, thr);
- }
+ Tracker tkr(Tracker::StackAlloc, thr);
+ tkr.record(addr, size, mtThreadStack, pc);
}
static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
- if (is_on()) {
- assert(size > 0 && thr != NULL, "Sanity check");
- assert(!thr->is_Java_thread(), "too early");
- create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag() | mtThreadStack,
- size, DEBUG_CALLER_PC, thr);
- create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag() | mtThreadStack,
- size, DEBUG_CALLER_PC, thr);
- }
+ Tracker tkr(Tracker::StackRelease, thr);
+ tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC);
}
// record a virtual memory 'commit' call
static inline void record_virtual_memory_commit(address addr, size_t size,
address pc, Thread* thread = NULL) {
- if (is_on()) {
- assert(size > 0, "Sanity check");
- create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag(),
- size, pc, thread);
- }
+ Tracker tkr(Tracker::Commit, thread);
+ tkr.record(addr, size, mtNone, pc);
}
- // record a virtual memory 'uncommit' call
- static inline void record_virtual_memory_uncommit(address addr, size_t size,
- Thread* thread = NULL) {
- if (is_on()) {
- assert(size > 0, "Sanity check");
- create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag(),
- size, DEBUG_CALLER_PC, thread);
- }
+ static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
+ MEMFLAGS flags, address pc, Thread* thread = NULL) {
+ Tracker tkr(Tracker::ReserveAndCommit, thread);
+ tkr.record(addr, size, flags, pc);
}
- // record a virtual memory 'release' call
static inline void record_virtual_memory_release(address addr, size_t size,
- Thread* thread = NULL) {
+ Thread* thread = NULL) {
if (is_on()) {
- assert(size > 0, "Sanity check");
- create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag(),
- size, DEBUG_CALLER_PC, thread);
+ Tracker tkr(Tracker::Release, thread);
+ tkr.record(addr, size);
}
}
// record memory type on virtual memory base address
static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
Thread* thread = NULL) {
- if (is_on()) {
- assert(base > 0, "wrong base address");
- assert((flags & (~mt_masks)) == 0, "memory type only");
- create_memory_record(base, (flags | MemPointerRecord::virtual_memory_type_tag()),
- 0, DEBUG_CALLER_PC, thread);
- }
+ Tracker tkr(Tracker::Type);
+ tkr.record(base, 0, flags);
+ }
+
+ // Get memory trackers for memory operations that can result in race conditions.
+ // The memory tracker has to be obtained before realloc, virtual memory uncommit
+ // and virtual memory release; call the tracker's record() method if the operation
+ // succeeded, or discard() to abort the tracking.
+ static inline Tracker get_realloc_tracker() {
+ return Tracker(Tracker::Realloc);
+ }
+
+ static inline Tracker get_virtual_memory_uncommit_tracker() {
+ return Tracker(Tracker::Uncommit);
+ }
+
+ static inline Tracker get_virtual_memory_release_tracker() {
+ return Tracker(Tracker::Release);
}
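
(Editorial note: a minimal usage sketch, not part of the patch, of the
pre-reserved tracker protocol described in the comment above. do_uncommit()
is a hypothetical stand-in for the real uncommit primitive being wrapped;
the record(addr, size) call matches the Release case earlier in this hunk.)

    MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
    bool res = do_uncommit(addr, size);  // hypothetical uncommit operation
    if (res) {
      tkr.record(addr, size);            // succeeded: write the record
    } else {
      tkr.discard();                     // failed: abort the tracking
    }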
@@ -444,6 +468,45 @@ class MemTracker : AllStatic {
static MemRecorder* get_pending_recorders();
static void delete_all_pending_recorders();
+ // write a memory tracking record in recorder
+ static void write_tracking_record(address addr, MEMFLAGS type,
+ size_t size, jint seq, address pc, JavaThread* thread);
+
+ static bool is_single_threaded_bootstrap() {
+ return _state == NMT_bootstrapping_single_thread;
+ }
+
+ static void check_NMT_load(Thread* thr) {
+ assert(thr != NULL, "Sanity check");
+ if (_slowdown_calling_thread && thr != _worker_thread) {
+#ifdef _WINDOWS
+ // On Windows, os::NakedYield() does not work as well
+ // as os::yield_all()
+ os::yield_all();
+#else
+      // On Solaris, os::yield_all() depends on os::sleep(),
+      // which requires a JavaThread in the _thread_in_vm state.
+      // Transitioning a thread to the _thread_in_vm state can be
+      // dangerous if the caller holds a lock, as it may deadlock
+      // with Threads_lock. So use NakedYield() instead.
+      //
+      // On Linux and BSD, the NakedYield() and yield_all()
+      // implementations are the same.
+ os::NakedYield();
+#endif
+ }
+ }
+
+ static void inc_pending_op_count() {
+ Atomic::inc(&_pending_op_count);
+ }
+
+ static void dec_pending_op_count() {
+ Atomic::dec(&_pending_op_count);
+ assert(_pending_op_count >= 0, "Sanity check");
+ }
+
+
private:
// retrieve a pooled memory record or create new one if there is not
// one available
@@ -522,6 +585,12 @@ class MemTracker : AllStatic {
// if NMT should slow down calling thread to allow
// worker thread to catch up
static volatile bool _slowdown_calling_thread;
+
+  // Pending memory op count.
+  // Certain memory ops need to pre-reserve a sequence number
+  // before the memory operation can happen, to avoid a race condition.
+  // See MemTracker::Tracker for details.
+ static volatile jint _pending_op_count;
};
#endif // !INCLUDE_NMT
diff --git a/src/share/vm/services/memoryManager.cpp b/src/share/vm/services/memoryManager.cpp
index 3996d2163..d5e54f5ff 100644
--- a/src/share/vm/services/memoryManager.cpp
+++ b/src/share/vm/services/memoryManager.cpp
@@ -61,6 +61,10 @@ MemoryManager* MemoryManager::get_code_cache_memory_manager() {
return (MemoryManager*) new CodeCacheMemoryManager();
}
+MemoryManager* MemoryManager::get_metaspace_memory_manager() {
+ return (MemoryManager*) new MetaspaceMemoryManager();
+}
+
GCMemoryManager* MemoryManager::get_copy_memory_manager() {
return (GCMemoryManager*) new CopyMemoryManager();
}
diff --git a/src/share/vm/services/memoryManager.hpp b/src/share/vm/services/memoryManager.hpp
index 370d830e9..99bd6d47d 100644
--- a/src/share/vm/services/memoryManager.hpp
+++ b/src/share/vm/services/memoryManager.hpp
@@ -56,6 +56,7 @@ public:
enum Name {
Abstract,
CodeCache,
+ Metaspace,
Copy,
MarkSweepCompact,
ParNew,
@@ -88,6 +89,7 @@ public:
// Static factory methods to get a memory manager of a specific type
static MemoryManager* get_code_cache_memory_manager();
+ static MemoryManager* get_metaspace_memory_manager();
static GCMemoryManager* get_copy_memory_manager();
static GCMemoryManager* get_msc_memory_manager();
static GCMemoryManager* get_parnew_memory_manager();
@@ -108,6 +110,14 @@ public:
const char* name() { return "CodeCacheManager"; }
};
+class MetaspaceMemoryManager : public MemoryManager {
+public:
+ MetaspaceMemoryManager() : MemoryManager() {}
+
+ MemoryManager::Name kind() { return MemoryManager::Metaspace; }
+ const char *name() { return "Metaspace Manager"; }
+};
+
class GCStatInfo : public ResourceObj {
private:
size_t _index;
diff --git a/src/share/vm/services/memoryPool.cpp b/src/share/vm/services/memoryPool.cpp
index e2895b1f8..cfae726cf 100644
--- a/src/share/vm/services/memoryPool.cpp
+++ b/src/share/vm/services/memoryPool.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
+#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
@@ -33,6 +34,7 @@
#include "services/memoryManager.hpp"
#include "services/memoryPool.hpp"
#include "utilities/macros.hpp"
+#include "utilities/globalDefinitions.hpp"
MemoryPool::MemoryPool(const char* name,
PoolType type,
@@ -256,3 +258,32 @@ MemoryUsage CodeHeapPool::get_memory_usage() {
return MemoryUsage(initial_size(), used, committed, maxSize);
}
+
+MetaspacePool::MetaspacePool() :
+ MemoryPool("Metaspace", NonHeap, 0, calculate_max_size(), true, false) { }
+
+MemoryUsage MetaspacePool::get_memory_usage() {
+ size_t committed = MetaspaceAux::committed_bytes();
+ return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
+}
+
+size_t MetaspacePool::used_in_bytes() {
+ return MetaspaceAux::allocated_used_bytes();
+}
+
+size_t MetaspacePool::calculate_max_size() const {
+ return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? MaxMetaspaceSize :
+ MemoryUsage::undefined_size();
+}
+
+CompressedKlassSpacePool::CompressedKlassSpacePool() :
+ MemoryPool("Compressed Class Space", NonHeap, 0, CompressedClassSpaceSize, true, false) { }
+
+size_t CompressedKlassSpacePool::used_in_bytes() {
+ return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
+}
+
+MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
+ size_t committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+ return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
+}
diff --git a/src/share/vm/services/memoryPool.hpp b/src/share/vm/services/memoryPool.hpp
index 826061853..4ec810a98 100644
--- a/src/share/vm/services/memoryPool.hpp
+++ b/src/share/vm/services/memoryPool.hpp
@@ -222,4 +222,19 @@ public:
size_t used_in_bytes() { return _codeHeap->allocated_capacity(); }
};
+class MetaspacePool : public MemoryPool {
+ size_t calculate_max_size() const;
+ public:
+ MetaspacePool();
+ MemoryUsage get_memory_usage();
+ size_t used_in_bytes();
+};
+
+class CompressedKlassSpacePool : public MemoryPool {
+ public:
+ CompressedKlassSpacePool();
+ MemoryUsage get_memory_usage();
+ size_t used_in_bytes();
+};
+
#endif // SHARE_VM_SERVICES_MEMORYPOOL_HPP
diff --git a/src/share/vm/services/memoryService.cpp b/src/share/vm/services/memoryService.cpp
index 75693dbcf..6508cd208 100644
--- a/src/share/vm/services/memoryService.cpp
+++ b/src/share/vm/services/memoryService.cpp
@@ -35,6 +35,7 @@
#include "memory/memRegion.hpp"
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/globals.hpp"
#include "runtime/javaCalls.hpp"
#include "services/classLoadingService.hpp"
#include "services/lowMemoryDetector.hpp"
@@ -60,9 +61,11 @@ GrowableArray<MemoryPool*>* MemoryService::_pools_list =
GrowableArray<MemoryManager*>* MemoryService::_managers_list =
new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryManager*>(init_managers_list_size, true);
-GCMemoryManager* MemoryService::_minor_gc_manager = NULL;
-GCMemoryManager* MemoryService::_major_gc_manager = NULL;
-MemoryPool* MemoryService::_code_heap_pool = NULL;
+GCMemoryManager* MemoryService::_minor_gc_manager = NULL;
+GCMemoryManager* MemoryService::_major_gc_manager = NULL;
+MemoryPool* MemoryService::_code_heap_pool = NULL;
+MemoryPool* MemoryService::_metaspace_pool = NULL;
+MemoryPool* MemoryService::_compressed_class_pool = NULL;
class GcThreadCountClosure: public ThreadClosure {
private:
@@ -399,6 +402,22 @@ void MemoryService::add_code_heap_memory_pool(CodeHeap* heap) {
_managers_list->append(mgr);
}
+void MemoryService::add_metaspace_memory_pools() {
+ MemoryManager* mgr = MemoryManager::get_metaspace_memory_manager();
+
+ _metaspace_pool = new MetaspacePool();
+ mgr->add_pool(_metaspace_pool);
+ _pools_list->append(_metaspace_pool);
+
+ if (UseCompressedClassPointers) {
+ _compressed_class_pool = new CompressedKlassSpacePool();
+ mgr->add_pool(_compressed_class_pool);
+ _pools_list->append(_compressed_class_pool);
+ }
+
+ _managers_list->append(mgr);
+}
+
MemoryManager* MemoryService::get_memory_manager(instanceHandle mh) {
for (int i = 0; i < _managers_list->length(); i++) {
MemoryManager* mgr = _managers_list->at(i);
diff --git a/src/share/vm/services/memoryService.hpp b/src/share/vm/services/memoryService.hpp
index 44cf62ea3..8ae6994bf 100644
--- a/src/share/vm/services/memoryService.hpp
+++ b/src/share/vm/services/memoryService.hpp
@@ -73,6 +73,9 @@ private:
// Code heap memory pool
static MemoryPool* _code_heap_pool;
+ static MemoryPool* _metaspace_pool;
+ static MemoryPool* _compressed_class_pool;
+
static void add_generation_memory_pool(Generation* gen,
MemoryManager* major_mgr,
MemoryManager* minor_mgr);
@@ -121,6 +124,7 @@ private:
public:
static void set_universe_heap(CollectedHeap* heap);
static void add_code_heap_memory_pool(CodeHeap* heap);
+ static void add_metaspace_memory_pools();
static MemoryPool* get_memory_pool(instanceHandle pool);
static MemoryManager* get_memory_manager(instanceHandle mgr);
diff --git a/src/share/vm/services/memoryUsage.hpp b/src/share/vm/services/memoryUsage.hpp
index efc6f2966..9027f8e76 100644
--- a/src/share/vm/services/memoryUsage.hpp
+++ b/src/share/vm/services/memoryUsage.hpp
@@ -63,10 +63,12 @@ public:
size_t committed() const { return _committed; }
size_t max_size() const { return _maxSize; }
+ static size_t undefined_size() { return (size_t) -1; }
+
inline static jlong convert_to_jlong(size_t val) {
// In the 64-bit vm, a size_t can overflow a jlong (which is signed).
jlong ret;
- if (val == (size_t)-1) {
+ if (val == undefined_size()) {
ret = -1L;
} else {
NOT_LP64(ret = val;)
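
(Editorial note: a two-line illustration, not part of the patch, of the
sentinel round-trip the hunk above encodes. undefined_size() is (size_t)-1,
which convert_to_jlong() maps to the -1L that the java.lang.management API
reports as "undefined".)

    size_t max = MemoryUsage::undefined_size();      // (size_t)-1 sentinel
    jlong  j   = MemoryUsage::convert_to_jlong(max); // -1L, i.e. "undefined"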
diff --git a/src/share/vm/services/threadService.cpp b/src/share/vm/services/threadService.cpp
index 03289c7e9..222ae383c 100644
--- a/src/share/vm/services/threadService.cpp
+++ b/src/share/vm/services/threadService.cpp
@@ -327,27 +327,30 @@ DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(bool concurrent_locks)
while (waitingToLockMonitor != NULL || waitingToLockBlocker != NULL) {
cycle->add_thread(currentThread);
if (waitingToLockMonitor != NULL) {
- currentThread = Threads::owning_thread_from_monitor_owner(
- (address)waitingToLockMonitor->owner(),
- false /* no locking needed */);
- if (currentThread == NULL) {
- // This function is called at a safepoint so the JavaThread
- // that owns waitingToLockMonitor should be findable, but
- // if it is not findable, then the previous currentThread is
- // blocked permanently. We record this as a deadlock.
- num_deadlocks++;
-
- cycle->set_deadlock(true);
-
- // add this cycle to the deadlocks list
- if (deadlocks == NULL) {
- deadlocks = cycle;
- } else {
- last->set_next(cycle);
+ address currentOwner = (address)waitingToLockMonitor->owner();
+ if (currentOwner != NULL) {
+ currentThread = Threads::owning_thread_from_monitor_owner(
+ currentOwner,
+ false /* no locking needed */);
+ if (currentThread == NULL) {
+ // This function is called at a safepoint so the JavaThread
+ // that owns waitingToLockMonitor should be findable, but
+ // if it is not findable, then the previous currentThread is
+ // blocked permanently. We record this as a deadlock.
+ num_deadlocks++;
+
+ cycle->set_deadlock(true);
+
+ // add this cycle to the deadlocks list
+ if (deadlocks == NULL) {
+ deadlocks = cycle;
+ } else {
+ last->set_next(cycle);
+ }
+ last = cycle;
+ cycle = new DeadlockCycle();
+ break;
}
- last = cycle;
- cycle = new DeadlockCycle();
- break;
}
} else {
if (concurrent_locks) {
diff --git a/src/share/vm/shark/sharkBuilder.cpp b/src/share/vm/shark/sharkBuilder.cpp
index f9c22bd16..8e83dd18e 100644
--- a/src/share/vm/shark/sharkBuilder.cpp
+++ b/src/share/vm/shark/sharkBuilder.cpp
@@ -471,7 +471,7 @@ Value* SharkBuilder::CreateInlineOop(jobject object, const char* name) {
Value* SharkBuilder::CreateInlineMetadata(Metadata* metadata, llvm::PointerType* type, const char* name) {
assert(metadata != NULL, "inlined metadata must not be NULL");
- assert(metadata->is_metadata(), "sanity check");
+ assert(metadata->is_metaspace_object(), "sanity check");
return CreateLoad(
CreateIntToPtr(
code_buffer_address(code_buffer()->inline_Metadata(metadata)),
diff --git a/src/share/tools/launcher/wildcard.h b/src/share/vm/trace/noTraceBackend.hpp
index 5cdd9312c..475548726 100644
--- a/src/share/tools/launcher/wildcard.h
+++ b/src/share/vm/trace/noTraceBackend.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,14 +21,28 @@
* questions.
*
*/
+#ifndef SHARE_VM_TRACE_NOTRACEBACKEND_HPP
+#define SHARE_VM_TRACE_NOTRACEBACKEND_HPP
-#ifndef WILDCARD_H_
-#define WILDCARD_H_
+#include "prims/jni.h"
+
+typedef jlong TracingTime;
+typedef jlong RelativeTracingTime;
+
+class NoTraceBackend {
+public:
+ static TracingTime time() {
+ return 0;
+ }
+};
+
+class TraceThreadData {
+public:
+ TraceThreadData() {}
+};
+
+typedef NoTraceBackend Tracing;
-#ifdef EXPAND_CLASSPATH_WILDCARDS
-const char *JLI_WildcardExpandClasspath(const char *classpath);
-#else
-#define JLI_WildcardExpandClasspath(s) (s)
#endif
-#endif /* include guard */
+
diff --git a/src/share/vm/trace/trace.dtd b/src/share/vm/trace/trace.dtd
new file mode 100644
index 000000000..a61984aaa
--- /dev/null
+++ b/src/share/vm/trace/trace.dtd
@@ -0,0 +1,86 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+ This code is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License version 2 only, as
+ published by the Free Software Foundation.
+
+ This code is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ version 2 for more details (a copy is included in the LICENSE file that
+ accompanied this code).
+
+ You should have received a copy of the GNU General Public License version
+ 2 along with this work; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ or visit www.oracle.com if you need additional information or have any
+ questions.
+
+-->
+
+<!ELEMENT trace (xi:include, relation_decls, events*, xi:include, xi:include)>
+<!ELEMENT types (content_types, primary_types)>
+<!ELEMENT content_types (content_type|struct_type)*>
+<!ELEMENT content_type (value|structvalue|structarray|array)*>
+<!ELEMENT struct_type (value*)>
+<!ELEMENT primary_types (primary_type*)>
+<!ELEMENT primary_type EMPTY>
+<!ELEMENT relation_decls (relation_decl*)>
+<!ELEMENT relation_decl EMPTY>
+<!ELEMENT events (event|struct)*>
+<!ELEMENT event (value|structvalue)*>
+<!ELEMENT struct (value|structvalue)*>
+<!ELEMENT value EMPTY>
+<!ELEMENT structvalue EMPTY>
+<!ELEMENT structarray EMPTY>
+<!ELEMENT array EMPTY>
+<!ATTLIST content_type id CDATA #REQUIRED
+ hr_name CDATA #REQUIRED
+ type CDATA #REQUIRED
+ jvm_type CDATA #IMPLIED
+ builtin_type CDATA #IMPLIED>
+<!ATTLIST struct_type id CDATA #REQUIRED>
+<!ATTLIST structarray type CDATA #REQUIRED
+ field CDATA #REQUIRED
+ label CDATA #REQUIRED>
+<!ATTLIST primary_type symbol CDATA #REQUIRED
+ datatype CDATA #REQUIRED
+ contenttype CDATA #REQUIRED
+ type CDATA #REQUIRED
+ sizeop CDATA #REQUIRED>
+<!ATTLIST relation_decl id CDATA #REQUIRED
+ uri CDATA #REQUIRED>
+<!ATTLIST event id CDATA #REQUIRED
+ path CDATA #REQUIRED
+ label CDATA #REQUIRED
+ description CDATA #IMPLIED
+ has_thread CDATA "false"
+ ignore_check CDATA "false"
+ has_stacktrace CDATA "false"
+ is_instant CDATA "false"
+ is_constant CDATA "false"
+ is_requestable CDATA "false">
+<!ATTLIST struct id CDATA #REQUIRED>
+<!ATTLIST value type CDATA #REQUIRED
+ field CDATA #REQUIRED
+ label CDATA #REQUIRED
+ description CDATA #IMPLIED
+ relation CDATA "NOT_AVAILABLE"
+ transition CDATA "NONE">
+<!ATTLIST array type CDATA #REQUIRED
+ field CDATA #REQUIRED
+ label CDATA #REQUIRED
+ description CDATA #IMPLIED>
+<!ATTLIST structarray type CDATA #REQUIRED
+ field CDATA #REQUIRED
+ label CDATA #REQUIRED
+ description CDATA #IMPLIED>
+<!ATTLIST structvalue type CDATA #REQUIRED
+ field CDATA #REQUIRED
+ label CDATA #REQUIRED
+ description CDATA #IMPLIED>
diff --git a/src/share/vm/trace/trace.xml b/src/share/vm/trace/trace.xml
new file mode 100644
index 000000000..6b1d9a885
--- /dev/null
+++ b/src/share/vm/trace/trace.xml
@@ -0,0 +1,367 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+ This code is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License version 2 only, as
+ published by the Free Software Foundation.
+
+ This code is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ version 2 for more details (a copy is included in the LICENSE file that
+ accompanied this code).
+
+ You should have received a copy of the GNU General Public License version
+ 2 along with this work; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ or visit www.oracle.com if you need additional information or have any
+ questions.
+
+-->
+
+
+<!DOCTYPE trace SYSTEM "trace.dtd" [
+<!ENTITY % xinclude SYSTEM "xinclude.mod">
+%xinclude;
+]>
+
+<trace>
+ <xi:include href="tracetypes.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+
+ <relation_decls>
+ <relation_decl id="GC_ID" uri="vm/gc/id"/>
+ <relation_decl id="COMP_ID" uri="vm/compiler/id"/>
+ <relation_decl id="SWEEP_ID" uri="vm/code_sweeper/id"/>
+ <relation_decl id="JAVA_MONITOR_ADDRESS" uri="java/monitor/address"/>
+ </relation_decls>
+
+<!--
+
+Events in the JVM are timed by default (it's the more common case).
+Perhaps a little strange. Might change.
+
+EVENTS
+
+Declared with the 'event' tag.
+
+<value fields> can be one or more of
+ value - a simple primitive or constant type value
+ structvalue - value is a sub-struct. This type must be previously defined
+ with 'struct'
+All of these require you to declare the type, field and label of the field. They also
+accept an optional description of the field. If the meaning of the field is not obvious
+from the label, you should provide a description. If an event is not actually
+meant for end users, however, you should probably _not_ write descriptions at all, since
+you might just add more concepts the user has no notion of or interest in.
+
+Events should be modeled after the conceptual process you are expressing, _NOT_
+after whatever data structures you might use inside the JVM to implement that process.
+
+
+STRUCT
+
+Declared with the 'struct' tag.
+
+Declares a structure type that can be used in other events.
+
+-->
+
+ <events>
+ <event id="ThreadStart" path="java/thread_start" label="Java Thread Start"
+ has_thread="true" is_instant="true">
+ <value type="JAVALANGTHREAD" field="javalangthread" label="Java Thread"/>
+ </event>
+
+ <event id="ThreadEnd" path="java/thread_end" label="Java Thread End"
+ has_thread="true" is_instant="true">
+ <value type="JAVALANGTHREAD" field="javalangthread" label="Java Thread"/>
+ </event>
+
+ <event id="ThreadSleep" path="java/thread_sleep" label="Java Thread Sleep"
+ has_thread="true" has_stacktrace="true" is_instant="false">
+ <value type="MILLIS" field="time" label="Sleep Time"/>
+ </event>
+
+ <event id="ThreadPark" path="java/thread_park" label="Java Thread Park"
+ has_thread="true" has_stacktrace="true" is_instant="false">
+ <value type="CLASS" field="klass" label="Class Parked On"/>
+ <value type="MILLIS" field="timeout" label="Park Timeout"/>
+ <value type="ADDRESS" field="address" label="Address of Object Parked" relation="JAVA_MONITOR_ADDRESS"/>
+ </event>
+
+ <event id="JavaMonitorEnter" path="java/monitor_enter" label="Java Monitor Blocked"
+ has_thread="true" has_stacktrace="true" is_instant="false">
+ <value type="CLASS" field="klass" label="Monitor Class"/>
+ <value type="JAVALANGTHREAD" field="previousOwner" label="Previous Monitor Owner"/>
+ <value type="ADDRESS" field="address" label="Monitor Address" relation="JAVA_MONITOR_ADDRESS"/>
+ </event>
+
+ <event id="JavaMonitorWait" path="java/monitor_wait" label="Java Monitor Wait" description="Waiting on a Java monitor"
+ has_thread="true" has_stacktrace="true" is_instant="false">
+ <value type="CLASS" field="klass" label="Monitor Class" description="Class of object waited on"/>
+ <value type="OSTHREAD" field="notifier" label="Notifier Thread" description="Notifying Thread"/>
+ <value type="MILLIS" field="timeout" label="Timeout" description="Maximum wait time"/>
+ <value type="BOOLEAN" field="timedOut" label="Timed Out" description="Wait has been timed out"/>
+ <value type="ADDRESS" field="address" label="Monitor Address" description="Address of object waited on" relation="JAVA_MONITOR_ADDRESS"/>
+ </event>
+
+ <event id="ClassLoad" path="vm/class/load" label="Class Load"
+ has_thread="true" has_stacktrace="true" is_instant="false">
+ <value type="CLASS" field="loadedClass" label="Loaded Class"/>
+ <value type="CLASS" field="definingClassLoader" label="Defining Class Loader"/>
+ <value type="CLASS" field="initiatingClassLoader" label="Initiating Class Loader"/>
+ </event>
+
+ <event id="ClassUnload" path="vm/class/unload" label="Class Unload"
+ has_thread="true" is_instant="true">
+ <value type="CLASS" field="unloadedClass" label="Unloaded Class"/>
+ <value type="CLASS" field="definingClassLoader" label="Defining Class Loader"/>
+ </event>
+
+ <struct id="VirtualSpace">
+ <value type="ADDRESS" field="start" label="Start Address" description="Start address of the virtual space" />
+ <value type="ADDRESS" field="committedEnd" label="Committed End Address" description="End address of the committed memory for the virtual space" />
+ <value type="BYTES64" field="committedSize" label="Committed Size" description="Size of the committed memory for the virtual space" />
+ <value type="ADDRESS" field="reservedEnd" label="Reserved End Address" description="End address of the reserved memory for the virtual space" />
+ <value type="BYTES64" field="reservedSize" label="Reserved Size" description="Size of the reserved memory for the virtual space" />
+ </struct>
+
+ <struct id="ObjectSpace">
+ <value type="ADDRESS" field="start" label="Start Address" description="Start address of the space" />
+ <value type="ADDRESS" field="end" label="End Address" description="End address of the space" />
+ <value type="BYTES64" field="used" label="Used" description="Bytes allocated by objects in the space" />
+ <value type="BYTES64" field="size" label="Size" description="Size of the space" />
+ </struct>
+
+ <event id="GCHeapSummary" path="vm/gc/heap/summary" label="Heap Summary" is_instant="true">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ <value type="GCWHEN" field="when" label="When" />
+ <structvalue type="VirtualSpace" field="heapSpace" label="Heap Space"/>
+ <value type="BYTES64" field="heapUsed" label="Heap Used" description="Bytes allocated by objects in the heap"/>
+ </event>
+
+ <struct id="MetaspaceSizes">
+ <value type="BYTES64" field="capacity" label="Capacity" description="Total available memory to allocate in" />
+ <value type="BYTES64" field="used" label="Used" description="Bytes allocated by objects in the space" />
+ <value type="BYTES64" field="reserved" label="Reserved" description="Reserved memory for this space" />
+ </struct>
+
+ <event id="MetaspaceSummary" path="vm/gc/heap/metaspace_summary" label="Metaspace Summary" is_instant="true">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ <value type="GCWHEN" field="when" label="When" />
+ <structvalue type="MetaspaceSizes" field="metaspace" label="Total"/>
+ <structvalue type="MetaspaceSizes" field="dataSpace" label="Data"/>
+ <structvalue type="MetaspaceSizes" field="classSpace" label="Class"/>
+ </event>
+
+ <event id="PSHeapSummary" path="vm/gc/heap/ps_summary" label="Parallel Scavenge Heap Summary" is_instant="true">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ <value type="GCWHEN" field="when" label="When" />
+
+ <structvalue type="VirtualSpace" field="oldSpace" label="Old Space"/>
+ <structvalue type="ObjectSpace" field="oldObjectSpace" label="Old Object Space"/>
+
+ <structvalue type="VirtualSpace" field="youngSpace" label="Young Space"/>
+ <structvalue type="ObjectSpace" field="edenSpace" label="Eden Space"/>
+ <structvalue type="ObjectSpace" field="fromSpace" label="From Space"/>
+ <structvalue type="ObjectSpace" field="toSpace" label="To Space"/>
+ </event>
+
+ <event id="GCGarbageCollection" path="vm/gc/collector/garbage_collection" label="Garbage Collection"
+ description="Garbage collection performed by the JVM">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID" />
+ <value type="GCNAME" field="name" label="Name" description="The name of the Garbage Collector" />
+ <value type="GCCAUSE" field="cause" label="Cause" description="The reason for triggering this Garbage Collection" />
+ <value type="RELATIVE_TICKS" field="sumOfPauses" label="Sum of Pauses" description="Sum of all the times in which Java execution was paused during the garbage collection" />
+ <value type="RELATIVE_TICKS" field="longestPause" label="Longest Pause" description="Longest individual pause during the garbage collection" />
+ </event>
+
+ <event id="GCParallelOld" path="vm/gc/collector/parold_garbage_collection" label="Parallel Old Garbage Collection"
+ description="Extra information specific to Parallel Old Garbage Collections">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID" />
+ <value type="ADDRESS" field="densePrefix" label="Dense Prefix" description="The address of the dense prefix, used when compacting" />
+ </event>
+
+ <event id="GCYoungGarbageCollection" path="vm/gc/collector/young_garbage_collection" label="Young Garbage Collection"
+ description="Extra information specific to Young Garbage Collections">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID" />
+ <value type="UINT" field="tenuringThreshold" label="Tenuring Threshold" />
+ </event>
+
+ <event id="GCOldGarbageCollection" path="vm/gc/collector/old_garbage_collection" label="Old Garbage Collection"
+ description="Extra information specific to Old Garbage Collections">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ </event>
+
+ <event id="GCG1GarbageCollection" path="vm/gc/collector/g1_garbage_collection" label="G1 Garbage Collection"
+ description="Extra information specific to G1 Garbage Collections">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ <value type="G1YCTYPE" field="type" label="Type" />
+ </event>
+
+ <event id="EvacuationInfo" path="vm/gc/detailed/evacuation_info" label="Evacuation Information" is_instant="true">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ <value type="UINT" field="cSetRegions" label="Collection Set Regions"/>
+ <value type="BYTES64" field="cSetUsedBefore" label="Collection Set Before" description="Memory usage before GC in the collection set regions"/>
+ <value type="BYTES64" field="cSetUsedAfter" label="Collection Set After" description="Memory usage after GC in the collection set regions"/>
+ <value type="UINT" field="allocationRegions" label="Allocation Regions" description="Regions chosen as allocation regions during evacuation (includes survivors and old space regions)"/>
+ <value type="BYTES64" field="allocRegionsUsedBefore" label="Alloc Regions Before" description="Memory usage before GC in allocation regions"/>
+ <value type="BYTES64" field="allocRegionsUsedAfter" label="Alloc Regions After" description="Memory usage after GC in allocation regions"/>
+ <value type="BYTES64" field="bytesCopied" label="Bytes Copied"/>
+ <value type="UINT" field="regionsFreed" label="Regions Freed"/>
+ </event>
+
+ <event id="GCReferenceStatistics" path="vm/gc/reference/statistics"
+ label="GC Reference Statistics" is_instant="true"
+ description="Total count of processed references during GC">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ <value type="REFERENCETYPE" field="type" label="Type" />
+ <value type="ULONG" field="count" label="Total Count" />
+ </event>
+
+ <struct id="CopyFailed">
+ <value type="ULONG" field="objectCount" label="Object Count"/>
+ <value type="BYTES64" field="firstSize" label="First Failed Object Size"/>
+ <value type="BYTES64" field="smallestSize" label="Smallest Failed Object Size"/>
+ <value type="BYTES64" field="totalSize" label="Total Object Size"/>
+ </struct>
+
+ <event id="ObjectCountAfterGC" path="vm/gc/detailed/object_count_after_gc" is_instant="true" label="Object Count after GC">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID" />
+ <value type="CLASS" field="class" label="Class" />
+ <value type="LONG" field="count" label="Count" />
+ <value type="BYTES64" field="totalSize" label="Total Size" />
+ </event>
+
+ <event id="PromotionFailed" path="vm/gc/detailed/promotion_failed" label="Promotion Failed" is_instant="true"
+ description="Promotion of an object failed">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ <structvalue type="CopyFailed" field="data" label="Data"/>
+ <value type="OSTHREAD" field="thread" label="Running thread"/>
+ </event>
+
+ <event id="EvacuationFailed" path="vm/gc/detailed/evacuation_failed" label="Evacuation Failed" is_instant="true"
+ description="Evacuation of an object failed">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ <structvalue type="CopyFailed" field="data" label="Data"/>
+ </event>
+
+ <event id="ConcurrentModeFailure" path="vm/gc/detailed/concurrent_mode_failure" label="Concurrent Mode Failure"
+ is_instant="true" description="Concurrent Mode failed">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ </event>
+
+ <event id="GCPhasePause" path="vm/gc/phases/pause" label="GC Phase Pause">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ <value type="UTF8" field="name" label="Name" />
+ </event>
+
+ <event id="GCPhasePauseLevel1" path="vm/gc/phases/pause_level_1" label="GC Phase Pause Level 1">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ <value type="UTF8" field="name" label="Name" />
+ </event>
+
+ <event id="GCPhasePauseLevel2" path="vm/gc/phases/pause_level_2" label="GC Phase Pause Level 2">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ <value type="UTF8" field="name" label="Name" />
+ </event>
+
+ <event id="GCPhasePauseLevel3" path="vm/gc/phases/pause_level_3" label="GC Phase Pause Level 3">
+ <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+ <value type="UTF8" field="name" label="Name" />
+ </event>
+
+ <!-- Compiler events -->
+
+ <event id="Compilation" path="vm/compiler/compilation" label="Compilation"
+ has_thread="true" is_requestable="false" is_constant="false">
+ <value type="METHOD" field="method" label="Java Method"/>
+ <value type="UINT" field="compileID" label="Compilation ID" relation="COMP_ID"/>
+ <value type="USHORT" field="compileLevel" label="Compilation Level"/>
+ <value type="BOOLEAN" field="succeded" label="Succeeded"/>
+ <value type="BOOLEAN" field="isOsr" label="On Stack Replacement"/>
+ <value type="BYTES" field="codeSize" label="Compiled Code Size"/>
+ <value type="BYTES" field="inlinedBytes" label="Inlined Code Size"/>
+ </event>
+
+ <event id="CompilerPhase" path="vm/compiler/phase" label="Compiler Phase"
+ has_thread="true" is_requestable="false" is_constant="false">
+ <value type="COMPILERPHASETYPE" field="phase" label="Compile Phase"/>
+ <value type="UINT" field="compileID" label="Compilation ID" relation="COMP_ID"/>
+ <value type="USHORT" field="phaseLevel" label="Phase Level"/>
+ </event>
+
+ <event id="CompilerFailure" path="vm/compiler/failure" label="Compilation Failure"
+ has_thread="true" is_requestable="false" is_constant="false" is_instant="true">
+ <value type="UTF8" field="failure" label="Message"/>
+ <value type="UINT" field="compileID" label="Compilation ID" relation="COMP_ID"/>
+ </event>
+
+ <!-- Code sweeper events -->
+
+ <event id="SweepCodeCache" path="vm/code_sweeper/sweep" label="Sweep Code Cache"
+ has_thread="true" is_requestable="false" is_constant="false">
+ <value type="INTEGER" field="sweepIndex" label="Sweep Index" relation="SWEEP_ID"/>
+ <value type="USHORT" field="sweepFractionIndex" label="Fraction Index"/>
+ <value type="UINT" field="sweptCount" label="Methods Swept"/>
+ <value type="UINT" field="flushedCount" label="Methods Flushed"/>
+ <value type="UINT" field="markedCount" label="Methods Reclaimed"/>
+ <value type="UINT" field="zombifiedCount" label="Methods Zombified"/>
+ </event>
+
+ <event id="CleanCodeCache" path="vm/code_sweeper/clean" label="Clean Code Cache"
+ description="Clean code cache from oldest methods"
+ has_thread="true" is_requestable="false" is_constant="false">
+ <value type="UINT" field="disconnectedCount" label="Methods Disconnected"/>
+ <value type="UINT" field="madeNonEntrantCount" label="Methods Made Non-Entrant"/>
+ </event>
+
+ <!-- Code cache events -->
+
+ <event id="CodeCacheFull" path="vm/code_cache/full" label="Code Cache Full"
+ has_thread="true" is_requestable="false" is_constant="false" is_instant="true">
+ <value type="ADDRESS" field="startAddress" label="Start Address"/>
+ <value type="ADDRESS" field="commitedTopAddress" label="Commited Top"/>
+ <value type="ADDRESS" field="reservedTopAddress" label="Reserved Top"/>
+ <value type="INTEGER" field="entryCount" label="Entries"/>
+ <value type="INTEGER" field="methodCount" label="Methods"/>
+ <value type="INTEGER" field="adaptorCount" label="Adaptors"/>
+ <value type="BYTES64" field="unallocatedCapacity" label="Unallocated"/>
+ <value type="INTEGER" field="fullCount" label="Full Count"/>
+ </event>
+
+ <event id="ExecuteVMOperation" path="vm/runtime/execute_vm_operation" label="VM Operation"
+ description="Execution of a VM Operation" has_thread="true">
+ <value type="VMOPERATIONTYPE" field="operation" label="Operation" />
+ <value type="BOOLEAN" field="safepoint" label="At Safepoint" description="If the operation occured at a safepoint."/>
+ <value type="BOOLEAN" field="blocking" label="Caller Blocked" description="If the calling thread was blocked until the operation was complete."/>
+ <value type="OSTHREAD" field="caller" label="Caller" transition="FROM" description="Thread requesting operation. If non-blocking, will be set to 0 indicating thread is unknown."/>
+ </event>
+
+ <!-- Allocation events -->
+ <event id="AllocObjectInNewTLAB" path="java/object_alloc_in_new_TLAB" label="Allocation in new TLAB"
+ description="Allocation in new Thread Local Allocation Buffer" has_thread="true" has_stacktrace="true" is_instant="true">
+ <value type="CLASS" field="class" label="Class" description="Class of allocated object"/>
+ <value type="BYTES64" field="allocationSize" label="Allocation Size"/>
+ <value type="BYTES64" field="tlabSize" label="TLAB Size"/>
+ </event>
+
+ <event id="AllocObjectOutsideTLAB" path="java/object_alloc_outside_TLAB" label="Allocation outside TLAB"
+ description="Allocation outside Thread Local Allocation Buffers" has_thread="true" has_stacktrace="true" is_instant="true">
+ <value type="CLASS" field="class" label="Class" description="Class of allocated object"/>
+ <value type="BYTES64" field="allocationSize" label="Allocation Size"/>
+ </event>
+ </events>
+
+ <xi:include href="../../../closed/share/vm/trace/traceeventtypes.xml" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <xi:fallback/>
+ </xi:include>
+
+ <xi:include href="../../../closed/share/vm/trace/traceevents.xml" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <xi:fallback/>
+ </xi:include>
+</trace>
diff --git a/src/share/vm/trace/traceBackend.hpp b/src/share/vm/trace/traceBackend.hpp
new file mode 100644
index 000000000..cd348dfa4
--- /dev/null
+++ b/src/share/vm/trace/traceBackend.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_VM_TRACE_TRACEBACKEND_HPP
+#define SHARE_VM_TRACE_TRACEBACKEND_HPP
+
+#include "utilities/macros.hpp"
+
+#if INCLUDE_TRACE
+
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+#include "trace/traceTime.hpp"
+#include "tracefiles/traceEventIds.hpp"
+
+class TraceBackend {
+public:
+ static bool enabled(void) {
+ return EnableTracing;
+ }
+
+ static bool is_event_enabled(TraceEventId id) {
+ return enabled();
+ }
+
+ static TracingTime time() {
+ return os::elapsed_counter();
+ }
+
+ static TracingTime time_adjustment(jlong time) {
+ return time;
+ }
+
+ static void on_unloading_classes(void) {
+ }
+};
+
+class TraceThreadData {
+public:
+ TraceThreadData() {}
+};
+
+typedef TraceBackend Tracing;
+
+#else /* INCLUDE_TRACE */
+
+#include "trace/noTraceBackend.hpp"
+
+#endif /* INCLUDE_TRACE */
+#endif /* SHARE_VM_TRACE_TRACEBACKEND_HPP */
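
(Editorial note: callers see only the 'Tracing' typedef, so the same code
compiles against either backend; a one-line sketch, not part of the patch.)

    TracingTime start = Tracing::time();  // os::elapsed_counter() here,
                                          // 0 with the no-trace backend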
diff --git a/src/share/vm/trace/traceDataTypes.hpp b/src/share/vm/trace/traceDataTypes.hpp
new file mode 100644
index 000000000..31004d934
--- /dev/null
+++ b/src/share/vm/trace/traceDataTypes.hpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_TRACE_TRACEDATATYPES_HPP
+#define SHARE_VM_TRACE_TRACEDATATYPES_HPP
+
+#include <stddef.h>
+
+#include "utilities/globalDefinitions.hpp"
+
+enum {
+ CONTENT_TYPE_NONE = 0,
+ CONTENT_TYPE_BYTES = 1,
+ CONTENT_TYPE_EPOCHMILLIS = 2,
+ CONTENT_TYPE_MILLIS = 3,
+ CONTENT_TYPE_NANOS = 4,
+ CONTENT_TYPE_TICKS = 5,
+ CONTENT_TYPE_ADDRESS = 6,
+
+ CONTENT_TYPE_OSTHREAD,
+ CONTENT_TYPE_JAVALANGTHREAD,
+ CONTENT_TYPE_STACKTRACE,
+ CONTENT_TYPE_CLASS,
+ CONTENT_TYPE_PERCENTAGE,
+
+ JVM_CONTENT_TYPES_START = 30,
+ JVM_CONTENT_TYPES_END = 100
+};
+
+enum ReservedEvent {
+ EVENT_PRODUCERS,
+ EVENT_CHECKPOINT,
+ EVENT_BUFFERLOST,
+
+ NUM_RESERVED_EVENTS
+};
+
+typedef enum ReservedEvent ReservedEvent;
+
+typedef u8 classid;
+typedef u8 stacktraceid;
+typedef u8 methodid;
+typedef u8 fieldid;
+
+class TraceUnicodeString;
+
+#endif // SHARE_VM_TRACE_TRACEDATATYPES_HPP
+
diff --git a/src/share/vm/trace/traceEvent.hpp b/src/share/vm/trace/traceEvent.hpp
new file mode 100644
index 000000000..364c2df48
--- /dev/null
+++ b/src/share/vm/trace/traceEvent.hpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_TRACE_TRACEEVENT_HPP
+#define SHARE_VM_TRACE_TRACEEVENT_HPP
+
+enum EventStartTime {
+ UNTIMED,
+ TIMED
+};
+
+#include "utilities/macros.hpp"
+
+#if INCLUDE_TRACE
+
+#include "trace/traceBackend.hpp"
+#include "trace/tracing.hpp"
+#include "tracefiles/traceEventIds.hpp"
+#include "tracefiles/traceTypes.hpp"
+
+template<typename T>
+class TraceEvent : public StackObj {
+ protected:
+ jlong _startTime;
+ jlong _endTime;
+
+ private:
+ bool _started;
+#ifdef ASSERT
+ bool _committed;
+ bool _cancelled;
+ protected:
+ bool _ignore_check;
+#endif
+
+ public:
+ TraceEvent(EventStartTime timing=TIMED) :
+ _startTime(0),
+ _endTime(0),
+ _started(false)
+#ifdef ASSERT
+ ,
+ _committed(false),
+ _cancelled(false),
+ _ignore_check(false)
+#endif
+ {
+ if (T::is_enabled()) {
+ _started = true;
+ if (timing == TIMED && !T::isInstant) {
+ static_cast<T *>(this)->set_starttime(Tracing::time());
+ }
+ }
+ }
+
+ static bool is_enabled() {
+ return Tracing::is_event_enabled(T::eventId);
+ }
+
+ bool should_commit() {
+ return _started;
+ }
+
+ void ignoreCheck() {
+ DEBUG_ONLY(_ignore_check = true);
+ }
+
+ void commit() {
+ if (!should_commit()) {
+ cancel();
+ return;
+ }
+ if (_endTime == 0) {
+ static_cast<T*>(this)->set_endtime(Tracing::time());
+ }
+ if (static_cast<T*>(this)->should_write()) {
+ static_cast<T*>(this)->writeEvent();
+ }
+ set_commited();
+ }
+
+ void set_starttime(jlong time) {
+ _startTime = time;
+ }
+
+ void set_endtime(jlong time) {
+ _endTime = time;
+ }
+
+ TraceEventId id() const {
+ return T::eventId;
+ }
+
+ bool is_instant() const {
+ return T::isInstant;
+ }
+
+ bool is_requestable() const {
+ return T::isRequestable;
+ }
+
+ bool has_thread() const {
+ return T::hasThread;
+ }
+
+ bool has_stacktrace() const {
+ return T::hasStackTrace;
+ }
+
+ void cancel() {
+ assert(!_committed && !_cancelled, "event was already committed/cancelled");
+ DEBUG_ONLY(_cancelled = true);
+ }
+
+ void set_commited() {
+ assert(!_committed, "event has already been committed");
+ DEBUG_ONLY(_committed = true);
+ }
+
+ ~TraceEvent() {
+ if (_started) {
+ assert(_ignore_check || _committed || _cancelled, "event was not committed/cancelled");
+ }
+ }
+};
+
+#endif /* INCLUDE_TRACE */
+
+#endif /* SHARE_VM_TRACE_TRACEEVENT_HPP */
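
(Editorial note: a hedged sketch, not part of the patch, of the CRTP contract
this template expects. The subclass shape follows the generator in
traceEventClasses.xsl below; EventThreadSleep and set_time() correspond to the
ThreadSleep event declared in trace.xml above, assuming MILLIS maps to jlong.)

    // The generated subclass supplies the statics and hooks that
    // TraceEvent<T> reaches via static_cast<T*>(this).
    class EventThreadSleep : public TraceEvent<EventThreadSleep> {
     public:
      static const bool hasThread     = true;
      static const bool hasStackTrace = true;
      static const bool isInstant     = false;
      static const bool isRequestable = false;
      static const TraceEventId eventId = TraceThreadSleepEvent;
      void set_time(jlong value);   // generated setter for the 'time' field
      bool should_write() { return true; }
      void writeEvent();            // generated, prints via TraceStream
    };

    // Typical call site (millis is an assumed local):
    EventThreadSleep event;         // records the start time if enabled
    if (event.should_commit()) {
      event.set_time(millis);
      event.commit();               // sets end time, writes, marks committed
    }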
diff --git a/src/share/vm/trace/traceEventClasses.xsl b/src/share/vm/trace/traceEventClasses.xsl
new file mode 100644
index 000000000..70ac9c037
--- /dev/null
+++ b/src/share/vm/trace/traceEventClasses.xsl
@@ -0,0 +1,246 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+ This code is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License version 2 only, as
+ published by the Free Software Foundation.
+
+ This code is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ version 2 for more details (a copy is included in the LICENSE file that
+ accompanied this code).
+
+ You should have received a copy of the GNU General Public License version
+ 2 along with this work; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ or visit www.oracle.com if you need additional information or have any
+ questions.
+-->
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
+<xsl:import href="xsl_util.xsl"/>
+
+<xsl:template match="/">
+ <xsl:call-template name="file-header"/>
+
+#ifndef TRACEFILES_TRACEEVENTCLASSES_HPP
+#define TRACEFILES_TRACEEVENTCLASSES_HPP
+
+// Deliberately placed outside the INCLUDE_TRACE guard:
+// some parts of traceEvent.hpp are used even when
+// INCLUDE_TRACE is not set.
+
+#include "memory/resourceArea.hpp"
+#include "tracefiles/traceTypes.hpp"
+#include "trace/traceEvent.hpp"
+#include "utilities/macros.hpp"
+
+#if INCLUDE_TRACE
+
+
+#include "trace/traceStream.hpp"
+#include "utilities/ostream.hpp"
+
+ <xsl:apply-templates select="trace/events/struct" mode="trace"/>
+ <xsl:apply-templates select="trace/events/event" mode="trace"/>
+
+#else
+
+class TraceEvent {
+public:
+ TraceEvent() {}
+ void set_starttime(jlong time) const {}
+ void set_endtime(jlong time) const {}
+ bool should_commit() const { return false; }
+ void commit() const {}
+};
+
+ <xsl:apply-templates select="trace/events/struct" mode="empty"/>
+ <xsl:apply-templates select="trace/events/event" mode="empty"/>
+
+#endif
+
+#endif
+</xsl:template>
+
+<xsl:template match="struct" mode="trace">
+struct TraceStruct<xsl:value-of select="@id"/>
+{
+private:
+<xsl:apply-templates select="value" mode="write-fields"/>
+public:
+<xsl:apply-templates select="value" mode="write-setters"/>
+
+ void writeStruct(TraceStream&amp; ts) {
+<xsl:apply-templates select="value" mode="write-data"/>
+ }
+};
+
+</xsl:template>
+
+<xsl:template match="struct" mode="empty">
+struct TraceStruct<xsl:value-of select="@id"/>
+{
+public:
+<xsl:apply-templates select="value" mode="write-empty-setters"/>
+};
+</xsl:template>
+
+
+<xsl:template match="event" mode="empty">
+ <xsl:value-of select="concat('class Event', @id, ' : public TraceEvent')"/>
+{
+ public:
+<xsl:value-of select="concat(' Event', @id, '(bool ignore=true) {}')"/>
+<xsl:text>
+</xsl:text>
+
+<xsl:apply-templates select="value|structvalue|transition_value|relation" mode="write-empty-setters"/>
+};
+
+</xsl:template>
+
+
+<xsl:template match="event" mode="trace">
+ <xsl:value-of select="concat('class Event', @id, ' : public TraceEvent&lt;Event', @id, '&gt;')"/>
+{
+ public:
+ static const bool hasThread = <xsl:value-of select="@has_thread"/>;
+ static const bool hasStackTrace = <xsl:value-of select="@has_stacktrace"/>;
+ static const bool isInstant = <xsl:value-of select="@is_instant"/>;
+ static const bool isRequestable = <xsl:value-of select="@is_requestable"/>;
+ static const TraceEventId eventId = <xsl:value-of select="concat('Trace', @id, 'Event')"/>;
+
+ private:
+<xsl:apply-templates select="value|structvalue|transition_value|relation" mode="write-fields"/>
+
+ void writeEventContent(void) {
+ TraceStream ts(*tty);
+ ts.print("<xsl:value-of select="@label"/>: [");
+<xsl:apply-templates select="value|structvalue" mode="write-data"/>
+ ts.print("]\n");
+ }
+
+ public:
+<xsl:apply-templates select="value|structvalue|transition_value|relation" mode="write-setters"/>
+
+ bool should_write(void) {
+ return true;
+ }
+<xsl:text>
+
+</xsl:text>
+ <xsl:value-of select="concat(' Event', @id, '(EventStartTime timing=TIMED) : TraceEvent&lt;Event', @id, '&gt;(timing) {}', $newline)"/>
+ void writeEvent(void) {
+ ResourceMark rm;
+ if (UseLockedTracing) {
+ ttyLocker lock;
+ writeEventContent();
+ } else {
+ writeEventContent();
+ }
+ }
+};
+
+</xsl:template>
+
+<xsl:template match="value|transition_value|relation" mode="write-empty-setters">
+ <xsl:param name="cls"/>
+ <xsl:variable name="type" select="@type"/>
+ <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@type"/>
+ <xsl:value-of select="concat(' void set_', @field, '(', $wt, ' value) { }')"/>
+ <xsl:if test="position() != last()">
+ <xsl:text>
+</xsl:text>
+ </xsl:if>
+</xsl:template>
+
+<xsl:template match="structvalue" mode="write-empty-setters">
+ <xsl:param name="cls"/>
+ <xsl:value-of select="concat(' void set_', @field, '(const TraceStruct', @type, '&amp; value) { }')"/>
+ <xsl:if test="position() != last()">
+ <xsl:text>
+</xsl:text>
+ </xsl:if>
+</xsl:template>
+
+
+<xsl:template match="value[@type='TICKS']" mode="write-setters">
+#if INCLUDE_TRACE
+ <xsl:value-of select="concat('void set_', @field, '(jlong time) { _', @field, ' = time; }')"/>
+#else
+ <xsl:value-of select="concat('void set_', @field, '(jlong ignore) {}')"/>
+#endif
+</xsl:template>
+
+<xsl:template match="value[@type='RELATIVE_TICKS']" mode="write-setters">
+#if INCLUDE_TRACE
+ <xsl:value-of select="concat('void set_', @field, '(jlong time) { _', @field, ' = time; }')"/>
+#else
+ <xsl:value-of select="concat('void set_', @field, '(jlong ignore) {}')"/>
+#endif
+</xsl:template>
+
+<xsl:template match="value" mode="write-fields">
+ <xsl:variable name="type" select="@type"/>
+ <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@type"/>
+ <xsl:value-of select="concat(' ', $wt, ' _', @field, ';')"/>
+ <xsl:if test="position() != last()">
+ <xsl:text>
+</xsl:text>
+ </xsl:if>
+</xsl:template>
+
+<xsl:template match="structvalue" mode="write-fields">
+ <xsl:value-of select="concat(' TraceStruct', @type, ' _', @field, ';')"/>
+ <xsl:text>
+</xsl:text>
+</xsl:template>
+
+<xsl:template match="value|transition_value|relation" mode="write-setters">
+ <xsl:param name="cls"/>
+ <xsl:variable name="type" select="@type"/>
+ <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@type"/>
+ <xsl:value-of select="concat(' void set_', @field, '(', $wt, ' value) { this->_', @field, ' = value; }')"/>
+ <xsl:if test="position() != last()">
+ <xsl:text>
+</xsl:text>
+ </xsl:if>
+</xsl:template>
+
+<xsl:template match="structvalue" mode="write-setters">
+ <xsl:param name="cls"/>
+ <xsl:value-of select="concat(' void set_', @field, '(const TraceStruct', @type, '&amp; value) { this->_', @field, ' = value; }')"/>
+ <xsl:if test="position() != last()">
+ <xsl:text>
+</xsl:text>
+ </xsl:if>
+</xsl:template>
+
+<xsl:template match="value" mode="write-data">
+ <xsl:variable name="type" select="@type"/>
+ <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@writetype"/>
+ <xsl:value-of select="concat(' ts.print_val(&quot;', @label, '&quot;, _', @field, ');')"/>
+ <xsl:if test="position() != last()">
+ <xsl:text>
+ ts.print(", ");
+</xsl:text>
+ </xsl:if>
+</xsl:template>
+
+<xsl:template match="structvalue" mode="write-data">
+ <xsl:value-of select="concat(' _', @field, '.writeStruct(ts);')"/>
+ <xsl:if test="position() != last()">
+ <xsl:text>
+ ts.print(", ");
+</xsl:text>
+ </xsl:if>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/src/share/vm/trace/traceEventIds.xsl b/src/share/vm/trace/traceEventIds.xsl
new file mode 100644
index 000000000..737377cad
--- /dev/null
+++ b/src/share/vm/trace/traceEventIds.xsl
@@ -0,0 +1,74 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+ This code is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License version 2 only, as
+ published by the Free Software Foundation.
+
+ This code is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ version 2 for more details (a copy is included in the LICENSE file that
+ accompanied this code).
+
+ You should have received a copy of the GNU General Public License version
+ 2 along with this work; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ or visit www.oracle.com if you need additional information or have any
+ questions.
+-->
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
+<xsl:import href="xsl_util.xsl"/>
+
+<xsl:template match="/">
+ <xsl:call-template name="file-header"/>
+
+#ifndef TRACEFILES_JFREVENTIDS_HPP
+#define TRACEFILES_JFREVENTIDS_HPP
+
+#include "utilities/macros.hpp"
+
+#if INCLUDE_TRACE
+
+#include "trace/traceDataTypes.hpp"
+
+/**
+ * Enum of the event types in the JVM
+ */
+enum TraceEventId {
+  _traceeventbase = (NUM_RESERVED_EVENTS-1), // Make sure we start at the right index.
+
+ // Events -> enum entry
+<xsl:for-each select="trace/events/event">
+ <xsl:value-of select="concat(' Trace', @id, 'Event,', $newline)"/>
+</xsl:for-each>
+ MaxTraceEventId
+};
+
+/**
+ * Struct types in the JVM
+ */
+enum TraceStructId {
+<xsl:for-each select="trace/types/content_types/*">
+ <xsl:value-of select="concat(' Trace', @id, 'Struct,', $newline)"/>
+</xsl:for-each>
+<xsl:for-each select="trace/events/*">
+ <xsl:value-of select="concat(' Trace', @id, 'Struct,', $newline)"/>
+</xsl:for-each>
+ MaxTraceStructId
+};
+
+typedef enum TraceEventId TraceEventId;
+typedef enum TraceStructId TraceStructId;
+
+#endif
+#endif
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/src/share/vm/trace/traceMacros.hpp b/src/share/vm/trace/traceMacros.hpp
index 441031920..4776e1350 100644
--- a/src/share/vm/trace/traceMacros.hpp
+++ b/src/share/vm/trace/traceMacros.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,22 +25,15 @@
#ifndef SHARE_VM_TRACE_TRACE_MACRO_HPP
#define SHARE_VM_TRACE_TRACE_MACRO_HPP
-#define EVENT_BEGIN(type, name)
-#define EVENT_SET(name, field, value)
-#define EVENT_COMMIT(name, ...)
-#define EVENT_STARTED(name, time)
-#define EVENT_ENDED(name, time)
#define EVENT_THREAD_EXIT(thread)
-
-#define TRACE_ENABLED 0
+#define EVENT_THREAD_DESTRUCT(thread)
#define TRACE_INIT_ID(k)
-#define TRACE_BUFFER void*
+#define TRACE_DATA TraceThreadData
-#define TRACE_START() true
-#define TRACE_INITIALIZE() 0
+#define TRACE_START() JNI_OK
+#define TRACE_INITIALIZE() JNI_OK
-#define TRACE_SET_KLASS_TRACE_ID(x1, x2) do { } while (0)
#define TRACE_DEFINE_KLASS_METHODS typedef int ___IGNORED_hs_trace_type1
#define TRACE_DEFINE_KLASS_TRACE_ID typedef int ___IGNORED_hs_trace_type2
#define TRACE_DEFINE_OFFSET typedef int ___IGNORED_hs_trace_type3
diff --git a/src/share/vm/trace/traceStream.hpp b/src/share/vm/trace/traceStream.hpp
new file mode 100644
index 000000000..4acbbb884
--- /dev/null
+++ b/src/share/vm/trace/traceStream.hpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_TRACE_TRACESTREAM_HPP
+#define SHARE_VM_TRACE_TRACESTREAM_HPP
+
+#include "utilities/macros.hpp"
+
+#if INCLUDE_TRACE
+
+#include "oops/klass.hpp"
+#include "oops/method.hpp"
+#include "oops/symbol.hpp"
+#include "utilities/ostream.hpp"
+
+class TraceStream : public StackObj {
+ private:
+ outputStream& _st;
+
+ public:
+ TraceStream(outputStream& stream): _st(stream) {}
+
+ void print_val(const char* label, u1 val) {
+ _st.print("%s = "UINT32_FORMAT, label, val);
+ }
+
+ void print_val(const char* label, u2 val) {
+ _st.print("%s = "UINT32_FORMAT, label, val);
+ }
+
+ void print_val(const char* label, s2 val) {
+ _st.print("%s = "INT32_FORMAT, label, val);
+ }
+
+ void print_val(const char* label, u4 val) {
+ _st.print("%s = "UINT32_FORMAT, label, val);
+ }
+
+ void print_val(const char* label, s4 val) {
+ _st.print("%s = "INT32_FORMAT, label, val);
+ }
+
+ void print_val(const char* label, u8 val) {
+ _st.print("%s = "UINT64_FORMAT, label, val);
+ }
+
+ void print_val(const char* label, s8 val) {
+ _st.print("%s = "INT64_FORMAT, label, val);
+ }
+
+ void print_val(const char* label, bool val) {
+ _st.print("%s = %s", label, val ? "true" : "false");
+ }
+
+ void print_val(const char* label, float val) {
+ _st.print("%s = %f", label, val);
+ }
+
+ void print_val(const char* label, double val) {
+ _st.print("%s = %f", label, val);
+ }
+
+ // Caller is machine generated code located in traceEventClasses.hpp
+ // Event<TraceId>::writeEvent() (pseudocode) contains the
+ // necessary ResourceMark for the resource allocations below.
+ // See traceEventClasses.xsl for details.
+ void print_val(const char* label, const Klass* const val) {
+ const char* description = "NULL";
+ if (val != NULL) {
+ Symbol* name = val->name();
+ if (name != NULL) {
+ description = name->as_C_string();
+ }
+ }
+ _st.print("%s = %s", label, description);
+ }
+
+ // Caller is machine generated code located in traceEventClasses.hpp
+ // Event<TraceId>::writeEvent() (pseudocode) contains the
+ // necessary ResourceMark for the resource allocations below.
+ // See traceEventClasses.xsl for details.
+ void print_val(const char* label, const Method* const val) {
+ const char* description = "NULL";
+ if (val != NULL) {
+ description = val->name_and_sig_as_C_string();
+ }
+ _st.print("%s = %s", label, description);
+ }
+
+ void print_val(const char* label, const char* val) {
+ _st.print("%s = '%s'", label, val);
+ }
+
+ void print(const char* val) {
+ _st.print(val);
+ }
+};
+
+#endif /* INCLUDE_TRACE */
+#endif /* SHARE_VM_TRACE_TRACESTREAM_HPP */
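
As a usage sketch (not part of the patch), the machine-generated writeEvent() code drives TraceStream roughly as below; the function name and event shape are illustrative assumptions, but only overloads shown in the header are used:

    void print_event_sketch(outputStream& out, const Method* m, s8 nanos) {
      TraceStream ts(out);
      ts.print("compilation = { ");
      ts.print_val("method", m);       // Method* overload: name and signature
      ts.print(", ");
      ts.print_val("duration", nanos); // s8 overload: INT64_FORMAT
      ts.print(" }");
    }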
diff --git a/src/share/vm/trace/traceEventTypes.hpp b/src/share/vm/trace/traceTime.hpp
index e7448aaeb..3a0fe2037 100644
--- a/src/share/vm/trace/traceEventTypes.hpp
+++ b/src/share/vm/trace/traceTime.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,9 +22,12 @@
*
*/
-#ifndef SHARE_VM_TRACE_TRACE_EVENT_TYPES_HPP
-#define SHARE_VM_TRACE_TRACE_EVENT_TYPES_HPP
+#ifndef SHARE_VM_TRACE_TRACETIME_HPP
+#define SHARE_VM_TRACE_TRACETIME_HPP
-/* Empty, just a placeholder for tracing events */
+#include "prims/jni.h"
+
+typedef jlong TracingTime;
+typedef jlong RelativeTracingTime;
#endif
diff --git a/src/share/vm/trace/traceTypes.xsl b/src/share/vm/trace/traceTypes.xsl
new file mode 100644
index 000000000..b06b604ce
--- /dev/null
+++ b/src/share/vm/trace/traceTypes.xsl
@@ -0,0 +1,72 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+ This code is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License version 2 only, as
+ published by the Free Software Foundation.
+
+ This code is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ version 2 for more details (a copy is included in the LICENSE file that
+ accompanied this code).
+
+ You should have received a copy of the GNU General Public License version
+ 2 along with this work; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ or visit www.oracle.com if you need additional information or have any
+ questions.
+-->
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
+<xsl:import href="xsl_util.xsl"/>
+
+<xsl:template match="/">
+ <xsl:call-template name="file-header"/>
+
+#ifndef TRACEFILES_JFRTYPES_HPP
+#define TRACEFILES_JFRTYPES_HPP
+
+#include "trace/traceDataTypes.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "oops/symbol.hpp"
+
+enum JVMContentType {
+ _not_a_content_type = (JVM_CONTENT_TYPES_START - 1),
+
+<xsl:for-each select="trace/types/content_types/content_type[@jvm_type]">
+ <xsl:value-of select="concat(' CONTENT_TYPE_', @jvm_type, ',', $newline)"/>
+</xsl:for-each>
+ NUM_JVM_CONTENT_TYPES
+};
+
+
+enum JVMEventRelations {
+ JVM_REL_NOT_AVAILABLE = 0,
+
+<xsl:for-each select="trace/relation_decls/relation_decl">
+ <xsl:value-of select="concat(' JVM_REL_', @id, ',', $newline)"/>
+</xsl:for-each>
+ NUM_EVENT_RELATIONS
+};
+
+/**
+ * Create typedefs for the JRA types:
+ * typedef s8 TYPE_LONG;
+ * typedef s4 TYPE_INTEGER;
+ * typedef const char * TYPE_STRING;
+ * ...
+ */
+<xsl:for-each select="trace/types/primary_types/primary_type">
+typedef <xsl:value-of select="@type"/> TYPE_<xsl:value-of select="@symbol"/>;
+</xsl:for-each>
+
+#endif // TRACEFILES_JFRTYPES_HPP
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/src/share/vm/trace/tracetypes.xml b/src/share/vm/trace/tracetypes.xml
new file mode 100644
index 000000000..22fd50590
--- /dev/null
+++ b/src/share/vm/trace/tracetypes.xml
@@ -0,0 +1,356 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+ This code is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License version 2 only, as
+ published by the Free Software Foundation.
+
+ This code is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ version 2 for more details (a copy is included in the LICENSE file that
+ accompanied this code).
+
+ You should have received a copy of the GNU General Public License version
+ 2 along with this work; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ or visit www.oracle.com if you need additional information or have any
+ questions.
+-->
+
+<!DOCTYPE types SYSTEM "trace.dtd">
+
+<!--
+
+Content types (complex) should create constant pool data
+in the recording.
+Currently at least, there is _NO_ verification that whatever
+writer you have is actually writing correctly. So BE CAREFUL!
+
+Declared with the 'content_type' tag.
+
+<type> is the ID type, i.e. the integer type that resolves this. Most often
+U4 or U8, but really small number constants, such as GCTYPE, use U1.
+
+<content-type> is where it gets interesting. 'builtin_type' means we're
+defining how we resolve one of the trace built-in types (Class, Thread, etc.);
+'jvm_type' means we're defining a new one for our own use.
+
+Example: (GcMode)
+
+<content_type id="GCMode" hr_name="GC mode" type="U1" jvm_type="GCMODE">
+ <value type="UTF8" field="desc" description="Description"/>
+</content_type>
+
+This creates a content type CONTENT_TYPE_GCMODE.
+The field type referencing it is u1 (U1), and the constant pool struct has one field, the description.
+
+Before we can use it, we also need to define a primary field data type:
+
+<primary_type symbol="GCMODE" datatype="U1" contenttype="NONE"
+ type="u8" sizeop="sizeof(u1)"/>
+
+Now we can use the content + data type in declaring event fields.
+ -->
+
+ <types>
+ <content_types>
+ <content_type id="Thread" hr_name="Thread"
+ type="U4" builtin_type="OSTHREAD">
+ <value type="UTF8" field="name" label="Thread name"/>
+ </content_type>
+
+ <content_type id="VMThread" hr_name="VM Thread"
+ type="U8" jvm_type="VMTHREAD">
+ <value type="OSTHREAD" field="thread" label="VM Thread"/>
+ </content_type>
+
+ <content_type id="JavaThread" hr_name="Java thread"
+ type="U8" builtin_type="JAVALANGTHREAD">
+ <value type="OSTHREAD" field="thread" label="OS Thread ID"/>
+ <value type="BYTES64" field="allocInsideTla"
+ label="Allocated bytes inside TLAs"/>
+ <value type="BYTES64" field="allocOutsideTla"
+ label="Allocated bytes outside TLAs"/>
+ <value type="THREADGROUP" field="group" label="Java Thread Group"/>
+ </content_type>
+
+ <content_type id="ThreadGroup" hr_name="Thread group"
+ type="U4" jvm_type="THREADGROUP">
+ <value type="THREADGROUP" field="parent" label="Parent"/>
+ <value type="UTF8" field="name" label="Name"/>
+ </content_type>
+
+ <content_type id="StackTrace" hr_name="Stacktrace"
+ type="U8" builtin_type="STACKTRACE">
+ <value type="BOOLEAN" field="truncated" label="Truncated"/>
+ <structarray type="StackFrame" field="frames" label="Stack frames"/>
+ </content_type>
+
+ <content_type id="Class" hr_name="Java class"
+ type="U8" builtin_type="CLASS">
+ <value type="CLASS" field="loaderClass" label="ClassLoader"/>
+ <value type="SYMBOL" field="name" label="Name"/>
+ <value type="SHORT" field="modifiers" label="Access modifiers"/>
+ </content_type>
+
+ <content_type id="Method" hr_name="Java method"
+ type="U8" jvm_type="METHOD">
+ <value type="CLASS" field="class" label="Class"/>
+ <value type="SYMBOL" field="name" label="Name"/>
+ <value type="SYMBOL" field="signature" label="Signature"/>
+ <value type="SHORT" field="modifiers" label="Access modifiers"/>
+ </content_type>
+
+ <content_type id="UTFConstant" hr_name="UTF constant"
+ type="U8" jvm_type="SYMBOL">
+ <value type="UTF8" field="utf8" label="UTF8 data"/>
+ </content_type>
+
+ <content_type id="ThreadState" hr_name="Java Thread State"
+ type="U2" jvm_type="THREADSTATE">
+ <value type="UTF8" field="name" label="Name"/>
+ </content_type>
+
+ <content_type id="FrameType" hr_name="Frame type"
+ type="U1" jvm_type="FRAMETYPE">
+ <value type="UTF8" field="desc" label="Description"/>
+ </content_type>
+
+ <struct_type id="StackFrame">
+ <value type="METHOD" field="method" label="Java Method"/>
+ <value type="INTEGER" field="line" label="Line number"/>
+ <value type="FRAMETYPE" field="type" label="Frame type"/>
+ </struct_type>
+
+ <content_type id="GCName" hr_name="GC Name"
+ type="U1" jvm_type="GCNAME">
+ <value type="UTF8" field="name" label="name" />
+ </content_type>
+
+ <content_type id="GCCause" hr_name="GC Cause"
+ type="U2" jvm_type="GCCAUSE">
+ <value type="UTF8" field="cause" label="cause" />
+ </content_type>
+
+ <content_type id="GCWhen" hr_name="GC When"
+ type="U1" jvm_type="GCWHEN">
+ <value type="UTF8" field="when" label="when" />
+ </content_type>
+
+ <content_type id="G1YCType" hr_name="G1 YC Type"
+ type="U1" jvm_type="G1YCTYPE">
+ <value type="UTF8" field="type" label="type" />
+ </content_type>
+
+ <content_type id="ReferenceType" hr_name="Reference Type"
+ type="U1" jvm_type="REFERENCETYPE">
+ <value type="UTF8" field="type" label="type" />
+ </content_type>
+
+ <content_type id="NARROW_OOP_MODE" hr_name="Narrow Oop Mode"
+ type="U1" jvm_type="NARROWOOPMODE">
+ <value type="UTF8" field="mode" label="mode" />
+ </content_type>
+
+ <content_type id="VMOperationType" hr_name="VM Operation Type"
+ type="U2" jvm_type="VMOPERATIONTYPE">
+ <value type="UTF8" field="type" label="type" />
+ </content_type>
+
+ <content_type id="CompilerPhaseType" hr_name="Compiler Phase Type"
+ type="U1" jvm_type="COMPILERPHASETYPE">
+ <value type="UTF8" field="phase" label="phase" />
+ </content_type>
+
+ </content_types>
+
+
+ <primary_types>
+ <!--
+ - primary_type takes these attributes:
+ - symbol INTEGER, LONG etc
+ - datatype The trace datatype, see enum DataType
+ - contenttype Either resolved content type or the semantic meaning
+ - type The actual type as used in structures etc
+ - sizeop A function/macro that can be applied to a single
+ - struct value of type "type" and yields the actual byte
+ - size we need to write. The % is replaced by the value.
+ -->
+
+ <!-- SIGNED 64bit -->
+ <primary_type symbol="LONG" datatype="LONG" contenttype="NONE"
+ type="s8" sizeop="sizeof(s8)"/>
+
+ <!-- UNSIGNED 64bit -->
+ <primary_type symbol="ULONG" datatype="U8" contenttype="NONE"
+ type="u8" sizeop="sizeof(u8)"/>
+
+ <!-- SIGNED 32bit -->
+ <primary_type symbol="INTEGER" datatype="INT" contenttype="NONE"
+ type="s4" sizeop="sizeof(s4)"/>
+
+ <!-- UNSIGNED 32bit -->
+ <primary_type symbol="UINT" datatype="U4" contenttype="NONE"
+ type="unsigned" sizeop="sizeof(unsigned)"/>
+
+ <!-- UNSIGNED 16bit -->
+ <primary_type symbol="USHORT" datatype="U2" contenttype="NONE"
+ type="u2" sizeop="sizeof(u2)"/>
+
+ <!-- SIGNED 16bit -->
+ <primary_type symbol="SHORT" datatype="SHORT" contenttype="NONE"
+ type="s2" sizeop="sizeof(s2)"/>
+
+ <!-- SIGNED 8bit -->
+ <primary_type symbol="BYTE" datatype="BYTE" contenttype="NONE"
+ type="s1" sizeop="sizeof(s1)"/>
+
+ <!-- UNSIGNED 8bit -->
+ <primary_type symbol="UBYTE" datatype="U1" contenttype="NONE"
+ type="u1" sizeop="sizeof(u1)"/>
+
+ <!-- float 32bit -->
+ <primary_type symbol="FLOAT" datatype="FLOAT" contenttype="NONE"
+ type="float" sizeop="sizeof(float)"/>
+
+ <!-- float 64bit -->
+ <primary_type symbol="DOUBLE" datatype="DOUBLE" contenttype="NONE"
+ type="double" sizeop="sizeof(double)"/>
+
+ <!-- boolean type (1-byte) -->
+ <primary_type symbol="BOOLEAN" datatype="BOOLEAN" contenttype="NONE"
+ type="bool" sizeop="1"/>
+
+ <!-- 32-bit unsigned integer, SEMANTIC value BYTES -->
+ <primary_type symbol="BYTES" datatype="U4" contenttype="BYTES"
+ type="u4" sizeop="sizeof(u4)"/>
+
+ <primary_type symbol="IOBYTES" datatype="U4" contenttype="BYTES"
+ type="u4" sizeop="sizeof(u4)"/>
+
+ <!-- 64-bit unsigned integer, SEMANTIC value BYTES -->
+ <primary_type symbol="BYTES64" datatype="U8" contenttype="BYTES"
+ type="u8" sizeop="sizeof(u8)"/>
+
+ <!-- 64-bit signed integer, SEMANTIC value ABSOLUTE MILLISECONDS -->
+ <primary_type symbol="EPOCHMILLIS" datatype="LONG" contenttype="EPOCHMILLIS"
+ type="s8" sizeop="sizeof(s8)"/>
+
+ <!-- 64-bit signed integer, SEMANTIC value RELATIVE MILLISECONDS -->
+ <primary_type symbol="MILLIS" datatype="LONG" contenttype="MILLIS"
+ type="s8" sizeop="sizeof(s8)"/>
+
+ <!-- 64-bit signed integer, SEMANTIC value RELATIVE NANOSECONDS -->
+ <primary_type symbol="NANOS" datatype="LONG" contenttype="NANOS"
+ type="s8" sizeop="sizeof(s8)"/>
+
+ <!-- 64-bit signed integer, SEMANTIC value ABSOLUTE TICKS -->
+ <primary_type symbol="TICKS" datatype="LONG" contenttype="TICKS"
+ type="s8" sizeop="sizeof(s8)"/>
+
+ <!-- 64-bit signed integer, SEMANTIC value RELATIVE TICKS -->
+ <primary_type symbol="RELATIVE_TICKS" datatype="LONG" contenttype="TICKS"
+ type="s8" sizeop="sizeof(s8)"/>
+
+ <!-- 64-bit unsigned integer, SEMANTIC value ADDRESS (mem loc) -->
+ <primary_type symbol="ADDRESS" datatype="U8" contenttype="ADDRESS"
+ type="u8" sizeop="sizeof(u8)"/>
+
+ <!-- 32-bit float, SEMANTIC value PERCENTAGE (0.0-1.0) -->
+ <primary_type symbol="PERCENT" datatype="FLOAT" contenttype="PERCENTAGE"
+ type="float" sizeop="sizeof(float)"/>
+
+ <!-- UTF-encoded string, max length 64k -->
+ <primary_type symbol="UTF8" datatype="UTF8" contenttype="NONE"
+ type="const char *" sizeop="sizeof_utf(%)"/>
+
+ <!-- UTF-16 encoded (Unicode) string, max length maxjuint -->
+ <primary_type symbol="STRING" datatype="STRING" contenttype="NONE"
+ type="TraceUnicodeString*" sizeop="sizeof_unicode(%)"/>
+
+ <!-- Symbol* constant. Note that this may currently ONLY be used by
+ classes, methods and fields. This restriction might be lifted. -->
+ <primary_type symbol="SYMBOL" datatype="U8" contenttype="SYMBOL"
+ type="Symbol *" sizeop="sizeof(u8)"/>
+
+ <!-- A Klass *. The actual class is marked as "used" and will
+ eventually be written into the recording constant pool -->
+ <primary_type symbol="CLASS" datatype="U8" contenttype="CLASS"
+ type="Klass *" sizeop="sizeof(u8)"/>
+
+ <!-- A Method *. The method is marked as "used" and will eventually be
+ written into the recording constant pool. -->
+ <primary_type symbol="METHOD" datatype="U8" contenttype="METHOD"
+ type="Method *" sizeop="sizeof(u8)"/>
+
+ <!-- The type for stacktraces in the recording. Should not be used by
+ events explicitly -->
+ <primary_type symbol="STACKTRACE" datatype="U8" contenttype="STACKTRACE"
+ type="u8" sizeop="sizeof(u8)"/>
+
+ <!-- OS Thread ID -->
+ <primary_type symbol="OSTHREAD" datatype="U4" contenttype="OSTHREAD"
+ type="u4" sizeop="sizeof(u4)"/>
+
+ <!-- VM Thread ID. Note: changed from U2 to U8 for HotSpot -->
+ <primary_type symbol="VMTHREAD" datatype="U8" contenttype="VMTHREAD"
+ type="u8" sizeop="sizeof(u8)"/>
+
+ <!-- Java Thread ID -->
+ <primary_type symbol="JAVALANGTHREAD" datatype="LONG"
+ contenttype="JAVALANGTHREAD" type="s8"
+ sizeop="sizeof(s8)"/>
+
+ <!-- Threadgroup THIS TYPE MAY NOT BE USED IN NORMAL EVENTS (ATM). Only
+ for thread constant pool // KK TODO: u8 should be ObjectP -->
+ <primary_type symbol="THREADGROUP" datatype="U4" contenttype="THREADGROUP"
+ type="u8"
+ sizeop="sizeof(u4)"/>
+
+ <!-- FRAMETYPE enum -->
+ <primary_type symbol="FRAMETYPE" datatype="U1" contenttype="FRAMETYPE"
+ type="u1" sizeop="sizeof(u1)"/>
+
+ <!-- THREADSTATE enum -->
+ <primary_type symbol="THREADSTATE" datatype="U2" contenttype="THREADSTATE"
+ type="u2" sizeop="sizeof(u2)"/>
+
+ <!-- GCName -->
+ <primary_type symbol="GCNAME" datatype="U1" contenttype="GCNAME"
+ type="u1" sizeop="sizeof(u1)" />
+
+ <!-- GCCAUSE -->
+ <primary_type symbol="GCCAUSE" datatype="U2" contenttype="GCCAUSE"
+ type="u2" sizeop="sizeof(u2)" />
+
+ <!-- GCWHEN -->
+ <primary_type symbol="GCWHEN" datatype="U1" contenttype="GCWHEN"
+ type="u1" sizeop="sizeof(u1)" />
+
+ <!-- G1YCType -->
+ <primary_type symbol="G1YCTYPE" datatype="U1" contenttype="G1YCTYPE"
+ type="u1" sizeop="sizeof(u1)" />
+
+ <!-- REFERENCETYPE -->
+ <primary_type symbol="REFERENCETYPE" datatype="U1"
+ contenttype="REFERENCETYPE" type="u1" sizeop="sizeof(u1)" />
+
+ <!-- NARROWOOPMODE -->
+ <primary_type symbol="NARROWOOPMODE" datatype="U1"
+ contenttype="NARROWOOPMODE" type="u1" sizeop="sizeof(u1)" />
+
+ <!-- COMPILERPHASETYPE -->
+ <primary_type symbol="COMPILERPHASETYPE" datatype="U1"
+ contenttype="COMPILERPHASETYPE" type="u1" sizeop="sizeof(u1)" />
+
+ <!-- VMOPERATIONTYPE -->
+ <primary_type symbol="VMOPERATIONTYPE" datatype="U2" contenttype="VMOPERATIONTYPE"
+ type="u2" sizeop="sizeof(u2)" />
+
+ </primary_types>
+</types>
diff --git a/src/share/vm/trace/tracing.hpp b/src/share/vm/trace/tracing.hpp
index c56e2dc2d..72530e745 100644
--- a/src/share/vm/trace/tracing.hpp
+++ b/src/share/vm/trace/tracing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_TRACE_TRACING_HPP
#define SHARE_VM_TRACE_TRACING_HPP
-#include "trace/traceMacros.hpp"
+#include "tracefiles/traceEventClasses.hpp"
+#include "tracefiles/traceEventIds.hpp"
#endif
diff --git a/src/share/vm/trace/xinclude.mod b/src/share/vm/trace/xinclude.mod
new file mode 100644
index 000000000..eab9436ea
--- /dev/null
+++ b/src/share/vm/trace/xinclude.mod
@@ -0,0 +1,37 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+ This code is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License version 2 only, as
+ published by the Free Software Foundation.
+
+ This code is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ version 2 for more details (a copy is included in the LICENSE file that
+ accompanied this code).
+
+ You should have received a copy of the GNU General Public License version
+ 2 along with this work; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ or visit www.oracle.com if you need additional information or have any
+ questions.
+
+-->
+<!ELEMENT xi:include (xi:fallback?) >
+<!ATTLIST xi:include
+ xmlns:xi CDATA #FIXED "http://www.w3.org/2001/XInclude"
+ href CDATA #IMPLIED
+ parse (xml|text) "xml"
+ xpointer CDATA #IMPLIED
+ encoding CDATA #IMPLIED
+ accept CDATA #IMPLIED
+ accept-language CDATA #IMPLIED >
+
+<!ELEMENT xi:fallback ANY>
+<!ATTLIST xi:fallback
+ xmlns:xi CDATA #FIXED "http://www.w3.org/2001/XInclude" >
diff --git a/src/share/vm/trace/xsl_util.xsl b/src/share/vm/trace/xsl_util.xsl
new file mode 100644
index 000000000..fb82914c7
--- /dev/null
+++ b/src/share/vm/trace/xsl_util.xsl
@@ -0,0 +1,78 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+ This code is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License version 2 only, as
+ published by the Free Software Foundation.
+
+ This code is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ version 2 for more details (a copy is included in the LICENSE file that
+ accompanied this code).
+
+ You should have received a copy of the GNU General Public License version
+ 2 along with this work; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ or visit www.oracle.com if you need additional information or have any
+ questions.
+-->
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+
+<!-- utilities used when generating code -->
+
+<xsl:variable name="newline">
+ <xsl:text>&#xA;</xsl:text>
+</xsl:variable>
+
+<xsl:variable name="indent1">
+ <xsl:text>&#xA; </xsl:text>
+</xsl:variable>
+
+<xsl:variable name="indent2">
+ <xsl:text>&#xA; </xsl:text>
+</xsl:variable>
+
+<xsl:variable name="indent3">
+ <xsl:text>&#xA; </xsl:text>
+</xsl:variable>
+
+<xsl:variable name="indent4">
+ <xsl:text>&#xA; </xsl:text>
+</xsl:variable>
+
+<xsl:variable name="quote">
+ <xsl:text>"</xsl:text>
+</xsl:variable>
+
+<xsl:template name="file-header">
+ <xsl:text>/* AUTOMATICALLY GENERATED FILE - DO NOT EDIT */</xsl:text>
+</xsl:template>
+
+<xsl:template name="string-replace-all">
+ <xsl:param name="text" />
+ <xsl:param name="replace" />
+ <xsl:param name="by" />
+ <xsl:choose>
+ <xsl:when test="contains($text, $replace)">
+ <xsl:value-of select="substring-before($text,$replace)" />
+ <xsl:value-of select="$by" />
+ <xsl:call-template name="string-replace-all">
+ <xsl:with-param name="text" select="substring-after($text,$replace)" />
+ <xsl:with-param name="replace" select="$replace" />
+ <xsl:with-param name="by" select="$by" />
+ </xsl:call-template>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:value-of select="$text" />
+ </xsl:otherwise>
+ </xsl:choose>
+</xsl:template>
+
+
+</xsl:stylesheet>
diff --git a/src/share/vm/utilities/accessFlags.hpp b/src/share/vm/utilities/accessFlags.hpp
index 99f9a3360..a3d3de99c 100644
--- a/src/share/vm/utilities/accessFlags.hpp
+++ b/src/share/vm/utilities/accessFlags.hpp
@@ -78,11 +78,13 @@ enum {
JVM_ACC_FIELD_ACCESS_WATCHED = 0x00002000, // field access is watched by JVMTI
JVM_ACC_FIELD_MODIFICATION_WATCHED = 0x00008000, // field modification is watched by JVMTI
JVM_ACC_FIELD_INTERNAL = 0x00000400, // internal field, same as JVM_ACC_ABSTRACT
+ JVM_ACC_FIELD_STABLE = 0x00000020, // @Stable field, same as JVM_ACC_SYNCHRONIZED
JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE = 0x00000800, // field has generic signature
JVM_ACC_FIELD_INTERNAL_FLAGS = JVM_ACC_FIELD_ACCESS_WATCHED |
JVM_ACC_FIELD_MODIFICATION_WATCHED |
JVM_ACC_FIELD_INTERNAL |
+ JVM_ACC_FIELD_STABLE |
JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE,
// flags accepted by set_field_flags()
@@ -148,6 +150,7 @@ class AccessFlags VALUE_OBJ_CLASS_SPEC {
{ return (_flags & JVM_ACC_FIELD_MODIFICATION_WATCHED) != 0; }
bool on_stack() const { return (_flags & JVM_ACC_ON_STACK) != 0; }
bool is_internal() const { return (_flags & JVM_ACC_FIELD_INTERNAL) != 0; }
+ bool is_stable() const { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
bool field_has_generic_signature() const
{ return (_flags & JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE) != 0; }
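
The new JVM_ACC_FIELD_STABLE bit deliberately aliases JVM_ACC_SYNCHRONIZED (0x00000020): that bit is only meaningful on methods, so fields can reuse it without collision. A minimal sketch of the overloading idea, with illustrative helper names:

    // Access-flag bits are interpreted per declaration kind, so a
    // method-only bit can be recycled as a field-only bit.
    inline bool is_synchronized_method(jint flags) { return (flags & 0x20) != 0; }
    inline bool is_stable_field(jint flags)        { return (flags & 0x20) != 0; }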
diff --git a/src/share/vm/utilities/array.hpp b/src/share/vm/utilities/array.hpp
index 5578ed9b6..fb32f5ca8 100644
--- a/src/share/vm/utilities/array.hpp
+++ b/src/share/vm/utilities/array.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -317,10 +317,10 @@ protected:
Array(const Array<T>&);
void operator=(const Array<T>&);
- void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) {
+ void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) throw() {
size_t word_size = Array::size(length);
return (void*) Metaspace::allocate(loader_data, word_size, read_only,
- Metaspace::NonClassType, CHECK_NULL);
+ MetaspaceObj::array_type(sizeof(T)), CHECK_NULL);
}
static size_t byte_sizeof(int length) { return sizeof(Array<T>) + MAX2(length - 1, 0) * sizeof(T); }
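
The added throw() specification is not cosmetic: under pre-C++11 rules, only an allocation function declared non-throwing is allowed to return NULL, so without it the compiler may elide the new-expression's implicit NULL check. A self-contained sketch of the idiom (the simulate_oom parameter is illustrative, standing in for an allocator like Metaspace::allocate that can fail):

    #include <cstddef>
    #include <cstdlib>

    struct Node {
      // throw() tells the compiler this operator new may return NULL,
      // so the generated new-expression keeps its NULL check and skips
      // construction on failure.
      void* operator new(size_t size, bool simulate_oom) throw() {
        return simulate_oom ? NULL : std::malloc(size);
      }
      int value;
    };
    // Usage: Node* n = new (true) Node; // n == NULL, no constructor runs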
diff --git a/src/share/vm/utilities/bitMap.cpp b/src/share/vm/utilities/bitMap.cpp
index 152b40d39..01825d302 100644
--- a/src/share/vm/utilities/bitMap.cpp
+++ b/src/share/vm/utilities/bitMap.cpp
@@ -41,7 +41,7 @@
BitMap::BitMap(bm_word_t* map, idx_t size_in_bits) :
- _map(map), _size(size_in_bits)
+ _map(map), _size(size_in_bits), _map_allocator(false)
{
assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption.");
assert(size_in_bits >= 0, "just checking");
@@ -49,7 +49,7 @@ BitMap::BitMap(bm_word_t* map, idx_t size_in_bits) :
BitMap::BitMap(idx_t size_in_bits, bool in_resource_area) :
- _map(NULL), _size(0)
+ _map(NULL), _size(0), _map_allocator(false)
{
assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption.");
resize(size_in_bits, in_resource_area);
@@ -65,8 +65,10 @@ void BitMap::resize(idx_t size_in_bits, bool in_resource_area) {
if (in_resource_area) {
_map = NEW_RESOURCE_ARRAY(bm_word_t, new_size_in_words);
} else {
- if (old_map != NULL) FREE_C_HEAP_ARRAY(bm_word_t, _map, mtInternal);
- _map = NEW_C_HEAP_ARRAY(bm_word_t, new_size_in_words, mtInternal);
+ if (old_map != NULL) {
+ _map_allocator.free();
+ }
+ _map = _map_allocator.allocate(new_size_in_words);
}
Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map,
MIN2(old_size_in_words, new_size_in_words));
diff --git a/src/share/vm/utilities/bitMap.hpp b/src/share/vm/utilities/bitMap.hpp
index 2486533ab..c4cac118b 100644
--- a/src/share/vm/utilities/bitMap.hpp
+++ b/src/share/vm/utilities/bitMap.hpp
@@ -48,6 +48,7 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
} RangeSizeHint;
private:
+ ArrayAllocator<bm_word_t, mtInternal> _map_allocator;
bm_word_t* _map; // First word in bitmap
idx_t _size; // Size of bitmap (in bits)
@@ -113,7 +114,7 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
public:
// Constructs a bitmap with no map, and size 0.
- BitMap() : _map(NULL), _size(0) {}
+ BitMap() : _map(NULL), _size(0), _map_allocator(false) {}
// Constructs a bitmap with the given map and size.
BitMap(bm_word_t* map, idx_t size_in_bits);
diff --git a/src/share/vm/utilities/bitMap.inline.hpp b/src/share/vm/utilities/bitMap.inline.hpp
index 7bb244795..2171e849f 100644
--- a/src/share/vm/utilities/bitMap.inline.hpp
+++ b/src/share/vm/utilities/bitMap.inline.hpp
@@ -52,16 +52,16 @@ inline void BitMap::clear_bit(idx_t bit) {
inline bool BitMap::par_set_bit(idx_t bit) {
verify_index(bit);
- volatile idx_t* const addr = word_addr(bit);
- const idx_t mask = bit_mask(bit);
- idx_t old_val = *addr;
+ volatile bm_word_t* const addr = word_addr(bit);
+ const bm_word_t mask = bit_mask(bit);
+ bm_word_t old_val = *addr;
do {
- const idx_t new_val = old_val | mask;
+ const bm_word_t new_val = old_val | mask;
if (new_val == old_val) {
return false; // Someone else beat us to it.
}
- const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
+ const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
(volatile void*) addr,
(void*) old_val);
if (cur_val == old_val) {
@@ -73,16 +73,16 @@ inline bool BitMap::par_set_bit(idx_t bit) {
inline bool BitMap::par_clear_bit(idx_t bit) {
verify_index(bit);
- volatile idx_t* const addr = word_addr(bit);
- const idx_t mask = ~bit_mask(bit);
- idx_t old_val = *addr;
+ volatile bm_word_t* const addr = word_addr(bit);
+ const bm_word_t mask = ~bit_mask(bit);
+ bm_word_t old_val = *addr;
do {
- const idx_t new_val = old_val & mask;
+ const bm_word_t new_val = old_val & mask;
if (new_val == old_val) {
return false; // Someone else beat us to it.
}
- const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
+ const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
(volatile void*) addr,
(void*) old_val);
if (cur_val == old_val) {
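
The par_set_bit/par_clear_bit loops above follow the classic lock-free read-modify-CAS-retry pattern. A generic sketch of the same shape, using C++11 atomics in place of HotSpot's Atomic::cmpxchg_ptr:

    #include <atomic>
    #include <cstdint>

    bool par_set_bit_sketch(std::atomic<uintptr_t>& word, uintptr_t mask) {
      uintptr_t old_val = word.load();
      for (;;) {
        uintptr_t new_val = old_val | mask;
        if (new_val == old_val) {
          return false;                 // someone else already set the bit
        }
        if (word.compare_exchange_weak(old_val, new_val)) {
          return true;                  // our CAS installed the bit
        }
        // compare_exchange_weak refreshed old_val on failure; retry
      }
    }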
diff --git a/src/share/vm/utilities/debug.cpp b/src/share/vm/utilities/debug.cpp
index a675c27bf..31d13f794 100644
--- a/src/share/vm/utilities/debug.cpp
+++ b/src/share/vm/utilities/debug.cpp
@@ -314,8 +314,8 @@ bool is_error_reported() {
#ifndef PRODUCT
#include <signal.h>
-void test_error_handler(size_t test_num)
-{
+void test_error_handler() {
+ uintx test_num = ErrorHandlerTest;
if (test_num == 0) return;
// If asserts are disabled, use the corresponding guarantee instead.
@@ -327,6 +327,8 @@ void test_error_handler(size_t test_num)
const char* const eol = os::line_separator();
const char* const msg = "this message should be truncated during formatting";
+ char * const dataPtr = NULL; // bad data pointer
+ const void (*funcPtr)(void) = (const void(*)()) 0xF; // bad function pointer
// Keep this in sync with test/runtime/6888954/vmerrors.sh.
switch (n) {
@@ -348,11 +350,16 @@ void test_error_handler(size_t test_num)
case 9: ShouldNotCallThis();
case 10: ShouldNotReachHere();
case 11: Unimplemented();
- // This is last because it does not generate an hs_err* file on Windows.
- case 12: os::signal_raise(SIGSEGV);
-
- default: ShouldNotReachHere();
+ // There's no guarantee the bad data pointer will crash us
+ // so "break" out to the ShouldNotReachHere().
+ case 12: *dataPtr = '\0'; break;
+ // There's no guarantee the bad function pointer will crash us
+ // so "break" out to the ShouldNotReachHere().
+ case 13: (*funcPtr)(); break;
+
+ default: tty->print_cr("ERROR: %d: unexpected test_num value.", n);
}
+ ShouldNotReachHere();
}
#endif // !PRODUCT
@@ -665,152 +672,4 @@ void help() {
tty->print_cr(" ndebug() - undo debug");
}
-#if 0
-
-// BobV's command parser for debugging on windows when nothing else works.
-
-enum CommandID {
- CMDID_HELP,
- CMDID_QUIT,
- CMDID_HSFIND,
- CMDID_PSS,
- CMDID_PS,
- CMDID_PSF,
- CMDID_FINDM,
- CMDID_FINDNM,
- CMDID_PP,
- CMDID_BPT,
- CMDID_EXIT,
- CMDID_VERIFY,
- CMDID_THREADS,
- CMDID_ILLEGAL = 99
-};
-
-struct CommandParser {
- char *name;
- CommandID code;
- char *description;
-};
-
-struct CommandParser CommandList[] = {
- (char *)"help", CMDID_HELP, " Dump this list",
- (char *)"quit", CMDID_QUIT, " Return from this routine",
- (char *)"hsfind", CMDID_HSFIND, "Perform an hsfind on an address",
- (char *)"ps", CMDID_PS, " Print Current Thread Stack Trace",
- (char *)"pss", CMDID_PSS, " Print All Thread Stack Trace",
- (char *)"psf", CMDID_PSF, " Print All Stack Frames",
- (char *)"findm", CMDID_FINDM, " Find a Method* from a PC",
- (char *)"findnm", CMDID_FINDNM, "Find an nmethod from a PC",
- (char *)"pp", CMDID_PP, " Find out something about a pointer",
- (char *)"break", CMDID_BPT, " Execute a breakpoint",
- (char *)"exitvm", CMDID_EXIT, "Exit the VM",
- (char *)"verify", CMDID_VERIFY, "Perform a Heap Verify",
- (char *)"thread", CMDID_THREADS, "Dump Info on all Threads",
- (char *)0, CMDID_ILLEGAL
-};
-
-
-// get_debug_command()
-//
-// Read a command from standard input.
-// This is useful when you have a debugger
-// which doesn't support calling into functions.
-//
-void get_debug_command()
-{
- ssize_t count;
- int i,j;
- bool gotcommand;
- intptr_t addr;
- char buffer[256];
- nmethod *nm;
- Method* m;
-
- tty->print_cr("You have entered the diagnostic command interpreter");
- tty->print("The supported commands are:\n");
- for ( i=0; ; i++ ) {
- if ( CommandList[i].code == CMDID_ILLEGAL )
- break;
- tty->print_cr(" %s \n", CommandList[i].name );
- }
-
- while ( 1 ) {
- gotcommand = false;
- tty->print("Please enter a command: ");
- count = scanf("%s", buffer) ;
- if ( count >=0 ) {
- for ( i=0; ; i++ ) {
- if ( CommandList[i].code == CMDID_ILLEGAL ) {
- if (!gotcommand) tty->print("Invalid command, please try again\n");
- break;
- }
- if ( strcmp(buffer, CommandList[i].name) == 0 ) {
- gotcommand = true;
- switch ( CommandList[i].code ) {
- case CMDID_PS:
- ps();
- break;
- case CMDID_PSS:
- pss();
- break;
- case CMDID_PSF:
- psf();
- break;
- case CMDID_FINDM:
- tty->print("Please enter the hex addr to pass to findm: ");
- scanf("%I64X", &addr);
- m = (Method*)findm(addr);
- tty->print("findm(0x%I64X) returned 0x%I64X\n", addr, m);
- break;
- case CMDID_FINDNM:
- tty->print("Please enter the hex addr to pass to findnm: ");
- scanf("%I64X", &addr);
- nm = (nmethod*)findnm(addr);
- tty->print("findnm(0x%I64X) returned 0x%I64X\n", addr, nm);
- break;
- case CMDID_PP:
- tty->print("Please enter the hex addr to pass to pp: ");
- scanf("%I64X", &addr);
- pp((void*)addr);
- break;
- case CMDID_EXIT:
- exit(0);
- case CMDID_HELP:
- tty->print("Here are the supported commands: ");
- for ( j=0; ; j++ ) {
- if ( CommandList[j].code == CMDID_ILLEGAL )
- break;
- tty->print_cr(" %s -- %s\n", CommandList[j].name,
- CommandList[j].description );
- }
- break;
- case CMDID_QUIT:
- return;
- break;
- case CMDID_BPT:
- BREAKPOINT;
- break;
- case CMDID_VERIFY:
- verify();;
- break;
- case CMDID_THREADS:
- threads();;
- break;
- case CMDID_HSFIND:
- tty->print("Please enter the hex addr to pass to hsfind: ");
- scanf("%I64X", &addr);
- tty->print("Calling hsfind(0x%I64X)\n", addr);
- hsfind(addr);
- break;
- default:
- case CMDID_ILLEGAL:
- break;
- }
- }
- }
- }
- }
-}
-#endif
-
#endif // !PRODUCT
diff --git a/src/share/vm/utilities/debug.hpp b/src/share/vm/utilities/debug.hpp
index 9a8332feb..85b26f35f 100644
--- a/src/share/vm/utilities/debug.hpp
+++ b/src/share/vm/utilities/debug.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -225,6 +225,22 @@ void report_untested(const char* file, int line, const char* message);
void warning(const char* format, ...);
+#ifdef ASSERT
+// Compile-time asserts.
+template <bool> struct StaticAssert;
+template <> struct StaticAssert<true> {};
+
+// Only StaticAssert<true> is defined, so if cond evaluates to false we get
+// a compile time exception when trying to use StaticAssert<false>.
+#define STATIC_ASSERT(cond) \
+ do { \
+ StaticAssert<(cond)> DUMMY_STATIC_ASSERT; \
+ (void)DUMMY_STATIC_ASSERT; /* ignore */ \
+ } while (false)
+#else
+#define STATIC_ASSERT(cond)
+#endif
+
// out of shared space reporting
enum SharedSpaceType {
SharedPermGen,
@@ -243,7 +259,7 @@ bool is_error_reported();
void set_error_reported();
/* Test assert(), fatal(), guarantee(), etc. */
-NOT_PRODUCT(void test_error_handler(size_t test_num);)
+NOT_PRODUCT(void test_error_handler();)
void pd_ps(frame f);
void pd_obfuscate_location(char *buf, size_t buflen);
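
The STATIC_ASSERT machinery works because only the true specialization of StaticAssert is a complete type; instantiating StaticAssert<false> fails to compile. A standalone sketch of the same pre-C++11 trick:

    template <bool> struct StaticAssertSketch;       // declared, never defined
    template <> struct StaticAssertSketch<true> {};  // only the true case is complete

    void sketch() {
      StaticAssertSketch<(sizeof(int) >= 4)> ok; (void)ok;  // compiles
      // StaticAssertSketch<(sizeof(int) >= 64)> bad;        // error: incomplete type
    }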
diff --git a/src/share/vm/utilities/decoder.cpp b/src/share/vm/utilities/decoder.cpp
index 5489fe6fe..3fc934b4f 100644
--- a/src/share/vm/utilities/decoder.cpp
+++ b/src/share/vm/utilities/decoder.cpp
@@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "prims/jvm.h"
-#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "utilities/decoder.hpp"
#include "utilities/vmError.hpp"
@@ -80,6 +79,23 @@ AbstractDecoder* Decoder::create_decoder() {
return decoder;
}
+inline bool DecoderLocker::is_first_error_thread() {
+ return (os::current_thread_id() == VMError::get_first_error_tid());
+}
+
+DecoderLocker::DecoderLocker() :
+ MutexLockerEx(DecoderLocker::is_first_error_thread() ?
+ NULL : Decoder::shared_decoder_lock(), true) {
+ _decoder = is_first_error_thread() ?
+ Decoder::get_error_handler_instance() : Decoder::get_shared_instance();
+ assert(_decoder != NULL, "null decoder");
+}
+
+Mutex* Decoder::shared_decoder_lock() {
+ assert(_shared_decoder_lock != NULL, "Just check");
+ return _shared_decoder_lock;
+}
+
bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) {
assert(_shared_decoder_lock != NULL, "Just check");
bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
diff --git a/src/share/vm/utilities/decoder.hpp b/src/share/vm/utilities/decoder.hpp
index 0d2af8098..0cc880f19 100644
--- a/src/share/vm/utilities/decoder.hpp
+++ b/src/share/vm/utilities/decoder.hpp
@@ -28,6 +28,7 @@
#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
class AbstractDecoder : public CHeapObj<mtInternal> {
public:
@@ -124,6 +125,19 @@ private:
protected:
static Mutex* _shared_decoder_lock;
+ static Mutex* shared_decoder_lock();
+
+ friend class DecoderLocker;
+};
+
+class DecoderLocker : public MutexLockerEx {
+ AbstractDecoder* _decoder;
+ inline bool is_first_error_thread();
+public:
+ DecoderLocker();
+ AbstractDecoder* decoder() {
+ return _decoder;
+ }
};
#endif // SHARE_VM_UTILITIES_DECODER_HPP
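
A hedged usage sketch for the new RAII helper (assuming AbstractDecoder::decode mirrors the static Decoder::decode signature shown in decoder.cpp): construct a DecoderLocker, which takes the shared lock unless the current thread is handling a fatal error, then call through the selected decoder:

    void decode_pc_sketch(address pc, char* buf, int buflen) {
      DecoderLocker locker;        // lock (if needed) and pick the instance
      int offset = 0;
      locker.decoder()->decode(pc, buf, buflen, &offset, NULL);
    }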
diff --git a/src/share/vm/utilities/events.hpp b/src/share/vm/utilities/events.hpp
index c2e543da9..804fe77df 100644
--- a/src/share/vm/utilities/events.hpp
+++ b/src/share/vm/utilities/events.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -69,7 +69,7 @@ class EventLog : public CHeapObj<mtInternal> {
// semantics aren't appropriate. The name is used as the label of the
// log when it is dumped during a crash.
template <class T> class EventLogBase : public EventLog {
- template <class X> class EventRecord {
+ template <class X> class EventRecord : public CHeapObj<mtInternal> {
public:
double timestamp;
Thread* thread;
diff --git a/src/share/vm/utilities/exceptions.cpp b/src/share/vm/utilities/exceptions.cpp
index 6083c555c..af5f974f1 100644
--- a/src/share/vm/utilities/exceptions.cpp
+++ b/src/share/vm/utilities/exceptions.cpp
@@ -125,13 +125,13 @@ void Exceptions::_throw_oop(Thread* thread, const char* file, int line, oop exce
}
void Exceptions::_throw(Thread* thread, const char* file, int line, Handle h_exception, const char* message) {
+ ResourceMark rm;
assert(h_exception() != NULL, "exception should not be NULL");
// tracing (do this up front - so it works during boot strapping)
if (TraceExceptions) {
ttyLocker ttyl;
- ResourceMark rm;
- tty->print_cr("Exception <%s>%s%s (" INTPTR_FORMAT " ) \n"
+ tty->print_cr("Exception <%s%s%s> (" INTPTR_FORMAT ") \n"
"thrown [%s, line %d]\nfor thread " INTPTR_FORMAT,
h_exception->print_value_string(),
message ? ": " : "", message ? message : "",
@@ -141,7 +141,9 @@ void Exceptions::_throw(Thread* thread, const char* file, int line, Handle h_exc
NOT_PRODUCT(Exceptions::debug_check_abort(h_exception, message));
// Check for special boot-strapping/vm-thread handling
- if (special_exception(thread, file, line, h_exception)) return;
+ if (special_exception(thread, file, line, h_exception)) {
+ return;
+ }
assert(h_exception->is_a(SystemDictionary::Throwable_klass()), "exception is not a subclass of java/lang/Throwable");
@@ -149,7 +151,9 @@ void Exceptions::_throw(Thread* thread, const char* file, int line, Handle h_exc
thread->set_pending_exception(h_exception(), file, line);
// vm log
- Events::log_exception(thread, "Threw " INTPTR_FORMAT " at %s:%d", (address)h_exception(), file, line);
+ Events::log_exception(thread, "Exception <%s%s%s> (" INTPTR_FORMAT ") thrown at [%s, line %d]",
+ h_exception->print_value_string(), message ? ": " : "", message ? message : "",
+ (address)h_exception(), file, line);
}
diff --git a/src/share/vm/utilities/exceptions.hpp b/src/share/vm/utilities/exceptions.hpp
index 089cd3e08..beaabf8dc 100644
--- a/src/share/vm/utilities/exceptions.hpp
+++ b/src/share/vm/utilities/exceptions.hpp
@@ -194,15 +194,15 @@ class Exceptions {
#define HAS_PENDING_EXCEPTION (((ThreadShadow*)THREAD)->has_pending_exception())
#define CLEAR_PENDING_EXCEPTION (((ThreadShadow*)THREAD)->clear_pending_exception())
-#define CHECK THREAD); if (HAS_PENDING_EXCEPTION) return ; (0
-#define CHECK_(result) THREAD); if (HAS_PENDING_EXCEPTION) return result; (0
+#define CHECK THREAD); if (HAS_PENDING_EXCEPTION) return ; (void)(0
+#define CHECK_(result) THREAD); if (HAS_PENDING_EXCEPTION) return result; (void)(0
#define CHECK_0 CHECK_(0)
#define CHECK_NH CHECK_(Handle())
#define CHECK_NULL CHECK_(NULL)
#define CHECK_false CHECK_(false)
-#define CHECK_AND_CLEAR THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return; } (0
-#define CHECK_AND_CLEAR_(result) THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (0
+#define CHECK_AND_CLEAR THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return; } (void)(0
+#define CHECK_AND_CLEAR_(result) THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (void)(0
#define CHECK_AND_CLEAR_0 CHECK_AND_CLEAR_(0)
#define CHECK_AND_CLEAR_NH CHECK_AND_CLEAR_(Handle())
#define CHECK_AND_CLEAR_NULL CHECK_AND_CLEAR_(NULL)
@@ -282,7 +282,7 @@ class Exceptions {
CLEAR_PENDING_EXCEPTION; \
ex->print(); \
ShouldNotReachHere(); \
- } (0
+ } (void)(0
// ExceptionMark is a stack-allocated helper class for local exception handling.
// It is used with the EXCEPTION_MARK macro.
@@ -306,6 +306,6 @@ class ExceptionMark {
// which preserves pre-existing exceptions and does not allow new
// exceptions.
-#define EXCEPTION_MARK Thread* THREAD; ExceptionMark __em(THREAD);
+#define EXCEPTION_MARK Thread* THREAD = NULL; ExceptionMark __em(THREAD);
#endif // SHARE_VM_UTILITIES_EXCEPTIONS_HPP
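
To see why the macros now end in (void)(0 rather than a bare (0, expand a typical call site; the (void) cast suppresses "expression result unused" warnings without changing control flow. The resolve_or_fail call below is illustrative:

    // Written:
    //   k = SystemDictionary::resolve_or_fail(name, true, CHECK_NULL);
    // Expands (roughly) to:
    //   k = SystemDictionary::resolve_or_fail(name, true, THREAD);
    //   if (HAS_PENDING_EXCEPTION) return NULL;
    //   (void)(0);   // the call site's ')' and ';' close the macro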
diff --git a/src/share/vm/utilities/globalDefinitions.hpp b/src/share/vm/utilities/globalDefinitions.hpp
index 8a6467bf2..bc3401de9 100644
--- a/src/share/vm/utilities/globalDefinitions.hpp
+++ b/src/share/vm/utilities/globalDefinitions.hpp
@@ -362,6 +362,8 @@ const int KlassAlignment = KlassAlignmentInBytes / HeapWordSize;
// Klass encoding metaspace max size
const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes;
+const jlong CompressedKlassPointersBase = NOT_LP64(0) LP64_ONLY(CONST64(0x800000000)); // 32*G
+
// Machine dependent stuff
#ifdef TARGET_ARCH_x86
@@ -383,6 +385,14 @@ const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlass
# include "globalDefinitions_ppc.hpp"
#endif
+/*
+ * If a platform does not support native stack walking,
+ * the platform-specific globalDefinitions (above)
+ * can set PLATFORM_NATIVE_STACK_WALKING_SUPPORTED to 0.
+ */
+#ifndef PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+#define PLATFORM_NATIVE_STACK_WALKING_SUPPORTED 1
+#endif
// The byte alignment to be used by Arena::Amalloc. See bugid 4169348.
// Note: this value must be a power of 2
@@ -395,6 +405,14 @@ const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlass
#define align_size_up_(size, alignment) (((size) + ((alignment) - 1)) & ~((alignment) - 1))
+inline bool is_size_aligned(size_t size, size_t alignment) {
+ return align_size_up_(size, alignment) == size;
+}
+
+inline bool is_ptr_aligned(void* ptr, size_t alignment) {
+ return align_size_up_((intptr_t)ptr, (intptr_t)alignment) == (intptr_t)ptr;
+}
+
inline intptr_t align_size_up(intptr_t size, intptr_t alignment) {
return align_size_up_(size, alignment);
}
@@ -405,6 +423,16 @@ inline intptr_t align_size_down(intptr_t size, intptr_t alignment) {
return align_size_down_(size, alignment);
}
+#define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))
+
+inline void* align_ptr_up(void* ptr, size_t alignment) {
+ return (void*)align_size_up((intptr_t)ptr, (intptr_t)alignment);
+}
+
+inline void* align_ptr_down(void* ptr, size_t alignment) {
+ return (void*)align_size_down((intptr_t)ptr, (intptr_t)alignment);
+}
+
// Align objects by rounding up their size, in HeapWord units.
#define align_object_size_(size) align_size_up_(size, MinObjAlignment)
@@ -423,6 +451,10 @@ inline intptr_t align_object_offset(intptr_t offset) {
return align_size_up(offset, HeapWordsPerLong);
}
+inline void* align_pointer_up(const void* addr, size_t size) {
+ return (void*) align_size_up_((uintptr_t)addr, size);
+}
+
// Clamp an address to be within a specific page
// 1. If addr is on the page it is returned as is
// 2. If addr is above the page_address the start of the *next* page will be returned
@@ -444,32 +476,6 @@ inline address clamp_address_in_page(address addr, address page_address, intptr_
// The expected size in bytes of a cache line, used to pad data structures.
#define DEFAULT_CACHE_LINE_SIZE 64
-// Bytes needed to pad type to avoid cache-line sharing; alignment should be the
-// expected cache line size (a power of two). The first addend avoids sharing
-// when the start address is not a multiple of alignment; the second maintains
-// alignment of starting addresses that happen to be a multiple.
-#define PADDING_SIZE(type, alignment) \
- ((alignment) + align_size_up_(sizeof(type), alignment))
-
-// Templates to create a subclass padded to avoid cache line sharing. These are
-// effective only when applied to derived-most (leaf) classes.
-
-// When no args are passed to the base ctor.
-template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
-class Padded: public T {
-private:
- char _pad_buf_[PADDING_SIZE(T, alignment)];
-};
-
-// When either 0 or 1 args may be passed to the base ctor.
-template <class T, typename Arg1T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
-class Padded01: public T {
-public:
- Padded01(): T() { }
- Padded01(Arg1T arg1): T(arg1) { }
-private:
- char _pad_buf_[PADDING_SIZE(T, alignment)];
-};
//----------------------------------------------------------------------------------------------------
// Utility macros for compilers
@@ -758,18 +764,6 @@ inline BasicType as_BasicType(TosState state) {
TosState as_TosState(BasicType type);
-// ReferenceType is used to distinguish between java/lang/ref/Reference subclasses
-
-enum ReferenceType {
- REF_NONE, // Regular class
- REF_OTHER, // Subclass of java/lang/ref/Reference, but not subclass of one of the classes below
- REF_SOFT, // Subclass of java/lang/ref/SoftReference
- REF_WEAK, // Subclass of java/lang/ref/WeakReference
- REF_FINAL, // Subclass of java/lang/ref/FinalReference
- REF_PHANTOM // Subclass of java/lang/ref/PhantomReference
-};
-
-
// JavaThreadState keeps track of which part of the code a thread is executing in. This
// information is needed by the safepoint code.
//
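
Worked values for the new alignment helpers, following the align_size_up_ definition above (round up to a multiple of a power-of-two alignment):

    // is_size_aligned(24, 8)            -> true   ((24 + 7) & ~7 == 24)
    // is_size_aligned(20, 8)            -> false  (rounds up to 24)
    // align_ptr_up((void*)0x1003, 16)   -> (void*)0x1010
    // align_ptr_down((void*)0x1003, 16) -> (void*)0x1000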
diff --git a/src/share/vm/utilities/growableArray.hpp b/src/share/vm/utilities/growableArray.hpp
index 932d0a202..866a23ad9 100644
--- a/src/share/vm/utilities/growableArray.hpp
+++ b/src/share/vm/utilities/growableArray.hpp
@@ -194,6 +194,7 @@ template<class E> class GrowableArray : public GenericGrowableArray {
void clear() { _len = 0; }
int length() const { return _len; }
+ int max_length() const { return _max; }
void trunc_to(int l) { assert(l <= _len,"cannot increase length"); _len = l; }
bool is_empty() const { return _len == 0; }
bool is_nonempty() const { return _len != 0; }
diff --git a/src/share/vm/utilities/hashtable.cpp b/src/share/vm/utilities/hashtable.cpp
index 960474aba..ecb43da83 100644
--- a/src/share/vm/utilities/hashtable.cpp
+++ b/src/share/vm/utilities/hashtable.cpp
@@ -33,6 +33,7 @@
#include "utilities/dtrace.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/hashtable.inline.hpp"
+#include "utilities/numberSeq.hpp"
// This is a generic hashtable, designed to be used for the symbol
@@ -237,6 +238,57 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::reverse(void* boundary) {
}
}
+template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(Symbol *symbol) {
+ return symbol->size() * HeapWordSize;
+}
+
+template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(oop oop) {
+ // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
+ // and the String.value array is shared by several Strings. However, starting from JDK8,
+ // the String.value array is not shared anymore.
+ assert(oop != NULL && oop->klass() == SystemDictionary::String_klass(), "only strings are supported");
+ return (oop->size() + java_lang_String::value(oop)->size()) * HeapWordSize;
+}
+
+// Dump footprint and bucket length statistics
+//
+// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
+// add a new function Hashtable<T, F>::literal_size(MyNewType lit)
+
+template <class T, MEMFLAGS F> void Hashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
+ NumberSeq summary;
+ int literal_bytes = 0;
+ for (int i = 0; i < this->table_size(); ++i) {
+ int count = 0;
+ for (HashtableEntry<T, F>* e = bucket(i);
+ e != NULL; e = e->next()) {
+ count++;
+ literal_bytes += literal_size(e->literal());
+ }
+ summary.add((double)count);
+ }
+ double num_buckets = summary.num();
+ double num_entries = summary.sum();
+
+ int bucket_bytes = (int)num_buckets * sizeof(bucket(0));
+ int entry_bytes = (int)num_entries * sizeof(HashtableEntry<T, F>);
+ int total_bytes = literal_bytes + bucket_bytes + entry_bytes;
+
+ double bucket_avg = (num_buckets <= 0) ? 0 : (bucket_bytes / num_buckets);
+ double entry_avg = (num_entries <= 0) ? 0 : (entry_bytes / num_entries);
+ double literal_avg = (num_entries <= 0) ? 0 : (literal_bytes / num_entries);
+
+ st->print_cr("%s statistics:", table_name);
+ st->print_cr("Number of buckets : %9d = %9d bytes, avg %7.3f", (int)num_buckets, bucket_bytes, bucket_avg);
+ st->print_cr("Number of entries : %9d = %9d bytes, avg %7.3f", (int)num_entries, entry_bytes, entry_avg);
+ st->print_cr("Number of literals : %9d = %9d bytes, avg %7.3f", (int)num_entries, literal_bytes, literal_avg);
+ st->print_cr("Total footprint : %9s = %9d bytes", "", total_bytes);
+ st->print_cr("Average bucket size : %9.3f", summary.avg());
+ st->print_cr("Variance of bucket size : %9.3f", summary.variance());
+ st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
+ st->print_cr("Maximum bucket size : %9d", (int)summary.maximum());
+}
+
// Dump the hash table buckets.
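
The footprint that dump_table reports decomposes as below; the per-slot sizes are illustrative for a 64-bit VM, not normative:

    // total_bytes = literal_bytes                     // Symbol/String payload
    //             + num_buckets * sizeof(bucket(0))   // bucket array, ~8 bytes/slot
    //             + num_entries * sizeof(HashtableEntry<T, F>)  // ~24 bytes/entry
    //
    // e.g. 20011 buckets and 50000 entries:
    //   20011 * 8 + 50000 * 24 = 1360088 bytes, plus literal_bytes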
diff --git a/src/share/vm/utilities/hashtable.hpp b/src/share/vm/utilities/hashtable.hpp
index f170e6018..826823bdc 100644
--- a/src/share/vm/utilities/hashtable.hpp
+++ b/src/share/vm/utilities/hashtable.hpp
@@ -282,6 +282,19 @@ protected:
static bool use_alternate_hashcode() { return _seed != 0; }
static jint seed() { return _seed; }
+ static int literal_size(Symbol *symbol);
+ static int literal_size(oop oop);
+
+ // The following two are currently not used, but are needed anyway because some
+ // C++ compilers (on MacOS and Solaris) force the instantiation of
+ // Hashtable<ConstantPool*, mtClass>::dump_table() even though we never call this function
+ // in the VM code.
+ static int literal_size(ConstantPool *cp) {Unimplemented(); return 0;}
+ static int literal_size(Klass *k) {Unimplemented(); return 0;}
+
+public:
+ void dump_table(outputStream* st, const char *table_name);
+
private:
static jint _seed;
};
diff --git a/src/share/vm/utilities/macros.hpp b/src/share/vm/utilities/macros.hpp
index 10f5fc0d4..f2b4ec9d2 100644
--- a/src/share/vm/utilities/macros.hpp
+++ b/src/share/vm/utilities/macros.hpp
@@ -160,6 +160,10 @@
#define NOT_NMT_RETURN_(code) { return code; }
#endif // INCLUDE_NMT
+#ifndef INCLUDE_TRACE
+#define INCLUDE_TRACE 1
+#endif // INCLUDE_TRACE
+
// COMPILER1 variant
#ifdef COMPILER1
#ifdef COMPILER2
diff --git a/src/share/vm/utilities/ostream.cpp b/src/share/vm/utilities/ostream.cpp
index 1d066ddde..e4215504a 100644
--- a/src/share/vm/utilities/ostream.cpp
+++ b/src/share/vm/utilities/ostream.cpp
@@ -296,6 +296,7 @@ stringStream::stringStream(size_t initial_size) : outputStream() {
buffer = NEW_RESOURCE_ARRAY(char, buffer_length);
buffer_pos = 0;
buffer_fixed = false;
+ DEBUG_ONLY(rm = Thread::current()->current_resource_mark();)
}
// useful for output to fixed chunks of memory, such as performance counters
@@ -321,6 +322,8 @@ void stringStream::write(const char* s, size_t len) {
end = buffer_length * 2;
}
char* oldbuf = buffer;
+ assert(rm == NULL || Thread::current()->current_resource_mark() == rm,
+ "stringStream is re-allocated with a different ResourceMark");
buffer = NEW_RESOURCE_ARRAY(char, end);
strncpy(buffer, oldbuf, buffer_pos);
buffer_length = end;
@@ -339,7 +342,7 @@ void stringStream::write(const char* s, size_t len) {
}
char* stringStream::as_string() {
- char* copy = NEW_RESOURCE_ARRAY(char, buffer_pos+1);
+ char* copy = NEW_RESOURCE_ARRAY(char, buffer_pos + 1);
strncpy(copy, buffer, buffer_pos);
copy[buffer_pos] = 0; // terminating null
return copy;
@@ -352,14 +355,190 @@ outputStream* tty;
outputStream* gclog_or_tty;
extern Mutex* tty_lock;
+#define EXTRACHARLEN 32
+#define CURRENTAPPX ".current"
+#define FILENAMEBUFLEN 1024
+// convert YYYY-MM-DD HH:MM:SS to YYYY-MM-DD_HH-MM-SS
+char* get_datetime_string(char *buf, size_t len) {
+ os::local_time_string(buf, len);
+ int i = (int)strlen(buf);
+ while (--i >= 0) { // pre-decrement so the loop never touches buf[-1]
+ if (buf[i] == ' ') buf[i] = '_';
+ else if (buf[i] == ':') buf[i] = '-';
+ }
+ return buf;
+}
+
+static const char* make_log_name_internal(const char* log_name, const char* force_directory,
+ int pid, const char* tms) {
+ const char* basename = log_name;
+ char file_sep = os::file_separator()[0];
+ const char* cp;
+ char pid_text[32];
+
+ for (cp = log_name; *cp != '\0'; cp++) {
+ if (*cp == '/' || *cp == file_sep) {
+ basename = cp + 1;
+ }
+ }
+ const char* nametail = log_name;
+ // Compute buffer length
+ size_t buffer_length;
+ if (force_directory != NULL) {
+ buffer_length = strlen(force_directory) + strlen(os::file_separator()) +
+ strlen(basename) + 1;
+ } else {
+ buffer_length = strlen(log_name) + 1;
+ }
+
+ const char* pts = strstr(basename, "%p");
+ int pid_pos = (pts == NULL) ? -1 : (pts - nametail);
+
+ if (pid_pos >= 0) {
+ jio_snprintf(pid_text, sizeof(pid_text), "pid%u", pid);
+ buffer_length += strlen(pid_text);
+ }
+
+ pts = strstr(basename, "%t");
+ int tms_pos = (pts == NULL) ? -1 : (pts - nametail);
+ if (tms_pos >= 0) {
+ buffer_length += strlen(tms);
+ }
+
+ // Create big enough buffer.
+ char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
+
+ strcpy(buf, "");
+ if (force_directory != NULL) {
+ strcat(buf, force_directory);
+ strcat(buf, os::file_separator());
+ nametail = basename; // completely skip directory prefix
+ }
+
+  // which substitution comes first, %p or %t?
+ int first = -1, second = -1;
+ const char *p1st = NULL;
+ const char *p2nd = NULL;
+
+ if (pid_pos >= 0 && tms_pos >= 0) {
+ // contains both %p and %t
+ if (pid_pos < tms_pos) {
+ // case foo%pbar%tmonkey.log
+ first = pid_pos;
+ p1st = pid_text;
+ second = tms_pos;
+ p2nd = tms;
+ } else {
+ // case foo%tbar%pmonkey.log
+ first = tms_pos;
+ p1st = tms;
+ second = pid_pos;
+ p2nd = pid_text;
+ }
+ } else if (pid_pos >= 0) {
+ // contains %p only
+ first = pid_pos;
+ p1st = pid_text;
+ } else if (tms_pos >= 0) {
+ // contains %t only
+ first = tms_pos;
+ p1st = tms;
+ }
+
+ int buf_pos = (int)strlen(buf);
+ const char* tail = nametail;
+
+ if (first >= 0) {
+ tail = nametail + first + 2;
+ strncpy(&buf[buf_pos], nametail, first);
+ strcpy(&buf[buf_pos + first], p1st);
+ buf_pos = (int)strlen(buf);
+ if (second >= 0) {
+ strncpy(&buf[buf_pos], tail, second - first - 2);
+ strcpy(&buf[buf_pos + second - first - 2], p2nd);
+ tail = nametail + second + 2;
+ }
+ }
+ strcat(buf, tail); // append rest of name, or all of name
+ return buf;
+}
+
+// log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
+// in log_name, %p => pid1234 and
+// %t => YYYY-MM-DD_HH-MM-SS
+static const char* make_log_name(const char* log_name, const char* force_directory) {
+ char timestr[32];
+ get_datetime_string(timestr, sizeof(timestr));
+ return make_log_name_internal(log_name, force_directory, os::current_process_id(),
+ timestr);
+}
+
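For illustration, assuming process id 1234 and timestamp 2013-10-11_12-06-22, the substitution implemented above behaves roughly as follows (file names are hypothetical):

    make_log_name("gc.log", NULL)        => gc.log
    make_log_name("gc-%p.log", NULL)     => gc-pid1234.log
    make_log_name("gc-%t.log", NULL)     => gc-2013-10-11_12-06-22.log
    make_log_name("%t-%p-gc.log", NULL)  => 2013-10-11_12-06-22-pid1234-gc.log
    make_log_name("dir/gc.log", "/tmp")  => /tmp/gc.log  (force_directory replaces
                                            any directory prefix in the pattern)
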
+#ifndef PRODUCT
+void test_loggc_filename() {
+ int pid;
+ char tms[32];
+ char i_result[FILENAMEBUFLEN];
+ const char* o_result;
+ get_datetime_string(tms, sizeof(tms));
+ pid = os::current_process_id();
+
+ // test.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test.log");
+ o_result = make_log_name_internal("test.log", NULL, pid, tms);
+ assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test.log\", NULL)");
+ FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+ // test-%t-%p.log
+ jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%s-pid%u.log", tms, pid);
+ o_result = make_log_name_internal("test-%t-%p.log", NULL, pid, tms);
+ assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t-%%p.log\", NULL)");
+ FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+ // test-%t%p.log
+ jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%spid%u.log", tms, pid);
+ o_result = make_log_name_internal("test-%t%p.log", NULL, pid, tms);
+ assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t%%p.log\", NULL)");
+ FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+ // %p%t.log
+ jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u%s.log", pid, tms);
+ o_result = make_log_name_internal("%p%t.log", NULL, pid, tms);
+ assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p%%t.log\", NULL)");
+ FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+ // %p-test.log
+ jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u-test.log", pid);
+ o_result = make_log_name_internal("%p-test.log", NULL, pid, tms);
+ assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p-test.log\", NULL)");
+ FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+ // %t.log
+ jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "%s.log", tms);
+ o_result = make_log_name_internal("%t.log", NULL, pid, tms);
+ assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%t.log\", NULL)");
+ FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+}
+#endif // PRODUCT
+
fileStream::fileStream(const char* file_name) {
_file = fopen(file_name, "w");
- _need_close = true;
+ if (_file != NULL) {
+ _need_close = true;
+ } else {
+ warning("Cannot open file %s due to %s\n", file_name, strerror(errno));
+ _need_close = false;
+ }
}
fileStream::fileStream(const char* file_name, const char* opentype) {
_file = fopen(file_name, opentype);
- _need_close = true;
+ if (_file != NULL) {
+ _need_close = true;
+ } else {
+ warning("Cannot open file %s due to %s\n", file_name, strerror(errno));
+ _need_close = false;
+ }
}
void fileStream::write(const char* s, size_t len) {
@@ -420,34 +599,51 @@ void fdStream::write(const char* s, size_t len) {
update_position(s, len);
}
-rotatingFileStream::~rotatingFileStream() {
+// dump VM version, OS version, platform info, build id,
+// memory usage and command line flags into the log header
+void gcLogFileStream::dump_loggc_header() {
+ if (is_open()) {
+ print_cr(Abstract_VM_Version::internal_vm_info_string());
+ os::print_memory_info(this);
+ print("CommandLine flags: ");
+ CommandLineFlags::printSetFlags(this);
+ }
+}
+
+gcLogFileStream::~gcLogFileStream() {
if (_file != NULL) {
if (_need_close) fclose(_file);
- _file = NULL;
+ _file = NULL;
+ }
+ if (_file_name != NULL) {
FREE_C_HEAP_ARRAY(char, _file_name, mtInternal);
_file_name = NULL;
}
}
-rotatingFileStream::rotatingFileStream(const char* file_name) {
+gcLogFileStream::gcLogFileStream(const char* file_name) {
_cur_file_num = 0;
_bytes_written = 0L;
- _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal);
- jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
- _file = fopen(_file_name, "w");
- _need_close = true;
-}
+ _file_name = make_log_name(file_name, NULL);
-rotatingFileStream::rotatingFileStream(const char* file_name, const char* opentype) {
- _cur_file_num = 0;
- _bytes_written = 0L;
- _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal);
- jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
- _file = fopen(_file_name, opentype);
- _need_close = true;
+ // gc log file rotation
+ if (UseGCLogFileRotation && NumberOfGCLogFiles > 1) {
+ char tempbuf[FILENAMEBUFLEN];
+ jio_snprintf(tempbuf, sizeof(tempbuf), "%s.%d" CURRENTAPPX, _file_name, _cur_file_num);
+ _file = fopen(tempbuf, "w");
+ } else {
+ _file = fopen(_file_name, "w");
+ }
+ if (_file != NULL) {
+ _need_close = true;
+ dump_loggc_header();
+ } else {
+ warning("Cannot open file %s due to %s\n", _file_name, strerror(errno));
+ _need_close = false;
+ }
}
-void rotatingFileStream::write(const char* s, size_t len) {
+void gcLogFileStream::write(const char* s, size_t len) {
if (_file != NULL) {
size_t count = fwrite(s, 1, len, _file);
_bytes_written += count;
@@ -463,7 +659,12 @@ void rotatingFileStream::write(const char* s, size_t len) {
// Write to the gc log file at a safepoint. If, in the future, mutator threads or
// concurrent GC threads are changed to run in parallel with the VMThread at a
// safepoint, write and rotate_log must be synchronized.
-void rotatingFileStream::rotate_log() {
+void gcLogFileStream::rotate_log() {
+ char time_msg[FILENAMEBUFLEN];
+ char time_str[EXTRACHARLEN];
+ char current_file_name[FILENAMEBUFLEN];
+ char renamed_file_name[FILENAMEBUFLEN];
+
if (_bytes_written < (jlong)GCLogFileSize) {
return;
}
@@ -478,27 +679,89 @@ void rotatingFileStream::rotate_log() {
// rotate in same file
rewind();
_bytes_written = 0L;
+ jio_snprintf(time_msg, sizeof(time_msg), "File %s rotated at %s\n",
+ _file_name, os::local_time_string((char *)time_str, sizeof(time_str)));
+ write(time_msg, strlen(time_msg));
+ dump_loggc_header();
return;
}
- // rotate file in names file.0, file.1, file.2, ..., file.<MaxGCLogFileNumbers-1>
- // close current file, rotate to next file
+#if defined(_WINDOWS)
+#ifndef F_OK
+#define F_OK 0
+#endif
+#endif // _WINDOWS
+
+  // Rotate through the names extended_filename.0, extended_filename.1, ...,
+  // extended_filename.<NumberOfGCLogFiles - 1>. The file currently being written
+  // has the form extended_filename.<i>.current, where i is the current rotation
+  // file number. Once it reaches the maximum size, it is saved and renamed by
+  // stripping the .current suffix.
+ size_t filename_len = strlen(_file_name);
if (_file != NULL) {
- _cur_file_num ++;
- if (_cur_file_num >= NumberOfGCLogFiles) _cur_file_num = 0;
- jio_snprintf(_file_name, strlen(Arguments::gc_log_filename()) + 10, "%s.%d",
- Arguments::gc_log_filename(), _cur_file_num);
+ jio_snprintf(renamed_file_name, filename_len + EXTRACHARLEN, "%s.%d",
+ _file_name, _cur_file_num);
+ jio_snprintf(current_file_name, filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
+ _file_name, _cur_file_num);
+ jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file has reached the"
+ " maximum size. Saved as %s\n",
+ os::local_time_string((char *)time_str, sizeof(time_str)),
+ renamed_file_name);
+ write(time_msg, strlen(time_msg));
+
fclose(_file);
_file = NULL;
+
+ bool can_rename = true;
+ if (access(current_file_name, F_OK) != 0) {
+      // the current rotation file does not exist
+      warning("Source file %s does not exist, cannot rename\n", current_file_name);
+ can_rename = false;
+ }
+ if (can_rename) {
+ if (access(renamed_file_name, F_OK) == 0) {
+ if (remove(renamed_file_name) != 0) {
+ warning("Could not delete existing file %s\n", renamed_file_name);
+ can_rename = false;
+ }
+ } else {
+ // file does not exist, ok to rename
+ }
+ }
+ if (can_rename && rename(current_file_name, renamed_file_name) != 0) {
+ warning("Could not rename %s to %s\n", _file_name, renamed_file_name);
+ }
}
- _file = fopen(_file_name, "w");
+
+ _cur_file_num++;
+ if (_cur_file_num > NumberOfGCLogFiles - 1) _cur_file_num = 0;
+ jio_snprintf(current_file_name, filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
+ _file_name, _cur_file_num);
+ _file = fopen(current_file_name, "w");
+
if (_file != NULL) {
_bytes_written = 0L;
_need_close = true;
+    // reuse the current_file_name buffer to format the file name in time_msg
+ jio_snprintf(current_file_name, filename_len + EXTRACHARLEN,
+ "%s.%d", _file_name, _cur_file_num);
+ jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file created %s\n",
+ os::local_time_string((char *)time_str, sizeof(time_str)),
+ current_file_name);
+ write(time_msg, strlen(time_msg));
+ dump_loggc_header();
+ // remove the existing file
+ if (access(current_file_name, F_OK) == 0) {
+ if (remove(current_file_name) != 0) {
+ warning("Could not delete existing file %s\n", current_file_name);
+ }
+ }
} else {
- tty->print_cr("failed to open rotation log file %s due to %s\n",
+ warning("failed to open rotation log file %s due to %s\n"
+ "Turned off GC log file rotation\n",
_file_name, strerror(errno));
_need_close = false;
+ FLAG_SET_DEFAULT(UseGCLogFileRotation, false);
}
}
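
Putting the pieces together: with -Xloggc:gc.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=3, the stream above cycles through names like this (a sketch; %p/%t substitution, if requested, happens before the rotation suffixes are appended):

    gc.log.0.current    active file
    gc.log.0            saved once GCLogFileSize is reached; then
    gc.log.1.current    becomes active, and so on up to gc.log.2,
                        wrapping back to gc.log.0.current
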
@@ -527,69 +790,9 @@ bool defaultStream::has_log_file() {
return _log_file != NULL;
}
-static const char* make_log_name(const char* log_name, const char* force_directory) {
- const char* basename = log_name;
- char file_sep = os::file_separator()[0];
- const char* cp;
- for (cp = log_name; *cp != '\0'; cp++) {
- if (*cp == '/' || *cp == file_sep) {
- basename = cp+1;
- }
- }
- const char* nametail = log_name;
-
- // Compute buffer length
- size_t buffer_length;
- if (force_directory != NULL) {
- buffer_length = strlen(force_directory) + strlen(os::file_separator()) +
- strlen(basename) + 1;
- } else {
- buffer_length = strlen(log_name) + 1;
- }
-
- const char* star = strchr(basename, '*');
- int star_pos = (star == NULL) ? -1 : (star - nametail);
- int skip = 1;
- if (star == NULL) {
- // Try %p
- star = strstr(basename, "%p");
- if (star != NULL) {
- skip = 2;
- }
- }
- star_pos = (star == NULL) ? -1 : (star - nametail);
-
- char pid[32];
- if (star_pos >= 0) {
- jio_snprintf(pid, sizeof(pid), "%u", os::current_process_id());
- buffer_length += strlen(pid);
- }
-
- // Create big enough buffer.
- char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
-
- strcpy(buf, "");
- if (force_directory != NULL) {
- strcat(buf, force_directory);
- strcat(buf, os::file_separator());
- nametail = basename; // completely skip directory prefix
- }
-
- if (star_pos >= 0) {
- // convert foo*bar.log or foo%pbar.log to foo123bar.log
- int buf_pos = (int) strlen(buf);
- strncpy(&buf[buf_pos], nametail, star_pos);
- strcpy(&buf[buf_pos + star_pos], pid);
- nametail += star_pos + skip; // skip prefix and pid format
- }
-
- strcat(buf, nametail); // append rest of name, or all of name
- return buf;
-}
-
void defaultStream::init_log() {
// %%% Need a MutexLocker?
- const char* log_name = LogFile != NULL ? LogFile : "hotspot.log";
+  const char* log_name = LogFile != NULL ? LogFile : "hotspot_%p.log";
const char* try_name = make_log_name(log_name, NULL);
fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
if (!file->is_open()) {
@@ -600,14 +803,15 @@ void defaultStream::init_log() {
// Note: This feature is for maintainer use only. No need for L10N.
jio_print(warnbuf);
FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
- try_name = make_log_name("hs_pid%p.log", os::get_temp_directory());
+ try_name = make_log_name(log_name, os::get_temp_directory());
jio_snprintf(warnbuf, sizeof(warnbuf),
"Warning: Forcing option -XX:LogFile=%s\n", try_name);
jio_print(warnbuf);
delete file;
file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
- FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
}
+ FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
+
if (file->is_open()) {
_log_file = file;
xmlStream* xs = new(ResourceObj::C_HEAP, mtInternal) xmlStream(file);
@@ -874,11 +1078,8 @@ void ostream_init_log() {
gclog_or_tty = tty; // default to tty
if (Arguments::gc_log_filename() != NULL) {
- fileStream * gclog = UseGCLogFileRotation ?
- new(ResourceObj::C_HEAP, mtInternal)
- rotatingFileStream(Arguments::gc_log_filename()) :
- new(ResourceObj::C_HEAP, mtInternal)
- fileStream(Arguments::gc_log_filename());
+ fileStream * gclog = new(ResourceObj::C_HEAP, mtInternal)
+ gcLogFileStream(Arguments::gc_log_filename());
if (gclog->is_open()) {
// now we update the time stamp of the GC log to be synced up
// with tty.
diff --git a/src/share/vm/utilities/ostream.hpp b/src/share/vm/utilities/ostream.hpp
index 6b154184b..9b1b1217b 100644
--- a/src/share/vm/utilities/ostream.hpp
+++ b/src/share/vm/utilities/ostream.hpp
@@ -28,6 +28,8 @@
#include "memory/allocation.hpp"
#include "runtime/timer.hpp"
+DEBUG_ONLY(class ResourceMark;)
+
// Output streams for printing
//
// Printing guidelines:
@@ -177,6 +179,7 @@ class stringStream : public outputStream {
size_t buffer_pos;
size_t buffer_length;
bool buffer_fixed;
+ DEBUG_ONLY(ResourceMark* rm;)
public:
stringStream(size_t initial_bufsize = 256);
stringStream(char* fixed_buffer, size_t fixed_buffer_size);
@@ -228,20 +231,24 @@ class fdStream : public outputStream {
void flush() {};
};
-class rotatingFileStream : public fileStream {
+class gcLogFileStream : public fileStream {
protected:
- char* _file_name;
+ const char* _file_name;
jlong _bytes_written;
- uintx _cur_file_num; // current logfile rotation number, from 0 to MaxGCLogFileNumbers-1
+ uintx _cur_file_num; // current logfile rotation number, from 0 to NumberOfGCLogFiles-1
public:
- rotatingFileStream(const char* file_name);
- rotatingFileStream(const char* file_name, const char* opentype);
- rotatingFileStream(FILE* file) : fileStream(file) {}
- ~rotatingFileStream();
+ gcLogFileStream(const char* file_name);
+ ~gcLogFileStream();
virtual void write(const char* c, size_t len);
virtual void rotate_log();
+ void dump_loggc_header();
};
+#ifndef PRODUCT
+// unit test for checking -Xloggc:<filename> parsing result
+void test_loggc_filename();
+#endif
+
void ostream_init();
void ostream_init_log();
void ostream_exit();
diff --git a/src/share/vm/utilities/quickSort.cpp b/src/share/vm/utilities/quickSort.cpp
index e3cfa1efa..0cb7f6ef8 100644
--- a/src/share/vm/utilities/quickSort.cpp
+++ b/src/share/vm/utilities/quickSort.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,8 +30,11 @@
#include "runtime/os.hpp"
#include "utilities/quickSort.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
#include <stdlib.h>
+#ifdef ASSERT
static int test_comparator(int a, int b) {
if (a == b) {
return 0;
@@ -41,6 +44,7 @@ static int test_comparator(int a, int b) {
}
return 1;
}
+#endif // ASSERT
static int test_even_odd_comparator(int a, int b) {
bool a_is_odd = (a % 2) == 1;
@@ -187,8 +191,8 @@ void QuickSort::test_quick_sort() {
// test sorting random arrays
for (int i = 0; i < 1000; i++) {
int length = os::random() % 100;
- int* test_array = new int[length];
- int* expected_array = new int[length];
+ int* test_array = NEW_C_HEAP_ARRAY(int, length, mtInternal);
+ int* expected_array = NEW_C_HEAP_ARRAY(int, length, mtInternal);
for (int j = 0; j < length; j++) {
// Choose random values, but get a chance of getting duplicates
test_array[j] = os::random() % (length * 2);
@@ -210,8 +214,8 @@ void QuickSort::test_quick_sort() {
sort(test_array, length, test_even_odd_comparator, true);
assert(compare_arrays(test_array, expected_array, length), "Sorting already sorted array changed order of elements - not idempotent");
- delete[] test_array;
- delete[] expected_array;
+ FREE_C_HEAP_ARRAY(int, test_array, mtInternal);
+ FREE_C_HEAP_ARRAY(int, expected_array, mtInternal);
}
}
diff --git a/src/share/vm/utilities/taskqueue.hpp b/src/share/vm/utilities/taskqueue.hpp
index 0dcc7c262..8b558c988 100644
--- a/src/share/vm/utilities/taskqueue.hpp
+++ b/src/share/vm/utilities/taskqueue.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -135,6 +135,8 @@ void TaskQueueStats::reset() {
}
#endif // TASKQUEUE_STATS
+// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.
+
template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
protected:
@@ -252,7 +254,36 @@ public:
TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};
-
+//
+// GenericTaskQueue implements an ABP (Arora-Blumofe-Plaxton) double-
+// ended queue (deque), intended for use in work stealing. Queue operations
+// are non-blocking.
+//
+// A queue owner thread performs push() and pop_local() operations on one end
+// of the queue, while other threads may steal work using the pop_global()
+// method.
+//
+// The main difference from the original algorithm is that this
+// implementation allows wrap-around at the end of its allocated
+// storage, which is an array.
+//
+// The original paper is:
+//
+// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
+// Thread scheduling for multiprogrammed multiprocessors.
+// Theory of Computing Systems 34, 2 (2001), 115-144.
+//
+// The following paper provides a correctness proof and an
+// implementation for weakly ordered memory models including (pseudo-)
+// code containing memory barriers for a Chase-Lev deque. Chase-Lev is
+// similar to ABP, with the main difference that it allows resizing of the
+// underlying storage:
+//
+// Le, N. M., Pop, A., Cohen, A., and Nardelli, F. Z.
+// Correct and efficient work-stealing for weak memory models
+// Proceedings of the 18th ACM SIGPLAN symposium on Principles and
+// practice of parallel programming (PPoPP 2013), 69-80
+//
template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
@@ -343,8 +374,12 @@ bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
if (dirty_n_elems == N - 1) {
// Actually means 0, so do the push.
uint localBot = _bottom;
- // g++ complains if the volatile result of the assignment is unused.
- const_cast<E&>(_elems[localBot] = t);
+ // g++ complains if the volatile result of the assignment is
+ // unused, so we cast the volatile away. We cannot cast directly
+ // to void, because gcc treats that as not using the result of the
+ // assignment. However, casting to E& means that we trigger an
+ // unused-value warning. So, we cast the E& to void.
+ (void)const_cast<E&>(_elems[localBot] = t);
OrderAccess::release_store(&_bottom, increment_index(localBot));
TASKQUEUE_STATS_ONLY(stats.record_push());
return true;
@@ -394,13 +429,24 @@ bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_global(E& t) {
Age oldAge = _age.get();
- uint localBot = _bottom;
+  // Architectures with a weak memory model require a barrier here
+ // to guarantee that bottom is not older than age,
+ // which is crucial for the correctness of the algorithm.
+#if !(defined SPARC || defined IA32 || defined AMD64)
+ OrderAccess::fence();
+#endif
+ uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
uint n_elems = size(localBot, oldAge.top());
if (n_elems == 0) {
return false;
}
- const_cast<E&>(t = _elems[oldAge.top()]);
+ // g++ complains if the volatile result of the assignment is
+ // unused, so we cast the volatile away. We cannot cast directly
+ // to void, because gcc treats that as not using the result of the
+ // assignment. However, casting to E& means that we trigger an
+ // unused-value warning. So, we cast the E& to void.
+ (void) const_cast<E&>(t = _elems[oldAge.top()]);
Age newAge(oldAge);
newAge.increment();
Age resAge = _age.cmpxchg(newAge, oldAge);
@@ -638,13 +684,17 @@ public:
template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::push(E t) {
uint localBot = _bottom;
- assert((localBot >= 0) && (localBot < N), "_bottom out of range.");
+ assert(localBot < N, "_bottom out of range.");
idx_t top = _age.top();
uint dirty_n_elems = dirty_size(localBot, top);
assert(dirty_n_elems < N, "n_elems out of range.");
if (dirty_n_elems < max_elems()) {
- // g++ complains if the volatile result of the assignment is unused.
- const_cast<E&>(_elems[localBot] = t);
+ // g++ complains if the volatile result of the assignment is
+ // unused, so we cast the volatile away. We cannot cast directly
+ // to void, because gcc treats that as not using the result of the
+ // assignment. However, casting to E& means that we trigger an
+ // unused-value warning. So, we cast the E& to void.
+ (void) const_cast<E&>(_elems[localBot] = t);
OrderAccess::release_store(&_bottom, increment_index(localBot));
TASKQUEUE_STATS_ONLY(stats.record_push());
return true;
@@ -668,7 +718,12 @@ GenericTaskQueue<E, F, N>::pop_local(E& t) {
// This is necessary to prevent any read below from being reordered
// before the store just above.
OrderAccess::fence();
- const_cast<E&>(t = _elems[localBot]);
+ // g++ complains if the volatile result of the assignment is
+ // unused, so we cast the volatile away. We cannot cast directly
+ // to void, because gcc treats that as not using the result of the
+ // assignment. However, casting to E& means that we trigger an
+ // unused-value warning. So, we cast the E& to void.
+ (void) const_cast<E&>(t = _elems[localBot]);
// This is a second read of "age"; the "size()" above is the first.
// If there's still at least one element in the queue, based on the
// "_bottom" and "age" we've read, then there can be no interference with
diff --git a/src/share/vm/utilities/vmError.cpp b/src/share/vm/utilities/vmError.cpp
index f7b940b52..79769aeb3 100644
--- a/src/share/vm/utilities/vmError.cpp
+++ b/src/share/vm/utilities/vmError.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -574,6 +574,10 @@ void VMError::report(outputStream* st) {
STEP(120, "(printing native stack)" )
if (_verbose) {
+ if (os::platform_print_native_stack(st, _context, buf, sizeof(buf))) {
+      // The native stack has been printed by platform-specific code;
+      // Windows/x64 needs this special handling.
+ } else {
frame fr = _context ? os::fetch_frame_from_context(_context)
: os::current_frame();
@@ -586,6 +590,13 @@ void VMError::report(outputStream* st) {
while (count++ < StackPrintLimit) {
fr.print_on_error(st, buf, sizeof(buf));
st->cr();
+      // Compiled code may use the EBP register on x86, making a frame look
+      // like a non-walkable C frame. Use frame.sender() for Java frames.
+ if (_thread && _thread->is_Java_thread() && fr.is_java_frame()) {
+ RegisterMap map((JavaThread*)_thread, false); // No update
+ fr = fr.sender(&map);
+ continue;
+ }
if (os::is_first_C_frame(&fr)) break;
fr = os::get_sender_for_C_frame(&fr);
}
@@ -597,6 +608,7 @@ void VMError::report(outputStream* st) {
st->cr();
}
}
+ }
STEP(130, "(printing Java stack)" )
@@ -799,6 +811,14 @@ void VMError::report(outputStream* st) {
VMError* volatile VMError::first_error = NULL;
volatile jlong VMError::first_error_tid = -1;
+// An error could happen before tty is initialized or after it has been
+// destroyed. Here we use a very simple unbuffered fdStream for printing.
+// Only out.print_raw() and out.print_raw_cr() should be used, as other
+// printing methods need to allocate a large buffer on the stack. To format a
+// string, use jio_snprintf() with a static buffer or use staticBufferStream.
+fdStream VMError::out(defaultStream::output_fd());
+fdStream VMError::log; // error log used by VMError::report_and_die()
+
/** Expand a pattern into a buffer starting at pos and open a file using constructed path */
static int expand_and_open(const char* pattern, char* buf, size_t buflen, size_t pos) {
int fd = -1;
@@ -853,13 +873,6 @@ void VMError::report_and_die() {
// Don't allocate large buffer on stack
static char buffer[O_BUFLEN];
- // An error could happen before tty is initialized or after it has been
- // destroyed. Here we use a very simple unbuffered fdStream for printing.
- // Only out.print_raw() and out.print_raw_cr() should be used, as other
- // printing methods need to allocate large buffer on stack. To format a
- // string, use jio_snprintf() with a static buffer or use staticBufferStream.
- static fdStream out(defaultStream::output_fd());
-
// How many errors occurred in error handler when reporting first_error.
static int recursive_error_count;
@@ -868,7 +881,6 @@ void VMError::report_and_die() {
static bool out_done = false; // done printing to standard out
static bool log_done = false; // done saving error log
static bool transmit_report_done = false; // done error reporting
- static fdStream log; // error log
// disable NMT to avoid further exceptions
MemTracker::shutdown(MemTracker::NMT_error_reporting);
@@ -908,10 +920,11 @@ void VMError::report_and_die() {
// This is not the first error, see if it happened in a different thread
// or in the same thread during error reporting.
if (first_error_tid != mytid) {
- jio_snprintf(buffer, sizeof(buffer),
+ char msgbuf[64];
+ jio_snprintf(msgbuf, sizeof(msgbuf),
"[thread " INT64_FORMAT " also had an error]",
mytid);
- out.print_raw_cr(buffer);
+ out.print_raw_cr(msgbuf);
// error reporting is not MT-safe, block current thread
os::infinite_sleep();
diff --git a/src/share/vm/utilities/vmError.hpp b/src/share/vm/utilities/vmError.hpp
index f298c1edb..299cfaa6f 100644
--- a/src/share/vm/utilities/vmError.hpp
+++ b/src/share/vm/utilities/vmError.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -96,6 +96,9 @@ class VMError : public StackObj {
return (id != OOM_MALLOC_ERROR) && (id != OOM_MMAP_ERROR);
}
+ static fdStream out;
+ static fdStream log; // error log used by VMError::report_and_die()
+
public:
// Constructor for crashes
@@ -133,6 +136,10 @@ public:
// check to see if fatal error reporting is in progress
static bool fatal_error_in_progress() { return first_error != NULL; }
+
+ static jlong get_first_error_tid() {
+ return first_error_tid;
+ }
};
#endif // SHARE_VM_UTILITIES_VMERROR_HPP
diff --git a/src/share/vm/utilities/workgroup.cpp b/src/share/vm/utilities/workgroup.cpp
index 5db6344fd..479cd0402 100644
--- a/src/share/vm/utilities/workgroup.cpp
+++ b/src/share/vm/utilities/workgroup.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -529,7 +529,7 @@ bool FreeIdSet::_safepoint;
FreeIdSet::FreeIdSet(int sz, Monitor* mon) :
_sz(sz), _mon(mon), _hd(0), _waiters(0), _index(-1), _claimed(0)
{
- _ids = new int[sz];
+ _ids = NEW_C_HEAP_ARRAY(int, sz, mtInternal);
for (int i = 0; i < sz; i++) _ids[i] = i+1;
_ids[sz-1] = end_of_list; // end of list.
if (_stat_init) {
@@ -549,6 +549,7 @@ FreeIdSet::FreeIdSet(int sz, Monitor* mon) :
FreeIdSet::~FreeIdSet() {
_sets[_index] = NULL;
+ FREE_C_HEAP_ARRAY(int, _ids, mtInternal);
}
void FreeIdSet::set_safepoint(bool b) {
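
This hunk (like the quickSort.cpp change above) moves raw new[]/delete[] allocations to the VM's C-heap macros so they are visible to native memory tracking. A sketch of the pairing convention, using a hypothetical buffer:

    int* ids = NEW_C_HEAP_ARRAY(int, sz, mtInternal);  // malloc + NMT bookkeeping
    // ... use ids ...
    FREE_C_HEAP_ARRAY(int, ids, mtInternal);           // same element type and
                                                       // memory flag on both sides
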
diff --git a/src/share/vm/utilities/workgroup.hpp b/src/share/vm/utilities/workgroup.hpp
index 6a9353624..e1184a679 100644
--- a/src/share/vm/utilities/workgroup.hpp
+++ b/src/share/vm/utilities/workgroup.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -494,7 +494,7 @@ public:
};
// Represents a set of free small integer ids.
-class FreeIdSet {
+class FreeIdSet : public CHeapObj<mtInternal> {
enum {
end_of_list = -1,
claimed = -2
diff --git a/src/share/vm/utilities/yieldingWorkgroup.hpp b/src/share/vm/utilities/yieldingWorkgroup.hpp
index 5a626ce7f..98d8f438e 100644
--- a/src/share/vm/utilities/yieldingWorkgroup.hpp
+++ b/src/share/vm/utilities/yieldingWorkgroup.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,7 @@
#define SHARE_VM_UTILITIES_YIELDINGWORKGROUP_HPP
#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
#include "utilities/workgroup.hpp"
-#endif // INCLUDE_ALL_GCS
-
// Forward declarations
class YieldingFlexibleWorkGang;