author     amurillo <none@none>  2014-04-17 16:09:07 -0700
committer  amurillo <none@none>  2014-04-17 16:09:07 -0700
commit     f71bd74e715de906f32ae14b8c6b3b80cc513a57 (patch)
tree       f65bce69ec8d2f3e3318562f1f289fd34875f973 /src
parent     00662865fda0cfb2e7e127448fda77343a516187 (diff)
parent     bcd90a3bce40982b196a908019d444fb28852d7b (diff)
Diffstat (limited to 'src')
-rw-r--r--  src/cpu/sparc/vm/assembler_sparc.hpp | 10
-rw-r--r--  src/cpu/sparc/vm/assembler_sparc.inline.hpp | 28
-rw-r--r--  src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp | 4
-rw-r--r--  src/cpu/sparc/vm/sparc.ad | 79
-rw-r--r--  src/share/vm/adlc/output_h.cpp | 23
-rw-r--r--  src/share/vm/classfile/classFileParser.cpp | 22
-rw-r--r--  src/share/vm/classfile/defaultMethods.cpp | 30
-rw-r--r--  src/share/vm/classfile/javaClasses.cpp | 67
-rw-r--r--  src/share/vm/classfile/javaClasses.hpp | 3
-rw-r--r--  src/share/vm/classfile/systemDictionary.cpp | 54
-rw-r--r--  src/share/vm/classfile/systemDictionary.hpp | 1
-rw-r--r--  src/share/vm/classfile/verificationType.hpp | 4
-rw-r--r--  src/share/vm/classfile/verifier.cpp | 7
-rw-r--r--  src/share/vm/code/debugInfo.hpp | 6
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp | 2
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp | 2
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp | 2
-rw-r--r--  src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp | 2
-rw-r--r--  src/share/vm/gc_implementation/shared/gcHeapSummary.hpp | 10
-rw-r--r--  src/share/vm/gc_implementation/shared/gcTraceSend.cpp | 2
-rw-r--r--  src/share/vm/gc_implementation/shared/vmGCOperations.cpp | 9
-rw-r--r--  src/share/vm/gc_interface/collectedHeap.cpp | 44
-rw-r--r--  src/share/vm/gc_interface/collectedHeap.hpp | 7
-rw-r--r--  src/share/vm/gc_interface/collectedHeap.inline.hpp | 18
-rw-r--r--  src/share/vm/interpreter/linkResolver.cpp | 17
-rw-r--r--  src/share/vm/memory/gcLocker.cpp | 32
-rw-r--r--  src/share/vm/memory/gcLocker.hpp | 14
-rw-r--r--  src/share/vm/memory/gcLocker.inline.hpp | 16
-rw-r--r--  src/share/vm/memory/genCollectedHeap.cpp | 2
-rw-r--r--  src/share/vm/memory/metaspace.cpp | 72
-rw-r--r--  src/share/vm/memory/metaspace.hpp | 42
-rw-r--r--  src/share/vm/memory/metaspaceCounters.cpp | 4
-rw-r--r--  src/share/vm/memory/metaspaceShared.cpp | 3
-rw-r--r--  src/share/vm/memory/universe.cpp | 3
-rw-r--r--  src/share/vm/oops/arrayKlass.cpp | 4
-rw-r--r--  src/share/vm/oops/arrayKlass.hpp | 2
-rw-r--r--  src/share/vm/oops/constantPool.cpp | 4
-rw-r--r--  src/share/vm/oops/instanceKlass.cpp | 62
-rw-r--r--  src/share/vm/oops/instanceKlass.hpp | 14
-rw-r--r--  src/share/vm/oops/instanceMirrorKlass.cpp | 9
-rw-r--r--  src/share/vm/oops/klass.cpp | 35
-rw-r--r--  src/share/vm/oops/klass.hpp | 6
-rw-r--r--  src/share/vm/oops/klassVtable.cpp | 8
-rw-r--r--  src/share/vm/oops/metadata.hpp | 1
-rw-r--r--  src/share/vm/oops/method.cpp | 13
-rw-r--r--  src/share/vm/oops/method.hpp | 2
-rw-r--r--  src/share/vm/opto/loopTransform.cpp | 1
-rw-r--r--  src/share/vm/opto/loopnode.cpp | 20
-rw-r--r--  src/share/vm/opto/loopopts.cpp | 1
-rw-r--r--  src/share/vm/opto/machnode.hpp | 9
-rw-r--r--  src/share/vm/opto/node.hpp | 21
-rw-r--r--  src/share/vm/opto/output.cpp | 16
-rw-r--r--  src/share/vm/prims/jvm.cpp | 3
-rw-r--r--  src/share/vm/prims/nativeLookup.cpp | 4
-rw-r--r--  src/share/vm/prims/whitebox.cpp | 26
-rw-r--r--  src/share/vm/runtime/sharedRuntime.cpp | 7
-rw-r--r--  src/share/vm/runtime/sharedRuntime.hpp | 4
-rw-r--r--  src/share/vm/runtime/thread.cpp | 1
-rw-r--r--  src/share/vm/runtime/thread.hpp | 4
-rw-r--r--  src/share/vm/runtime/unhandledOops.cpp | 4
-rw-r--r--  src/share/vm/runtime/vm_operations.hpp | 1
-rw-r--r--  src/share/vm/services/memoryPool.cpp | 4
-rw-r--r--  src/share/vm/trace/trace.xml | 2
63 files changed, 487 insertions, 442 deletions
diff --git a/src/cpu/sparc/vm/assembler_sparc.hpp b/src/cpu/sparc/vm/assembler_sparc.hpp
index 11547cde9..ffbc6f27c 100644
--- a/src/cpu/sparc/vm/assembler_sparc.hpp
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp
@@ -630,11 +630,20 @@ class Assembler : public AbstractAssembler {
}
protected:
+ // Insert a nop if the previous instruction was a cbcond
+ void insert_nop_after_cbcond() {
+ if (UseCBCond && cbcond_before()) {
+ nop();
+ }
+ }
// Delay slot helpers
// cti is called when emitting control-transfer instruction,
// BEFORE doing the emitting.
// Only effective when assertion-checking is enabled.
void cti() {
+ // A cbcond instruction immediately followed by a CTI
+ // instruction introduces pipeline stalls, so we need to avoid that.
+ no_cbcond_before();
#ifdef CHECK_DELAY
assert_not_delayed("cti should not be in delay slot");
#endif
@@ -658,7 +667,6 @@ class Assembler : public AbstractAssembler {
void no_cbcond_before() {
assert(offset() == 0 || !cbcond_before(), "cbcond should not follow another cbcond");
}
-
public:
bool use_cbcond(Label& L) {
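The guard pattern is easiest to see in isolation. Below is a minimal sketch using an assumed toy emitter; it is not HotSpot code (HotSpot instead checks the previously emitted instruction word via cbcond_before()), but it shows the invariant the hooks above maintain: no control-transfer instruction (CTI) is ever emitted directly after a cbcond.

    #include <cstdint>
    #include <vector>

    class TinyEmitter {
      std::vector<uint32_t> _buf;
      bool _last_was_cbcond = false;
    public:
      void nop()                 { emit(0x01000000); }  // SPARC nop encoding
      void cbcond(uint32_t insn) { emit(insn); _last_was_cbcond = true; }
      void cti(uint32_t insn) {                         // any branch/call/jmpl
        if (_last_was_cbcond) nop();                    // break the cbcond->CTI pair
        emit(insn);
      }
    private:
      void emit(uint32_t insn) { _buf.push_back(insn); _last_was_cbcond = false; }
    };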
diff --git a/src/cpu/sparc/vm/assembler_sparc.inline.hpp b/src/cpu/sparc/vm/assembler_sparc.inline.hpp
index 2e5ba2101..2bbf95e3b 100644
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp
@@ -54,33 +54,33 @@ inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
inline void Assembler::add(Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::add(Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
-inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }
+inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
+inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { insert_nop_after_cbcond(); bpr( c, a, p, s1, target(L)); }
-inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
-inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L)); }
+inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
+inline void Assembler::fb( Condition c, bool a, Label& L ) { insert_nop_after_cbcond(); fb(c, a, target(L)); }
-inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
-inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }
+inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
+inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { insert_nop_after_cbcond(); fbp(c, a, cc, p, target(L)); }
-inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
-inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }
+inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
+inline void Assembler::br( Condition c, bool a, Label& L ) { insert_nop_after_cbcond(); br(c, a, target(L)); }
-inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
-inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { bp(c, a, cc, p, target(L)); }
+inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
+inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { insert_nop_after_cbcond(); bp(c, a, cc, p, target(L)); }
// compare and branch
inline void Assembler::cbcond(Condition c, CC cc, Register s1, Register s2, Label& L) { cti(); no_cbcond_before(); emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | rs2(s2)); }
inline void Assembler::cbcond(Condition c, CC cc, Register s1, int simm5, Label& L) { cti(); no_cbcond_before(); emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | immed(true) | simm(simm5, 5)); }
-inline void Assembler::call( address d, relocInfo::relocType rt ) { cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
-inline void Assembler::call( Label& L, relocInfo::relocType rt ) { call( target(L), rt); }
+inline void Assembler::call( address d, relocInfo::relocType rt ) { insert_nop_after_cbcond(); cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
+inline void Assembler::call( Label& L, relocInfo::relocType rt ) { insert_nop_after_cbcond(); call( target(L), rt); }
inline void Assembler::flush( Register s1, Register s2) { emit_int32( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::jmpl( Register s1, Register s2, Register d ) { cti(); emit_int32( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
-inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
+inline void Assembler::jmpl( Register s1, Register s2, Register d ) { insert_nop_after_cbcond(); cti(); emit_int32( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
+inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { insert_nop_after_cbcond(); cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
diff --git a/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp b/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp
index 125f9a724..7e3804fd3 100644
--- a/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp
@@ -233,6 +233,7 @@ inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, reloc
}
inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
+ insert_nop_after_cbcond();
br(c, a, p, target(L));
}
@@ -248,6 +249,7 @@ inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relo
}
inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
+ insert_nop_after_cbcond();
brx(c, a, p, target(L));
}
@@ -269,6 +271,7 @@ inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, reloc
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
+ insert_nop_after_cbcond();
fb(c, a, p, target(L));
}
@@ -318,6 +321,7 @@ inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
}
inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
+ insert_nop_after_cbcond();
MacroAssembler::call( target(L), rt);
}
diff --git a/src/cpu/sparc/vm/sparc.ad b/src/cpu/sparc/vm/sparc.ad
index 4a3a33dfd..857ed69b8 100644
--- a/src/cpu/sparc/vm/sparc.ad
+++ b/src/cpu/sparc/vm/sparc.ad
@@ -1268,7 +1268,7 @@ int MachPrologNode::reloc() const {
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
Compile* C = ra_->C;
- if( do_polling() && ra_->C->is_method_compilation() ) {
+ if(do_polling() && ra_->C->is_method_compilation()) {
st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
#ifdef _LP64
st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
@@ -1277,8 +1277,12 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
#endif
}
- if( do_polling() )
+ if(do_polling()) {
+ if (UseCBCond && !ra_->C->is_method_compilation()) {
+ st->print("NOP\n\t");
+ }
st->print("RET\n\t");
+ }
st->print("RESTORE");
}
@@ -1291,15 +1295,20 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
__ verify_thread();
// If this does safepoint polling, then do it here
- if( do_polling() && ra_->C->is_method_compilation() ) {
+ if(do_polling() && ra_->C->is_method_compilation()) {
AddressLiteral polling_page(os::get_polling_page());
__ sethi(polling_page, L0);
__ relocate(relocInfo::poll_return_type);
- __ ld_ptr( L0, 0, G0 );
+ __ ld_ptr(L0, 0, G0);
}
// If this is a return, then stuff the restore in the delay slot
- if( do_polling() ) {
+ if(do_polling()) {
+ if (UseCBCond && !ra_->C->is_method_compilation()) {
+ // Insert extra padding for the case when the epilogue is preceded by
+ // a cbcond jump, which can't be followed by a CTI instruction
+ __ nop();
+ }
__ ret();
__ delayed()->restore();
} else {
@@ -3330,7 +3339,18 @@ op_attrib op_cost(1); // Required cost attribute
//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
ins_attrib ins_size(32); // Required size attribute (in bits)
-ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back
+
+// avoid_back_to_back attribute is an expression that must return
+// one of the following values defined in MachNode:
+// AVOID_NONE             - instruction can be placed anywhere
+// AVOID_BEFORE           - instruction cannot be placed after an
+//                          instruction with MachNode::AVOID_AFTER
+// AVOID_AFTER            - the next instruction cannot be the one
+//                          with MachNode::AVOID_BEFORE
+// AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at
+//                          the same time
+ins_attrib ins_avoid_back_to_back(MachNode::AVOID_NONE);
+
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
// non-matching short branch variant of some
// long branch?
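To make the pairing rule concrete, here is a small sketch of how the BEFORE and AFTER values interact; the enum values and the helper shown are assumptions for illustration, not C2's actual bundling code.

    enum AvoidBackToBack {
      AVOID_NONE             = 0,
      AVOID_BEFORE           = 1,  // must not directly follow an AVOID_AFTER insn
      AVOID_AFTER            = 2,  // must not be directly followed by AVOID_BEFORE
      AVOID_BEFORE_AND_AFTER = AVOID_BEFORE | AVOID_AFTER
    };

    // Padding is required exactly when the previous instruction forbids a
    // back-to-back successor and the current one forbids a predecessor.
    static bool needs_nop_between(int prev, int cur) {
      return (prev & AVOID_AFTER) != 0 && (cur & AVOID_BEFORE) != 0;
    }

Under this rule a cbcond (BEFORE_AND_AFTER) followed by a branch (BEFORE) gets a nop between them, which is exactly the hazard the assembler hooks above defend against.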
@@ -6630,6 +6650,7 @@ instruct encodeHeapOop(iRegN dst, iRegP src) %{
ins_encode %{
__ encode_heap_oop($src$$Register, $dst$$Register);
%}
+ ins_avoid_back_to_back(Universe::narrow_oop_base() == NULL ? AVOID_NONE : AVOID_BEFORE);
ins_pipe(ialu_reg);
%}
@@ -9199,6 +9220,7 @@ instruct branch(label labl) %{
__ ba(*L);
__ delayed()->nop();
%}
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br);
%}
@@ -9217,7 +9239,7 @@ instruct branch_short(label labl) %{
__ ba_short(*L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_imm);
%}
@@ -9231,6 +9253,7 @@ instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
format %{ "BP$cmp $icc,$labl" %}
// Prim = bits 24-22, Secnd = bits 31-30
ins_encode( enc_bp( labl, cmp, icc ) );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_cc);
%}
@@ -9242,6 +9265,7 @@ instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
format %{ "BP$cmp $icc,$labl" %}
// Prim = bits 24-22, Secnd = bits 31-30
ins_encode( enc_bp( labl, cmp, icc ) );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_cc);
%}
@@ -9260,6 +9284,7 @@ instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
__ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
__ delayed()->nop();
%}
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_cc);
%}
@@ -9278,6 +9303,7 @@ instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{
__ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L);
__ delayed()->nop();
%}
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_fcc);
%}
@@ -9290,6 +9316,7 @@ instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
format %{ "BP$cmp $icc,$labl\t! Loop end" %}
// Prim = bits 24-22, Secnd = bits 31-30
ins_encode( enc_bp( labl, cmp, icc ) );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_cc);
%}
@@ -9302,6 +9329,7 @@ instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
format %{ "BP$cmp $icc,$labl\t! Loop end" %}
// Prim = bits 24-22, Secnd = bits 31-30
ins_encode( enc_bp( labl, cmp, icc ) );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_cc);
%}
@@ -9552,7 +9580,7 @@ instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flag
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9570,7 +9598,7 @@ instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flag
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_imm);
%}
@@ -9588,7 +9616,7 @@ instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, fla
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9606,7 +9634,7 @@ instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, fla
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_imm);
%}
@@ -9624,7 +9652,7 @@ instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flag
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9642,7 +9670,7 @@ instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flag
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_imm);
%}
@@ -9665,7 +9693,7 @@ instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, fla
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9687,7 +9715,7 @@ instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, f
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9705,7 +9733,7 @@ instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flag
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9723,7 +9751,7 @@ instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, fl
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9742,7 +9770,7 @@ instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label lab
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9760,7 +9788,7 @@ instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label lab
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
%}
ins_short_branch(1);
- ins_avoid_back_to_back(1);
+ ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_imm);
%}
@@ -9777,6 +9805,7 @@ instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{
ins_cost(BRANCH_COST);
format %{ "BR$cmp $op1,$labl" %}
ins_encode( enc_bpr( labl, cmp, op1 ) );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_reg);
%}
@@ -9789,6 +9818,7 @@ instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{
ins_cost(BRANCH_COST);
format %{ "BR$cmp $op1,$labl" %}
ins_encode( enc_bpr( labl, cmp, op1 ) );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_reg);
%}
@@ -9801,6 +9831,7 @@ instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{
ins_cost(BRANCH_COST);
format %{ "BR$cmp $op1,$labl" %}
ins_encode( enc_bpr( labl, cmp, op1 ) );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_reg);
%}
@@ -9841,6 +9872,7 @@ instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
__ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
__ delayed()->nop();
%}
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_cc);
%}
@@ -9968,6 +10000,7 @@ instruct CallStaticJavaDirect( method meth ) %{
ins_cost(CALL_COST);
format %{ "CALL,static ; NOP ==> " %}
ins_encode( Java_Static_Call( meth ), call_epilog );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(simple_call);
%}
@@ -10004,6 +10037,7 @@ instruct CallRuntimeDirect(method meth, l7RegP l7) %{
format %{ "CALL,runtime" %}
ins_encode( Java_To_Runtime( meth ),
call_epilog, adjust_long_from_native_call );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(simple_call);
%}
@@ -10016,6 +10050,7 @@ instruct CallLeafDirect(method meth, l7RegP l7) %{
ins_encode( Java_To_Runtime( meth ),
call_epilog,
adjust_long_from_native_call );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(simple_call);
%}
@@ -10028,6 +10063,7 @@ instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
ins_encode( Java_To_Runtime( meth ),
call_epilog,
adjust_long_from_native_call );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(simple_call);
%}
@@ -10041,6 +10077,7 @@ instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
ins_cost(CALL_COST);
format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %}
ins_encode(form_jmpl(jump_target));
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(tail_call);
%}
@@ -10072,6 +10109,7 @@ instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
// opcode(Assembler::jmpl_op3, Assembler::arith_op);
// The hack duplicates the exception oop into G3, so that CreateEx can use it there.
// ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(tail_call);
%}
@@ -10102,6 +10140,7 @@ instruct RethrowException()
// use the following format syntax
format %{ "Jmp rethrow_stub" %}
ins_encode(enc_rethrow);
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(tail_call);
%}
@@ -10130,6 +10169,7 @@ instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP
ins_cost(DEFAULT_COST*10);
format %{ "CALL PartialSubtypeCheck\n\tNOP" %}
ins_encode( enc_PartialSubtypeCheck() );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(partial_subtype_check_pipe);
%}
@@ -10139,6 +10179,7 @@ instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, i
ins_cost(DEFAULT_COST*10);
format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
ins_encode( enc_PartialSubtypeCheck() );
+ ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(partial_subtype_check_pipe);
%}
diff --git a/src/share/vm/adlc/output_h.cpp b/src/share/vm/adlc/output_h.cpp
index 1bf7def46..2279e75ec 100644
--- a/src/share/vm/adlc/output_h.cpp
+++ b/src/share/vm/adlc/output_h.cpp
@@ -1613,21 +1613,20 @@ void ArchDesc::declareClasses(FILE *fp) {
// Each instruction attribute results in a virtual call of same name.
// The ins_cost is not handled here.
Attribute *attr = instr->_attribs;
- bool avoid_back_to_back = false;
+ Attribute *avoid_back_to_back_attr = NULL;
while (attr != NULL) {
- if (strcmp (attr->_ident, "ins_cost") != 0 &&
+ if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0) {
+ fprintf(fp, " virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);
+ } else if (strcmp (attr->_ident, "ins_cost") != 0 &&
strncmp(attr->_ident, "ins_field_", 10) != 0 &&
// Must match function in node.hpp: return type bool, no prefix "ins_".
strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") != 0 &&
strcmp (attr->_ident, "ins_short_branch") != 0) {
fprintf(fp, " virtual int %s() const { return %s; }\n", attr->_ident, attr->_val);
}
- // Check value for ins_avoid_back_to_back, and if it is true (1), set the flag
- if (!strcmp(attr->_ident, "ins_avoid_back_to_back") != 0 && attr->int_val(*this) != 0)
- avoid_back_to_back = true;
- if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0)
- fprintf(fp, " virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);
-
+ if (strcmp(attr->_ident, "ins_avoid_back_to_back") == 0) {
+ avoid_back_to_back_attr = attr;
+ }
attr = (Attribute *)attr->_next;
}
@@ -1799,11 +1798,11 @@ void ArchDesc::declareClasses(FILE *fp) {
}
// flag: if this instruction should not be generated back to back.
- if ( avoid_back_to_back ) {
- if ( node_flags_set ) {
- fprintf(fp," | Flag_avoid_back_to_back");
+ if (avoid_back_to_back_attr != NULL) {
+ if (node_flags_set) {
+ fprintf(fp," | (%s)", avoid_back_to_back_attr->_val);
} else {
- fprintf(fp,"init_flags(Flag_avoid_back_to_back");
+ fprintf(fp,"init_flags((%s)", avoid_back_to_back_attr->_val);
node_flags_set = true;
}
}
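The effect on adlc's output is that the attribute's value expression is now pasted verbatim into the generated node constructor and evaluated when the node is built, instead of a fixed flag bit. Illustratively (assumed shape, not verbatim adlc output), the encodeHeapOop rule from sparc.ad above would generate something like:

    // inside the generated encodeHeapOopNode constructor:
    init_flags((Universe::narrow_oop_base() == NULL ? AVOID_NONE : AVOID_BEFORE));
    // previously only the constant Flag_avoid_back_to_back could be set here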
diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
index 5f8264234..4aa7c4fa5 100644
--- a/src/share/vm/classfile/classFileParser.cpp
+++ b/src/share/vm/classfile/classFileParser.cpp
@@ -3751,18 +3751,24 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
- "Unsupported major.minor version %u.%u",
+ "Unsupported class file version %u.%u, "
+ "this version of the Java Runtime only recognizes class file versions up to %u.%u",
major_version,
- minor_version);
+ minor_version,
+ JAVA_MAX_SUPPORTED_VERSION,
+ JAVA_MAX_SUPPORTED_MINOR_VERSION);
} else {
ResourceMark rm(THREAD);
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
- "%s : Unsupported major.minor version %u.%u",
+ "%s has been compiled by a more recent version of the Java Runtime (class file version %u.%u), "
+ "this version of the Java Runtime only recognizes class file versions up to %u.%u",
name->as_C_string(),
major_version,
- minor_version);
+ minor_version,
+ JAVA_MAX_SUPPORTED_VERSION,
+ JAVA_MAX_SUPPORTED_MINOR_VERSION);
}
return nullHandle;
}
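For illustration (the class name and version numbers are hypothetical; the format strings are the ones added above), loading a class compiled for a newer release now reports:

    com/example/Foo has been compiled by a more recent version of the Java Runtime (class file version 52.0), this version of the Java Runtime only recognizes class file versions up to 51.0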
@@ -4175,8 +4181,12 @@ ClassFileParser::~ClassFileParser() {
clear_class_metadata();
- // deallocate the klass if already created.
- MetadataFactory::free_metadata(_loader_data, _klass);
+ // deallocate the klass if already created. Don't directly deallocate, but add
+ // to the deallocate list so that the klass is removed from the CLD::_klasses list
+ // at a safepoint.
+ if (_klass != NULL) {
+ _loader_data->add_to_deallocate_list(_klass);
+ }
_klass = NULL;
}
diff --git a/src/share/vm/classfile/defaultMethods.cpp b/src/share/vm/classfile/defaultMethods.cpp
index 72020ba78..b5e4da63f 100644
--- a/src/share/vm/classfile/defaultMethods.cpp
+++ b/src/share/vm/classfile/defaultMethods.cpp
@@ -390,20 +390,6 @@ class MethodFamily : public ResourceObj {
Symbol* get_exception_message() { return _exception_message; }
Symbol* get_exception_name() { return _exception_name; }
- // Return true if the specified klass has a static method that matches
- // the name and signature of the target method.
- bool has_matching_static(InstanceKlass* root) {
- if (_members.length() > 0) {
- Pair<Method*,QualifiedState> entry = _members.at(0);
- Method* impl = root->find_method(entry.first->name(),
- entry.first->signature());
- if ((impl != NULL) && impl->is_static()) {
- return true;
- }
- }
- return false;
- }
-
// Either sets the target or the exception error message
void determine_target(InstanceKlass* root, TRAPS) {
if (has_target() || throws_exception()) {
@@ -433,21 +419,19 @@ class MethodFamily : public ResourceObj {
// If the root klass has a static method with matching name and signature
// then do not generate an overpass method because it will hide the
// static method during resolution.
- if (!has_matching_static(root)) {
- if (qualified_methods.length() == 0) {
- _exception_message = generate_no_defaults_message(CHECK);
- } else {
- assert(root != NULL, "Null root class");
- _exception_message = generate_method_message(root->name(), qualified_methods.at(0), CHECK);
- }
- _exception_name = vmSymbols::java_lang_AbstractMethodError();
+ if (qualified_methods.length() == 0) {
+ _exception_message = generate_no_defaults_message(CHECK);
+ } else {
+ assert(root != NULL, "Null root class");
+ _exception_message = generate_method_message(root->name(), qualified_methods.at(0), CHECK);
}
+ _exception_name = vmSymbols::java_lang_AbstractMethodError();
// If only one qualified method is default, select that
} else if (num_defaults == 1) {
_selected_target = qualified_methods.at(default_index);
- } else if (num_defaults > 1 && !has_matching_static(root)) {
+ } else if (num_defaults > 1) {
_exception_message = generate_conflicts_message(&qualified_methods,CHECK);
_exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
if (TraceDefaultMethods) {
diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp
index 06e75ea25..29339c375 100644
--- a/src/share/vm/classfile/javaClasses.cpp
+++ b/src/share/vm/classfile/javaClasses.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -482,8 +482,8 @@ void java_lang_String::print(Handle java_string, outputStream* st) {
}
}
-static void initialize_static_field(fieldDescriptor* fd, TRAPS) {
- Handle mirror (THREAD, fd->field_holder()->java_mirror());
+
+static void initialize_static_field(fieldDescriptor* fd, Handle mirror, TRAPS) {
assert(mirror.not_null() && fd->is_static(), "just checking");
if (fd->has_initial_value()) {
BasicType t = fd->field_type();
@@ -550,21 +550,45 @@ void java_lang_Class::fixup_mirror(KlassHandle k, TRAPS) {
create_mirror(k, Handle(NULL), CHECK);
}
-oop java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
+void java_lang_Class::initialize_mirror_fields(KlassHandle k,
+ Handle mirror,
+ Handle protection_domain,
+ TRAPS) {
+ // Allocate a simple java object for a lock.
+ // This needs to be a java object because during class initialization
+ // it can be held across a java call.
+ typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
+ set_init_lock(mirror(), r);
+
+ // Set protection domain also
+ set_protection_domain(mirror(), protection_domain());
+
+ // Initialize static fields
+ InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, mirror, CHECK);
+}
+
+void java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
assert(k->java_mirror() == NULL, "should only assign mirror once");
// Use this moment of initialization to cache modifier_flags also,
// to support Class.getModifiers(). Instance classes recalculate
// the cached flags after the class file is parsed, but before the
// class is put into the system dictionary.
- int computed_modifiers = k->compute_modifier_flags(CHECK_0);
+ int computed_modifiers = k->compute_modifier_flags(CHECK);
k->set_modifier_flags(computed_modifiers);
// Class_klass has to be loaded because it is used to allocate
// the mirror.
if (SystemDictionary::Class_klass_loaded()) {
// Allocate mirror (java.lang.Class instance)
- Handle mirror = InstanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK_0);
+ Handle mirror = InstanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK);
+
+ // Setup indirection from mirror->klass
+ if (!k.is_null()) {
+ java_lang_Class::set_klass(mirror(), k());
+ }
InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
+ assert(oop_size(mirror()) == mk->instance_size(k), "should have been set");
+
java_lang_Class::set_static_oop_field_count(mirror(), mk->compute_static_oop_field_count(mirror()));
// It might also have a component mirror. This mirror must already exist.
@@ -577,29 +601,32 @@ oop java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAP
assert(k->oop_is_objArray(), "Must be");
Klass* element_klass = ObjArrayKlass::cast(k())->element_klass();
assert(element_klass != NULL, "Must have an element klass");
- comp_mirror = element_klass->java_mirror();
+ comp_mirror = element_klass->java_mirror();
}
assert(comp_mirror.not_null(), "must have a mirror");
- // Two-way link between the array klass and its component mirror:
+ // Two-way link between the array klass and its component mirror:
ArrayKlass::cast(k())->set_component_mirror(comp_mirror());
set_array_klass(comp_mirror(), k());
} else {
assert(k->oop_is_instance(), "Must be");
- // Allocate a simple java object for a lock.
- // This needs to be a java object because during class initialization
- // it can be held across a java call.
- typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_NULL);
- set_init_lock(mirror(), r);
-
- // Set protection domain also
- set_protection_domain(mirror(), protection_domain());
+ initialize_mirror_fields(k, mirror, protection_domain, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ // If any of the fields throws an exception like OOM remove the klass field
+ // from the mirror so GC doesn't follow it after the klass has been deallocated.
+ // This mirror looks like a primitive type, which logically it is because it
+ // represents no class.
+ java_lang_Class::set_klass(mirror(), NULL);
+ return;
+ }
+ }
- // Initialize static fields
- InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL);
+ // Set up the klass->mirror indirection last, after all
+ // allocations that can throw exceptions have completed.
+ if (!k.is_null()) {
+ k->set_java_mirror(mirror());
}
- return mirror();
} else {
if (fixup_mirror_list() == NULL) {
GrowableArray<Klass*>* list =
@@ -607,12 +634,10 @@ oop java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAP
set_fixup_mirror_list(list);
}
fixup_mirror_list()->push(k());
- return NULL;
}
}
-
int java_lang_Class::oop_size(oop java_class) {
assert(_oop_size_offset != 0, "must be set");
return java_class->int_field(_oop_size_offset);
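Condensed, the new control flow in create_mirror orders its steps so the klass never points at a partially initialized mirror. A sketch of the logic above (comments only, not additional code):

    // mirror = allocate_instance(k);                  // may throw (OOM)
    // set_klass(mirror, k);                           // mirror -> klass
    // initialize_mirror_fields(k, mirror, pd, THREAD);
    // if (HAS_PENDING_EXCEPTION) {
    //   set_klass(mirror, NULL);   // orphan the mirror so GC won't chase k
    //   return;                    // klass->mirror was never published
    // }
    // k->set_java_mirror(mirror);                     // klass -> mirror, last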
diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp
index 886a48264..8b3bfc4db 100644
--- a/src/share/vm/classfile/javaClasses.hpp
+++ b/src/share/vm/classfile/javaClasses.hpp
@@ -246,11 +246,12 @@ class java_lang_Class : AllStatic {
static void set_init_lock(oop java_class, oop init_lock);
static void set_protection_domain(oop java_class, oop protection_domain);
+ static void initialize_mirror_fields(KlassHandle k, Handle mirror, Handle protection_domain, TRAPS);
public:
static void compute_offsets();
// Instance creation
- static oop create_mirror(KlassHandle k, Handle protection_domain, TRAPS);
+ static void create_mirror(KlassHandle k, Handle protection_domain, TRAPS);
static void fixup_mirror(KlassHandle k, TRAPS);
static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
// Conversion
diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp
index 703443517..aa8121595 100644
--- a/src/share/vm/classfile/systemDictionary.cpp
+++ b/src/share/vm/classfile/systemDictionary.cpp
@@ -826,47 +826,6 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
}
} // load_instance_class loop
- if (HAS_PENDING_EXCEPTION) {
- // An exception, such as OOM could have happened at various places inside
- // load_instance_class. We might have partially initialized a shared class
- // and need to clean it up.
- if (class_loader.is_null()) {
- // In some cases k may be null. Let's find the shared class again.
- instanceKlassHandle ik(THREAD, find_shared_class(name));
- if (ik.not_null()) {
- if (ik->class_loader_data() == NULL) {
- // We didn't go as far as Klass::restore_unshareable_info(),
- // so nothing to clean up.
- } else {
- Klass *kk;
- {
- MutexLocker mu(SystemDictionary_lock, THREAD);
- kk = find_class(d_index, d_hash, name, ik->class_loader_data());
- }
- if (kk != NULL) {
- // No clean up is needed if the shared class has been entered
- // into system dictionary, as load_shared_class() won't be called
- // again.
- } else {
- // This must be done outside of the SystemDictionary_lock to
- // avoid deadlock.
- //
- // Note that Klass::restore_unshareable_info (called via
- // load_instance_class above) is also called outside
- // of SystemDictionary_lock. Other threads are blocked from
- // loading this class because they are waiting on the
- // SystemDictionary_lock until this thread removes
- // the placeholder below.
- //
- // This need to be re-thought when parallel-capable non-boot
- // classloaders are supported by CDS (today they're not).
- clean_up_shared_class(ik, class_loader, THREAD);
- }
- }
- }
- }
- }
-
if (load_instance_added == true) {
// clean up placeholder entries for LOAD_INSTANCE success or error
// This brackets the SystemDictionary updates for both defining
@@ -1272,19 +1231,6 @@ instanceKlassHandle SystemDictionary::load_shared_class(
return ik;
}
-void SystemDictionary::clean_up_shared_class(instanceKlassHandle ik, Handle class_loader, TRAPS) {
- // Updating methods must be done under a lock so multiple
- // threads don't update these in parallel
- // Shared classes are all currently loaded by the bootstrap
- // classloader, so this will never cause a deadlock on
- // a custom class loader lock.
- {
- Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
- check_loader_lock_contention(lockObject, THREAD);
- ObjectLocker ol(lockObject, THREAD, true);
- ik->remove_unshareable_info();
- }
-}
instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp
index 88132f520..097a1e034 100644
--- a/src/share/vm/classfile/systemDictionary.hpp
+++ b/src/share/vm/classfile/systemDictionary.hpp
@@ -617,7 +617,6 @@ private:
Handle class_loader, TRAPS);
static instanceKlassHandle load_shared_class(instanceKlassHandle ik,
Handle class_loader, TRAPS);
- static void clean_up_shared_class(instanceKlassHandle ik, Handle class_loader, TRAPS);
static instanceKlassHandle load_instance_class(Symbol* class_name, Handle class_loader, TRAPS);
static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
static void check_loader_lock_contention(Handle loader_lock, TRAPS);
diff --git a/src/share/vm/classfile/verificationType.hpp b/src/share/vm/classfile/verificationType.hpp
index eec0bf3f5..16266477e 100644
--- a/src/share/vm/classfile/verificationType.hpp
+++ b/src/share/vm/classfile/verificationType.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -299,7 +299,7 @@ class VerificationType VALUE_OBJ_CLASS_SPEC {
int dimensions() const {
assert(is_array(), "Must be an array");
int index = 0;
- while (name()->byte_at(index++) == '[');
+ while (name()->byte_at(index) == '[') index++;
return index;
}
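A worked example of the off-by-one the new loop fixes: for the descriptor "[[I" the old post-increment test advanced index once more on the failing comparison and returned 3, while the rewritten loop returns the true dimension count, 2. A self-contained equivalent:

    #include <cassert>

    // Count leading '[' characters of a JVM array descriptor.
    static int dimensions_of(const char* name) {
      int index = 0;
      while (name[index] == '[') index++;   // increment only on a match
      return index;
    }

    int main() {
      assert(dimensions_of("[[I") == 2);    // the old loop would have yielded 3
      assert(dimensions_of("[Ljava/lang/String;") == 1);
      return 0;
    }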
diff --git a/src/share/vm/classfile/verifier.cpp b/src/share/vm/classfile/verifier.cpp
index 2e6474743..c75a29d0f 100644
--- a/src/share/vm/classfile/verifier.cpp
+++ b/src/share/vm/classfile/verifier.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1943,7 +1943,7 @@ bool ClassVerifier::is_protected_access(instanceKlassHandle this_class,
InstanceKlass* target_instance = InstanceKlass::cast(target_class);
fieldDescriptor fd;
if (is_method) {
- Method* m = target_instance->uncached_lookup_method(field_name, field_sig);
+ Method* m = target_instance->uncached_lookup_method(field_name, field_sig, Klass::normal);
if (m != NULL && m->is_protected()) {
if (!this_class->is_same_class_package(m->method_holder())) {
return true;
@@ -2280,7 +2280,8 @@ void ClassVerifier::verify_invoke_init(
ref_class_type.name(), CHECK_VERIFY(this));
Method* m = InstanceKlass::cast(ref_klass)->uncached_lookup_method(
vmSymbols::object_initializer_name(),
- cp->signature_ref_at(bcs->get_index_u2()));
+ cp->signature_ref_at(bcs->get_index_u2()),
+ Klass::normal);
instanceKlassHandle mh(THREAD, m->method_holder());
if (m->is_protected() && !mh->is_same_class_package(_klass())) {
bool assignable = current_type().is_assignable_from(
diff --git a/src/share/vm/code/debugInfo.hpp b/src/share/vm/code/debugInfo.hpp
index cf0a9a6d3..287ff876c 100644
--- a/src/share/vm/code/debugInfo.hpp
+++ b/src/share/vm/code/debugInfo.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -273,8 +273,8 @@ class DebugInfoReadStream : public CompressedReadStream {
}
Method* read_method() {
Method* o = (Method*)(code()->metadata_at(read_int()));
- assert(o == NULL ||
- o->is_metaspace_object(), "meta data only");
+ // is_metadata() is a faster check than is_metaspace_object()
+ assert(o == NULL || o->is_metadata(), "meta data only");
return o;
}
ScopeValue* read_object_value();
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index f21a92f38..ff93b41b8 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1288,7 +1288,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
print_heap_before_gc();
trace_heap_before_gc(gc_tracer);
- size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
+ size_t metadata_prev_used = MetaspaceAux::used_bytes();
verify_region_sets_optional();
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index b4f9a0b00..76021adf1 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -1204,7 +1204,7 @@ void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
(_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
if (full) {
- _metaspace_used_bytes_before_gc = MetaspaceAux::allocated_used_bytes();
+ _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
}
}
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
index 65f9ece3b..bcb1cf4d2 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
@@ -184,7 +184,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
size_t prev_used = heap->used();
// Capture metadata size before collection for sizing.
- size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
+ size_t metadata_prev_used = MetaspaceAux::used_bytes();
// For PrintGCDetails
size_t old_gen_prev_used = old_gen->used_in_bytes();
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index 8b4f0bd46..393629439 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -927,7 +927,7 @@ public:
_heap_used = heap->used();
_young_gen_used = heap->young_gen()->used_in_bytes();
_old_gen_used = heap->old_gen()->used_in_bytes();
- _metadata_used = MetaspaceAux::allocated_used_bytes();
+ _metadata_used = MetaspaceAux::used_bytes();
};
size_t heap_used() const { return _heap_used; }
diff --git a/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp b/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp
index 7f6dca0fb..23cb113c1 100644
--- a/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp
+++ b/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp
@@ -62,16 +62,16 @@ public:
};
class MetaspaceSizes : public StackObj {
- size_t _capacity;
+ size_t _committed;
size_t _used;
size_t _reserved;
public:
- MetaspaceSizes() : _capacity(0), _used(0), _reserved(0) {}
- MetaspaceSizes(size_t capacity, size_t used, size_t reserved) :
- _capacity(capacity), _used(used), _reserved(reserved) {}
+ MetaspaceSizes() : _committed(0), _used(0), _reserved(0) {}
+ MetaspaceSizes(size_t committed, size_t used, size_t reserved) :
+ _committed(committed), _used(used), _reserved(reserved) {}
- size_t capacity() const { return _capacity; }
+ size_t committed() const { return _committed; }
size_t used() const { return _used; }
size_t reserved() const { return _reserved; }
};
diff --git a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp
index b7d6e8e6b..95ca83a4a 100644
--- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp
@@ -258,7 +258,7 @@ void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary
static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
TraceStructMetaspaceSizes meta_sizes;
- meta_sizes.set_capacity(sizes.capacity());
+ meta_sizes.set_committed(sizes.committed());
meta_sizes.set_used(sizes.used());
meta_sizes.set_reserved(sizes.reserved());
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
index 6f5350343..d901ddf49 100644
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
@@ -103,6 +103,15 @@ bool VM_GC_Operation::doit_prologue() {
assert(((_gc_cause != GCCause::_no_gc) &&
(_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");
+ // The VM initialization needs to be completed before a GC can be handled.
+ if (!is_init_completed()) {
+ vm_exit_during_initialization(
+ err_msg("GC triggered before VM initialization completed. Try increasing "
+ "NewSize, current value " UINTX_FORMAT "%s.",
+ byte_size_in_proper_unit(NewSize),
+ proper_unit_for_byte_size(NewSize)));
+ }
+
acquire_pending_list_lock();
// If the GC count has changed someone beat us to the collection
// Get the Heap_lock after the pending_list_lock.
diff --git a/src/share/vm/gc_interface/collectedHeap.cpp b/src/share/vm/gc_interface/collectedHeap.cpp
index 071ca3812..4b33afd66 100644
--- a/src/share/vm/gc_interface/collectedHeap.cpp
+++ b/src/share/vm/gc_interface/collectedHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -85,16 +85,16 @@ GCHeapSummary CollectedHeap::create_heap_summary() {
MetaspaceSummary CollectedHeap::create_metaspace_summary() {
const MetaspaceSizes meta_space(
- MetaspaceAux::allocated_capacity_bytes(),
- MetaspaceAux::allocated_used_bytes(),
+ MetaspaceAux::committed_bytes(),
+ MetaspaceAux::used_bytes(),
MetaspaceAux::reserved_bytes());
const MetaspaceSizes data_space(
- MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
- MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
+ MetaspaceAux::committed_bytes(Metaspace::NonClassType),
+ MetaspaceAux::used_bytes(Metaspace::NonClassType),
MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
const MetaspaceSizes class_space(
- MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
- MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
+ MetaspaceAux::committed_bytes(Metaspace::ClassType),
+ MetaspaceAux::used_bytes(Metaspace::ClassType),
MetaspaceAux::reserved_bytes(Metaspace::ClassType));
const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
@@ -582,36 +582,6 @@ void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
}
}
-oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
- debug_only(check_for_valid_allocation_state());
- assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
- assert(size >= 0, "int won't convert to size_t");
- HeapWord* obj;
- assert(ScavengeRootsInCode > 0, "must be");
- obj = common_mem_allocate_init(real_klass, size, CHECK_NULL);
- post_allocation_setup_common(klass, obj);
- assert(Universe::is_bootstrapping() ||
- !((oop)obj)->is_array(), "must not be an array");
- NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
- oop mirror = (oop)obj;
-
- java_lang_Class::set_oop_size(mirror, size);
-
- // Setup indirections
- if (!real_klass.is_null()) {
- java_lang_Class::set_klass(mirror, real_klass());
- real_klass->set_java_mirror(mirror);
- }
-
- InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
- assert(size == mk->instance_size(real_klass), "should have been set");
-
- // notify jvmti and dtrace
- post_allocation_notify(klass, (oop)obj);
-
- return mirror;
-}
-
/////////////// Unit tests ///////////////
#ifndef PRODUCT
diff --git a/src/share/vm/gc_interface/collectedHeap.hpp b/src/share/vm/gc_interface/collectedHeap.hpp
index 550757dff..a9d271be2 100644
--- a/src/share/vm/gc_interface/collectedHeap.hpp
+++ b/src/share/vm/gc_interface/collectedHeap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -151,7 +151,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
HeapWord* objPtr);
- inline static void post_allocation_setup_obj(KlassHandle klass, HeapWord* obj);
+ inline static void post_allocation_setup_obj(KlassHandle klass, HeapWord* obj, int size);
inline static void post_allocation_setup_array(KlassHandle klass,
HeapWord* obj, int length);
@@ -312,9 +312,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// May be overridden to set additional parallelism.
virtual void set_par_threads(uint t) { _n_par_threads = t; };
- // Allocate and initialize instances of Class
- static oop Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS);
-
// General obj/array allocation facilities.
inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
diff --git a/src/share/vm/gc_interface/collectedHeap.inline.hpp b/src/share/vm/gc_interface/collectedHeap.inline.hpp
index 0bf322f82..89315a942 100644
--- a/src/share/vm/gc_interface/collectedHeap.inline.hpp
+++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@ void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
}
// Support for jvmti and dtrace
-inline void post_allocation_notify(KlassHandle klass, oop obj) {
+inline void post_allocation_notify(KlassHandle klass, oop obj, int size) {
// support low memory notifications (no-op if not enabled)
LowMemoryDetector::detect_low_memory_for_collected_pools();
@@ -80,18 +80,19 @@ inline void post_allocation_notify(KlassHandle klass, oop obj) {
if (DTraceAllocProbes) {
// support for Dtrace object alloc event (no-op most of the time)
if (klass() != NULL && klass()->name() != NULL) {
- SharedRuntime::dtrace_object_alloc(obj);
+ SharedRuntime::dtrace_object_alloc(obj, size);
}
}
}
void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
- HeapWord* obj) {
+ HeapWord* obj,
+ int size) {
post_allocation_setup_common(klass, obj);
assert(Universe::is_bootstrapping() ||
!((oop)obj)->is_array(), "must not be an array");
// notify jvmti and dtrace
- post_allocation_notify(klass, (oop)obj);
+ post_allocation_notify(klass, (oop)obj, size);
}
void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
@@ -103,9 +104,10 @@ void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
assert(length >= 0, "length should be non-negative");
((arrayOop)obj)->set_length(length);
post_allocation_setup_common(klass, obj);
- assert(((oop)obj)->is_array(), "must be an array");
+ oop new_obj = (oop)obj;
+ assert(new_obj->is_array(), "must be an array");
// notify jvmti and dtrace (must be after length is set for dtrace)
- post_allocation_notify(klass, (oop)obj);
+ post_allocation_notify(klass, new_obj, new_obj->size());
}
HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS) {
@@ -199,7 +201,7 @@ oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
- post_allocation_setup_obj(klass, obj);
+ post_allocation_setup_obj(klass, obj, size);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj;
}
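The hunks above thread the allocation size from obj_allocate() down through post_allocation_setup_obj() to the JVMTI/DTrace notification, instead of recomputing it from the object header inside the probe. A minimal standalone sketch of that pattern, using hypothetical names (alloc_obj, notify_alloc) rather than the HotSpot entry points:

    // Sketch: the allocator already knows the size in words, so it hands it
    // to the notification hook rather than re-deriving it from the object.
    #include <cstdio>

    static void notify_alloc(const char* klass_name, int size_in_words) {
      // No second header lookup needed: the size arrives with the call.
      std::printf("alloc %s, %d words\n", klass_name, size_in_words);
    }

    static void* alloc_obj(const char* klass_name, int size_in_words) {
      void* obj = ::operator new(size_in_words * sizeof(void*));
      notify_alloc(klass_name, size_in_words);  // reuse the known size
      return obj;
    }

    int main() {
      void* o = alloc_obj("java/lang/Object", 2);
      ::operator delete(o);
      return 0;
    }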
diff --git a/src/share/vm/interpreter/linkResolver.cpp b/src/share/vm/interpreter/linkResolver.cpp
index aa11784b7..8bcfc1f1e 100644
--- a/src/share/vm/interpreter/linkResolver.cpp
+++ b/src/share/vm/interpreter/linkResolver.cpp
@@ -243,7 +243,8 @@ void LinkResolver::resolve_klass(KlassHandle& result, constantPoolHandle pool, i
// Look up method in klasses, including static methods
// Then look up local default methods
void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, bool checkpolymorphism, bool in_imethod_resolve, TRAPS) {
- Method* result_oop = klass->uncached_lookup_method(name, signature);
+ // Ignore overpasses so statics can be found during resolution
+ Method* result_oop = klass->uncached_lookup_method(name, signature, Klass::skip_overpass);
// JDK 8, JVMS 5.4.3.4: Interface method resolution should
// ignore static and non-public methods of java.lang.Object,
@@ -256,6 +257,12 @@ void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle kl
result_oop = NULL;
}
+ // Before considering default methods, check for an overpass in the
+ // current class if a method has not been found.
+ if (result_oop == NULL) {
+ result_oop = InstanceKlass::cast(klass())->find_method(name, signature);
+ }
+
if (result_oop == NULL) {
Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
if (default_methods != NULL) {
@@ -276,11 +283,11 @@ void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle kl
// returns first instance method
// Looks up method in classes, then looks up local default methods
void LinkResolver::lookup_instance_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
- Method* result_oop = klass->uncached_lookup_method(name, signature);
+ Method* result_oop = klass->uncached_lookup_method(name, signature, Klass::normal);
result = methodHandle(THREAD, result_oop);
while (!result.is_null() && result->is_static() && result->method_holder()->super() != NULL) {
KlassHandle super_klass = KlassHandle(THREAD, result->method_holder()->super());
- result = methodHandle(THREAD, super_klass->uncached_lookup_method(name, signature));
+ result = methodHandle(THREAD, super_klass->uncached_lookup_method(name, signature, Klass::normal));
}
if (result.is_null()) {
@@ -302,7 +309,7 @@ int LinkResolver::vtable_index_of_interface_method(KlassHandle klass,
// First check in default method array
if (!resolved_method->is_abstract() &&
(InstanceKlass::cast(klass())->default_methods() != NULL)) {
- int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(), name, signature);
+ int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(), name, signature, false);
if (index >= 0 ) {
vtable_index = InstanceKlass::cast(klass())->default_vtable_indices()->at(index);
}
@@ -322,7 +329,7 @@ void LinkResolver::lookup_method_in_interfaces(methodHandle& result, KlassHandle
  // Specify Klass::skip_defaults in order to skip default methods when searching the
// interfaces. Function lookup_method_in_klasses() already looked for
// the method in the default methods table.
- result = methodHandle(THREAD, ik->lookup_method_in_all_interfaces(name, signature, true));
+ result = methodHandle(THREAD, ik->lookup_method_in_all_interfaces(name, signature, Klass::skip_defaults));
}
void LinkResolver::lookup_polymorphic_method(methodHandle& result,
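The lookup changes above give resolution a fixed order: walk the class hierarchy ignoring overpass methods (so, for example, a static method is preferred over an overpass), then fall back to an overpass in the current class, and only then consult the default-methods table. A compact sketch of the first two stages, with simplified stand-ins for Klass and Method rather than the real metadata classes:

    #include <string>
    #include <vector>

    struct Method { std::string name; bool is_overpass; };

    struct Klass {
      std::vector<Method> methods;
      const Klass* super = nullptr;
      const Method* find(const std::string& n, bool skip_overpass) const {
        for (const Method& m : methods)
          if (m.name == n && !(skip_overpass && m.is_overpass)) return &m;
        return nullptr;
      }
    };

    // Stage 1: hierarchy walk skipping overpasses; stage 2: local overpass.
    const Method* lookup(const Klass* k, const std::string& n) {
      for (const Klass* c = k; c != nullptr; c = c->super)
        if (const Method* m = c->find(n, /*skip_overpass=*/true)) return m;
      return k->find(n, /*skip_overpass=*/false);
    }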
diff --git a/src/share/vm/memory/gcLocker.cpp b/src/share/vm/memory/gcLocker.cpp
index 09fb73bb0..3b208dfff 100644
--- a/src/share/vm/memory/gcLocker.cpp
+++ b/src/share/vm/memory/gcLocker.cpp
@@ -28,7 +28,6 @@
#include "memory/sharedHeap.hpp"
volatile jint GC_locker::_jni_lock_count = 0;
-volatile jint GC_locker::_lock_count = 0;
volatile bool GC_locker::_needs_gc = false;
volatile bool GC_locker::_doing_gc = false;
@@ -102,7 +101,7 @@ void GC_locker::jni_lock(JavaThread* thread) {
// We check that at least one thread is in a critical region before
// blocking because blocked threads are woken up by a thread exiting
// a JNI critical region.
- while ((needs_gc() && is_jni_active()) || _doing_gc) {
+ while (is_active_and_needs_gc() || _doing_gc) {
JNICritical_lock->wait();
}
thread->enter_critical();
@@ -116,27 +115,20 @@ void GC_locker::jni_unlock(JavaThread* thread) {
_jni_lock_count--;
decrement_debug_jni_lock_count();
thread->exit_critical();
- if (needs_gc() && !is_jni_active()) {
+ if (needs_gc() && !is_active_internal()) {
// We're the last thread out. Cause a GC to occur.
- // GC will also check is_active, so this check is not
- // strictly needed. It's added here to make it clear that
- // the GC will NOT be performed if any other caller
- // of GC_locker::lock() still needs GC locked.
- if (!is_active_internal()) {
- _doing_gc = true;
- {
- // Must give up the lock while at a safepoint
- MutexUnlocker munlock(JNICritical_lock);
- if (PrintJNIGCStalls && PrintGCDetails) {
- ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
- gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
- gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
- }
- Universe::heap()->collect(GCCause::_gc_locker);
+ _doing_gc = true;
+ {
+ // Must give up the lock while at a safepoint
+ MutexUnlocker munlock(JNICritical_lock);
+ if (PrintJNIGCStalls && PrintGCDetails) {
+ ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+ gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
+ gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
}
- _doing_gc = false;
+ Universe::heap()->collect(GCCause::_gc_locker);
}
-
+ _doing_gc = false;
_needs_gc = false;
JNICritical_lock->notify_all();
}
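With _lock_count gone, _jni_lock_count is the only lock count, so jni_unlock() can start the collection directly once the last critical thread leaves; the old nested is_active_internal() check is folded into the outer condition. A rough sketch of that last-one-out shape, using std::mutex and std::condition_variable in place of JNICritical_lock (illustrative only, not the HotSpot locking):

    #include <condition_variable>
    #include <mutex>

    static std::mutex gc_lock;
    static std::condition_variable gc_cv;
    static int critical_count = 0;
    static bool needs_gc = false;

    void leave_critical() {
      std::unique_lock<std::mutex> ml(gc_lock);
      --critical_count;
      if (needs_gc && critical_count == 0) {  // single combined check
        ml.unlock();                          // give up the lock to "collect"
        /* collect(); */
        ml.lock();
        needs_gc = false;
        gc_cv.notify_all();                   // wake threads stalled entering
      }
    }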
diff --git a/src/share/vm/memory/gcLocker.hpp b/src/share/vm/memory/gcLocker.hpp
index 60bebdf0f..f12aa6755 100644
--- a/src/share/vm/memory/gcLocker.hpp
+++ b/src/share/vm/memory/gcLocker.hpp
@@ -54,8 +54,6 @@ class GC_locker: public AllStatic {
// safepointing and decremented during the slow path of GC_locker
// unlocking.
static volatile jint _jni_lock_count; // number of jni active instances.
-
- static volatile jint _lock_count; // number of other active instances
static volatile bool _needs_gc; // heap is filling, we need a GC
// note: bool is typedef'd as jint
static volatile bool _doing_gc; // unlock_critical() is doing a GC
@@ -66,12 +64,6 @@ class GC_locker: public AllStatic {
static volatile jint _debug_jni_lock_count;
#endif
- // Accessors
- static bool is_jni_active() {
- assert(_needs_gc, "only valid when _needs_gc is set");
- return _jni_lock_count > 0;
- }
-
// At a safepoint, visit all threads and count the number of active
// critical sections. This is used to ensure that all active
// critical sections are exited before a new one is started.
@@ -82,7 +74,7 @@ class GC_locker: public AllStatic {
static bool is_active_internal() {
verify_critical_count();
- return _lock_count > 0 || _jni_lock_count > 0;
+ return _jni_lock_count > 0;
}
public:
@@ -132,10 +124,6 @@ class GC_locker: public AllStatic {
// not a stable predicate.
static void stall_until_clear();
- // Non-structured GC locking: currently needed for JNI. Use with care!
- static void lock();
- static void unlock();
-
// The following two methods are used for JNI critical regions.
// If we find that we failed to perform a GC because the GC_locker
// was active, arrange for one as soon as possible by allowing
diff --git a/src/share/vm/memory/gcLocker.inline.hpp b/src/share/vm/memory/gcLocker.inline.hpp
index 37b4231bb..e77d5436b 100644
--- a/src/share/vm/memory/gcLocker.inline.hpp
+++ b/src/share/vm/memory/gcLocker.inline.hpp
@@ -27,22 +27,6 @@
#include "memory/gcLocker.hpp"
-inline void GC_locker::lock() {
- // cast away volatile
- Atomic::inc(&_lock_count);
- CHECK_UNHANDLED_OOPS_ONLY(
- if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; })
- assert(Universe::heap() == NULL ||
- !Universe::heap()->is_gc_active(), "locking failed");
-}
-
-inline void GC_locker::unlock() {
- // cast away volatile
- Atomic::dec(&_lock_count);
- CHECK_UNHANDLED_OOPS_ONLY(
- if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; })
-}
-
inline void GC_locker::lock_critical(JavaThread* thread) {
if (!thread->in_critical()) {
if (needs_gc()) {
diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp
index eaaf0d7c6..ed8ed2446 100644
--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp
@@ -374,7 +374,7 @@ void GenCollectedHeap::do_collection(bool full,
ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
- const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
+ const size_t metadata_prev_used = MetaspaceAux::used_bytes();
print_heap_before_gc();
diff --git a/src/share/vm/memory/metaspace.cpp b/src/share/vm/memory/metaspace.cpp
index 6dcc70792..4d25fe8c7 100644
--- a/src/share/vm/memory/metaspace.cpp
+++ b/src/share/vm/memory/metaspace.cpp
@@ -1447,7 +1447,7 @@ void MetaspaceGC::compute_new_size() {
uint current_shrink_factor = _shrink_factor;
_shrink_factor = 0;
- const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
+ const size_t used_after_gc = MetaspaceAux::capacity_bytes();
const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
@@ -2537,8 +2537,8 @@ void SpaceManager::mangle_freed_chunks() {
// MetaspaceAux
-size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
-size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
+size_t MetaspaceAux::_capacity_words[] = {0, 0};
+size_t MetaspaceAux::_used_words[] = {0, 0};
size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
@@ -2551,38 +2551,38 @@ size_t MetaspaceAux::free_bytes() {
void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
assert_lock_strong(SpaceManager::expand_lock());
- assert(words <= allocated_capacity_words(mdtype),
+ assert(words <= capacity_words(mdtype),
err_msg("About to decrement below 0: words " SIZE_FORMAT
- " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
- words, mdtype, allocated_capacity_words(mdtype)));
- _allocated_capacity_words[mdtype] -= words;
+ " is greater than _capacity_words[%u] " SIZE_FORMAT,
+ words, mdtype, capacity_words(mdtype)));
+ _capacity_words[mdtype] -= words;
}
void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
assert_lock_strong(SpaceManager::expand_lock());
// Needs to be atomic
- _allocated_capacity_words[mdtype] += words;
+ _capacity_words[mdtype] += words;
}
void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
- assert(words <= allocated_used_words(mdtype),
+ assert(words <= used_words(mdtype),
err_msg("About to decrement below 0: words " SIZE_FORMAT
- " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
- words, mdtype, allocated_used_words(mdtype)));
+ " is greater than _used_words[%u] " SIZE_FORMAT,
+ words, mdtype, used_words(mdtype)));
// For CMS deallocation of the Metaspaces occurs during the
// sweep which is a concurrent phase. Protection by the expand_lock()
// is not enough since allocation is on a per Metaspace basis
// and protected by the Metaspace lock.
jlong minus_words = (jlong) - (jlong) words;
- Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
+ Atomic::add_ptr(minus_words, &_used_words[mdtype]);
}
void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
- // _allocated_used_words tracks allocations for
+ // _used_words tracks allocations for
// each piece of metadata. Those allocations are
// generally done concurrently by different application
// threads so must be done atomically.
- Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
+ Atomic::add_ptr(words, &_used_words[mdtype]);
}
size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
@@ -2629,16 +2629,16 @@ size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
size_t MetaspaceAux::capacity_bytes_slow() {
#ifdef PRODUCT
- // Use allocated_capacity_bytes() in PRODUCT instead of this function.
+ // Use capacity_bytes() in PRODUCT instead of this function.
guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
#endif
size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
- assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
- err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
+ assert(capacity_bytes() == class_capacity + non_class_capacity,
+ err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
" class_capacity + non_class_capacity " SIZE_FORMAT
" class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
- allocated_capacity_bytes(), class_capacity + non_class_capacity,
+ capacity_bytes(), class_capacity + non_class_capacity,
class_capacity, non_class_capacity));
return class_capacity + non_class_capacity;
@@ -2698,14 +2698,14 @@ void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
"->" SIZE_FORMAT
"(" SIZE_FORMAT ")",
prev_metadata_used,
- allocated_used_bytes(),
+ used_bytes(),
reserved_bytes());
} else {
gclog_or_tty->print(" " SIZE_FORMAT "K"
"->" SIZE_FORMAT "K"
"(" SIZE_FORMAT "K)",
prev_metadata_used/K,
- allocated_used_bytes()/K,
+ used_bytes()/K,
reserved_bytes()/K);
}
@@ -2721,8 +2721,8 @@ void MetaspaceAux::print_on(outputStream* out) {
"capacity " SIZE_FORMAT "K, "
"committed " SIZE_FORMAT "K, "
"reserved " SIZE_FORMAT "K",
- allocated_used_bytes()/K,
- allocated_capacity_bytes()/K,
+ used_bytes()/K,
+ capacity_bytes()/K,
committed_bytes()/K,
reserved_bytes()/K);
@@ -2733,8 +2733,8 @@ void MetaspaceAux::print_on(outputStream* out) {
"capacity " SIZE_FORMAT "K, "
"committed " SIZE_FORMAT "K, "
"reserved " SIZE_FORMAT "K",
- allocated_used_bytes(ct)/K,
- allocated_capacity_bytes(ct)/K,
+ used_bytes(ct)/K,
+ capacity_bytes(ct)/K,
committed_bytes(ct)/K,
reserved_bytes(ct)/K);
}
@@ -2836,42 +2836,42 @@ void MetaspaceAux::verify_free_chunks() {
void MetaspaceAux::verify_capacity() {
#ifdef ASSERT
- size_t running_sum_capacity_bytes = allocated_capacity_bytes();
+ size_t running_sum_capacity_bytes = capacity_bytes();
// For purposes of the running sum of capacity, verify against capacity
size_t capacity_in_use_bytes = capacity_bytes_slow();
assert(running_sum_capacity_bytes == capacity_in_use_bytes,
- err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
+ err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT
" capacity_bytes_slow()" SIZE_FORMAT,
running_sum_capacity_bytes, capacity_in_use_bytes));
for (Metaspace::MetadataType i = Metaspace::ClassType;
i < Metaspace:: MetadataTypeCount;
i = (Metaspace::MetadataType)(i + 1)) {
size_t capacity_in_use_bytes = capacity_bytes_slow(i);
- assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
- err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
+ assert(capacity_bytes(i) == capacity_in_use_bytes,
+ err_msg("capacity_bytes(%u) " SIZE_FORMAT
" capacity_bytes_slow(%u)" SIZE_FORMAT,
- i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
+ i, capacity_bytes(i), i, capacity_in_use_bytes));
}
#endif
}
void MetaspaceAux::verify_used() {
#ifdef ASSERT
- size_t running_sum_used_bytes = allocated_used_bytes();
+ size_t running_sum_used_bytes = used_bytes();
// For purposes of the running sum of used, verify against used
size_t used_in_use_bytes = used_bytes_slow();
- assert(allocated_used_bytes() == used_in_use_bytes,
- err_msg("allocated_used_bytes() " SIZE_FORMAT
+ assert(used_bytes() == used_in_use_bytes,
+ err_msg("used_bytes() " SIZE_FORMAT
" used_bytes_slow()" SIZE_FORMAT,
- allocated_used_bytes(), used_in_use_bytes));
+ used_bytes(), used_in_use_bytes));
for (Metaspace::MetadataType i = Metaspace::ClassType;
i < Metaspace:: MetadataTypeCount;
i = (Metaspace::MetadataType)(i + 1)) {
size_t used_in_use_bytes = used_bytes_slow(i);
- assert(allocated_used_bytes(i) == used_in_use_bytes,
- err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
+ assert(used_bytes(i) == used_in_use_bytes,
+ err_msg("used_bytes(%u) " SIZE_FORMAT
" used_bytes_slow(%u)" SIZE_FORMAT,
- i, allocated_used_bytes(i), i, used_in_use_bytes));
+ i, used_bytes(i), i, used_in_use_bytes));
}
#endif
}
diff --git a/src/share/vm/memory/metaspace.hpp b/src/share/vm/memory/metaspace.hpp
index cd02b4a15..1beddef8e 100644
--- a/src/share/vm/memory/metaspace.hpp
+++ b/src/share/vm/memory/metaspace.hpp
@@ -280,11 +280,11 @@ class MetaspaceAux : AllStatic {
// allocated to a Metaspace. This is used instead of
// iterating over all the classloaders. One for each
// type of Metadata
- static size_t _allocated_capacity_words[Metaspace:: MetadataTypeCount];
- // Running sum of space in all Metachunks that have
+ static size_t _capacity_words[Metaspace:: MetadataTypeCount];
+ // Running sum of space in all Metachunks that
// are being used for metadata. One for each
// type of Metadata.
- static size_t _allocated_used_words[Metaspace:: MetadataTypeCount];
+ static size_t _used_words[Metaspace:: MetadataTypeCount];
public:
// Decrement and increment _allocated_capacity_words
@@ -308,32 +308,32 @@ class MetaspaceAux : AllStatic {
static size_t free_chunks_total_bytes();
static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
- static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
- return _allocated_capacity_words[mdtype];
+ static size_t capacity_words(Metaspace::MetadataType mdtype) {
+ return _capacity_words[mdtype];
}
- static size_t allocated_capacity_words() {
- return allocated_capacity_words(Metaspace::NonClassType) +
- allocated_capacity_words(Metaspace::ClassType);
+ static size_t capacity_words() {
+ return capacity_words(Metaspace::NonClassType) +
+ capacity_words(Metaspace::ClassType);
}
- static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
- return allocated_capacity_words(mdtype) * BytesPerWord;
+ static size_t capacity_bytes(Metaspace::MetadataType mdtype) {
+ return capacity_words(mdtype) * BytesPerWord;
}
- static size_t allocated_capacity_bytes() {
- return allocated_capacity_words() * BytesPerWord;
+ static size_t capacity_bytes() {
+ return capacity_words() * BytesPerWord;
}
- static size_t allocated_used_words(Metaspace::MetadataType mdtype) {
- return _allocated_used_words[mdtype];
+ static size_t used_words(Metaspace::MetadataType mdtype) {
+ return _used_words[mdtype];
}
- static size_t allocated_used_words() {
- return allocated_used_words(Metaspace::NonClassType) +
- allocated_used_words(Metaspace::ClassType);
+ static size_t used_words() {
+ return used_words(Metaspace::NonClassType) +
+ used_words(Metaspace::ClassType);
}
- static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
- return allocated_used_words(mdtype) * BytesPerWord;
+ static size_t used_bytes(Metaspace::MetadataType mdtype) {
+ return used_words(mdtype) * BytesPerWord;
}
- static size_t allocated_used_bytes() {
- return allocated_used_words() * BytesPerWord;
+ static size_t used_bytes() {
+ return used_words() * BytesPerWord;
}
static size_t free_bytes();
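The renamed accessors keep one running counter per metadata type and derive everything else from it: the unqualified form sums the types and the byte forms scale by BytesPerWord. A self-contained sketch of the scheme, with the types simplified:

    #include <cstddef>

    enum MetadataType { NonClassType = 0, ClassType = 1, MetadataTypeCount = 2 };
    static const size_t BytesPerWord = sizeof(void*);

    static size_t _used_words[MetadataTypeCount] = {0, 0};

    size_t used_words(MetadataType t) { return _used_words[t]; }
    size_t used_words()               { return used_words(NonClassType) + used_words(ClassType); }
    size_t used_bytes(MetadataType t) { return used_words(t) * BytesPerWord; }
    size_t used_bytes()               { return used_words() * BytesPerWord; }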
diff --git a/src/share/vm/memory/metaspaceCounters.cpp b/src/share/vm/memory/metaspaceCounters.cpp
index 3ad462d08..44da7dbe5 100644
--- a/src/share/vm/memory/metaspaceCounters.cpp
+++ b/src/share/vm/memory/metaspaceCounters.cpp
@@ -66,7 +66,7 @@ class MetaspacePerfCounters: public CHeapObj<mtInternal> {
MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
size_t MetaspaceCounters::used() {
- return MetaspaceAux::allocated_used_bytes();
+ return MetaspaceAux::used_bytes();
}
size_t MetaspaceCounters::capacity() {
@@ -98,7 +98,7 @@ void MetaspaceCounters::update_performance_counters() {
MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
size_t CompressedClassSpaceCounters::used() {
- return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
+ return MetaspaceAux::used_bytes(Metaspace::ClassType);
}
size_t CompressedClassSpaceCounters::capacity() {
diff --git a/src/share/vm/memory/metaspaceShared.cpp b/src/share/vm/memory/metaspaceShared.cpp
index ef51c9266..c8c6b236e 100644
--- a/src/share/vm/memory/metaspaceShared.cpp
+++ b/src/share/vm/memory/metaspaceShared.cpp
@@ -645,9 +645,6 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
TraceTime timer("Dump Shared Spaces", TraceStartupTime);
ResourceMark rm;
- // Lock out GC - is it necessary? I don't think we care.
- No_GC_Verifier no_gc;
-
// Preload classes to be shared.
// Should use some os:: method rather than fopen() here. aB.
// Construct the path to the class list (in jre/lib)
diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp
index f09ba89ac..216e8aca5 100644
--- a/src/share/vm/memory/universe.cpp
+++ b/src/share/vm/memory/universe.cpp
@@ -632,7 +632,6 @@ jint universe_init() {
guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
"oop size is not not a multiple of HeapWord size");
TraceTime timer("Genesis", TraceStartupTime);
- GC_locker::lock(); // do not allow gc during bootstrapping
JavaClasses::compute_hard_coded_offsets();
jint status = Universe::initialize_heap();
@@ -1164,8 +1163,6 @@ bool universe_post_init() {
MemoryService::add_metaspace_memory_pools();
- GC_locker::unlock(); // allow gc after bootstrapping
-
MemoryService::set_universe_heap(Universe::_collectedHeap);
return true;
}
diff --git a/src/share/vm/oops/arrayKlass.cpp b/src/share/vm/oops/arrayKlass.cpp
index fcb46b11f..c55992f07 100644
--- a/src/share/vm/oops/arrayKlass.cpp
+++ b/src/share/vm/oops/arrayKlass.cpp
@@ -64,10 +64,10 @@ oop ArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) {
return NULL;
}
-Method* ArrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
+Method* ArrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const {
// There are no methods in an array klass but the super class (Object) has some
assert(super(), "super klass must be present");
- return super()->uncached_lookup_method(name, signature);
+ return super()->uncached_lookup_method(name, signature, mode);
}
ArrayKlass::ArrayKlass(Symbol* name) {
diff --git a/src/share/vm/oops/arrayKlass.hpp b/src/share/vm/oops/arrayKlass.hpp
index 7b4ad2e9a..f42d96e16 100644
--- a/src/share/vm/oops/arrayKlass.hpp
+++ b/src/share/vm/oops/arrayKlass.hpp
@@ -86,7 +86,7 @@ class ArrayKlass: public Klass {
objArrayOop allocate_arrayArray(int n, int length, TRAPS);
// Lookup operations
- Method* uncached_lookup_method(Symbol* name, Symbol* signature) const;
+ Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
// Casting from Klass*
static ArrayKlass* cast(Klass* k) {
diff --git a/src/share/vm/oops/constantPool.cpp b/src/share/vm/oops/constantPool.cpp
index 90abff7b0..0f98ceb05 100644
--- a/src/share/vm/oops/constantPool.cpp
+++ b/src/share/vm/oops/constantPool.cpp
@@ -144,6 +144,10 @@ void ConstantPool::initialize_resolved_references(ClassLoaderData* loader_data,
// CDS support. Create a new resolved_references array.
void ConstantPool::restore_unshareable_info(TRAPS) {
+ // Only create the new resolved references array and lock if it hasn't been
+ // attempted before
+ if (resolved_references() != NULL) return;
+
// restore the C++ vtable from the shared archive
restore_vtable();
diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp
index dde25fa05..4785462a4 100644
--- a/src/share/vm/oops/instanceKlass.cpp
+++ b/src/share/vm/oops/instanceKlass.cpp
@@ -1329,17 +1329,18 @@ void InstanceKlass::do_local_static_fields(FieldClosure* cl) {
}
-void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
+void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, Handle, TRAPS), Handle mirror, TRAPS) {
instanceKlassHandle h_this(THREAD, this);
- do_local_static_fields_impl(h_this, f, CHECK);
+ do_local_static_fields_impl(h_this, f, mirror, CHECK);
}
-void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
- for (JavaFieldStream fs(this_oop()); !fs.done(); fs.next()) {
+void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_k,
+ void f(fieldDescriptor* fd, Handle mirror, TRAPS), Handle mirror, TRAPS) {
+ for (JavaFieldStream fs(this_k()); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) {
fieldDescriptor& fd = fs.field_descriptor();
- f(&fd, CHECK);
+ f(&fd, mirror, CHECK);
}
}
}
@@ -1428,7 +1429,11 @@ static int binary_search(Array<Method*>* methods, Symbol* name) {
// find_method looks up the name/signature in the local methods array
Method* InstanceKlass::find_method(Symbol* name, Symbol* signature) const {
- return InstanceKlass::find_method(methods(), name, signature);
+ return find_method_impl(name, signature, false);
+}
+
+Method* InstanceKlass::find_method_impl(Symbol* name, Symbol* signature, bool skipping_overpass) const {
+ return InstanceKlass::find_method_impl(methods(), name, signature, skipping_overpass);
}
// find_instance_method looks up the name/signature in the local methods array
@@ -1445,40 +1450,49 @@ Method* InstanceKlass::find_instance_method(
// find_method looks up the name/signature in the local methods array
Method* InstanceKlass::find_method(
Array<Method*>* methods, Symbol* name, Symbol* signature) {
- int hit = find_method_index(methods, name, signature);
+ return InstanceKlass::find_method_impl(methods, name, signature, false);
+}
+
+Method* InstanceKlass::find_method_impl(
+ Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass) {
+ int hit = find_method_index(methods, name, signature, skipping_overpass);
return hit >= 0 ? methods->at(hit): NULL;
}
// Used directly for default_methods to find the index into the
// default_vtable_indices, and indirectly by find_method
// find_method_index looks in the local methods array to return the index
-// of the matching name/signature
+// of the matching name/signature. If overpass methods are being ignored,
+// the search continues to find a potential non-overpass match. This capability
+// is important during method resolution to prefer a static method, for example,
+// over an overpass method.
int InstanceKlass::find_method_index(
- Array<Method*>* methods, Symbol* name, Symbol* signature) {
+ Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass) {
int hit = binary_search(methods, name);
if (hit != -1) {
Method* m = methods->at(hit);
// Do linear search to find matching signature. First, quick check
- // for common case
- if (m->signature() == signature) return hit;
+ // for common case, ignoring overpasses if requested.
+ if ((m->signature() == signature) && (!skipping_overpass || !m->is_overpass())) return hit;
+
// search downwards through overloaded methods
int i;
for (i = hit - 1; i >= 0; --i) {
Method* m = methods->at(i);
assert(m->is_method(), "must be method");
if (m->name() != name) break;
- if (m->signature() == signature) return i;
+ if ((m->signature() == signature) && (!skipping_overpass || !m->is_overpass())) return i;
}
// search upwards
for (i = hit + 1; i < methods->length(); ++i) {
Method* m = methods->at(i);
assert(m->is_method(), "must be method");
if (m->name() != name) break;
- if (m->signature() == signature) return i;
+ if ((m->signature() == signature) && (!skipping_overpass || !m->is_overpass())) return i;
}
// not found
#ifdef ASSERT
- int index = linear_search(methods, name, signature);
+ int index = skipping_overpass ? -1 : linear_search(methods, name, signature);
assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
}
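find_method_index() binary-searches on the method name and then scans the run of equally named overloads, applying the signature match together with the optional overpass filter. The sketch below compresses the up-and-down scan into one forward pass over the name run via std::lower_bound; M is a stand-in struct, not Method*:

    #include <algorithm>
    #include <string>
    #include <vector>

    struct M { std::string name, sig; bool overpass; };

    // 'ms' must be sorted by name; returns an index or -1.
    int find_index(const std::vector<M>& ms, const std::string& name,
                   const std::string& sig, bool skip_overpass) {
      auto lo = std::lower_bound(ms.begin(), ms.end(), name,
          [](const M& m, const std::string& n) { return m.name < n; });
      for (auto it = lo; it != ms.end() && it->name == name; ++it)
        if (it->sig == sig && !(skip_overpass && it->overpass))
          return int(it - ms.begin());   // first acceptable match in the run
      return -1;
    }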
@@ -1504,16 +1518,16 @@ int InstanceKlass::find_method_by_name(
// uncached_lookup_method searches both the local class methods array and all
// superclasses methods arrays, skipping any overpass methods in superclasses.
-Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
+Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const {
+ MethodLookupMode lookup_mode = mode;
Klass* klass = const_cast<InstanceKlass*>(this);
- bool dont_ignore_overpasses = true; // For the class being searched, find its overpasses.
while (klass != NULL) {
- Method* method = InstanceKlass::cast(klass)->find_method(name, signature);
- if ((method != NULL) && (dont_ignore_overpasses || !method->is_overpass())) {
+ Method* method = InstanceKlass::cast(klass)->find_method_impl(name, signature, (lookup_mode == skip_overpass));
+ if (method != NULL) {
return method;
}
klass = InstanceKlass::cast(klass)->super();
- dont_ignore_overpasses = false; // Ignore overpass methods in all superclasses.
+ lookup_mode = skip_overpass; // Always ignore overpass methods in superclasses
}
return NULL;
}
@@ -1528,7 +1542,7 @@ Method* InstanceKlass::lookup_method_in_ordered_interfaces(Symbol* name,
}
// Look up interfaces
if (m == NULL) {
- m = lookup_method_in_all_interfaces(name, signature, false);
+ m = lookup_method_in_all_interfaces(name, signature, normal);
}
return m;
}
@@ -1538,7 +1552,7 @@ Method* InstanceKlass::lookup_method_in_ordered_interfaces(Symbol* name,
// They should only be found in the initial InterfaceMethodRef
Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name,
Symbol* signature,
- bool skip_default_methods) const {
+ MethodLookupMode mode) const {
Array<Klass*>* all_ifs = transitive_interfaces();
int num_ifs = all_ifs->length();
InstanceKlass *ik = NULL;
@@ -1546,7 +1560,7 @@ Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name,
ik = InstanceKlass::cast(all_ifs->at(i));
Method* m = ik->lookup_method(name, signature);
if (m != NULL && m->is_public() && !m->is_static() &&
- (!skip_default_methods || !m->is_default_method())) {
+ ((mode != skip_defaults) || !m->is_default_method())) {
return m;
}
}
@@ -2280,9 +2294,7 @@ void InstanceKlass::restore_unshareable_info(TRAPS) {
int num_methods = methods->length();
for (int index2 = 0; index2 < num_methods; ++index2) {
methodHandle m(THREAD, methods->at(index2));
- m()->link_method(m, CHECK);
- // restore method's vtable by calling a virtual function
- m->restore_vtable();
+ m->restore_unshareable_info(CHECK);
}
if (JvmtiExport::has_redefined_a_class()) {
// Reinitialize vtable because RedefineClasses may have changed some
diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp
index b25c75afc..31e5accf2 100644
--- a/src/share/vm/oops/instanceKlass.hpp
+++ b/src/share/vm/oops/instanceKlass.hpp
@@ -518,14 +518,14 @@ class InstanceKlass: public Klass {
static Method* find_instance_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
// find a local method index in default_methods (returns -1 if not found)
- static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature);
+ static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass);
// lookup operation (returns NULL if not found)
- Method* uncached_lookup_method(Symbol* name, Symbol* signature) const;
+ Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
// lookup a method in all the interfaces that this class implements
// (returns NULL if not found)
- Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature, bool skip_default_methods) const;
+ Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
// lookup a method in local defaults then in all interfaces
// (returns NULL if not found)
@@ -830,7 +830,7 @@ class InstanceKlass: public Klass {
// Iterators
void do_local_static_fields(FieldClosure* cl);
void do_nonstatic_fields(FieldClosure* cl); // including inherited fields
- void do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS);
+ void do_local_static_fields(void f(fieldDescriptor*, Handle, TRAPS), Handle, TRAPS);
void methods_do(void f(Method* method));
void array_klasses_do(void f(Klass* k));
@@ -1038,7 +1038,7 @@ private:
static void set_initialization_state_and_notify_impl (instanceKlassHandle this_oop, ClassState state, TRAPS);
static void call_class_initializer_impl (instanceKlassHandle this_oop, TRAPS);
static Klass* array_klass_impl (instanceKlassHandle this_oop, bool or_null, int n, TRAPS);
- static void do_local_static_fields_impl (instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS);
+ static void do_local_static_fields_impl (instanceKlassHandle this_oop, void f(fieldDescriptor* fd, Handle, TRAPS), Handle, TRAPS);
/* jni_id_for_impl for jfieldID only */
static JNIid* jni_id_for_impl (instanceKlassHandle this_oop, int offset);
@@ -1048,6 +1048,10 @@ private:
// Returns the array class with this class as element type
Klass* array_klass_impl(bool or_null, TRAPS);
+ // find a local method (returns NULL if not found)
+ Method* find_method_impl(Symbol* name, Symbol* signature, bool skipping_overpass) const;
+ static Method* find_method_impl(Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass);
+
// Free CHeap allocated fields.
void release_C_heap_structures();
public:
diff --git a/src/share/vm/oops/instanceMirrorKlass.cpp b/src/share/vm/oops/instanceMirrorKlass.cpp
index 70c2ca191..fd05124f8 100644
--- a/src/share/vm/oops/instanceMirrorKlass.cpp
+++ b/src/share/vm/oops/instanceMirrorKlass.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -367,7 +367,12 @@ instanceOop InstanceMirrorKlass::allocate_instance(KlassHandle k, TRAPS) {
// Query before forming handle.
int size = instance_size(k);
KlassHandle h_k(THREAD, this);
- instanceOop i = (instanceOop) CollectedHeap::Class_obj_allocate(h_k, size, k, CHECK_NULL);
+ instanceOop i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
+
+  // Since mirrors can vary in size because of their static fields, store
+ // the size in the mirror itself.
+ java_lang_Class::set_oop_size(i, size);
+
return i;
}
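Because java.lang.Class instances vary in size with the class's static fields, the computed size is now stored in the mirror itself (via java_lang_Class::set_oop_size) so later heap walkers need not recompute it. A tiny sketch of such a self-describing allocation, with hypothetical names (Mirror, allocate_mirror):

    #include <cstdlib>
    #include <new>

    struct Mirror { int oop_size_in_words; /* static fields would follow */ };

    Mirror* allocate_mirror(int size_in_words) {
      void* raw = std::malloc(size_in_words * sizeof(void*));
      return new (raw) Mirror{size_in_words};  // stash the size in the object
    }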
diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp
index adca246aa..2e8ef6f8f 100644
--- a/src/share/vm/oops/klass.cpp
+++ b/src/share/vm/oops/klass.cpp
@@ -129,7 +129,7 @@ bool Klass::compute_is_subtype_of(Klass* k) {
}
-Method* Klass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
+Method* Klass::uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const {
#ifdef ASSERT
tty->print_cr("Error: uncached_lookup_method called on a klass oop."
" Likely error: reflection method does not correctly"
@@ -483,12 +483,8 @@ void Klass::oops_do(OopClosure* cl) {
}
void Klass::remove_unshareable_info() {
- if (!DumpSharedSpaces) {
- // Clean up after OOM during class loading
- if (class_loader_data() != NULL) {
- class_loader_data()->remove_class(this);
- }
- }
+ assert (DumpSharedSpaces, "only called for DumpSharedSpaces");
+
set_subklass(NULL);
set_next_sibling(NULL);
// Clear the java mirror
@@ -500,17 +496,26 @@ void Klass::remove_unshareable_info() {
}
void Klass::restore_unshareable_info(TRAPS) {
- ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
- // Restore class_loader_data to the null class loader data
- set_class_loader_data(loader_data);
-
- // Add to null class loader list first before creating the mirror
- // (same order as class file parsing)
- loader_data->add_class(this);
+ // If an exception happened during CDS restore, some of these fields may already be
+  // set. We leave the class on the CLD list, even if incomplete, so that we don't
+ // modify the CLD list outside a safepoint.
+ if (class_loader_data() == NULL) {
+ ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+ // Restore class_loader_data to the null class loader data
+ set_class_loader_data(loader_data);
+
+ // Add to null class loader list first before creating the mirror
+ // (same order as class file parsing)
+ loader_data->add_class(this);
+ }
// Recreate the class mirror. The protection_domain is always null for
// boot loader, for now.
- java_lang_Class::create_mirror(this, Handle(NULL), CHECK);
+ // Only recreate it if not present. A previous attempt to restore may have
+ // gotten an OOM later but keep the mirror if it was created.
+ if (java_mirror() == NULL) {
+ java_lang_Class::create_mirror(this, Handle(NULL), CHECK);
+ }
}
Klass* Klass::array_klass_or_null(int rank) {
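restore_unshareable_info() is now written to be safely re-runnable: each step first checks whether a previous, possibly failed, attempt already completed it (CLD registration, mirror creation), so an OOM partway through does not corrupt state on retry. A minimal sketch of that idempotent-restore pattern, with placeholder helpers:

    struct Restorable {
      void* loader_data = nullptr;
      void* mirror = nullptr;

      void* register_with_loader() { return this; }  // placeholder step
      void* create_mirror()        { return this; }  // placeholder, may fail

      void restore() {
        if (loader_data == nullptr)               // done at most once
          loader_data = register_with_loader();
        if (mirror == nullptr)                    // skip if already created
          mirror = create_mirror();
      }
    };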
diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp
index a31a23573..0d9dbcee5 100644
--- a/src/share/vm/oops/klass.hpp
+++ b/src/share/vm/oops/klass.hpp
@@ -182,6 +182,8 @@ class Klass : public Metadata {
void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw();
public:
+ enum MethodLookupMode { normal, skip_overpass, skip_defaults };
+
bool is_klass() const volatile { return true; }
// super
@@ -421,10 +423,10 @@ class Klass : public Metadata {
virtual void initialize(TRAPS);
// lookup operation for MethodLookupCache
friend class MethodLookupCache;
- virtual Method* uncached_lookup_method(Symbol* name, Symbol* signature) const;
+ virtual Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
public:
Method* lookup_method(Symbol* name, Symbol* signature) const {
- return uncached_lookup_method(name, signature);
+ return uncached_lookup_method(name, signature, normal);
}
// array class with specific rank
diff --git a/src/share/vm/oops/klassVtable.cpp b/src/share/vm/oops/klassVtable.cpp
index a7fc062b7..ad185c7f1 100644
--- a/src/share/vm/oops/klassVtable.cpp
+++ b/src/share/vm/oops/klassVtable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -622,7 +622,7 @@ bool klassVtable::needs_new_vtable_entry(methodHandle target_method,
// this check for all access permissions.
InstanceKlass *sk = InstanceKlass::cast(super);
if (sk->has_miranda_methods()) {
- if (sk->lookup_method_in_all_interfaces(name, signature, false) != NULL) {
+ if (sk->lookup_method_in_all_interfaces(name, signature, Klass::normal) != NULL) {
return false; // found a matching miranda; we do not need a new entry
}
}
@@ -698,7 +698,7 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
&& mo->method_holder() != NULL
&& mo->method_holder()->super() != NULL)
{
- mo = mo->method_holder()->super()->uncached_lookup_method(name, signature);
+ mo = mo->method_holder()->super()->uncached_lookup_method(name, signature, Klass::normal);
}
if (mo == NULL || mo->access_flags().is_private() ) {
// super class hierarchy does not implement it or protection is different
@@ -743,7 +743,7 @@ void klassVtable::add_new_mirandas_to_lists(
if (is_miranda(im, class_methods, default_methods, super)) { // is it a miranda at all?
InstanceKlass *sk = InstanceKlass::cast(super);
// check if it is a duplicate of a super's miranda
- if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), false) == NULL) {
+ if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), Klass::normal) == NULL) {
new_mirandas->append(im);
}
if (all_mirandas != NULL) {
diff --git a/src/share/vm/oops/metadata.hpp b/src/share/vm/oops/metadata.hpp
index 84a60893e..dc52c452e 100644
--- a/src/share/vm/oops/metadata.hpp
+++ b/src/share/vm/oops/metadata.hpp
@@ -42,6 +42,7 @@ class Metadata : public MetaspaceObj {
// Rehashing support for tables containing pointers to this
unsigned int new_hash(juint seed) { ShouldNotReachHere(); return 0; }
+ virtual bool is_metadata() const volatile { return true; }
virtual bool is_klass() const volatile { return false; }
virtual bool is_method() const volatile { return false; }
virtual bool is_methodData() const volatile { return false; }
diff --git a/src/share/vm/oops/method.cpp b/src/share/vm/oops/method.cpp
index d97a626eb..69adbea3d 100644
--- a/src/share/vm/oops/method.cpp
+++ b/src/share/vm/oops/method.cpp
@@ -905,6 +905,19 @@ address Method::make_adapters(methodHandle mh, TRAPS) {
return adapter->get_c2i_entry();
}
+void Method::restore_unshareable_info(TRAPS) {
+ // Since restore_unshareable_info can be called more than once for a method, don't
+  // redo any work. If _from_compiled_entry is already restored, there is nothing to do.
+ if (_from_compiled_entry == NULL) {
+ // restore method's vtable by calling a virtual function
+ restore_vtable();
+
+ methodHandle mh(THREAD, this);
+ link_method(mh, CHECK);
+ }
+}
+
+
// The verified_code_entry() must be called when an invoke is resolved
// on this method.
diff --git a/src/share/vm/oops/method.hpp b/src/share/vm/oops/method.hpp
index 3f5819391..93495524b 100644
--- a/src/share/vm/oops/method.hpp
+++ b/src/share/vm/oops/method.hpp
@@ -156,6 +156,8 @@ class Method : public Metadata {
void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
bool is_method() const volatile { return true; }
+ void restore_unshareable_info(TRAPS);
+
// accessors for instance variables
ConstMethod* constMethod() const { return _constMethod; }
diff --git a/src/share/vm/opto/loopTransform.cpp b/src/share/vm/opto/loopTransform.cpp
index fcf623ca7..16e5e8c1e 100644
--- a/src/share/vm/opto/loopTransform.cpp
+++ b/src/share/vm/opto/loopTransform.cpp
@@ -1147,6 +1147,7 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
// Now force out all loop-invariant dominating tests. The optimizer
// finds some, but we _know_ they are all useless.
peeled_dom_test_elim(loop,old_new);
+ loop->record_for_igvn();
}
//------------------------------is_invariant-----------------------------
diff --git a/src/share/vm/opto/loopnode.cpp b/src/share/vm/opto/loopnode.cpp
index 719a1fb34..4f11936ec 100644
--- a/src/share/vm/opto/loopnode.cpp
+++ b/src/share/vm/opto/loopnode.cpp
@@ -3171,17 +3171,16 @@ bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early
bool had_error = false;
#ifdef ASSERT
if (early != C->root()) {
- // Make sure that there's a dominance path from use to LCA
- Node* d = use;
- while (d != LCA) {
- d = idom(d);
+ // Make sure that there's a dominance path from LCA to early
+ Node* d = LCA;
+ while (d != early) {
if (d == C->root()) {
- tty->print_cr("*** Use %d isn't dominated by def %s", use->_idx, n->_idx);
- n->dump();
- use->dump();
+ dump_bad_graph("Bad graph detected in compute_lca_of_uses", n, early, LCA);
+ tty->print_cr("*** Use %d isn't dominated by def %d ***", use->_idx, n->_idx);
had_error = true;
break;
}
+ d = idom(d);
}
}
#endif
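The corrected assert walks immediate dominators from the LCA of the uses toward 'early'; reaching the root first means the def does not dominate the use. A stand-alone sketch of that walk, where Node is a stand-in carrying only an idom link:

    struct Node { Node* idom; };

    // Follow idom links from the LCA; hitting 'root' before 'early'
    // means the dominance requirement is violated.
    bool dominated(Node* early, Node* lca, Node* root) {
      for (Node* d = lca; d != early; d = d->idom) {
        if (d == root) return false;
      }
      return true;
    }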
@@ -3434,6 +3433,13 @@ void PhaseIdealLoop::build_loop_late_post( Node *n ) {
_igvn._worklist.push(n); // Maybe we'll normalize it, if no more loops.
}
+#ifdef ASSERT
+ if (_verify_only && !n->is_CFG()) {
+ // Check def-use domination.
+ compute_lca_of_uses(n, get_ctrl(n), true /* verify */);
+ }
+#endif
+
// CFG and pinned nodes already handled
if( n->in(0) ) {
if( n->in(0)->is_top() ) return; // Dead?
diff --git a/src/share/vm/opto/loopopts.cpp b/src/share/vm/opto/loopopts.cpp
index f9a87bbac..922c991f3 100644
--- a/src/share/vm/opto/loopopts.cpp
+++ b/src/share/vm/opto/loopopts.cpp
@@ -2698,6 +2698,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
// Inhibit more partial peeling on this loop
new_head_clone->set_partial_peel_loop();
C->set_major_progress();
+ loop->record_for_igvn();
#if !defined(PRODUCT)
if (TracePartialPeeling) {
diff --git a/src/share/vm/opto/machnode.hpp b/src/share/vm/opto/machnode.hpp
index da7c58f73..2bbbd3a65 100644
--- a/src/share/vm/opto/machnode.hpp
+++ b/src/share/vm/opto/machnode.hpp
@@ -210,7 +210,14 @@ public:
bool may_be_short_branch() const { return (flags() & Flag_may_be_short_branch) != 0; }
  // Avoid back-to-back placement of some instructions on some CPUs.
- bool avoid_back_to_back() const { return (flags() & Flag_avoid_back_to_back) != 0; }
+ enum AvoidBackToBackFlag { AVOID_NONE = 0,
+ AVOID_BEFORE = Flag_avoid_back_to_back_before,
+ AVOID_AFTER = Flag_avoid_back_to_back_after,
+ AVOID_BEFORE_AND_AFTER = AVOID_BEFORE | AVOID_AFTER };
+
+ bool avoid_back_to_back(AvoidBackToBackFlag flag_value) const {
+ return (flags() & flag_value) == flag_value;
+ }
// instruction implemented with a call
bool has_call() const { return (flags() & Flag_has_call) != 0; }
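The single avoid_back_to_back bit becomes a before/after pair, and the query tests the exact mask, so AVOID_BEFORE_AND_AFTER is satisfied only when both bits are set. A sketch of the flag arithmetic with illustrative bit values (not HotSpot's actual flag positions):

    enum AvoidBackToBack {
      AVOID_NONE   = 0,
      AVOID_BEFORE = 1 << 0,
      AVOID_AFTER  = 1 << 1,
      AVOID_BEFORE_AND_AFTER = AVOID_BEFORE | AVOID_AFTER
    };

    struct Insn {
      unsigned flags;
      bool avoid_back_to_back(AvoidBackToBack f) const {
        return (flags & f) == (unsigned)f;  // all requested bits must be set
      }
    };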
diff --git a/src/share/vm/opto/node.hpp b/src/share/vm/opto/node.hpp
index 5f883d5f1..0b51d0487 100644
--- a/src/share/vm/opto/node.hpp
+++ b/src/share/vm/opto/node.hpp
@@ -645,17 +645,18 @@ public:
// Flags are sorted by usage frequency.
enum NodeFlags {
- Flag_is_Copy = 0x01, // should be first bit to avoid shift
- Flag_rematerialize = Flag_is_Copy << 1,
+ Flag_is_Copy = 0x01, // should be first bit to avoid shift
+ Flag_rematerialize = Flag_is_Copy << 1,
Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
- Flag_is_macro = Flag_needs_anti_dependence_check << 1,
- Flag_is_Con = Flag_is_macro << 1,
- Flag_is_cisc_alternate = Flag_is_Con << 1,
- Flag_is_dead_loop_safe = Flag_is_cisc_alternate << 1,
- Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
- Flag_avoid_back_to_back = Flag_may_be_short_branch << 1,
- Flag_has_call = Flag_avoid_back_to_back << 1,
- Flag_is_expensive = Flag_has_call << 1,
+ Flag_is_macro = Flag_needs_anti_dependence_check << 1,
+ Flag_is_Con = Flag_is_macro << 1,
+ Flag_is_cisc_alternate = Flag_is_Con << 1,
+ Flag_is_dead_loop_safe = Flag_is_cisc_alternate << 1,
+ Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
+ Flag_avoid_back_to_back_before = Flag_may_be_short_branch << 1,
+ Flag_avoid_back_to_back_after = Flag_avoid_back_to_back_before << 1,
+ Flag_has_call = Flag_avoid_back_to_back_after << 1,
+ Flag_is_expensive = Flag_has_call << 1,
_max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
};
diff --git a/src/share/vm/opto/output.cpp b/src/share/vm/opto/output.cpp
index debf13693..1b32dea11 100644
--- a/src/share/vm/opto/output.cpp
+++ b/src/share/vm/opto/output.cpp
@@ -411,7 +411,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
blk_size += nop_size;
}
}
- if (mach->avoid_back_to_back()) {
+ if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
// Nop is inserted between "avoid back to back" instructions.
// ScheduleAndBundle() can rearrange nodes in a block,
// check for all offsets inside this block.
@@ -439,7 +439,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
last_call_adr = blk_starts[i]+blk_size;
}
// Remember end of avoid_back_to_back offset
- if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back()) {
+ if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
last_avoid_back_to_back_adr = blk_starts[i]+blk_size;
}
}
@@ -525,11 +525,11 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
int new_size = replacement->size(_regalloc);
int diff = br_size - new_size;
assert(diff >= (int)nop_size, "short_branch size should be smaller");
- // Conservatively take into accound padding between
+ // Conservatively take into account padding between
// avoid_back_to_back branches. Previous branch could be
// converted into avoid_back_to_back branch during next
// rounds.
- if (needs_padding && replacement->avoid_back_to_back()) {
+ if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
jmp_offset[i] += nop_size;
diff -= nop_size;
}
@@ -548,7 +548,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
}
} // (mach->may_be_short_branch())
if (mach != NULL && (mach->may_be_short_branch() ||
- mach->avoid_back_to_back())) {
+ mach->avoid_back_to_back(MachNode::AVOID_AFTER))) {
last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
}
blk_starts[i+1] -= adjust_block_start;
@@ -1313,7 +1313,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
padding = nop_size;
}
- if (padding == 0 && mach->avoid_back_to_back() &&
+ if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
current_offset == last_avoid_back_to_back_offset) {
      // Avoid back-to-back placement of some instructions.
padding = nop_size;
@@ -1407,7 +1407,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
int new_size = replacement->size(_regalloc);
assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
// Insert padding between avoid_back_to_back branches.
- if (needs_padding && replacement->avoid_back_to_back()) {
+ if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
MachNode *nop = new (this) MachNopNode();
block->insert_node(nop, j++);
_cfg->map_node_to_block(nop, block);
@@ -1515,7 +1515,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
last_call_offset = current_offset;
}
- if (n->is_Mach() && n->as_Mach()->avoid_back_to_back()) {
+ if (n->is_Mach() && n->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
    // Avoid back-to-back placement of some instructions.
last_avoid_back_to_back_offset = current_offset;
}
diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp
index 978679ac4..b7706304b 100644
--- a/src/share/vm/prims/jvm.cpp
+++ b/src/share/vm/prims/jvm.cpp
@@ -1217,7 +1217,8 @@ JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, job
// get run() method
Method* m_oop = object->klass()->uncached_lookup_method(
vmSymbols::run_method_name(),
- vmSymbols::void_object_signature());
+ vmSymbols::void_object_signature(),
+ Klass::normal);
methodHandle m (THREAD, m_oop);
if (m.is_null() || !m->is_method() || !m()->is_public() || m()->is_static()) {
THROW_MSG_0(vmSymbols::java_lang_InternalError(), "No run method");
diff --git a/src/share/vm/prims/nativeLookup.cpp b/src/share/vm/prims/nativeLookup.cpp
index d30151326..188e7fce9 100644
--- a/src/share/vm/prims/nativeLookup.cpp
+++ b/src/share/vm/prims/nativeLookup.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -408,7 +408,7 @@ address NativeLookup::base_library_lookup(const char* class_name, const char* me
// Find method and invoke standard lookup
methodHandle method (THREAD,
- klass->uncached_lookup_method(m_name, s_name));
+ klass->uncached_lookup_method(m_name, s_name, Klass::normal));
address result = lookup(method, in_base_library, CATCH);
assert(in_base_library, "must be in basic library");
guarantee(result != NULL, "must be non NULL");
diff --git a/src/share/vm/prims/whitebox.cpp b/src/share/vm/prims/whitebox.cpp
index 76f38fc9d..80cb0dfa5 100644
--- a/src/share/vm/prims/whitebox.cpp
+++ b/src/share/vm/prims/whitebox.cpp
@@ -438,6 +438,30 @@ WB_ENTRY(jboolean, WB_EnqueueMethodForCompilation(JNIEnv* env, jobject o, jobjec
return (mh->queued_for_compilation() || nm != NULL);
WB_END
+class VM_WhiteBoxOperation : public VM_Operation {
+ public:
+ VM_WhiteBoxOperation() { }
+ VMOp_Type type() const { return VMOp_WhiteBoxOperation; }
+ bool allow_nested_vm_operations() const { return true; }
+};
+
+class AlwaysFalseClosure : public BoolObjectClosure {
+ public:
+ bool do_object_b(oop p) { return false; }
+};
+
+static AlwaysFalseClosure always_false;
+
+class VM_WhiteBoxCleanMethodData : public VM_WhiteBoxOperation {
+ public:
+ VM_WhiteBoxCleanMethodData(MethodData* mdo) : _mdo(mdo) { }
+ void doit() {
+ _mdo->clean_method_data(&always_false);
+ }
+ private:
+ MethodData* _mdo;
+};
+
WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION(env);
@@ -453,6 +477,8 @@ WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
for (int i = 0; i < arg_count; i++) {
mdo->set_arg_modified(i, 0);
}
+ VM_WhiteBoxCleanMethodData op(mdo);
+ VMThread::execute(&op);
}
mh->clear_not_c1_compilable();
diff --git a/src/share/vm/runtime/sharedRuntime.cpp b/src/share/vm/runtime/sharedRuntime.cpp
index 486034348..0b3d62893 100644
--- a/src/share/vm/runtime/sharedRuntime.cpp
+++ b/src/share/vm/runtime/sharedRuntime.cpp
@@ -960,14 +960,13 @@ jlong SharedRuntime::get_java_tid(Thread* thread) {
* it gets turned into a tail-call on sparc, which runs into dtrace bug
* 6254741. Once that is fixed we can remove the dummy return value.
*/
-int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
- return dtrace_object_alloc_base(Thread::current(), o);
+int SharedRuntime::dtrace_object_alloc(oopDesc* o, int size) {
+ return dtrace_object_alloc_base(Thread::current(), o, size);
}
-int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
+int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o, int size) {
assert(DTraceAllocProbes, "wrong call");
Klass* klass = o->klass();
- int size = o->size();
Symbol* name = klass->name();
#ifndef USDT2
HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
diff --git a/src/share/vm/runtime/sharedRuntime.hpp b/src/share/vm/runtime/sharedRuntime.hpp
index bb3ec22c3..17b0e030f 100644
--- a/src/share/vm/runtime/sharedRuntime.hpp
+++ b/src/share/vm/runtime/sharedRuntime.hpp
@@ -261,8 +261,8 @@ class SharedRuntime: AllStatic {
static void register_finalizer(JavaThread* thread, oopDesc* obj);
// dtrace notifications
- static int dtrace_object_alloc(oopDesc* o);
- static int dtrace_object_alloc_base(Thread* thread, oopDesc* o);
+ static int dtrace_object_alloc(oopDesc* o, int size);
+ static int dtrace_object_alloc_base(Thread* thread, oopDesc* o, int size);
static int dtrace_method_entry(JavaThread* thread, Method* m);
static int dtrace_method_exit(JavaThread* thread, Method* m);
diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp
index 6a10e73ab..37fba0986 100644
--- a/src/share/vm/runtime/thread.cpp
+++ b/src/share/vm/runtime/thread.cpp
@@ -239,7 +239,6 @@ Thread::Thread() {
debug_only(_allow_allocation_count = 0;)
NOT_PRODUCT(_allow_safepoint_count = 0;)
NOT_PRODUCT(_skip_gcalot = false;)
- CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
_jvmti_env_iteration_count = 0;
set_allocated_bytes(0);
_vm_operation_started_count = 0;
diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp
index 1388432c2..eb6247908 100644
--- a/src/share/vm/runtime/thread.hpp
+++ b/src/share/vm/runtime/thread.hpp
@@ -249,9 +249,6 @@ class Thread: public ThreadShadow {
// Used by SkipGCALot class.
NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?
- // Record when GC is locked out via the GC_locker mechanism
- CHECK_UNHANDLED_OOPS_ONLY(int _gc_locked_out_count;)
-
friend class No_Alloc_Verifier;
friend class No_Safepoint_Verifier;
friend class Pause_No_Safepoint_Verifier;
@@ -397,7 +394,6 @@ class Thread: public ThreadShadow {
void clear_unhandled_oops() {
if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
}
- bool is_gc_locked_out() { return _gc_locked_out_count > 0; }
#endif // CHECK_UNHANDLED_OOPS
#ifndef PRODUCT
diff --git a/src/share/vm/runtime/unhandledOops.cpp b/src/share/vm/runtime/unhandledOops.cpp
index 4cc584e8b..cc0002d42 100644
--- a/src/share/vm/runtime/unhandledOops.cpp
+++ b/src/share/vm/runtime/unhandledOops.cpp
@@ -113,9 +113,7 @@ void UnhandledOops::unregister_unhandled_oop(oop* op) {
void UnhandledOops::clear_unhandled_oops() {
assert (CheckUnhandledOops, "should only be called with checking option");
- if (_thread->is_gc_locked_out()) {
- return;
- }
+
for (int k = 0; k < _oop_list->length(); k++) {
UnhandledOopEntry entry = _oop_list->at(k);
// If an entry is on the unhandled oop list but isn't on the stack
diff --git a/src/share/vm/runtime/vm_operations.hpp b/src/share/vm/runtime/vm_operations.hpp
index 0cb3a18f9..34151e0e4 100644
--- a/src/share/vm/runtime/vm_operations.hpp
+++ b/src/share/vm/runtime/vm_operations.hpp
@@ -95,6 +95,7 @@
template(Exit) \
template(LinuxDllLoad) \
template(RotateGCLog) \
+ template(WhiteBoxOperation) \
class VM_Operation: public CHeapObj<mtInternal> {
public:
diff --git a/src/share/vm/services/memoryPool.cpp b/src/share/vm/services/memoryPool.cpp
index 655ee68e9..5d9c2538b 100644
--- a/src/share/vm/services/memoryPool.cpp
+++ b/src/share/vm/services/memoryPool.cpp
@@ -268,7 +268,7 @@ MemoryUsage MetaspacePool::get_memory_usage() {
}
size_t MetaspacePool::used_in_bytes() {
- return MetaspaceAux::allocated_used_bytes();
+ return MetaspaceAux::used_bytes();
}
size_t MetaspacePool::calculate_max_size() const {
@@ -280,7 +280,7 @@ CompressedKlassSpacePool::CompressedKlassSpacePool() :
MemoryPool("Compressed Class Space", NonHeap, 0, CompressedClassSpaceSize, true, false) { }
size_t CompressedKlassSpacePool::used_in_bytes() {
- return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
+ return MetaspaceAux::used_bytes(Metaspace::ClassType);
}
MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
diff --git a/src/share/vm/trace/trace.xml b/src/share/vm/trace/trace.xml
index cd71b64a2..fc3419836 100644
--- a/src/share/vm/trace/trace.xml
+++ b/src/share/vm/trace/trace.xml
@@ -185,7 +185,7 @@ Declares a structure type that can be used in other events.
</event>
<struct id="MetaspaceSizes">
- <value type="BYTES64" field="capacity" label="Capacity" description="Total available memory to allocate in" />
+ <value type="BYTES64" field="committed" label="Committed" description="Committed memory for this space" />
<value type="BYTES64" field="used" label="Used" description="Bytes allocated by objects in the space" />
<value type="BYTES64" field="reserved" label="Reserved" description="Reserved memory for this space" />
</struct>