author    roland <none@none> 2013-08-21 13:34:45 +0200
committer roland <none@none> 2013-08-21 13:34:45 +0200
commit    ebbc325223a8e617a18338073756d826f460323a (patch)
tree      bffcaf0b12e17daa365f1814622fa97cbbd1b33c
parent    ed3bcdde138da3d6490864bbae12d2cf9968e6fd (diff)
7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked
Summary: Do patching rather than bailing out for an unlinked call with an appendix
Reviewed-by: twisti, kvn
-rw-r--r--  src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp | 5
-rw-r--r--  src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp | 2
-rw-r--r--  src/cpu/sparc/vm/c1_Runtime1_sparc.cpp | 6
-rw-r--r--  src/cpu/x86/vm/c1_CodeStubs_x86.cpp | 3
-rw-r--r--  src/cpu/x86/vm/c1_LIRAssembler_x86.cpp | 2
-rw-r--r--  src/cpu/x86/vm/c1_Runtime1_x86.cpp | 7
-rw-r--r--  src/share/vm/c1/c1_CodeStubs.hpp | 5
-rw-r--r--  src/share/vm/c1/c1_GraphBuilder.cpp | 28
-rw-r--r--  src/share/vm/c1/c1_LIR.hpp | 2
-rw-r--r--  src/share/vm/c1/c1_LIRAssembler.cpp | 11
-rw-r--r--  src/share/vm/c1/c1_LIRAssembler.hpp | 2
-rw-r--r--  src/share/vm/c1/c1_Runtime1.cpp | 129
-rw-r--r--  src/share/vm/c1/c1_Runtime1.hpp | 2
-rw-r--r--  src/share/vm/c1/c1_globals.cpp | 2
-rw-r--r--  src/share/vm/c1/c1_globals.hpp | 10
-rw-r--r--  src/share/vm/ci/ciEnv.cpp | 4
-rw-r--r--  src/share/vm/ci/ciEnv.hpp | 1
-rw-r--r--  src/share/vm/ci/ciMethod.hpp | 4
-rw-r--r--  src/share/vm/ci/ciObjectFactory.cpp | 5
-rw-r--r--  src/share/vm/ci/ciObjectFactory.hpp | 2
-rw-r--r--  src/share/vm/runtime/globals.cpp | 3
-rw-r--r--  src/share/vm/runtime/globals_extension.hpp | 5
-rw-r--r--  src/share/vm/runtime/sharedRuntime.cpp | 9
23 files changed, 185 insertions, 64 deletions
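For orientation before the per-file diffs: the mechanism being extended here is C1's PatchingStub scheme. Compiled code is emitted with a placeholder constant and a stub; the first execution traps into the runtime (Runtime1::patch_code), which resolves the missing value, rewrites the instruction, and resumes, so later executions see the patched value directly. A minimal stand-alone C++ toy of that resolve-on-first-use shape (CallSite, patch_stub and friends are hypothetical names, not HotSpot code):

#include <cstdio>

struct CallSite;
typedef void (*Target)(CallSite*);

struct CallSite {
  Target target;   // the patchable slot; initially points at the patch stub
};

static void linked_target(CallSite*) {
  std::puts("running linked call site");
}

// Plays the role of Runtime1::patch_code: resolve, patch the slot, re-enter.
static void patch_stub(CallSite* cs) {
  std::puts("patching unlinked call site");
  cs->target = linked_target;   // patch
  cs->target(cs);               // re-execute with the resolved target
}

int main() {
  CallSite cs = { patch_stub };
  cs.target(&cs);   // first call goes through the patch stub
  cs.target(&cs);   // later calls run the linked target directly
}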
diff --git a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
index 113665220..c60390553 100644
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
@@ -307,7 +307,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
assert(a_byte == *start++, "should be the same code");
}
#endif
- } else if (_id == load_mirror_id) {
+ } else if (_id == load_mirror_id || _id == load_appendix_id) {
// produce a copy of the load mirror instruction for use by the being initialized case
#ifdef ASSERT
address start = __ pc();
@@ -384,6 +384,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+ case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
default: ShouldNotReachHere();
}
__ bind(call_patch);
@@ -397,7 +398,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
ce->add_call_info_here(_info);
__ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
__ delayed()->nop();
- if (_id == load_klass_id || _id == load_mirror_id) {
+ if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
CodeSection* cs = __ code_section();
address pc = (address)_pc_start;
RelocIterator iter(cs, pc, pc + 1);
diff --git a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
index 647450159..12d51571c 100644
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
@@ -520,7 +520,7 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
// Allocate a new index in table to hold the object once it's been patched
int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
- PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
+ PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
diff --git a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
index be4ae63e6..bc6331035 100644
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
+++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
@@ -804,6 +804,12 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
+ case load_appendix_patching_id:
+ { __ set_info("load_appendix_patching", dont_gc_arguments);
+ oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+ }
+ break;
+
case dtrace_object_alloc_id:
{ // O0: object
__ set_info("dtrace_object_alloc", dont_gc_arguments);
diff --git a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
index cef3cdbbe..1e0c3d9c5 100644
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
@@ -402,6 +402,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+ case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
default: ShouldNotReachHere();
}
__ bind(call_patch);
@@ -419,7 +420,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
__ nop();
}
- if (_id == load_klass_id || _id == load_mirror_id) {
+ if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
CodeSection* cs = __ code_section();
RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
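The two switches above (SPARC and x86) encode the same table: each PatchID maps to a Runtime1 stub entry plus a relocation type, and load_appendix_id reuses oop_type because the appendix (a MethodType or CallSite) is an ordinary Java object, unlike load_klass_id's metadata_type. A hedged stand-alone sketch of that classification, with toy enums rather than the HotSpot definitions:

#include <cstdio>

enum PatchID   { access_field_id, load_klass_id, load_mirror_id, load_appendix_id };
enum RelocType { reloc_none, reloc_metadata, reloc_oop };

// Toy version of the reloc_type dispatch in PatchingStub::emit_code above.
static RelocType reloc_for(PatchID id) {
  switch (id) {
    case access_field_id:  return reloc_none;      // field offset lives in the instruction
    case load_klass_id:    return reloc_metadata;  // Klass* is metadata, not an oop
    case load_mirror_id:                           // java.lang.Class mirror is an oop
    case load_appendix_id: return reloc_oop;       // so is the appendix
  }
  return reloc_none;
}

int main() {
  std::printf("appendix reloc kind: %d\n", reloc_for(load_appendix_id));
}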
diff --git a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
index b5bceeb60..334d0cc92 100644
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
@@ -362,7 +362,7 @@ int LIR_Assembler::check_icache() {
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
jobject o = NULL;
- PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id);
+ PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
__ movoop(reg, o);
patching_epilog(patch, lir_patch_normal, reg, info);
}
diff --git a/src/cpu/x86/vm/c1_Runtime1_x86.cpp b/src/cpu/x86/vm/c1_Runtime1_x86.cpp
index ff9c11d86..e4066e6b4 100644
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp
@@ -1499,6 +1499,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
+ case load_appendix_patching_id:
+ { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
+ // we should set up register map
+ oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+ }
+ break;
+
case dtrace_object_alloc_id:
{ // rax,: object
StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
diff --git a/src/share/vm/c1/c1_CodeStubs.hpp b/src/share/vm/c1/c1_CodeStubs.hpp
index 7235cd6c3..5f4a04c5b 100644
--- a/src/share/vm/c1/c1_CodeStubs.hpp
+++ b/src/share/vm/c1/c1_CodeStubs.hpp
@@ -364,7 +364,8 @@ class PatchingStub: public CodeStub {
enum PatchID {
access_field_id,
load_klass_id,
- load_mirror_id
+ load_mirror_id,
+ load_appendix_id
};
enum constants {
patch_info_size = 3
@@ -417,7 +418,7 @@ class PatchingStub: public CodeStub {
}
NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start());
n_move->set_offset(field_offset);
- } else if (_id == load_klass_id || _id == load_mirror_id) {
+ } else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
assert(_obj != noreg, "must have register object for load_klass/load_mirror");
#ifdef ASSERT
// verify that we're pointing at a NativeMovConstReg
diff --git a/src/share/vm/c1/c1_GraphBuilder.cpp b/src/share/vm/c1/c1_GraphBuilder.cpp
index b84c8911e..1d0b9243d 100644
--- a/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -1667,9 +1667,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
assert(declared_signature != NULL, "cannot be null");
- // FIXME bail out for now
- if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
- BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
+ if (!C1PatchInvokeDynamic && Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
+ BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)");
}
// we have to make sure the argument size (incl. the receiver)
@@ -1713,10 +1712,23 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
break;
}
+ } else {
+ if (bc_raw == Bytecodes::_invokehandle) {
+ assert(!will_link, "should come here only for unlinked call");
+ code = Bytecodes::_invokespecial;
+ }
}
// Push appendix argument (MethodType, CallSite, etc.), if one.
- if (stream()->has_appendix()) {
+ bool patch_for_appendix = false;
+ int patching_appendix_arg = 0;
+ if (C1PatchInvokeDynamic &&
+ (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot))) {
+ Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
+ apush(arg);
+ patch_for_appendix = true;
+ patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
+ } else if (stream()->has_appendix()) {
ciObject* appendix = stream()->get_appendix();
Value arg = append(new Constant(new ObjectConstant(appendix)));
apush(arg);
@@ -1732,7 +1744,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
!(// %%% FIXME: Are both of these relevant?
target->is_method_handle_intrinsic() ||
- target->is_compiled_lambda_form())) {
+ target->is_compiled_lambda_form()) &&
+ !patch_for_appendix) {
Value receiver = NULL;
ciInstanceKlass* receiver_klass = NULL;
bool type_is_exact = false;
@@ -1850,7 +1863,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
// check if we could do inlining
if (!PatchALot && Inline && klass->is_loaded() &&
(klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
- && target->is_loaded()) {
+ && target->is_loaded()
+ && !patch_for_appendix) {
// callee is known => check if we have static binding
assert(target->is_loaded(), "callee must be known");
if (code == Bytecodes::_invokestatic ||
@@ -1901,7 +1915,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code == Bytecodes::_invokespecial ||
code == Bytecodes::_invokevirtual ||
code == Bytecodes::_invokeinterface;
- Values* args = state()->pop_arguments(target->arg_size_no_receiver());
+ Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
Value recv = has_receiver ? apop() : NULL;
int vtable_index = Method::invalid_vtable_index;
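The GraphBuilder logic above keeps the operand stack consistent when the appendix is not yet known: a placeholder constant (an unloaded ciInstance) is pushed where the appendix would go, and patching_appendix_arg records the extra slot so pop_arguments removes it together with the declared arguments. A tiny self-contained model of that bookkeeping (a std::vector stands in for the value stack; all names are hypothetical):

#include <cassert>
#include <vector>

int main() {
  std::vector<int> stack = {1, 2};    // the call's two declared arguments
  const std::size_t arg_size_no_receiver = 2;

  bool will_link = false;             // call site not linked at compile time
  std::size_t patching_appendix_arg = 0;
  if (!will_link) {
    stack.push_back(0);               // placeholder appendix, patched at run time
    patching_appendix_arg = 1;        // remember the extra stack slot
  }

  // pop_arguments must account for the placeholder, as in the patch above.
  std::size_t to_pop = arg_size_no_receiver + patching_appendix_arg;
  assert(stack.size() >= to_pop);
  stack.resize(stack.size() - to_pop);
  return 0;
}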
diff --git a/src/share/vm/c1/c1_LIR.hpp b/src/share/vm/c1/c1_LIR.hpp
index fab85e575..d0dca72f3 100644
--- a/src/share/vm/c1/c1_LIR.hpp
+++ b/src/share/vm/c1/c1_LIR.hpp
@@ -1211,8 +1211,6 @@ class LIR_OpJavaCall: public LIR_OpCall {
bool is_invokedynamic() const { return code() == lir_dynamic_call; }
bool is_method_handle_invoke() const {
return
- is_invokedynamic() // An invokedynamic is always a MethodHandle call site.
- ||
method()->is_compiled_lambda_form() // Java-generated adapter
||
method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic
diff --git a/src/share/vm/c1/c1_LIRAssembler.cpp b/src/share/vm/c1/c1_LIRAssembler.cpp
index a76f5bb2e..9ae527054 100644
--- a/src/share/vm/c1/c1_LIRAssembler.cpp
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp
@@ -93,12 +93,23 @@ void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_cod
default:
ShouldNotReachHere();
}
+ } else if (patch->id() == PatchingStub::load_appendix_id) {
+ Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
+ assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
} else {
ShouldNotReachHere();
}
#endif
}
+PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
+ IRScope* scope = info->scope();
+ Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
+ if (Bytecodes::has_optional_appendix(bc_raw)) {
+ return PatchingStub::load_appendix_id;
+ }
+ return PatchingStub::load_mirror_id;
+}
//---------------------------------------------------------------
diff --git a/src/share/vm/c1/c1_LIRAssembler.hpp b/src/share/vm/c1/c1_LIRAssembler.hpp
index 4ced297c0..57df2725e 100644
--- a/src/share/vm/c1/c1_LIRAssembler.hpp
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp
@@ -119,6 +119,8 @@ class LIR_Assembler: public CompilationResourceObj {
void comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op);
+ PatchingStub::PatchID patching_id(CodeEmitInfo* info);
+
public:
LIR_Assembler(Compilation* c);
~LIR_Assembler();
diff --git a/src/share/vm/c1/c1_Runtime1.cpp b/src/share/vm/c1/c1_Runtime1.cpp
index 9a1c4cce2..037bc31f6 100644
--- a/src/share/vm/c1/c1_Runtime1.cpp
+++ b/src/share/vm/c1/c1_Runtime1.cpp
@@ -819,6 +819,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code
+ Handle appendix(THREAD, NULL); // oop needed by appendix_patching code
bool load_klass_or_mirror_patch_id =
(stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
@@ -888,10 +889,32 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
mirror = Handle(THREAD, m);
}
break;
- default: Unimplemented();
+ default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
}
// convert to handle
load_klass = KlassHandle(THREAD, k);
+ } else if (stub_id == load_appendix_patching_id) {
+ Bytecode_invoke bytecode(caller_method, bci);
+ Bytecodes::Code bc = bytecode.invoke_code();
+
+ CallInfo info;
+ constantPoolHandle pool(thread, caller_method->constants());
+ int index = bytecode.index();
+ LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
+ appendix = info.resolved_appendix();
+ switch (bc) {
+ case Bytecodes::_invokehandle: {
+ int cache_index = ConstantPool::decode_cpcache_index(index, true);
+ assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
+ pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
+ break;
+ }
+ case Bytecodes::_invokedynamic: {
+ pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
+ break;
+ }
+ default: fatal("unexpected bytecode for load_appendix_patching_id");
+ }
} else {
ShouldNotReachHere();
}
@@ -992,8 +1015,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
n_copy->data() == (intptr_t)Universe::non_oop_word(),
"illegal init value");
if (stub_id == Runtime1::load_klass_patching_id) {
- assert(load_klass() != NULL, "klass not set");
- n_copy->set_data((intx) (load_klass()));
+ assert(load_klass() != NULL, "klass not set");
+ n_copy->set_data((intx) (load_klass()));
} else {
assert(mirror() != NULL, "klass not set");
n_copy->set_data((intx) (mirror()));
@@ -1002,43 +1025,55 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
if (TracePatching) {
Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
}
-
-#if defined(SPARC) || defined(PPC)
- // Update the location in the nmethod with the proper
- // metadata. When the code was generated, a NULL was stuffed
- // in the metadata table and that table needs to be update to
- // have the right value. On intel the value is kept
- // directly in the instruction instead of in the metadata
- // table, so set_data above effectively updated the value.
- nmethod* nm = CodeCache::find_nmethod(instr_pc);
- assert(nm != NULL, "invalid nmethod_pc");
- RelocIterator mds(nm, copy_buff, copy_buff + 1);
- bool found = false;
- while (mds.next() && !found) {
- if (mds.type() == relocInfo::oop_type) {
- assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
- oop_Relocation* r = mds.oop_reloc();
- oop* oop_adr = r->oop_addr();
- *oop_adr = mirror();
- r->fix_oop_relocation();
- found = true;
- } else if (mds.type() == relocInfo::metadata_type) {
- assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
- metadata_Relocation* r = mds.metadata_reloc();
- Metadata** metadata_adr = r->metadata_addr();
- *metadata_adr = load_klass();
- r->fix_metadata_relocation();
- found = true;
- }
- }
- assert(found, "the metadata must exist!");
-#endif
-
+ }
+ } else if (stub_id == Runtime1::load_appendix_patching_id) {
+ NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
+ assert(n_copy->data() == 0 ||
+ n_copy->data() == (intptr_t)Universe::non_oop_word(),
+ "illegal init value");
+ n_copy->set_data((intx) (appendix()));
+
+ if (TracePatching) {
+ Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
}
} else {
ShouldNotReachHere();
}
+#if defined(SPARC) || defined(PPC)
+ if (load_klass_or_mirror_patch_id ||
+ stub_id == Runtime1::load_appendix_patching_id) {
+ // Update the location in the nmethod with the proper
+ // metadata. When the code was generated, a NULL was stuffed
+ // in the metadata table and that table needs to be update to
+ // have the right value. On intel the value is kept
+ // directly in the instruction instead of in the metadata
+ // table, so set_data above effectively updated the value.
+ nmethod* nm = CodeCache::find_nmethod(instr_pc);
+ assert(nm != NULL, "invalid nmethod_pc");
+ RelocIterator mds(nm, copy_buff, copy_buff + 1);
+ bool found = false;
+ while (mds.next() && !found) {
+ if (mds.type() == relocInfo::oop_type) {
+ assert(stub_id == Runtime1::load_mirror_patching_id ||
+ stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
+ oop_Relocation* r = mds.oop_reloc();
+ oop* oop_adr = r->oop_addr();
+ *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
+ r->fix_oop_relocation();
+ found = true;
+ } else if (mds.type() == relocInfo::metadata_type) {
+ assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
+ metadata_Relocation* r = mds.metadata_reloc();
+ Metadata** metadata_adr = r->metadata_addr();
+ *metadata_adr = load_klass();
+ r->fix_metadata_relocation();
+ found = true;
+ }
+ }
+ assert(found, "the metadata must exist!");
+ }
+#endif
if (do_patch) {
// replace instructions
// first replace the tail, then the call
@@ -1077,7 +1112,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
ICache::invalidate_range(instr_pc, *byte_count);
NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
- if (load_klass_or_mirror_patch_id) {
+ if (load_klass_or_mirror_patch_id ||
+ stub_id == Runtime1::load_appendix_patching_id) {
relocInfo::relocType rtype =
(stub_id == Runtime1::load_klass_patching_id) ?
relocInfo::metadata_type :
@@ -1118,7 +1154,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
// If we are patching in a non-perm oop, make sure the nmethod
// is on the right list.
- if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
+ if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
+ (appendix.not_null() && appendix->is_scavengable()))) {
MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
guarantee(nm != NULL, "only nmethods can contain non-perm oops");
@@ -1179,6 +1216,24 @@ int Runtime1::move_mirror_patching(JavaThread* thread) {
return caller_is_deopted();
}
+int Runtime1::move_appendix_patching(JavaThread* thread) {
+//
+// NOTE: we are still in Java
+//
+ Thread* THREAD = thread;
+ debug_only(NoHandleMark nhm;)
+ {
+ // Enter VM mode
+
+ ResetNoHandleMark rnhm;
+ patch_code(thread, load_appendix_patching_id);
+ }
+ // Back in JAVA, use no oops DON'T safepoint
+
+ // Return true if calling code is deoptimized
+
+ return caller_is_deopted();
+}
//
// Entry point for compiled code. We want to patch a nmethod.
// We don't do a normal VM transition here because we want to
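move_appendix_patching follows the same entry-point pattern as the existing move_klass/move_mirror variants: do the patching inside a VM transition, then report whether the caller's compiled frame was deoptimized in the process so compiled code knows whether it may simply continue. A toy of just that shape (plain C++, hypothetical names, not the real VM-transition machinery):

#include <cstdio>

static bool caller_deopted = false;

static void patch_code_toy() {
  // Resolving an invokedynamic call site can run Java code, which may
  // invalidate (deoptimize) the compiled caller; model that as a side effect.
  caller_deopted = true;
}

static int move_appendix_patching_toy() {
  patch_code_toy();
  return caller_deopted ? 1 : 0;   // nonzero: do not re-enter the old frame
}

int main() {
  std::printf("caller deoptimized: %d\n", move_appendix_patching_toy());
}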
diff --git a/src/share/vm/c1/c1_Runtime1.hpp b/src/share/vm/c1/c1_Runtime1.hpp
index 9b12d2622..e41f2f188 100644
--- a/src/share/vm/c1/c1_Runtime1.hpp
+++ b/src/share/vm/c1/c1_Runtime1.hpp
@@ -67,6 +67,7 @@ class StubAssembler;
stub(access_field_patching) \
stub(load_klass_patching) \
stub(load_mirror_patching) \
+ stub(load_appendix_patching) \
stub(g1_pre_barrier_slow) \
stub(g1_post_barrier_slow) \
stub(fpu2long_stub) \
@@ -160,6 +161,7 @@ class Runtime1: public AllStatic {
static int access_field_patching(JavaThread* thread);
static int move_klass_patching(JavaThread* thread);
static int move_mirror_patching(JavaThread* thread);
+ static int move_appendix_patching(JavaThread* thread);
static void patch_code(JavaThread* thread, StubID stub_id);
diff --git a/src/share/vm/c1/c1_globals.cpp b/src/share/vm/c1/c1_globals.cpp
index a611f033e..553b9aa43 100644
--- a/src/share/vm/c1/c1_globals.cpp
+++ b/src/share/vm/c1/c1_globals.cpp
@@ -25,4 +25,4 @@
#include "precompiled.hpp"
#include "c1/c1_globals.hpp"
-C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
diff --git a/src/share/vm/c1/c1_globals.hpp b/src/share/vm/c1/c1_globals.hpp
index 844880be2..3dceebc9d 100644
--- a/src/share/vm/c1/c1_globals.hpp
+++ b/src/share/vm/c1/c1_globals.hpp
@@ -54,7 +54,7 @@
//
// Defines all global flags used by the client compiler.
//
-#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct) \
+#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
\
/* Printing */ \
notproduct(bool, PrintC1Statistics, false, \
@@ -333,15 +333,19 @@
"Use CHA and exact type results at call sites when updating MDOs")\
\
product(bool, C1UpdateMethodData, trueInTiered, \
- "Update MethodData*s in Tier1-generated code") \
+ "Update MethodData*s in Tier1-generated code") \
\
develop(bool, PrintCFGToFile, false, \
"print control flow graph to a separate file during compilation") \
\
+ diagnostic(bool, C1PatchInvokeDynamic, true, \
+ "Patch invokedynamic appendix not known at compile time") \
+ \
+ \
// Read default values for c1 globals
-C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
#endif // SHARE_VM_C1_C1_GLOBALS_HPP
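The flag changes in the remaining files all follow from one fact: C1_FLAGS is an X-macro, so adding a diagnostic() column means every expansion site (the materialization in c1_globals.cpp, the declarations here, the flag table and member enums in runtime/globals.cpp and globals_extension.hpp) must gain a matching callback. A self-contained illustration of the pattern with made-up flags (MY_FLAGS, UseThing, PatchThing are all hypothetical):

#include <cstdio>

#define MY_FLAGS(product, diagnostic)                             \
  product(bool, UseThing, true, "use the thing")                  \
  diagnostic(bool, PatchThing, true, "patch the thing lazily")

// Expansion 1: materialize the flag variables (cf. c1_globals.cpp).
#define DEFINE_FLAG(type, name, value, doc) type name = value;
MY_FLAGS(DEFINE_FLAG, DEFINE_FLAG)
#undef DEFINE_FLAG

// Expansion 2: build a printable flag table (cf. runtime/globals.cpp).
#define PRINT_FLAG(type, name, value, doc) std::printf("%-10s %s\n", #name, doc);

int main() {
  MY_FLAGS(PRINT_FLAG, PRINT_FLAG)
  return 0;
}

Because it is declared diagnostic, C1PatchInvokeDynamic must be unlocked with -XX:+UnlockDiagnosticVMOptions before it can be toggled in product builds.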
diff --git a/src/share/vm/ci/ciEnv.cpp b/src/share/vm/ci/ciEnv.cpp
index 7776db5eb..0102b2b21 100644
--- a/src/share/vm/ci/ciEnv.cpp
+++ b/src/share/vm/ci/ciEnv.cpp
@@ -1150,6 +1150,10 @@ void ciEnv::record_out_of_memory_failure() {
record_method_not_compilable("out of memory");
}
+ciInstance* ciEnv::unloaded_ciinstance() {
+ GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
+}
+
void ciEnv::dump_replay_data(outputStream* out) {
VM_ENTRY_MARK;
MutexLocker ml(Compile_lock);
diff --git a/src/share/vm/ci/ciEnv.hpp b/src/share/vm/ci/ciEnv.hpp
index 45dd42eb2..01f417d2f 100644
--- a/src/share/vm/ci/ciEnv.hpp
+++ b/src/share/vm/ci/ciEnv.hpp
@@ -400,6 +400,7 @@ public:
static ciInstanceKlass* unloaded_ciinstance_klass() {
return _unloaded_ciinstance_klass;
}
+ ciInstance* unloaded_ciinstance();
ciKlass* find_system_klass(ciSymbol* klass_name);
// Note: To find a class from its name string, use ciSymbol::make,
diff --git a/src/share/vm/ci/ciMethod.hpp b/src/share/vm/ci/ciMethod.hpp
index 8305547c5..ddff0ac9b 100644
--- a/src/share/vm/ci/ciMethod.hpp
+++ b/src/share/vm/ci/ciMethod.hpp
@@ -177,6 +177,10 @@ class ciMethod : public ciMetadata {
address bcp = code() + bci;
return Bytecodes::java_code_at(NULL, bcp);
}
+ Bytecodes::Code raw_code_at_bci(int bci) {
+ address bcp = code() + bci;
+ return Bytecodes::code_at(NULL, bcp);
+ }
BCEscapeAnalyzer *get_bcea();
ciMethodBlocks *get_method_blocks();
diff --git a/src/share/vm/ci/ciObjectFactory.cpp b/src/share/vm/ci/ciObjectFactory.cpp
index cc1be034d..a22fcf62c 100644
--- a/src/share/vm/ci/ciObjectFactory.cpp
+++ b/src/share/vm/ci/ciObjectFactory.cpp
@@ -563,7 +563,10 @@ ciInstance* ciObjectFactory::get_unloaded_method_type_constant(ciSymbol* signatu
return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
}
-
+ciInstance* ciObjectFactory::get_unloaded_object_constant() {
+ if (ciEnv::_Object_klass == NULL) return NULL;
+ return get_unloaded_instance(ciEnv::_Object_klass->as_instance_klass());
+}
//------------------------------------------------------------------
// ciObjectFactory::get_empty_methodData
diff --git a/src/share/vm/ci/ciObjectFactory.hpp b/src/share/vm/ci/ciObjectFactory.hpp
index 29de514b2..ba3d88c12 100644
--- a/src/share/vm/ci/ciObjectFactory.hpp
+++ b/src/share/vm/ci/ciObjectFactory.hpp
@@ -131,6 +131,8 @@ public:
ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);
+ ciInstance* get_unloaded_object_constant();
+
// Get the ciMethodData representing the methodData for a method
// with none.
ciMethodData* get_empty_methodData();
diff --git a/src/share/vm/runtime/globals.cpp b/src/share/vm/runtime/globals.cpp
index a6c47bfe1..280e15976 100644
--- a/src/share/vm/runtime/globals.cpp
+++ b/src/share/vm/runtime/globals.cpp
@@ -205,6 +205,7 @@ void Flag::print_as_flag(outputStream* st) {
#define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 product}", DEFAULT },
#define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 pd product}", DEFAULT },
+#define C1_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 diagnostic}", DEFAULT },
#ifdef PRODUCT
#define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
#define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc) /* flag is constant */
@@ -260,7 +261,7 @@ static Flag flagTable[] = {
G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
+ C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_DIAGNOSTIC_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
#endif
#ifdef COMPILER2
C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
diff --git a/src/share/vm/runtime/globals_extension.hpp b/src/share/vm/runtime/globals_extension.hpp
index 00d06fe27..bc4fd4a74 100644
--- a/src/share/vm/runtime/globals_extension.hpp
+++ b/src/share/vm/runtime/globals_extension.hpp
@@ -57,6 +57,7 @@
#define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
#define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
+#define C1_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
#ifdef PRODUCT
#define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
#define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc) /* flag is constant */
@@ -99,7 +100,7 @@ typedef enum {
G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
+ C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_DIAGNOSTIC_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
#endif
#ifdef COMPILER2
C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
@@ -131,6 +132,7 @@ typedef enum {
#define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
#define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#define C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
#ifdef PRODUCT
#define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
#define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) /* flag is constant */
@@ -204,6 +206,7 @@ typedef enum {
C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
C1_PRODUCT_FLAG_MEMBER_WITH_TYPE,
C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+ C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
#endif
#ifdef COMPILER2
diff --git a/src/share/vm/runtime/sharedRuntime.cpp b/src/share/vm/runtime/sharedRuntime.cpp
index 874f59391..d014eda2f 100644
--- a/src/share/vm/runtime/sharedRuntime.cpp
+++ b/src/share/vm/runtime/sharedRuntime.cpp
@@ -1051,7 +1051,8 @@ Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
// Find receiver for non-static call
if (bc != Bytecodes::_invokestatic &&
- bc != Bytecodes::_invokedynamic) {
+ bc != Bytecodes::_invokedynamic &&
+ bc != Bytecodes::_invokehandle) {
// This register map must be update since we need to find the receiver for
// compiled frames. The receiver might be in a register.
RegisterMap reg_map2(thread);
@@ -1078,7 +1079,7 @@ Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
#ifdef ASSERT
// Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
- if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
+ if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic && bc != Bytecodes::_invokehandle) {
assert(receiver.not_null(), "should have thrown exception");
KlassHandle receiver_klass(THREAD, receiver->klass());
Klass* rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
@@ -1240,9 +1241,9 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
#endif
if (is_virtual) {
- assert(receiver.not_null(), "sanity check");
+ assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
bool static_bound = call_info.resolved_method()->can_be_statically_bound();
- KlassHandle h_klass(THREAD, receiver->klass());
+ KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
is_optimized, static_bound, virtual_call_info,
CHECK_(methodHandle()));