author     never <none@none>   2011-07-15 15:35:50 -0700
committer  never <none@none>   2011-07-15 15:35:50 -0700
commit     b52cc7882c3c6d789b246850db890f6e05597268 (patch)
tree       33b4e071694b05d98c3c98c8d407ff46df72a4b1 /src/cpu
parent     fe59fe4d17f67abbead69492454cdeb08db0cc58 (diff)

6990212: JSR 292 JVMTI MethodEnter hook is not called for JSR 292 bootstrap and target methods
Summary: check for single stepping when dispatching invokes from method handles
Reviewed-by: coleenp, twisti, kvn, dsamersoff
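The shape of the fix is the same on both architectures: every method-handle dispatch is routed through a new helper that consults the thread's interp_only_mode flag before picking an entry point, mirroring InterpreterMacroAssembler::jump_from_interpreted. As a rough C++ paraphrase of the decision the generated assembly encodes (entry_for and the accessor spellings are illustrative stand-ins, not verbatim HotSpot API):

// Illustrative paraphrase of the new jump_from_method_handle helpers.
// The accessor names below are stand-ins for the fields the stubs read.
address entry_for(JavaThread* thread, methodOopDesc* method) {
  // Default: the from-interpreted entry (compiled code or an i2c adapter).
  address entry = method->from_interpreted_entry();
  // This check is only emitted when JVMTI interpreter events can be
  // enabled at all (JvmtiExport::can_post_interpreter_events()).
  if (thread->interp_only_mode() != 0) {
    // Single-stepping and similar events require interpreted execution,
    // so force the interpreter entry and skip compiled code.
    entry = method->interpreter_entry();
  }
  return entry;  // the stub then jumps to this address
}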
Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/sparc/vm/methodHandles_sparc.cpp   60
-rw-r--r--  src/cpu/sparc/vm/methodHandles_sparc.hpp    4
-rw-r--r--  src/cpu/x86/vm/interp_masm_x86_32.cpp       4
-rw-r--r--  src/cpu/x86/vm/interp_masm_x86_64.cpp       2
-rw-r--r--  src/cpu/x86/vm/methodHandles_x86.cpp       49
-rw-r--r--  src/cpu/x86/vm/methodHandles_x86.hpp        4
6 files changed, 83 insertions(+), 40 deletions(-)
diff --git a/src/cpu/sparc/vm/methodHandles_sparc.cpp b/src/cpu/sparc/vm/methodHandles_sparc.cpp
index 4a383ae56..3df5a8eae 100644
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp
@@ -524,6 +524,30 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
}
#endif // ASSERT
+
+void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp) {
+ assert(method == G5_method, "interpreter calling convention");
+ __ verify_oop(method);
+ __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
+ if (JvmtiExport::can_post_interpreter_events()) {
+ // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+ // compiled code in threads for which the event is enabled. Check here for
+ // interp_only_mode if these events CAN be enabled.
+ __ verify_thread();
+ Label skip_compiled_code;
+
+ const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
+ __ ld(interp_only, temp);
+ __ tst(temp);
+ __ br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
+ __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
+ __ bind(skip_compiled_code);
+ }
+ __ jmp(target, 0);
+ __ delayed()->nop();
+}
+
+
// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
// I5_savedSP/O5_savedSP: sender SP (must preserve)
@@ -1105,9 +1129,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
// Some handy addresses:
- Address G5_method_fie( G5_method, in_bytes(methodOopDesc::from_interpreted_offset()));
- Address G5_method_fce( G5_method, in_bytes(methodOopDesc::from_compiled_offset()));
-
Address G3_mh_vmtarget( G3_method_handle, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());
Address G3_dmh_vmindex( G3_method_handle, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes());
@@ -1136,24 +1157,23 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case _raise_exception:
{
// Not a real MH entry, but rather shared code for raising an
- // exception. Since we use the compiled entry, arguments are
- // expected in compiler argument registers.
+ // exception. For sharing purposes the arguments are passed in registers
+ // and then placed into the interpreter calling convention here.
assert(raise_exception_method(), "must be set");
assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
- __ mov(O5_savedSP, SP); // Cut the stack back to where the caller started.
-
- Label L_no_method;
- // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
__ set(AddressLiteral((address) &_raise_exception_method), G5_method);
__ ld_ptr(Address(G5_method, 0), G5_method);
const int jobject_oop_offset = 0;
__ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
- __ verify_oop(G5_method);
- __ jump_indirect_to(G5_method_fce, O3_scratch); // jump to compiled entry
- __ delayed()->nop();
+ adjust_SP_and_Gargs_down_by_slots(_masm, 3, noreg, noreg);
+
+ __ st_ptr(O0_code, __ argument_address(constant(2), noreg, 0));
+ __ st_ptr(O1_actual, __ argument_address(constant(1), noreg, 0));
+ __ st_ptr(O2_required, __ argument_address(constant(0), noreg, 0));
+ jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
}
break;
@@ -1161,7 +1181,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case _invokespecial_mh:
{
__ load_heap_oop(G3_mh_vmtarget, G5_method); // target is a methodOop
- __ verify_oop(G5_method);
// Same as TemplateTable::invokestatic or invokespecial,
// minus the CP setup and profiling:
if (ek == _invokespecial_mh) {
@@ -1171,8 +1190,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ null_check(G3_method_handle);
__ verify_oop(G3_method_handle);
}
- __ jump_indirect_to(G5_method_fie, O1_scratch);
- __ delayed()->nop();
+ jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
}
break;
@@ -1204,9 +1222,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
__ ld_ptr(vtable_entry_addr, G5_method);
- __ verify_oop(G5_method);
- __ jump_indirect_to(G5_method_fie, O1_scratch);
- __ delayed()->nop();
+ jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
}
break;
@@ -1237,9 +1253,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
O3_scratch,
no_such_interface);
- __ verify_oop(G5_method);
- __ jump_indirect_to(G5_method_fie, O1_scratch);
- __ delayed()->nop();
+ jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
__ bind(no_such_interface);
// Throw an exception.
@@ -1283,9 +1297,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
if (direct_to_method) {
__ load_heap_oop(G3_mh_vmtarget, G5_method); // target is a methodOop
- __ verify_oop(G5_method);
- __ jump_indirect_to(G5_method_fie, O1_scratch);
- __ delayed()->nop();
+ jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
} else {
__ load_heap_oop(G3_mh_vmtarget, G3_method_handle); // target is a methodOop
__ verify_oop(G3_method_handle);
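One subtlety in the SPARC helper above: the branch is emitted with the annul bit set (the true argument to br), so the delayed()->ld_ptr executes only when the branch is taken. A single load can therefore overwrite target with the interpreter entry on the interp-only path while leaving the compiled entry untouched otherwise. A control-flow paraphrase in plain C++ terms (dispatch_to is a hypothetical stand-in for the final jmp):

// Control-flow paraphrase of the SPARC jump_from_method_handle body.
target = method->from_interpreted_entry();       // ld_ptr: default, compiled path
if (JvmtiExport::can_post_interpreter_events()) {
  int interp_only = thread->interp_only_mode();  // ld(interp_only, temp)
  if (interp_only != 0) {                        // br notZero, annulled, pn
    // Annulled delay slot: executes only on the taken path.
    target = method->interpreter_entry();        // delayed()->ld_ptr
  }
  // skip_compiled_code:
}
dispatch_to(target);                             // jmp(target, 0); delayed()->nop()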
diff --git a/src/cpu/sparc/vm/methodHandles_sparc.hpp b/src/cpu/sparc/vm/methodHandles_sparc.hpp
index 4d2588b48..4a0bca804 100644
--- a/src/cpu/sparc/vm/methodHandles_sparc.hpp
+++ b/src/cpu/sparc/vm/methodHandles_sparc.hpp
@@ -221,4 +221,8 @@ public:
"reference is a MH");
}
+ // Similar to InterpreterMacroAssembler::jump_from_interpreted.
+ // Also handles the special dispatch needed when single stepping is active.
+ static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, Register temp2);
+
static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
diff --git a/src/cpu/x86/vm/interp_masm_x86_32.cpp b/src/cpu/x86/vm/interp_masm_x86_32.cpp
index ce3571338..a97d91312 100644
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp
@@ -403,9 +403,9 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register
// interp_only_mode if these events CAN be enabled.
get_thread(temp);
// interp_only is an int; on little endian it is sufficient to test the byte only
- // Is a cmpl faster (ce
+ // Is a cmpl faster?
cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
- jcc(Assembler::zero, run_compiled_code);
+ jccb(Assembler::zero, run_compiled_code);
jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
bind(run_compiled_code);
}
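The only substantive change in the two interp_masm files is jcc -> jccb: the byte-displacement form emits a two-byte instruction instead of the six-byte rel32 form, which is safe here because run_compiled_code is bound right after the following jmp (debug builds assert the target is in rel8 range). A minimal sketch of the pattern using the same MacroAssembler calls as this file, with thread_reg standing in for temp on 32-bit or r15_thread on 64-bit:

// Short-form conditional branch to a nearby label; the bound target must be
// within the +/-127-byte rel8 range for jccb to be valid.
Label run_compiled_code;
cmpb(Address(thread_reg, JavaThread::interp_only_mode_offset()), 0);
jccb(Assembler::zero, run_compiled_code);   // 2 bytes instead of 6
jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
bind(run_compiled_code);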
diff --git a/src/cpu/x86/vm/interp_masm_x86_64.cpp b/src/cpu/x86/vm/interp_masm_x86_64.cpp
index 4b7884cec..cee2705bb 100644
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp
@@ -402,7 +402,7 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register
// interp_only is an int; on little endian it is sufficient to test the byte only
// Is a cmpl faster?
cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
- jcc(Assembler::zero, run_compiled_code);
+ jccb(Assembler::zero, run_compiled_code);
jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
bind(run_compiled_code);
}
diff --git a/src/cpu/x86/vm/methodHandles_x86.cpp b/src/cpu/x86/vm/methodHandles_x86.cpp
index 4df24afb1..30b62c9c5 100644
--- a/src/cpu/x86/vm/methodHandles_x86.cpp
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp
@@ -546,6 +546,28 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
}
#endif //ASSERT
+void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp) {
+ if (JvmtiExport::can_post_interpreter_events()) {
+ Label run_compiled_code;
+ // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+ // compiled code in threads for which the event is enabled. Check here for
+ // interp_only_mode if these events CAN be enabled.
+#ifdef _LP64
+ Register rthread = r15_thread;
+#else
+ Register rthread = temp;
+ __ get_thread(rthread);
+#endif
+ // interp_only is an int; on little endian it is sufficient to test the byte only
+ // Is a cmpl faster?
+ __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
+ __ jccb(Assembler::zero, run_compiled_code);
+ __ jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
+ __ bind(run_compiled_code);
+ }
+ __ jmp(Address(method, methodOopDesc::from_interpreted_offset()));
+}
+
// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
// rbx: methodOop
@@ -1120,9 +1142,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
// some handy addresses
- Address rbx_method_fie( rbx, methodOopDesc::from_interpreted_offset() );
- Address rbx_method_fce( rbx, methodOopDesc::from_compiled_offset() );
-
Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
Address rcx_dmh_vmindex( rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() );
@@ -1163,8 +1182,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
assert(raise_exception_method(), "must be set");
assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
- const Register rdi_pc = rax;
- __ pop(rdi_pc); // caller PC
+ const Register rax_pc = rax;
+ __ pop(rax_pc); // caller PC
__ mov(rsp, saved_last_sp); // cut the stack back to where the caller started
Register rbx_method = rbx_temp;
@@ -1172,11 +1191,15 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
const int jobject_oop_offset = 0;
__ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject
- __ verify_oop(rbx_method);
- NOT_LP64(__ push(rarg2_required));
- __ push(rdi_pc); // restore caller PC
- __ jmp(rbx_method_fce); // jump to compiled entry
+ __ movptr(rsi, rsp);
+ __ subptr(rsp, 3 * wordSize);
+ __ push(rax_pc); // restore caller PC
+
+ __ movptr(__ argument_address(constant(2)), rarg0_code);
+ __ movptr(__ argument_address(constant(1)), rarg1_actual);
+ __ movptr(__ argument_address(constant(0)), rarg2_required);
+ jump_from_method_handle(_masm, rbx_method, rax);
}
break;
@@ -1195,7 +1218,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ null_check(rcx_recv);
__ verify_oop(rcx_recv);
}
- __ jmp(rbx_method_fie);
+ jump_from_method_handle(_masm, rbx_method, rax);
}
break;
@@ -1228,7 +1251,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(rbx_method, vtable_entry_addr);
__ verify_oop(rbx_method);
- __ jmp(rbx_method_fie);
+ jump_from_method_handle(_masm, rbx_method, rax);
}
break;
@@ -1263,7 +1286,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
no_such_interface);
__ verify_oop(rbx_method);
- __ jmp(rbx_method_fie);
+ jump_from_method_handle(_masm, rbx_method, rax);
__ hlt();
__ bind(no_such_interface);
@@ -1311,7 +1334,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Register rbx_method = rbx_temp;
__ load_heap_oop(rbx_method, rcx_mh_vmtarget);
__ verify_oop(rbx_method);
- __ jmp(rbx_method_fie);
+ jump_from_method_handle(_masm, rbx_method, rax);
} else {
__ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
__ verify_oop(rcx_recv);
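The other notable x86 change is in the _raise_exception pseudo-entry: rather than jumping to the compiled entry with arguments in compiler registers, the stub now opens three stack slots, stores the arguments where the interpreter convention expects them, and dispatches through jump_from_method_handle. Assuming argument_address(constant(n)) resolves to the n-th slot above the pushed return PC, the repacked stack looks roughly like this:

// Sketched stack layout after the repack (x86, one word per slot, higher
// addresses toward the top; the slot offsets are an assumption for
// illustration, not taken from the source):
//
//   [ caller frame ... ]        <- rsi snapshots this rsp (movptr(rsi, rsp))
//   [ slot 2: rarg0_code ]      <- argument_address(constant(2))
//   [ slot 1: rarg1_actual ]    <- argument_address(constant(1))
//   [ slot 0: rarg2_required ]  <- argument_address(constant(0))
//   [ caller PC (rax_pc) ]      <- rsp after push(rax_pc)
//
// jump_from_method_handle(_masm, rbx_method, rax) then enters the target via
// its interpreter entry (interp_only_mode set) or its from_interpreted entry.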
diff --git a/src/cpu/x86/vm/methodHandles_x86.hpp b/src/cpu/x86/vm/methodHandles_x86.hpp
index 6361e20fa..c4ed0b706 100644
--- a/src/cpu/x86/vm/methodHandles_x86.hpp
+++ b/src/cpu/x86/vm/methodHandles_x86.hpp
@@ -291,6 +291,10 @@ public:
"reference is a MH");
}
+ // Similar to InterpreterMacroAssembler::jump_from_interpreted.
+ // Also handles the special dispatch needed when single stepping is active.
+ static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp);
+
static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
static Register saved_last_sp_register() {