about summary refs log tree commit diff
path: root/src/cpu/x86
diff options
context:
space:
mode:
author    never <none@none>    2011-07-15 15:35:50 -0700
committer never <none@none>    2011-07-15 15:35:50 -0700
commit b52cc7882c3c6d789b246850db890f6e05597268 (patch)
tree   33b4e071694b05d98c3c98c8d407ff46df72a4b1 /src/cpu/x86
parent fe59fe4d17f67abbead69492454cdeb08db0cc58 (diff)
6990212: JSR 292 JVMTI MethodEnter hook is not called for JSR 292 bootstrap and target methods
Summary: check for single stepping when dispatching invokes from method handles Reviewed-by: coleenp, twisti, kvn, dsamersoff
Diffstat (limited to 'src/cpu/x86')
-rw-r--r-- src/cpu/x86/vm/interp_masm_x86_32.cpp |  4
-rw-r--r-- src/cpu/x86/vm/interp_masm_x86_64.cpp |  2
-rw-r--r-- src/cpu/x86/vm/methodHandles_x86.cpp  | 49
-rw-r--r-- src/cpu/x86/vm/methodHandles_x86.hpp  |  4
4 files changed, 43 insertions(+), 16 deletions(-)
diff --git a/src/cpu/x86/vm/interp_masm_x86_32.cpp b/src/cpu/x86/vm/interp_masm_x86_32.cpp
index ce3571338..a97d91312 100644
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp
@@ -403,9 +403,9 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register
// interp_only_mode if these events CAN be enabled.
get_thread(temp);
// interp_only is an int, on little endian it is sufficient to test the byte only
- // Is a cmpl faster (ce
+ // Is a cmpl faster?
cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
- jcc(Assembler::zero, run_compiled_code);
+ jccb(Assembler::zero, run_compiled_code);
jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
bind(run_compiled_code);
}
diff --git a/src/cpu/x86/vm/interp_masm_x86_64.cpp b/src/cpu/x86/vm/interp_masm_x86_64.cpp
index 4b7884cec..cee2705bb 100644
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp
@@ -402,7 +402,7 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register
// interp_only is an int, on little endian it is sufficient to test the byte only
// Is a cmpl faster?
cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
- jcc(Assembler::zero, run_compiled_code);
+ jccb(Assembler::zero, run_compiled_code);
jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
bind(run_compiled_code);
}
diff --git a/src/cpu/x86/vm/methodHandles_x86.cpp b/src/cpu/x86/vm/methodHandles_x86.cpp
index 4df24afb1..30b62c9c5 100644
--- a/src/cpu/x86/vm/methodHandles_x86.cpp
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp
@@ -546,6 +546,28 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
}
#endif //ASSERT
+void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp) {
+ if (JvmtiExport::can_post_interpreter_events()) {
+ Label run_compiled_code;
+ // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+ // compiled code in threads for which the event is enabled. Check here for
+ // interp_only_mode if these events CAN be enabled.
+#ifdef _LP64
+ Register rthread = r15_thread;
+#else
+ Register rthread = temp;
+ __ get_thread(rthread);
+#endif
+ // interp_only is an int, on little endian it is sufficient to test the byte only
+ // Is a cmpl faster?
+ __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
+ __ jccb(Assembler::zero, run_compiled_code);
+ __ jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
+ __ bind(run_compiled_code);
+ }
+ __ jmp(Address(method, methodOopDesc::from_interpreted_offset()));
+}
+
// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
// rbx: methodOop
@@ -1120,9 +1142,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
// some handy addresses
- Address rbx_method_fie( rbx, methodOopDesc::from_interpreted_offset() );
- Address rbx_method_fce( rbx, methodOopDesc::from_compiled_offset() );
-
Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
Address rcx_dmh_vmindex( rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() );
@@ -1163,8 +1182,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
assert(raise_exception_method(), "must be set");
assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
- const Register rdi_pc = rax;
- __ pop(rdi_pc); // caller PC
+ const Register rax_pc = rax;
+ __ pop(rax_pc); // caller PC
__ mov(rsp, saved_last_sp); // cut the stack back to where the caller started
Register rbx_method = rbx_temp;
@@ -1172,11 +1191,15 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
const int jobject_oop_offset = 0;
__ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject
- __ verify_oop(rbx_method);
- NOT_LP64(__ push(rarg2_required));
- __ push(rdi_pc); // restore caller PC
- __ jmp(rbx_method_fce); // jump to compiled entry
+ __ movptr(rsi, rsp);
+ __ subptr(rsp, 3 * wordSize);
+ __ push(rax_pc); // restore caller PC
+
+ __ movptr(__ argument_address(constant(2)), rarg0_code);
+ __ movptr(__ argument_address(constant(1)), rarg1_actual);
+ __ movptr(__ argument_address(constant(0)), rarg2_required);
+ jump_from_method_handle(_masm, rbx_method, rax);
}
break;
@@ -1195,7 +1218,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ null_check(rcx_recv);
__ verify_oop(rcx_recv);
}
- __ jmp(rbx_method_fie);
+ jump_from_method_handle(_masm, rbx_method, rax);
}
break;
@@ -1228,7 +1251,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(rbx_method, vtable_entry_addr);
__ verify_oop(rbx_method);
- __ jmp(rbx_method_fie);
+ jump_from_method_handle(_masm, rbx_method, rax);
}
break;
@@ -1263,7 +1286,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
no_such_interface);
__ verify_oop(rbx_method);
- __ jmp(rbx_method_fie);
+ jump_from_method_handle(_masm, rbx_method, rax);
__ hlt();
__ bind(no_such_interface);
@@ -1311,7 +1334,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Register rbx_method = rbx_temp;
__ load_heap_oop(rbx_method, rcx_mh_vmtarget);
__ verify_oop(rbx_method);
- __ jmp(rbx_method_fie);
+ jump_from_method_handle(_masm, rbx_method, rax);
} else {
__ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
__ verify_oop(rcx_recv);
diff --git a/src/cpu/x86/vm/methodHandles_x86.hpp b/src/cpu/x86/vm/methodHandles_x86.hpp
index 6361e20fa..c4ed0b706 100644
--- a/src/cpu/x86/vm/methodHandles_x86.hpp
+++ b/src/cpu/x86/vm/methodHandles_x86.hpp
@@ -291,6 +291,10 @@ public:
"reference is a MH");
}
+ // Similar to InterpreterMacroAssembler::jump_from_interpreted.
+ // Takes care of special dispatch from single stepping too.
+ static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp);
+
static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
static Register saved_last_sp_register() {