path: root/src/cpu/x86/vm
author     coleenp <none@none>  2012-09-01 13:25:18 -0400
committer  coleenp <none@none>  2012-09-01 13:25:18 -0400
commit     c4f2a125e3006c1715c12ec6ec682389b24d7cac (patch)
tree       00340ce50c7adf1b8db7e987946c2c9d747f37a3 /src/cpu/x86/vm
parent     ab8cad1d47e8e46e6e633ee5eb4cb0cdb7a0e7bb (diff)
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
--HG--
rename : src/cpu/sparc/vm/dump_sparc.cpp => src/cpu/sparc/vm/metaspaceShared_sparc.cpp
rename : src/cpu/x86/vm/dump_x86_32.cpp => src/cpu/x86/vm/metaspaceShared_x86_32.cpp
rename : src/cpu/x86/vm/dump_x86_64.cpp => src/cpu/x86/vm/metaspaceShared_x86_64.cpp
rename : src/cpu/zero/vm/dump_zero.cpp => src/cpu/zero/vm/metaspaceShared_zero.cpp
rename : src/share/vm/oops/compiledICHolderOop.cpp => src/share/vm/oops/compiledICHolder.cpp
rename : src/share/vm/oops/compiledICHolderOop.hpp => src/share/vm/oops/compiledICHolder.hpp
rename : src/share/vm/oops/constMethodOop.cpp => src/share/vm/oops/constMethod.cpp
rename : src/share/vm/oops/constMethodOop.hpp => src/share/vm/oops/constMethod.hpp
rename : src/share/vm/oops/constantPoolOop.cpp => src/share/vm/oops/constantPool.cpp
rename : src/share/vm/oops/constantPoolOop.hpp => src/share/vm/oops/constantPool.hpp
rename : src/share/vm/oops/cpCacheOop.cpp => src/share/vm/oops/cpCache.cpp
rename : src/share/vm/oops/cpCacheOop.hpp => src/share/vm/oops/cpCache.hpp
rename : src/share/vm/oops/methodOop.cpp => src/share/vm/oops/method.cpp
rename : src/share/vm/oops/methodOop.hpp => src/share/vm/oops/method.hpp
rename : src/share/vm/oops/methodDataOop.cpp => src/share/vm/oops/methodData.cpp
rename : src/share/vm/oops/methodDataOop.hpp => src/share/vm/oops/methodData.hpp
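The summary above is the whole idea behind the diff below: class metadata that used to live as Java-heap "oops" (methodOop, constantPoolOop, ...) in PermGen becomes ordinary C++ objects (Method, ConstantPool, ...) allocated in native metaspace tied to a class loader. The following is a minimal, self-contained C++ sketch of that idea only; "Metaspace" and "Method" here are hypothetical stand-ins, not the real HotSpot types.

// Conceptual sketch only -- illustrates the PermGen -> Metaspace change described
// in the commit summary. "Metaspace" and "Method" are hypothetical stand-ins.
#include <cstddef>
#include <iostream>
#include <new>
#include <vector>

class Metaspace {                          // stand-in for a per-class-loader native arena
 public:
  void* allocate(std::size_t bytes) {      // carve metadata storage out of native memory
    blocks_.emplace_back(bytes);
    return blocks_.back().data();
  }
 private:
  std::vector<std::vector<char>> blocks_;
};

class Method {                             // formerly a methodOop in PermGen, now a plain C++ class
 public:
  explicit Method(int max_stack) : max_stack_(max_stack) {}
  int max_stack() const { return max_stack_; }
 private:
  int max_stack_;
};

int main() {
  Metaspace loader_data;                   // metadata lives outside the Java heap
  Method* m = new (loader_data.allocate(sizeof(Method))) Method(/*max_stack=*/4);
  std::cout << "max_stack = " << m->max_stack() << '\n';
  m->~Method();                            // explicit destroy; the arena owns the storage
  return 0;
}

Because such metadata is no longer a heap oop, the x86 code below grows metadata_type relocations and mov_metadata()/cmpklass()/pushklass() variants alongside the existing oop-based movoop()/cmpoop()/pushoop().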
Diffstat (limited to 'src/cpu/x86/vm')
-rw-r--r--  src/cpu/x86/vm/assembler_x86.cpp               | 115
-rw-r--r--  src/cpu/x86/vm/assembler_x86.hpp               |  24
-rw-r--r--  src/cpu/x86/vm/bytecodeInterpreter_x86.cpp     |   6
-rw-r--r--  src/cpu/x86/vm/c1_CodeStubs_x86.cpp            |  35
-rw-r--r--  src/cpu/x86/vm/c1_LIRAssembler_x86.cpp         |  77
-rw-r--r--  src/cpu/x86/vm/c1_LIRGenerator_x86.cpp         |   8
-rw-r--r--  src/cpu/x86/vm/c1_MacroAssembler_x86.cpp       |  10
-rw-r--r--  src/cpu/x86/vm/c1_Runtime1_x86.cpp             |  48
-rw-r--r--  src/cpu/x86/vm/c1_globals_x86.hpp              |   5
-rw-r--r--  src/cpu/x86/vm/c2_globals_x86.hpp              |   3
-rw-r--r--  src/cpu/x86/vm/cppInterpreter_x86.cpp          | 138
-rw-r--r--  src/cpu/x86/vm/frame_x86.cpp                   |  18
-rw-r--r--  src/cpu/x86/vm/frame_x86.hpp                   |   2
-rw-r--r--  src/cpu/x86/vm/frame_x86.inline.hpp            |  14
-rw-r--r--  src/cpu/x86/vm/icBuffer_x86.cpp                |  17
-rw-r--r--  src/cpu/x86/vm/interp_masm_x86_32.cpp          |  49
-rw-r--r--  src/cpu/x86/vm/interp_masm_x86_32.hpp          |  11
-rw-r--r--  src/cpu/x86/vm/interp_masm_x86_64.cpp          |  49
-rw-r--r--  src/cpu/x86/vm/interp_masm_x86_64.hpp          |  11
-rw-r--r--  src/cpu/x86/vm/interpreterRT_x86_32.cpp        |   8
-rw-r--r--  src/cpu/x86/vm/interpreterRT_x86_64.cpp        |   8
-rw-r--r--  src/cpu/x86/vm/interpreter_x86_32.cpp          |  12
-rw-r--r--  src/cpu/x86/vm/interpreter_x86_64.cpp          |  16
-rw-r--r--  src/cpu/x86/vm/metaspaceShared_x86_32.cpp (renamed from src/cpu/x86/vm/dump_x86_32.cpp) | 14
-rw-r--r--  src/cpu/x86/vm/metaspaceShared_x86_64.cpp (renamed from src/cpu/x86/vm/dump_x86_64.cpp) | 14
-rw-r--r--  src/cpu/x86/vm/methodHandles_x86.cpp           |  48
-rw-r--r--  src/cpu/x86/vm/methodHandles_x86.hpp           |   4
-rw-r--r--  src/cpu/x86/vm/relocInfo_x86.cpp               |  14
-rw-r--r--  src/cpu/x86/vm/sharedRuntime_x86_32.cpp        |  36
-rw-r--r--  src/cpu/x86/vm/sharedRuntime_x86_64.cpp        |  36
-rw-r--r--  src/cpu/x86/vm/stubGenerator_x86_32.cpp        |  28
-rw-r--r--  src/cpu/x86/vm/stubGenerator_x86_64.cpp        |  31
-rw-r--r--  src/cpu/x86/vm/templateInterpreter_x86_32.cpp  | 138
-rw-r--r--  src/cpu/x86/vm/templateInterpreter_x86_64.cpp  | 139
-rw-r--r--  src/cpu/x86/vm/templateTable_x86_32.cpp        | 243
-rw-r--r--  src/cpu/x86/vm/templateTable_x86_64.cpp        | 253
-rw-r--r--  src/cpu/x86/vm/vtableStubs_x86_32.cpp          |  22
-rw-r--r--  src/cpu/x86/vm/vtableStubs_x86_64.cpp          |  22
-rw-r--r--  src/cpu/x86/vm/x86.ad                          |  11
-rw-r--r--  src/cpu/x86/vm/x86_32.ad                       |  95
-rw-r--r--  src/cpu/x86/vm/x86_64.ad                       |  85
41 files changed, 903 insertions, 1014 deletions
diff --git a/src/cpu/x86/vm/assembler_x86.cpp b/src/cpu/x86/vm/assembler_x86.cpp
index abf837339..611d7ab50 100644
--- a/src/cpu/x86/vm/assembler_x86.cpp
+++ b/src/cpu/x86/vm/assembler_x86.cpp
@@ -57,6 +57,7 @@ AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
_target = target;
switch (rtype) {
case relocInfo::oop_type:
+ case relocInfo::metadata_type:
// Oops are a special case. Normally they would be their own section
// but in cases like icBuffer they are literals in the code stream that
// we don't have a section for. We use none so that we get a literal address
@@ -154,10 +155,10 @@ Address::Address(address loc, RelocationHolder spec) {
// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
-Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
+Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
RelocationHolder rspec;
- if (disp_is_oop) {
- rspec = Relocation::spec_simple(relocInfo::oop_type);
+ if (disp_reloc != relocInfo::none) {
+ rspec = Relocation::spec_simple(disp_reloc);
}
bool valid_index = index != rsp->encoding();
if (valid_index) {
@@ -270,17 +271,6 @@ void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t im
}
}
-void Assembler::emit_arith(int op1, int op2, Register dst, jobject obj) {
- LP64_ONLY(ShouldNotReachHere());
- assert(isByte(op1) && isByte(op2), "wrong opcode");
- assert((op1 & 0x01) == 1, "should be 32bit operation");
- assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
- InstructionMark im(this);
- emit_byte(op1);
- emit_byte(op2 | encode(dst));
- emit_data((intptr_t)obj, relocInfo::oop_type, 0);
-}
-
void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
assert(isByte(op1) && isByte(op2), "wrong opcode");
@@ -5572,6 +5562,14 @@ void MacroAssembler::call_VM_leaf_base(address entry_point,
increment(rsp, number_of_arguments * wordSize);
}
+void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
+ cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
+ cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
+}
+
void MacroAssembler::cmpoop(Address src1, jobject obj) {
cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
@@ -5753,6 +5751,14 @@ void MacroAssembler::movoop(Address dst, jobject obj) {
mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
+void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
+ mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
+ mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
+}
+
void MacroAssembler::movptr(Register dst, AddressLiteral src) {
if (src.is_lval()) {
mov_literal32(dst, (intptr_t)src.target(), src.rspec());
@@ -5804,6 +5810,9 @@ void MacroAssembler::pushoop(jobject obj) {
push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}
+void MacroAssembler::pushklass(Metadata* obj) {
+ push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
+}
void MacroAssembler::pushptr(AddressLiteral src) {
if (src.is_lval()) {
@@ -5856,13 +5865,13 @@ void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rd
if (os::message_box(msg, "Execution stopped, print registers?")) {
print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
BREAKPOINT;
- assert(false, "start up GDB");
}
} else {
ttyLocker ttyl;
::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
- assert(false, err_msg("DEBUG MESSAGE: %s", msg));
}
+ // Don't assert holding the ttyLock
+ assert(false, err_msg("DEBUG MESSAGE: %s", msg));
ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
}
@@ -6280,6 +6289,15 @@ void MacroAssembler::movoop(Address dst, jobject obj) {
movq(dst, rscratch1);
}
+void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
+ mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
+ mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
+ movq(dst, rscratch1);
+}
+
void MacroAssembler::movptr(Register dst, AddressLiteral src) {
if (src.is_lval()) {
mov_literal64(dst, (intptr_t)src.target(), src.rspec());
@@ -6321,6 +6339,11 @@ void MacroAssembler::pushoop(jobject obj) {
push(rscratch1);
}
+void MacroAssembler::pushklass(Metadata* obj) {
+ mov_metadata(rscratch1, obj);
+ push(rscratch1);
+}
+
void MacroAssembler::pushptr(AddressLiteral src) {
lea(rscratch1, src);
if (src.is_lval()) {
@@ -6655,6 +6678,12 @@ void MacroAssembler::call(AddressLiteral entry) {
}
}
+void MacroAssembler::ic_call(address entry) {
+ RelocationHolder rh = virtual_call_Relocation::spec(pc());
+ movptr(rax, (intptr_t)Universe::non_oop_word());
+ call(AddressLiteral(entry, rh));
+}
+
// Implementation of call_VM versions
void MacroAssembler::call_VM(Register oop_result,
@@ -6923,9 +6952,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
// get oop result if there is one and reset the value in the thread
if (oop_result->is_valid()) {
- movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
- movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
- verify_oop(oop_result, "broken oop in call_VM_base");
+ get_vm_result(oop_result, java_thread);
}
}
@@ -7016,6 +7043,17 @@ void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Reg
MacroAssembler::call_VM_leaf_base(entry_point, 4);
}
+void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
+ movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
+ movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
+ verify_oop(oop_result, "broken oop in call_VM_base");
+}
+
+void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
+ movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
+ movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
+}
+
void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
}
@@ -9097,20 +9135,20 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
"caller must use same register for non-constant itable index as for method");
// Compute start of first itableOffsetEntry (which is at the end of the vtable)
- int vtable_base = instanceKlass::vtable_start_offset() * wordSize;
+ int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
int itentry_off = itableMethodEntry::method_offset_in_bytes();
int scan_step = itableOffsetEntry::size() * wordSize;
int vte_size = vtableEntry::size() * wordSize;
Address::ScaleFactor times_vte_scale = Address::times_ptr;
assert(vte_size == wordSize, "else adjust times_vte_scale");
- movl(scan_temp, Address(recv_klass, instanceKlass::vtable_length_offset() * wordSize));
+ movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
// %%% Could store the aligned, prescaled offset in the klassoop.
lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
if (HeapWordsPerLong > 1) {
// Round up to align_object_offset boundary
- // see code for instanceKlass::start_of_itable!
+ // see code for InstanceKlass::start_of_itable!
round_to(scan_temp, BytesPerLong);
}
@@ -9160,7 +9198,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
void MacroAssembler::lookup_virtual_method(Register recv_klass,
RegisterOrConstant vtable_index,
Register method_result) {
- const int base = instanceKlass::vtable_start_offset() * wordSize;
+ const int base = InstanceKlass::vtable_start_offset() * wordSize;
assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
Address vtable_entry_addr(recv_klass,
vtable_index, Address::times_ptr,
@@ -9335,33 +9373,19 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
// We will consult the secondary-super array.
movptr(rdi, secondary_supers_addr);
// Load the array length. (Positive movl does right thing on LP64.)
- movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
+ movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
// Skip to start of data.
- addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+ addptr(rdi, Array<Klass*>::base_offset_in_bytes());
// Scan RCX words at [RDI] for an occurrence of RAX.
// Set NZ/Z based on last compare.
// Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does
// not change flags (only scas instruction which is repeated sets flags).
// Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
-#ifdef _LP64
- // This part is tricky, as values in supers array could be 32 or 64 bit wide
- // and we store values in objArrays always encoded, thus we need to encode
- // the value of rax before repne. Note that rax is dead after the repne.
- if (UseCompressedOops) {
- encode_heap_oop_not_null(rax); // Changes flags.
- // The superclass is never null; it would be a basic system error if a null
- // pointer were to sneak in here. Note that we have already loaded the
- // Klass::super_check_offset from the super_klass in the fast path,
- // so if there is a null in that register, we are already in the afterlife.
- testl(rax,rax); // Set Z = 0
- repne_scanl();
- } else
-#endif // _LP64
- {
+
testptr(rax,rax); // Set Z = 0
repne_scan();
- }
+
// Unspill the temp. registers:
if (pushed_rdi) pop(rdi);
if (pushed_rcx) pop(rcx);
@@ -9907,7 +9931,7 @@ void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
void MacroAssembler::load_klass(Register dst, Register src) {
#ifdef _LP64
- if (UseCompressedOops) {
+ if (UseCompressedKlassPointers) {
movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_heap_oop_not_null(dst);
} else
@@ -9917,7 +9941,7 @@ void MacroAssembler::load_klass(Register dst, Register src) {
void MacroAssembler::load_prototype_header(Register dst, Register src) {
#ifdef _LP64
- if (UseCompressedOops) {
+ if (UseCompressedKlassPointers) {
assert (Universe::heap() != NULL, "java heap should be initialized");
movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
if (Universe::narrow_oop_shift() != 0) {
@@ -9942,7 +9966,7 @@ void MacroAssembler::load_prototype_header(Register dst, Register src) {
void MacroAssembler::store_klass(Register dst, Register src) {
#ifdef _LP64
- if (UseCompressedOops) {
+ if (UseCompressedKlassPointers) {
encode_heap_oop_not_null(src);
movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
} else
@@ -9952,6 +9976,7 @@ void MacroAssembler::store_klass(Register dst, Register src) {
void MacroAssembler::load_heap_oop(Register dst, Address src) {
#ifdef _LP64
+ // FIXME: Must change all places where we try to load the klass.
if (UseCompressedOops) {
movl(dst, src);
decode_heap_oop(dst);
@@ -10016,7 +10041,7 @@ void MacroAssembler::store_heap_oop_null(Address dst) {
#ifdef _LP64
void MacroAssembler::store_klass_gap(Register dst, Register src) {
- if (UseCompressedOops) {
+ if (UseCompressedKlassPointers) {
// Store to klass gap in destination
movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
}
diff --git a/src/cpu/x86/vm/assembler_x86.hpp b/src/cpu/x86/vm/assembler_x86.hpp
index d06f499ca..ff8c605b0 100644
--- a/src/cpu/x86/vm/assembler_x86.hpp
+++ b/src/cpu/x86/vm/assembler_x86.hpp
@@ -299,7 +299,7 @@ class Address VALUE_OBJ_CLASS_SPEC {
// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
- static Address make_raw(int base, int index, int scale, int disp, bool disp_is_oop);
+ static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
static Address make_array(ArrayAddress);
@@ -390,14 +390,6 @@ class RuntimeAddress: public AddressLiteral {
};
-class OopAddress: public AddressLiteral {
-
- public:
-
- OopAddress(address target) : AddressLiteral(target, relocInfo::oop_type){}
-
-};
-
class ExternalAddress: public AddressLiteral {
private:
static relocInfo::relocType reloc_for_target(address target) {
@@ -668,8 +660,6 @@ private:
void emit_arith(int op1, int op2, Register dst, int32_t imm32);
// Force generation of a 4 byte immediate value even if it fits into 8bit
void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
- // only 32bit??
- void emit_arith(int op1, int op2, Register dst, jobject obj);
void emit_arith(int op1, int op2, Register dst, Register src);
void emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre);
@@ -1972,6 +1962,9 @@ class MacroAssembler: public Assembler {
Register arg_1, Register arg_2, Register arg_3,
bool check_exceptions = true);
+ void get_vm_result (Register oop_result, Register thread);
+ void get_vm_result_2(Register metadata_result, Register thread);
+
// These always tightly bind to MacroAssembler::call_VM_base
// bypassing the virtual implementation
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
@@ -2387,6 +2380,8 @@ class MacroAssembler: public Assembler {
void cmp32(Register src1, Address src2);
#ifndef _LP64
+ void cmpklass(Address dst, Metadata* obj);
+ void cmpklass(Register dst, Metadata* obj);
void cmpoop(Address dst, jobject obj);
void cmpoop(Register dst, jobject obj);
#endif // _LP64
@@ -2486,6 +2481,9 @@ class MacroAssembler: public Assembler {
// for jumps/calls.
void call(AddressLiteral entry);
+ // Emit the CompiledIC call idiom
+ void ic_call(address entry);
+
// Jumps
// NOTE: these jumps tranfer to the effective address of dst NOT
@@ -2723,6 +2721,9 @@ public:
void movoop(Register dst, jobject obj);
void movoop(Address dst, jobject obj);
+ void mov_metadata(Register dst, Metadata* obj);
+ void mov_metadata(Address dst, Metadata* obj);
+
void movptr(ArrayAddress dst, Register src);
// can this do an lea?
void movptr(Register dst, ArrayAddress src);
@@ -2775,6 +2776,7 @@ public:
void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
void pushoop(jobject obj);
+ void pushklass(Metadata* obj);
// sign extend as need a l to ptr sized element
void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
diff --git a/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp b/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp
index 5e606224f..237c617ce 100644
--- a/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp
+++ b/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,8 +28,8 @@
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
-#include "oops/methodDataOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
diff --git a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
index 618a37c91..be2c1097a 100644
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -284,7 +284,24 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
}
if (_id == load_klass_id) {
// produce a copy of the load klass instruction for use by the being initialized case
+#ifdef ASSERT
+ address start = __ pc();
+#endif
+ Metadata* o = NULL;
+ __ mov_metadata(_obj, o);
+#ifdef ASSERT
+ for (int i = 0; i < _bytes_to_copy; i++) {
+ address ptr = (address)(_pc_start + i);
+ int a_byte = (*ptr) & 0xFF;
+ assert(a_byte == *start++, "should be the same code");
+ }
+#endif
+ } else if (_id == load_mirror_id) {
+ // produce a copy of the load mirror instruction for use by the being
+ // initialized case
+#ifdef ASSERT
address start = __ pc();
+#endif
jobject o = NULL;
__ movoop(_obj, o);
#ifdef ASSERT
@@ -306,7 +323,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
address end_of_patch = __ pc();
int bytes_to_skip = 0;
- if (_id == load_klass_id) {
+ if (_id == load_mirror_id) {
int offset = __ offset();
if (CommentedAssembly) {
__ block_comment(" being_initialized check");
@@ -318,9 +335,9 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
__ push(tmp2);
// Load without verification to keep code size small. We need it because
// begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
- __ load_heap_oop_not_null(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
+ __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
__ get_thread(tmp);
- __ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset()));
+ __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
__ pop(tmp2);
__ pop(tmp);
__ jcc(Assembler::notEqual, call_patch);
@@ -357,9 +374,11 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
address entry = __ pc();
NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
address target = NULL;
+ relocInfo::relocType reloc_type = relocInfo::none;
switch (_id) {
case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
- case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
+ case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
+ case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
default: ShouldNotReachHere();
}
__ bind(call_patch);
@@ -377,10 +396,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
__ nop();
}
- if (_id == load_klass_id) {
+ if (_id == load_klass_id || _id == load_mirror_id) {
CodeSection* cs = __ code_section();
RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
- relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, relocInfo::oop_type, relocInfo::none);
+ relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
}
}
@@ -420,7 +439,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
//---------------slow case: call to native-----------------
__ bind(_entry);
// Figure out where the args should go
- // This should really convert the IntrinsicID to the methodOop and signature
+ // This should really convert the IntrinsicID to the Method* and signature
// but I don't know how to do that.
//
VMRegPair args[5];
diff --git a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
index ff065764b..e0b8b18b5 100644
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
@@ -361,11 +361,17 @@ int LIR_Assembler::check_icache() {
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
jobject o = NULL;
- PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
+ PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id);
__ movoop(reg, o);
patching_epilog(patch, lir_patch_normal, reg, info);
}
+void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
+ Metadata* o = NULL;
+ PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
+ __ mov_metadata(reg, o);
+ patching_epilog(patch, lir_patch_normal, reg, info);
+}
// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() {
@@ -448,7 +454,7 @@ int LIR_Assembler::emit_unwind_handler() {
if (compilation()->env()->dtrace_method_probes()) {
__ get_thread(rax);
__ movptr(Address(rsp, 0), rax);
- __ movoop(Address(rsp, sizeof(void*)), method()->constant_encoding());
+ __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
}
@@ -669,6 +675,15 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
break;
}
+ case T_METADATA: {
+ if (patch_code != lir_patch_none) {
+ klass2reg_with_patching(dest->as_register(), info);
+ } else {
+ __ mov_metadata(dest->as_register(), c->as_metadata());
+ }
+ break;
+ }
+
case T_FLOAT: {
if (dest->is_single_xmm()) {
if (c->is_zero_float()) {
@@ -1570,8 +1585,8 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
if (op->init_check()) {
__ cmpb(Address(op->klass()->as_register(),
- instanceKlass::init_state_offset()),
- instanceKlass::fully_initialized);
+ InstanceKlass::init_state_offset()),
+ InstanceKlass::fully_initialized);
add_debug_info_for_null_check_here(op->stub()->info());
__ jcc(Assembler::notEqual, *op->stub()->entry());
}
@@ -1687,10 +1702,10 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
assert_different_registers(obj, k_RInfo, klass_RInfo);
if (!k->is_loaded()) {
- jobject2reg_with_patching(k_RInfo, op->info_for_patch());
+ klass2reg_with_patching(k_RInfo, op->info_for_patch());
} else {
#ifdef _LP64
- __ movoop(k_RInfo, k->constant_encoding());
+ __ mov_metadata(k_RInfo, k->constant_encoding());
#endif // _LP64
}
assert(obj != k_RInfo, "must be different");
@@ -1701,7 +1716,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
__ jccb(Assembler::notEqual, not_null);
// Object is null; update MDO and exit
Register mdo = klass_RInfo;
- __ movoop(mdo, md->constant_encoding());
+ __ mov_metadata(mdo, md->constant_encoding());
Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
__ orl(data_addr, header_bits);
@@ -1716,7 +1731,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
// get object class
// not a safepoint as obj null check happens earlier
#ifdef _LP64
- if (UseCompressedOops) {
+ if (UseCompressedKlassPointers) {
__ load_klass(Rtmp1, obj);
__ cmpptr(k_RInfo, Rtmp1);
} else {
@@ -1724,7 +1739,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
}
#else
if (k->is_loaded()) {
- __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
+ __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
} else {
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
}
@@ -1740,7 +1755,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
#ifdef _LP64
__ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
#else
- __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
+ __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
#endif // _LP64
if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
__ jcc(Assembler::notEqual, *failure_target);
@@ -1752,7 +1767,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
#ifdef _LP64
__ cmpptr(klass_RInfo, k_RInfo);
#else
- __ cmpoop(klass_RInfo, k->constant_encoding());
+ __ cmpklass(klass_RInfo, k->constant_encoding());
#endif // _LP64
__ jcc(Assembler::equal, *success_target);
@@ -1760,7 +1775,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
#ifdef _LP64
__ push(k_RInfo);
#else
- __ pushoop(k->constant_encoding());
+ __ pushklass(k->constant_encoding());
#endif // _LP64
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
@@ -1788,14 +1803,14 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
if (op->should_profile()) {
Register mdo = klass_RInfo, recv = k_RInfo;
__ bind(profile_cast_success);
- __ movoop(mdo, md->constant_encoding());
+ __ mov_metadata(mdo, md->constant_encoding());
__ load_klass(recv, obj);
Label update_done;
type_profile_helper(mdo, md, data, recv, success);
__ jmp(*success);
__ bind(profile_cast_failure);
- __ movoop(mdo, md->constant_encoding());
+ __ mov_metadata(mdo, md->constant_encoding());
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ subptr(counter_addr, DataLayout::counter_increment);
__ jmp(*failure);
@@ -1839,7 +1854,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ jccb(Assembler::notEqual, not_null);
// Object is null; update MDO and exit
Register mdo = klass_RInfo;
- __ movoop(mdo, md->constant_encoding());
+ __ mov_metadata(mdo, md->constant_encoding());
Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
__ orl(data_addr, header_bits);
@@ -1871,14 +1886,14 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
if (op->should_profile()) {
Register mdo = klass_RInfo, recv = k_RInfo;
__ bind(profile_cast_success);
- __ movoop(mdo, md->constant_encoding());
+ __ mov_metadata(mdo, md->constant_encoding());
__ load_klass(recv, value);
Label update_done;
type_profile_helper(mdo, md, data, recv, &done);
__ jmpb(done);
__ bind(profile_cast_failure);
- __ movoop(mdo, md->constant_encoding());
+ __ mov_metadata(mdo, md->constant_encoding());
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ subptr(counter_addr, DataLayout::counter_increment);
__ jmp(*stub->entry());
@@ -2864,13 +2879,11 @@ void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
- RelocationHolder rh = virtual_call_Relocation::spec(pc());
- __ movoop(IC_Klass, (jobject)Universe::non_oop_word());
+ __ ic_call(op->addr());
+ add_call_info(code_offset(), op->info());
assert(!os::is_MP() ||
- (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
+ (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned");
- __ call(AddressLiteral(op->addr(), rh));
- add_call_info(code_offset(), op->info());
}
@@ -2897,7 +2910,7 @@ void LIR_Assembler::emit_static_call_stub() {
}
}
__ relocate(static_stub_Relocation::spec(call_pc));
- __ movoop(rbx, (jobject)NULL);
+ __ mov_metadata(rbx, (Metadata*)NULL);
// must be set to -1 at code generation time
assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
// On 64bit this will die since it will take a movq & jmp, must be only a jmp
@@ -3258,7 +3271,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// We don't know the array types are compatible
if (basic_type != T_OBJECT) {
// Simple test for basic type arrays
- if (UseCompressedOops) {
+ if (UseCompressedKlassPointers) {
__ movl(tmp, src_klass_addr);
__ cmpl(tmp, dst_klass_addr);
} else {
@@ -3418,23 +3431,23 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// subtype which we can't check or src is the same array as dst
// but not necessarily exactly of type default_type.
Label known_ok, halt;
- __ movoop(tmp, default_type->constant_encoding());
+ __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
- if (UseCompressedOops) {
+ if (UseCompressedKlassPointers) {
__ encode_heap_oop(tmp);
}
#endif
if (basic_type != T_OBJECT) {
- if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
+ if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr);
else __ cmpptr(tmp, dst_klass_addr);
__ jcc(Assembler::notEqual, halt);
- if (UseCompressedOops) __ cmpl(tmp, src_klass_addr);
+ if (UseCompressedKlassPointers) __ cmpl(tmp, src_klass_addr);
else __ cmpptr(tmp, src_klass_addr);
__ jcc(Assembler::equal, known_ok);
} else {
- if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
+ if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr);
else __ cmpptr(tmp, dst_klass_addr);
__ jcc(Assembler::equal, known_ok);
__ cmpptr(src, dst);
@@ -3517,7 +3530,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
assert(data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register();
- __ movoop(mdo, md->constant_encoding());
+ __ mov_metadata(mdo, md->constant_encoding());
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
Bytecodes::Code bc = method->java_code_at_bci(bci);
const bool callee_is_static = callee->is_loaded() && callee->is_static();
@@ -3533,7 +3546,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciKlass* known_klass = op->known_holder();
if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
// We know the type that will be seen at this call site; we can
- // statically update the methodDataOop rather than needing to do
+ // statically update the MethodData* rather than needing to do
// dynamic tests on the receiver type
// NOTE: we should probably put a lock around this search to
@@ -3558,7 +3571,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciKlass* receiver = vc_data->receiver(i);
if (receiver == NULL) {
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
- __ movoop(recv_addr, known_klass->constant_encoding());
+ __ mov_metadata(recv_addr, known_klass->constant_encoding());
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ addptr(data_addr, DataLayout::counter_increment);
return;
diff --git a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
index deca1a0ed..e2345914e 100644
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1061,11 +1061,11 @@ void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
LIR_Opr len = length.result();
CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
- ciObject* obj = (ciObject*) ciObjArrayKlass::make(x->klass());
+ ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
if (obj == ciEnv::unloaded_ciobjarrayklass()) {
BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
}
- jobject2reg_with_patching(klass_reg, obj, patching_info);
+ klass2reg_with_patching(klass_reg, obj, patching_info);
__ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
LIR_Opr result = rlock_result(x);
@@ -1104,7 +1104,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
}
LIR_Opr reg = result_register_for(x->type());
- jobject2reg_with_patching(reg, x->klass(), patching_info);
+ klass2reg_with_patching(reg, x->klass(), patching_info);
LIR_Opr rank = FrameMap::rbx_opr;
__ move(LIR_OprFact::intConst(x->rank()), rank);
diff --git a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
index 5ef8cf54d..c1eb9d0f2 100644
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -157,7 +157,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
}
#ifdef _LP64
- if (UseCompressedOops) { // Take care not to kill klass
+ if (UseCompressedKlassPointers) { // Take care not to kill klass
movptr(t1, klass);
encode_heap_oop_not_null(t1);
movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
@@ -171,7 +171,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
}
#ifdef _LP64
- else if (UseCompressedOops) {
+ else if (UseCompressedKlassPointers) {
xorptr(t1, t1);
store_klass_gap(obj, t1);
}
@@ -334,7 +334,7 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
int start_offset = offset();
- if (UseCompressedOops) {
+ if (UseCompressedKlassPointers) {
load_klass(rscratch1, receiver);
cmpptr(rscratch1, iCache);
} else {
@@ -345,7 +345,7 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
jump_cc(Assembler::notEqual,
RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
- assert(UseCompressedOops || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
+ assert(UseCompressedKlassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
}
diff --git a/src/cpu/x86/vm/c1_Runtime1_x86.cpp b/src/cpu/x86/vm/c1_Runtime1_x86.cpp
index 30df6087d..e02d5f6af 100644
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
-#include "oops/compiledICHolderOop.hpp"
+#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
@@ -41,11 +41,11 @@
// Implementation of StubAssembler
-int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
+int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
// setup registers
const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
- assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
- assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
+ assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
+ assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
assert(args_size >= 0, "illegal args_size");
bool align_stack = false;
#ifdef _LP64
@@ -109,7 +109,7 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
if (oop_result1->is_valid()) {
movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
}
- if (oop_result2->is_valid()) {
+ if (metadata_result->is_valid()) {
movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
}
if (frame_size() == no_frame_size) {
@@ -124,30 +124,26 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
}
// get oop results if there are any and reset the values in the thread
if (oop_result1->is_valid()) {
- movptr(oop_result1, Address(thread, JavaThread::vm_result_offset()));
- movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
- verify_oop(oop_result1);
+ get_vm_result(oop_result1, thread);
}
- if (oop_result2->is_valid()) {
- movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
- movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
- verify_oop(oop_result2);
+ if (metadata_result->is_valid()) {
+ get_vm_result_2(metadata_result, thread);
}
return call_offset;
}
-int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
+int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
#ifdef _LP64
mov(c_rarg1, arg1);
#else
push(arg1);
#endif // _LP64
- return call_RT(oop_result1, oop_result2, entry, 1);
+ return call_RT(oop_result1, metadata_result, entry, 1);
}
-int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
+int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
#ifdef _LP64
if (c_rarg1 == arg2) {
if (c_rarg2 == arg1) {
@@ -164,11 +160,11 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
push(arg2);
push(arg1);
#endif // _LP64
- return call_RT(oop_result1, oop_result2, entry, 2);
+ return call_RT(oop_result1, metadata_result, entry, 2);
}
-int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
+int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
// if there is any conflict use the stack
if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
@@ -190,7 +186,7 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
push(arg2);
push(arg1);
#endif // _LP64
- return call_RT(oop_result1, oop_result2, entry, 3);
+ return call_RT(oop_result1, metadata_result, entry, 3);
}
@@ -1027,7 +1023,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
if (id == fast_new_instance_init_check_id) {
// make sure the klass is initialized
- __ cmpb(Address(klass, instanceKlass::init_state_offset()), instanceKlass::fully_initialized);
+ __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
__ jcc(Assembler::notEqual, slow_path);
}
@@ -1106,7 +1102,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
OopMap* map = save_live_registers(sasm, 3);
// Retrieve bci
__ movl(bci, Address(rbp, 2*BytesPerWord));
- // And a pointer to the methodOop
+ // And a pointer to the Method*
__ movptr(method, Address(rbp, 3*BytesPerWord));
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
oop_maps = new OopMapSet();
@@ -1291,8 +1287,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(register_finalizer);
__ enter();
OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
- int call_offset = __ call_RT(noreg, noreg,
- CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
+ int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, oop_map);
@@ -1496,6 +1491,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
+ case load_mirror_patching_id:
+ { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
+ // we should set up register map
+ oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
+ }
+ break;
+
case dtrace_object_alloc_id:
{ // rax,: object
StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
diff --git a/src/cpu/x86/vm/c1_globals_x86.hpp b/src/cpu/x86/vm/c1_globals_x86.hpp
index d6a5cc45c..98e02b16c 100644
--- a/src/cpu/x86/vm/c1_globals_x86.hpp
+++ b/src/cpu/x86/vm/c1_globals_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,8 +51,7 @@ define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx,CodeCacheMinBlockLength, 1);
-define_pd_global(uintx,PermSize, 12*M );
-define_pd_global(uintx,MaxPermSize, 64*M );
+define_pd_global(uintx,MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
diff --git a/src/cpu/x86/vm/c2_globals_x86.hpp b/src/cpu/x86/vm/c2_globals_x86.hpp
index 749c48f5e..5ad08b0f1 100644
--- a/src/cpu/x86/vm/c2_globals_x86.hpp
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp
@@ -88,8 +88,7 @@ define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(uintx,CodeCacheMinBlockLength, 4);
// Heap related flags
-define_pd_global(uintx,PermSize, ScaleForWordSize(16*M));
-define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
+define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
diff --git a/src/cpu/x86/vm/cppInterpreter_x86.cpp b/src/cpu/x86/vm/cppInterpreter_x86.cpp
index f40052280..5b3f99aca 100644
--- a/src/cpu/x86/vm/cppInterpreter_x86.cpp
+++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp
@@ -30,8 +30,8 @@
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
-#include "oops/methodDataOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
@@ -421,7 +421,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
// state == address of new interpreterState
// rsp == bottom of method's expression stack.
- const Address const_offset (rbx, methodOopDesc::const_offset());
+ const Address const_offset (rbx, Method::const_offset());
// On entry sp is the sender's sp. This includes the space for the arguments
@@ -471,8 +471,8 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
__ get_thread(rax); // get vm's javathread*
__ movptr(STATE(_thread), rax); // state->_bcp = codes()
#endif // _LP64
- __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); // get constantMethodOop
- __ lea(rdx, Address(rdx, constMethodOopDesc::codes_offset())); // get code base
+ __ movptr(rdx, Address(rbx, Method::const_offset())); // get constantMethodOop
+ __ lea(rdx, Address(rdx, ConstMethod::codes_offset())); // get code base
if (native) {
__ movptr(STATE(_bcp), (int32_t)NULL_WORD); // state->_bcp = NULL
} else {
@@ -481,9 +481,9 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
__ xorptr(rdx, rdx);
__ movptr(STATE(_oop_temp), rdx); // state->_oop_temp = NULL (only really needed for native)
__ movptr(STATE(_mdx), rdx); // state->_mdx = NULL
- __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
- __ movptr(rdx, Address(rdx, constMethodOopDesc::constants_offset()));
- __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
+ __ movptr(rdx, Address(rbx, Method::const_offset()));
+ __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
+ __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
__ movptr(STATE(_constants), rdx); // state->_constants = constants()
__ movptr(STATE(_method), rbx); // state->_method = method()
@@ -500,7 +500,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
// immediately.
// synchronize method
- const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+ const Address access_flags (rbx, Method::access_flags_offset());
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
Label not_synced;
@@ -517,9 +517,9 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
__ testl(rax, JVM_ACC_STATIC);
__ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case)
__ jcc(Assembler::zero, done);
- __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
- __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
- __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(rax, Address(rbx, Method::const_offset()));
+ __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
+ __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
__ movptr(rax, Address(rax, mirror_offset));
__ bind(done);
// add space for monitor & lock
@@ -538,8 +538,8 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
// compute full expression stack limit
- const Address size_of_stack (rbx, methodOopDesc::max_stack_offset());
- const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_words();
+ const Address size_of_stack (rbx, Method::max_stack_offset());
+ const int extra_stack = 0; //6815692//Method::extra_stack_words();
__ load_unsigned_short(rdx, size_of_stack); // get size of expression stack in words
__ negptr(rdx); // so we can subtract in next step
// Allocate expression stack
@@ -570,11 +570,11 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
- const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
- const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());
+ const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
+ const Address backedge_counter (rbx, Method::backedge_counter_offset() + InvocationCounter::counter_offset());
- if (ProfileInterpreter) { // %%% Merge this into methodDataOop
- __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
+ if (ProfileInterpreter) { // %%% Merge this into MethodData*
+ __ incrementl(Address(rbx,Method::interpreter_invocation_counter_offset()));
}
// Update standard invocation counters
__ movl(rax, backedge_counter); // load backedge counter
@@ -611,7 +611,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// C++ interpreter only
// rsi/r13 - previous interpreter state pointer
- const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
+ const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
// InterpreterRuntime::frequency_counter_overflow takes one argument
// indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
@@ -638,14 +638,14 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
//
// Asm interpreter
// rdx: number of additional locals this frame needs (what we must check)
- // rbx,: methodOop
+ // rbx,: Method*
// C++ Interpreter
// rsi/r13: previous interpreter frame state object
// rdi: &locals[0]
// rcx: # of locals
// rdx: number of additional locals this frame needs (what we must check)
- // rbx: methodOop
+ // rbx: Method*
// destroyed on exit
// rax,
@@ -682,11 +682,11 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
const Address stack_size(thread, Thread::stack_size_offset());
// locals + overhead, in bytes
- const Address size_of_stack (rbx, methodOopDesc::max_stack_offset());
+ const Address size_of_stack (rbx, Method::max_stack_offset());
// Always give one monitor to allow us to start interp if sync method.
// Any additional monitors need a check when moving the expression stack
const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
- const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries();
+ const int extra_stack = 0; //6815692//Method::extra_stack_entries();
__ load_unsigned_short(rax, size_of_stack); // get size of expression stack in words
__ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), extra_stack + one_monitor));
__ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
@@ -738,7 +738,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
}
// Find preallocated monitor and lock method (C++ interpreter)
-// rbx - methodOop
+// rbx - Method*
//
void InterpreterGenerator::lock_method(void) {
// assumes state == rsi/r13 == pointer to current interpreterState
@@ -746,7 +746,7 @@ void InterpreterGenerator::lock_method(void) {
//
// synchronize method
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
- const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+ const Address access_flags (rbx, Method::access_flags_offset());
const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
@@ -771,9 +771,9 @@ void InterpreterGenerator::lock_method(void) {
__ testl(rax, JVM_ACC_STATIC);
__ movptr(rax, Address(rdi, 0)); // get receiver (assume this is frequent case)
__ jcc(Assembler::zero, done);
- __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
- __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
- __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(rax, Address(rbx, Method::const_offset()));
+ __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
+ __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
__ movptr(rax, Address(rax, mirror_offset));
__ bind(done);
}
@@ -793,7 +793,7 @@ void InterpreterGenerator::lock_method(void) {
address InterpreterGenerator::generate_accessor_entry(void) {
- // rbx: methodOop
+ // rbx: Method*
// rsi/r13: senderSP must preserved for slow path, set SP to it on fast path
@@ -825,14 +825,14 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ jcc(Assembler::zero, slow_path);
// read first instruction word and extract bytecode @ 1 and index @ 2
- __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
- __ movptr(rdi, Address(rdx, constMethodOopDesc::constants_offset()));
- __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
+ __ movptr(rdx, Address(rbx, Method::const_offset()));
+ __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
+ __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2*BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
- __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
+ __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
// rax,: local 0
// rbx,: method
@@ -849,7 +849,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ movl(rcx,
Address(rdi,
rdx,
- Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+ Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2*BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
@@ -859,11 +859,11 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ movptr(rcx,
Address(rdi,
rdx,
- Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
+ Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
__ movl(rdx,
Address(rdi,
rdx,
- Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+ Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
Label notByte, notShort, notChar;
const Address field_address (rax, rcx, Address::times_1);
@@ -970,17 +970,17 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// determine code generation flags
bool inc_counter = UseCompiler || CountCompiledCalls;
- // rbx: methodOop
+ // rbx: Method*
// rcx: receiver (unused)
// rsi/r13: previous interpreter state (if called from C++ interpreter) must preserve
// in any case. If called via c1/c2/call_stub, rsi/r13 is junk (free to use) but harmless
// to save/restore.
address entry_point = __ pc();
- const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
- const Address size_of_locals (rbx, methodOopDesc::size_of_locals_offset());
- const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
- const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+ const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
+ const Address size_of_locals (rbx, Method::size_of_locals_offset());
+ const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
+ const Address access_flags (rbx, Method::access_flags_offset());
// rsi/r13 == state/locals rdi == prevstate
const Register locals = rdi;
@@ -988,7 +988,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get parameter size (always needed)
__ load_unsigned_short(rcx, size_of_parameters);
- // rbx: methodOop
+ // rbx: Method*
// rcx: size of parameters
__ pop(rax); // get return address
// for natives the size of locals is zero
@@ -1111,7 +1111,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// allocate space for parameters
__ movptr(method, STATE(_method));
__ verify_oop(method);
- __ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset()));
+ __ load_unsigned_short(t, Address(method, Method::size_of_parameters_offset()));
__ shll(t, 2);
#ifdef _LP64
__ subptr(rsp, t);
@@ -1127,7 +1127,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
Label pending_exception_present;
{ Label L;
- __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ movptr(t, Address(method, Method::signature_handler_offset()));
__ testptr(t, t);
__ jcc(Assembler::notZero, L);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method, false);
@@ -1135,7 +1135,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, pending_exception_present);
__ verify_oop(method);
- __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ movptr(t, Address(method, Method::signature_handler_offset()));
__ bind(L);
}
#ifdef ASSERT
@@ -1171,26 +1171,26 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get native function entry point
{ Label L;
- __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ movptr(rax, Address(method, Method::native_function_offset()));
__ testptr(rax, rax);
__ jcc(Assembler::notZero, L);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
__ movptr(method, STATE(_method));
__ verify_oop(method);
- __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ movptr(rax, Address(method, Method::native_function_offset()));
__ bind(L);
}
// pass mirror handle if static call
{ Label L;
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
- __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
+ __ movl(t, Address(method, Method::access_flags_offset()));
__ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L);
// get mirror
- __ movptr(t, Address(method, methodOopDesc:: const_offset()));
- __ movptr(t, Address(t, constMethodOopDesc::constants_offset()));
- __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(t, Address(method, Method:: const_offset()));
+ __ movptr(t, Address(t, ConstMethod::constants_offset()));
+ __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
__ movptr(t, Address(t, mirror_offset));
// copy mirror into activation object
__ movptr(STATE(_oop_temp), t);
@@ -1438,7 +1438,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// do unlocking if necessary
{ Label L;
- __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
+ __ movl(t, Address(method, Method::access_flags_offset()));
__ testl(t, JVM_ACC_SYNCHRONIZED);
__ jcc(Assembler::zero, L);
// the code below should be shared with interpreter macro assembler implementation
@@ -1634,7 +1634,7 @@ void CppInterpreterGenerator::generate_more_monitors() {
//
// Arguments:
//
-// rbx: methodOop
+// rbx: Method*
// rcx: receiver - unused (retrieved from stack as needed)
// rsi/r13: previous frame manager state (NULL from the call_stub/c1/c2)
//
@@ -1657,7 +1657,7 @@ static address interpreter_frame_manager = NULL;
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
- // rbx: methodOop
+ // rbx: Method*
// rsi/r13: sender sp
// Because we redispatch "recursive" interpreter entries thru this same entry point
@@ -1700,9 +1700,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// save sender sp
__ push(rcx);
- const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
- const Address size_of_locals (rbx, methodOopDesc::size_of_locals_offset());
- const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+ const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
+ const Address size_of_locals (rbx, Method::size_of_locals_offset());
+ const Address access_flags (rbx, Method::access_flags_offset());
// const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
// const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
@@ -1711,7 +1711,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// get parameter size (always needed)
__ load_unsigned_short(rcx, size_of_parameters);
- // rbx: methodOop
+ // rbx: Method*
// rcx: size of parameters
__ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
@@ -1927,7 +1927,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
if (UseSSE < 2) {
__ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
__ movptr(rbx, STATE(_result._to_call._callee)); // get method just executed
- __ movl(rcx, Address(rbx, methodOopDesc::result_index_offset()));
+ __ movl(rcx, Address(rbx, Method::result_index_offset()));
__ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index
__ jcc(Assembler::equal, do_float);
__ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index
@@ -1989,10 +1989,10 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ movptr(rbx, STATE(_result._to_call._callee));
// callee left args on top of expression stack, remove them
- __ load_unsigned_short(rcx, Address(rbx, methodOopDesc::size_of_parameters_offset()));
+ __ load_unsigned_short(rcx, Address(rbx, Method::size_of_parameters_offset()));
__ lea(rsp, Address(rsp, rcx, Address::times_ptr));
- __ movl(rcx, Address(rbx, methodOopDesc::result_index_offset()));
+ __ movl(rcx, Address(rbx, Method::result_index_offset()));
ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
// Address index(noreg, rax, Address::times_ptr);
__ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
@@ -2019,7 +2019,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ movptr(rbx, STATE(_method)); // get method just executed
__ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call?
- __ movl(rax, Address(rbx, methodOopDesc::result_index_offset())); // get result type index
+ __ movl(rax, Address(rbx, Method::result_index_offset())); // get result type index
__ jcc(Assembler::equal, return_to_initial_caller); // back to native code (call_stub/c1/c2)
// Copy result to callers java stack
@@ -2253,7 +2253,7 @@ InterpreterGenerator::InterpreterGenerator(StubQueue* code)
// Deoptimization helpers for C++ interpreter
// How much stack a method activation needs in words.
-int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
const int stub_code = 4; // see generate_call_stub
// Save space for one monitor to get into the interpreted method in case
@@ -2267,9 +2267,9 @@ int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
const int overhead_size = sizeof(BytecodeInterpreter)/wordSize +
( frame::sender_sp_offset - frame::link_offset) + 2;
- const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries();
+ const int extra_stack = 0; //6815692//Method::extra_stack_entries();
const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
- Interpreter::stackElementWords();
+ Interpreter::stackElementWords;
return overhead_size + method_stack + stub_code;
}
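As a rough worked example (numbers are illustrative, not taken from a real build): for a method with max_locals() == 4 and max_stack() == 6, and with extra_stack currently forced to 0, method_stack is (4 + 6 + 0) * Interpreter::stackElementWords, i.e. 10 words assuming one word per expression-stack element, so the activation needs overhead_size + 10 + 4 words in total.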
@@ -2285,7 +2285,7 @@ static int size_activation_helper(int extra_locals_size, int monitor_size) {
void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
frame* caller,
frame* current,
- methodOop method,
+ Method* method,
intptr_t* locals,
intptr_t* stack,
intptr_t* stack_base,
@@ -2333,7 +2333,7 @@ void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
// Need +1 here because stack_base points to the word just above the first expr stack entry
// and stack_limit is supposed to point to the word just below the last expr stack entry.
// See generate_compute_interpreter_state.
- int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries();
+ int extra_stack = 0; //6815692//Method::extra_stack_entries();
to_fill->_stack_limit = stack_base - (method->max_stack() + extra_stack + 1);
to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
@@ -2342,7 +2342,7 @@ void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
"Stack top out of range");
}
-int AbstractInterpreter::layout_activation(methodOop method,
+int AbstractInterpreter::layout_activation(Method* method,
int tempcount, //
int popframe_extra_args,
int moncount,
@@ -2381,7 +2381,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
monitor_size);
// Now with full size expression stack
- int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries();
+ int extra_stack = 0; //6815692//Method::extra_stack_entries();
int full_frame_size = short_frame_size + (method->max_stack() + extra_stack) * BytesPerWord;
// and now with only live portion of the expression stack
diff --git a/src/cpu/x86/vm/frame_x86.cpp b/src/cpu/x86/vm/frame_x86.cpp
index d9b21d7d9..b8ec9afa5 100644
--- a/src/cpu/x86/vm/frame_x86.cpp
+++ b/src/cpu/x86/vm/frame_x86.cpp
@@ -26,7 +26,7 @@
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
@@ -494,7 +494,7 @@ frame frame::sender(RegisterMap* map) const {
bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
assert(is_interpreted_frame(), "must be interpreter frame");
- methodOop method = interpreter_frame_method();
+ Method* method = interpreter_frame_method();
// When unpacking an optimized frame the frame pointer is
// adjusted with:
int diff = (method->max_locals() - method->size_of_parameters()) *
@@ -531,7 +531,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// first the method
- methodOop m = *interpreter_frame_method_addr();
+ Method* m = *interpreter_frame_method_addr();
// validate the method we'd find in this potential sender
if (!Universe::heap()->is_valid_method(m)) return false;
@@ -549,13 +549,9 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
return false;
}
- // validate constantPoolCacheOop
-
- constantPoolCacheOop cp = *interpreter_frame_cache_addr();
-
- if (cp == NULL ||
- !Space::is_aligned(cp) ||
- !Universe::heap()->is_permanent((void*)cp)) return false;
+ // validate ConstantPoolCache*
+ ConstantPoolCache* cp = *interpreter_frame_cache_addr();
+ if (cp == NULL || !cp->is_metadata()) return false;
// validate locals
@@ -576,7 +572,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
interpreterState istate = get_interpreterState();
#endif // CC_INTERP
assert(is_interpreted_frame(), "interpreted frame expected");
- methodOop method = interpreter_frame_method();
+ Method* method = interpreter_frame_method();
BasicType type = method->result_type();
intptr_t* tos_addr;
diff --git a/src/cpu/x86/vm/frame_x86.hpp b/src/cpu/x86/vm/frame_x86.hpp
index 01f6e6cc8..b3c3f416c 100644
--- a/src/cpu/x86/vm/frame_x86.hpp
+++ b/src/cpu/x86/vm/frame_x86.hpp
@@ -44,7 +44,7 @@
// [pointer to locals ] = locals() locals_offset
// [constant pool cache ] = cache() cache_offset
// [methodData ] = mdp() mdx_offset
-// [methodOop ] = method() method_offset
+// [Method* ] = method() method_offset
// [last sp ] = last_sp() last_sp_offset
// [old stack pointer ] (sender_sp) sender_sp_offset
// [old frame pointer ] <- fp = link()
diff --git a/src/cpu/x86/vm/frame_x86.inline.hpp b/src/cpu/x86/vm/frame_x86.inline.hpp
index 2d542eae4..55459c09d 100644
--- a/src/cpu/x86/vm/frame_x86.inline.hpp
+++ b/src/cpu/x86/vm/frame_x86.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -174,14 +174,14 @@ inline intptr_t* frame::interpreter_frame_bcx_addr() const {
// Constant pool cache
-inline constantPoolCacheOop* frame::interpreter_frame_cache_addr() const {
+inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
assert(is_interpreted_frame(), "must be interpreted");
return &(get_interpreterState()->_constants);
}
// Method
-inline methodOop* frame::interpreter_frame_method_addr() const {
+inline Method** frame::interpreter_frame_method_addr() const {
assert(is_interpreted_frame(), "must be interpreted");
return &(get_interpreterState()->_method);
}
@@ -221,14 +221,14 @@ inline intptr_t* frame::interpreter_frame_mdx_addr() const {
// Constant pool cache
-inline constantPoolCacheOop* frame::interpreter_frame_cache_addr() const {
- return (constantPoolCacheOop*)addr_at(interpreter_frame_cache_offset);
+inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
+ return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}
// Method
-inline methodOop* frame::interpreter_frame_method_addr() const {
- return (methodOop*)addr_at(interpreter_frame_method_offset);
+inline Method** frame::interpreter_frame_method_addr() const {
+ return (Method**)addr_at(interpreter_frame_method_offset);
}
// top of expression stack
diff --git a/src/cpu/x86/vm/icBuffer_x86.cpp b/src/cpu/x86/vm/icBuffer_x86.cpp
index 992a67947..ede401d4d 100644
--- a/src/cpu/x86/vm/icBuffer_x86.cpp
+++ b/src/cpu/x86/vm/icBuffer_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,16 +44,16 @@ int InlineCacheBuffer::ic_stub_code_size() {
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, oop cached_oop, address entry_point) {
+void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
ResourceMark rm;
CodeBuffer code(code_begin, ic_stub_code_size());
MacroAssembler* masm = new MacroAssembler(&code);
- // note: even though the code contains an embedded oop, we do not need reloc info
+ // note: even though the code contains an embedded value, we do not need reloc info
// because
- // (1) the oop is old (i.e., doesn't matter for scavenges)
+ // (1) the value is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear
- assert(cached_oop == NULL || cached_oop->is_perm(), "must be perm oop");
- masm->lea(rax, OopAddress((address) cached_oop));
+ // assert(cached_value == NULL || cached_oop->is_perm(), "must be perm oop");
+ masm->lea(rax, AddressLiteral((address) cached_value, relocInfo::metadata_type));
masm->jump(ExternalAddress(entry_point));
}
@@ -65,10 +65,11 @@ address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
}
-oop InlineCacheBuffer::ic_buffer_cached_oop(address code_begin) {
+void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
// creation also verifies the object
NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
// Verifies the jump
NativeJump* jump = nativeJump_at(move->next_instruction_address());
- return (oop)move->data();
+ void* o = (void*)move->data();
+ return o;
}
diff --git a/src/cpu/x86/vm/interp_masm_x86_32.cpp b/src/cpu/x86/vm/interp_masm_x86_32.cpp
index b0ebcfd3b..9a54ac1ae 100644
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,8 +28,8 @@
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
-#include "oops/methodDataOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
@@ -223,10 +223,9 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_off
// Check if the secondary index definition is still ~x, otherwise
// we have to change the following assembler code to calculate the
// plain index.
- assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
+ assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
notl(reg); // convert to plain index
} else if (index_size == sizeof(u1)) {
- assert(EnableInvokeDynamic, "tiny index used only for JSR 292");
load_unsigned_byte(reg, Address(rsi, bcp_offset));
} else {
ShouldNotReachHere();
@@ -240,6 +239,7 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
get_cache_index_at_bcp(index, bcp_offset, index_size);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
+ assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line");
shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index
}
@@ -251,7 +251,7 @@ void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register
int bcp_offset,
size_t index_size) {
get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
- movptr(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+ movptr(bytecode, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
const int shift_count = (1 + byte_no) * BitsPerByte;
assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
(byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
@@ -269,13 +269,32 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, R
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
+ assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
shll(tmp, 2 + LogBytesPerWord);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
// skip past the header
- addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
+ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
addptr(cache, tmp); // construct pointer to cache entry
}
+// Load object from cpool->resolved_references(index)
+void InterpreterMacroAssembler::load_resolved_reference_at_index(
+ Register result, Register index) {
+ assert_different_registers(result, index);
+ // convert from field index to resolved_references() index and from
+ // word index to byte offset. Since this is a java object, it can be compressed
+ Register tmp = index; // reuse
+ shll(tmp, LogBytesPerHeapOop);
+
+ get_constant_pool(result);
+ // load pointer for resolved_references[] objArray
+ movptr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
+ // JNIHandles::resolve(obj);
+ movptr(result, Address(result, 0));
+ // Add in the index
+ addptr(result, tmp);
+ load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+}
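A minimal C++ sketch of what the load sequence above computes; the accessor names are assumptions for illustration, not quotations from the VM headers:

  // Hypothetical equivalent of load_resolved_reference_at_index.
  oop resolved_reference_at(ConstantPool* cp, int index) {
    // the resolved_references field is a JNI handle to an objArray in the Java heap
    oop refs = JNIHandles::resolve(cp->resolved_references());   // assumed accessor
    // element address = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + index * heapOopSize
    return ((objArrayOop)refs)->obj_at(index);
  }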
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. EAX holds the super_klass. Blows ECX.
@@ -427,11 +446,11 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register
// Is a cmpl faster?
cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
jccb(Assembler::zero, run_compiled_code);
- jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
+ jmp(Address(method, Method::interpreter_entry_offset()));
bind(run_compiled_code);
}
- jmp(Address(method, methodOopDesc::from_interpreted_offset()));
+ jmp(Address(method, Method::from_interpreted_offset()));
}
@@ -526,7 +545,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
movbool(do_not_unlock_if_synchronized, false); // reset the flag
movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); // get method access flags
- movl(rcx, Address(rbx, methodOopDesc::access_flags_offset()));
+ movl(rcx, Address(rbx, Method::access_flags_offset()));
testl(rcx, JVM_ACC_SYNCHRONIZED);
jcc(Assembler::zero, unlocked);
@@ -820,7 +839,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
get_method(rbx);
// Test MDO to avoid the call if it is NULL.
- movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+ movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
testptr(rax, rax);
jcc(Assembler::zero, set_mdp);
// rbx,: method
@@ -828,8 +847,8 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, rsi);
// rax,: mdi
// mdo is guaranteed to be non-zero here, we checked for it before the call.
- movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
- addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
+ movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset())));
+ addptr(rbx, in_bytes(MethodData::data_offset()));
addptr(rax, rbx);
bind(set_mdp);
movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
@@ -851,8 +870,8 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
// If the mdp is valid, it will point to a DataLayout header which is
// consistent with the bcp. The converse is highly probable also.
load_unsigned_short(rdx, Address(rcx, in_bytes(DataLayout::bci_offset())));
- addptr(rdx, Address(rbx, methodOopDesc::const_offset()));
- lea(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
+ addptr(rdx, Address(rbx, Method::const_offset()));
+ lea(rdx, Address(rdx, ConstMethod::codes_offset()));
cmpptr(rdx, rsi);
jcc(Assembler::equal, verify_continue);
// rbx,: method
diff --git a/src/cpu/x86/vm/interp_masm_x86_32.hpp b/src/cpu/x86/vm/interp_masm_x86_32.hpp
index 458325765..7f8463a9a 100644
--- a/src/cpu/x86/vm/interp_masm_x86_32.hpp
+++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp
@@ -77,10 +77,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Helpers for runtime call arguments/results
void get_method(Register reg) { movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); }
- void get_const(Register reg) { get_method(reg); movptr(reg, Address(reg, methodOopDesc::const_offset())); }
- void get_constant_pool(Register reg) { get_const(reg); movptr(reg, Address(reg, constMethodOopDesc::constants_offset())); }
- void get_constant_pool_cache(Register reg) { get_constant_pool(reg); movptr(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes())); }
- void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
+ void get_const(Register reg) { get_method(reg); movptr(reg, Address(reg, Method::const_offset())); }
+ void get_constant_pool(Register reg) { get_const(reg); movptr(reg, Address(reg, ConstMethod::constants_offset())); }
+ void get_constant_pool_cache(Register reg) { get_constant_pool(reg); movptr(reg, Address(reg, ConstantPool::cache_offset_in_bytes())); }
+ void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, ConstantPool::tags_offset_in_bytes()));
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
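These helpers simply chain the metadata pointer walk introduced by this change; in C++ terms it is roughly the following (a sketch only, with conventional accessor names assumed):

  // Hypothetical mirror of get_constant_pool_cache() above.
  static ConstantPoolCache* cache_for(Method* m) {
    ConstMethod*  cm = m->constMethod();   // Method::const_offset()
    ConstantPool* cp = cm->constants();    // ConstMethod::constants_offset()
    return cp->cache();                    // ConstantPool::cache_offset_in_bytes()
  }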
@@ -88,6 +88,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
+ // load cpool->resolved_references(index);
+ void load_resolved_reference_at_index(Register result, Register index);
+
// Expression stack
void f2ieee(); // truncate ftos to 32bits
void d2ieee(); // truncate dtos to 64bits
diff --git a/src/cpu/x86/vm/interp_masm_x86_64.cpp b/src/cpu/x86/vm/interp_masm_x86_64.cpp
index 2790c2a89..856f017f7 100644
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,8 +28,8 @@
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
-#include "oops/methodDataOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
@@ -221,10 +221,9 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
// Check if the secondary index definition is still ~x, otherwise
// we have to change the following assembler code to calculate the
// plain index.
- assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
+ assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
notl(index); // convert to plain index
} else if (index_size == sizeof(u1)) {
- assert(EnableInvokeDynamic, "tiny index used only for JSR 292");
load_unsigned_byte(index, Address(r13, bcp_offset));
} else {
ShouldNotReachHere();
@@ -241,6 +240,7 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
+ assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line");
shll(index, 2);
}
@@ -254,7 +254,7 @@ void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register
get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
// We use a 32-bit load here since the layout of 64-bit words on
// little-endian machines allows us to do so.
- movl(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+ movl(bytecode, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
const int shift_count = (1 + byte_no) * BitsPerByte;
assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
(byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
@@ -274,13 +274,32 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
+ assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
shll(tmp, 2 + LogBytesPerWord);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
// skip past the header
- addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
+ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
addptr(cache, tmp); // construct pointer to cache entry
}
+// Load object from cpool->resolved_references(index)
+void InterpreterMacroAssembler::load_resolved_reference_at_index(
+ Register result, Register index) {
+ assert_different_registers(result, index);
+ // convert from field index to resolved_references() index and from
+ // word index to byte offset. Since this is a java object, it can be compressed
+ Register tmp = index; // reuse
+ shll(tmp, LogBytesPerHeapOop);
+
+ get_constant_pool(result);
+ // load pointer for resolved_references[] objArray
+ movptr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));
+ // JNIHandles::resolve(obj);
+ movptr(result, Address(result, 0));
+ // Add in the index
+ addptr(result, tmp);
+ load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+}
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
@@ -426,11 +445,11 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register
// Is a cmpl faster?
cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
jccb(Assembler::zero, run_compiled_code);
- jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
+ jmp(Address(method, Method::interpreter_entry_offset()));
bind(run_compiled_code);
}
- jmp(Address(method, methodOopDesc::from_interpreted_offset()));
+ jmp(Address(method, Method::from_interpreted_offset()));
}
@@ -526,7 +545,7 @@ void InterpreterMacroAssembler::remove_activation(
// get method access flags
movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
- movl(rcx, Address(rbx, methodOopDesc::access_flags_offset()));
+ movl(rcx, Address(rbx, Method::access_flags_offset()));
testl(rcx, JVM_ACC_SYNCHRONIZED);
jcc(Assembler::zero, unlocked);
@@ -834,7 +853,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
get_method(rbx);
// Test MDO to avoid the call if it is NULL.
- movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+ movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
testptr(rax, rax);
jcc(Assembler::zero, set_mdp);
// rbx: method
@@ -842,8 +861,8 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13);
// rax: mdi
// mdo is guaranteed to be non-zero here, we checked for it before the call.
- movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
- addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
+ movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset())));
+ addptr(rbx, in_bytes(MethodData::data_offset()));
addptr(rax, rbx);
bind(set_mdp);
movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
@@ -866,8 +885,8 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
// consistent with the bcp. The converse is highly probable also.
load_unsigned_short(c_rarg2,
Address(c_rarg3, in_bytes(DataLayout::bci_offset())));
- addptr(c_rarg2, Address(rbx, methodOopDesc::const_offset()));
- lea(c_rarg2, Address(c_rarg2, constMethodOopDesc::codes_offset()));
+ addptr(c_rarg2, Address(rbx, Method::const_offset()));
+ lea(c_rarg2, Address(c_rarg2, ConstMethod::codes_offset()));
cmpptr(c_rarg2, r13);
jcc(Assembler::equal, verify_continue);
// rbx: method
diff --git a/src/cpu/x86/vm/interp_masm_x86_64.hpp b/src/cpu/x86/vm/interp_masm_x86_64.hpp
index 8486c348b..eb8c4f0a4 100644
--- a/src/cpu/x86/vm/interp_masm_x86_64.hpp
+++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp
@@ -86,22 +86,22 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_const(Register reg) {
get_method(reg);
- movptr(reg, Address(reg, methodOopDesc::const_offset()));
+ movptr(reg, Address(reg, Method::const_offset()));
}
void get_constant_pool(Register reg) {
get_const(reg);
- movptr(reg, Address(reg, constMethodOopDesc::constants_offset()));
+ movptr(reg, Address(reg, ConstMethod::constants_offset()));
}
void get_constant_pool_cache(Register reg) {
get_constant_pool(reg);
- movptr(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes()));
+ movptr(reg, Address(reg, ConstantPool::cache_offset_in_bytes()));
}
void get_cpool_and_tags(Register cpool, Register tags) {
get_constant_pool(cpool);
- movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
+ movptr(tags, Address(cpool, ConstantPool::tags_offset_in_bytes()));
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
@@ -110,6 +110,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
+ // load cpool->resolved_references(index);
+ void load_resolved_reference_at_index(Register result, Register index);
+
void pop_ptr(Register r = rax);
void pop_i(Register r = rax);
void pop_l(Register r = rax);
diff --git a/src/cpu/x86/vm/interpreterRT_x86_32.cpp b/src/cpu/x86/vm/interpreterRT_x86_32.cpp
index 05b3ded09..eb66640c3 100644
--- a/src/cpu/x86/vm/interpreterRT_x86_32.cpp
+++ b/src/cpu/x86/vm/interpreterRT_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.inline.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
@@ -131,8 +131,8 @@ class SlowSignatureHandler: public NativeSignatureIterator {
}
};
-IRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(JavaThread* thread, methodOopDesc* method, intptr_t* from, intptr_t* to))
- methodHandle m(thread, (methodOop)method);
+IRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(JavaThread* thread, Method* method, intptr_t* from, intptr_t* to))
+ methodHandle m(thread, (Method*)method);
assert(m->is_native(), "sanity check");
// handle arguments
SlowSignatureHandler(m, (address)from, to + 1).iterate(UCONST64(-1));
diff --git a/src/cpu/x86/vm/interpreterRT_x86_64.cpp b/src/cpu/x86/vm/interpreterRT_x86_64.cpp
index c3a1f5413..959ed6e32 100644
--- a/src/cpu/x86/vm/interpreterRT_x86_64.cpp
+++ b/src/cpu/x86/vm/interpreterRT_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.inline.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
@@ -480,10 +480,10 @@ class SlowSignatureHandler
IRT_ENTRY(address,
InterpreterRuntime::slow_signature_handler(JavaThread* thread,
- methodOopDesc* method,
+ Method* method,
intptr_t* from,
intptr_t* to))
- methodHandle m(thread, (methodOop)method);
+ methodHandle m(thread, (Method*)method);
assert(m->is_native(), "sanity check");
// handle arguments
diff --git a/src/cpu/x86/vm/interpreter_x86_32.cpp b/src/cpu/x86/vm/interpreter_x86_32.cpp
index ddc4e6112..bed8137e4 100644
--- a/src/cpu/x86/vm/interpreter_x86_32.cpp
+++ b/src/cpu/x86/vm/interpreter_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,8 +30,8 @@
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
-#include "oops/methodDataOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
@@ -76,7 +76,7 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
address InterpreterGenerator::generate_empty_entry(void) {
- // rbx,: methodOop
+ // rbx,: Method*
// rcx: receiver (unused)
// rsi: previous interpreter state (C++ interpreter) must preserve
// rsi: sender sp; must set sp to this value on return
@@ -107,7 +107,7 @@ address InterpreterGenerator::generate_empty_entry(void) {
address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
- // rbx,: methodOop
+ // rbx,: Method*
// rcx: scratch
// rsi: sender sp
@@ -219,7 +219,7 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {
- // rbx,: methodOop
+ // rbx,: Method*
// rcx: receiver (unused)
// rsi: previous interpreter state (C++ interpreter) must preserve
diff --git a/src/cpu/x86/vm/interpreter_x86_64.cpp b/src/cpu/x86/vm/interpreter_x86_64.cpp
index e86c13c02..3b0a6b445 100644
--- a/src/cpu/x86/vm/interpreter_x86_64.cpp
+++ b/src/cpu/x86/vm/interpreter_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,8 +30,8 @@
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
-#include "oops/methodDataOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
@@ -92,7 +92,7 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// Do Int register here
switch ( i ) {
case 0:
- __ movl(rscratch1, Address(rbx, methodOopDesc::access_flags_offset()));
+ __ movl(rscratch1, Address(rbx, Method::access_flags_offset()));
__ testl(rscratch1, JVM_ACC_STATIC);
__ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
break;
@@ -177,7 +177,7 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
}
// Now handle integrals. Only do c_rarg1 if not static.
- __ movl(c_rarg3, Address(rbx, methodOopDesc::access_flags_offset()));
+ __ movl(c_rarg3, Address(rbx, Method::access_flags_offset()));
__ testl(c_rarg3, JVM_ACC_STATIC);
__ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
@@ -202,7 +202,7 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
- // rbx,: methodOop
+ // rbx,: Method*
// rcx: scratch
// r13: sender sp
@@ -303,7 +303,7 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {
- // rbx: methodOop
+ // rbx: Method*
// r13: sender SP
address entry_point = __ pc();
@@ -328,7 +328,7 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// Empty method, generate a very fast return.
address InterpreterGenerator::generate_empty_entry(void) {
- // rbx: methodOop
+ // rbx: Method*
// r13: sender sp; must set sp to this value on return
if (!UseFastEmptyMethods) {
diff --git a/src/cpu/x86/vm/dump_x86_32.cpp b/src/cpu/x86/vm/metaspaceShared_x86_32.cpp
index a9e9ff0d9..a43fafdd3 100644
--- a/src/cpu/x86/vm/dump_x86_32.cpp
+++ b/src/cpu/x86/vm/metaspaceShared_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,11 +24,7 @@
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
-#include "memory/compactingPermGenGen.hpp"
-#include "memory/generation.inline.hpp"
-#include "memory/space.inline.hpp"
-
-
+#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
@@ -36,7 +32,7 @@
// the Klass itself as the first argument. Example:
//
// oop obj;
-// int size = obj->klass()->klass_part()->oop_size(this);
+// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
@@ -47,14 +43,14 @@
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no releationship
+// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.
#define __ masm->
-void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
+void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
diff --git a/src/cpu/x86/vm/dump_x86_64.cpp b/src/cpu/x86/vm/metaspaceShared_x86_64.cpp
index b74d2ed72..2ef2abf6a 100644
--- a/src/cpu/x86/vm/dump_x86_64.cpp
+++ b/src/cpu/x86/vm/metaspaceShared_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,11 +24,7 @@
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
-#include "memory/compactingPermGenGen.hpp"
-#include "memory/generation.inline.hpp"
-#include "memory/space.inline.hpp"
-
-
+#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
@@ -36,7 +32,7 @@
// the Klass itself as the first argument. Example:
//
// oop obj;
-// int size = obj->klass()->klass_part()->oop_size(this);
+// int size = obj->klass()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
@@ -47,14 +43,14 @@
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
-// differing only by an ordinal constant, and they bear no releationship
+// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there needs
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtable_list_size' original Klass objects.
#define __ masm->
-void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
+void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
diff --git a/src/cpu/x86/vm/methodHandles_x86.cpp b/src/cpu/x86/vm/methodHandles_x86.cpp
index 03cb0c5b9..f6a04b2c4 100644
--- a/src/cpu/x86/vm/methodHandles_x86.cpp
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp
@@ -47,9 +47,9 @@ static RegisterOrConstant constant(int value) {
void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
if (VerifyMethodHandles)
- verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(),
+ verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
"MH argument is a Class");
- __ load_heap_oop(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
+ __ movptr(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
}
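With class metadata moved out of the Java heap, the klass field of a java.lang.Class mirror holds a raw Klass* rather than a (possibly compressed) oop, which is why the load above switched from load_heap_oop to a plain movptr. A hedged C++ sketch of the same access (the helper is purely illustrative):

  // Hypothetical: read the Klass* slot out of a java.lang.Class mirror.
  static Klass* klass_from_mirror(oop mirror) {
    return *(Klass**)((address)mirror + java_lang_Class::klass_offset_in_bytes());
  }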
#ifdef ASSERT
@@ -64,12 +64,10 @@ static int check_nonzero(const char* xname, int x) {
#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
- Register obj, KlassHandle klass,
+ Register obj, SystemDictionary::WKID klass_id,
const char* error_message) {
- oop* klass_addr = klass.raw_value();
- assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() &&
- klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(),
- "must be one of the SystemDictionaryHandles");
+ Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
+ KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Register temp = rdi;
Register temp2 = noreg;
LP64_ONLY(temp2 = rscratch1); // used by MacroAssembler::cmpptr
@@ -137,12 +135,12 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
// Is a cmpl faster?
__ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
__ jccb(Assembler::zero, run_compiled_code);
- __ jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
+ __ jmp(Address(method, Method::interpreter_entry_offset()));
__ BIND(run_compiled_code);
}
- const ByteSize entry_offset = for_compiler_entry ? methodOopDesc::from_compiled_offset() :
- methodOopDesc::from_interpreted_offset();
+ const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
+ Method::from_interpreted_offset();
__ jmp(Address(method, entry_offset));
}
@@ -165,16 +163,15 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ verify_oop(method_temp);
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
__ verify_oop(method_temp);
- // the following assumes that a methodOop is normally compressed in the vmtarget field:
- __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
- __ verify_oop(method_temp);
+ // the following assumes that a Method* is normally compressed in the vmtarget field:
+ __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
__ load_sized_value(temp2,
- Address(method_temp, methodOopDesc::size_of_parameters_offset()),
+ Address(method_temp, Method::size_of_parameters_offset()),
sizeof(u2), /*is_signed*/ false);
- // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), "");
+ // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
Label L;
__ cmpptr(recv, __ argument_address(temp2, -1));
__ jcc(Assembler::equal, L);
@@ -203,7 +200,7 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
}
// rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
- // rbx: methodOop
+ // rbx: Method*
// rdx: argument locator (parameter slot count, added to rsp)
// rcx: used as temp to hold mh or receiver
// rax, rdi: garbage temps, blown away
@@ -221,14 +218,14 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
if (VerifyMethodHandles) {
Label L;
BLOCK_COMMENT("verify_intrinsic_id {");
- __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) iid);
+ __ cmpb(Address(rbx_method, Method::intrinsic_id_offset_in_bytes()), (int) iid);
__ jcc(Assembler::equal, L);
if (iid == vmIntrinsics::_linkToVirtual ||
iid == vmIntrinsics::_linkToSpecial) {
// could do this for all kinds, but would explode assembly code size
- trace_method_handle(_masm, "bad methodOop::intrinsic_id");
+ trace_method_handle(_masm, "bad Method*::intrinsic_id");
}
- __ STOP("bad methodOop::intrinsic_id");
+ __ STOP("bad Method*::intrinsic_id");
__ bind(L);
BLOCK_COMMENT("} verify_intrinsic_id");
}
@@ -239,9 +236,9 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
__ load_sized_value(rdx_argp,
- Address(rbx_method, methodOopDesc::size_of_parameters_offset()),
+ Address(rbx_method, Method::size_of_parameters_offset()),
sizeof(u2), /*is_signed*/ false);
- // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), "");
+ // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
} else {
DEBUG_ONLY(rdx_argp = noreg);
@@ -343,7 +340,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
// The method is a member invoker used by direct method handles.
if (VerifyMethodHandles) {
// make sure the trailing argument really is a MemberName (caller responsibility)
- verify_klass(_masm, member_reg, SystemDictionaryHandles::MemberName_klass(),
+ verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
"MemberName required for invokeVirtual etc.");
}
@@ -401,7 +398,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
- __ load_heap_oop(rbx_method, member_vmtarget);
+ __ movptr(rbx_method, member_vmtarget);
method_is_live = true;
break;
@@ -409,7 +406,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
- __ load_heap_oop(rbx_method, member_vmtarget);
+ __ movptr(rbx_method, member_vmtarget);
method_is_live = true;
break;
@@ -437,7 +434,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
// Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget
// at this point. And VerifyMethodHandles has already checked clazz, if needed.
- // get target methodOop & entry point
+ // get target Method* & entry point
__ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
method_is_live = true;
break;
@@ -653,4 +650,3 @@ void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adapt
BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT
-
diff --git a/src/cpu/x86/vm/methodHandles_x86.hpp b/src/cpu/x86/vm/methodHandles_x86.hpp
index 0692da620..62342eefb 100644
--- a/src/cpu/x86/vm/methodHandles_x86.hpp
+++ b/src/cpu/x86/vm/methodHandles_x86.hpp
@@ -35,11 +35,11 @@ public:
static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg);
static void verify_klass(MacroAssembler* _masm,
- Register obj, KlassHandle klass,
+ Register obj, SystemDictionary::WKID klass_id,
const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
static void verify_method_handle(MacroAssembler* _masm, Register mh_reg) {
- verify_klass(_masm, mh_reg, SystemDictionaryHandles::MethodHandle_klass(),
+ verify_klass(_masm, mh_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MethodHandle),
"reference is a MH");
}
diff --git a/src/cpu/x86/vm/relocInfo_x86.cpp b/src/cpu/x86/vm/relocInfo_x86.cpp
index 67c98e26c..1023695e8 100644
--- a/src/cpu/x86/vm/relocInfo_x86.cpp
+++ b/src/cpu/x86/vm/relocInfo_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,12 +47,21 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
}
} else if (which == Assembler::narrow_oop_operand) {
address disp = Assembler::locate_operand(addr(), which);
+ // both compressed oops and compressed classes look the same
+ if (Universe::heap()->is_in_reserved((oop)x)) {
if (verify_only) {
assert(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
} else {
*(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
}
} else {
+ if (verify_only) {
+ assert(*(uint32_t*) disp == oopDesc::encode_klass((Klass*)x), "instructions must match");
+ } else {
+ *(int32_t*) disp = oopDesc::encode_klass((Klass*)x);
+ }
+ }
+ } else {
// Note: Use runtime_call_type relocations for call32_operand.
address ip = addr();
address disp = Assembler::locate_operand(ip, which);
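In the narrow-operand branch above, a compressed oop and a compressed Klass* look identical at the instruction level, so the relocation code decides which encoder to apply by asking whether the target lies in the reserved Java heap. A minimal sketch of that decision, reusing the names from the hunk (only the wrapper function is hypothetical):

  // Hypothetical wrapper around the choice made above.
  static unsigned int encode_narrow_value(address x) {
    if (Universe::heap()->is_in_reserved((oop)x)) {
      return oopDesc::encode_heap_oop((oop)x);   // a compressed Java heap oop
    } else {
      return oopDesc::encode_klass((Klass*)x);   // a compressed Klass* in metaspace
    }
  }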
@@ -239,3 +248,6 @@ void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, Co
}
#endif // _LP64
}
+
+void metadata_Relocation::pd_fix_value(address x) {
+}
diff --git a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
index 1f2503164..3bf6763f5 100644
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
@@ -29,7 +29,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
-#include "oops/compiledICHolderOop.hpp"
+#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
@@ -453,8 +453,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
// Patch the callers callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
Label L;
- __ verify_oop(rbx);
- __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
+ __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
// Schedule the branch target address early.
// Call into the VM to patch the caller, then jump to compiled callee
@@ -486,7 +485,6 @@ static void patch_callers_callsite(MacroAssembler *masm) {
__ push(rax);
// VM needs target method
__ push(rbx);
- __ verify_oop(rbx);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
__ addptr(rsp, 2*wordSize);
@@ -631,7 +629,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
}
// Schedule the branch target address early.
- __ movptr(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
+ __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
// And repush original return address
__ push(rax);
__ jmp(rcx);
@@ -746,7 +744,7 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// Will jump to the compiled code just as if compiled code was doing it.
// Pre-load the register-jump target early, to schedule it better.
- __ movptr(rdi, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
+ __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
// Now generate the shuffle code. Pick up all register args and move the
// rest through the floating point stack top.
@@ -859,8 +857,8 @@ static void gen_i2c_adapter(MacroAssembler *masm,
__ get_thread(rax);
__ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
- // move methodOop to rax, in case we end up in an c2i adapter.
- // the c2i adapters expect methodOop in rax, (c2) because c2's
+ // move Method* to rax, in case we end up in a c2i adapter.
+ // the c2i adapters expect Method* in rax, (c2) because c2's
// resolve stubs return the result (the method) in rax,.
// I'd love to fix this.
__ mov(rax, rbx);
@@ -880,7 +878,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
// -------------------------------------------------------------------------
- // Generate a C2I adapter. On entry we know rbx, holds the methodOop during calls
+ // Generate a C2I adapter. On entry we know rbx, holds the Method* during calls
// to the interpreter. The args start out packed in the compiled layout. They
// need to be unpacked into the interpreter layout. This will almost always
// require some stack space. We grow the current (compiled) stack, then repack
@@ -898,18 +896,14 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
{
Label missed;
-
- __ verify_oop(holder);
__ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
- __ verify_oop(temp);
-
- __ cmpptr(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
- __ movptr(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
+ __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
+ __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
__ jcc(Assembler::notEqual, missed);
// Method might have been compiled since the call site was patched to
// interpreted if that is the case treat it as a miss so we can get
// the call site corrected.
- __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
+ __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, skip_fixup);
__ bind(missed);
@@ -1918,7 +1912,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
{
SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
- __ movoop(rax, JNIHandles::make_local(method()));
+ __ mov_metadata(rax, method());
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
thread, rax);
@@ -1926,7 +1920,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// RedefineClasses() tracing support for obsolete method entry
if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
- __ movoop(rax, JNIHandles::make_local(method()));
+ __ mov_metadata(rax, method());
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
thread, rax);
@@ -2184,7 +2178,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
// Tell dtrace about this method exit
save_native_result(masm, ret_type, stack_slots);
- __ movoop(rax, JNIHandles::make_local(method()));
+ __ mov_metadata(rax, method());
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
thread, rax);
@@ -3427,8 +3421,8 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
__ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, pending);
- // get the returned methodOop
- __ movptr(rbx, Address(thread, JavaThread::vm_result_offset()));
+ // get the returned Method*
+ __ get_vm_result_2(rbx, thread);
__ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
__ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
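
With class metadata out of the Java heap, the method handed back by the resolve blob is no longer an oop, so it no longer travels through JavaThread::vm_result; the stub fetches it through the get_vm_result_2 helper, and the DTrace/RedefineClasses tracing paths above pass the raw Method* with mov_metadata instead of wrapping it in a JNI local handle. A rough standalone sketch of the two-slot, take-and-clear pattern follows; FakeThread and its field names are made up for illustration and do not match the real JavaThread layout.

#include <cstdio>

// Illustrative only: one slot for oop results, a second for metadata
// results such as a Method*, each cleared as it is read.
struct FakeThread {
  void* vm_result;    // oop-style result
  void* vm_result_2;  // metadata-style result

  void* take_vm_result()   { void* r = vm_result;   vm_result   = nullptr; return r; }
  void* take_vm_result_2() { void* r = vm_result_2; vm_result_2 = nullptr; return r; }
};

int main() {
  int fake_method = 0;                       // stands in for a Method*
  FakeThread t = { nullptr, &fake_method };
  void* m = t.take_vm_result_2();            // read once, slot is cleared
  printf("got=%d cleared=%d\n", m == &fake_method, t.vm_result_2 == nullptr);
  return 0;
}

The matching oop-side helper shows up again further down, where the template interpreters switch to get_vm_result when restoring a pending exception.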
diff --git a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
index 11180eeae..b605c4af6 100644
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
@@ -29,7 +29,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
-#include "oops/compiledICHolderOop.hpp"
+#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
@@ -413,8 +413,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
// Patch the callers callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
Label L;
- __ verify_oop(rbx);
- __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
+ __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
// Save the current stack pointer
@@ -428,8 +427,6 @@ static void patch_callers_callsite(MacroAssembler *masm) {
__ andptr(rsp, -(StackAlignmentInBytes));
__ push_CPU_state();
-
- __ verify_oop(rbx);
// VM needs caller's callsite
// VM needs target method
// This needs to be a long call since we will relocate this adapter to
@@ -586,7 +583,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
}
// Schedule the branch target address early.
- __ movptr(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
+ __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
__ jmp(rcx);
}
@@ -698,7 +695,7 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// Will jump to the compiled code just as if compiled code was doing it.
// Pre-load the register-jump target early, to schedule it better.
- __ movptr(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
+ __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
// Now generate the shuffle code. Pick up all register args and move the
// rest through the floating point stack top.
@@ -793,8 +790,8 @@ static void gen_i2c_adapter(MacroAssembler *masm,
__ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
- // put methodOop where a c2i would expect should we end up there
- // only needed becaus eof c2 resolve stubs return methodOop as a result in
+ // put Method* where a c2i would expect it, should we end up there
+ // only needed because c2's resolve stubs return Method* as a result in
// rax
__ mov(rax, rbx);
__ jmp(r11);
@@ -812,7 +809,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
// -------------------------------------------------------------------------
- // Generate a C2I adapter. On entry we know rbx holds the methodOop during calls
+ // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
// to the interpreter. The args start out packed in the compiled layout. They
// need to be unpacked into the interpreter layout. This will almost always
// require some stack space. We grow the current (compiled) stack, then repack
@@ -829,12 +826,9 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Register temp = rbx;
{
- __ verify_oop(holder);
__ load_klass(temp, receiver);
- __ verify_oop(temp);
-
- __ cmpptr(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
- __ movptr(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
+ __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
+ __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
__ jcc(Assembler::equal, ok);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
@@ -842,7 +836,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
// Method might have been compiled since the call site was patched to
// interpreted if that is the case treat it as a miss so we can get
// the call site corrected.
- __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
+ __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, skip_fixup);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
}
@@ -2184,7 +2178,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
SkipIfEqual skip(masm, &DTraceMethodProbes, false);
// protect the args we've loaded
save_args(masm, total_c_args, c_arg, out_regs);
- __ movoop(c_rarg1, JNIHandles::make_local(method()));
+ __ mov_metadata(c_rarg1, method());
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
r15_thread, c_rarg1);
@@ -2195,7 +2189,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
// protect the args we've loaded
save_args(masm, total_c_args, c_arg, out_regs);
- __ movoop(c_rarg1, JNIHandles::make_local(method()));
+ __ mov_metadata(c_rarg1, method());
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
r15_thread, c_rarg1);
@@ -2448,7 +2442,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
{
SkipIfEqual skip(masm, &DTraceMethodProbes, false);
save_native_result(masm, ret_type, stack_slots);
- __ movoop(c_rarg1, JNIHandles::make_local(method()));
+ __ mov_metadata(c_rarg1, method());
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
r15_thread, c_rarg1);
@@ -3879,8 +3873,8 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
__ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, pending);
- // get the returned methodOop
- __ movptr(rbx, Address(r15_thread, JavaThread::vm_result_offset()));
+ // get the returned Method*
+ __ get_vm_result_2(rbx, r15_thread);
__ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
__ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
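
In both c2i adapter generators the inline-cache holder stops being a heap-allocated compiledICHolderOop and becomes a plain native CompiledICHolder, so the verify_oop calls disappear and the receiver's klass is compared directly against the cached holder_klass. The snippet below is a loose standalone analogue of that hit/miss test; the struct and function names are invented for illustration and are not HotSpot types.

#include <cstdio>

struct Klass  { const char* name; };
struct Method { Klass* holder; const char* name; };

// Stand-in for CompiledICHolder: the klass the call site was resolved
// against, plus the method to invoke while the receiver still matches.
struct ICHolder {
  Klass*  holder_klass;
  Method* holder_method;
};

// Returns the cached method on a hit; nullptr signals an inline-cache miss
// (the real adapter would jump to SharedRuntime::get_ic_miss_stub()).
Method* ic_check(const ICHolder* holder, const Klass* receiver_klass) {
  if (receiver_klass != holder->holder_klass) return nullptr;   // miss
  return holder->holder_method;                                 // hit
}

int main() {
  Klass a = { "A" }, b = { "B" };
  Method m = { &a, "foo" };
  ICHolder h = { &a, &m };
  printf("hit=%d miss=%d\n", ic_check(&h, &a) == &m, ic_check(&h, &b) == nullptr);
  return 0;
}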
diff --git a/src/cpu/x86/vm/stubGenerator_x86_32.cpp b/src/cpu/x86/vm/stubGenerator_x86_32.cpp
index aff25c834..8a9de37e1 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
@@ -234,7 +234,7 @@ class StubGenerator: public StubCodeGenerator {
// call Java function
__ BIND(parameters_done);
- __ movptr(rbx, method); // get methodOop
+ __ movptr(rbx, method); // get Method*
__ movptr(rax, entry_point); // get entry_point
__ mov(rsi, rsp); // set sender sp
BLOCK_COMMENT("call Java function");
@@ -682,29 +682,11 @@ class StubGenerator: public StubCodeGenerator {
__ cmpptr(rdx, oop_bits);
__ jcc(Assembler::notZero, error);
- // make sure klass is 'reasonable'
+ // make sure klass is 'reasonable', i.e. non-zero.
__ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
__ testptr(rax, rax);
__ jcc(Assembler::zero, error); // if klass is NULL it is broken
-
- // Check if the klass is in the right area of memory
- const int klass_mask = Universe::verify_klass_mask();
- const int klass_bits = Universe::verify_klass_bits();
- __ mov(rdx, rax);
- __ andptr(rdx, klass_mask);
- __ cmpptr(rdx, klass_bits);
- __ jcc(Assembler::notZero, error);
-
- // make sure klass' klass is 'reasonable'
- __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass' klass
- __ testptr(rax, rax);
- __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
-
- __ mov(rdx, rax);
- __ andptr(rdx, klass_mask);
- __ cmpptr(rdx, klass_bits);
- __ jcc(Assembler::notZero, error); // if klass not in right area
- // of memory it is broken too.
+ // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
// return if everything seems ok
__ bind(exit);
diff --git a/src/cpu/x86/vm/stubGenerator_x86_64.cpp b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
index 3e4d5be2c..de60df86f 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
@@ -109,7 +109,7 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg0: call wrapper address address
// c_rarg1: result address
// c_rarg2: result type BasicType
- // c_rarg3: method methodOop
+ // c_rarg3: method Method*
// c_rarg4: (interpreter) entry point address
// c_rarg5: parameters intptr_t*
// 16(rbp): parameter size (in words) int
@@ -139,7 +139,7 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg0: call wrapper address address
// c_rarg1: result address
// c_rarg2: result type BasicType
- // c_rarg3: method methodOop
+ // c_rarg3: method Method*
// 48(rbp): (interpreter) entry point address
// 56(rbp): parameters intptr_t*
// 64(rbp): parameter size (in words) int
@@ -332,7 +332,7 @@ class StubGenerator: public StubCodeGenerator {
// call Java function
__ BIND(parameters_done);
- __ movptr(rbx, method); // get methodOop
+ __ movptr(rbx, method); // get Method*
__ movptr(c_rarg1, entry_point); // get entry_point
__ mov(r13, rsp); // set sender sp
BLOCK_COMMENT("call Java function");
@@ -1027,28 +1027,11 @@ class StubGenerator: public StubCodeGenerator {
// set r12 to heapbase for load_klass()
__ reinit_heapbase();
- // make sure klass is 'reasonable'
+ // make sure klass is 'reasonable', i.e. non-zero.
__ load_klass(rax, rax); // get klass
__ testptr(rax, rax);
__ jcc(Assembler::zero, error); // if klass is NULL it is broken
- // Check if the klass is in the right area of memory
- __ mov(c_rarg2, rax);
- __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
- __ andptr(c_rarg2, c_rarg3);
- __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
- __ cmpptr(c_rarg2, c_rarg3);
- __ jcc(Assembler::notZero, error);
-
- // make sure klass' klass is 'reasonable'
- __ load_klass(rax, rax);
- __ testptr(rax, rax);
- __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
- // Check if the klass' klass is in the right area of memory
- __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
- __ andptr(rax, c_rarg3);
- __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
- __ cmpptr(rax, c_rarg3);
- __ jcc(Assembler::notZero, error);
+ // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
// return if everything seems ok
__ bind(exit);
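
Both verify_oop stubs drop the Universe::verify_klass_mask()/verify_klass_bits() range checks and the klass-of-klass probe: a Klass* now lives in native metaspace rather than in a predictable heap range, so the only cheap sanity check left is that the klass field is non-null (the TODO above notes a possible future lower-4G assert for UseCompressedKlassPointers). A simplified C-level analogue of what the check now amounts to, using a made-up FakeOop layout rather than the generated assembly:

#include <cstdint>
#include <cstdio>

struct FakeOop {
  uintptr_t mark;   // header word
  void*     klass;  // class pointer (now metaspace, not heap)
};

// Roughly the checks that remain: non-null, pointer-aligned, klass non-null.
bool looks_like_an_oop(const FakeOop* obj) {
  if (obj == nullptr) return false;
  if (((uintptr_t)obj & (sizeof(void*) - 1)) != 0) return false;   // alignment
  return obj->klass != nullptr;                                    // klass set
}

int main() {
  int k = 0;
  FakeOop good = { 1, &k };
  FakeOop bad  = { 1, nullptr };
  printf("good=%d bad=%d\n", looks_like_an_oop(&good), looks_like_an_oop(&bad));
  return 0;
}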
diff --git a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
index bbf297b3d..4aff1d966 100644
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
@@ -30,8 +30,8 @@
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
-#include "oops/methodDataOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
@@ -201,7 +201,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
__ bind(L_got_cache);
__ movl(rbx, Address(rbx, rcx,
- Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
+ Address::times_ptr, ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset()));
__ andptr(rbx, 0xFF);
__ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
@@ -343,34 +343,34 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
// rcx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
- const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
+ const Address invocation_counter(rbx, in_bytes(Method::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
- // Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
+ // Note: In tiered we increment counters in either the Method* or the MDO, depending on whether we're profiling.
if (TieredCompilation) {
int increment = InvocationCounter::count_increment;
int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
Label no_mdo, done;
if (ProfileInterpreter) {
// Are we profiling?
- __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
+ __ movptr(rax, Address(rbx, Method::method_data_offset()));
__ testptr(rax, rax);
__ jccb(Assembler::zero, no_mdo);
// Increment counter in the MDO
- const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
+ const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
__ jmpb(done);
}
__ bind(no_mdo);
- // Increment counter in methodOop (we don't need to load it, it's in rcx).
+ // Increment counter in Method* (we don't need to load it, it's in rcx).
__ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
__ bind(done);
} else {
- const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() +
+ const Address backedge_counter (rbx, Method::backedge_counter_offset() +
InvocationCounter::counter_offset());
- if (ProfileInterpreter) { // %%% Merge this into methodDataOop
- __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
+ if (ProfileInterpreter) { // %%% Merge this into MethodData*
+ __ incrementl(Address(rbx,Method::interpreter_invocation_counter_offset()));
}
// Update standard invocation counters
__ movl(rax, backedge_counter); // load backedge counter
@@ -424,7 +424,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// C++ interpreter only
// rsi - previous interpreter state pointer
- const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
+ const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
// InterpreterRuntime::frequency_counter_overflow takes one argument
// indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
@@ -433,7 +433,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
__ movptr(rax, (intptr_t)false);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
- __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
+ __ movptr(rbx, Address(rbp, method_offset)); // restore Method*
// Preserve invariant that rsi/rdi contain bcp/locals of sender frame
// and jump to the interpreted entry.
@@ -450,7 +450,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
//
// Asm interpreter
// rdx: number of additional locals this frame needs (what we must check)
- // rbx,: methodOop
+ // rbx,: Method*
// destroyed on exit
// rax,
@@ -542,11 +542,11 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
}
// Allocate monitor and lock method (asm interpreter)
-// rbx, - methodOop
+// rbx, - Method*
//
void InterpreterGenerator::lock_method(void) {
// synchronize method
- const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+ const Address access_flags (rbx, Method::access_flags_offset());
const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
@@ -566,9 +566,9 @@ void InterpreterGenerator::lock_method(void) {
__ testl(rax, JVM_ACC_STATIC);
__ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
__ jcc(Assembler::zero, done);
- __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
- __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
- __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(rax, Address(rbx, Method::const_offset()));
+ __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
+ __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
__ movptr(rax, Address(rax, mirror_offset));
__ bind(done);
}
@@ -592,24 +592,24 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ push(rsi); // set sender sp
__ push((int32_t)NULL_WORD); // leave last_sp as null
- __ movptr(rsi, Address(rbx,methodOopDesc::const_offset())); // get constMethodOop
- __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase
- __ push(rbx); // save methodOop
+ __ movptr(rsi, Address(rbx,Method::const_offset())); // get ConstMethod*
+ __ lea(rsi, Address(rsi,ConstMethod::codes_offset())); // get codebase
+ __ push(rbx); // save Method*
if (ProfileInterpreter) {
Label method_data_continue;
- __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+ __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
__ testptr(rdx, rdx);
__ jcc(Assembler::zero, method_data_continue);
- __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
+ __ addptr(rdx, in_bytes(MethodData::data_offset()));
__ bind(method_data_continue);
__ push(rdx); // set the mdp (method data pointer)
} else {
__ push(0);
}
- __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
- __ movptr(rdx, Address(rdx, constMethodOopDesc::constants_offset()));
- __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
+ __ movptr(rdx, Address(rbx, Method::const_offset()));
+ __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
+ __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
__ push(rdx); // set constant pool cache
__ push(rdi); // set locals pointer
if (native_call) {
@@ -633,7 +633,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
address InterpreterGenerator::generate_accessor_entry(void) {
- // rbx,: methodOop
+ // rbx,: Method*
// rcx: receiver (preserve for slow entry into asm interpreter)
// rsi: senderSP must preserved for slow path, set SP to it on fast path
@@ -664,14 +664,14 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ jcc(Assembler::zero, slow_path);
// read first instruction word and extract bytecode @ 1 and index @ 2
- __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
- __ movptr(rdi, Address(rdx, constMethodOopDesc::constants_offset()));
- __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
+ __ movptr(rdx, Address(rbx, Method::const_offset()));
+ __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
+ __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2*BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
- __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
+ __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
// rax,: local 0
// rbx,: method
@@ -688,7 +688,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ movl(rcx,
Address(rdi,
rdx,
- Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+ Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2*BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
@@ -698,11 +698,11 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ movptr(rcx,
Address(rdi,
rdx,
- Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
+ Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
__ movl(rdx,
Address(rdi,
rdx,
- Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+ Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
Label notByte, notShort, notChar;
const Address field_address (rax, rcx, Address::times_1);
@@ -789,7 +789,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
//
// This code is based on generate_accessor_enty.
- // rbx,: methodOop
+ // rbx,: Method*
// rcx: receiver (preserve for slow entry into asm interpreter)
// rsi: senderSP must preserved for slow path, set SP to it on fast path
@@ -863,15 +863,15 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// determine code generation flags
bool inc_counter = UseCompiler || CountCompiledCalls;
- // rbx,: methodOop
+ // rbx,: Method*
// rsi: sender sp
// rsi: previous interpreter state (C++ interpreter) must preserve
address entry_point = __ pc();
- const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
- const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
- const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+ const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
+ const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
+ const Address access_flags (rbx, Method::access_flags_offset());
// get parameter size (always needed)
__ load_unsigned_short(rcx, size_of_parameters);
@@ -880,7 +880,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// and the arguments are already on the stack and we only add a handful of words
// to the stack
- // rbx,: methodOop
+ // rbx,: Method*
// rcx: size of parameters
// rsi: sender sp
@@ -988,8 +988,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// allocate space for parameters
__ get_method(method);
- __ verify_oop(method);
- __ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset()));
+ __ load_unsigned_short(t, Address(method, Method::size_of_parameters_offset()));
__ shlptr(t, Interpreter::logStackElementSize);
__ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
__ subptr(rsp, t);
@@ -997,12 +996,12 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get signature handler
{ Label L;
- __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ movptr(t, Address(method, Method::signature_handler_offset()));
__ testptr(t, t);
__ jcc(Assembler::notZero, L);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
__ get_method(method);
- __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ movptr(t, Address(method, Method::signature_handler_offset()));
__ bind(L);
}
@@ -1024,13 +1023,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// pass mirror handle if static call
{ Label L;
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
- __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
+ __ movl(t, Address(method, Method::access_flags_offset()));
__ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L);
// get mirror
- __ movptr(t, Address(method, methodOopDesc:: const_offset()));
- __ movptr(t, Address(t, constMethodOopDesc::constants_offset()));
- __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(t, Address(method, Method:: const_offset()));
+ __ movptr(t, Address(t, ConstMethod::constants_offset()));
+ __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
__ movptr(t, Address(t, mirror_offset));
// copy mirror into activation frame
__ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
@@ -1042,14 +1041,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get native function entry point
{ Label L;
- __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ movptr(rax, Address(method, Method::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
__ cmpptr(rax, unsatisfied.addr());
__ jcc(Assembler::notEqual, L);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
__ get_method(method);
- __ verify_oop(method);
- __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ movptr(rax, Address(method, Method::native_function_offset()));
__ bind(L);
}
@@ -1218,9 +1216,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// i.e., bci == 0 <=> rsi == code_base()
// Can't call_VM until bcp is within reasonable.
__ get_method(method); // method is junk from thread_in_native to now.
- __ verify_oop(method);
- __ movptr(rsi, Address(method,methodOopDesc::const_offset())); // get constMethodOop
- __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase
+ __ movptr(rsi, Address(method,Method::const_offset())); // get ConstMethod*
+ __ lea(rsi, Address(rsi,ConstMethod::codes_offset())); // get codebase
// handle exceptions (exception handling will handle unlocking!)
{ Label L;
@@ -1236,7 +1233,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// do unlocking if necessary
{ Label L;
- __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
+ __ movl(t, Address(method, Method::access_flags_offset()));
__ testl(t, JVM_ACC_SYNCHRONIZED);
__ jcc(Assembler::zero, L);
// the code below should be shared with interpreter macro assembler implementation
@@ -1296,20 +1293,20 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// determine code generation flags
bool inc_counter = UseCompiler || CountCompiledCalls;
- // rbx,: methodOop
+ // rbx,: Method*
// rsi: sender sp
address entry_point = __ pc();
- const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
- const Address size_of_locals (rbx, methodOopDesc::size_of_locals_offset());
- const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
- const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+ const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
+ const Address size_of_locals (rbx, Method::size_of_locals_offset());
+ const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
+ const Address access_flags (rbx, Method::access_flags_offset());
// get parameter size (always needed)
__ load_unsigned_short(rcx, size_of_parameters);
- // rbx,: methodOop
+ // rbx,: Method*
// rcx: size of parameters
// rsi: sender_sp (could differ from sp+wordSize if we were called via c2i )
@@ -1464,7 +1461,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
//
// Arguments:
//
-// rbx,: methodOop
+// rbx,: Method*
// rcx: receiver
//
//
@@ -1490,7 +1487,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// [ expr. stack bottom ]
// [ saved rsi ]
// [ current rdi ]
-// [ methodOop ]
+// [ Method* ]
// [ saved rbp, ] <--- rbp,
// [ return address ]
// [ local variable m ]
@@ -1556,7 +1553,7 @@ bool AbstractInterpreter::can_be_compiled(methodHandle m) {
}
// How much stack a method activation needs in words.
-int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
const int stub_code = 4; // see generate_call_stub
// Save space for one monitor to get into the interpreted method in case
@@ -1568,7 +1565,7 @@ int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
// be sure to change this if you add/subtract anything to/from the overhead area
const int overhead_size = -frame::interpreter_frame_initial_sp_offset;
- const int extra_stack = methodOopDesc::extra_stack_entries();
+ const int extra_stack = Method::extra_stack_entries();
const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
Interpreter::stackElementWords;
return overhead_size + method_stack + stub_code;
@@ -1576,7 +1573,7 @@ int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
// asm based interpreter deoptimization helpers
-int AbstractInterpreter::layout_activation(methodOop method,
+int AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
@@ -1737,8 +1734,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// Compute size of arguments for saving when returning to deoptimized caller
__ get_method(rax);
- __ verify_oop(rax);
- __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
+ __ load_unsigned_short(rax, Address(rax, in_bytes(Method::size_of_parameters_offset())));
__ shlptr(rax, Interpreter::logStackElementSize);
__ restore_locals();
__ subptr(rdi, rax);
@@ -1815,9 +1811,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ remove_activation(vtos, rdx, false, true, false);
// restore exception
__ get_thread(thread);
- __ movptr(rax, Address(thread, JavaThread::vm_result_offset()));
- __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
- __ verify_oop(rax);
+ __ get_vm_result(rax, thread);
// Inbetween activations - previous activation type unknown yet
// compute continuation point - the continuation point expects
diff --git a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
index c597926c3..75318ab42 100644
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
@@ -30,8 +30,8 @@
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
-#include "oops/methodDataOop.hpp"
-#include "oops/methodOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
@@ -185,7 +185,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ bind(L_got_cache);
__ movl(rbx, Address(rbx, rcx,
Address::times_ptr,
- in_bytes(constantPoolCacheOopDesc::base_offset()) +
+ in_bytes(ConstantPoolCache::base_offset()) +
3 * wordSize));
__ andl(rbx, 0xFF);
__ lea(rsp, Address(rsp, rbx, Address::times_8));
@@ -299,36 +299,36 @@ void InterpreterGenerator::generate_counter_incr(
Label* overflow,
Label* profile_method,
Label* profile_method_continue) {
- const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
+ const Address invocation_counter(rbx, in_bytes(Method::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
- // Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
+ // Note: In tiered we increment counters in either the Method* or the MDO, depending on whether we're profiling.
if (TieredCompilation) {
int increment = InvocationCounter::count_increment;
int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
Label no_mdo, done;
if (ProfileInterpreter) {
// Are we profiling?
- __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
+ __ movptr(rax, Address(rbx, Method::method_data_offset()));
__ testptr(rax, rax);
__ jccb(Assembler::zero, no_mdo);
// Increment counter in the MDO
- const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
+ const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
__ jmpb(done);
}
__ bind(no_mdo);
- // Increment counter in methodOop (we don't need to load it, it's in ecx).
+ // Increment counter in Method* (we don't need to load it, it's in ecx).
__ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
__ bind(done);
} else {
const Address backedge_counter(rbx,
- methodOopDesc::backedge_counter_offset() +
+ Method::backedge_counter_offset() +
InvocationCounter::counter_offset());
- if (ProfileInterpreter) { // %%% Merge this into methodDataOop
+ if (ProfileInterpreter) { // %%% Merge this into MethodData*
__ incrementl(Address(rbx,
- methodOopDesc::interpreter_invocation_counter_offset()));
+ Method::interpreter_invocation_counter_offset()));
}
// Update standard invocation counters
__ movl(rax, backedge_counter); // load backedge counter
@@ -370,7 +370,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// rdx is not restored. Doesn't appear to really be set.
const Address size_of_parameters(rbx,
- methodOopDesc::size_of_parameters_offset());
+ Method::size_of_parameters_offset());
// InterpreterRuntime::frequency_counter_overflow takes two
// arguments, the first (thread) is passed by call_VM, the second
@@ -385,7 +385,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
InterpreterRuntime::frequency_counter_overflow),
c_rarg1);
- __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
+ __ movptr(rbx, Address(rbp, method_offset)); // restore Method*
// Preserve invariant that r13/r14 contain bcp/locals of sender frame
// and jump to the interpreted entry.
__ jmp(*do_continue, relocInfo::none);
@@ -401,7 +401,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
//
// Args:
// rdx: number of additional locals this frame needs (what we must check)
-// rbx: methodOop
+// rbx: Method*
//
// Kills:
// rax
@@ -487,7 +487,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// Allocate monitor and lock method (asm interpreter)
//
// Args:
-// rbx: methodOop
+// rbx: Method*
// r14: locals
//
// Kills:
@@ -496,7 +496,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// rscratch1, rscratch2 (scratch regs)
void InterpreterGenerator::lock_method(void) {
// synchronize method
- const Address access_flags(rbx, methodOopDesc::access_flags_offset());
+ const Address access_flags(rbx, Method::access_flags_offset());
const Address monitor_block_top(
rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize);
@@ -522,10 +522,10 @@ void InterpreterGenerator::lock_method(void) {
// get receiver (assume this is frequent case)
__ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
__ jcc(Assembler::zero, done);
- __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
- __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
+ __ movptr(rax, Address(rbx, Method::const_offset()));
+ __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
__ movptr(rax, Address(rax,
- constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ ConstantPool::pool_holder_offset_in_bytes()));
__ movptr(rax, Address(rax, mirror_offset));
#ifdef ASSERT
@@ -555,7 +555,7 @@ void InterpreterGenerator::lock_method(void) {
//
// Args:
// rax: return address
-// rbx: methodOop
+// rbx: Method*
// r14: pointer to locals
// r13: sender sp
// rdx: cp cache
@@ -565,24 +565,24 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ enter(); // save old & set new rbp
__ push(r13); // set sender sp
__ push((int)NULL_WORD); // leave last_sp as null
- __ movptr(r13, Address(rbx, methodOopDesc::const_offset())); // get constMethodOop
- __ lea(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
- __ push(rbx); // save methodOop
+ __ movptr(r13, Address(rbx, Method::const_offset())); // get ConstMethod*
+ __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase
+ __ push(rbx); // save Method*
if (ProfileInterpreter) {
Label method_data_continue;
- __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+ __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
__ testptr(rdx, rdx);
__ jcc(Assembler::zero, method_data_continue);
- __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
+ __ addptr(rdx, in_bytes(MethodData::data_offset()));
__ bind(method_data_continue);
__ push(rdx); // set the mdp (method data pointer)
} else {
__ push(0);
}
- __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
- __ movptr(rdx, Address(rdx, constMethodOopDesc::constants_offset()));
- __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
+ __ movptr(rdx, Address(rbx, Method::const_offset()));
+ __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
+ __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
__ push(rdx); // set constant pool cache
__ push(r14); // set locals pointer
if (native_call) {
@@ -604,7 +604,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// Call an accessor method (assuming it is resolved, otherwise drop
// into vanilla (slow path) entry
address InterpreterGenerator::generate_accessor_entry(void) {
- // rbx: methodOop
+ // rbx: Method*
// r13: senderSP must preserver for slow path, set SP to it on fast path
@@ -632,14 +632,14 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ jcc(Assembler::zero, slow_path);
// read first instruction word and extract bytecode @ 1 and index @ 2
- __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
- __ movptr(rdi, Address(rdx, constMethodOopDesc::constants_offset()));
- __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
+ __ movptr(rdx, Address(rbx, Method::const_offset()));
+ __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
+ __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2 * BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
- __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
+ __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
// rax: local 0
// rbx: method
@@ -655,7 +655,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
Address(rdi,
rdx,
Address::times_8,
- constantPoolCacheOopDesc::base_offset() +
+ ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2 * BitsPerByte);
__ andl(rcx, 0xFF);
@@ -667,14 +667,14 @@ address InterpreterGenerator::generate_accessor_entry(void) {
Address(rdi,
rdx,
Address::times_8,
- constantPoolCacheOopDesc::base_offset() +
+ ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset()));
// edx: flags
__ movl(rdx,
Address(rdi,
rdx,
Address::times_8,
- constantPoolCacheOopDesc::base_offset() +
+ ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset()));
Label notObj, notInt, notByte, notShort;
@@ -771,7 +771,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
//
// This code is based on generate_accessor_enty.
//
- // rbx: methodOop
+ // rbx: Method*
// r13: senderSP must preserve for slow path, set SP to it on fast path
@@ -839,17 +839,17 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// determine code generation flags
bool inc_counter = UseCompiler || CountCompiledCalls;
- // rbx: methodOop
+ // rbx: Method*
// r13: sender sp
address entry_point = __ pc();
- const Address size_of_parameters(rbx, methodOopDesc::
+ const Address size_of_parameters(rbx, Method::
size_of_parameters_offset());
- const Address invocation_counter(rbx, methodOopDesc::
+ const Address invocation_counter(rbx, Method::
invocation_counter_offset() +
InvocationCounter::counter_offset());
- const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+ const Address access_flags (rbx, Method::access_flags_offset());
// get parameter size (always needed)
__ load_unsigned_short(rcx, size_of_parameters);
@@ -858,7 +858,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// expression stack and the arguments are already on the stack and
// we only add a handful of words to the stack
- // rbx: methodOop
+ // rbx: Method*
// rcx: size of parameters
// r13: sender sp
__ pop(rax); // get return address
@@ -967,10 +967,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// allocate space for parameters
__ get_method(method);
- __ verify_oop(method);
__ load_unsigned_short(t,
Address(method,
- methodOopDesc::size_of_parameters_offset()));
+ Method::size_of_parameters_offset()));
__ shll(t, Interpreter::logStackElementSize);
__ subptr(rsp, t);
@@ -980,7 +979,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get signature handler
{
Label L;
- __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ movptr(t, Address(method, Method::signature_handler_offset()));
__ testptr(t, t);
__ jcc(Assembler::notZero, L);
__ call_VM(noreg,
@@ -988,7 +987,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
InterpreterRuntime::prepare_native_call),
method);
__ get_method(method);
- __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ movptr(t, Address(method, Method::signature_handler_offset()));
__ bind(L);
}
@@ -1018,13 +1017,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
{
Label L;
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
- __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
+ __ movl(t, Address(method, Method::access_flags_offset()));
__ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L);
// get mirror
- __ movptr(t, Address(method, methodOopDesc::const_offset()));
- __ movptr(t, Address(t, constMethodOopDesc::constants_offset()));
- __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(t, Address(method, Method::const_offset()));
+ __ movptr(t, Address(t, ConstMethod::constants_offset()));
+ __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
__ movptr(t, Address(t, mirror_offset));
// copy mirror into activation frame
__ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
@@ -1038,7 +1037,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get native function entry point
{
Label L;
- __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ movptr(rax, Address(method, Method::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
__ movptr(rscratch2, unsatisfied.addr());
__ cmpptr(rax, rscratch2);
@@ -1048,8 +1047,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
InterpreterRuntime::prepare_native_call),
method);
__ get_method(method);
- __ verify_oop(method);
- __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ movptr(rax, Address(method, Method::native_function_offset()));
__ bind(L);
}
@@ -1201,12 +1199,11 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// until here. Also can't call_VM until the bcp has been
// restored. Need bcp for throwing exception below so get it now.
__ get_method(method);
- __ verify_oop(method);
// restore r13 to have legal interpreter frame, i.e., bci == 0 <=>
// r13 == code_base()
- __ movptr(r13, Address(method, methodOopDesc::const_offset())); // get constMethodOop
- __ lea(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
+ __ movptr(r13, Address(method, Method::const_offset())); // get ConstMethod*
+ __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase
// handle exceptions (exception handling will handle unlocking!)
{
Label L;
@@ -1226,7 +1223,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// do unlocking if necessary
{
Label L;
- __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
+ __ movl(t, Address(method, Method::access_flags_offset()));
__ testl(t, JVM_ACC_SYNCHRONIZED);
__ jcc(Assembler::zero, L);
// the code below should be shared with interpreter macro
@@ -1301,22 +1298,22 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// determine code generation flags
bool inc_counter = UseCompiler || CountCompiledCalls;
- // ebx: methodOop
+ // ebx: Method*
// r13: sender sp
address entry_point = __ pc();
const Address size_of_parameters(rbx,
- methodOopDesc::size_of_parameters_offset());
- const Address size_of_locals(rbx, methodOopDesc::size_of_locals_offset());
+ Method::size_of_parameters_offset());
+ const Address size_of_locals(rbx, Method::size_of_locals_offset());
const Address invocation_counter(rbx,
- methodOopDesc::invocation_counter_offset() +
+ Method::invocation_counter_offset() +
InvocationCounter::counter_offset());
- const Address access_flags(rbx, methodOopDesc::access_flags_offset());
+ const Address access_flags(rbx, Method::access_flags_offset());
// get parameter size (always needed)
__ load_unsigned_short(rcx, size_of_parameters);
- // rbx: methodOop
+ // rbx: Method*
// rcx: size of parameters
// r13: sender_sp (could differ from sp+wordSize if we were called via c2i )
@@ -1480,7 +1477,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
//
// Arguments:
//
-// rbx: methodOop
+// rbx: Method*
//
// Stack layout immediately at entry
//
@@ -1505,7 +1502,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// [ expr. stack bottom ]
// [ saved r13 ]
// [ current r14 ]
-// [ methodOop ]
+// [ Method* ]
// [ saved ebp ] <--- rbp
// [ return address ]
// [ local variable m ]
@@ -1574,7 +1571,7 @@ bool AbstractInterpreter::can_be_compiled(methodHandle m) {
}
// How much stack a method activation needs in words.
-int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
const int entry_size = frame::interpreter_frame_monitor_size();
// total overhead size: entry_size + (saved rbp thru expr stack
@@ -1584,13 +1581,13 @@ int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
-(frame::interpreter_frame_initial_sp_offset) + entry_size;
const int stub_code = frame::entry_frame_after_call_words;
- const int extra_stack = methodOopDesc::extra_stack_entries();
+ const int extra_stack = Method::extra_stack_entries();
const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
Interpreter::stackElementWords;
return (overhead_size + method_stack + stub_code);
}
-int AbstractInterpreter::layout_activation(methodOop method,
+int AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
@@ -1755,7 +1752,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// Compute size of arguments for saving when returning to
// deoptimized caller
__ get_method(rax);
- __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::
+ __ load_unsigned_short(rax, Address(rax, in_bytes(Method::
size_of_parameters_offset())));
__ shll(rax, Interpreter::logStackElementSize);
__ restore_locals(); // XXX do we need this?
@@ -1832,9 +1829,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// remove the activation (without doing throws on illegalMonitorExceptions)
__ remove_activation(vtos, rdx, false, true, false);
// restore exception
- __ movptr(rax, Address(r15_thread, JavaThread::vm_result_offset()));
- __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
- __ verify_oop(rax);
+ __ get_vm_result(rax, r15_thread);
// In between activations - previous activation type unknown yet
// compute continuation point - the continuation point expects the
diff --git a/src/cpu/x86/vm/templateTable_x86_32.cpp b/src/cpu/x86/vm/templateTable_x86_32.cpp
index fc19edca4..07f5f118a 100644
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
-#include "oops/methodDataOop.hpp"
+#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
@@ -357,17 +357,13 @@ void TemplateTable::ldc(bool wide) {
__ load_unsigned_byte(rbx, at_bcp(1));
}
__ get_cpool_and_tags(rcx, rax);
- const int base_offset = constantPoolOopDesc::header_size() * wordSize;
- const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
+ const int base_offset = ConstantPool::header_size() * wordSize;
+ const int tags_offset = Array<u1>::base_offset_in_bytes();
// get type
__ xorptr(rdx, rdx);
__ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));
- // unresolved string - get the resolved string
- __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
- __ jccb(Assembler::equal, call_ldc);
-
// unresolved class - get the resolved class
__ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
__ jccb(Assembler::equal, call_ldc);
@@ -400,73 +396,46 @@ void TemplateTable::ldc(bool wide) {
{ Label L;
__ cmpl(rdx, JVM_CONSTANT_Integer);
__ jcc(Assembler::equal, L);
- __ cmpl(rdx, JVM_CONSTANT_String);
- __ jcc(Assembler::equal, L);
- __ cmpl(rdx, JVM_CONSTANT_Object);
- __ jcc(Assembler::equal, L);
+ // String and Object are rewritten to fast_aldc
__ stop("unexpected tag type in ldc");
__ bind(L);
}
#endif
- Label isOop;
- // atos and itos
- // Integer is only non-oop type we will see here
- __ cmpl(rdx, JVM_CONSTANT_Integer);
- __ jccb(Assembler::notEqual, isOop);
+ // itos JVM_CONSTANT_Integer only
__ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
__ push(itos);
- __ jmp(Done);
- __ bind(isOop);
- __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
- __ push(atos);
-
- if (VerifyOops) {
- __ verify_oop(rax);
- }
__ bind(Done);
}
// Fast path for caching oop constants.
-// %%% We should use this to handle Class and String constants also.
-// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
transition(vtos, atos);
- if (!EnableInvokeDynamic) {
- // We should not encounter this bytecode if !EnableInvokeDynamic.
- // The verifier will stop it. However, if we get past the verifier,
- // this will stop the thread in a reasonable way, without crashing the JVM.
- __ call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::throw_IncompatibleClassChangeError));
- // the call_VM checks for exception, so we should never return here.
- __ should_not_reach_here();
- return;
- }
+ Register result = rax;
+ Register tmp = rdx;
+ int index_size = wide ? sizeof(u2) : sizeof(u1);
- const Register cache = rcx;
- const Register index = rdx;
+ Label resolved;
- resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
- if (VerifyOops) {
- __ verify_oop(rax);
- }
+ // We are resolved if the resolved reference cache entry contains a
+ // non-null object (String, MethodType, etc.)
+ assert_different_registers(result, tmp);
+ __ get_cache_index_at_bcp(tmp, 1, index_size);
+ __ load_resolved_reference_at_index(result, tmp);
+ __ testl(result, result);
+ __ jcc(Assembler::notZero, resolved);
- Label L_done, L_throw_exception;
- const Register con_klass_temp = rcx; // same as cache
- __ load_klass(con_klass_temp, rax);
- __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
- __ jcc(Assembler::notEqual, L_done);
- __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
- __ jcc(Assembler::notEqual, L_throw_exception);
- __ xorptr(rax, rax);
- __ jmp(L_done);
+ address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
- // Load the exception from the system-array which wraps it:
- __ bind(L_throw_exception);
- __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
- __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
+ // first time invocation - must resolve first
+ __ movl(tmp, (int)bytecode());
+ __ call_VM(result, entry, tmp);
- __ bind(L_done);
+ __ bind(resolved);
+
+ if (VerifyOops) {
+ __ verify_oop(result);
+ }
}
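The rewritten fast_aldc above first probes the constant pool's resolved-references cache and only calls InterpreterRuntime::resolve_ldc when the slot is still empty. As a minimal standalone C++ sketch of that control flow (ResolvedRefs, slots and the resolve callback are illustrative stand-ins, not the VM's actual types):

#include <cstddef>
#include <vector>

// Illustrative stand-in for the resolved-references cache consulted by fast_aldc;
// the real array hangs off ConstantPool and is filled by InterpreterRuntime::resolve_ldc.
struct ResolvedRefs {
    std::vector<void*> slots;

    // 'resolve' stands in for the call_VM into resolve_ldc on first use.
    void* load_constant(std::size_t index, void* (*resolve)(std::size_t)) {
        void* obj = slots.at(index);       // load_resolved_reference_at_index
        if (obj != nullptr) return obj;    // non-null: already resolved, take the fast path
        obj = resolve(index);              // first time: runtime resolves and records it
        slots.at(index) = obj;
        return obj;
    }
};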
void TemplateTable::ldc2_w() {
@@ -475,8 +444,8 @@ void TemplateTable::ldc2_w() {
__ get_unsigned_2_byte_index_at_bcp(rbx, 1);
__ get_cpool_and_tags(rcx, rax);
- const int base_offset = constantPoolOopDesc::header_size() * wordSize;
- const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
+ const int base_offset = ConstantPool::header_size() * wordSize;
+ const int tags_offset = Array<u1>::base_offset_in_bytes();
// get type
__ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
@@ -1328,7 +1297,7 @@ void TemplateTable::dop2(Operation op) {
case mul: {
Label L_strict;
Label L_join;
- const Address access_flags (rcx, methodOopDesc::access_flags_offset());
+ const Address access_flags (rcx, Method::access_flags_offset());
__ get_method(rcx);
__ movl(rcx, access_flags);
__ testl(rcx, JVM_ACC_STRICT);
@@ -1347,7 +1316,7 @@ void TemplateTable::dop2(Operation op) {
case div: {
Label L_strict;
Label L_join;
- const Address access_flags (rcx, methodOopDesc::access_flags_offset());
+ const Address access_flags (rcx, Method::access_flags_offset());
__ get_method(rcx);
__ movl(rcx, access_flags);
__ testl(rcx, JVM_ACC_STRICT);
@@ -1576,8 +1545,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ get_method(rcx); // ECX holds method
__ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count
- const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset();
- const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset();
+ const ByteSize be_offset = Method::backedge_counter_offset() + InvocationCounter::counter_offset();
+ const ByteSize inv_offset = Method::invocation_counter_offset() + InvocationCounter::counter_offset();
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
// Load up EDX with the branch displacement
@@ -1595,8 +1564,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
// compute return address as bci in rax,
- __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
- __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
+ __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(ConstMethod::codes_offset())));
+ __ subptr(rax, Address(rcx, Method::const_offset()));
// Adjust the bcp in RSI by the displacement in EDX
__ addptr(rsi, rdx);
// Push return address
@@ -1632,18 +1601,18 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
if (ProfileInterpreter) {
// Are we profiling?
- __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
+ __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
__ testptr(rbx, rbx);
__ jccb(Assembler::zero, no_mdo);
// Increment the MDO backedge counter
- const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
+ const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
rax, false, Assembler::zero, &backedge_counter_overflow);
__ jmp(dispatch);
}
__ bind(no_mdo);
- // Increment backedge counter in methodOop
+ // Increment backedge counter in Method*
__ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
rax, false, Assembler::zero, &backedge_counter_overflow);
} else {
@@ -1672,7 +1641,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ jcc(Assembler::below, dispatch);
// When ProfileInterpreter is on, the backedge_count comes from the
- // methodDataOop, which value does not get reset on the call to
+ // MethodData*, which value does not get reset on the call to
// frequency_counter_overflow(). To avoid excessive calls to the overflow
// routine while the method is being compiled, add a second test to make
// sure the overflow function is called only once every overflow_frequency.
@@ -1822,9 +1791,9 @@ void TemplateTable::ret() {
__ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
__ profile_ret(rbx, rcx);
__ get_method(rax);
- __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
+ __ movptr(rsi, Address(rax, Method::const_offset()));
__ lea(rsi, Address(rsi, rbx, Address::times_1,
- constMethodOopDesc::codes_offset()));
+ ConstMethod::codes_offset()));
__ dispatch_next(vtos);
}
@@ -1835,8 +1804,8 @@ void TemplateTable::wide_ret() {
__ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
__ profile_ret(rbx, rcx);
__ get_method(rax);
- __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
- __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
+ __ movptr(rsi, Address(rax, Method::const_offset()));
+ __ lea(rsi, Address(rsi, rbx, Address::times_1, ConstMethod::codes_offset()));
__ dispatch_next(vtos);
}
@@ -2080,31 +2049,17 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constrain
}
void TemplateTable::resolve_cache_and_index(int byte_no,
- Register result,
Register Rcache,
Register index,
size_t index_size) {
const Register temp = rbx;
- assert_different_registers(result, Rcache, index, temp);
+ assert_different_registers(Rcache, index, temp);
Label resolved;
- if (byte_no == f12_oop) {
- // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
- // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
- // there is a 1-1 relation between bytecode type and CP entry type.
- // The caller will also load a methodOop from f2.
- assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
- __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
- __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
- __ testptr(result, result);
- __ jcc(Assembler::notEqual, resolved);
- } else {
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
- assert(result == noreg, ""); //else change code for setting result
__ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
__ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
__ jcc(Assembler::equal, resolved);
- }
// resolve first time through
address entry;
@@ -2119,8 +2074,6 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
- case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
- case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
default:
fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
break;
@@ -2129,8 +2082,6 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
- if (result != noreg)
- __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
__ bind(resolved);
}
@@ -2144,7 +2095,7 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
bool is_static = false) {
assert_different_registers(cache, index, flags, off);
- ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
+ ByteSize cp_base_offset = ConstantPoolCache::base_offset();
// Field offset
__ movptr(off, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
@@ -2156,6 +2107,8 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
if (is_static) {
__ movptr(obj, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
+ const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+ __ movptr(obj, Address(obj, mirror_offset));
}
}
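With class metadata off the Java heap, the f1 slot for a static field holds the holder's Klass* rather than its java.lang.Class mirror, so the template adds one extra load through Klass::java_mirror_offset(). A rough C++ model of that indirection (the struct layout is illustrative only):

struct Oop;                            // the java.lang.Class instance on the heap
struct Klass { Oop* java_mirror; };    // simplified; the real offset is Klass::java_mirror_offset()

// Static field accesses are based off the mirror, so the base object is now
// reached with one more load than when f1 held the mirror directly.
Oop* static_field_base(Klass* f1_klass) {
    return f1_klass->java_mirror;      // movptr(obj, Address(obj, mirror_offset))
}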
@@ -2176,31 +2129,21 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
// determine constant pool cache field offsets
assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
const int method_offset = in_bytes(
- constantPoolCacheOopDesc::base_offset() +
+ ConstantPoolCache::base_offset() +
((byte_no == f2_byte)
? ConstantPoolCacheEntry::f2_offset()
: ConstantPoolCacheEntry::f1_offset()));
- const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
+ const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset());
// access constant pool cache fields
- const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
+ const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset());
- if (byte_no == f12_oop) {
- // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
- // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
- // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
- resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
- __ movptr(method, Address(cache, index, Address::times_ptr, index_offset));
- itable_index = noreg; // hack to disable load below
- } else {
- resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+ resolve_cache_and_index(byte_no, cache, index, index_size);
__ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
- }
+
if (itable_index != noreg) {
- // pick up itable index from f2 also:
- assert(byte_no == f1_byte, "already picked up f1");
__ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
}
__ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
@@ -2223,7 +2166,7 @@ void TemplateTable::jvmti_post_field_access(Register cache,
__ jcc(Assembler::zero, L1);
// cache entry pointer
- __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
+ __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
__ shll(index, LogBytesPerWord);
__ addptr(cache, index);
if (is_static) {
@@ -2257,7 +2200,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
const Register off = rbx;
const Register flags = rax;
- resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+ resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
jvmti_post_field_access(cache, index, is_static, false);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@@ -2395,7 +2338,7 @@ void TemplateTable::getstatic(int byte_no) {
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
- ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
+ ByteSize cp_base_offset = ConstantPoolCache::base_offset();
if (JvmtiExport::can_post_field_modification()) {
// Check to see if a field modification watch has been set before we take
@@ -2466,7 +2409,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
const Register off = rbx;
const Register flags = rax;
- resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+ resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
jvmti_post_field_mod(cache, index, is_static);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@@ -2708,7 +2651,7 @@ void TemplateTable::jvmti_post_fast_field_mod() {
void TemplateTable::fast_storefield(TosState state) {
transition(state, vtos);
- ByteSize base = constantPoolCacheOopDesc::base_offset();
+ ByteSize base = ConstantPoolCache::base_offset();
jvmti_post_fast_field_mod();
@@ -2827,7 +2770,7 @@ void TemplateTable::fast_accessfield(TosState state) {
__ movptr(rbx, Address(rcx,
rbx,
Address::times_ptr,
- in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
+ in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())));
// rax,: object
@@ -2864,7 +2807,7 @@ void TemplateTable::fast_xaccess(TosState state) {
__ movptr(rbx, Address(rcx,
rdx,
Address::times_ptr,
- in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
+ in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())));
// make sure exception is reported in correct bcp range (getfield is next instruction)
__ increment(rsi);
__ null_check(rax);
@@ -2926,12 +2869,15 @@ void TemplateTable::prepare_invoke(int byte_no,
// maybe push appendix to arguments (just before return address)
if (is_invokedynamic || is_invokehandle) {
Label L_no_push;
- __ verify_oop(index);
__ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
__ jccb(Assembler::zero, L_no_push);
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
+ __ push(rbx);
+ __ mov(rbx, index);
+ __ load_resolved_reference_at_index(index, rbx);
+ __ pop(rbx);
__ push(index); // push appendix (MethodType, CallSite, etc.)
__ bind(L_no_push);
}
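For invokedynamic and invokehandle the appendix (MethodType, CallSite, etc.) no longer sits in the CP cache as an oop: the flags bit still says whether one exists, but the object itself is fetched from the resolved-references array, here by briefly borrowing rbx as the index register. A small sketch, assuming (as the code above suggests) that f2 now carries an index into that array rather than the oop itself:

// Hypothetical helper names; only the shape of the lookup is meant to match.
void* appendix_to_push(void** resolved_references, int appendix_index, bool has_appendix) {
    if (!has_appendix) return nullptr;             // flags bit at has_appendix_shift
    return resolved_references[appendix_index];    // load_resolved_reference_at_index
}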
@@ -2992,11 +2938,10 @@ void TemplateTable::invokevirtual_helper(Register index,
const Register method = index; // method must be rbx
assert(method == rbx,
- "methodOop must be rbx for interpreter calling convention");
+ "Method* must be rbx for interpreter calling convention");
// do the call - the index is actually the method to call
- // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop
- __ verify_oop(method);
+ // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
// It's final, need a null check here!
__ null_check(recv);
@@ -3011,12 +2956,11 @@ void TemplateTable::invokevirtual_helper(Register index,
// get receiver klass
__ null_check(recv, oopDesc::klass_offset_in_bytes());
__ load_klass(rax, recv);
- __ verify_oop(rax);
// profile this call
__ profile_virtual_call(rax, rdi, rdx);
- // get target methodOop & entry point
+ // get target Method* & entry point
__ lookup_virtual_method(rax, index, method);
__ jump_from_interpreted(method, rdx);
}
@@ -3041,12 +2985,11 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- prepare_invoke(byte_no, rbx, noreg, // get f1 methodOop
+ prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
rcx); // get receiver also for null check
__ verify_oop(rcx);
__ null_check(rcx);
// do the call
- __ verify_oop(rbx);
__ profile_call(rax);
__ jump_from_interpreted(rbx, rax);
}
@@ -3055,9 +2998,8 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- prepare_invoke(byte_no, rbx); // get f1 methodOop
+ prepare_invoke(byte_no, rbx); // get f1 Method*
// do the call
- __ verify_oop(rbx);
__ profile_call(rax);
__ jump_from_interpreted(rbx, rax);
}
@@ -3073,7 +3015,7 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- prepare_invoke(byte_no, rax, rbx, // get f1 klassOop, f2 itable index
+ prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index
rcx, rdx); // recv, flags
// rax: interface klass (from f1)
@@ -3097,7 +3039,6 @@ void TemplateTable::invokeinterface(int byte_no) {
__ restore_locals(); // restore rdi
__ null_check(rcx, oopDesc::klass_offset_in_bytes());
__ load_klass(rdx, rcx);
- __ verify_oop(rdx);
// profile this call
__ profile_virtual_call(rdx, rsi, rdi);
@@ -3110,7 +3051,7 @@ void TemplateTable::invokeinterface(int byte_no) {
rbx, rsi,
no_such_interface);
- // rbx: methodOop to call
+ // rbx: Method* to call
// rcx: receiver
// Check for abstract method error
// Note: This should be done more efficiently via a throw_abstract_method_error
@@ -3121,7 +3062,7 @@ void TemplateTable::invokeinterface(int byte_no) {
// do the call
// rcx: receiver
- // rbx,: methodOop
+ // rbx,: Method*
__ jump_from_interpreted(rbx, rdx);
__ should_not_reach_here();
@@ -3151,7 +3092,7 @@ void TemplateTable::invokeinterface(int byte_no) {
void TemplateTable::invokehandle(int byte_no) {
transition(vtos, vtos);
- assert(byte_no == f12_oop, "use this argument");
+ assert(byte_no == f1_byte, "use this argument");
const Register rbx_method = rbx; // (from f2)
const Register rax_mtype = rax; // (from f1)
const Register rcx_recv = rcx;
@@ -3164,7 +3105,7 @@ void TemplateTable::invokehandle(int byte_no) {
}
prepare_invoke(byte_no,
- rbx_method, rax_mtype, // get f2 methodOop, f1 MethodType
+ rbx_method, rax_mtype, // get f2 Method*, f1 MethodType
rcx_recv);
__ verify_oop(rbx_method);
__ verify_oop(rcx_recv);
@@ -3181,7 +3122,7 @@ void TemplateTable::invokehandle(int byte_no) {
void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos);
- assert(byte_no == f12_oop, "use this argument");
+ assert(byte_no == f1_byte, "use this argument");
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
@@ -3199,7 +3140,7 @@ void TemplateTable::invokedynamic(int byte_no) {
prepare_invoke(byte_no, rbx_method, rax_callsite);
- // rax: CallSite object (from f1)
+ // rax: CallSite object (from cpool->resolved_references[])
// rbx: MH.linkToCallSite method (from f2)
// Note: rax_callsite is already pushed by prepare_invoke
@@ -3229,22 +3170,22 @@ void TemplateTable::_new() {
__ get_cpool_and_tags(rcx, rax);
// Make sure the class we're about to instantiate has been resolved.
- // This is done before loading instanceKlass to be consistent with the order
- // how Constant Pool is updated (see constantPoolOopDesc::klass_at_put)
- const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
+ // This is done before loading InstanceKlass to be consistent with the order
+ // how Constant Pool is updated (see ConstantPool::klass_at_put)
+ const int tags_offset = Array<u1>::base_offset_in_bytes();
__ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
__ jcc(Assembler::notEqual, slow_case_no_pop);
- // get instanceKlass
- __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
+ // get InstanceKlass
+ __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(ConstantPool)));
__ push(rcx); // save the contexts of klass for initializing the header
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
- __ cmpb(Address(rcx, instanceKlass::init_state_offset()), instanceKlass::fully_initialized);
+ __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
__ jcc(Assembler::notEqual, slow_case);
- // get instance_size in instanceKlass (scaled to a count of bytes)
+ // get instance_size in InstanceKlass (scaled to a count of bytes)
__ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
// test to see if it has a finalizer or is malformed in some way
__ testl(rdx, Klass::_lh_instance_slow_path_bit);
@@ -3414,18 +3355,23 @@ void TemplateTable::checkcast() {
__ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
__ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
// See if bytecode has already been quicked
- __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
+ __ cmpb(Address(rdx, rbx, Address::times_1, Array<u1>::base_offset_in_bytes()), JVM_CONSTANT_Class);
__ jcc(Assembler::equal, quicked);
__ push(atos);
- call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
+ // vm_result_2 has metadata result
+ // borrow rdi from locals
+ __ get_thread(rdi);
+ __ get_vm_result_2(rax, rdi);
+ __ restore_locals();
__ pop_ptr(rdx);
__ jmpb(resolved);
// Get superklass in EAX and subklass in EBX
__ bind(quicked);
__ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
- __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
+ __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(ConstantPool)));
__ bind(resolved);
__ load_klass(rbx, rdx);
@@ -3465,11 +3411,16 @@ void TemplateTable::instanceof() {
__ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
__ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
// See if bytecode has already been quicked
- __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
+ __ cmpb(Address(rdx, rbx, Address::times_1, Array<u1>::base_offset_in_bytes()), JVM_CONSTANT_Class);
__ jcc(Assembler::equal, quicked);
__ push(atos);
- call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
+ // vm_result_2 has metadata result
+ // borrow rdi from locals
+ __ get_thread(rdi);
+ __ get_vm_result_2(rax, rdi);
+ __ restore_locals();
__ pop_ptr(rdx);
__ load_klass(rdx, rdx);
__ jmp(resolved);
@@ -3477,7 +3428,7 @@ void TemplateTable::instanceof() {
// Get superklass in EAX and subklass in EDX
__ bind(quicked);
__ load_klass(rdx, rax);
- __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
+ __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(ConstantPool)));
__ bind(resolved);
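checkcast and instanceof change for the same reason: InterpreterRuntime::quicken_io_cc now hands back a Klass*, which is metadata rather than an oop, so the templates pass noreg to call_VM and pick the result out of the thread's vm_result_2 slot (on 32-bit after borrowing rdi to reach the thread). A small sketch of what get_vm_result_2 amounts to, with simplified field names:

struct JavaThread {
    void* vm_result;      // oop results returned from VM calls
    void* vm_result_2;    // metadata results (Klass*, Method*) returned from VM calls
};

// Fetch the metadata result and clear the slot so it is not mistaken for a
// live result on the next VM call.
void* get_vm_result_2(JavaThread* thread) {
    void* k = thread->vm_result_2;
    thread->vm_result_2 = nullptr;
    return k;
}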
diff --git a/src/cpu/x86/vm/templateTable_x86_64.cpp b/src/cpu/x86/vm/templateTable_x86_64.cpp
index b13567c08..92ea99d3d 100644
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
-#include "oops/methodDataOop.hpp"
+#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
@@ -367,16 +367,12 @@ void TemplateTable::ldc(bool wide) {
}
__ get_cpool_and_tags(rcx, rax);
- const int base_offset = constantPoolOopDesc::header_size() * wordSize;
- const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
+ const int base_offset = ConstantPool::header_size() * wordSize;
+ const int tags_offset = Array<u1>::base_offset_in_bytes();
// get type
__ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
- // unresolved string - get the resolved string
- __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
- __ jccb(Assembler::equal, call_ldc);
-
// unresolved class - get the resolved class
__ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
__ jccb(Assembler::equal, call_ldc);
@@ -411,76 +407,46 @@ void TemplateTable::ldc(bool wide) {
Label L;
__ cmpl(rdx, JVM_CONSTANT_Integer);
__ jcc(Assembler::equal, L);
- __ cmpl(rdx, JVM_CONSTANT_String);
- __ jcc(Assembler::equal, L);
- __ cmpl(rdx, JVM_CONSTANT_Object);
- __ jcc(Assembler::equal, L);
+ // String and Object are rewritten to fast_aldc
__ stop("unexpected tag type in ldc");
__ bind(L);
}
#endif
- // atos and itos
- Label isOop;
- __ cmpl(rdx, JVM_CONSTANT_Integer);
- __ jcc(Assembler::notEqual, isOop);
+ // itos JVM_CONSTANT_Integer only
__ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
__ push_i(rax);
- __ jmp(Done);
-
- __ bind(isOop);
- __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset));
- __ push_ptr(rax);
-
- if (VerifyOops) {
- __ verify_oop(rax);
- }
-
__ bind(Done);
}
// Fast path for caching oop constants.
-// %%% We should use this to handle Class and String constants also.
-// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
transition(vtos, atos);
- if (!EnableInvokeDynamic) {
- // We should not encounter this bytecode if !EnableInvokeDynamic.
- // The verifier will stop it. However, if we get past the verifier,
- // this will stop the thread in a reasonable way, without crashing the JVM.
- __ call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::throw_IncompatibleClassChangeError));
- // the call_VM checks for exception, so we should never return here.
- __ should_not_reach_here();
- return;
- }
+ Register result = rax;
+ Register tmp = rdx;
+ int index_size = wide ? sizeof(u2) : sizeof(u1);
- const Register cache = rcx;
- const Register index = rdx;
+ Label resolved;
- resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
- if (VerifyOops) {
- __ verify_oop(rax);
- }
+ // We are resolved if the resolved reference cache entry contains a
+ // non-null object (String, MethodType, etc.)
+ assert_different_registers(result, tmp);
+ __ get_cache_index_at_bcp(tmp, 1, index_size);
+ __ load_resolved_reference_at_index(result, tmp);
+ __ testl(result, result);
+ __ jcc(Assembler::notZero, resolved);
- Label L_done, L_throw_exception;
- const Register con_klass_temp = rcx; // same as cache
- const Register array_klass_temp = rdx; // same as index
- __ load_klass(con_klass_temp, rax);
- __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
- __ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
- __ jcc(Assembler::notEqual, L_done);
- __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
- __ jcc(Assembler::notEqual, L_throw_exception);
- __ xorptr(rax, rax);
- __ jmp(L_done);
-
- // Load the exception from the system-array which wraps it:
- __ bind(L_throw_exception);
- __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
- __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
+ address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
+
+ // first time invocation - must resolve first
+ __ movl(tmp, (int)bytecode());
+ __ call_VM(result, entry, tmp);
- __ bind(L_done);
+ __ bind(resolved);
+
+ if (VerifyOops) {
+ __ verify_oop(result);
+ }
}
void TemplateTable::ldc2_w() {
@@ -489,8 +455,8 @@ void TemplateTable::ldc2_w() {
__ get_unsigned_2_byte_index_at_bcp(rbx, 1);
__ get_cpool_and_tags(rcx, rax);
- const int base_offset = constantPoolOopDesc::header_size() * wordSize;
- const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
+ const int base_offset = ConstantPool::header_size() * wordSize;
+ const int tags_offset = Array<u1>::base_offset_in_bytes();
// get type
__ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
@@ -1596,9 +1562,9 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
// holds bumped taken count
- const ByteSize be_offset = methodOopDesc::backedge_counter_offset() +
+ const ByteSize be_offset = Method::backedge_counter_offset() +
InvocationCounter::counter_offset();
- const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() +
+ const ByteSize inv_offset = Method::invocation_counter_offset() +
InvocationCounter::counter_offset();
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
@@ -1620,8 +1586,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// compute return address as bci in rax
__ lea(rax, at_bcp((is_wide ? 5 : 3) -
- in_bytes(constMethodOopDesc::codes_offset())));
- __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
+ in_bytes(ConstMethod::codes_offset())));
+ __ subptr(rax, Address(rcx, Method::const_offset()));
// Adjust the bcp in r13 by the displacement in rdx
__ addptr(r13, rdx);
// jsr returns atos that is not an oop
@@ -1656,18 +1622,18 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
if (ProfileInterpreter) {
// Are we profiling?
- __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
+ __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
__ testptr(rbx, rbx);
__ jccb(Assembler::zero, no_mdo);
// Increment the MDO backedge counter
- const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
+ const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
rax, false, Assembler::zero, &backedge_counter_overflow);
__ jmp(dispatch);
}
__ bind(no_mdo);
- // Increment backedge counter in methodOop
+ // Increment backedge counter in Method*
__ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
rax, false, Assembler::zero, &backedge_counter_overflow);
} else {
@@ -1696,7 +1662,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ jcc(Assembler::below, dispatch);
// When ProfileInterpreter is on, the backedge_count comes
- // from the methodDataOop, which value does not get reset on
+ // from the MethodData*, which value does not get reset on
// the call to frequency_counter_overflow(). To avoid
// excessive calls to the overflow routine while the method is
// being compiled, add a second test to make sure the overflow
@@ -1854,9 +1820,9 @@ void TemplateTable::ret() {
__ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
__ profile_ret(rbx, rcx);
__ get_method(rax);
- __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
+ __ movptr(r13, Address(rax, Method::const_offset()));
__ lea(r13, Address(r13, rbx, Address::times_1,
- constMethodOopDesc::codes_offset()));
+ ConstMethod::codes_offset()));
__ dispatch_next(vtos);
}
@@ -1866,8 +1832,8 @@ void TemplateTable::wide_ret() {
__ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
__ profile_ret(rbx, rcx);
__ get_method(rax);
- __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
- __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
+ __ movptr(r13, Address(rax, Method::const_offset()));
+ __ lea(r13, Address(r13, rbx, Address::times_1, ConstMethod::codes_offset()));
__ dispatch_next(vtos);
}
@@ -2117,31 +2083,17 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
}
void TemplateTable::resolve_cache_and_index(int byte_no,
- Register result,
Register Rcache,
Register index,
size_t index_size) {
const Register temp = rbx;
- assert_different_registers(result, Rcache, index, temp);
+ assert_different_registers(Rcache, index, temp);
Label resolved;
- if (byte_no == f12_oop) {
- // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
- // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
- // there is a 1-1 relation between bytecode type and CP entry type.
- // The caller will also load a methodOop from f2.
- assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
- __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
- __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
- __ testptr(result, result);
- __ jcc(Assembler::notEqual, resolved);
- } else {
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
- assert(result == noreg, ""); //else change code for setting result
__ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
__ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
__ jcc(Assembler::equal, resolved);
- }
// resolve first time through
address entry;
@@ -2164,12 +2116,6 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
case Bytecodes::_invokedynamic:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
break;
- case Bytecodes::_fast_aldc:
- entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
- break;
- case Bytecodes::_fast_aldc_w:
- entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
- break;
default:
fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
break;
@@ -2179,8 +2125,6 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
- if (result != noreg)
- __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
__ bind(resolved);
}
@@ -2193,7 +2137,7 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
bool is_static = false) {
assert_different_registers(cache, index, flags, off);
- ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
+ ByteSize cp_base_offset = ConstantPoolCache::base_offset();
// Field offset
__ movptr(off, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset +
@@ -2208,6 +2152,8 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
__ movptr(obj, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f1_offset())));
+ const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+ __ movptr(obj, Address(obj, mirror_offset));
}
}
@@ -2228,38 +2174,27 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
// determine constant pool cache field offsets
assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
const int method_offset = in_bytes(
- constantPoolCacheOopDesc::base_offset() +
+ ConstantPoolCache::base_offset() +
((byte_no == f2_byte)
? ConstantPoolCacheEntry::f2_offset()
: ConstantPoolCacheEntry::f1_offset()));
- const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
+ const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset());
// access constant pool cache fields
- const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
+ const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset());
- if (byte_no == f12_oop) {
- // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
- // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
- // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
- resolve_cache_and_index(byte_no, itable_index, cache, index, index_size);
- __ movptr(method, Address(cache, index, Address::times_ptr, index_offset));
- itable_index = noreg; // hack to disable load below
- } else {
- resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+ resolve_cache_and_index(byte_no, cache, index, index_size);
__ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
- }
+
if (itable_index != noreg) {
- // pick up itable index from f2 also:
- assert(byte_no == f1_byte, "already picked up f1");
+ // pick up itable or appendix index from f2 also:
__ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
}
__ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
}
-
-// The registers cache and index expected to be set before call.
// Correct values of the cache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register cache, Register index,
bool is_static, bool has_tos) {
@@ -2278,7 +2213,7 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index,
__ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
// cache entry pointer
- __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
+ __ addptr(c_rarg2, in_bytes(ConstantPoolCache::base_offset()));
__ shll(c_rarg3, LogBytesPerWord);
__ addptr(c_rarg2, c_rarg3);
if (is_static) {
@@ -2314,7 +2249,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
const Register flags = rax;
const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
- resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+ resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
jvmti_post_field_access(cache, index, is_static, false);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@@ -2453,7 +2388,7 @@ void TemplateTable::getstatic(int byte_no) {
void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
transition(vtos, vtos);
- ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
+ ByteSize cp_base_offset = ConstantPoolCache::base_offset();
if (JvmtiExport::can_post_field_modification()) {
// Check to see if a field modification watch has been set before
@@ -2517,7 +2452,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
const Register flags = rax;
const Register bc = c_rarg3;
- resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+ resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
jvmti_post_field_mod(cache, index, is_static);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
@@ -2741,7 +2676,7 @@ void TemplateTable::jvmti_post_fast_field_mod() {
void TemplateTable::fast_storefield(TosState state) {
transition(state, vtos);
- ByteSize base = constantPoolCacheOopDesc::base_offset();
+ ByteSize base = ConstantPoolCache::base_offset();
jvmti_post_fast_field_mod();
@@ -2841,13 +2776,13 @@ void TemplateTable::fast_accessfield(TosState state) {
// [jk] not needed currently
// if (os::is_MP()) {
// __ movl(rdx, Address(rcx, rbx, Address::times_8,
- // in_bytes(constantPoolCacheOopDesc::base_offset() +
+ // in_bytes(ConstantPoolCache::base_offset() +
// ConstantPoolCacheEntry::flags_offset())));
// __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
// __ andl(rdx, 0x1);
// }
__ movptr(rbx, Address(rcx, rbx, Address::times_8,
- in_bytes(constantPoolCacheOopDesc::base_offset() +
+ in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset())));
// rax: object
@@ -2904,7 +2839,7 @@ void TemplateTable::fast_xaccess(TosState state) {
__ get_cache_and_index_at_bcp(rcx, rdx, 2);
__ movptr(rbx,
Address(rcx, rdx, Address::times_8,
- in_bytes(constantPoolCacheOopDesc::base_offset() +
+ in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset())));
// make sure exception is reported in correct bcp range (getfield is
// next instruction)
@@ -2929,7 +2864,7 @@ void TemplateTable::fast_xaccess(TosState state) {
// if (os::is_MP()) {
// Label notVolatile;
// __ movl(rdx, Address(rcx, rdx, Address::times_8,
- // in_bytes(constantPoolCacheOopDesc::base_offset() +
+ // in_bytes(ConstantPoolCache::base_offset() +
// ConstantPoolCacheEntry::flags_offset())));
// __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
// __ testl(rdx, 0x1);
@@ -2984,12 +2919,15 @@ void TemplateTable::prepare_invoke(int byte_no,
// maybe push appendix to arguments (just before return address)
if (is_invokedynamic || is_invokehandle) {
Label L_no_push;
- __ verify_oop(index);
__ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
- __ jccb(Assembler::zero, L_no_push);
+ __ jcc(Assembler::zero, L_no_push);
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
+ __ push(rbx);
+ __ mov(rbx, index);
+ __ load_resolved_reference_at_index(index, rbx);
+ __ pop(rbx);
__ push(index); // push appendix (MethodType, CallSite, etc.)
__ bind(L_no_push);
}
@@ -3052,11 +2990,10 @@ void TemplateTable::invokevirtual_helper(Register index,
const Register method = index; // method must be rbx
assert(method == rbx,
- "methodOop must be rbx for interpreter calling convention");
+ "Method* must be rbx for interpreter calling convention");
// do the call - the index is actually the method to call
- // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop
- __ verify_oop(method);
+ // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
// It's final, need a null check here!
__ null_check(recv);
@@ -3071,12 +3008,11 @@ void TemplateTable::invokevirtual_helper(Register index,
// get receiver klass
__ null_check(recv, oopDesc::klass_offset_in_bytes());
__ load_klass(rax, recv);
- __ verify_oop(rax);
// profile this call
__ profile_virtual_call(rax, r14, rdx);
- // get target methodOop & entry point
+ // get target Method* & entry point
__ lookup_virtual_method(rax, index, method);
__ jump_from_interpreted(method, rdx);
}
@@ -3101,12 +3037,11 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- prepare_invoke(byte_no, rbx, noreg, // get f1 methodOop
+ prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
rcx); // get receiver also for null check
__ verify_oop(rcx);
__ null_check(rcx);
// do the call
- __ verify_oop(rbx);
__ profile_call(rax);
__ jump_from_interpreted(rbx, rax);
}
@@ -3115,9 +3050,8 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- prepare_invoke(byte_no, rbx); // get f1 methodOop
+ prepare_invoke(byte_no, rbx); // get f1 Method*
// do the call
- __ verify_oop(rbx);
__ profile_call(rax);
__ jump_from_interpreted(rbx, rax);
}
@@ -3131,7 +3065,7 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- prepare_invoke(byte_no, rax, rbx, // get f1 klassOop, f2 itable index
+ prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index
rcx, rdx); // recv, flags
// rax: interface klass (from f1)
@@ -3155,7 +3089,6 @@ void TemplateTable::invokeinterface(int byte_no) {
__ restore_locals(); // restore r14
__ null_check(rcx, oopDesc::klass_offset_in_bytes());
__ load_klass(rdx, rcx);
- __ verify_oop(rdx);
// profile this call
__ profile_virtual_call(rdx, r13, r14);
@@ -3168,7 +3101,7 @@ void TemplateTable::invokeinterface(int byte_no) {
rbx, r13,
no_such_interface);
- // rbx: methodOop to call
+ // rbx: Method* to call
// rcx: receiver
// Check for abstract method error
// Note: This should be done more efficiently via a throw_abstract_method_error
@@ -3179,7 +3112,7 @@ void TemplateTable::invokeinterface(int byte_no) {
// do the call
// rcx: receiver
- // rbx,: methodOop
+ // rbx,: Method*
__ jump_from_interpreted(rbx, rdx);
__ should_not_reach_here();
@@ -3210,7 +3143,7 @@ void TemplateTable::invokeinterface(int byte_no) {
void TemplateTable::invokehandle(int byte_no) {
transition(vtos, vtos);
- assert(byte_no == f12_oop, "use this argument");
+ assert(byte_no == f1_byte, "use this argument");
const Register rbx_method = rbx; // f2
const Register rax_mtype = rax; // f1
const Register rcx_recv = rcx;
@@ -3223,7 +3156,7 @@ void TemplateTable::invokehandle(int byte_no) {
}
prepare_invoke(byte_no,
- rbx_method, rax_mtype, // get f2 methodOop, f1 MethodType
+ rbx_method, rax_mtype, // get f2 Method*, f1 MethodType
rcx_recv);
__ verify_oop(rbx_method);
__ verify_oop(rcx_recv);
@@ -3240,7 +3173,7 @@ void TemplateTable::invokehandle(int byte_no) {
void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos);
- assert(byte_no == f12_oop, "use this argument");
+ assert(byte_no == f1_byte, "use this argument");
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
@@ -3258,7 +3191,7 @@ void TemplateTable::invokedynamic(int byte_no) {
prepare_invoke(byte_no, rbx_method, rax_callsite);
- // rax: CallSite object (from f1)
+ // rax: CallSite object (from cpool->resolved_references[])
// rbx: MH.linkToCallSite method (from f2)
// Note: rax_callsite is already pushed by prepare_invoke
@@ -3287,25 +3220,25 @@ void TemplateTable::_new() {
__ get_cpool_and_tags(rsi, rax);
// Make sure the class we're about to instantiate has been resolved.
- // This is done before loading instanceKlass to be consistent with the order
- // how Constant Pool is updated (see constantPoolOopDesc::klass_at_put)
- const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
+ // This is done before loading InstanceKlass to be consistent with the order
+ // how Constant Pool is updated (see ConstantPool::klass_at_put)
+ const int tags_offset = Array<u1>::base_offset_in_bytes();
__ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
JVM_CONSTANT_Class);
__ jcc(Assembler::notEqual, slow_case);
- // get instanceKlass
+ // get InstanceKlass
__ movptr(rsi, Address(rsi, rdx,
- Address::times_8, sizeof(constantPoolOopDesc)));
+ Address::times_8, sizeof(ConstantPool)));
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
__ cmpb(Address(rsi,
- instanceKlass::init_state_offset()),
- instanceKlass::fully_initialized);
+ InstanceKlass::init_state_offset()),
+ InstanceKlass::fully_initialized);
__ jcc(Assembler::notEqual, slow_case);
- // get instance_size in instanceKlass (scaled to a count of bytes)
+ // get instance_size in InstanceKlass (scaled to a count of bytes)
__ movl(rdx,
Address(rsi,
Klass::layout_helper_offset()));
@@ -3470,11 +3403,13 @@ void TemplateTable::checkcast() {
// See if bytecode has already been quicked
__ cmpb(Address(rdx, rbx,
Address::times_1,
- typeArrayOopDesc::header_size(T_BYTE) * wordSize),
+ Array<u1>::base_offset_in_bytes()),
JVM_CONSTANT_Class);
__ jcc(Assembler::equal, quicked);
__ push(atos); // save receiver for result, and for GC
- call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+ // vm_result_2 has metadata result
+ __ get_vm_result_2(rax, r15_thread);
__ pop_ptr(rdx); // restore receiver
__ jmpb(resolved);
@@ -3482,7 +3417,7 @@ void TemplateTable::checkcast() {
__ bind(quicked);
__ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
__ movptr(rax, Address(rcx, rbx,
- Address::times_8, sizeof(constantPoolOopDesc)));
+ Address::times_8, sizeof(ConstantPool)));
__ bind(resolved);
__ load_klass(rbx, rdx);
@@ -3523,12 +3458,14 @@ void TemplateTable::instanceof() {
// See if bytecode has already been quicked
__ cmpb(Address(rdx, rbx,
Address::times_1,
- typeArrayOopDesc::header_size(T_BYTE) * wordSize),
+ Array<u1>::base_offset_in_bytes()),
JVM_CONSTANT_Class);
__ jcc(Assembler::equal, quicked);
__ push(atos); // save receiver for result, and for GC
- call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+ // vm_result_2 has metadata result
+ __ get_vm_result_2(rax, r15_thread);
__ pop_ptr(rdx); // restore receiver
__ verify_oop(rdx);
__ load_klass(rdx, rdx);
@@ -3538,7 +3475,7 @@ void TemplateTable::instanceof() {
__ bind(quicked);
__ load_klass(rdx, rax);
__ movptr(rax, Address(rcx, rbx,
- Address::times_8, sizeof(constantPoolOopDesc)));
+ Address::times_8, sizeof(ConstantPool)));
__ bind(resolved);
diff --git a/src/cpu/x86/vm/vtableStubs_x86_32.cpp b/src/cpu/x86/vm/vtableStubs_x86_32.cpp
index 87108d5a1..edf1ab1bf 100644
--- a/src/cpu/x86/vm/vtableStubs_x86_32.cpp
+++ b/src/cpu/x86/vm/vtableStubs_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -81,7 +81,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
if (DebugVtables) {
Label L;
// check offset vs vtable length
- __ cmpl(Address(rax, instanceKlass::vtable_length_offset()*wordSize), vtable_index*vtableEntry::size());
+ __ cmpl(Address(rax, InstanceKlass::vtable_length_offset()*wordSize), vtable_index*vtableEntry::size());
__ jcc(Assembler::greater, L);
__ movl(rbx, vtable_index);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), rcx, rbx);
@@ -91,24 +91,24 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const Register method = rbx;
- // load methodOop and target address
+ // load Method* and target address
__ lookup_virtual_method(rax, vtable_index, method);
if (DebugVtables) {
Label L;
__ cmpptr(method, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
- __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
+ __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L);
__ stop("Vtable entry is NULL");
__ bind(L);
}
// rax,: receiver klass
- // method (rbx): methodOop
+ // method (rbx): Method*
// rcx: receiver
address ame_addr = __ pc();
- __ jmp( Address(method, methodOopDesc::from_compiled_offset()));
+ __ jmp( Address(method, Method::from_compiled_offset()));
masm->flush();
@@ -159,14 +159,14 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
const Register method = rbx;
Label throw_icce;
- // Get methodOop and entrypoint for compiler
+ // Get Method* and entrypoint for compiler
__ lookup_interface_method(// inputs: rec. class, interface, itable index
rsi, rax, itable_index,
// outputs: method, scan temp. reg
method, rdi,
throw_icce);
- // method (rbx): methodOop
+ // method (rbx): Method*
// rcx: receiver
#ifdef ASSERT
@@ -174,15 +174,15 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
Label L1;
__ cmpptr(method, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L1);
- __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
+ __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L1);
- __ stop("methodOop is null");
+ __ stop("Method* is null");
__ bind(L1);
}
#endif // ASSERT
address ame_addr = __ pc();
- __ jmp(Address(method, methodOopDesc::from_compiled_offset()));
+ __ jmp(Address(method, Method::from_compiled_offset()));
__ bind(throw_icce);
__ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
diff --git a/src/cpu/x86/vm/vtableStubs_x86_64.cpp b/src/cpu/x86/vm/vtableStubs_x86_64.cpp
index 5592c6fe2..578023ab4 100644
--- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp
+++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
if (DebugVtables) {
Label L;
// check offset vs vtable length
- __ cmpl(Address(rax, instanceKlass::vtable_length_offset() * wordSize),
+ __ cmpl(Address(rax, InstanceKlass::vtable_length_offset() * wordSize),
vtable_index * vtableEntry::size());
__ jcc(Assembler::greater, L);
__ movl(rbx, vtable_index);
@@ -83,7 +83,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
}
#endif // PRODUCT
- // load methodOop and target address
+ // load Method* and target address
const Register method = rbx;
__ lookup_virtual_method(rax, vtable_index, method);
@@ -92,16 +92,16 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
Label L;
__ cmpptr(method, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
- __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
+ __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L);
__ stop("Vtable entry is NULL");
__ bind(L);
}
// rax: receiver klass
- // rbx: methodOop
+ // rbx: Method*
// rcx: receiver
address ame_addr = __ pc();
- __ jmp( Address(rbx, methodOopDesc::from_compiled_offset()));
+ __ jmp( Address(rbx, Method::from_compiled_offset()));
__ flush();
@@ -161,14 +161,14 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
const Register method = rbx;
Label throw_icce;
- // Get methodOop and entrypoint for compiler
+ // Get Method* and entrypoint for compiler
__ lookup_interface_method(// inputs: rec. class, interface, itable index
r10, rax, itable_index,
// outputs: method, scan temp. reg
method, r11,
throw_icce);
- // method (rbx): methodOop
+ // method (rbx): Method*
// j_rarg0: receiver
#ifdef ASSERT
@@ -176,17 +176,17 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
Label L2;
__ cmpptr(method, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L2);
- __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
+ __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L2);
__ stop("compiler entrypoint is null");
__ bind(L2);
}
#endif // ASSERT
- // rbx: methodOop
+ // rbx: Method*
// j_rarg0: receiver
address ame_addr = __ pc();
- __ jmp(Address(method, methodOopDesc::from_compiled_offset()));
+ __ jmp(Address(method, Method::from_compiled_offset()));
__ bind(throw_icce);
__ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
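The vtable and itable stub changes are essentially renames: the dispatch still loads the receiver's klass, indexes the embedded vtable, and jumps through the method's compiled-code entry, except that the entry now lives in a native Method* instead of a methodOop. In rough C++ terms, with the embedded-table layout simplified:

struct Method { void* from_compiled_entry; };
struct InstanceKlass { Method** vtable; };   // really an embedded, variable-length table

// What create_vtable_stub emits: lookup_virtual_method followed by an
// indirect jump through Method::from_compiled_offset().
void* vtable_dispatch_target(InstanceKlass* receiver_klass, int vtable_index) {
    Method* m = receiver_klass->vtable[vtable_index];
    return m->from_compiled_entry;
}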
diff --git a/src/cpu/x86/vm/x86.ad b/src/cpu/x86/vm/x86.ad
index 6bb14ef16..71e26fb20 100644
--- a/src/cpu/x86/vm/x86.ad
+++ b/src/cpu/x86/vm/x86.ad
@@ -488,17 +488,6 @@ source %{
static address double_signflip() { return (address)double_signflip_pool; }
#endif
-// Map Types to machine register types
-const int Matcher::base2reg[Type::lastype] = {
- Node::NotAMachineReg,0,0, Op_RegI, Op_RegL, 0, Op_RegN,
- Node::NotAMachineReg, Node::NotAMachineReg, /* tuple, array */
- Op_VecS, Op_VecD, Op_VecX, Op_VecY, /* Vectors */
- Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, /* the pointers */
- 0, 0/*abio*/,
- Op_RegP /* Return address */, 0, /* the memories */
- Op_RegF, Op_RegF, Op_RegF, Op_RegD, Op_RegD, Op_RegD,
- 0 /*bottom*/
-};
const bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode))
diff --git a/src/cpu/x86/vm/x86_32.ad b/src/cpu/x86/vm/x86_32.ad
index 02e5b3224..e82d799ec 100644
--- a/src/cpu/x86/vm/x86_32.ad
+++ b/src/cpu/x86/vm/x86_32.ad
@@ -367,7 +367,7 @@ void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp) {
}
// rRegI ereg, memory mem) %{ // emit_reg_mem
-void encode_RegMem( CodeBuffer &cbuf, int reg_encoding, int base, int index, int scale, int displace, bool displace_is_oop ) {
+void encode_RegMem( CodeBuffer &cbuf, int reg_encoding, int base, int index, int scale, int displace, relocInfo::relocType disp_reloc ) {
// There is no index & no scale, use form without SIB byte
if ((index == 0x4) &&
(scale == 0) && (base != ESP_enc)) {
@@ -377,7 +377,7 @@ void encode_RegMem( CodeBuffer &cbuf, int reg_encoding, int base, int index, int
}
else { // If 8-bit displacement, mode 0x1
if ((displace >= -128) && (displace <= 127)
- && !(displace_is_oop) ) {
+ && (disp_reloc == relocInfo::none) ) {
emit_rm(cbuf, 0x1, reg_encoding, base);
emit_d8(cbuf, displace);
}
@@ -385,16 +385,16 @@ void encode_RegMem( CodeBuffer &cbuf, int reg_encoding, int base, int index, int
if (base == -1) { // Special flag for absolute address
emit_rm(cbuf, 0x0, reg_encoding, 0x5);
// (manual lies; no SIB needed here)
- if ( displace_is_oop ) {
- emit_d32_reloc(cbuf, displace, relocInfo::oop_type, 1);
+ if ( disp_reloc != relocInfo::none ) {
+ emit_d32_reloc(cbuf, displace, disp_reloc, 1);
} else {
emit_d32 (cbuf, displace);
}
}
else { // Normal base + offset
emit_rm(cbuf, 0x2, reg_encoding, base);
- if ( displace_is_oop ) {
- emit_d32_reloc(cbuf, displace, relocInfo::oop_type, 1);
+ if ( disp_reloc != relocInfo::none ) {
+ emit_d32_reloc(cbuf, displace, disp_reloc, 1);
} else {
emit_d32 (cbuf, displace);
}
@@ -410,7 +410,7 @@ void encode_RegMem( CodeBuffer &cbuf, int reg_encoding, int base, int index, int
}
else { // If 8-bit displacement, mode 0x1
if ((displace >= -128) && (displace <= 127)
- && !(displace_is_oop) ) {
+ && (disp_reloc == relocInfo::none) ) {
emit_rm(cbuf, 0x1, reg_encoding, 0x4);
emit_rm(cbuf, scale, index, base);
emit_d8(cbuf, displace);
@@ -423,8 +423,8 @@ void encode_RegMem( CodeBuffer &cbuf, int reg_encoding, int base, int index, int
emit_rm(cbuf, 0x2, reg_encoding, 0x4);
emit_rm(cbuf, scale, index, base);
}
- if ( displace_is_oop ) {
- emit_d32_reloc(cbuf, displace, relocInfo::oop_type, 1);
+ if ( disp_reloc != relocInfo::none ) {
+ emit_d32_reloc(cbuf, displace, disp_reloc, 1);
} else {
emit_d32 (cbuf, displace);
}
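
The new encode_RegMem signature replaces the single disp_is_oop flag with the operand's relocation type: now that class metadata lives outside the Java heap, a displacement may need either an oop or a metadata relocation, so one bit is no longer enough. A standalone sketch of the resulting emit decision, with stand-in names (the enum and emitters below are not HotSpot's):

    enum RelocType { reloc_none, reloc_oop, reloc_metadata };   // stand-in for relocInfo::relocType

    static void emit_d32(int /*disp*/)                  { /* plain 32-bit displacement */ }
    static void emit_d32_reloc(int /*disp*/, RelocType) { /* displacement plus a relocation record */ }

    static void encode_disp(int disp, RelocType disp_reloc) {
      if (disp_reloc != reloc_none) {
        emit_d32_reloc(disp, disp_reloc);   // preserve whichever reloc the operand carries
      } else {
        emit_d32(disp);                     // no relocation needed for this displacement
      }
    }
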
@@ -689,7 +689,7 @@ static int impl_helper( CodeBuffer *cbuf, bool do_size, bool is_load, int offset
int opcode, const char *op_str, int size, outputStream* st ) {
if( cbuf ) {
emit_opcode (*cbuf, opcode );
- encode_RegMem(*cbuf, Matcher::_regEncode[reg], ESP_enc, 0x4, 0, offset, false);
+ encode_RegMem(*cbuf, Matcher::_regEncode[reg], ESP_enc, 0x4, 0, offset, relocInfo::none);
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -1090,7 +1090,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
}
if( cbuf ) {
emit_opcode (*cbuf, op );
- encode_RegMem(*cbuf, 0x0, ESP_enc, 0x4, 0, offset, false);
+ encode_RegMem(*cbuf, 0x0, ESP_enc, 0x4, 0, offset, relocInfo::none);
emit_opcode (*cbuf, 0xDD ); // FSTP ST(i)
emit_d8 (*cbuf, 0xD8+Matcher::_regEncode[dst_first] );
#ifndef PRODUCT
@@ -1260,8 +1260,8 @@ void emit_java_to_interp(CodeBuffer &cbuf ) {
if (base == NULL) return; // CodeBuffer::expand failed
// static stub relocation stores the instruction address of the call
__ relocate(static_stub_Relocation::spec(mark), RELOC_IMM32);
- // static stub relocation also tags the methodOop in the code-stream.
- __ movoop(rbx, (jobject)NULL); // method is zapped till fixup time
+ // static stub relocation also tags the Method* in the code-stream.
+ __ mov_metadata(rbx, (Metadata*)NULL); // method is zapped till fixup time
// This is recognized as unresolved by relocs/nativeInst/ic code
__ jump(RuntimeAddress(__ pc()));
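
In the static-call stub the embedded callee is now written with mov_metadata under a metadata relocation instead of movoop under an oop relocation, since a Method* is a native pointer rather than a heap reference. A rough standalone picture of the stub's two payloads and what fixup later fills in; all names here are assumptions, not HotSpot's stub layout:

    struct MetadataStub {};                 // stand-in for Metadata (a Method* is one)

    struct StaticStubImage {                // conceptual payloads of the stub above
      MetadataStub* callee;                 // mov_metadata slot: NULL ("zapped") until resolution
      void*         entry;                  // jump target: initially the resolve path
    };

    // What call resolution conceptually patches into the stub:
    static void patch_static_stub(StaticStubImage& s, MetadataStub* m, void* resolved_entry) {
      s.callee = m;                         // recorded via a metadata relocation, not an oop one
      s.entry  = resolved_entry;
    }
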
@@ -1891,26 +1891,15 @@ encode %{
%}
enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
- // !!!!!
- // Generate "Mov EAX,0x00", placeholder instruction to load oop-info
- // emit_call_dynamic_prologue( cbuf );
- cbuf.set_insts_mark();
- emit_opcode(cbuf, 0xB8 + EAX_enc); // mov EAX,-1
- emit_d32_reloc(cbuf, (int)Universe::non_oop_word(), oop_Relocation::spec_for_immediate(), RELOC_IMM32);
- address virtual_call_oop_addr = cbuf.insts_mark();
- // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
- // who we intended to call.
- cbuf.set_insts_mark();
- $$$emit8$primary;
- emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
- virtual_call_Relocation::spec(virtual_call_oop_addr), RELOC_IMM32 );
+ MacroAssembler _masm(&cbuf);
+ __ ic_call((address)$meth$$method);
%}
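
Both .ad files now delegate the dynamic-call encoding to MacroAssembler::ic_call, which this change introduces in place of the hand-rolled mov/call pair. A standalone sketch of the shape of what gets emitted; FakeAssembler and its methods are stand-ins, and only ic_call, Universe::non_oop_word and the relocation kinds come from the diff itself:

    #include <cstdint>

    struct FakeAssembler {                                  // stand-in, not MacroAssembler
      void mov_rax_imm(intptr_t /*v*/) { /* mov rax, imm -- the inline-cache "cached value" slot */ }
      void call_reloc(void* /*entry*/) { /* call entry, tagged with virtual_call_Relocation */ }
    };

    static void ic_call_sketch(FakeAssembler& a, void* entry, intptr_t non_oop_word) {
      a.mov_rax_imm(non_oop_word);   // placeholder until the inline cache is resolved
      a.call_reloc(entry);           // fixup later rewrites both the immediate and the target
    }
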
enc_class Java_Compiled_Call (method meth) %{ // JAVA COMPILED CALL
- int disp = in_bytes(methodOopDesc::from_compiled_offset());
+ int disp = in_bytes(Method::from_compiled_offset());
assert( -128 <= disp && disp <= 127, "compiled_code_offset isn't small");
- // CALL *[EAX+in_bytes(methodOopDesc::from_compiled_code_entry_point_offset())]
+ // CALL *[EAX+in_bytes(Method::from_compiled_code_entry_point_offset())]
cbuf.set_insts_mark();
$$$emit8$primary;
emit_rm(cbuf, 0x01, $secondary, EAX_enc ); // R/M byte
@@ -2127,8 +2116,8 @@ encode %{
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp;
- bool disp_is_oop = $mem->disp_is_oop();
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_is_oop);
+ relocInfo::relocType disp_reloc = $mem->disp_reloc();
+ encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
%}
enc_class RegMem_Hi(eRegL ereg, memory mem) %{ // emit_reg_mem
@@ -2137,8 +2126,8 @@ encode %{
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp + 4; // Offset is 4 further in memory
- assert( !$mem->disp_is_oop(), "Cannot add 4 to oop" );
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, false/*disp_is_oop*/);
+ assert( $mem->disp_reloc() == relocInfo::none, "Cannot add 4 to oop" );
+ encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, relocInfo::none);
%}
enc_class move_long_small_shift( eRegL dst, immI_1_31 cnt ) %{
@@ -2192,8 +2181,8 @@ encode %{
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp + $disp_for_half$$constant;
- bool disp_is_oop = false;
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_is_oop);
+ relocInfo::relocType disp_reloc = relocInfo::none;
+ encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
%}
// !!!!! Special Custom Code used by MemMove, and stack access instructions !!!!!
@@ -2207,8 +2196,8 @@ encode %{
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp;
- assert( !$mem->disp_is_oop(), "No oops here because no relo info allowed" );
- encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, false);
+ assert( $mem->disp_reloc() == relocInfo::none, "No oops here because no reloc info allowed" );
+ encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, relocInfo::none);
%}
enc_class RMopc_Mem (immI rm_opcode, memory mem) %{
@@ -2217,8 +2206,8 @@ encode %{
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp;
- bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when working with static globals
- encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_is_oop);
+ relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
+ encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
%}
enc_class RegLea (rRegI dst, rRegI src0, immI src1 ) %{ // emit_reg_lea
@@ -2227,8 +2216,8 @@ encode %{
int index = 0x04; // 0x04 indicates no index
int scale = 0x00; // 0x00 indicates no scale
int displace = $src1$$constant; // 0x00 indicates no displacement
- bool disp_is_oop = false;
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_is_oop);
+ relocInfo::relocType disp_reloc = relocInfo::none;
+ encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
%}
enc_class min_enc (rRegI dst, rRegI src) %{ // MIN
@@ -2263,7 +2252,7 @@ encode %{
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp;
- bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when working with static globals
+ relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
if( $src$$reg != FPR1L_enc ) {
reg_encoding = 0x3; // Store & pop
emit_opcode( cbuf, 0xD9 ); // FLD (i.e., push it)
@@ -2271,7 +2260,7 @@ encode %{
}
cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand
emit_opcode(cbuf,$primary);
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_is_oop);
+ encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
%}
enc_class neg_reg(rRegI dst) %{
@@ -2321,8 +2310,8 @@ encode %{
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp;
- bool disp_is_oop = $mem->disp_is_oop();
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_is_oop);
+ relocInfo::relocType disp_reloc = $mem->disp_reloc();
+ encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
// ADD $p,$tmp
emit_opcode(cbuf,0x03);
emit_rm(cbuf, 0x3, $p$$reg, tmpReg);
@@ -3645,8 +3634,8 @@ encode %{
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp;
- bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when working with static globals
- encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_is_oop);
+ relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
+ encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
store_to_stackslot( cbuf, 0x0DF, 0x07, $dst$$disp );
%}
@@ -3663,8 +3652,8 @@ encode %{
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp;
- bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when working with static globals
- encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_is_oop);
+ relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
+ encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
%}
// Safepoint Poll. This polls the safepoint page, and causes an
@@ -5817,8 +5806,8 @@ instruct popCountL_mem(rRegI dst, memory mem, rRegI tmp, eFlagsReg cr) %{
ins_encode %{
//__ popcntl($dst$$Register, $mem$$Address$$first);
//__ popcntl($tmp$$Register, $mem$$Address$$second);
- __ popcntl($dst$$Register, Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, false));
- __ popcntl($tmp$$Register, Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, false));
+ __ popcntl($dst$$Register, Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none));
+ __ popcntl($tmp$$Register, Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none));
__ addl($dst$$Register, $tmp$$Register);
%}
ins_pipe(ialu_reg);
@@ -6185,8 +6174,8 @@ instruct loadL(eRegL dst, load_long_memory mem) %{
"MOV $dst.hi,$mem+4" %}
ins_encode %{
- Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, false);
- Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, false);
+ Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+ Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);
__ movl($dst$$Register, Amemlo);
__ movl(HIGH_FROM_LOW($dst$$Register), Amemhi);
%}
@@ -11795,7 +11784,7 @@ instruct compP_eReg_mem(eFlagsRegU cr, eRegP op1, memory op2) %{
// Only works because non-oop pointers must be raw pointers
// and raw pointers have no anti-dependencies.
instruct compP_mem_eReg( eFlagsRegU cr, eRegP op1, memory op2 ) %{
- predicate( !n->in(2)->in(2)->bottom_type()->isa_oop_ptr() );
+ predicate( n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none );
match(Set cr (CmpP op1 (LoadP op2)));
format %{ "CMPu $op1,$op2" %}
diff --git a/src/cpu/x86/vm/x86_64.ad b/src/cpu/x86/vm/x86_64.ad
index 96b71b615..8c0fb376f 100644
--- a/src/cpu/x86/vm/x86_64.ad
+++ b/src/cpu/x86/vm/x86_64.ad
@@ -516,6 +516,7 @@ void emit_d32_reloc(CodeBuffer& cbuf, int d32, RelocationHolder const& rspec, in
#ifdef ASSERT
if (rspec.reloc()->type() == relocInfo::oop_type &&
d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
+ assert(Universe::heap()->is_in_reserved((address)(intptr_t)d32), "should be real oop");
assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
}
#endif
@@ -542,6 +543,7 @@ void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec
#ifdef ASSERT
if (rspec.reloc()->type() == relocInfo::oop_type &&
d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
+ assert(Universe::heap()->is_in_reserved((address)d64), "should be real oop");
assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
"cannot embed scavengable oops in code");
}
@@ -568,9 +570,9 @@ void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp)
// rRegI ereg, memory mem) %{ // emit_reg_mem
void encode_RegMem(CodeBuffer &cbuf,
int reg,
- int base, int index, int scale, int disp, bool disp_is_oop)
+ int base, int index, int scale, int disp, relocInfo::relocType disp_reloc)
{
- assert(!disp_is_oop, "cannot have disp");
+ assert(disp_reloc == relocInfo::none, "cannot have disp");
int regenc = reg & 7;
int baseenc = base & 7;
int indexenc = index & 7;
@@ -580,7 +582,7 @@ void encode_RegMem(CodeBuffer &cbuf,
// If no displacement, mode is 0x0; unless base is [RBP] or [R13]
if (disp == 0 && base != RBP_enc && base != R13_enc) {
emit_rm(cbuf, 0x0, regenc, baseenc); // *
- } else if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
+ } else if (-0x80 <= disp && disp < 0x80 && disp_reloc == relocInfo::none) {
// If 8-bit displacement, mode 0x1
emit_rm(cbuf, 0x1, regenc, baseenc); // *
emit_d8(cbuf, disp);
@@ -588,7 +590,7 @@ void encode_RegMem(CodeBuffer &cbuf,
// If 32-bit displacement
if (base == -1) { // Special flag for absolute address
emit_rm(cbuf, 0x0, regenc, 0x5); // *
- if (disp_is_oop) {
+ if (disp_reloc != relocInfo::none) {
emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
} else {
emit_d32(cbuf, disp);
@@ -596,7 +598,7 @@ void encode_RegMem(CodeBuffer &cbuf,
} else {
// Normal base + offset
emit_rm(cbuf, 0x2, regenc, baseenc); // *
- if (disp_is_oop) {
+ if (disp_reloc != relocInfo::none) {
emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
} else {
emit_d32(cbuf, disp);
@@ -611,7 +613,7 @@ void encode_RegMem(CodeBuffer &cbuf,
emit_rm(cbuf, 0x0, regenc, 0x4); // *
emit_rm(cbuf, scale, indexenc, baseenc);
} else {
- if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
+ if (-0x80 <= disp && disp < 0x80 && disp_reloc == relocInfo::none) {
// If 8-bit displacement, mode 0x1
emit_rm(cbuf, 0x1, regenc, 0x4); // *
emit_rm(cbuf, scale, indexenc, baseenc);
@@ -625,7 +627,7 @@ void encode_RegMem(CodeBuffer &cbuf,
emit_rm(cbuf, 0x2, regenc, 0x4);
emit_rm(cbuf, scale, indexenc, baseenc); // *
}
- if (disp_is_oop) {
+ if (disp_reloc != relocInfo::none) {
emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
} else {
emit_d32(cbuf, disp);
@@ -1382,8 +1384,8 @@ void emit_java_to_interp(CodeBuffer& cbuf)
if (base == NULL) return; // CodeBuffer::expand failed
// static stub relocation stores the instruction address of the call
__ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
- // static stub relocation also tags the methodOop in the code-stream.
- __ movoop(rbx, (jobject) NULL); // method is zapped till fixup time
+ // static stub relocation also tags the Method* in the code-stream.
+ __ mov_metadata(rbx, (Metadata*) NULL); // method is zapped till fixup time
// This is recognized as unresolved by relocs/nativeinst/ic code
__ jump(RuntimeAddress(__ pc()));
@@ -2044,35 +2046,15 @@ encode %{
}
%}
- enc_class Java_Dynamic_Call(method meth)
- %{
- // JAVA DYNAMIC CALL
- // !!!!!
- // Generate "movq rax, -1", placeholder instruction to load oop-info
- // emit_call_dynamic_prologue( cbuf );
- cbuf.set_insts_mark();
-
- // movq rax, -1
- emit_opcode(cbuf, Assembler::REX_W);
- emit_opcode(cbuf, 0xB8 | RAX_enc);
- emit_d64_reloc(cbuf,
- (int64_t) Universe::non_oop_word(),
- oop_Relocation::spec_for_immediate(), RELOC_IMM64);
- address virtual_call_oop_addr = cbuf.insts_mark();
- // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
- // who we intended to call.
- cbuf.set_insts_mark();
- $$$emit8$primary;
- emit_d32_reloc(cbuf,
- (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
- virtual_call_Relocation::spec(virtual_call_oop_addr),
- RELOC_DISP32);
+ enc_class Java_Dynamic_Call(method meth) %{
+ MacroAssembler _masm(&cbuf);
+ __ ic_call((address)$meth$$method);
%}
enc_class Java_Compiled_Call(method meth)
%{
// JAVA COMPILED CALL
- int disp = in_bytes(methodOopDesc:: from_compiled_offset());
+ int disp = in_bytes(Method:: from_compiled_offset());
// XXX XXX offset is 128 is 1.5 NON-PRODUCT !!!
// assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small");
@@ -2190,8 +2172,8 @@ encode %{
}
emit_opcode(cbuf, 0xB8 | dstenc);
// This next line should be generated from ADLC
- if ($src->constant_is_oop()) {
- emit_d64_reloc(cbuf, $src$$constant, relocInfo::oop_type, RELOC_IMM64);
+ if ($src->constant_reloc() != relocInfo::none) {
+ emit_d64_reloc(cbuf, $src$$constant, $src->constant_reloc(), RELOC_IMM64);
} else {
emit_d64(cbuf, $src$$constant);
}
@@ -2452,9 +2434,9 @@ encode %{
int index = $mem$$index;
int scale = $mem$$scale;
int disp = $mem$$disp;
- bool disp_is_oop = $mem->disp_is_oop();
+ relocInfo::relocType disp_reloc = $mem->disp_reloc();
- encode_RegMem(cbuf, reg, base, index, scale, disp, disp_is_oop);
+ encode_RegMem(cbuf, reg, base, index, scale, disp, disp_reloc);
%}
enc_class RM_opc_mem(immI rm_opcode, memory mem)
@@ -2467,11 +2449,11 @@ encode %{
int scale = $mem$$scale;
int displace = $mem$$disp;
- bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when
+ relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when
// working with static
// globals
encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace,
- disp_is_oop);
+ disp_reloc);
%}
enc_class reg_lea(rRegI dst, rRegI src0, immI src1)
@@ -2481,9 +2463,9 @@ encode %{
int index = 0x04; // 0x04 indicates no index
int scale = 0x00; // 0x00 indicates no scale
int displace = $src1$$constant; // 0x00 indicates no displacement
- bool disp_is_oop = false;
+ relocInfo::relocType disp_reloc = relocInfo::none;
encode_RegMem(cbuf, reg_encoding, base, index, scale, displace,
- disp_is_oop);
+ disp_reloc);
%}
enc_class neg_reg(rRegI dst)
@@ -3169,7 +3151,7 @@ operand immN0() %{
operand immP31()
%{
- predicate(!n->as_Type()->type()->isa_oopptr()
+ predicate(n->as_Type()->type()->reloc() == relocInfo::none
&& (n->get_ptr() >> 31) == 0);
match(ConP);
@@ -6466,8 +6448,8 @@ instruct encodeHeapOop_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
%}
instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
- predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
- n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
+ predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
+ n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
match(Set dst (DecodeN src));
effect(KILL cr);
format %{ "decode_heap_oop $dst,$src" %}
@@ -6483,8 +6465,8 @@ instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
%}
instruct decodeHeapOop_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
- predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
- n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
+ predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
+ n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
match(Set dst (DecodeN src));
effect(KILL cr);
format %{ "decode_heap_oop_not_null $dst,$src" %}
@@ -10348,7 +10330,7 @@ instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2)
// and raw pointers have no anti-dependencies.
instruct compP_mem_rReg(rFlagsRegU cr, rRegP op1, memory op2)
%{
- predicate(!n->in(2)->in(2)->bottom_type()->isa_oop_ptr());
+ predicate(n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none);
match(Set cr (CmpP op1 (LoadP op2)));
format %{ "cmpq $op1, $op2\t# raw ptr" %}
@@ -10757,8 +10739,8 @@ instruct partialSubtypeCheck(rdi_RegP result,
ins_cost(1100); // slightly larger than the next version
format %{ "movq rdi, [$sub + in_bytes(Klass::secondary_supers_offset())]\n\t"
- "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
- "addq rdi, arrayOopDex::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
+ "movl rcx, [rdi + Array<Klass*>::length_offset_in_bytes()]\t# length to scan\n\t"
+ "addq rdi, Array<Klass*>::base_offset_in_bytes()\t# Skip to start of data; set NZ in case count is zero\n\t"
"repne scasq\t# Scan *rdi++ for a match with rax while rcx--\n\t"
"jne,s miss\t\t# Missed: rdi not-zero\n\t"
"movq [$sub + in_bytes(Klass::secondary_super_cache_offset())], $super\t# Hit: update cache\n\t"
@@ -10780,8 +10762,8 @@ instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr,
ins_cost(1000);
format %{ "movq rdi, [$sub + in_bytes(Klass::secondary_supers_offset())]\n\t"
- "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
- "addq rdi, arrayOopDex::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
+ "movl rcx, [rdi + Array<Klass*>::length_offset_in_bytes()]\t# length to scan\n\t"
+ "addq rdi, Array<Klass*>::base_offset_in_bytes()\t# Skip to start of data; set NZ in case count is zero\n\t"
"repne scasq\t# Scan *rdi++ for a match with rax while cx-- != 0\n\t"
"jne,s miss\t\t# Missed: flags nz\n\t"
"movq [$sub + in_bytes(Klass::secondary_super_cache_offset())], $super\t# Hit: update cache\n\t"
@@ -11060,7 +11042,6 @@ instruct CallDynamicJavaDirect(method meth)
ins_cost(300);
format %{ "movq rax, #Universe::non_oop_word()\n\t"
"call,dynamic " %}
- opcode(0xE8); /* E8 cd */
ins_encode(Java_Dynamic_Call(meth), call_epilog);
ins_pipe(pipe_slow);
ins_alignment(4);