author    iveresov <none@none>  2010-11-30 23:23:40 -0800
committer iveresov <none@none>  2010-11-30 23:23:40 -0800
commit    61117a198d5c72b2b2ec5b2d4b0fde59ca480a4f (patch)
tree      8378aa96ac19b9583d8a238cdaee0896e0c89de8 /src/share/vm/c1
parent    8ab8d32ae3f2b4a9ea9cb79079ef402c1241b2c5 (diff)
6985015: C1 needs to support compressed oops
Summary: This change implements compressed oops in C1 for x64 and SPARC. The changes are mostly at the codegen level, with a few exceptions for accesses to oops outside the heap, which stay uncompressed and are marked as wide in the IR. Compressed oops are now also enabled with tiered compilation. Reviewed-by: twisti, kvn, never, phh
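For background, a compressed oop stores a 64-bit object pointer as a 32-bit offset from a fixed heap base, scaled by the object alignment. A minimal stand-alone sketch of the encode/decode arithmetic (illustrative only: heap_base and oop_shift are hypothetical stand-ins for the VM's real globals, and the real decoder also special-cases NULL and zero-based heaps):

  #include <cstdint>

  static uintptr_t heap_base = 0;  // assumed fixed at VM startup
  static const int oop_shift = 3;  // log2 of the 8-byte object alignment

  // Compress: drop the heap base, then the alignment bits.
  static inline uint32_t encode_oop(uintptr_t oop) {
    return (uint32_t)((oop - heap_base) >> oop_shift);
  }

  // Decompress: scale back up and re-add the heap base.
  static inline uintptr_t decode_oop(uint32_t narrow) {
    return heap_base + ((uintptr_t)narrow << oop_shift);
  }

The codegen changes below mostly wrap this decode/encode around heap loads and stores of T_OBJECT, while keeping full-width ("wide") moves for oop slots that live outside the heap.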
Diffstat (limited to 'src/share/vm/c1')
-rw-r--r--  src/share/vm/c1/c1_FrameMap.hpp     |  8
-rw-r--r--  src/share/vm/c1/c1_GraphBuilder.cpp |  2
-rw-r--r--  src/share/vm/c1/c1_Instruction.hpp  | 11
-rw-r--r--  src/share/vm/c1/c1_LIR.cpp          |  2
-rw-r--r--  src/share/vm/c1/c1_LIR.hpp          | 16
-rw-r--r--  src/share/vm/c1/c1_LIRAssembler.cpp | 12
-rw-r--r--  src/share/vm/c1/c1_LIRAssembler.hpp | 12
-rw-r--r--  src/share/vm/c1/c1_LIRGenerator.cpp | 47
-rw-r--r--  src/share/vm/c1/c1_LinearScan.cpp   |  6
-rw-r--r--  src/share/vm/c1/c1_Runtime1.cpp     |  5
10 files changed, 76 insertions(+), 45 deletions(-)
diff --git a/src/share/vm/c1/c1_FrameMap.hpp b/src/share/vm/c1/c1_FrameMap.hpp
index 91ee545e7..c112ad540 100644
--- a/src/share/vm/c1/c1_FrameMap.hpp
+++ b/src/share/vm/c1/c1_FrameMap.hpp
@@ -76,8 +76,8 @@ class FrameMap : public CompilationResourceObj {
nof_cpu_regs_reg_alloc = pd_nof_cpu_regs_reg_alloc,
nof_fpu_regs_reg_alloc = pd_nof_fpu_regs_reg_alloc,
- nof_caller_save_cpu_regs = pd_nof_caller_save_cpu_regs_frame_map,
- nof_caller_save_fpu_regs = pd_nof_caller_save_fpu_regs_frame_map,
+ max_nof_caller_save_cpu_regs = pd_nof_caller_save_cpu_regs_frame_map,
+ nof_caller_save_fpu_regs = pd_nof_caller_save_fpu_regs_frame_map,
spill_slot_size_in_bytes = 4
};
@@ -97,7 +97,7 @@ class FrameMap : public CompilationResourceObj {
static Register _cpu_rnr2reg [nof_cpu_regs];
static int _cpu_reg2rnr [nof_cpu_regs];
- static LIR_Opr _caller_save_cpu_regs [nof_caller_save_cpu_regs];
+ static LIR_Opr _caller_save_cpu_regs [max_nof_caller_save_cpu_regs];
static LIR_Opr _caller_save_fpu_regs [nof_caller_save_fpu_regs];
int _framesize;
@@ -243,7 +243,7 @@ class FrameMap : public CompilationResourceObj {
VMReg regname(LIR_Opr opr) const;
static LIR_Opr caller_save_cpu_reg_at(int i) {
- assert(i >= 0 && i < nof_caller_save_cpu_regs, "out of bounds");
+ assert(i >= 0 && i < max_nof_caller_save_cpu_regs, "out of bounds");
return _caller_save_cpu_regs[i];
}
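The rename above turns the old compile-time register count into an upper bound: with compressed oops enabled, one CPU register is pinned as the heap base and drops out of the allocatable set, so the live count becomes a platform query. A hypothetical sketch of that query, implied by the FrameMap::nof_caller_save_cpu_regs() and FrameMap::last_cpu_reg() calls in the c1_LinearScan.cpp hunks further down (the real definitions live in the platform frame-map files, which are outside this diffstat):

  // Sketch only: the constant is now a maximum, and the usable count
  // shrinks by one when a register is reserved as the heap base.
  static int nof_caller_save_cpu_regs() {
    return UseCompressedOops ? max_nof_caller_save_cpu_regs - 1
                             : max_nof_caller_save_cpu_regs;
  }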
diff --git a/src/share/vm/c1/c1_GraphBuilder.cpp b/src/share/vm/c1/c1_GraphBuilder.cpp
index 81239201c..492e21f4d 100644
--- a/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -2795,7 +2795,7 @@ void GraphBuilder::setup_osr_entry_block() {
get = append(new UnsafeGetRaw(as_BasicType(local->type()), e,
append(new Constant(new IntConstant(offset))),
0,
- true));
+ true /*unaligned*/, true /*wide*/));
}
_state->store_local(index, get);
}
diff --git a/src/share/vm/c1/c1_Instruction.hpp b/src/share/vm/c1/c1_Instruction.hpp
index 43e8f87e7..63b30819c 100644
--- a/src/share/vm/c1/c1_Instruction.hpp
+++ b/src/share/vm/c1/c1_Instruction.hpp
@@ -2110,20 +2110,23 @@ BASE(UnsafeRawOp, UnsafeOp)
LEAF(UnsafeGetRaw, UnsafeRawOp)
private:
- bool _may_be_unaligned; // For OSREntry
+ bool _may_be_unaligned, _is_wide; // For OSREntry
public:
- UnsafeGetRaw(BasicType basic_type, Value addr, bool may_be_unaligned)
+ UnsafeGetRaw(BasicType basic_type, Value addr, bool may_be_unaligned, bool is_wide = false)
: UnsafeRawOp(basic_type, addr, false) {
_may_be_unaligned = may_be_unaligned;
+ _is_wide = is_wide;
}
- UnsafeGetRaw(BasicType basic_type, Value base, Value index, int log2_scale, bool may_be_unaligned)
+ UnsafeGetRaw(BasicType basic_type, Value base, Value index, int log2_scale, bool may_be_unaligned, bool is_wide = false)
: UnsafeRawOp(basic_type, base, index, log2_scale, false) {
_may_be_unaligned = may_be_unaligned;
+ _is_wide = is_wide;
}
- bool may_be_unaligned() { return _may_be_unaligned; }
+ bool may_be_unaligned() { return _may_be_unaligned; }
+ bool is_wide() { return _is_wide; }
};
diff --git a/src/share/vm/c1/c1_LIR.cpp b/src/share/vm/c1/c1_LIR.cpp
index e26a176d8..bce59a55d 100644
--- a/src/share/vm/c1/c1_LIR.cpp
+++ b/src/share/vm/c1/c1_LIR.cpp
@@ -1742,6 +1742,8 @@ const char * LIR_Op1::name() const {
return "unaligned move";
case lir_move_volatile:
return "volatile_move";
+ case lir_move_wide:
+ return "wide_move";
default:
ShouldNotReachHere();
return "illegal_op";
diff --git a/src/share/vm/c1/c1_LIR.hpp b/src/share/vm/c1/c1_LIR.hpp
index 2b8fb7110..7a3574cfa 100644
--- a/src/share/vm/c1/c1_LIR.hpp
+++ b/src/share/vm/c1/c1_LIR.hpp
@@ -985,6 +985,7 @@ enum LIR_MoveKind {
lir_move_normal,
lir_move_volatile,
lir_move_unaligned,
+ lir_move_wide,
lir_move_max_flag
};
@@ -1932,7 +1933,20 @@ class LIR_List: public CompilationResourceObj {
void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
-
+ void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
+ if (UseCompressedOops) {
+ append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
+ } else {
+ move(src, dst, info);
+ }
+ }
+ void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
+ if (UseCompressedOops) {
+ append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
+ } else {
+ move(src, dst, info);
+ }
+ }
void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
void oop2reg (jobject o, LIR_Opr reg) { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o), reg)); }
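The move_wide helpers above carry the core idea of the change: with compressed oops, a T_OBJECT slot inside the Java heap is 32 bits wide, but oop locations outside the heap (thread-local exception fields, outgoing stack arguments) keep their full 64-bit width and must bypass the narrow-oop path. With -XX:-UseCompressedOops both helpers fall back to a plain move, so shared code can call them unconditionally. A usage sketch, mirroring the do_ExceptionObject hunk in c1_LIRGenerator.cpp below:

  // The pending-exception oop lives in a JavaThread field, outside the
  // Java heap, so it is loaded full-width rather than as a narrow oop.
  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg,
                               in_bytes(JavaThread::exception_oop_offset()),
                               T_OBJECT),
               exceptionOopOpr());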
diff --git a/src/share/vm/c1/c1_LIRAssembler.cpp b/src/share/vm/c1/c1_LIRAssembler.cpp
index 153ff3f86..c42281997 100644
--- a/src/share/vm/c1/c1_LIRAssembler.cpp
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp
@@ -489,7 +489,9 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) {
volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
} else {
move_op(op->in_opr(), op->result_opr(), op->type(),
- op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
+ op->patch_code(), op->info(), op->pop_fpu_stack(),
+ op->move_kind() == lir_move_unaligned,
+ op->move_kind() == lir_move_wide);
}
break;
@@ -758,7 +760,7 @@ void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_
}
-void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
+void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
if (src->is_register()) {
if (dest->is_register()) {
assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
@@ -767,7 +769,7 @@ void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
reg2stack(src, dest, type, pop_fpu_stack);
} else if (dest->is_address()) {
- reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
+ reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
} else {
ShouldNotReachHere();
}
@@ -790,13 +792,13 @@ void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
const2stack(src, dest);
} else if (dest->is_address()) {
assert(patch_code == lir_patch_none, "no patching allowed here");
- const2mem(src, dest, type, info);
+ const2mem(src, dest, type, info, wide);
} else {
ShouldNotReachHere();
}
} else if (src->is_address()) {
- mem2reg(src, dest, type, patch_code, info, unaligned);
+ mem2reg(src, dest, type, patch_code, info, wide, unaligned);
} else {
ShouldNotReachHere();
diff --git a/src/share/vm/c1/c1_LIRAssembler.hpp b/src/share/vm/c1/c1_LIRAssembler.hpp
index 5ddc52c52..24f62862c 100644
--- a/src/share/vm/c1/c1_LIRAssembler.hpp
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp
@@ -165,15 +165,17 @@ class LIR_Assembler: public CompilationResourceObj {
void const2reg (LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info);
void const2stack(LIR_Opr src, LIR_Opr dest);
- void const2mem (LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info);
+ void const2mem (LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide);
void reg2stack (LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack);
void reg2reg (LIR_Opr src, LIR_Opr dest);
- void reg2mem (LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned);
+ void reg2mem (LIR_Opr src, LIR_Opr dest, BasicType type,
+ LIR_PatchCode patch_code, CodeEmitInfo* info,
+ bool pop_fpu_stack, bool wide, bool unaligned);
void stack2reg (LIR_Opr src, LIR_Opr dest, BasicType type);
void stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type);
void mem2reg (LIR_Opr src, LIR_Opr dest, BasicType type,
- LIR_PatchCode patch_code = lir_patch_none,
- CodeEmitInfo* info = NULL, bool unaligned = false);
+ LIR_PatchCode patch_code,
+ CodeEmitInfo* info, bool wide, bool unaligned);
void prefetchr (LIR_Opr src);
void prefetchw (LIR_Opr src);
@@ -211,7 +213,7 @@ class LIR_Assembler: public CompilationResourceObj {
void roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack);
void move_op(LIR_Opr src, LIR_Opr result, BasicType type,
- LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned);
+ LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide);
void volatile_move_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info);
void comp_mem_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info); // info set for null exceptions
void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op);
diff --git a/src/share/vm/c1/c1_LIRGenerator.cpp b/src/share/vm/c1/c1_LIRGenerator.cpp
index 41bd2c5dd..b81d639f5 100644
--- a/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -864,11 +864,11 @@ void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
// MDO cells are intptr_t, so the data_reg width is arch-dependent.
LIR_Opr data_reg = new_pointer_register();
LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
- __ move(LIR_OprFact::address(data_addr), data_reg);
+ __ move(data_addr, data_reg);
// Use leal instead of add to avoid destroying condition codes on x86
LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
__ leal(LIR_OprFact::address(fake_incr_value), data_reg);
- __ move(data_reg, LIR_OprFact::address(data_addr));
+ __ move(data_reg, data_addr);
}
}
@@ -1009,12 +1009,12 @@ void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
operand_for_instruction(phi));
LIR_Opr thread_reg = getThreadPointer();
- __ move(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
- exceptionOopOpr());
- __ move(LIR_OprFact::oopConst(NULL),
- new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
- __ move(LIR_OprFact::oopConst(NULL),
- new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
+ __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
+ exceptionOopOpr());
+ __ move_wide(LIR_OprFact::oopConst(NULL),
+ new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
+ __ move_wide(LIR_OprFact::oopConst(NULL),
+ new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
LIR_Opr result = new_register(T_OBJECT);
__ move(exceptionOopOpr(), result);
@@ -1085,7 +1085,7 @@ void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
void LIRGenerator::do_Return(Return* x) {
if (compilation()->env()->dtrace_method_probes()) {
BasicTypeList signature;
- signature.append(T_INT); // thread
+ signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
signature.append(T_OBJECT); // methodOop
LIR_OprList* args = new LIR_OprList();
args->append(getThreadPointer());
@@ -1122,8 +1122,8 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
info = state_for(x);
}
__ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
- __ move(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
- klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
+ __ move_wide(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
+ klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
}
@@ -1131,7 +1131,7 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
void LIRGenerator::do_currentThread(Intrinsic* x) {
assert(x->number_of_arguments() == 0, "wrong type");
LIR_Opr reg = rlock_result(x);
- __ load(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
+ __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}
@@ -1908,7 +1908,11 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
__ unaligned_move(addr, reg);
} else {
- __ move(addr, reg);
+ if (dst_type == T_OBJECT && x->is_wide()) {
+ __ move_wide(addr, reg);
+ } else {
+ __ move(addr, reg);
+ }
}
}
@@ -2287,7 +2291,7 @@ void LIRGenerator::do_Base(Base* x) {
if (compilation()->env()->dtrace_method_probes()) {
BasicTypeList signature;
- signature.append(T_INT); // thread
+ signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
signature.append(T_OBJECT); // methodOop
LIR_OprList* args = new LIR_OprList();
args->append(getThreadPointer());
@@ -2352,11 +2356,14 @@ void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR
} else {
LIR_Address* addr = loc->as_address_ptr();
param->load_for_store(addr->type());
- if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
- __ unaligned_move(param->result(), addr);
- } else {
- __ move(param->result(), addr);
- }
+ if (addr->type() == T_OBJECT) {
+ __ move_wide(param->result(), addr);
+ } else if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
+ __ unaligned_move(param->result(), addr);
+ } else {
+ __ move(param->result(), addr);
+ }
}
}
@@ -2368,7 +2375,7 @@ void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR
} else {
assert(loc->is_address(), "just checking");
receiver->load_for_store(T_OBJECT);
- __ move(receiver->result(), loc);
+ __ move_wide(receiver->result(), loc->as_address_ptr());
}
}
}
diff --git a/src/share/vm/c1/c1_LinearScan.cpp b/src/share/vm/c1/c1_LinearScan.cpp
index c77b3d19c..fe118944a 100644
--- a/src/share/vm/c1/c1_LinearScan.cpp
+++ b/src/share/vm/c1/c1_LinearScan.cpp
@@ -1273,7 +1273,7 @@ void LinearScan::build_intervals() {
int caller_save_registers[LinearScan::nof_regs];
int i;
- for (i = 0; i < FrameMap::nof_caller_save_cpu_regs; i++) {
+ for (i = 0; i < FrameMap::nof_caller_save_cpu_regs(); i++) {
LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
@@ -3557,7 +3557,7 @@ void RegisterVerifier::process_operations(LIR_List* ops, IntervalList* input_sta
// invalidate all caller save registers at calls
if (visitor.has_call()) {
- for (j = 0; j < FrameMap::nof_caller_save_cpu_regs; j++) {
+ for (j = 0; j < FrameMap::nof_caller_save_cpu_regs(); j++) {
state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
}
for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
@@ -5596,7 +5596,7 @@ void LinearScanWalker::init_vars_for_alloc(Interval* cur) {
_last_reg = pd_last_fpu_reg;
} else {
_first_reg = pd_first_cpu_reg;
- _last_reg = pd_last_cpu_reg;
+ _last_reg = FrameMap::last_cpu_reg();
}
assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
diff --git a/src/share/vm/c1/c1_Runtime1.cpp b/src/share/vm/c1/c1_Runtime1.cpp
index fe754ab78..89cb28c38 100644
--- a/src/share/vm/c1/c1_Runtime1.cpp
+++ b/src/share/vm/c1/c1_Runtime1.cpp
@@ -1174,7 +1174,7 @@ JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int d
memmove(dst_addr, src_addr, length << l2es);
return ac_ok;
} else if (src->is_objArray() && dst->is_objArray()) {
- if (UseCompressedOops) { // will need for tiered
+ if (UseCompressedOops) {
narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
@@ -1210,10 +1210,11 @@ JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
if (UseCompressedOops) {
bs->write_ref_array_pre((narrowOop*)dst, num);
+ Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);
} else {
bs->write_ref_array_pre((oop*)dst, num);
+ Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
}
- Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
bs->write_ref_array(dst, num);
JRT_END
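The last hunk fixes a latent width bug: the pre-barrier already dispatched on UseCompressedOops, but the element copy always ran over full-width oop* slots, touching the wrong number of bytes once the heap actually holds narrowOops. A simplified stand-alone sketch of the corrected shape (types and the memmove placeholder are illustrative; the real code uses Copy::conjoint_oops_atomic, which copies element-atomically):

  #include <cstdint>
  #include <cstring>

  typedef uint32_t  narrowOop;  // heap slot width with compressed oops
  typedef uintptr_t oopSlot;    // full-width heap slot otherwise

  void copy_oop_array(void* src, void* dst, int num, bool compressed) {
    // The barrier and the copy must agree on the element width.
    size_t slot = compressed ? sizeof(narrowOop) : sizeof(oopSlot);
    memmove(dst, src, (size_t)num * slot);
  }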