author     aph <none@none>                     2014-05-29 13:27:46 -0400
committer  aph <none@none>                     2014-05-29 13:27:46 -0400
commit     abe91b56f3fbbc281affbd0e182136c76bce76bd (patch)
tree       29d27c0ee1e95c96d14f82a5fe0a1ad040e0a273
parent     f9e5c2a3e27f457a2e3d03c8ef4ba0cc6defc280 (diff)
parent     3ca26cb7d7ba2d09ba78e5181d8afe222944f2b4 (diff)
Merge
-rw-r--r--  src/cpu/aarch64/vm/aarch64_call.cpp                  |   2
-rw-r--r--  src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp          |   2
-rw-r--r--  src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp       |  20
-rw-r--r--  src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp       |  76
-rw-r--r--  src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp     |   8
-rw-r--r--  src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp           |   4
-rw-r--r--  src/cpu/aarch64/vm/compiledIC_aarch64.cpp            |   4
-rw-r--r--  src/cpu/aarch64/vm/interp_masm_aarch64.cpp           |   4
-rw-r--r--  src/cpu/aarch64/vm/interp_masm_aarch64.hpp           |   8
-rw-r--r--  src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp  |   2
-rw-r--r--  src/cpu/aarch64/vm/macroAssembler_aarch64.cpp        | 112
-rw-r--r--  src/cpu/aarch64/vm/macroAssembler_aarch64.hpp        | 122
-rw-r--r--  src/cpu/aarch64/vm/methodHandles_aarch64.cpp         |   6
-rw-r--r--  src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp         |  15
-rw-r--r--  src/cpu/aarch64/vm/stubGenerator_aarch64.cpp         |  74
-rw-r--r--  src/cpu/aarch64/vm/stubRoutines_aarch64.cpp          | 245
-rw-r--r--  src/cpu/aarch64/vm/stubRoutines_aarch64.hpp          |  12
-rw-r--r--  src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp   | 125
-rw-r--r--  src/cpu/aarch64/vm/templateTable_aarch64.cpp         |   5
-rw-r--r--  src/cpu/aarch64/vm/vm_version_aarch64.cpp            |   4
20 files changed, 664 insertions(+), 186 deletions(-)
diff --git a/src/cpu/aarch64/vm/aarch64_call.cpp b/src/cpu/aarch64/vm/aarch64_call.cpp
index 1dfb85ce9..7e1738c31 100644
--- a/src/cpu/aarch64/vm/aarch64_call.cpp
+++ b/src/cpu/aarch64/vm/aarch64_call.cpp
@@ -180,7 +180,7 @@ extern "C" void setup_arm_sim(void *sp, u_int64_t calltype)
default:
break;
case MacroAssembler::ret_type_integral:
- // this overwrites the saved rax
+ // this overwrites the saved r0
*return_slot = sim->getCPUState().xreg(R0, 0);
break;
case MacroAssembler::ret_type_float:
diff --git a/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp b/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp
index 318adf458..cdedc64ec 100644
--- a/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp
+++ b/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp
@@ -209,7 +209,7 @@ void NewInstanceStub::emit_code(LIR_Assembler* ce) {
__ bl(RuntimeAddress(Runtime1::entry_for(_stub_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
- assert(_result->as_register() == r0, "result must in rax,");
+ assert(_result->as_register() == r0, "result must in r0,");
__ b(_continuation);
}
diff --git a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
index f418fb67c..44a1e389b 100644
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
@@ -378,7 +378,7 @@ int LIR_Assembler::emit_exception_handler() {
int offset = code_offset();
- // the exception oop and pc are in rax, and rdx
+ // the exception oop and pc are in r0, and r3
// no other registers need to be preserved, so invalidate them
__ invalidate_registers(false, true, true, false, true, true);
@@ -2073,7 +2073,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
add_call_info(pc_for_athrow_offset, info); // for exception handler
__ verify_not_null_oop(r0);
- // search an exception handler (rax: exception oop, rdx: throwing pc)
+ // search an exception handler (r0: exception oop, r3: throwing pc)
if (compilation()->has_fpu_code()) {
unwind_id = Runtime1::handle_exception_id;
} else {
@@ -2647,7 +2647,21 @@ void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
}
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
- fatal("CRC32 intrinsic is not implemented on this platform");
+ assert(op->crc()->is_single_cpu(), "crc must be register");
+ assert(op->val()->is_single_cpu(), "byte value must be register");
+ assert(op->result_opr()->is_single_cpu(), "result must be register");
+ Register crc = op->crc()->as_register();
+ Register val = op->val()->as_register();
+ Register res = op->result_opr()->as_register();
+
+ assert_different_registers(val, crc, res);
+ unsigned long offset;
+ __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
+ if (offset) __ add(res, res, offset);
+
+ __ ornw(crc, zr, crc); // ~crc
+ __ update_byte_crc32(crc, val, res);
+ __ ornw(res, zr, crc); // ~crc
}
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
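For orientation, here is a minimal stand-alone C++ sketch of what the sequence emitted above computes for java.util.zip.CRC32.update(crc, b). The real code indexes StubRoutines::aarch64::_crc_table (added later in this patch) via adrp; the sketch instead builds table 0 from the CRC-32 polynomial 0xEDB88320, which is an assumption about the table's contents, not code from the patch.

#include <cstdint>

static uint32_t table0[256];

// Build the standard (reflected, polynomial 0xEDB88320) CRC-32 byte table;
// stands in for table 0 of StubRoutines::aarch64::_crc_table.
static void init_table0() {
  for (uint32_t i = 0; i < 256; i++) {
    uint32_t c = i;
    for (int k = 0; k < 8; k++)
      c = (c & 1) ? 0xEDB88320u ^ (c >> 1) : (c >> 1);
    table0[i] = c;
  }
}

// What emit_updatecrc32 computes: invert the incoming CRC, fold in one byte
// through the table, invert again (CRC32 keeps the non-inverted value
// between calls).
uint32_t crc32_update_byte(uint32_t crc, uint8_t b) {
  crc = ~crc;                                      // ornw(crc, zr, crc)
  crc = table0[(crc ^ b) & 0xffu] ^ (crc >> 8);    // update_byte_crc32(crc, val, res)
  return ~crc;                                     // ornw(res, zr, crc)
}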
diff --git a/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp b/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp
index 228c8d9a6..644fa4bba 100644
--- a/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp
+++ b/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp
@@ -958,7 +958,81 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
}
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
- fatal("CRC32 intrinsic is not implemented on this platform");
+ assert(UseCRC32Intrinsics, "why are we here?");
+ // Make all state_for calls early since they can emit code
+ LIR_Opr result = rlock_result(x);
+ int flags = 0;
+ switch (x->id()) {
+ case vmIntrinsics::_updateCRC32: {
+ LIRItem crc(x->argument_at(0), this);
+ LIRItem val(x->argument_at(1), this);
+ // val is destroyed by update_crc32
+ val.set_destroys_register();
+ crc.load_item();
+ val.load_item();
+ __ update_crc32(crc.result(), val.result(), result);
+ break;
+ }
+ case vmIntrinsics::_updateBytesCRC32:
+ case vmIntrinsics::_updateByteBufferCRC32: {
+ bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);
+
+ LIRItem crc(x->argument_at(0), this);
+ LIRItem buf(x->argument_at(1), this);
+ LIRItem off(x->argument_at(2), this);
+ LIRItem len(x->argument_at(3), this);
+ buf.load_item();
+ off.load_nonconstant();
+
+ LIR_Opr index = off.result();
+ int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
+ if(off.result()->is_constant()) {
+ index = LIR_OprFact::illegalOpr;
+ offset += off.result()->as_jint();
+ }
+ LIR_Opr base_op = buf.result();
+
+ if (index->is_valid()) {
+ LIR_Opr tmp = new_register(T_LONG);
+ __ convert(Bytecodes::_i2l, index, tmp);
+ index = tmp;
+ }
+
+ if (offset) {
+ LIR_Opr tmp = new_pointer_register();
+ __ add(base_op, LIR_OprFact::intConst(offset), tmp);
+ base_op = tmp;
+ offset = 0;
+ }
+
+ LIR_Address* a = new LIR_Address(base_op,
+ index,
+ LIR_Address::times_1,
+ offset,
+ T_BYTE);
+ BasicTypeList signature(3);
+ signature.append(T_INT);
+ signature.append(T_ADDRESS);
+ signature.append(T_INT);
+ CallingConvention* cc = frame_map()->c_calling_convention(&signature);
+ const LIR_Opr result_reg = result_register_for(x->type());
+
+ LIR_Opr addr = new_pointer_register();
+ __ leal(LIR_OprFact::address(a), addr);
+
+ crc.load_item_force(cc->at(0));
+ __ move(addr, cc->at(1));
+ len.load_item_force(cc->at(2));
+
+ __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
+ __ move(result_reg, result);
+
+ break;
+ }
+ default: {
+ ShouldNotReachHere();
+ }
+ }
}
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
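A rough C++ model of the data flow the generator above sets up for the updateBytes case: it forms the address of the first element (array base + array header + offset; the header term is dropped for the ByteBuffer variant) and makes a leaf call with (crc, addr, len) in the C calling convention, with the result coming back in r0. The byte-at-a-time kernel here is only a stand-in for the real slicing-by-4 stub, and the helper names are illustrative, not from the patch.

#include <cstdint>
#include <cstddef>

// Bitwise stand-in for the StubRoutines::updateBytesCRC32() kernel.
static uint32_t crc_kernel(uint32_t crc, const uint8_t* p, int len) {
  crc = ~crc;
  for (int i = 0; i < len; i++) {
    crc ^= p[i];
    for (int k = 0; k < 8; k++)
      crc = (crc & 1) ? 0xEDB88320u ^ (crc >> 1) : (crc >> 1);
  }
  return ~crc;
}

// Model of do_update_CRC32 for _updateBytesCRC32: the element address is
// base + arrayOopDesc::base_offset_in_bytes(T_BYTE) + off (the LIR_Address /
// leal above), then a C leaf call with crc, addr, len in c_rarg0..c_rarg2.
uint32_t update_bytes_crc32(uint32_t crc, const uint8_t* array_base,
                            size_t header_bytes, int off, int len) {
  const uint8_t* addr = array_base + header_bytes + off;
  return crc_kernel(crc, addr, len);
}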
diff --git a/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp
index 7e5096fd1..e35e39f0e 100644
--- a/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp
+++ b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp
@@ -54,7 +54,7 @@ void zero_memory(Register addr, Register len, Register t1);
Register result);
// locking
- // hdr : must be rax, contents destroyed
+ // hdr : must be r0, contents destroyed
// obj : must point to the object to lock, contents preserved
// disp_hdr: must point to the displaced header location, contents preserved
// scratch : scratch register, contents destroyed
@@ -64,7 +64,7 @@ void zero_memory(Register addr, Register len, Register t1);
// unlocking
// hdr : contents destroyed
// obj : must point to the object to lock, contents preserved
- // disp_hdr: must be eax & must point to the displaced header location, contents destroyed
+ // disp_hdr: must be r0 & must point to the displaced header location, contents destroyed
void unlock_object(Register swap, Register obj, Register lock, Label& slow_case);
void initialize_object(
@@ -79,7 +79,7 @@ void zero_memory(Register addr, Register len, Register t1);
// allocation of fixed-size objects
// (can also be used to allocate fixed-size arrays, by setting
// hdr_size correctly and storing the array length afterwards)
- // obj : must be rax, will contain pointer to allocated object
+ // obj : will contain pointer to allocated object
// t1, t2 : scratch registers - contents destroyed
// header_size: size of object header in words
// object_size: total size of object in words
@@ -91,7 +91,7 @@ void zero_memory(Register addr, Register len, Register t1);
};
// allocation of arrays
- // obj : must be rax, will contain pointer to allocated object
+ // obj : will contain pointer to allocated object
// len : array length in number of elements
// t : scratch register - contents destroyed
// header_size: size of object header in words
diff --git a/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp b/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
index 0929edeb1..fa5027dbd 100644
--- a/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
+++ b/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
@@ -850,7 +850,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// refilling the TLAB or allocating directly from eden.
Label retry_tlab, try_eden;
const Register thread =
- __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi
+ __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves r19 & r3, returns rthread
__ bind(retry_tlab);
@@ -945,7 +945,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
oop_maps->add_gc_map(call_offset, map);
restore_live_registers_except_r0(sasm);
- // rax,: new multi array
+ // r0,: new multi array
__ verify_oop(r0);
}
break;
diff --git a/src/cpu/aarch64/vm/compiledIC_aarch64.cpp b/src/cpu/aarch64/vm/compiledIC_aarch64.cpp
index f3e0941e5..b4d6e220d 100644
--- a/src/cpu/aarch64/vm/compiledIC_aarch64.cpp
+++ b/src/cpu/aarch64/vm/compiledIC_aarch64.cpp
@@ -81,8 +81,8 @@ CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling interpreted code.
- // movq rbx, 0
- // jmp -5 # to self
+ // movq rmethod, 0
+ // jmp -4 # to self
// address mark = cbuf.insts_mark(); // Get mark within main instrs section.
diff --git a/src/cpu/aarch64/vm/interp_masm_aarch64.cpp b/src/cpu/aarch64/vm/interp_masm_aarch64.cpp
index c09c31f01..c5a593ecd 100644
--- a/src/cpu/aarch64/vm/interp_masm_aarch64.cpp
+++ b/src/cpu/aarch64/vm/interp_masm_aarch64.cpp
@@ -263,7 +263,7 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
// Do the check.
- check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows rcx
+ check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
// Profile the failure of the check.
profile_typecheck_failed(r2); // blows r2
@@ -721,7 +721,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
save_bcp(); // Save in case of exception
// Convert from BasicObjectLock structure to object and BasicLock
- // structure Store the BasicLock address into %rax
+ // structure Store the BasicLock address into %r0
lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
// Load oop into obj_reg(%c_rarg3)
diff --git a/src/cpu/aarch64/vm/interp_masm_aarch64.hpp b/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
index 5afcf7849..ce47cafab 100644
--- a/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
+++ b/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
@@ -166,14 +166,14 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Dispatching
void dispatch_prolog(TosState state, int step = 0);
void dispatch_epilog(TosState state, int step = 0);
- // dispatch via ebx (assume ebx is loaded already)
+ // dispatch via rscratch1
void dispatch_only(TosState state);
- // dispatch normal table via ebx (assume ebx is loaded already)
+ // dispatch normal table via rscratch1 (assume rscratch1 is loaded already)
void dispatch_only_normal(TosState state);
void dispatch_only_noverify(TosState state);
- // load ebx from [esi + step] and dispatch via ebx
+ // load rscratch1 from [rbcp + step] and dispatch via rscratch1
void dispatch_next(TosState state, int step = 0);
- // load ebx from [esi] and dispatch via ebx and table
+ // load rscratch1 from [esi] and dispatch via rscratch1 and table
void dispatch_via (TosState state, address* table);
// jump to an invoked target
diff --git a/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp b/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp
index 32584bbed..c0aaa1de4 100644
--- a/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp
+++ b/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp
@@ -46,6 +46,8 @@ void generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpa
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry();
+ address generate_CRC32_update_entry();
+ address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
void lock_method(void);
void generate_stack_overflow_check(void);
diff --git a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
index 95a1e8b07..d89ddad22 100644
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
@@ -126,6 +126,7 @@ void MacroAssembler::pd_patch_instruction(address branch, address target) {
} else {
assert((jbyte *)target ==
((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base ||
+ target == StubRoutines::crc_table_addr() ||
(address)target == os::get_polling_page(),
"adrp must be polling page or byte map base");
assert(offset_lo == 0, "offset must be 0 for polling page or byte map base");
@@ -1932,9 +1933,6 @@ void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
BytecodeCounter::print();
}
#endif
- // To see where a verify_oop failed, get $ebx+40/X for this frame.
- // XXX correct this offset for amd64
- // This is the value of eip which points to where verify_oop will return.
if (os::message_box(msg, "Execution stopped, print registers?")) {
ttyLocker ttyl;
tty->print_cr(" pc = 0x%016lx", pc);
@@ -2045,6 +2043,114 @@ void MacroAssembler::pop_CPU_state() {
pop(0x3fffffff, sp); // integer registers except lr & sp
}
+/**
+ * Emits code to update CRC-32 with a byte value according to constants in table
+ *
+ * @param [in,out]crc Register containing the crc.
+ * @param [in]val Register containing the byte to fold into the CRC.
+ * @param [in]table Register containing the table of crc constants.
+ *
+ * uint32_t crc;
+ * val = crc_table[(val ^ crc) & 0xFF];
+ * crc = val ^ (crc >> 8);
+ *
+ */
+void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
+ eor(val, val, crc);
+ andr(val, val, 0xff);
+ ldrw(val, Address(table, val, Address::lsl(2)));
+ eor(crc, val, crc, Assembler::LSR, 8);
+}
+
+/**
+ * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
+ *
+ * @param [in,out]crc Register containing the crc.
+ * @param [in]v Register containing the 32-bit to fold into the CRC.
+ * @param [in]table0 Register containing table 0 of crc constants.
+ * @param [in]table1 Register containing table 1 of crc constants.
+ * @param [in]table2 Register containing table 2 of crc constants.
+ * @param [in]table3 Register containing table 3 of crc constants.
+ *
+ * uint32_t crc;
+ * v = crc ^ v
+ * crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
+ *
+ */
+void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
+ Register table0, Register table1, Register table2, Register table3,
+ bool upper) {
+ eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0);
+ uxtb(tmp, v);
+ ldrw(crc, Address(table3, tmp, Address::lsl(2)));
+ ubfx(tmp, v, 8, 8);
+ ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
+ eor(crc, crc, tmp);
+ ubfx(tmp, v, 16, 8);
+ ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
+ eor(crc, crc, tmp);
+ ubfx(tmp, v, 24, 8);
+ ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
+ eor(crc, crc, tmp);
+}
+
+/**
+ * @param crc register containing existing CRC (32-bit)
+ * @param buf register pointing to input byte buffer (byte*)
+ * @param len register containing number of bytes
+ * @param table register that will contain address of CRC table
+ * @param tmp scratch register
+ */
+void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
+ Register table0, Register table1, Register table2, Register table3,
+ Register tmp, Register tmp2, Register tmp3) {
+ Label L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
+ unsigned long offset;
+ ornw(crc, zr, crc);
+ adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
+ if (offset) add(table0, table0, offset);
+ add(table1, table0, 1*256*sizeof(juint));
+ add(table2, table0, 2*256*sizeof(juint));
+ add(table3, table0, 3*256*sizeof(juint));
+ subs(len, len, 16);
+ br(Assembler::GE, L_by16_loop);
+ adds(len, len, 16-4);
+ br(Assembler::GE, L_by4_loop);
+ adds(len, len, 4);
+ br(Assembler::GT, L_by1_loop);
+ b(L_exit);
+
+ BIND(L_by4_loop);
+ ldrw(tmp, Address(post(buf, 4)));
+ update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
+ subs(len, len, 4);
+ br(Assembler::GE, L_by4_loop);
+ adds(len, len, 4);
+ br(Assembler::LE, L_exit);
+ BIND(L_by1_loop);
+ subs(len, len, 1);
+ ldrb(tmp, Address(post(buf, 1)));
+ update_byte_crc32(crc, tmp, table0);
+ br(Assembler::GT, L_by1_loop);
+ b(L_exit);
+
+ align(CodeEntryAlignment);
+ BIND(L_by16_loop);
+ subs(len, len, 16);
+ ldp(tmp, tmp3, Address(post(buf, 16)));
+ update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
+ update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
+ update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
+ update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
+ br(Assembler::GE, L_by16_loop);
+ adds(len, len, 16-4);
+ br(Assembler::GE, L_by4_loop);
+ adds(len, len, 4);
+ br(Assembler::GT, L_by1_loop);
+ BIND(L_exit);
+ ornw(crc, zr, crc);
+}
+
SkipIfEqual::SkipIfEqual(
MacroAssembler* masm, const bool* flag_addr, bool value) {
_masm = masm;
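The word-at-a-time path added above is a standard slicing-by-4 CRC: each 32-bit chunk is XORed into the running (already inverted) CRC and folded through four tables at once, with a byte-wise tail for the remainder. The following compact stand-alone C++ model derives tables 1-3 from table 0 the way zlib does (an assumption consistent with the table dumped into stubRoutines_aarch64.cpp below), and it processes 4 bytes per iteration where kernel_crc32 unrolls the main loop to 16.

#include <cstdint>
#include <cstring>
#include <cstddef>

static uint32_t tbl[4][256];

static void init_tables() {
  for (uint32_t i = 0; i < 256; i++) {
    uint32_t c = i;
    for (int k = 0; k < 8; k++)
      c = (c & 1) ? 0xEDB88320u ^ (c >> 1) : (c >> 1);
    tbl[0][i] = c;
  }
  for (int n = 1; n < 4; n++)
    for (int i = 0; i < 256; i++)
      tbl[n][i] = (tbl[n - 1][i] >> 8) ^ tbl[0][tbl[n - 1][i] & 0xff];
}

// Mirrors kernel_crc32: invert, fold whole words via update_word_crc32,
// finish odd bytes via update_byte_crc32, invert again. Assumes a
// little-endian load, as the ldrw/ldp in the generated code do.
uint32_t crc32_slicing_by_4(uint32_t crc, const uint8_t* buf, size_t len) {
  crc = ~crc;                                              // ornw(crc, zr, crc)
  while (len >= 4) {                                       // L_by4_loop / L_by16_loop
    uint32_t v;
    memcpy(&v, buf, 4);
    v ^= crc;                                              // eor(v, crc, v, ...)
    crc = tbl[3][v & 0xff] ^ tbl[2][(v >> 8) & 0xff]
        ^ tbl[1][(v >> 16) & 0xff] ^ tbl[0][v >> 24];      // update_word_crc32
    buf += 4;
    len -= 4;
  }
  while (len--)                                            // L_by1_loop
    crc = tbl[0][(crc ^ *buf++) & 0xff] ^ (crc >> 8);      // update_byte_crc32
  return ~crc;                                             // ornw(crc, zr, crc)
}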
diff --git a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
index 4b0a103cf..c0ea2e446 100644
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
@@ -103,7 +103,7 @@ class MacroAssembler: public Assembler {
// Biased locking support
// lock_reg and obj_reg must be loaded up with the appropriate values.
- // swap_reg must be rax, and is killed.
+ // swap_reg is killed.
// tmp_reg is optional. If it is supplied (i.e., != noreg) it will
// be killed; if not supplied, push/pop will be used internally to
// allocate a temporary (inefficient, avoid if possible).
@@ -765,88 +765,6 @@ public:
void int3();
#endif
- // currently unimplemented
-#if 0
- // Long operation macros for a 32bit cpu
- // Long negation for Java
- void lneg(Register hi, Register lo);
-
- // Long multiplication for Java
- // (destroys contents of eax, ebx, ecx and edx)
- void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
-
- // Long shifts for Java
- // (semantics as described in JVM spec.)
- void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f)
- void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
-
- // Long compare for Java
- // (semantics as described in JVM spec.)
- void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
-
-
- // misc
-
- // Sign extension
- void sign_extend_short(Register reg);
- void sign_extend_byte(Register reg);
-
- // Division by power of 2, rounding towards 0
- void division_with_shift(Register reg, int shift_value);
-#endif
-
- // unimpelements
-#if 0
- // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
- //
- // CF (corresponds to C0) if x < y
- // PF (corresponds to C2) if unordered
- // ZF (corresponds to C3) if x = y
- //
- // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
- // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
- void fcmp(Register tmp);
- // Variant of the above which allows y to be further down the stack
- // and which only pops x and y if specified. If pop_right is
- // specified then pop_left must also be specified.
- void fcmp(Register tmp, int index, bool pop_left, bool pop_right);
-
- // Floating-point comparison for Java
- // Compares the top-most stack entries on the FPU stack and stores the result in dst.
- // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
- // (semantics as described in JVM spec.)
- void fcmp2int(Register dst, bool unordered_is_less);
- // Variant of the above which allows y to be further down the stack
- // and which only pops x and y if specified. If pop_right is
- // specified then pop_left must also be specified.
- void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);
-
- // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
- // tmp is a temporary register, if none is available use noreg
- void fremr(Register tmp);
-
-
- // Inlined sin/cos generator for Java; must not use CPU instruction
- // directly on Intel as it does not have high enough precision
- // outside of the range [-pi/4, pi/4]. Extra argument indicate the
- // number of FPU stack slots in use; all but the topmost will
- // require saving if a slow case is necessary. Assumes argument is
- // on FP TOS; result is on FP TOS. No cpu registers are changed by
- // this code.
- void trigfunc(char trig, int num_fpu_regs_in_use = 1);
-
- // branch to L if FPU flag C2 is set/not set
- // tmp is a temporary register, if none is available use noreg
- void jC2 (Register tmp, Label& L);
- void jnC2(Register tmp, Label& L);
-
- void push_IU_state();
- void pop_IU_state();
-
- void push_FPU_state();
- void pop_FPU_state();
-#endif
-
void push_CPU_state();
void pop_CPU_state() ;
@@ -1011,33 +929,6 @@ public:
// Support for serializing memory accesses between threads
void serialize_memory(Register thread, Register tmp);
- // unimplemented
-#if 0
- void verify_tlab();
-
- // Biased locking support
- // lock_reg and obj_reg must be loaded up with the appropriate values.
- // swap_reg must be rax, and is killed.
- // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
- // be killed; if not supplied, push/pop will be used internally to
- // allocate a temporary (inefficient, avoid if possible).
- // Optional slow case is for implementations (interpreter and C1) which branch to
- // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
- // Returns offset of first potentially-faulting instruction for null
- // check info (currently consumed only by C1). If
- // swap_reg_contains_mark is true then returns -1 as it is assumed
- // the calling code has already passed any potential faults.
- int biased_locking_enter(Register lock_reg, Register obj_reg,
- Register swap_reg, Register tmp_reg,
- bool swap_reg_contains_mark,
- Label& done, Label* slow_case = NULL,
- BiasedLockingCounters* counters = NULL);
- void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
-
-
- Condition negate_condition(Condition cond);
-#endif
-
// Arithmetics
void addptr(Address dst, int32_t src) {
@@ -1226,6 +1117,11 @@ public:
void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);
#endif
+ // CRC32 code for java.util.zip.CRC32::updateBytes() instrinsic.
+ void kernel_crc32(Register crc, Register buf, Register len,
+ Register table0, Register table1, Register table2, Register table3,
+ Register tmp, Register tmp2, Register tmp3);
+
#undef VIRTUAL
// Stack push and pop individual 64 bit registers
@@ -1367,6 +1263,12 @@ public:
// Used by aarch64.ad to control code generation
static bool use_acq_rel_for_volatile_fields();
+
+ // CRC32 code for java.util.zip.CRC32::updateBytes() instrinsic.
+ void update_byte_crc32(Register crc, Register val, Register table);
+ void update_word_crc32(Register crc, Register v, Register tmp,
+ Register table0, Register table1, Register table2, Register table3,
+ bool upper = false);
};
// Used by aarch64.ad to control code generation
diff --git a/src/cpu/aarch64/vm/methodHandles_aarch64.cpp b/src/cpu/aarch64/vm/methodHandles_aarch64.cpp
index 5c4810ba9..f1a6da3dc 100644
--- a/src/cpu/aarch64/vm/methodHandles_aarch64.cpp
+++ b/src/cpu/aarch64/vm/methodHandles_aarch64.cpp
@@ -262,7 +262,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
// temps used in this code are not used in *either* compiled or interpreted calling sequences
Register temp1 = r10;
Register temp2 = r11;
- Register temp3 = r14; // r13 is live ty this point: it contains the sender SP
+ Register temp3 = r14; // r13 is live by this point: it contains the sender SP
if (for_compiler_entry) {
assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
@@ -331,7 +331,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
// Live registers at this point:
// member_reg - MemberName that was the trailing argument
// temp1_recv_klass - klass of stacked receiver, if needed
- // rsi/r13 - interpreter linkage (if interpreted) ??? FIXME
+ // r13 - interpreter linkage (if interpreted) ??? FIXME
// r1 ... r0 - compiler arguments (if compiled)
Label L_incompatible_class_change_error;
@@ -416,7 +416,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
break;
}
- // live at this point: rmethod, rsi/r13 (if interpreted)
+ // live at this point: rmethod, r13 (if interpreted)
// After figuring out which concrete method to call, jump into it.
// Note that this works in the interpreter with no data motion.
diff --git a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
index 7118cda1f..e6af17c43 100644
--- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
@@ -479,7 +479,7 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// stack pointer. It also recalculates and aligns sp.
// A c2i adapter is frameless because the *callee* frame, which is
- // interpreted, routinely repairs its caller's es (from sender_sp,
+ // interpreted, routinely repairs its caller's sp (from sender_sp,
// which is set up via the senderSP register).
// In other words, if *either* the caller or callee is interpreted, we can
@@ -702,7 +702,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
AArch64Simulator *sim = NULL;
size_t len = 65536;
if (NotifySimulator) {
- name = new char[len];
+ name = NEW_C_HEAP_ARRAY(char, len, mtInternal);
}
if (name) {
@@ -757,7 +757,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
name[0] = 'c';
name[2] = 'i';
sim->notifyCompile(name, c2i_entry);
- delete[] name;
+ FREE_C_HEAP_ARRAY(char, name, mtInternal);
}
#endif
@@ -1608,9 +1608,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Mark location of rfp (someday)
// map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
- // Use eax, ebx as temporaries during any memory-memory moves we have to do
- // All inbound args are referenced based on rfp and all outbound args via sp.
-
int float_args = 0;
int int_args = 0;
@@ -1959,9 +1956,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Don't use call_VM as it will see a possible pending exception and forward it
// and never return here preventing us from clearing _last_native_pc down below.
- // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
- // preserved and correspond to the bcp/locals pointers. So we do a runtime call
- // by hand.
//
save_native_result(masm, ret_type, stack_slots);
__ mov(c_rarg0, rthread);
@@ -2887,7 +2881,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
oop_maps->add_gc_map( __ offset() - start, map);
- // rax contains the address we are going to jump to assuming no exception got installed
+ // r0 contains the address we are going to jump to assuming no exception got installed
// clear last_Java_sp
__ reset_last_Java_frame(false, true);
@@ -2990,7 +2984,6 @@ void OptoRuntime::generate_exception_blob() {
// Store exception in Thread object. We cannot pass any arguments to the
// handle_exception call, since we do not want to make any assumption
// about the size of the frame where the exception happened in.
- // c_rarg0 is either rdi (Linux) or rcx (Windows).
__ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
__ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
diff --git a/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp b/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
index a6d847c60..dd9523dc3 100644
--- a/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
+++ b/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
@@ -456,7 +456,7 @@ class StubGenerator: public StubCodeGenerator {
// not the case if the callee is compiled code => need to setup the
// rsp.
//
- // rax: exception oop
+ // r0: exception oop
// NOTE: this is used as a target from the signal handler so it
// needs an x86 prolog which returns into the current simulator
@@ -850,21 +850,6 @@ class StubGenerator: public StubCodeGenerator {
void array_overlap_test(Label& L_no_overlap, Address::sxtw sf) { __ b(L_no_overlap); }
void array_overlap_test(address no_overlap_target, Label* NOLp, int sf) { Unimplemented(); }
- // Shuffle first three arg regs on Windows into Linux/Solaris locations.
- //
- // Outputs:
- // rdi - rcx
- // rsi - rdx
- // rdx - r8
- // rcx - r9
- //
- // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter
- // are non-volatile. r9 and r10 should not be used by the caller.
- //
- void setup_arg_regs(int nargs = 3) { Unimplemented(); }
-
- void restore_arg_regs() { Unimplemented(); }
-
// Generate code for an array write pre barrier
//
// addr - starting address
@@ -1796,8 +1781,8 @@ class StubGenerator: public StubCodeGenerator {
// rsp+40 - element count (32-bits)
//
// Output:
- // rax == 0 - success
- // rax == -1^K - failure, where K is partial transfer count
+ // r0 == 0 - success
+ // r0 == -1^K - failure, where K is partial transfer count
//
address generate_generic_copy(const char *name,
address byte_copy_entry, address short_copy_entry,
@@ -1944,6 +1929,50 @@ class StubGenerator: public StubCodeGenerator {
}
#endif
+ /**
+ * Arguments:
+ *
+ * Inputs:
+ * c_rarg0 - int crc
+ * c_rarg1 - byte* buf
+ * c_rarg2 - int length
+ *
+ * Output:
+ * r0 - int crc result
+ *
+ * Preserves:
+ * r13
+ *
+ */
+ address generate_updateBytesCRC32() {
+ assert(UseCRC32Intrinsics, "what are we doing here?");
+
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
+
+ address start = __ pc();
+
+ const Register crc = c_rarg0; // crc
+ const Register buf = c_rarg1; // source java byte array address
+ const Register len = c_rarg2; // length
+ const Register table0 = c_rarg3; // crc_table address
+ const Register table1 = c_rarg4;
+ const Register table2 = c_rarg5;
+ const Register table3 = c_rarg6;
+ const Register tmp3 = c_rarg7;
+
+ BLOCK_COMMENT("Entry:");
+ __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+ __ kernel_crc32(crc, buf, len,
+ table0, table1, table2, table3, rscratch1, rscratch2, tmp3);
+
+ __ leave(); // required for proper stackwalking of RuntimeStub frame
+ __ ret(lr);
+
+ return start;
+ }
+
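Seen from its callers (the C1 leaf call and the interpreter entry below, which jumps straight to it), the generated stub behaves like a C function with the signature sketched here. This is only an illustration of the register contract noted above -- crc in c_rarg0, buffer in c_rarg1, length in c_rarg2, result in r0, r13 preserved -- not an actual exported symbol.

#include <cstdint>

// Illustrative C-level view of the generated stub; in HotSpot the entry is
// reached through StubRoutines::updateBytesCRC32(), not by a linked name.
extern "C" uint32_t updateBytesCRC32(uint32_t crc, const uint8_t* buf, int len);

// e.g. a caller that matches the register contract:
//   uint32_t out = updateBytesCRC32(in_crc, data, data_len);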
#undef __
#define __ masm->
@@ -2113,8 +2142,8 @@ class StubGenerator: public StubCodeGenerator {
generate_handler_for_unsafe_access();
// platform dependent
- StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
- StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
+ StubRoutines::aarch64::_get_previous_fp_entry = generate_get_previous_fp();
+ StubRoutines::aarch64::_get_previous_sp_entry = generate_get_previous_sp();
// Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry =
@@ -2122,6 +2151,11 @@ class StubGenerator: public StubCodeGenerator {
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_StackOverflowError));
+ if (UseCRC32Intrinsics) {
+ // set table address before stub generation which use it
+ StubRoutines::_crc_table_adr = (address)StubRoutines::aarch64::_crc_table;
+ StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
+ }
}
void generate_all() {
diff --git a/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp b/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp
index 02160486e..d55740524 100644
--- a/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp
+++ b/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp
@@ -33,14 +33,237 @@
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
-address StubRoutines::x86::_get_previous_fp_entry = NULL;
-address StubRoutines::x86::_get_previous_sp_entry = NULL;
-
-address StubRoutines::x86::_f2i_fixup = NULL;
-address StubRoutines::x86::_f2l_fixup = NULL;
-address StubRoutines::x86::_d2i_fixup = NULL;
-address StubRoutines::x86::_d2l_fixup = NULL;
-address StubRoutines::x86::_float_sign_mask = NULL;
-address StubRoutines::x86::_float_sign_flip = NULL;
-address StubRoutines::x86::_double_sign_mask = NULL;
-address StubRoutines::x86::_double_sign_flip = NULL;
+address StubRoutines::aarch64::_get_previous_fp_entry = NULL;
+address StubRoutines::aarch64::_get_previous_sp_entry = NULL;
+
+address StubRoutines::aarch64::_f2i_fixup = NULL;
+address StubRoutines::aarch64::_f2l_fixup = NULL;
+address StubRoutines::aarch64::_d2i_fixup = NULL;
+address StubRoutines::aarch64::_d2l_fixup = NULL;
+address StubRoutines::aarch64::_float_sign_mask = NULL;
+address StubRoutines::aarch64::_float_sign_flip = NULL;
+address StubRoutines::aarch64::_double_sign_mask = NULL;
+address StubRoutines::aarch64::_double_sign_flip = NULL;
+
+/**
+ * crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.5/crc32.h
+ */
+juint StubRoutines::aarch64::_crc_table[]
+ __attribute__ ((aligned(4096))) =
+{
+ // Table 0
+ 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
+ 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL,
+ 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL,
+ 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL,
+ 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL,
+ 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL,
+ 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL,
+ 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL,
+ 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
+ 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL,
+ 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL,
+ 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL,
+ 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL,
+ 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL,
+ 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL,
+ 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL,
+ 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL,
+ 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
+ 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL,
+ 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL,
+ 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL,
+ 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL,
+ 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL,
+ 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL,
+ 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL,
+ 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL,
+ 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
+ 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL,
+ 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL,
+ 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL,
+ 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL,
+ 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL,
+ 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL,
+ 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL,
+ 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL,
+ 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
+ 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL,
+ 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL,
+ 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL,
+ 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL,
+ 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL,
+ 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL,
+ 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL,
+ 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL,
+ 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
+ 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL,
+ 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL,
+ 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL,
+ 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL,
+ 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL,
+ 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
+ 0x2d02ef8dUL,
+
+ // Table 1
+ 0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL,
+ 0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL,
+ 0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL,
+ 0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL,
+ 0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL,
+ 0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL,
+ 0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL,
+ 0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL,
+ 0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL,
+ 0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL,
+ 0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL,
+ 0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL,
+ 0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL,
+ 0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL,
+ 0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL,
+ 0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL,
+ 0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL,
+ 0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL,
+ 0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL,
+ 0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL,
+ 0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL,
+ 0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL,
+ 0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL,
+ 0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL,
+ 0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL,
+ 0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL,
+ 0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL,
+ 0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL,
+ 0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL,
+ 0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL,
+ 0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL,
+ 0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL,
+ 0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL,
+ 0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL,
+ 0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL,
+ 0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL,
+ 0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL,
+ 0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL,
+ 0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL,
+ 0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL,
+ 0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL,
+ 0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL,
+ 0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL,
+ 0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL,
+ 0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL,
+ 0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL,
+ 0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL,
+ 0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL,
+ 0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL,
+ 0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL,
+ 0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL,
+ 0x9324fd72UL,
+
+ // Table 2
+ 0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL,
+ 0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL,
+ 0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL,
+ 0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL,
+ 0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL,
+ 0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL,
+ 0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL,
+ 0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL,
+ 0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL,
+ 0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL,
+ 0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL,
+ 0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL,
+ 0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL,
+ 0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL,
+ 0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL,
+ 0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL,
+ 0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL,
+ 0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL,
+ 0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL,
+ 0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL,
+ 0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL,
+ 0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL, 0x41cd3244UL, 0x400f5873UL,
+ 0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL,
+ 0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL,
+ 0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL,
+ 0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL,
+ 0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL,
+ 0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL,
+ 0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL,
+ 0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL,
+ 0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL,
+ 0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL,
+ 0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL,
+ 0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL,
+ 0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL,
+ 0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL,
+ 0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL,
+ 0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL,
+ 0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL,
+ 0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL,
+ 0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL,
+ 0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL,
+ 0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL,
+ 0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL,
+ 0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL,
+ 0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL,
+ 0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL,
+ 0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL,
+ 0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL,
+ 0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL,
+ 0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL,
+ 0xbe9834edUL,
+
+ // Table 3
+ 0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL,
+ 0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL,
+ 0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL,
+ 0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL,
+ 0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL,
+ 0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL,
+ 0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL,
+ 0xb28700d0UL, 0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL,
+ 0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL,
+ 0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL,
+ 0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL,
+ 0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL,
+ 0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL,
+ 0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL,
+ 0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL,
+ 0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL,
+ 0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL,
+ 0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL,
+ 0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL,
+ 0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL,
+ 0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL,
+ 0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL,
+ 0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL,
+ 0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL,
+ 0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL,
+ 0x99557841UL, 0x8be0d7afUL, 0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL,
+ 0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL,
+ 0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL,
+ 0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL,
+ 0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL,
+ 0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL,
+ 0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL,
+ 0x4d6b1905UL, 0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL,
+ 0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL,
+ 0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL,
+ 0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL,
+ 0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL,
+ 0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL,
+ 0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL,
+ 0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL,
+ 0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL,
+ 0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL,
+ 0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL,
+ 0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL,
+ 0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL,
+ 0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL,
+ 0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL,
+ 0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL,
+ 0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL,
+ 0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL,
+ 0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL,
+ 0xde0506f1UL
+};
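A small, self-contained way to sanity-check this table against the bitwise definition it was generated from: the standard CRC-32 check value for the ASCII string "123456789" is 0xCBF43926. The reference routine below uses the polynomial directly rather than the table, so it is an independent cross-check, not code from the patch.

#include <cassert>
#include <cstdint>
#include <cstring>

// Bitwise CRC-32 (reflected, polynomial 0xEDB88320) -- the definition the
// four tables above are derived from.
static uint32_t crc32_ref(const uint8_t* p, size_t n) {
  uint32_t crc = 0xffffffffu;
  while (n--) {
    crc ^= *p++;
    for (int k = 0; k < 8; k++)
      crc = (crc & 1) ? 0xEDB88320u ^ (crc >> 1) : (crc >> 1);
  }
  return ~crc;
}

int main() {
  const char* s = "123456789";
  assert(crc32_ref((const uint8_t*) s, strlen(s)) == 0xCBF43926u);
  return 0;
}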
diff --git a/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp b/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp
index 9c12c6432..f27d61766 100644
--- a/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp
+++ b/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp
@@ -24,8 +24,8 @@
*
*/
-#ifndef CPU_AARCH64_VM_STUBROUTINES_AARCH64_64_HPP
-#define CPU_AARCH64_VM_STUBROUTINES_AARCH64_64_HPP
+#ifndef CPU_AARCH64_VM_STUBROUTINES_AARCH64_HPP
+#define CPU_AARCH64_VM_STUBROUTINES_AARCH64_HPP
// This file holds the platform specific parts of the StubRoutines
// definition. See stubRoutines.hpp for a description on how to
@@ -45,7 +45,7 @@ enum platform_dependent_constants {
code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
};
-class x86 {
+class aarch64 {
friend class StubGenerator;
private:
@@ -113,6 +113,10 @@ class x86 {
{
return _double_sign_flip;
}
+
+ private:
+ static juint _crc_table[];
+
};
-#endif // CPU_AARCH64_VM_STUBROUTINES_AARCH64_64_HPP
+#endif // CPU_AARCH64_VM_STUBROUTINES_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp b/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
index ea071b699..fb7cd7327 100644
--- a/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
+++ b/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
@@ -673,6 +673,123 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
return NULL;
}
+/**
+ * Method entry for static native methods:
+ * int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+ if (UseCRC32Intrinsics) {
+ address entry = __ pc();
+
+ // rmethod: Method*
+ // r13: senderSP must preserved for slow path
+ // esp: args
+
+ Label slow_path;
+ // If we need a safepoint check, generate full interpreter entry.
+ ExternalAddress state(SafepointSynchronize::address_of_state());
+ unsigned long offset;
+ __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
+ __ ldrw(rscratch1, Address(rscratch1, offset));
+ assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
+ __ cbnz(rscratch1, slow_path);
+
+ // We don't generate local frame and don't align stack because
+ // we call stub code and there is no safepoint on this path.
+
+ // Load parameters
+ const Register crc = c_rarg0; // crc
+ const Register val = c_rarg1; // source java byte value
+ const Register tbl = c_rarg2; // scratch
+
+ // Arguments are reversed on java expression stack
+ __ ldrw(val, Address(esp, 0)); // byte value
+ __ ldrw(crc, Address(esp, wordSize)); // Initial CRC
+
+ __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
+ __ add(tbl, tbl, offset);
+
+ __ ornw(crc, zr, crc); // ~crc
+ __ update_byte_crc32(crc, val, tbl);
+ __ ornw(crc, zr, crc); // ~crc
+
+ // result in c_rarg0
+
+ __ andr(sp, r13, -16);
+ __ ret(lr);
+
+ // generate a vanilla native entry as the slow path
+ __ bind(slow_path);
+
+ (void) generate_native_entry(false);
+
+ return entry;
+ }
+ return generate_native_entry(false);
+}
+
+/**
+ * Method entry for static native methods:
+ * int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+ * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+ if (UseCRC32Intrinsics) {
+ address entry = __ pc();
+
+ // rmethod,: Method*
+ // r13: senderSP must preserved for slow path
+
+ Label slow_path;
+ // If we need a safepoint check, generate full interpreter entry.
+ ExternalAddress state(SafepointSynchronize::address_of_state());
+ unsigned long offset;
+ __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
+ __ ldrw(rscratch1, Address(rscratch1, offset));
+ assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
+ __ cbnz(rscratch1, slow_path);
+
+ // We don't generate local frame and don't align stack because
+ // we call stub code and there is no safepoint on this path.
+
+ // Load parameters
+ const Register crc = c_rarg0; // crc
+ const Register buf = c_rarg1; // source java byte array address
+ const Register len = c_rarg2; // length
+ const Register off = len; // offset (never overlaps with 'len')
+
+ // Arguments are reversed on java expression stack
+ // Calculate address of start element
+ if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
+ __ ldr(buf, Address(esp, 2*wordSize)); // long buf
+ __ ldrw(off, Address(esp, wordSize)); // offset
+ __ add(buf, buf, off); // + offset
+ __ ldrw(crc, Address(esp, 4*wordSize)); // Initial CRC
+ } else {
+ __ ldr(buf, Address(esp, 2*wordSize)); // byte[] array
+ __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+ __ ldrw(off, Address(esp, wordSize)); // offset
+ __ add(buf, buf, off); // + offset
+ __ ldrw(crc, Address(esp, 3*wordSize)); // Initial CRC
+ }
+ // Can now load 'len' since we're finished with 'off'
+ __ ldrw(len, Address(esp, 0x0)); // Length
+
+ __ andr(sp, r13, -16); // Restore the caller's SP
+
+ // We are frameless so we can just jump to the stub.
+ __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));
+
+ // generate a vanilla native entry as the slow path
+ __ bind(slow_path);
+
+ (void) generate_native_entry(false);
+
+ return entry;
+ }
+ return generate_native_entry(false);
+}
+
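The two interpreter entries above differ mainly in where they find their operands on the Java expression stack: arguments end up with the last one at esp, and the long buf of updateByteBuffer occupies two slots, which pushes the initial CRC one slot further out. A toy C++ model of those reads follows (slot indices in wordSize units; header_bytes stands for arrayOopDesc::base_offset_in_bytes(T_BYTE); names are illustrative only).

#include <cstdint>
#include <cstddef>

struct CrcArgs {
  uint32_t       crc;
  const uint8_t* addr;   // first byte to process
  int            len;
};

// Toy model of generate_CRC32_updateBytes_entry's expression-stack reads.
CrcArgs read_crc_args(const intptr_t* esp, bool byte_buffer_kind, size_t header_bytes) {
  CrcArgs a;
  a.len = (int) esp[0];                                     // ldrw(len, Address(esp, 0))
  int off = (int) esp[1];                                   // ldrw(off, Address(esp, wordSize))
  if (byte_buffer_kind) {
    // updateByteBuffer(int crc, long buf, int off, int len): the long takes 2 slots
    a.addr = (const uint8_t*) esp[2] + off;                 // ldr(buf, esp + 2*wordSize)
    a.crc  = (uint32_t) esp[4];                             // ldrw(crc, esp + 4*wordSize)
  } else {
    // updateBytes(int crc, byte[] b, int off, int len): skip the array header
    a.addr = (const uint8_t*) esp[2] + header_bytes + off;  // ldr(buf) + header + off
    a.crc  = (uint32_t) esp[3];                             // ldrw(crc, esp + 3*wordSize)
  }
  return a;
}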
void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// Bang each page in the shadow zone. We can't assume it's been done for
// an interpreter frame with greater than a page of locals, so each page
@@ -1097,7 +1214,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// not properly paired (was bug - gri 11/22/99).
__ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
- // restore potential result in edx:eax, call result handler to
+ // restore potential result in r0:d0, call result handler to
// restore potential result in ST0 & handle result
__ pop(ltos);
@@ -1373,6 +1490,12 @@ address AbstractInterpreterGenerator::generate_method_entry(
case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
+ case Interpreter::java_util_zip_CRC32_update
+ : entry_point = ((InterpreterGenerator*)this)->generate_CRC32_update_entry(); break;
+ case Interpreter::java_util_zip_CRC32_updateBytes
+ : // fall thru
+ case Interpreter::java_util_zip_CRC32_updateByteBuffer
+ : entry_point = ((InterpreterGenerator*)this)->generate_CRC32_updateBytes_entry(kind); break;
default : ShouldNotReachHere(); break;
}
diff --git a/src/cpu/aarch64/vm/templateTable_aarch64.cpp b/src/cpu/aarch64/vm/templateTable_aarch64.cpp
index 10c097aa8..9984425dd 100644
--- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp
@@ -2724,7 +2724,7 @@ void TemplateTable::fast_storefield(TosState state)
// access constant pool cache
__ get_cache_and_index_at_bcp(r2, r1, 1);
- // test for volatile with rdx
+ // test for volatile with r3
__ ldrw(r3, Address(r2, in_bytes(base +
ConstantPoolCacheEntry::flags_offset())));
@@ -3188,7 +3188,7 @@ void TemplateTable::invokedynamic(int byte_no) {
// r0: CallSite object (from cpool->resolved_references[])
// rmethod: MH.linkToCallSite method (from f2)
- // Note: rax_callsite is already pushed by prepare_invoke
+ // Note: r0_callsite is already pushed by prepare_invoke
// %%% should make a type profile for any invokedynamic that takes a ref argument
// profile this call
@@ -3657,7 +3657,6 @@ void TemplateTable::monitorexit()
__ should_not_reach_here();
// call run-time routine
- // rsi: points to monitor entry
__ bind(found);
__ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
__ unlock_object(c_rarg1);
diff --git a/src/cpu/aarch64/vm/vm_version_aarch64.cpp b/src/cpu/aarch64/vm/vm_version_aarch64.cpp
index bb1ac22d1..983b85585 100644
--- a/src/cpu/aarch64/vm/vm_version_aarch64.cpp
+++ b/src/cpu/aarch64/vm/vm_version_aarch64.cpp
@@ -91,6 +91,10 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 256);
FLAG_SET_DEFAULT(PrefetchFieldsAhead, 256);
FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 256);
+
+ if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
+ UseCRC32Intrinsics = true;
+ }
}
void VM_Version::initialize() {