diff options
author | Andrew Dinn <adinn@redhat.com> | 2013-05-23 10:49:51 +0100 |
---|---|---|
committer | Andrew Dinn <adinn@redhat.com> | 2013-05-23 10:49:51 +0100 |
commit | 5e16252ba4eb05b3af63cd8fe95211b2e90977ec (patch) | |
tree | e10bd96d58e33afe0eb641a850fbb557cb604b09 /src | |
parent | e52c27625b07bf4df11ff484682aaafaa9d68417 (diff) |
restored C1 changes lost in the merge
Diffstat (limited to 'src')
-rw-r--r-- | src/cpu/aarch64/vm/assembler_aarch64.cpp | 2 | ||||
-rw-r--r-- | src/cpu/aarch64/vm/assembler_aarch64.hpp | 22 | ||||
-rw-r--r-- | src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp | 6 | ||||
-rw-r--r-- | src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp | 43 | ||||
-rw-r--r-- | src/cpu/aarch64/vm/frame_aarch64.cpp | 78 | ||||
-rw-r--r-- | src/cpu/aarch64/vm/frame_aarch64.hpp | 2 | ||||
-rw-r--r-- | src/cpu/aarch64/vm/macroAssembler_aarch64.cpp | 27 | ||||
-rw-r--r-- | src/cpu/aarch64/vm/register_aarch64.hpp | 4 | ||||
-rw-r--r-- | src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp | 35 | ||||
-rw-r--r-- | src/cpu/aarch64/vm/templateTable_aarch64.cpp | 6 | ||||
-rw-r--r-- | src/share/vm/runtime/arguments.cpp | 5 |
11 files changed, 134 insertions, 96 deletions
diff --git a/src/cpu/aarch64/vm/assembler_aarch64.cpp b/src/cpu/aarch64/vm/assembler_aarch64.cpp index 6be7392e2..aa46c85eb 100644 --- a/src/cpu/aarch64/vm/assembler_aarch64.cpp +++ b/src/cpu/aarch64/vm/assembler_aarch64.cpp @@ -32,7 +32,7 @@ #include "interpreter/interpreter.hpp" #ifndef PRODUCT -const unsigned long Assembler::asm_bp = 0x00007fffee097c64; +const unsigned long Assembler::asm_bp = 0x00007fffee09ac88; #endif #include "compiler/disassembler.hpp" diff --git a/src/cpu/aarch64/vm/assembler_aarch64.hpp b/src/cpu/aarch64/vm/assembler_aarch64.hpp index 992382ea2..b72fc2962 100644 --- a/src/cpu/aarch64/vm/assembler_aarch64.hpp +++ b/src/cpu/aarch64/vm/assembler_aarch64.hpp @@ -1548,17 +1548,31 @@ public: data_processing(op31, type, opcode, Vd, Vn); \ } - INSN(fmovs, 0b000, 0b00, 0b000000); +private: + INSN(i_fmovs, 0b000, 0b00, 0b000000); +public: INSN(fabss, 0b000, 0b00, 0b000001); INSN(fnegs, 0b000, 0b00, 0b000010); INSN(fsqrts, 0b000, 0b00, 0b000011); - INSN(fcvts, 0b000, 0b00, 0b000101); + INSN(fcvts, 0b000, 0b00, 0b000101); // Single-precision to double-precision - INSN(fmovd, 0b000, 0b01, 0b000000); +private: + INSN(i_fmovd, 0b000, 0b01, 0b000000); +public: INSN(fabsd, 0b000, 0b01, 0b000001); INSN(fnegd, 0b000, 0b01, 0b000010); INSN(fsqrtd, 0b000, 0b01, 0b000011); - INSN(fcvtd, 0b000, 0b01, 0b000100); + INSN(fcvtd, 0b000, 0b01, 0b000100); // Double-precision to single-precision + + void fmovd(FloatRegister Vd, FloatRegister Vn) { + assert(Vd != Vn, "should be"); + i_fmovd(Vd, Vn); + } + + void fmovs(FloatRegister Vd, FloatRegister Vn) { + assert(Vd != Vn, "should be"); + i_fmovs(Vd, Vn); + } #undef INSN diff --git a/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp b/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp index b5d5fdf67..57a590863 100644 --- a/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp +++ b/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp @@ -55,11 +55,13 @@ void ConversionStub::emit_code(LIR_Assembler* ce) { Address(__ pre(sp, -2 * wordSize))); if 
(bytecode() == Bytecodes::_f2i) { - __ fmovs(v0, input()->as_float_reg()); + if (v0 != input()->as_float_reg()) + __ fmovs(v0, input()->as_float_reg()); __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 0, 1, MacroAssembler::ret_type_integral); } else { - __ fmovd(v0, input()->as_double_reg()); + if (v0 != input()->as_double_reg()) + __ fmovd(v0, input()->as_double_reg()); __ call_VM_leaf_base1(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 0, 1, MacroAssembler::ret_type_integral); } diff --git a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp index 6d25dac1d..ec2add8f2 100644 --- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp +++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp @@ -578,16 +578,31 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { } break; case T_INT: + case T_FLOAT: { Register reg = zr; - if (c->as_jint() == 0) + if (c->as_jint_bits() == 0) __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix())); else { - __ movw(rscratch1, c->as_jint()); + __ movw(rscratch1, c->as_jint_bits()); __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix())); } } break; + case T_LONG: + case T_DOUBLE: + { + Register reg = zr; + if (c->as_jlong_bits() == 0) + __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(), + lo_word_offset_in_bytes)); + else { + __ mov(rscratch1, (intptr_t)c->as_jlong_bits()); + __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(), + lo_word_offset_in_bytes)); + } + } + break; default: ShouldNotReachHere(); } @@ -676,7 +691,7 @@ void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) { __ fmovs(dest->as_float_reg(), src->as_float_reg()); } else if (dest->is_double_fpu()) { - __ fmovd(src->as_double_reg(), src->as_double_reg()); + __ fmovd(dest->as_double_reg(), src->as_double_reg()); } else { ShouldNotReachHere(); @@ -688,7 +703,7 @@ void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool 
po if (type == T_ARRAY || type == T_OBJECT) { __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix())); __ verify_oop(src->as_register()); - } else if (type == T_METADATA) { + } else if (type == T_METADATA || type == T_DOUBLE) { __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix())); } else { __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix())); @@ -741,6 +756,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch case T_DOUBLE: { __ strd(src->as_double_reg(), as_Address(to_addr)); + break; } case T_ARRAY: // fall through @@ -748,7 +764,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch if (UseCompressedOops && !wide) { __ strw(compressed_src, as_Address(to_addr, rscratch2)); } else { - __ str(compressed_src, as_Address(to_addr, noreg)); + __ str(compressed_src, as_Address(to_addr)); } break; case T_METADATA: @@ -803,7 +819,7 @@ void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { if (type == T_ARRAY || type == T_OBJECT) { __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); __ verify_oop(dest->as_register()); - } else if (type == T_METADATA) { + } else if (type == T_METADATA || type == T_DOUBLE) { __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); } else { __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); @@ -835,8 +851,14 @@ void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) { } void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { - stack2reg(src, FrameMap::rscratch1_opr, src->type()); - reg2stack(FrameMap::rscratch1_opr, dest, dest->type(), false); + LIR_Opr temp; + if (type == T_LONG) + temp = FrameMap::rscratch1_long_opr; + else + temp = FrameMap::rscratch1_opr; + + stack2reg(src, temp, src->type()); + reg2stack(temp, dest, dest->type(), 
false); } @@ -1046,6 +1068,11 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { __ fcvts(dest->as_double_reg(), src->as_float_reg()); break; } + case Bytecodes::_d2f: + { + __ fcvtd(dest->as_float_reg(), src->as_double_reg()); + break; + } case Bytecodes::_i2c: { __ ubfx(dest->as_register(), src->as_register(), 0, 16); diff --git a/src/cpu/aarch64/vm/frame_aarch64.cpp b/src/cpu/aarch64/vm/frame_aarch64.cpp index 92f940c34..4181bef59 100644 --- a/src/cpu/aarch64/vm/frame_aarch64.cpp +++ b/src/cpu/aarch64/vm/frame_aarch64.cpp @@ -440,34 +440,18 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const { // in C2 code but it will have been pushed onto the stack. so we // have to find it relative to the unextended sp - assert(_cb->frame_size() > 0, "must have non-zero frame size"); - intptr_t* sender_sp = unextended_sp() + _cb->frame_size(); - intptr_t* unextended_sp = sender_sp; + assert(_cb->frame_size() >= 0, "must have non-zero frame size"); + intptr_t* l_sender_sp = unextended_sp() + _cb->frame_size(); + intptr_t* unextended_sp = l_sender_sp; // the return_address is always the word on the stack - address sender_pc = (address) *(sender_sp-1); + address sender_pc = (address) *(l_sender_sp-1); - intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset); + intptr_t** saved_fp_addr = (intptr_t**) (l_sender_sp - frame::sender_sp_offset); - if (map->update_map()) { - // Tell GC to use argument oopmaps for some runtime stubs that need it. - // For C1, the runtime stub might not have oop maps, so set this flag - // outside of update_register_map. 
- map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread())); - if (_cb->oop_maps() != NULL) { - OopMapSet::update_register_map(this, map); - } + assert (sender_sp() == l_sender_sp, "should be"); + assert (*saved_fp_addr == link(), "should be"); - // Since the prolog does the save and restore of EBP there is no oopmap - // for it so we must fill in its location as if there was an oopmap entry - // since if our caller was compiled code there could be live jvm state in it. - update_map_with_saved_link(map, saved_fp_addr); - } - - return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc); -} - -frame frame::sender_for_stub_frame(RegisterMap* map) const { if (map->update_map()) { // Tell GC to use argument oopmaps for some runtime stubs that need it. // For C1, the runtime stub might not have oop maps, so set this flag @@ -480,12 +464,10 @@ frame frame::sender_for_stub_frame(RegisterMap* map) const { // Since the prolog does the save and restore of EBP there is no oopmap // for it so we must fill in its location as if there was an oopmap entry // since if our caller was compiled code there could be live jvm state in it. - - intptr_t** saved_fp_addr = (intptr_t**)fp(); update_map_with_saved_link(map, saved_fp_addr); } - return frame(sender_sp(), link(), sender_pc()); + return frame(l_sender_sp, unextended_sp, *saved_fp_addr, sender_pc); } //------------------------------------------------------------------------------ @@ -504,10 +486,7 @@ frame frame::zsender(RegisterMap* map) const { // This test looks odd: why is it not is_compiled_frame() ? That's // because stubs also have OOP maps. if (_cb != NULL) { - //if (_cb->is_nmethod()) - return sender_for_compiled_frame(map); - //else - //return sender_for_stub_frame(map); + return sender_for_compiled_frame(map); } // Must be native-compiled frame, i.e. 
the marshaling code for native @@ -712,7 +691,23 @@ intptr_t* frame::real_fp() const { static __thread long nextfp; static __thread long nextpc; -extern "C" void pf(unsigned long fp, unsigned long pc) { +static void printbc(Method *m, intptr_t bcx) { + const char *name; + char buf[16]; + if (m->validate_bci_from_bcx(bcx) < 0 + || !m->contains((address)bcx)) { + name = "???"; + snprintf(buf, sizeof buf, "(bad)"); + } else { + int bci = m->bci_from((address)bcx); + snprintf(buf, sizeof buf, "%d", bci); + name = Bytecodes::name(m->code_at(bci)); + } + ResourceMark rm; + printf("%s : %s ==> %s\n", m->name_and_sig_as_C_string(), buf, name); +} + +extern "C" void pf(unsigned long fp, unsigned long pc, unsigned long bcx) { if (! fp) return; @@ -732,11 +727,13 @@ extern "C" void pf(unsigned long fp, unsigned long pc) { nextfp = p[frame::link_offset]; nextpc = p[frame::return_addr_offset]; + if (bcx == -1ul) + bcx = p[frame::interpreter_frame_bcx_offset]; + if (Interpreter::contains((address)pc)) { Method* m = (Method*)p[frame::interpreter_frame_method_offset]; if(m && m->is_method()) { - ResourceMark rm; - printf("%s\n", m->name_and_sig_as_C_string()); + printbc(m, bcx); } else printf("not a Method\n"); } else { @@ -750,7 +747,7 @@ extern "C" void pf(unsigned long fp, unsigned long pc) { } extern "C" void npf() { - pf (nextfp, nextpc); + pf (nextfp, nextpc, -1); } // support for printing out where we are in a Java method @@ -760,16 +757,5 @@ extern "C" void pm(unsigned long fp, unsigned long bcx) { DESCRIBE_FP_OFFSET(interpreter_frame_method); unsigned long *p = (unsigned long *)fp; Method* m = (Method*)p[frame::interpreter_frame_method_offset]; - int bci = 0; - const char *name; - if (m->validate_bci_from_bcx(bcx) < 0 - || !m->contains((address)bcx)) { - bci = 0; - name = "???"; - } else { - bci = m->bci_from((address)bcx); - name = Bytecodes::name(m->code_at(bci)); - } - ResourceMark rm; - printf("%s : %d ==> %s\n", m->name_and_sig_as_C_string(), bci, name); + printbc(m, 
bcx); } diff --git a/src/cpu/aarch64/vm/frame_aarch64.hpp b/src/cpu/aarch64/vm/frame_aarch64.hpp index a5af6a557..78ef0abaa 100644 --- a/src/cpu/aarch64/vm/frame_aarch64.hpp +++ b/src/cpu/aarch64/vm/frame_aarch64.hpp @@ -203,8 +203,6 @@ frame zsender(RegisterMap* map) const; - frame sender_for_stub_frame(RegisterMap* map) const; - #ifndef CC_INTERP // deoptimization support void interpreter_frame_set_last_sp(intptr_t* sp); diff --git a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp index 27f222523..e1f75fcc6 100644 --- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp +++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp @@ -793,13 +793,14 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter) // Get super_klass value into r0 (even if it was in r5 or r2). - bool pushed_r0 = false, pushed_r2 = false, pushed_r5 = false; + bool pushed_r0 = false, pushed_r2 = IS_A_TEMP(r2), pushed_r5 = IS_A_TEMP(r5); + if (super_klass != r0 || UseCompressedOops) { - if (!IS_A_TEMP(r0)) { push(r0); pushed_r0 = true; } - mov(r0, super_klass); + if (!IS_A_TEMP(r0)) + pushed_r0 = true; } - if (!IS_A_TEMP(r2)) { push(r2); pushed_r2 = true; } - if (!IS_A_TEMP(r5)) { push(r5); pushed_r5 = true; } + + push(r0->bit(pushed_r0) | r2->bit(pushed_r2) | r2->bit(pushed_r5), sp); #ifndef PRODUCT mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr); @@ -816,25 +817,15 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, // Skip to start of data. add(r5, r5, Array<Klass*>::base_offset_in_bytes()); + cmp(sp, zr); // Clear Z flag; SP is never zero // Scan R2 words at [R5] for an occurrence of R0. // Set NZ/Z based on last compare. - // Z flag value will not be set by 'repne' if R2 == 0 since 'repne' does - // not change flags (only scas instruction which is repeated sets flags). - repne_scan(r5, r0, r2, rscratch1); // Unspill the temp. 
registers: - if (pushed_r5) pop(r5); - if (pushed_r2) pop(r2); - if (pushed_r0) pop(r0); - - if (set_cond_codes) { - // Special hack for the AD files: r5 is guaranteed non-zero. - assert(!pushed_r5, "r5 must be left non-NULL"); - // Also, the condition codes are properly set Z/NZ on succeed/failure. - } + pop(r0->bit(pushed_r0) | r2->bit(pushed_r2) | r2->bit(pushed_r5), sp); - cbz(r2, *L_failure); + br(Assembler::NE, *L_failure); // Success. Cache the super we found and proceed in triumph. str(super_klass, super_cache_addr); diff --git a/src/cpu/aarch64/vm/register_aarch64.hpp b/src/cpu/aarch64/vm/register_aarch64.hpp index d0126f9e2..2493cca12 100644 --- a/src/cpu/aarch64/vm/register_aarch64.hpp +++ b/src/cpu/aarch64/vm/register_aarch64.hpp @@ -59,8 +59,8 @@ class RegisterImpl: public AbstractRegisterImpl { bool is_valid() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_registers; } bool has_byte_register() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_byte_registers; } const char* name() const; - int encoding_nocheck() const { return (intptr_t)this; } - + int encoding_nocheck() const { return (intptr_t)this; } + unsigned long bit(bool yes) const { return yes << encoding(); } }; // The integer registers of the aarch64 architecture diff --git a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp index 62dfccfa6..fdb887251 100644 --- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp +++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp @@ -960,7 +960,14 @@ static void object_move(MacroAssembler* masm, } // A float arg may have to do float reg int reg conversion -static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { Unimplemented(); } +static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { + if (src.first() != dst.first()) { + if (src.is_single_phys_reg() && dst.is_single_phys_reg()) + __ fmovs(dst.first()->as_FloatRegister(), 
src.first()->as_FloatRegister()); + else + ShouldNotReachHere(); + } +} // A long move static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { @@ -987,11 +994,13 @@ static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { // A double move -static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { - if (src.is_single_phys_reg() && dst.is_single_phys_reg()) - __ fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); - else - ShouldNotReachHere(); +static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { + if (src.first() != dst.first()) { + if (src.is_single_phys_reg() && dst.is_single_phys_reg()) + __ fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); + else + ShouldNotReachHere(); + } } @@ -1143,8 +1152,12 @@ static void rt_call(MacroAssembler* masm, address dest, int gpargs, int fpargs, if (cb) { __ bl(RuntimeAddress(dest)); } else { + assert((unsigned)gpargs < 256, "eek!"); + assert((unsigned)fpargs < 32, "eek!"); __ mov(rscratch1, RuntimeAddress(dest)); - __ brx86(rscratch1, gpargs, fpargs, type); + __ mov(rscratch2, (gpargs << 6) | (fpargs << 2) | type); + __ brx86(rscratch1, rscratch2); + // __ brx86(rscratch1, gpargs, fpargs, type); } } @@ -1801,7 +1814,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, // least significant 2 bits clear. // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg - __ sub(swap_reg, swap_reg, sp); + __ sub(swap_reg, sp, swap_reg); + __ neg(swap_reg, swap_reg); __ ands(swap_reg, swap_reg, 3 - os::vm_page_size()); // Save the test result, for recursive case, the result is zero @@ -2451,8 +2465,9 @@ void SharedRuntime::generate_deopt_blob() { __ ldr(lr, Address(r2)); __ enter(); - // Allocate a full sized register save area. - __ sub(sp, sp, frame_size_in_words * wordSize); + // Allocate a full sized register save area. 
We subtract 2 because + // enter() just pushed 2 words + __ sub(sp, sp, (frame_size_in_words - 2) * wordSize); // Restore frame locals after moving the frame __ strd(v0, Address(sp, RegisterSaver::v0_offset_in_bytes())); diff --git a/src/cpu/aarch64/vm/templateTable_aarch64.cpp b/src/cpu/aarch64/vm/templateTable_aarch64.cpp index 61f85b782..7a1ccd9e3 100644 --- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp +++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp @@ -1578,15 +1578,15 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) if (is_jsr) { // Pre-load the next target bytecode into rscratch1 - __ load_unsigned_byte(rscratch1, Address(rbcp, 0)); + __ load_unsigned_byte(rscratch1, Address(rbcp, r2)); // compute return address as bci __ ldr(rscratch2, Address(rmethod, Method::const_offset())); __ add(rscratch2, rscratch2, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3)); - // Adjust the bcp by the 16-bit displacement in r2 - __ add(rbcp, rbcp, r2); __ sub(r1, rbcp, rscratch2); __ push_i(r1); + // Adjust the bcp by the 16-bit displacement in r2 + __ add(rbcp, rbcp, r2); __ dispatch_only(vtos); return; } diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp index 21e5cce7b..c6b1a832a 100644 --- a/src/share/vm/runtime/arguments.cpp +++ b/src/share/vm/runtime/arguments.cpp @@ -2103,8 +2103,13 @@ jint Arguments::parse_vm_init_args(const JavaVMInitArgs* args) { Arguments::_ClipInlining = ClipInlining; Arguments::_BackgroundCompilation = BackgroundCompilation; +#ifdef TARGET_ARCH_aarch64 + // AArch64 needs to default to -Xint + set_mode_flags(_int); +#else // Setup flags for mixed which is the default set_mode_flags(_mixed); +#endif // Parse JAVA_TOOL_OPTIONS environment variable (if present) jint result = parse_java_tool_options_environment_variable(&scp, &scp_assembly_required); |