author     aph <none@none>    2014-06-19 10:05:45 -0400
committer  aph <none@none>    2014-06-19 10:05:45 -0400
commit     4b03e6ff564b65b86ae6d9a3fba85cfa7b891642 (patch)
tree       c6c532b6022bb950c431ff1acd461245d0815a1f
parent     1e27d4b4dcab24daf26339456633c3d64062930e (diff)
Remove obsolete C1 patching code.
-rw-r--r--  src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp    | 128
-rw-r--r--  src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp | 104
-rw-r--r--  src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp |   2
-rw-r--r--  src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp     | 286
-rw-r--r--  src/cpu/aarch64/vm/globals_aarch64.hpp         |   6
-rw-r--r--  src/cpu/aarch64/vm/nativeInst_aarch64.cpp      |   3
-rw-r--r--  src/cpu/aarch64/vm/relocInfo_aarch64.cpp       |  75
-rw-r--r--  src/share/vm/code/relocInfo.cpp                |   5
-rw-r--r--  src/share/vm/code/relocInfo.hpp                |   4
9 files changed, 92 insertions, 521 deletions
diff --git a/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp b/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp
index cdedc64ec..2b6b0f852 100644
--- a/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp
+++ b/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp
@@ -320,133 +320,7 @@ void PatchingStub::align_patch_site(MacroAssembler* masm) {
}
void PatchingStub::emit_code(LIR_Assembler* ce) {
- assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");
-
- Label call_patch;
-
- // static field accesses have special semantics while the class
- // initializer is being run so we emit a test which can be used to
- // check that this code is being executed by the initializing
- // thread.
- address being_initialized_entry = __ pc();
- if (CommentedAssembly) {
- __ block_comment(" patch template");
- }
-
- // make a copy the code which is going to be patched.
- for (int i = 0; i < _bytes_to_copy; i++) {
- address ptr = (address)(_pc_start + i);
- int a_byte = (*ptr) & 0xFF;
- __ emit_int8(a_byte);
- }
-
- address end_of_patch = __ pc();
- int bytes_to_skip = 0;
- if (_id == load_mirror_id) {
- int offset = __ offset();
- if (CommentedAssembly) {
- __ block_comment(" being_initialized check");
- }
- assert(_obj != noreg, "must be a valid register");
- Register tmp = r0;
- Register tmp2 = r19;
- __ stp(tmp, tmp2, Address(__ pre(sp, -2 * wordSize)));
- // Load without verification to keep code size small. We need it because
- // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
- __ ldr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
- __ ldr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
- __ cmp(rthread, tmp);
- __ ldp(tmp, tmp2, Address(__ post(sp, 2 * wordSize)));
- __ br(Assembler::NE, call_patch);
-
- // access_field patches may execute the patched code before it's
- // copied back into place so we need to jump back into the main
- // code of the nmethod to continue execution.
- __ b(_patch_site_continuation);
-
- // make sure this extra code gets skipped
- bytes_to_skip += __ offset() - offset;
- }
- if (CommentedAssembly) {
- __ block_comment("patch data");
- }
- // Now emit the patch record telling the runtime how to find the
- // pieces of the patch.
- int sizeof_patch_record = 8;
- bytes_to_skip += sizeof_patch_record;
-
- // emit the offsets needed to find the code to patch
- int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
-
- // If this is a field access, the offset is held in the constant
- // pool rather than embedded in the instruction, so we don't copy
- // any instructions: we set the value in the constant pool and
- // overwrite the NativeGeneralJump.
- {
- Label L;
- __ br(Assembler::AL, L);
- __ emit_int8(0);
- __ emit_int8(being_initialized_entry_offset);
- if (_id == access_field_id) {
- __ emit_int8(bytes_to_skip + _bytes_to_copy);
- __ emit_int8(0);
- } else {
- __ emit_int8(bytes_to_skip);
- __ emit_int8(_bytes_to_copy);
- }
- __ bind(L);
- }
-
- address patch_info_pc = __ pc();
- assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
-
- address entry = __ pc();
- NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
- address target = NULL;
- relocInfo::relocType reloc_type = relocInfo::none;
-
- switch (_id) {
- case access_field_id:
- target = Runtime1::entry_for(Runtime1::access_field_patching_id);
- reloc_type = relocInfo::section_word_type;
- break;
- case load_klass_id:
- target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
- reloc_type = relocInfo::metadata_type;
- break;
- case load_mirror_id:
- target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
- reloc_type = relocInfo::oop_type;
- break;
- case load_appendix_id:
- target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
- reloc_type = relocInfo::oop_type;
- break;
- default: ShouldNotReachHere();
- }
-
- __ bind(call_patch);
-
- if (CommentedAssembly) {
- __ block_comment("patch entry point");
- }
- __ bl(RuntimeAddress(target));
- assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
- ce->add_call_info_here(_info);
- int jmp_off = __ offset();
- __ b(_patch_site_entry);
- // Add enough nops so deoptimization can overwrite the jmp above with a call
- // and not destroy the world.
- // FIXME: AArch64 doesn't really need this
- // __ nop(); __ nop();
- // if (_id == load_klass_id
- // || _id == load_mirror_id
- // || _id == access_field_id
- // ) {
- // CodeSection* cs = __ code_section();
- // RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
- // relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
- // }
+ assert(false, "AArch64 should not use C1 runtime patching");
}
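
For reference, the template deleted above wrote a small patch record just behind the stub's call to the Runtime1 entry: a byte count, a skip count, and the being_initialized entry offset. The runtime side (removed from Runtime1::patch_code_aarch64 further down) read those bytes back at fixed negative offsets from the stub location. A standalone sketch of that decoding follows; the struct and function names are purely illustrative.

// Standalone sketch, not HotSpot code: how the deleted runtime path located
// the saved copy of the to-be-patched instructions from the three record
// bytes the stub emitted behind its call site.
struct PatchRecordView {
  unsigned char byte_count;                      // read from stub_location - 1
  unsigned char byte_skip;                       // read from stub_location - 2
  unsigned char being_initialized_entry_offset;  // read from stub_location - 3
};

static unsigned char* copy_buffer_for(unsigned char* stub_location) {
  PatchRecordView rec = { stub_location[-1], stub_location[-2], stub_location[-3] };
  // The copy buffer sits just before the record bytes and the skipped
  // template code, exactly as the deleted Runtime1 code computed it.
  return stub_location - rec.byte_skip - rec.byte_count;
}
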
diff --git a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
index fb2f18958..1f3433460 100644
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
+#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
@@ -200,10 +201,7 @@ Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
return Address(base, addr_offset, Address::lsl(addr->scale()));
else {
- // This is a rather long-winded instruction sequence, but the
- // offset is atomically patchable. See PatchingStub::install().
- Address const_addr = InternalAddress(int_constant(addr_offset));
- __ ldr_constant(tmp, const_addr);
+ __ mov(tmp, addr_offset);
return Address(base, tmp, Address::lsl(addr->scale()));
}
}
@@ -320,21 +318,36 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
}
}
+void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
+ address target = NULL;
+ relocInfo::relocType reloc_type = relocInfo::none;
-void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
- // Allocate a new index in table to hold the object once it's been patched
- int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
- PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
-
- if (DeoptimizeWhenPatching) {
- __ nop();
- } else {
- RelocationHolder rspec = oop_Relocation::spec(oop_index);
- address const_ptr = int_constant(-1);
- __ code()->consts()->relocate(const_ptr, rspec);
- __ ldr_constant(reg, InternalAddress(const_ptr));
+ switch (patching_id(info)) {
+ case PatchingStub::access_field_id:
+ target = Runtime1::entry_for(Runtime1::access_field_patching_id);
+ reloc_type = relocInfo::section_word_type;
+ break;
+ case PatchingStub::load_klass_id:
+ target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+ reloc_type = relocInfo::metadata_type;
+ break;
+ case PatchingStub::load_mirror_id:
+ target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+ reloc_type = relocInfo::oop_type;
+ break;
+ case PatchingStub::load_appendix_id:
+ target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+ reloc_type = relocInfo::oop_type;
+ break;
+ default: ShouldNotReachHere();
}
- patching_epilog(patch, lir_patch_normal, reg, info);
+
+ __ bl(RuntimeAddress(target));
+ add_call_info_here(info);
+}
+
+void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
+ deoptimize_trap(info);
}
@@ -801,23 +814,21 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
PatchingStub* patch = NULL;
Register compressed_src = rscratch1;
+ if (patch_code != lir_patch_none) {
+ deoptimize_trap(info);
+ return;
+ }
+
if (type == T_ARRAY || type == T_OBJECT) {
__ verify_oop(src->as_register());
if (UseCompressedOops && !wide) {
__ encode_heap_oop(compressed_src, src->as_register());
- if (patch_code != lir_patch_none) {
- info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
- }
} else {
compressed_src = src->as_register();
}
}
- if (patch_code != lir_patch_none) {
- patch = new PatchingStub(_masm, PatchingStub::access_field_id);
- }
-
int null_check_here = code_offset();
switch (type) {
case T_FLOAT: {
@@ -875,10 +886,6 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
if (info != NULL) {
add_debug_info_for_null_check(null_check_here, info);
}
-
- if (patch_code != lir_patch_none) {
- patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
- }
}
@@ -915,13 +922,31 @@ void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
- Metadata* o = NULL;
- PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
- if (DeoptimizeWhenPatching)
- __ nop();
- else
- __ mov_metadata(reg, o);
- patching_epilog(patch, lir_patch_normal, reg, info);
+ address target = NULL;
+ relocInfo::relocType reloc_type = relocInfo::none;
+
+ switch (patching_id(info)) {
+ case PatchingStub::access_field_id:
+ target = Runtime1::entry_for(Runtime1::access_field_patching_id);
+ reloc_type = relocInfo::section_word_type;
+ break;
+ case PatchingStub::load_klass_id:
+ target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+ reloc_type = relocInfo::metadata_type;
+ break;
+ case PatchingStub::load_mirror_id:
+ target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+ reloc_type = relocInfo::oop_type;
+ break;
+ case PatchingStub::load_appendix_id:
+ target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+ reloc_type = relocInfo::oop_type;
+ break;
+ default: ShouldNotReachHere();
+ }
+
+ __ bl(RuntimeAddress(target));
+ add_call_info_here(info);
}
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
@@ -944,10 +969,9 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
__ verify_oop(addr->base()->as_pointer_register());
}
- PatchingStub* patch = NULL;
-
if (patch_code != lir_patch_none) {
- patch = new PatchingStub(_masm, PatchingStub::access_field_id);
+ deoptimize_trap(info);
+ return;
}
if (info != NULL) {
@@ -1019,10 +1043,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
ShouldNotReachHere();
}
- if (patch != NULL) {
- patching_epilog(patch, patch_code, addr->base()->as_register(), info);
- }
-
if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
if (UseCompressedOops && !wide) {
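
The rewritten helpers above (deoptimize_trap and klass2reg_with_patching) share one dispatch: map the PatchingStub id to the matching Runtime1 patching entry, then emit a bl to it. Below is a condensed, self-contained restatement of that mapping; the enum and the strings are illustrative stand-ins, and only the pairing mirrors the hunk.

// Illustrative sketch of the id -> (Runtime1 entry, reloc type) pairing used
// by deoptimize_trap() and klass2reg_with_patching(); not HotSpot code.
#include <cstdio>

enum PatchId { access_field_id, load_klass_id, load_mirror_id, load_appendix_id };

struct PatchTarget { const char* runtime1_entry; const char* reloc_type; };

static PatchTarget target_for(PatchId id) {
  switch (id) {
    case access_field_id:  return { "access_field_patching_id",  "section_word_type" };
    case load_klass_id:    return { "load_klass_patching_id",    "metadata_type" };
    case load_mirror_id:   return { "load_mirror_patching_id",   "oop_type" };
    case load_appendix_id: return { "load_appendix_patching_id", "oop_type" };
  }
  return { "unreachable", "none" };
}

int main() {
  PatchTarget t = target_for(load_klass_id);
  std::printf("%s via %s relocation\n", t.runtime1_entry, t.reloc_type);
  return 0;
}
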
diff --git a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp
index bf2d70320..5a3f551ed 100644
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp
@@ -64,6 +64,8 @@
void init() { tableswitch_count = 0; }
+ void deoptimize_trap(CodeEmitInfo *info);
+
public:
void store_parameter(Register r, int offset_from_esp_in_words);
diff --git a/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp b/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
index 622776326..a5e3ae810 100644
--- a/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
+++ b/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
@@ -1321,19 +1321,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
#undef __
-static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
- Bytecode_field field_access(caller, bci);
- // This can be static or non-static field access
- Bytecodes::Code code = field_access.code();
-
- // We must load class, initialize class and resolvethe field
- fieldDescriptor result; // initialize class if needed
- constantPoolHandle constants(THREAD, caller->constants());
- LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
- return result.field_holder();
-}
-
-
// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized
@@ -1347,261 +1334,40 @@ static bool caller_is_deopted() {
}
JRT_ENTRY(void, Runtime1::patch_code_aarch64(JavaThread* thread, Runtime1::StubID stub_id ))
- NOT_PRODUCT(_patch_code_slowcase_cnt++;)
-
- ResourceMark rm(thread);
+{
RegisterMap reg_map(thread, false);
- frame runtime_frame = thread->last_frame();
- frame caller_frame = runtime_frame.sender(&reg_map);
-
- if (DeoptimizeWhenPatching) {
- // According to the ARMv8 ARM, "Concurrent modification and
- // execution of instructions can lead to the resulting instruction
- // performing any behavior that can be achieved by executing any
- // sequence of instructions that can be executed from the same
- // Exception level, except where the instruction before
- // modification and the instruction after modification is a B, BL,
- // NOP, BKPT, SVC, HVC, or SMC instruction."
- //
- // This effectively makes the games we play when patching
- // impossible, so when we come across an access that needs
- // patching we must deoptimize.
-
- if (TracePatching) {
- tty->print_cr("Deoptimizing because patch is needed");
- }
- // It's possible the nmethod was invalidated in the last
- // safepoint, but if it's still alive then make it not_entrant.
- nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
- if (nm != NULL) {
- nm->make_not_entrant();
- }
- Deoptimization::deoptimize_frame(thread, caller_frame.id());
-
- // Return to the now deoptimized frame.
- return;
- }
-
- // last java frame on stack
- vframeStream vfst(thread, true);
- assert(!vfst.at_end(), "Java frame must exist");
-
- methodHandle caller_method(THREAD, vfst.method());
- // Note that caller_method->code() may not be same as caller_code because of OSR's
- // Note also that in the presence of inlining it is not guaranteed
- // that caller_method() == caller_code->method()
-
- int bci = vfst.bci();
- Bytecodes::Code code = caller_method()->java_code_at(bci);
-
- bool deoptimize_for_volatile = false;
- int patch_field_offset = -1;
- KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
- KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
- Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code
- fieldDescriptor result; // initialize class if needed
-
- bool load_klass_or_mirror_patch_id =
- (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
-
- if (stub_id == Runtime1::access_field_patching_id) {
-
- Bytecode_field field_access(caller_method, bci);
- fieldDescriptor result; // initialize class if needed
- Bytecodes::Code code = field_access.code();
- constantPoolHandle constants(THREAD, caller_method->constants());
- LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
- patch_field_offset = result.offset();
-
- // If we're patching a field which is volatile then at compile it
- // must not have been known to be volatile, so the generated code
- // isn't correct for a volatile reference. The nmethod has to be
- // deoptimized so that the code can be regenerated correctly.
- // This check is only needed for access_field_patching since this
- // is the path for patching field offsets. load_klass is only
- // used for patching references to oops which don't need special
- // handling in the volatile case.
- deoptimize_for_volatile = result.access_flags().is_volatile();
- } else if (load_klass_or_mirror_patch_id) {
- Klass* k = NULL;
- switch (code) {
- case Bytecodes::_putstatic:
- case Bytecodes::_getstatic:
- { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
- init_klass = KlassHandle(THREAD, klass);
- mirror = Handle(THREAD, klass->java_mirror());
- }
- break;
- case Bytecodes::_new:
- { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
- k = caller_method->constants()->klass_at(bnew.index(), CHECK);
- }
- break;
- case Bytecodes::_multianewarray:
- { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
- k = caller_method->constants()->klass_at(mna.index(), CHECK);
- }
- break;
- case Bytecodes::_instanceof:
- { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
- k = caller_method->constants()->klass_at(io.index(), CHECK);
- }
- break;
- case Bytecodes::_checkcast:
- { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
- k = caller_method->constants()->klass_at(cc.index(), CHECK);
- }
- break;
- case Bytecodes::_anewarray:
- { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
- Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
- k = ek->array_klass(CHECK);
- }
- break;
- case Bytecodes::_ldc:
- case Bytecodes::_ldc_w:
- {
- Bytecode_loadconstant cc(caller_method, bci);
- oop m = cc.resolve_constant(CHECK);
- mirror = Handle(THREAD, m);
- }
- break;
- default: Unimplemented();
- }
- // convert to handle
- load_klass = KlassHandle(THREAD, k);
- } else {
- ShouldNotReachHere();
+ NOT_PRODUCT(_patch_code_slowcase_cnt++;)
+ // According to the ARMv8 ARM, "Concurrent modification and
+ // execution of instructions can lead to the resulting instruction
+ // performing any behavior that can be achieved by executing any
+ // sequence of instructions that can be executed from the same
+ // Exception level, except where the instruction before
+ // modification and the instruction after modification is a B, BL,
+ // NOP, BKPT, SVC, HVC, or SMC instruction."
+ //
+ // This effectively makes the games we play when patching
+ // impossible, so when we come across an access that needs
+ // patching we must deoptimize.
+
+ if (TracePatching) {
+ tty->print_cr("Deoptimizing because patch is needed");
}
- if (deoptimize_for_volatile) {
- // At compile time we assumed the field wasn't volatile but after
- // loading it turns out it was volatile so we have to throw the
- // compiled code out and let it be regenerated.
- if (TracePatching) {
- tty->print_cr("Deoptimizing for patching volatile field reference");
- }
- // It's possible the nmethod was invalidated in the last
- // safepoint, but if it's still alive then make it not_entrant.
- nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
- if (nm != NULL) {
- nm->make_not_entrant();
- }
-
- Deoptimization::deoptimize_frame(thread, caller_frame.id());
-
- // Return to the now deoptimized frame.
- }
+ frame runtime_frame = thread->last_frame();
+ frame caller_frame = runtime_frame.sender(&reg_map);
- // If we are patching in a non-perm oop, make sure the nmethod
- // is on the right list.
- if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
- MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
- nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
- guarantee(nm != NULL, "only nmethods can contain non-perm oops");
- if (!nm->on_scavenge_root_list())
- CodeCache::add_scavenge_root_nmethod(nm);
+ // It's possible the nmethod was invalidated in the last
+ // safepoint, but if it's still alive then make it not_entrant.
+ nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
+ if (nm != NULL) {
+ nm->make_not_entrant();
}
- // Now copy code back
- {
- MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
- //
- // Deoptimization may have happened while we waited for the lock.
- // In that case we don't bother to do any patching we just return
- // and let the deopt happen
- if (!caller_is_deopted()) {
- NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
- address instr_pc = jump->jump_destination();
- NativeInstruction* ni = nativeInstruction_at(instr_pc);
- if (ni->is_jump() ) {
- // the jump has not been patched yet
- address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
- unsigned char* byte_count = (unsigned char*) (stub_location - 1);
- unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
- unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
- address copy_buff = stub_location - *byte_skip - *byte_count;
- address being_initialized_entry = stub_location - *being_initialized_entry_offset;
- if (TracePatching) {
- tty->print_cr(" Patching %s at bci %d at address 0x%x (%s)", Bytecodes::name(code), bci,
- instr_pc, (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");
- nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
- assert(caller_code != NULL, "nmethod not found");
-
- // NOTE we use pc() not original_pc() because we already know they are
- // identical otherwise we'd have never entered this block of code
- OopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
- assert(map != NULL, "null check");
- map->print();
- tty->cr();
-
- Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
- }
-
- // The word in the constant pool needs fixing.
- unsigned insn = *(unsigned*)copy_buff;
- unsigned long *cpool_addr
- = (unsigned long *)MacroAssembler::target_addr_for_insn(instr_pc, insn);
-
- nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
- CodeBlob *cb = CodeCache::find_blob(caller_frame.pc());
- assert(nm != NULL, "invalid nmethod_pc");
- assert(address(cpool_addr) >= nm->consts_begin()
- && address(cpool_addr) < nm->consts_end(),
- "constant address should be inside constant pool");
-
- switch(stub_id) {
- case access_field_patching_id:
- *cpool_addr = patch_field_offset; break;
- case load_mirror_patching_id:
- *cpool_addr = cast_from_oop<uint64_t>(mirror()); break;
- case load_klass_patching_id:
- *cpool_addr = (uint64_t)load_klass(); break;
- default:
- ShouldNotReachHere();
- }
-
- // Update the location in the nmethod with the proper
- // metadata. When the code was generated, a NULL was stuffed
- // in the metadata table and that table needs to be update to
- // have the right value. On intel the value is kept
- // directly in the instruction instead of in the metadata
- // table, so set_data above effectively updated the value.
- //
- // FIXME: It's tempting to think that rather them putting OOPs
- // in the cpool we could refer directly to the locations in the
- // nmethod. However, we can't guarantee that an ADRP would be
- // able to reach them: an ADRP can only reach within +- 4GiB of
- // the PC using two instructions. While it's pretty unlikely
- // that we will exceed this limit, it's not impossible.
- RelocIterator mds(nm, (address)cpool_addr, (address)cpool_addr + 1);
- bool found = false;
- while (mds.next() && !found) {
- if (mds.type() == relocInfo::oop_type) {
- assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
- oop_Relocation* r = mds.oop_reloc();
- oop* oop_adr = r->oop_addr();
- *oop_adr = mirror();
- r->fix_oop_relocation();
- found = true;
- } else if (mds.type() == relocInfo::metadata_type) {
- assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
- metadata_Relocation* r = mds.metadata_reloc();
- Metadata** metadata_adr = r->metadata_addr();
- *metadata_adr = load_klass();
- r->fix_metadata_relocation();
- found = true;
- }
- }
-
- // And we overwrite the jump
- NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
-
- }
- }
- }
+ Deoptimization::deoptimize_frame(thread, caller_frame.id());
+ // Return to the now deoptimized frame.
+}
JRT_END
int Runtime1::access_field_patching(JavaThread* thread) {
diff --git a/src/cpu/aarch64/vm/globals_aarch64.hpp b/src/cpu/aarch64/vm/globals_aarch64.hpp
index 3dcdd6ffd..112560b8b 100644
--- a/src/cpu/aarch64/vm/globals_aarch64.hpp
+++ b/src/cpu/aarch64/vm/globals_aarch64.hpp
@@ -94,9 +94,6 @@ define_pd_global(intx, InlineSmallCode, 1000);
product(bool, NearCpool, true, \
"constant pool is close to instructions") \
\
- product(bool, DeoptimizeWhenPatching, true, \
- "doptimize instead of patching instructions") \
- \
notproduct(bool, UseAcqRelForVolatileFields, false, \
"Use acquire and release insns for volatile fields")
@@ -111,9 +108,6 @@ define_pd_global(intx, InlineSmallCode, 1000);
product(bool, NearCpool, true, \
"constant pool is close to instructions") \
\
- product(bool, DeoptimizeWhenPatching, true, \
- "doptimize instead of patching instructions") \
- \
notproduct(bool, UseAcqRelForVolatileFields, false, \
"Use acquire and release insns for volatile fields")
diff --git a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp
index 81a006245..ba7ef859b 100644
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp
@@ -242,8 +242,7 @@ void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
- assert((! DeoptimizeWhenPatching)
- || nativeInstruction_at(instr_addr)->is_jump_or_nop(),
+ assert(nativeInstruction_at(instr_addr)->is_jump_or_nop(),
"Aarch64 cannot replace non-jump with jump");
uint32_t instr = *(uint32_t*)code_buffer;
*(uint32_t*)instr_addr = instr;
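
The single 32-bit store kept in replace_mt_safe is exactly the case the ARMv8 ARM quote in the Runtime1 hunk permits: swapping one B/BL/NOP-class instruction word for another while other threads execute it. A minimal standalone sketch of that kind of patch follows; the encoding helper and the cache-maintenance call are assumptions of the sketch, not taken from the patch.

// Minimal sketch, not HotSpot code: atomically replace one AArch64 branch
// (or NOP) with a direct branch to `target`, then invalidate the I-cache.
// Assumes the target is within the +/-128 MiB range of the B immediate.
#include <cstdint>

static uint32_t encode_b(const uint32_t* insn, const void* target) {
  int64_t off = (const char*)target - (const char*)insn;        // byte offset
  return 0x14000000u | (uint32_t)((off >> 2) & 0x03FFFFFF);     // B imm26
}

static void patch_branch(uint32_t* insn, const void* target) {
  // A single aligned 32-bit store, as in replace_mt_safe above: concurrent
  // executors see either the old or the new branch, never a torn instruction.
  __atomic_store_n(insn, encode_b(insn, target), __ATOMIC_RELEASE);
  __builtin___clear_cache((char*)insn, (char*)(insn + 1));
}
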
diff --git a/src/cpu/aarch64/vm/relocInfo_aarch64.cpp b/src/cpu/aarch64/vm/relocInfo_aarch64.cpp
index 301f88a6b..12c5bc974 100644
--- a/src/cpu/aarch64/vm/relocInfo_aarch64.cpp
+++ b/src/cpu/aarch64/vm/relocInfo_aarch64.cpp
@@ -90,78 +90,3 @@ void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, Co
void metadata_Relocation::pd_fix_value(address x) {
}
-
-// We have a relocation that points to a pair of instructions that
-// load a constant from the constant pool. These are
-// ARDP; LDR reg [reg, #ofs]. However, until the constant is resolved
-// the first instruction may be a branch to a resolver stub, and the
-// resolver stub contains a copy of the ADRP that will replace the
-// branch instruction.
-//
-// So, when we relocate this code we have to adjust the offset in the
-// LDR instruction and the page offset in the copy of the ADRP
-// instruction that will overwrite the branch instruction. This is
-// done by Runtime1::patch_code_aarch64.
-
-void section_word_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
- unsigned insn1 = *(unsigned*)addr();
- if (! (Instruction_aarch64::extract(insn1, 30, 26) == 0b00101)) {
- // Unconditional branch (immediate)
- internal_word_Relocation::fix_relocation_after_move(src, dest);
- return;
- }
-
- address new_address = target();
-#ifdef ASSERT
- // Make sure this really is a cpool address
- address old_cpool_start = const_cast<CodeBuffer*>(src)->consts()->start();
- address old_cpool_end = const_cast<CodeBuffer*>(src)->consts()->end();
- address new_cpool_start = const_cast<CodeBuffer*>(dest)->consts()->start();
- address new_cpool_end = const_cast<CodeBuffer*>(dest)->consts()->end();
- address old_address = old_addr_for(target(), src, dest);
- assert(new_address >= new_cpool_start
- && new_address < new_cpool_end,
- "should be");
- assert(old_address >= old_cpool_start
- && old_address < old_cpool_end,
- "should be");
-#endif
-
- address stub_location = pd_call_destination(addr());
- unsigned char* byte_count = (unsigned char*) (stub_location - 1);
- unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
- address copy_buff = stub_location - *byte_skip - *byte_count;
- unsigned insn3 = *(unsigned*)copy_buff;
-
- if (NearCpool) {
- int offset = new_address - addr();
- Instruction_aarch64::spatch(copy_buff, 23, 5, offset >> 2);
- } else {
- // Unconditional branch (immediate)
- unsigned insn2 = ((unsigned*)addr())[1];
- if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001) {
- // Load/store register (unsigned immediate)
- unsigned size = Instruction_aarch64::extract(insn2, 31, 30);
-
- // Offset of address in a 4k page
- uint64_t new_offset = (uint64_t)target() & ((1<<12) - 1);
- // Fix the LDR instruction's offset
- Instruction_aarch64::patch(addr() + sizeof (unsigned),
- 21, 10, new_offset >> size);
-
- assert(Instruction_aarch64::extract(insn3, 28, 24) == 0b10000
- && Instruction_aarch64::extract(insn3, 31, 31),
- "instruction should be an ADRP");
-
- uint64_t insn_page = (uint64_t)addr() >> 12;
- uint64_t target_page = (uint64_t)target() >> 12;
- int page_offset = target_page - insn_page;
- int page_offset_lo = page_offset & 3;
- page_offset >>= 2;
- Instruction_aarch64::spatch(copy_buff, 23, 5, page_offset);
- Instruction_aarch64::patch(copy_buff, 30, 29, page_offset_lo);
-
- // Phew.
- }
- }
-}
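
The deleted fix_relocation_after_move did the ADRP immediate arithmetic by hand: the signed 21-bit delta between the target's 4 KiB page and the instruction's page is split into immlo (instruction bits 30:29) and immhi (bits 23:5). A standalone restatement of just that arithmetic, mirroring the spatch/patch calls above; the struct name is illustrative.

// Standalone sketch of the ADRP immediate split performed by the deleted
// relocation code: ADRP materializes target_page - insn_page, with the low
// two bits in instruction bits 30:29 and the signed high bits in 23:5.
#include <cstdint>

struct AdrpImm {
  int32_t  immhi;  // goes into instruction bits 23:5 (signed)
  uint32_t immlo;  // goes into instruction bits 30:29
};

static AdrpImm adrp_imm_for(uint64_t insn_addr, uint64_t target_addr) {
  int64_t page_delta = (int64_t)(target_addr >> 12) - (int64_t)(insn_addr >> 12);
  AdrpImm imm;
  imm.immlo = (uint32_t)(page_delta & 3);
  imm.immhi = (int32_t)(page_delta >> 2);
  return imm;
}
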
diff --git a/src/share/vm/code/relocInfo.cpp b/src/share/vm/code/relocInfo.cpp
index e5fac02c8..4d09a3276 100644
--- a/src/share/vm/code/relocInfo.cpp
+++ b/src/share/vm/code/relocInfo.cpp
@@ -147,11 +147,6 @@ void RelocIterator::initialize(nmethod* nm, address begin, address limit) {
_section_end [CodeBuffer::SECT_STUBS ] = nm->stub_end() ;
assert(!has_current(), "just checking");
-#ifndef TARGET_ARCH_aarch64
- // aarch64 has relocs in the cpool
- assert(begin == NULL || begin >= nm->code_begin(), "in bounds");
- assert(limit == NULL || limit <= nm->code_end(), "in bounds");
-#endif
set_limits(begin, limit);
}
diff --git a/src/share/vm/code/relocInfo.hpp b/src/share/vm/code/relocInfo.hpp
index db9a14437..ad55a2fd9 100644
--- a/src/share/vm/code/relocInfo.hpp
+++ b/src/share/vm/code/relocInfo.hpp
@@ -1307,10 +1307,6 @@ class section_word_Relocation : public internal_word_Relocation {
//void pack_data_to -- inherited
void unpack_data();
-#ifdef TARGET_ARCH_aarch64
- void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
-#endif
-
private:
friend class RelocIterator;
section_word_Relocation() { }