author     twisti <none@none>  2012-11-30 11:44:05 -0800
committer  twisti <none@none>  2012-11-30 11:44:05 -0800
commit     97af567120be05d5fb4aa23a24d872cd6d31eb34 (patch)
tree       c2eaa562cc8aea9b66441f7ba8c46d34c8bd369e /src
parent     908f12a2e9dc99b6f724c07af89ccfb975fadc91 (diff)
8003195: AbstractAssembler should not store code pointers but use the CodeSection directly
Reviewed-by: twisti, kvn
Contributed-by: Bharadwaj Yadavalli <bharadwaj.yadavalli@oracle.com>
Diffstat (limited to 'src')
-rw-r--r--  src/cpu/x86/vm/assembler_x86.cpp         | 26
-rw-r--r--  src/cpu/x86/vm/assembler_x86.hpp         |  2
-rw-r--r--  src/cpu/x86/vm/assembler_x86.inline.hpp  |  6
-rw-r--r--  src/share/vm/asm/assembler.cpp           | 19
-rw-r--r--  src/share/vm/asm/assembler.hpp           | 50
-rw-r--r--  src/share/vm/asm/assembler.inline.hpp    | 52
-rw-r--r--  src/share/vm/asm/codeBuffer.hpp          | 14
7 files changed, 68 insertions, 101 deletions
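
The change in miniature: AbstractAssembler used to cache _code_begin/_code_limit/_code_pos and had to sync() the cursor back into the CodeSection after every emit; after this patch it keeps only _code_section, derives pc()/offset()/addr_at() from it, and emits through CodeSection::emit_int8/16/32/64. Below is a minimal stand-alone sketch of that ownership shift, not HotSpot code; ToySection, ToyAssembler, and the std::vector backing are illustrative stand-ins for CodeSection and AbstractAssembler.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Stand-in for CodeSection: the section owns the emit cursor.
    class ToySection {
      std::vector<uint8_t> _buf;
    public:
      size_t size() const { return _buf.size(); }
      // Emit helpers bump the section's own end, mirroring CodeSection::emit_intN.
      void emit_int8 (int8_t  x) { append(&x, sizeof x); }
      void emit_int16(int16_t x) { append(&x, sizeof x); }
      void emit_int32(int32_t x) { append(&x, sizeof x); }
      void emit_int64(int64_t x) { append(&x, sizeof x); }
    private:
      void append(const void* p, size_t n) {
        const uint8_t* b = static_cast<const uint8_t*>(p);
        _buf.insert(_buf.end(), b, b + n);
      }
    };

    // Stand-in for the post-patch AbstractAssembler: no cached _code_pos, no sync();
    // position queries go straight to the section.
    class ToyAssembler {
      ToySection* _code_section;
    public:
      explicit ToyAssembler(ToySection* cs) : _code_section(cs) {}
      ToySection* code_section() const { return _code_section; }
      size_t offset() const { return code_section()->size(); }  // was _code_pos - _code_begin
      void emit_int8 (int8_t  x) { code_section()->emit_int8 (x); }
      void emit_int32(int32_t x) { code_section()->emit_int32(x); }
    };

    int main() {
      ToySection cs;
      ToyAssembler a(&cs);
      a.emit_int8(static_cast<int8_t>(0xE8));  // opcode byte
      a.emit_int32(0);                         // 32-bit displacement placeholder
      assert(a.offset() == 5);                 // cursor lives in the section
      return 0;
    }

The real CodeSection writes into a pre-allocated buffer and bumps end() via set_end(); the vector here only stands in for that bookkeeping.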
diff --git a/src/cpu/x86/vm/assembler_x86.cpp b/src/cpu/x86/vm/assembler_x86.cpp
index 6b9677d30..bd7af9d63 100644
--- a/src/cpu/x86/vm/assembler_x86.cpp
+++ b/src/cpu/x86/vm/assembler_x86.cpp
@@ -1154,7 +1154,7 @@ void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
assert(entry != NULL, "call most probably wrong");
InstructionMark im(this);
emit_byte(0xE8);
- intptr_t disp = entry - (_code_pos + sizeof(int32_t));
+ intptr_t disp = entry - (pc() + sizeof(int32_t));
assert(is_simm32(disp), "must be 32bit offset (call2)");
// Technically, should use call32_operand, but this format is
// implied by the fact that we're emitting a call instruction.
@@ -1417,7 +1417,7 @@ void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
const int short_size = 2;
const int long_size = 6;
- intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
+ intptr_t offs = (intptr_t)dst - (intptr_t)pc();
if (maybe_short && is8bit(offs - short_size)) {
// 0111 tttn #8-bit disp
emit_byte(0x70 | cc);
@@ -1447,14 +1447,14 @@ void Assembler::jccb(Condition cc, Label& L) {
const int short_size = 2;
address entry = target(L);
#ifdef ASSERT
- intptr_t dist = (intptr_t)entry - ((intptr_t)_code_pos + short_size);
+ intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
intptr_t delta = short_branch_delta();
if (delta != 0) {
dist += (dist < 0 ? (-delta) :delta);
}
assert(is8bit(dist), "Dispacement too large for a short jmp");
#endif
- intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
+ intptr_t offs = (intptr_t)entry - (intptr_t)pc();
// 0111 tttn #8-bit disp
emit_byte(0x70 | cc);
emit_byte((offs - short_size) & 0xFF);
@@ -1480,7 +1480,7 @@ void Assembler::jmp(Label& L, bool maybe_short) {
InstructionMark im(this);
const int short_size = 2;
const int long_size = 5;
- intptr_t offs = entry - _code_pos;
+ intptr_t offs = entry - pc();
if (maybe_short && is8bit(offs - short_size)) {
emit_byte(0xEB);
emit_byte((offs - short_size) & 0xFF);
@@ -1510,7 +1510,7 @@ void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
InstructionMark im(this);
emit_byte(0xE9);
assert(dest != NULL, "must have a target");
- intptr_t disp = dest - (_code_pos + sizeof(int32_t));
+ intptr_t disp = dest - (pc() + sizeof(int32_t));
assert(is_simm32(disp), "must be 32bit offset (jmp)");
emit_data(disp, rspec.reloc(), call32_operand);
}
@@ -1521,14 +1521,14 @@ void Assembler::jmpb(Label& L) {
address entry = target(L);
assert(entry != NULL, "jmp most probably wrong");
#ifdef ASSERT
- intptr_t dist = (intptr_t)entry - ((intptr_t)_code_pos + short_size);
+ intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
intptr_t delta = short_branch_delta();
if (delta != 0) {
dist += (dist < 0 ? (-delta) :delta);
}
assert(is8bit(dist), "Dispacement too large for a short jmp");
#endif
- intptr_t offs = entry - _code_pos;
+ intptr_t offs = entry - pc();
emit_byte(0xEB);
emit_byte((offs - short_size) & 0xFF);
} else {
@@ -4361,7 +4361,7 @@ bool Assembler::reachable(AddressLiteral adr) {
disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
if (!is_simm32(disp)) return false;
- disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int));
+ disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));
// Because rip relative is a disp + address_of_next_instruction and we
// don't know the value of address_of_next_instruction we apply a fudge factor
@@ -4392,7 +4392,7 @@ void Assembler::emit_data64(jlong data,
relocInfo::relocType rtype,
int format) {
if (rtype == relocInfo::none) {
- emit_long64(data);
+ emit_int64(data);
} else {
emit_data64(data, Relocation::spec_simple(rtype), format);
}
@@ -4410,7 +4410,7 @@ void Assembler::emit_data64(jlong data,
#ifdef ASSERT
check_relocation(rspec, format);
#endif
- emit_long64(data);
+ emit_int64(data);
}
int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
@@ -4943,7 +4943,7 @@ void Assembler::mov64(Register dst, int64_t imm64) {
InstructionMark im(this);
int encode = prefixq_and_encode(dst->encoding());
emit_byte(0xB8 | encode);
- emit_long64(imm64);
+ emit_int64(imm64);
}
void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
@@ -7891,7 +7891,7 @@ void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
relocate(dst.reloc());
const int short_size = 2;
const int long_size = 6;
- int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos);
+ int offs = (intptr_t)dst.target() - ((intptr_t)pc());
if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
// 0111 tttn #8-bit disp
emit_byte(0x70 | cc);
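
In the x86 hunks above, every read of the cached _code_pos becomes a call to pc(), which after this patch resolves to code_section()->end(). A minimal sketch of the rel32 displacement math used by call_literal() and jmp_literal(), with plain integer addresses standing in for HotSpot's address type:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static bool is_simm32(intptr_t x) {
      return x == static_cast<intptr_t>(static_cast<int32_t>(x));
    }

    // 'target' is the call destination, 'pc' is code_section()->end() at the point
    // where the 4-byte immediate will be written (right after the 0xE8 opcode).
    // The displacement is measured from the end of that immediate.
    static int32_t rel32_disp(uintptr_t target, uintptr_t pc) {
      intptr_t disp = static_cast<intptr_t>(target) -
                      static_cast<intptr_t>(pc + sizeof(int32_t));
      assert(is_simm32(disp) && "must be 32bit offset");
      return static_cast<int32_t>(disp);
    }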
diff --git a/src/cpu/x86/vm/assembler_x86.hpp b/src/cpu/x86/vm/assembler_x86.hpp
index 8a9bbaf42..e2843b920 100644
--- a/src/cpu/x86/vm/assembler_x86.hpp
+++ b/src/cpu/x86/vm/assembler_x86.hpp
@@ -706,8 +706,6 @@ private:
void check_relocation(RelocationHolder const& rspec, int format);
#endif
- inline void emit_long64(jlong x);
-
void emit_data(jint data, relocInfo::relocType rtype, int format);
void emit_data(jint data, RelocationHolder const& rspec, int format);
void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
diff --git a/src/cpu/x86/vm/assembler_x86.inline.hpp b/src/cpu/x86/vm/assembler_x86.inline.hpp
index bf299c6da..cfb3b7e61 100644
--- a/src/cpu/x86/vm/assembler_x86.inline.hpp
+++ b/src/cpu/x86/vm/assembler_x86.inline.hpp
@@ -87,12 +87,6 @@ inline void Assembler::prefixq(Address adr, Register reg) {}
inline void Assembler::prefix(Address adr, XMMRegister reg) {}
inline void Assembler::prefixq(Address adr, XMMRegister reg) {}
-#else
-inline void Assembler::emit_long64(jlong x) {
- *(jlong*) _code_pos = x;
- _code_pos += sizeof(jlong);
- code_section()->set_end(_code_pos);
-}
#endif // _LP64
#endif // CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP
diff --git a/src/share/vm/asm/assembler.cpp b/src/share/vm/asm/assembler.cpp
index 479ae3532..5c2acdf78 100644
--- a/src/share/vm/asm/assembler.cpp
+++ b/src/share/vm/asm/assembler.cpp
@@ -56,16 +56,13 @@ AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
if (code == NULL) return;
CodeSection* cs = code->insts();
cs->clear_mark(); // new assembler kills old mark
- _code_section = cs;
- _code_begin = cs->start();
- _code_limit = cs->limit();
- _code_pos = cs->end();
- _oop_recorder= code->oop_recorder();
- DEBUG_ONLY( _short_branch_delta = 0; )
- if (_code_begin == NULL) {
+ if (cs->start() == NULL) {
vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
code->name()));
}
+ _code_section = cs;
+ _oop_recorder= code->oop_recorder();
+ DEBUG_ONLY( _short_branch_delta = 0; )
}
void AbstractAssembler::set_code_section(CodeSection* cs) {
@@ -73,9 +70,6 @@ void AbstractAssembler::set_code_section(CodeSection* cs) {
assert(cs->is_allocated(), "need to pre-allocate this section");
cs->clear_mark(); // new assembly into this section kills old mark
_code_section = cs;
- _code_begin = cs->start();
- _code_limit = cs->limit();
- _code_pos = cs->end();
}
// Inform CodeBuffer that incoming code and relocation will be for stubs
@@ -83,7 +77,6 @@ address AbstractAssembler::start_a_stub(int required_space) {
CodeBuffer* cb = code();
CodeSection* cs = cb->stubs();
assert(_code_section == cb->insts(), "not in insts?");
- sync();
if (cs->maybe_expand_to_ensure_remaining(required_space)
&& cb->blob() == NULL) {
return NULL;
@@ -96,7 +89,6 @@ address AbstractAssembler::start_a_stub(int required_space) {
// Should not be called if start_a_stub() returned NULL
void AbstractAssembler::end_a_stub() {
assert(_code_section == code()->stubs(), "not in stubs?");
- sync();
set_code_section(code()->insts());
}
@@ -105,7 +97,6 @@ address AbstractAssembler::start_a_const(int required_space, int required_align)
CodeBuffer* cb = code();
CodeSection* cs = cb->consts();
assert(_code_section == cb->insts() || _code_section == cb->stubs(), "not in insts/stubs?");
- sync();
address end = cs->end();
int pad = -(intptr_t)end & (required_align-1);
if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
@@ -124,12 +115,10 @@ address AbstractAssembler::start_a_const(int required_space, int required_align)
// in section cs (insts or stubs).
void AbstractAssembler::end_a_const(CodeSection* cs) {
assert(_code_section == code()->consts(), "not in consts?");
- sync();
set_code_section(cs);
}
void AbstractAssembler::flush() {
- sync();
ICache::invalidate_range(addr_at(0), offset());
}
diff --git a/src/share/vm/asm/assembler.hpp b/src/share/vm/asm/assembler.hpp
index 6ca4a454e..c283fc083 100644
--- a/src/share/vm/asm/assembler.hpp
+++ b/src/share/vm/asm/assembler.hpp
@@ -201,13 +201,10 @@ class AbstractAssembler : public ResourceObj {
protected:
CodeSection* _code_section; // section within the code buffer
- address _code_begin; // first byte of code buffer
- address _code_limit; // first byte after code buffer
- address _code_pos; // current code generation position
OopRecorder* _oop_recorder; // support for relocInfo::oop_type
// Code emission & accessing
- address addr_at(int pos) const { return _code_begin + pos; }
+ inline address addr_at(int pos) const;
// This routine is called with a label is used for an address.
// Labels and displacements truck in offsets, but target must return a PC.
@@ -217,10 +214,18 @@ class AbstractAssembler : public ResourceObj {
bool isByte(int x) const { return 0 <= x && x < 0x100; }
bool isShiftCount(int x) const { return 0 <= x && x < 32; }
- void emit_byte(int x); // emit a single byte
- void emit_word(int x); // emit a 16-bit word (not a wordSize word!)
- void emit_long(jint x); // emit a 32-bit word (not a longSize word!)
- void emit_address(address x); // emit an address (not a longSize word!)
+ void emit_byte(int x) { emit_int8 (x); } // deprecated
+ void emit_word(int x) { emit_int16(x); } // deprecated
+ void emit_long(jint x) { emit_int32(x); } // deprecated
+
+ inline void emit_int8( int8_t x);
+ inline void emit_int16( int16_t x);
+ inline void emit_int32( int32_t x);
+ inline void emit_int64( int64_t x);
+
+ inline void emit_float( jfloat x);
+ inline void emit_double(jdouble x);
+ inline void emit_address(address x);
// Instruction boundaries (required when emitting relocatable values).
class InstructionMark: public StackObj {
@@ -278,9 +283,6 @@ class AbstractAssembler : public ResourceObj {
// Creation
AbstractAssembler(CodeBuffer* code);
- // save end pointer back to code buf.
- void sync();
-
// ensure buf contains all code (call this before using/copying the code)
void flush();
@@ -308,12 +310,13 @@ class AbstractAssembler : public ResourceObj {
static bool is_simm32(intptr_t x) { return is_simm(x, 32); }
// Accessors
- CodeBuffer* code() const; // _code_section->outer()
CodeSection* code_section() const { return _code_section; }
- int sect() const; // return _code_section->index()
- address pc() const { return _code_pos; }
- int offset() const { return _code_pos - _code_begin; }
- int locator() const; // CodeBuffer::locator(offset(), sect())
+ inline CodeBuffer* code() const;
+ inline int sect() const;
+ inline address pc() const;
+ inline int offset() const;
+ inline int locator() const; // CodeBuffer::locator(offset(), sect())
+
OopRecorder* oop_recorder() const { return _oop_recorder; }
void set_oop_recorder(OopRecorder* r) { _oop_recorder = r; }
@@ -358,8 +361,7 @@ class AbstractAssembler : public ResourceObj {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
- *(jlong*)ptr = c;
- _code_pos = ptr + sizeof(c);
+ emit_int64(c);
end_a_const(c1);
}
return ptr;
@@ -368,8 +370,7 @@ class AbstractAssembler : public ResourceObj {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
- *(jdouble*)ptr = c;
- _code_pos = ptr + sizeof(c);
+ emit_double(c);
end_a_const(c1);
}
return ptr;
@@ -378,8 +379,7 @@ class AbstractAssembler : public ResourceObj {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
- *(jfloat*)ptr = c;
- _code_pos = ptr + sizeof(c);
+ emit_float(c);
end_a_const(c1);
}
return ptr;
@@ -388,8 +388,7 @@ class AbstractAssembler : public ResourceObj {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
- *(address*)ptr = c;
- _code_pos = ptr + sizeof(c);
+ emit_address(c);
end_a_const(c1);
}
return ptr;
@@ -399,8 +398,7 @@ class AbstractAssembler : public ResourceObj {
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
relocate(rspec);
- *(address*)ptr = c;
- _code_pos = ptr + sizeof(c);
+ emit_address(c);
end_a_const(c1);
}
return ptr;
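
The xxx_constant() rewrites above rely on an invariant worth stating: start_a_const() pads the consts section and returns its new end(), so emit_int64()/emit_float()/emit_double()/emit_address() write exactly where the old code stored through the returned ptr, and they advance the section end as a side effect. A toy model of that invariant, with made-up names (Section, reserve_aligned) standing in for CodeSection and start_a_const(); note the real code aligns the absolute end address, while this toy aligns the offset for simplicity:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct Section {
      uint8_t  buf[64];
      size_t   used = 0;
      uint8_t* end()               { return buf + used; }
      void     set_end(uint8_t* e) { used = static_cast<size_t>(e - buf); }
      void emit_int64(int64_t x) { std::memcpy(end(), &x, sizeof x); set_end(end() + sizeof x); }
    };

    // Plays the role of start_a_const(): pad to alignment, hand back the current end.
    static uint8_t* reserve_aligned(Section& s, size_t align) {
      size_t pad = static_cast<size_t>(-static_cast<intptr_t>(s.used) & (align - 1));
      s.set_end(s.end() + pad);   // emit padding (zero-fill omitted in this toy)
      return s.end();             // caller's 'ptr': exactly where the next emit lands
    }

    int main() {
      Section s;
      s.used = 3;                                // pretend some bytes are already emitted
      uint8_t* ptr = reserve_aligned(s, 8);      // like start_a_const(sizeof(jlong), 8)
      s.emit_int64(0x1122334455667788LL);        // new code path: emit at end() == ptr
      assert(ptr == s.buf + 8);                  // padded from offset 3 up to 8
      assert(s.end() == ptr + sizeof(int64_t));  // end advanced past the constant
      return 0;
    }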
diff --git a/src/share/vm/asm/assembler.inline.hpp b/src/share/vm/asm/assembler.inline.hpp
index 6eede4445..bcb03fbd7 100644
--- a/src/share/vm/asm/assembler.inline.hpp
+++ b/src/share/vm/asm/assembler.inline.hpp
@@ -30,49 +30,27 @@
#include "compiler/disassembler.hpp"
#include "runtime/threadLocalStorage.hpp"
-inline void AbstractAssembler::sync() {
- CodeSection* cs = code_section();
- guarantee(cs->start() == _code_begin, "must not shift code buffer");
- cs->set_end(_code_pos);
+inline address AbstractAssembler::addr_at(int pos) const {
+ return code_section()->start() + pos;
}
-inline void AbstractAssembler::emit_byte(int x) {
- assert(isByte(x), "not a byte");
- *(unsigned char*)_code_pos = (unsigned char)x;
- _code_pos += sizeof(unsigned char);
- sync();
-}
-
-
-inline void AbstractAssembler::emit_word(int x) {
- *(short*)_code_pos = (short)x;
- _code_pos += sizeof(short);
- sync();
-}
-
-
-inline void AbstractAssembler::emit_long(jint x) {
- *(jint*)_code_pos = x;
- _code_pos += sizeof(jint);
- sync();
-}
+void AbstractAssembler::emit_int8(int8_t x) { code_section()->emit_int8 (x); }
+void AbstractAssembler::emit_int16(int16_t x) { code_section()->emit_int16(x); }
+void AbstractAssembler::emit_int32(int32_t x) { code_section()->emit_int32(x); }
+void AbstractAssembler::emit_int64(int64_t x) { code_section()->emit_int64(x); }
-inline void AbstractAssembler::emit_address(address x) {
- *(address*)_code_pos = x;
- _code_pos += sizeof(address);
- sync();
-}
+void AbstractAssembler::emit_float(jfloat x) { code_section()->emit_float(x); }
+void AbstractAssembler::emit_double(jdouble x) { code_section()->emit_double(x); }
+void AbstractAssembler::emit_address(address x) { code_section()->emit_address(x); }
inline address AbstractAssembler::inst_mark() const {
return code_section()->mark();
}
-
inline void AbstractAssembler::set_inst_mark() {
code_section()->set_mark();
}
-
inline void AbstractAssembler::clear_inst_mark() {
code_section()->clear_mark();
}
@@ -80,9 +58,9 @@ inline void AbstractAssembler::clear_inst_mark() {
inline void AbstractAssembler::relocate(RelocationHolder const& rspec, int format) {
assert(!pd_check_instruction_mark()
- || inst_mark() == NULL || inst_mark() == _code_pos,
+ || inst_mark() == NULL || inst_mark() == code_section()->end(),
"call relocate() between instructions");
- code_section()->relocate(_code_pos, rspec, format);
+ code_section()->relocate(code_section()->end(), rspec, format);
}
@@ -94,6 +72,14 @@ inline int AbstractAssembler::sect() const {
return code_section()->index();
}
+inline address AbstractAssembler::pc() const {
+ return code_section()->end();
+}
+
+inline int AbstractAssembler::offset() const {
+ return code_section()->size();
+}
+
inline int AbstractAssembler::locator() const {
return CodeBuffer::locator(offset(), sect());
}
diff --git a/src/share/vm/asm/codeBuffer.hpp b/src/share/vm/asm/codeBuffer.hpp
index 63dba2dbb..18f5ffc50 100644
--- a/src/share/vm/asm/codeBuffer.hpp
+++ b/src/share/vm/asm/codeBuffer.hpp
@@ -30,8 +30,6 @@
#include "code/relocInfo.hpp"
class CodeComments;
-class AbstractAssembler;
-class MacroAssembler;
class PhaseCFG;
class Compile;
class BufferBlob;
@@ -194,10 +192,14 @@ class CodeSection VALUE_OBJ_CLASS_SPEC {
}
// Code emission
- void emit_int8 (int8_t x) { *((int8_t*) end()) = x; set_end(end() + 1); }
- void emit_int16(int16_t x) { *((int16_t*) end()) = x; set_end(end() + 2); }
- void emit_int32(int32_t x) { *((int32_t*) end()) = x; set_end(end() + 4); }
- void emit_int64(int64_t x) { *((int64_t*) end()) = x; set_end(end() + 8); }
+ void emit_int8 ( int8_t x) { *((int8_t*) end()) = x; set_end(end() + sizeof(int8_t)); }
+ void emit_int16( int16_t x) { *((int16_t*) end()) = x; set_end(end() + sizeof(int16_t)); }
+ void emit_int32( int32_t x) { *((int32_t*) end()) = x; set_end(end() + sizeof(int32_t)); }
+ void emit_int64( int64_t x) { *((int64_t*) end()) = x; set_end(end() + sizeof(int64_t)); }
+
+ void emit_float( jfloat x) { *((jfloat*) end()) = x; set_end(end() + sizeof(jfloat)); }
+ void emit_double(jdouble x) { *((jdouble*) end()) = x; set_end(end() + sizeof(jdouble)); }
+ void emit_address(address x) { *((address*) end()) = x; set_end(end() + sizeof(address)); }
// Share a scratch buffer for relocinfo. (Hacky; saves a resource allocation.)
void initialize_shared_locs(relocInfo* buf, int length);