path: root/src/share/vm/c1
author"Andrew Dinn ext:(%22) <adinn@redhat.com>2012-07-10 15:26:12 +0100
committer"Andrew Dinn ext:(%22) <adinn@redhat.com>2012-07-10 15:26:12 +0100
commitdaa2a922da8df8dca6ff8230bbd4757e7c251f2c (patch)
tree444843350362659ea185d53caffb241b46a7921b /src/share/vm/c1
parent60bd673086dc76432c7cd5221a22115296d08428 (diff)
Removed x86 code to create skeleton aarch64 tree
Renamed all files in the cpu and os_cpu trees with _x86 in their name to use aarch64 instead.
Modified all shared files to include the aarch64-specific files under the new name when TARGET_ARCH_aarch64 is defined -- or, alternatively, when TARGET_ARCH_MODEL_aarch64 is defined in cases where there was a split between including x86_32 and x86_64 code.
Modified the make system to build an aarch64 server target with only the C1 compiler (as yet, for execution on the amd64 platform) when SRCARCH=aarch64 is defined on the make command line.
Gutted all x86-specific content from the new aarch64 headers/sources, including static init, and inserted a call to Undefined() in all method bodies.
Added the missing definition of class MacroAssembler and a few other necessary classes to assembler_aarch64.cpp and provided dummy stubs to allow the Assembler to be driven.
Removed the test code call which was in the template interpreter (from method generate_AARM64_loop()).
Added a new file, aarch64Test.cpp, which provides a test hook method to drive the test method entry() found in assembler_aarch64.cpp and then exit.
Arranged for this test hook method to be called under the jvm bootstrap init method at the first call into arch-specific code (in icache_aarch64.cpp).
Added a minimal aarch64.ad architecture definition file, but this is not really needed since we are only building a C1 runtime.
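The shared-file changes below all follow the same conditional-include pattern: an aarch64 block is added alongside the existing per-architecture blocks. A minimal sketch of the resulting shape of each touched header, using c1_Defs.hpp as the example (other files differ only in the names of the included headers):

    // per-architecture include selection in a shared C1 header
    #ifdef TARGET_ARCH_x86
    # include "c1_Defs_x86.hpp"
    #endif
    #ifdef TARGET_ARCH_aarch64        // block added by this commit
    # include "c1_Defs_aarch64.hpp"
    #endif
    #ifdef TARGET_ARCH_sparc
    # include "c1_Defs_sparc.hpp"
    #endif

In the shared .cpp files, x86-only XMM register handling is additionally fenced off with #ifndef TARGET_ARCH_aarch64, since the aarch64 build defines X86 code paths it cannot yet support.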
Diffstat (limited to 'src/share/vm/c1')
-rw-r--r--  src/share/vm/c1/c1_Defs.hpp            6
-rw-r--r--  src/share/vm/c1/c1_FpuStackSim.hpp     3
-rw-r--r--  src/share/vm/c1/c1_FrameMap.cpp        3
-rw-r--r--  src/share/vm/c1/c1_FrameMap.hpp        3
-rw-r--r--  src/share/vm/c1/c1_LIR.cpp             4
-rw-r--r--  src/share/vm/c1/c1_LIR.hpp             2
-rw-r--r--  src/share/vm/c1/c1_LIRAssembler.cpp    4
-rw-r--r--  src/share/vm/c1/c1_LIRAssembler.hpp    3
-rw-r--r--  src/share/vm/c1/c1_LinearScan.cpp      21
-rw-r--r--  src/share/vm/c1/c1_LinearScan.hpp      6
-rw-r--r--  src/share/vm/c1/c1_MacroAssembler.hpp  6
-rw-r--r--  src/share/vm/c1/c1_globals.hpp         3
12 files changed, 62 insertions, 2 deletions
diff --git a/src/share/vm/c1/c1_Defs.hpp b/src/share/vm/c1/c1_Defs.hpp
index bebb3b0be..b0cd76373 100644
--- a/src/share/vm/c1/c1_Defs.hpp
+++ b/src/share/vm/c1/c1_Defs.hpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "register_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "register_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "register_sparc.hpp"
#endif
@@ -53,6 +56,9 @@ enum {
#ifdef TARGET_ARCH_x86
# include "c1_Defs_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_Defs_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_Defs_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_FpuStackSim.hpp b/src/share/vm/c1/c1_FpuStackSim.hpp
index a1e4c3832..f07e97a4d 100644
--- a/src/share/vm/c1/c1_FpuStackSim.hpp
+++ b/src/share/vm/c1/c1_FpuStackSim.hpp
@@ -35,6 +35,9 @@ class FpuStackSim;
#ifdef TARGET_ARCH_x86
# include "c1_FpuStackSim_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_FpuStackSim_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_FpuStackSim_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_FrameMap.cpp b/src/share/vm/c1/c1_FrameMap.cpp
index ea50b276f..660534665 100644
--- a/src/share/vm/c1/c1_FrameMap.cpp
+++ b/src/share/vm/c1/c1_FrameMap.cpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vmreg_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
diff --git a/src/share/vm/c1/c1_FrameMap.hpp b/src/share/vm/c1/c1_FrameMap.hpp
index 288fc5c1b..1b3c68b95 100644
--- a/src/share/vm/c1/c1_FrameMap.hpp
+++ b/src/share/vm/c1/c1_FrameMap.hpp
@@ -85,6 +85,9 @@ class FrameMap : public CompilationResourceObj {
#ifdef TARGET_ARCH_x86
# include "c1_FrameMap_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_FrameMap_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_FrameMap_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_LIR.cpp b/src/share/vm/c1/c1_LIR.cpp
index c85e71f99..a00012b4f 100644
--- a/src/share/vm/c1/c1_LIR.cpp
+++ b/src/share/vm/c1/c1_LIR.cpp
@@ -43,6 +43,7 @@ Register LIR_OprDesc::as_register_hi() const {
}
#if defined(X86)
+#ifndef TARGET_ARCH_aarch64
XMMRegister LIR_OprDesc::as_xmm_float_reg() const {
return FrameMap::nr2xmmreg(xmm_regnr());
@@ -53,6 +54,7 @@ XMMRegister LIR_OprDesc::as_xmm_double_reg() const {
return FrameMap::nr2xmmreg(xmm_regnrLo());
}
+#endif
#endif // X86
#if defined(SPARC) || defined(PPC)
@@ -1499,10 +1501,12 @@ void LIR_OprDesc::print(outputStream* out) const {
out->print(as_register_hi()->name());
out->print(as_register_lo()->name());
#if defined(X86)
+#ifndef TARGET_ARCH_aarch64
} else if (is_single_xmm()) {
out->print(as_xmm_float_reg()->name());
} else if (is_double_xmm()) {
out->print(as_xmm_double_reg()->name());
+#endif
} else if (is_single_fpu()) {
out->print("fpu%d", fpu_regnr());
} else if (is_double_fpu()) {
diff --git a/src/share/vm/c1/c1_LIR.hpp b/src/share/vm/c1/c1_LIR.hpp
index eb9ff9a5f..9ab60eef0 100644
--- a/src/share/vm/c1/c1_LIR.hpp
+++ b/src/share/vm/c1/c1_LIR.hpp
@@ -433,10 +433,12 @@ class LIR_OprDesc: public CompilationResourceObj {
}
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
XMMRegister as_xmm_float_reg() const;
XMMRegister as_xmm_double_reg() const;
// for compatibility with RInfo
int fpu () const { return lo_reg_half(); }
+#endif
#endif // X86
#if defined(SPARC) || defined(ARM) || defined(PPC)
FloatRegister as_float_reg () const;
diff --git a/src/share/vm/c1/c1_LIRAssembler.cpp b/src/share/vm/c1/c1_LIRAssembler.cpp
index b3082cb0c..180b0409c 100644
--- a/src/share/vm/c1/c1_LIRAssembler.cpp
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp
@@ -34,6 +34,10 @@
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "nativeInst_aarch64.hpp"
+# include "vmreg_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
diff --git a/src/share/vm/c1/c1_LIRAssembler.hpp b/src/share/vm/c1/c1_LIRAssembler.hpp
index 58adf5919..24348c1ad 100644
--- a/src/share/vm/c1/c1_LIRAssembler.hpp
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp
@@ -252,6 +252,9 @@ class LIR_Assembler: public CompilationResourceObj {
#ifdef TARGET_ARCH_x86
# include "c1_LIRAssembler_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_LIRAssembler_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_LIRAssembler_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_LinearScan.cpp b/src/share/vm/c1/c1_LinearScan.cpp
index 56f946365..d6cd728e8 100644
--- a/src/share/vm/c1/c1_LinearScan.cpp
+++ b/src/share/vm/c1/c1_LinearScan.cpp
@@ -35,6 +35,9 @@
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vmreg_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
@@ -137,11 +140,13 @@ int LinearScan::reg_num(LIR_Opr opr) {
} else if (opr->is_double_cpu()) {
return opr->cpu_regnrLo();
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
} else if (opr->is_single_xmm()) {
return opr->fpu_regnr() + pd_first_xmm_reg;
} else if (opr->is_double_xmm()) {
return opr->fpu_regnrLo() + pd_first_xmm_reg;
#endif
+#endif
} else if (opr->is_single_fpu()) {
return opr->fpu_regnr() + pd_first_fpu_reg;
} else if (opr->is_double_fpu()) {
@@ -1292,8 +1297,10 @@ void LinearScan::build_intervals() {
// perfomed and so the temp ranges would be useless
if (has_fpu_registers()) {
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
if (UseSSE < 2) {
#endif
+#endif
for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
@@ -1301,6 +1308,7 @@ void LinearScan::build_intervals() {
caller_save_registers[num_caller_save_registers++] = reg_num(opr);
}
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
}
if (UseSSE > 0) {
for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) {
@@ -1311,6 +1319,7 @@ void LinearScan::build_intervals() {
}
}
#endif
+#endif
}
assert(num_caller_save_registers <= LinearScan::nof_regs, "out of bounds");
@@ -2107,12 +2116,14 @@ LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
#ifndef __SOFTFP__
case T_FLOAT: {
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
if (UseSSE >= 1) {
assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
assert(interval->assigned_regHi() == any_reg, "must not have hi register");
return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);
}
#endif
+#endif
assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
assert(interval->assigned_regHi() == any_reg, "must not have hi register");
@@ -2121,12 +2132,14 @@ LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
case T_DOUBLE: {
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
if (UseSSE >= 2) {
assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
}
#endif
+#endif
#ifdef SPARC
assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
@@ -2602,6 +2615,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
return 1;
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
} else if (opr->is_single_xmm()) {
VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));
@@ -2609,6 +2623,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
scope_values->append(sv);
return 1;
#endif
+#endif
} else if (opr->is_single_fpu()) {
#ifdef X86
@@ -2692,6 +2707,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
} else if (opr->is_double_xmm()) {
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg();
@@ -2707,6 +2723,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
}
# endif
#endif
+#endif
} else if (opr->is_double_fpu()) {
// On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
@@ -3617,10 +3634,12 @@ void RegisterVerifier::process_operations(LIR_List* ops, IntervalList* input_sta
}
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
}
#endif
+#endif
}
// process xhandler before output and temp operands
@@ -4536,9 +4555,11 @@ void Interval::print(outputStream* out) const {
} else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
} else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
#endif
+#endif
} else {
ShouldNotReachHere();
}
diff --git a/src/share/vm/c1/c1_LinearScan.hpp b/src/share/vm/c1/c1_LinearScan.hpp
index 0c06f1b01..75042cd47 100644
--- a/src/share/vm/c1/c1_LinearScan.hpp
+++ b/src/share/vm/c1/c1_LinearScan.hpp
@@ -128,8 +128,7 @@ class LinearScan : public CompilationResourceObj {
any_reg = -1,
nof_cpu_regs = pd_nof_cpu_regs_linearscan,
nof_fpu_regs = pd_nof_fpu_regs_linearscan,
- nof_xmm_regs = pd_nof_xmm_regs_linearscan,
- nof_regs = nof_cpu_regs + nof_fpu_regs + nof_xmm_regs
+ nof_regs = nof_cpu_regs + nof_fpu_regs
};
private:
@@ -976,6 +975,9 @@ class LinearScanTimers : public StackObj {
#ifdef TARGET_ARCH_x86
# include "c1_LinearScan_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_LinearScan_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_LinearScan_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_MacroAssembler.hpp b/src/share/vm/c1/c1_MacroAssembler.hpp
index 55d980350..28fa682fd 100644
--- a/src/share/vm/c1/c1_MacroAssembler.hpp
+++ b/src/share/vm/c1/c1_MacroAssembler.hpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "assembler_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
@@ -64,6 +67,9 @@ class C1_MacroAssembler: public MacroAssembler {
#ifdef TARGET_ARCH_x86
# include "c1_MacroAssembler_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_MacroAssembler_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_MacroAssembler_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_globals.hpp b/src/share/vm/c1/c1_globals.hpp
index 15f3cc10f..662890a99 100644
--- a/src/share/vm/c1/c1_globals.hpp
+++ b/src/share/vm/c1/c1_globals.hpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "c1_globals_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_globals_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_globals_sparc.hpp"
#endif