author"Andrew Dinn ext:(%22) <adinn@redhat.com>2012-07-10 15:26:12 +0100
committer"Andrew Dinn ext:(%22) <adinn@redhat.com>2012-07-10 15:26:12 +0100
commitdaa2a922da8df8dca6ff8230bbd4757e7c251f2c (patch)
tree444843350362659ea185d53caffb241b46a7921b
parent60bd673086dc76432c7cd5221a22115296d08428 (diff)
Removed x86 code to create skeleton aarch64 tree
Renamed all files in the cpu and os_cpu trees with _x86 in their name to use aarch64 instead.
Modified all shared files to include the aarch64-specific files under their new names when TARGET_ARCH_aarch64 is defined, or alternatively when TARGET_ARCH_MODEL_aarch64 is defined in cases where the includes were split between x86_32 and x86_64 code.
Modified the make system to build the aarch64 server target with only the C1 compiler (for now, for execution on the amd64 platform) when SRCARCH=aarch64 is defined on the make command line.
Gutted all x86-specific content from the new aarch64 headers/sources, including static init, and inserted a call to Unimplemented() in all method bodies.
Added the missing definition of class MacroAssembler and a few other necessary classes to assembler_aarch64.cpp, and provided dummy stubs so that the Assembler can be driven.
Removed the test code call which was in the template interpreter (from method generate_AARM64_loop()).
Added a new file, aarch64Test.cpp, which provides a test hook method to drive the test method entry() found in assembler_aarch64.cpp and then exit.
Arranged for this test hook to be called from the JVM bootstrap init method at the first call into arch-specific code (in icache_aarch64.cpp).
Added a minimal aarch64.ad architecture definition file, though this is not really needed since we are only building a C1 runtime.
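The shared-file changes follow one mechanical pattern throughout the patch. Below is a hedged sketch, not a verbatim excerpt: nativeInst_*.hpp stands in for any shared header, mirroring HotSpot's existing TARGET_ARCH_x86 include convention, and the gutted method body shows the Unimplemented() stubbing applied across the new aarch64 sources (MacroAssembler::align is one such stub in assembler_aarch64.cpp below).

// Arch-conditional include in a shared header, keyed on the new
// TARGET_ARCH_aarch64 macro alongside the existing x86 case:
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "nativeInst_aarch64.hpp"
#endif

// A gutted aarch64 method body: the x86-derived signature is kept,
// but the body traps via Unimplemented() until it is actually ported.
void MacroAssembler::align(int modulus) { Unimplemented(); }

The corresponding build invocation would then be along the lines of make SRCARCH=aarch64 all_debug, with the all_debug -> jvmg1 wiring visible in the make/Makefile hunk below.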
-rw-r--r-- agent/src/os/linux/LinuxDebuggerLocal.c | 11
-rw-r--r-- make/Makefile | 29
-rw-r--r-- make/defs.make | 6
-rw-r--r-- make/linux/makefiles/aarch64.make | 34
-rw-r--r-- make/linux/makefiles/buildtree.make | 17
-rw-r--r-- make/linux/makefiles/defs.make | 12
-rw-r--r-- make/linux/platform_aarch64 | 15
-rw-r--r-- src/cpu/aarch64/vm/aarch64.ad | 514
-rw-r--r-- src/cpu/aarch64/vm/aarch64Test.cpp | 63
-rw-r--r-- src/cpu/aarch64/vm/assembler_aarch64.cpp | 398
-rw-r--r-- src/cpu/aarch64/vm/assembler_aarch64.hpp | 1253
-rw-r--r-- src/cpu/aarch64/vm/assembler_aarch64.inline.hpp | 41
-rw-r--r-- src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.cpp | 55
-rw-r--r-- src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.hpp | 115
-rw-r--r-- src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.inline.hpp | 285
-rw-r--r-- src/cpu/aarch64/vm/bytecodes_aarch64.cpp | 37
-rw-r--r-- src/cpu/aarch64/vm/bytecodes_aarch64.hpp | 30
-rw-r--r-- src/cpu/aarch64/vm/bytes_aarch64.hpp | 92
-rw-r--r-- src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp | 138
-rw-r--r-- src/cpu/aarch64/vm/c1_Defs_aarch64.hpp | 73
-rw-r--r-- src/cpu/aarch64/vm/c1_FpuStackSim_aarch64.cpp | 201
-rw-r--r-- src/cpu/aarch64/vm/c1_FpuStackSim_aarch64.hpp | 72
-rw-r--r-- src/cpu/aarch64/vm/c1_FrameMap_aarch64.cpp | 100
-rw-r--r-- src/cpu/aarch64/vm/c1_FrameMap_aarch64.hpp | 70
-rw-r--r-- src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp | 328
-rw-r--r-- src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp | 61
-rw-r--r-- src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp | 215
-rw-r--r-- src/cpu/aarch64/vm/c1_LinearScan_aarch64.cpp | 1241
-rw-r--r-- src/cpu/aarch64/vm/c1_LinearScan_aarch64.hpp | 141
-rw-r--r-- src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp | 86
-rw-r--r-- src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp | 113
-rw-r--r-- src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp | 155
-rw-r--r-- src/cpu/aarch64/vm/c1_globals_aarch64.hpp | 70
-rw-r--r-- src/cpu/aarch64/vm/c2_globals_aarch64.hpp | 97
-rw-r--r-- src/cpu/aarch64/vm/c2_init_aarch64.cpp | 40
-rw-r--r-- src/cpu/aarch64/vm/codeBuffer_aarch64.hpp | 34
-rw-r--r-- src/cpu/aarch64/vm/copy_aarch64.hpp | 80
-rw-r--r-- src/cpu/aarch64/vm/cppInterpreterGenerator_aarch64.hpp | 53
-rw-r--r-- src/cpu/aarch64/vm/cppInterpreter_aarch64.cpp | 2456
-rw-r--r-- src/cpu/aarch64/vm/cppInterpreter_aarch64.hpp | 38
-rw-r--r-- src/cpu/aarch64/vm/debug_aarch64.cpp | 34
-rw-r--r-- src/cpu/aarch64/vm/depChecker_aarch64.cpp | 29
-rw-r--r-- src/cpu/aarch64/vm/depChecker_aarch64.hpp | 30
-rw-r--r-- src/cpu/aarch64/vm/disassembler_aarch64.hpp | 36
-rw-r--r-- src/cpu/aarch64/vm/dump_aarch64.cpp | 62
-rw-r--r-- src/cpu/aarch64/vm/frame_aarch64.cpp | 150
-rw-r--r-- src/cpu/aarch64/vm/frame_aarch64.hpp | 214
-rw-r--r-- src/cpu/aarch64/vm/frame_aarch64.inline.hpp | 166
-rw-r--r-- src/cpu/aarch64/vm/globalDefinitions_aarch64.hpp | 30
-rw-r--r-- src/cpu/aarch64/vm/globals_aarch64.hpp | 81
-rw-r--r-- src/cpu/aarch64/vm/icBuffer_aarch64.cpp | 51
-rw-r--r-- src/cpu/aarch64/vm/icBuffer_aarch64.cpp~ | 51
-rw-r--r-- src/cpu/aarch64/vm/icache_aarch64.cpp | 76
-rw-r--r-- src/cpu/aarch64/vm/icache_aarch64.hpp | 60
-rw-r--r-- src/cpu/aarch64/vm/interp_masm_aarch64.cpp | 385
-rw-r--r-- src/cpu/aarch64/vm/interp_masm_aarch64.hpp | 217
-rw-r--r-- src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp | 49
-rw-r--r-- src/cpu/aarch64/vm/interpreterRT_aarch64.cpp | 123
-rw-r--r-- src/cpu/aarch64/vm/interpreterRT_aarch64.hpp | 81
-rw-r--r-- src/cpu/aarch64/vm/interpreter_aarch64.cpp | 79
-rw-r--r-- src/cpu/aarch64/vm/interpreter_aarch64.hpp | 45
-rw-r--r-- src/cpu/aarch64/vm/javaFrameAnchor_aarch64.hpp | 86
-rw-r--r-- src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp | 71
-rw-r--r-- src/cpu/aarch64/vm/jniTypes_aarch64.hpp | 133
-rw-r--r-- src/cpu/aarch64/vm/jni_aarch64.h | 63
-rw-r--r-- src/cpu/aarch64/vm/methodHandles_aarch64.cpp | 222
-rw-r--r-- src/cpu/aarch64/vm/methodHandles_aarch64.hpp | 297
-rw-r--r-- src/cpu/aarch64/vm/nativeInst_aarch64.cpp | 147
-rw-r--r-- src/cpu/aarch64/vm/nativeInst_aarch64.hpp | 319
-rw-r--r-- src/cpu/aarch64/vm/registerMap_aarch64.hpp | 44
-rw-r--r-- src/cpu/aarch64/vm/register_aarch64.cpp | 45
-rw-r--r-- src/cpu/aarch64/vm/register_aarch64.hpp | 3
-rw-r--r-- src/cpu/aarch64/vm/register_definitions_aarch64.cpp | 39
-rw-r--r-- src/cpu/aarch64/vm/relocInfo_aarch64.cpp | 57
-rw-r--r-- src/cpu/aarch64/vm/relocInfo_aarch64.hpp | 43
-rw-r--r-- src/cpu/aarch64/vm/runtime_aarch64.cpp | 47
-rw-r--r-- src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp | 383
-rw-r--r-- src/cpu/aarch64/vm/stubGenerator_aarch64.cpp | 626
-rw-r--r-- src/cpu/aarch64/vm/stubRoutines_aarch64.cpp | 58
-rw-r--r-- src/cpu/aarch64/vm/stubRoutines_aarch64.hpp | 121
-rw-r--r-- src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.hpp | 34
-rw-r--r-- src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp | 304
-rw-r--r-- src/cpu/aarch64/vm/templateInterpreter_aarch64.hpp | 42
-rw-r--r-- src/cpu/aarch64/vm/templateTable_aarch64.cpp | 505
-rw-r--r-- src/cpu/aarch64/vm/templateTable_aarch64.hpp | 36
-rw-r--r-- src/cpu/aarch64/vm/vmStructs_aarch64.hpp | 67
-rw-r--r-- src/cpu/aarch64/vm/vm_version_aarch64.cpp | 72
-rw-r--r-- src/cpu/aarch64/vm/vm_version_aarch64.cpp~ | 78
-rw-r--r-- src/cpu/aarch64/vm/vm_version_aarch64.hpp | 637
-rw-r--r-- src/cpu/aarch64/vm/vmreg_aarch64.cpp | 31
-rw-r--r-- src/cpu/aarch64/vm/vmreg_aarch64.hpp | 33
-rw-r--r-- src/cpu/aarch64/vm/vmreg_aarch64.inline.hpp | 74
-rw-r--r-- src/cpu/aarch64/vm/vtableStubs_aarch64.cpp | 57
-rw-r--r-- src/cpu/x86/vm/bytes_x86.hpp | 3
-rw-r--r-- src/cpu/x86/vm/copy_x86.hpp | 3
-rw-r--r-- src/os/linux/vm/osThread_linux.cpp | 3
-rw-r--r-- src/os/linux/vm/os_linux.cpp | 4
-rw-r--r-- src/os/linux/vm/os_linux.inline.hpp | 4
-rw-r--r-- src/os/linux/vm/thread_linux.inline.hpp | 5
-rw-r--r-- src/os_cpu/linux_aarch64/vm/assembler_linux_aarch64.cpp | 39
-rw-r--r-- src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp | 221
-rw-r--r-- src/os_cpu/linux_aarch64/vm/bytes_linux_aarch64.inline.hpp | 90
-rw-r--r-- src/os_cpu/linux_aarch64/vm/copy_linux_aarch64.inline.hpp | 309
-rw-r--r-- src/os_cpu/linux_aarch64/vm/globals_linux_aarch64.hpp | 52
-rw-r--r-- src/os_cpu/linux_aarch64/vm/linux_aarch64.ad | 68
-rw-r--r-- src/os_cpu/linux_aarch64/vm/linux_aarch64.s | 402
-rw-r--r-- src/os_cpu/linux_aarch64/vm/orderAccess_linux_aarch64.inline.hpp | 215
-rw-r--r-- src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp | 872
-rw-r--r-- src/os_cpu/linux_aarch64/vm/os_linux_aarch64.hpp | 39
-rw-r--r-- src/os_cpu/linux_aarch64/vm/os_linux_aarch64.inline.hpp | 46
-rw-r--r-- src/os_cpu/linux_aarch64/vm/prefetch_linux_aarch64.inline.hpp | 47
-rw-r--r-- src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.cpp | 99
-rw-r--r-- src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.hpp | 58
-rw-r--r-- src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp | 85
-rw-r--r-- src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.hpp | 70
-rw-r--r-- src/os_cpu/linux_aarch64/vm/vmStructs_linux_aarch64.hpp | 65
-rw-r--r-- src/os_cpu/linux_aarch64/vm/vm_version_linux_aarch64.cpp | 28
-rw-r--r-- src/share/vm/adlc/main.cpp | 5
-rw-r--r-- src/share/vm/asm/assembler.cpp | 3
-rw-r--r-- src/share/vm/asm/assembler.hpp | 2
-rw-r--r-- src/share/vm/asm/codeBuffer.hpp | 3
-rw-r--r-- src/share/vm/c1/c1_Defs.hpp | 6
-rw-r--r-- src/share/vm/c1/c1_FpuStackSim.hpp | 3
-rw-r--r-- src/share/vm/c1/c1_FrameMap.cpp | 3
-rw-r--r-- src/share/vm/c1/c1_FrameMap.hpp | 3
-rw-r--r-- src/share/vm/c1/c1_LIR.cpp | 4
-rw-r--r-- src/share/vm/c1/c1_LIR.hpp | 2
-rw-r--r-- src/share/vm/c1/c1_LIRAssembler.cpp | 4
-rw-r--r-- src/share/vm/c1/c1_LIRAssembler.hpp | 3
-rw-r--r-- src/share/vm/c1/c1_LinearScan.cpp | 21
-rw-r--r-- src/share/vm/c1/c1_LinearScan.hpp | 6
-rw-r--r-- src/share/vm/c1/c1_MacroAssembler.hpp | 6
-rw-r--r-- src/share/vm/c1/c1_globals.hpp | 3
-rw-r--r-- src/share/vm/classfile/classFileStream.hpp | 3
-rw-r--r-- src/share/vm/classfile/stackMapTable.hpp | 3
-rw-r--r-- src/share/vm/classfile/verifier.cpp | 3
-rw-r--r-- src/share/vm/code/codeBlob.cpp | 3
-rw-r--r-- src/share/vm/code/compiledIC.hpp | 3
-rw-r--r-- src/share/vm/code/icBuffer.cpp | 3
-rw-r--r-- src/share/vm/code/relocInfo.cpp | 4
-rw-r--r-- src/share/vm/code/relocInfo.hpp | 4
-rw-r--r-- src/share/vm/code/vmreg.hpp | 9
-rw-r--r-- src/share/vm/compiler/disassembler.cpp | 3
-rw-r--r-- src/share/vm/compiler/disassembler.hpp | 3
-rw-r--r-- src/share/vm/interpreter/abstractInterpreter.hpp | 3
-rw-r--r-- src/share/vm/interpreter/bytecode.hpp | 3
-rw-r--r-- src/share/vm/interpreter/bytecodeInterpreter.cpp | 3
-rw-r--r-- src/share/vm/interpreter/bytecodeInterpreter.hpp | 6
-rw-r--r-- src/share/vm/interpreter/bytecodeInterpreter.inline.hpp | 3
-rw-r--r-- src/share/vm/interpreter/bytecodeStream.hpp | 3
-rw-r--r-- src/share/vm/interpreter/bytecodes.cpp | 3
-rw-r--r-- src/share/vm/interpreter/bytecodes.hpp | 3
-rw-r--r-- src/share/vm/interpreter/cppInterpreter.hpp | 3
-rw-r--r-- src/share/vm/interpreter/cppInterpreterGenerator.hpp | 3
-rw-r--r-- src/share/vm/interpreter/interpreter.hpp | 3
-rw-r--r-- src/share/vm/interpreter/interpreterGenerator.hpp | 3
-rw-r--r-- src/share/vm/interpreter/interpreterRuntime.cpp | 3
-rw-r--r-- src/share/vm/interpreter/interpreterRuntime.hpp | 3
-rw-r--r-- src/share/vm/interpreter/templateInterpreter.cpp | 1
-rw-r--r-- src/share/vm/interpreter/templateInterpreter.hpp | 3
-rw-r--r-- src/share/vm/interpreter/templateInterpreterGenerator.hpp | 5
-rw-r--r-- src/share/vm/interpreter/templateTable.hpp | 6
-rw-r--r-- src/share/vm/oops/constantPoolOop.hpp | 3
-rw-r--r-- src/share/vm/oops/oop.inline.hpp | 3
-rw-r--r-- src/share/vm/oops/typeArrayOop.hpp | 3
-rw-r--r-- src/share/vm/opto/buildOopMap.cpp | 3
-rw-r--r-- src/share/vm/opto/c2_globals.hpp | 3
-rw-r--r-- src/share/vm/opto/c2compiler.cpp | 3
-rw-r--r-- src/share/vm/opto/compile.cpp | 3
-rw-r--r-- src/share/vm/opto/gcm.cpp | 3
-rw-r--r-- src/share/vm/opto/lcm.cpp | 3
-rw-r--r-- src/share/vm/opto/locknode.hpp | 3
-rw-r--r-- src/share/vm/opto/machnode.hpp | 2
-rw-r--r-- src/share/vm/opto/matcher.cpp | 3
-rw-r--r-- src/share/vm/opto/output.hpp | 3
-rw-r--r-- src/share/vm/opto/regmask.cpp | 3
-rw-r--r-- src/share/vm/opto/regmask.hpp | 3
-rw-r--r-- src/share/vm/opto/runtime.cpp | 3
-rw-r--r-- src/share/vm/prims/jniCheck.cpp | 3
-rw-r--r-- src/share/vm/prims/jni_md.h | 4
-rw-r--r-- src/share/vm/prims/jvmtiClassFileReconstituter.cpp | 3
-rw-r--r-- src/share/vm/prims/methodHandles.hpp | 3
-rw-r--r-- src/share/vm/runtime/atomic.cpp | 3
-rw-r--r-- src/share/vm/runtime/deoptimization.cpp | 6
-rw-r--r-- src/share/vm/runtime/dtraceJSDT.hpp | 3
-rw-r--r-- src/share/vm/runtime/frame.cpp | 3
-rw-r--r-- src/share/vm/runtime/frame.hpp | 6
-rw-r--r-- src/share/vm/runtime/frame.inline.hpp | 6
-rw-r--r-- src/share/vm/runtime/globals.hpp | 12
-rw-r--r-- src/share/vm/runtime/icache.hpp | 3
-rw-r--r-- src/share/vm/runtime/java.cpp | 3
-rw-r--r-- src/share/vm/runtime/javaCalls.hpp | 3
-rw-r--r-- src/share/vm/runtime/javaFrameAnchor.hpp | 6
-rw-r--r-- src/share/vm/runtime/os.hpp | 3
-rw-r--r-- src/share/vm/runtime/registerMap.hpp | 6
-rw-r--r-- src/share/vm/runtime/relocator.hpp | 3
-rw-r--r-- src/share/vm/runtime/safepoint.cpp | 4
-rw-r--r-- src/share/vm/runtime/sharedRuntime.cpp | 4
-rw-r--r-- src/share/vm/runtime/stackValueCollection.cpp | 3
-rw-r--r-- src/share/vm/runtime/statSampler.cpp | 3
-rw-r--r-- src/share/vm/runtime/stubCodeGenerator.cpp | 3
-rw-r--r-- src/share/vm/runtime/stubRoutines.hpp | 6
-rw-r--r-- src/share/vm/runtime/thread.hpp | 3
-rw-r--r-- src/share/vm/runtime/threadLocalStorage.hpp | 3
-rw-r--r-- src/share/vm/runtime/vmStructs.cpp | 9
-rw-r--r-- src/share/vm/runtime/vm_version.cpp | 3
-rw-r--r-- src/share/vm/utilities/copy.hpp | 3
-rw-r--r-- src/share/vm/utilities/globalDefinitions.hpp | 3
-rw-r--r-- src/share/vm/utilities/taskqueue.hpp | 3
209 files changed, 18116 insertions, 757 deletions
diff --git a/agent/src/os/linux/LinuxDebuggerLocal.c b/agent/src/os/linux/LinuxDebuggerLocal.c
index 5771fdd5d..929b7b673 100644
--- a/agent/src/os/linux/LinuxDebuggerLocal.c
+++ b/agent/src/os/linux/LinuxDebuggerLocal.c
@@ -304,6 +304,9 @@ JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo
#ifdef amd64
#define NPRGREG sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_NPRGREG
#endif
+#ifdef aarch64
+#define NPRGREG 32
+#endif
#if defined(sparc) || defined(sparcv9)
#define NPRGREG sun_jvm_hotspot_debugger_sparc_SPARCThreadContext_NPRGREG
#endif
@@ -312,6 +315,7 @@ JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo
CHECK_EXCEPTION_(0);
regs = (*env)->GetLongArrayElements(env, array, &isCopy);
+
#undef REG_INDEX
#ifdef i386
@@ -373,6 +377,13 @@ JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo
#endif /* amd64 */
+#ifdef aarch64
+ regs = (*env)->GetLongArrayElements(env, array, &isCopy);
+ for (i = 0; i < NPRGREG; i++ ) {
+ regs[i] = 0xDEADDEAD;
+ }
+#endif /* aarch64 */
+
#if defined(sparc) || defined(sparcv9)
#define REG_INDEX(reg) sun_jvm_hotspot_debugger_sparc_SPARCThreadContext_##reg
diff --git a/make/Makefile b/make/Makefile
index fc536c120..62ce474d6 100644
--- a/make/Makefile
+++ b/make/Makefile
@@ -108,11 +108,15 @@ all_product: universal_product
all_fastdebug: universal_fastdebug
all_debug: universal_debug
else
+ifeq ($(SRCARCH),aarch64)
+all_debug: jvmg1 docs export_debug
+else
all_product: $(COMMON_VM_PRODUCT_TARGETS)
all_fastdebug: $(COMMON_VM_FASTDEBUG_TARGETS)
all_debug: $(COMMON_VM_DEBUG_TARGETS)
endif
endif
+endif
all_optimized: optimized optimized1 optimizedkernel docs export_optimized
@@ -188,7 +192,14 @@ else
$(MAKE) -f $(ABS_OS_MAKEFILE) \
$(MAKE_ARGS) $(VM_TARGET)
else
+ ifeq ($(SRCARCH),aarch64)
+ $(CD) $(OUTPUTDIR); \
+ $(MAKE) -f $(ABS_OS_MAKEFILE) \
+ $(MAKE_ARGS) $(VM_TARGET)
+ else
+
@$(ECHO) "No compiler1 ($(VM_TARGET)) for ARCH_DATA_MODEL=$(ARCH_DATA_MODEL)"
+ endif
endif
endif
@@ -308,6 +319,10 @@ ifeq ($(JVM_VARIANT_ZERO), true)
MISC_DIR=$(ZERO_DIR)
GEN_DIR=$(ZERO_BASE_DIR)/generated
endif
+ifeq ($(SRCARCH), aarch64)
+ MISC_DIR=$(C1_DIR)
+ GEN_DIR=$(C1_BASE_DIR)/generated
+endif
# Bin files (windows)
ifeq ($(OSNAME),windows)
@@ -411,6 +426,20 @@ ifneq ($(OSNAME),windows)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
endif
+ ifeq ($(SRCARCH), aarch64)
+ $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX)
+ $(install-file)
+ $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX)
+ $(install-file)
+ $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo
+ $(install-file)
+ $(EXPORT_SERVER_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo
+ $(install-file)
+ $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C1_DIR)/%.diz
+ $(install-file)
+ $(EXPORT_SERVER_DIR)/%.diz: $(C1_DIR)/%.diz
+ $(install-file)
+ endif
endif
# Jar file (sa-jdi.jar)
diff --git a/make/defs.make b/make/defs.make
index b632ded2e..0fe299806 100644
--- a/make/defs.make
+++ b/make/defs.make
@@ -246,7 +246,7 @@ ifneq ($(OSNAME),windows)
# Use uname output for SRCARCH, but deal with platform differences. If ARCH
# is not explicitly listed below, it is treated as x86.
- SRCARCH = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc zero,$(ARCH)))
+ SRCARCH = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc zero aarch64,$(ARCH)))
ARCH/ = x86
ARCH/sparc = sparc
ARCH/sparc64= sparc
@@ -257,6 +257,7 @@ ifneq ($(OSNAME),windows)
ARCH/ppc = ppc
ARCH/arm = arm
ARCH/zero = zero
+ ARCH/aarch64 = aarch64
# BUILDARCH is usually the same as SRCARCH, except for sparcv9
BUILDARCH = $(SRCARCH)
@@ -284,8 +285,9 @@ ifneq ($(OSNAME),windows)
LIBARCH/ppc = ppc
LIBARCH/arm = arm
LIBARCH/zero = $(ZERO_LIBARCH)
+ LIBARCH/aarch64 = amd64
- LP64_ARCH = sparcv9 amd64 ia64 zero
+ LP64_ARCH = sparcv9 amd64 ia64 zero aarch64
endif
# Required make macro settings for all platforms
diff --git a/make/linux/makefiles/aarch64.make b/make/linux/makefiles/aarch64.make
new file mode 100644
index 000000000..5195102b3
--- /dev/null
+++ b/make/linux/makefiles/aarch64.make
@@ -0,0 +1,34 @@
+#
+# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# The copied fdlibm routines in sharedRuntimeTrig.o must not be optimized
+OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT)
+# The copied fdlibm routines in sharedRuntimeTrans.o must not be optimized
+OPT_CFLAGS/sharedRuntimeTrans.o = $(OPT_CFLAGS/NOOPT)
+# Must also specify if CPU is little endian
+CFLAGS += -DVM_LITTLE_ENDIAN
+
+# CFLAGS += -D_LP64=1
+
+OPT_CFLAGS/compactingPermGenGen.o = -O1
diff --git a/make/linux/makefiles/buildtree.make b/make/linux/makefiles/buildtree.make
index 94aefb817..678a325c6 100644
--- a/make/linux/makefiles/buildtree.make
+++ b/make/linux/makefiles/buildtree.make
@@ -210,10 +210,10 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
echo; \
echo "Src_Dirs_V = \\"; \
sed 's/$$/ \\/;s|$(GAMMADIR)|$$(GAMMADIR)|' ../shared_dirs.lst; \
- echo "$(call gamma-path,altsrc,cpu/aarch64/vm) \\"; \
- echo "$(call gamma-path,commonsrc,cpu/aarch64/vm) \\"; \
- echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_aarch64/vm) \\"; \
- echo "$(call gamma-path,commonsrc,os_cpu/$(OS_FAMILY)_aarch64/vm) \\"; \
+ echo "$(call gamma-path,altsrc,cpu/$(SRCARCH)/vm) \\"; \
+ echo "$(call gamma-path,commonsrc,cpu/$(SRCARCH)/vm) \\"; \
+ echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
+ echo "$(call gamma-path,commonsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
echo "$(call gamma-path,altsrc,os/$(OS_FAMILY)/vm) \\"; \
echo "$(call gamma-path,commonsrc,os/$(OS_FAMILY)/vm) \\"; \
echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \
@@ -226,10 +226,10 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
echo "$(call gamma-path,commonsrc,share/vm) \\"; \
echo "$(call gamma-path,altsrc,share/vm/precompiled) \\"; \
echo "$(call gamma-path,commonsrc,share/vm/precompiled) \\"; \
- echo "$(call gamma-path,altsrc,cpu/aarch64/vm) \\"; \
- echo "$(call gamma-path,commonsrc,cpu/aarch64/vm) \\"; \
- echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_aarch64/vm) \\"; \
- echo "$(call gamma-path,commonsrc,os_cpu/$(OS_FAMILY)_aarch64/vm) \\"; \
+ echo "$(call gamma-path,altsrc,cpu/$(SRCARCH)/vm) \\"; \
+ echo "$(call gamma-path,commonsrc,cpu/$(SRCARCH)/vm) \\"; \
+ echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
+ echo "$(call gamma-path,commonsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
echo "$(call gamma-path,altsrc,os/$(OS_FAMILY)/vm) \\"; \
echo "$(call gamma-path,commonsrc,os/$(OS_FAMILY)/vm) \\"; \
echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \
@@ -396,7 +396,6 @@ WRONG_DATA_MODE_MSG = \
CROSS_COMPILING_MSG = \
echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
-
test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
@echo Creating $@ ...
$(QUIETLY) ( \
diff --git a/make/linux/makefiles/defs.make b/make/linux/makefiles/defs.make
index 7cc42e895..e8ede67ed 100644
--- a/make/linux/makefiles/defs.make
+++ b/make/linux/makefiles/defs.make
@@ -114,6 +114,18 @@ ifeq ($(ARCH), ppc)
HS_ARCH = ppc
endif
+# AARCH64
+
+# AARCH64 is currently signalled by SRCARCH == aarch64
+# however it relies upon building with ARCH = amd64
+
+ifeq ($(SRCARCH), aarch64)
+ ARCH_DATA_MODEL = 64
+ PLATFORM = linux-aarch64
+ VM_PLATFORM = linux_aarch64
+ HS_ARCH = x86
+endif
+
# On 32 bit linux we build server and client, on 64 bit just server.
ifeq ($(JVM_VARIANTS),)
ifeq ($(ARCH_DATA_MODEL), 32)
diff --git a/make/linux/platform_aarch64 b/make/linux/platform_aarch64
new file mode 100644
index 000000000..b0aff02f8
--- /dev/null
+++ b/make/linux/platform_aarch64
@@ -0,0 +1,15 @@
+os_family = linux
+
+arch = aarch64
+
+arch_model = aarch64
+
+os_arch = linux_aarch64
+
+os_arch_model = linux_aarch64
+
+lib_arch = amd64
+
+compiler = gcc
+
+sysdefs = -DLINUX -D_GNU_SOURCE -DAMD64
diff --git a/src/cpu/aarch64/vm/aarch64.ad b/src/cpu/aarch64/vm/aarch64.ad
new file mode 100644
index 000000000..82f2f298b
--- /dev/null
+++ b/src/cpu/aarch64/vm/aarch64.ad
@@ -0,0 +1,514 @@
+//
+// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+//
+
+// AArch64 Architecture Description File
+
+//----------REGISTER DEFINITION BLOCK------------------------------------------
+// This information is used by the matcher and the register allocator to
+// describe individual registers and classes of registers within the target
+// architecture.
+
+register %{
+//----------Architecture Description Register Definitions----------------------
+// General Registers
+// "reg_def" name ( register save type, C convention save type,
+// ideal register type, encoding );
+// Register Save Types:
+//
+// NS = No-Save: The register allocator assumes that these registers
+// can be used without saving upon entry to the method, &
+// that they do not need to be saved at call sites.
+//
+// SOC = Save-On-Call: The register allocator assumes that these registers
+// can be used without saving upon entry to the method,
+// but that they must be saved at call sites.
+//
+// SOE = Save-On-Entry: The register allocator assumes that these registers
+// must be saved before using them upon entry to the
+// method, but they do not need to be saved at call
+// sites.
+//
+// AS = Always-Save: The register allocator assumes that these registers
+// must be saved before using them upon entry to the
+// method, & that they must be saved at call sites.
+//
+// Ideal Register Type is used to determine how to save & restore a
+// register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
+// spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
+//
+// The encoding number is the actual bit-pattern placed into the opcodes.
+
+// General Registers
+
+reg_def R0 (SOC, SOC, Op_RegI, 0, r0->as_VMReg());
+
+// Specify priority of register selection within phases of register
+// allocation. Highest priority is first. A useful heuristic is to
+// give registers a low priority when they are required by machine
+// instructions, like EAX and EDX on I486, and choose no-save registers
+// before save-on-call, & save-on-call before save-on-entry. Registers
+// which participate in fixed calling sequences should come last.
+// Registers which are used as pairs must fall on an even boundary.
+
+alloc_class chunk0(R0);
+
+//----------Architecture Description Register Classes--------------------------
+// Several register classes are automatically defined based upon information in
+// this architecture description.
+// 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
+// 2) reg_class compiler_method_oop_reg ( /* as def'd in frame section */ )
+// 2) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
+// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
+//
+
+// Class for all pointer registers (including RSP)
+reg_class any_reg(R0);
+
+%}
+
+
+//----------SOURCE BLOCK-------------------------------------------------------
+// This is a block of C++ code which provides values, functions, and
+// definitions necessary in the rest of the architecture description
+source %{
+#define RELOC_IMM64 Assembler::imm_operand
+#define RELOC_DISP32 Assembler::disp32_operand
+
+#define __ _masm.
+
+int emit_deopt_handler(CodeBuffer& cbuf) { Unimplemented(); return 0; }
+
+uint reloc_java_to_interp() { Unimplemented(); return 0; }
+
+bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) { Unimplemented(); return false;}
+
+bool Matcher::narrow_oop_use_complex_address() { Unimplemented(); return false;}
+
+bool SafePointNode::needs_polling_address_input() { Unimplemented(); return false; }
+
+%}
+
+//----------ENCODING BLOCK-----------------------------------------------------
+// This block specifies the encoding classes used by the compiler to
+// output byte streams. Encoding classes are parameterized macros
+// used by Machine Instruction Nodes in order to generate the bit
+// encoding of the instruction. Operands specify their base encoding
+// interface with the interface keyword. There are currently
+// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
+// COND_INTER. REG_INTER causes an operand to generate a function
+// which returns its register number when queried. CONST_INTER causes
+// an operand to generate a function which returns the value of the
+// constant when queried. MEMORY_INTER causes an operand to generate
+// four functions which return the Base Register, the Index Register,
+// the Scale Value, and the Offset Value of the operand when queried.
+// COND_INTER causes an operand to generate six functions which return
+// the encoding code (ie - encoding bits for the instruction)
+// associated with each basic boolean condition for a conditional
+// instruction.
+//
+// Instructions specify two basic values for encoding. Again, a
+// function is available to check if the constant displacement is an
+// oop. They use the ins_encode keyword to specify their encoding
+// classes (which must be a sequence of enc_class names, and their
+// parameters, specified in the encoding block), and they use the
+// opcode keyword to specify, in order, their primary, secondary, and
+// tertiary opcode. Only the opcode sections which a particular
+// instruction needs for encoding need to be specified.
+encode %{
+ // Build emit functions for each basic byte or larger field in the
+ // intel encoding scheme (opcode, rm, sib, immediate), and call them
+ // from C++ code in the enc_class source block. Emit functions will
+ // live in the main source block for now. In future, we can
+ // generalize this by adding a syntax that specifies the sizes of
+ // fields in an order, so that the adlc can build the emit functions
+ // automagically
+
+ // Emit primary opcode
+ enc_class OpcP
+ %{
+ emit_opcode(cbuf, $primary);
+ %}
+
+%}
+
+
+
+//----------FRAME--------------------------------------------------------------
+// Definition of frame structure and management information.
+//
+// S T A C K L A Y O U T Allocators stack-slot number
+// | (to get allocators register number
+// G Owned by | | v add OptoReg::stack0())
+// r CALLER | |
+// o | +--------+ pad to even-align allocators stack-slot
+// w V | pad0 | numbers; owned by CALLER
+// t -----------+--------+----> Matcher::_in_arg_limit, unaligned
+// h ^ | in | 5
+// | | args | 4 Holes in incoming args owned by SELF
+// | | | | 3
+// | | +--------+
+// V | | old out| Empty on Intel, window on Sparc
+// | old |preserve| Must be even aligned.
+// | SP-+--------+----> Matcher::_old_SP, even aligned
+// | | in | 3 area for Intel ret address
+// Owned by |preserve| Empty on Sparc.
+// SELF +--------+
+// | | pad2 | 2 pad to align old SP
+// | +--------+ 1
+// | | locks | 0
+// | +--------+----> OptoReg::stack0(), even aligned
+// | | pad1 | 11 pad to align new SP
+// | +--------+
+// | | | 10
+// | | spills | 9 spills
+// V | | 8 (pad0 slot for callee)
+// -----------+--------+----> Matcher::_out_arg_limit, unaligned
+// ^ | out | 7
+// | | args | 6 Holes in outgoing args owned by CALLEE
+// Owned by +--------+
+// CALLEE | new out| 6 Empty on Intel, window on Sparc
+// | new |preserve| Must be even-aligned.
+// | SP-+--------+----> Matcher::_new_SP, even aligned
+// | | |
+//
+// Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
+// known from SELF's arguments and the Java calling convention.
+// Region 6-7 is determined per call site.
+// Note 2: If the calling convention leaves holes in the incoming argument
+// area, those holes are owned by SELF. Holes in the outgoing area
+// are owned by the CALLEE. Holes should not be necessary in the
+// incoming area, as the Java calling convention is completely under
+// the control of the AD file. Doubles can be sorted and packed to
+// avoid holes. Holes in the outgoing arguments may be necessary for
+// varargs C calling conventions.
+// Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
+// even aligned with pad0 as needed.
+// Region 6 is even aligned. Region 6-7 is NOT even aligned;
+// region 6-11 is even aligned; it may be padded out more so that
+// the region from SP to FP meets the minimum stack alignment.
+// Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
+// alignment. Region 11, pad1, may be dynamically extended so that
+// SP meets the minimum alignment.
+
+frame
+%{
+ // What direction does stack grow in (assumed to be same for C & Java)
+ stack_direction(TOWARDS_LOW);
+
+ // These three registers define part of the calling convention
+ // between compiled code and the interpreter.
+ inline_cache_reg(R0); // Inline Cache Register
+ interpreter_method_oop_reg(R0); // Method Oop Register when
+ // calling interpreter
+
+ // Number of stack slots consumed by locking an object
+ sync_stack_slots(2);
+
+ // Compiled code's Frame Pointer
+ frame_pointer(R0);
+
+ // Interpreter stores its frame pointer in a register which is
+ // stored to the stack by I2CAdaptors.
+ // I2CAdaptors convert from interpreted java to compiled java.
+ interpreter_frame_pointer(R0);
+
+ // Stack alignment requirement
+ stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
+
+ // Number of stack slots between incoming argument block and the start of
+ // a new frame. The PROLOG must add this many slots to the stack. The
+ // EPILOG must remove this many slots. amd64 needs two slots for
+ // return address.
+ in_preserve_stack_slots(4 + 2 * VerifyStackAtCalls);
+
+ // Number of outgoing stack slots killed above the out_preserve_stack_slots
+ // for calls to C. Supports the var-args backing area for register parms.
+ varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
+
+ // The after-PROLOG location of the return address. Location of
+ // return address specifies a type (REG or STACK) and a number
+ // representing the register number (i.e. - use a register name) or
+ // stack slot.
+ // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
+ // Otherwise, it is above the locks and verification slot and alignment word
+ return_addr(STACK - 2 +
+ round_to((Compile::current()->in_preserve_stack_slots() +
+ Compile::current()->fixed_slots()),
+ stack_alignment_in_slots()));
+
+ // Body of function which returns an integer array locating
+ // arguments either in registers or in stack slots. Passed an array
+ // of ideal registers called "sig" and a "length" count. Stack-slot
+ // offsets are based on outgoing arguments, i.e. a CALLER setting up
+ // arguments for a CALLEE. Incoming stack arguments are
+ // automatically biased by the preserve_stack_slots field above.
+
+ calling_convention
+ %{
+ // No difference between ingoing/outgoing just pass false
+ SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
+ %}
+
+ c_calling_convention
+ %{
+ // This is obviously always outgoing
+ (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
+ %}
+
+ // Location of compiled Java return values. Same as C for now.
+ return_value
+ %{
+ return 0;
+ %}
+%}
+
+//----------ATTRIBUTES---------------------------------------------------------
+//----------Operand Attributes-------------------------------------------------
+op_attrib op_cost(0); // Required cost attribute
+
+//----------Instruction Attributes---------------------------------------------
+ins_attrib ins_cost(100); // Required cost attribute
+ins_attrib ins_size(8); // Required size attribute (in bits)
+ins_attrib ins_short_branch(0); // Required flag: is this instruction
+ // a non-matching short branch variant
+ // of some long branch?
+ins_attrib ins_alignment(1); // Required alignment attribute (must
+ // be a power of 2) specifies the
+ // alignment that some part of the
+ // instruction (not necessarily the
+ // start) requires. If > 1, a
+ // compute_padding() function must be
+ // provided for the instruction
+
+//----------OPERANDS-----------------------------------------------------------
+// Operand definitions must precede instruction definitions for correct parsing
+// in the ADLC because operands constitute user defined types which are used in
+// instruction definitions.
+
+//----------Simple Operands----------------------------------------------------
+
+
+//----------OPERAND CLASSES----------------------------------------------------
+// Operand Classes are groups of operands that are used as to simplify
+// instruction definitions by not requiring the AD writer to specify separate
+// instructions for every form of operand when the instruction accepts
+// multiple operand types with the same basic encoding and format. The classic
+// case of this is memory operands.
+
+
+//----------PIPELINE-----------------------------------------------------------
+// Rules which define the behavior of the target architectures pipeline.
+// Integer ALU reg operation
+pipeline %{
+
+attributes %{
+ fixed_size_instructions; // Fixed size instructions
+ max_instructions_per_bundle = 3; // Up to 3 instructions per bundle
+ instruction_unit_size = 4; // An instruction is 4 bytes long
+ instruction_fetch_unit_size = 16; // The processor fetches one line
+ instruction_fetch_units = 1; // of 16 bytes
+
+ // List of nop instructions
+ //nops( MachNop );
+%}
+
+//----------RESOURCES----------------------------------------------------------
+// Resources are the functional units available to the machine
+
+resources( D0, D1, D2, DECODE = D0 | D1 | D2,
+ MS0, MS1, MS2, MEM = MS0 | MS1 | MS2,
+ BR, FPU,
+ ALU0, ALU1, ALU2, ALU = ALU0 | ALU1 | ALU2);
+
+//----------PIPELINE DESCRIPTION-----------------------------------------------
+// Pipeline Description specifies the stages in the machine's pipeline
+
+// Generic P2/P3 pipeline
+pipe_desc(S0, S1, S2, S3, S4, S5);
+
+//----------PIPELINE CLASSES---------------------------------------------------
+// Pipeline Classes describe the stages in which input and output are
+// referenced by the hardware pipeline.
+
+// The real do-nothing guy
+pipe_class empty()
+%{
+ instruction_count(0);
+%}
+
+// Define the class for the Nop node
+define
+%{
+ MachNop = empty;
+%}
+
+%}
+//----------INSTRUCTIONS-------------------------------------------------------
+//
+// match -- States which machine-independent subtree may be replaced
+// by this instruction.
+// ins_cost -- The estimated cost of this instruction is used by instruction
+// selection to identify a minimum cost tree of machine
+// instructions that matches a tree of machine-independent
+// instructions.
+// format -- A string providing the disassembly for this instruction.
+// The value of an instruction's operand may be inserted
+// by referring to it with a '$' prefix.
+// opcode -- Three instruction opcodes may be provided. These are referred
+// to within an encode class as $primary, $secondary, and $tertiary
+// respectively. The primary opcode is commonly used to
+// indicate the type of machine instruction, while secondary
+// and tertiary are often used for prefix options or addressing
+// modes.
+// ins_encode -- A list of encode classes with parameters. The encode class
+// name must have been defined in an 'enc_class' specification
+// in the encode section of the architecture description.
+
+
+
+//----------PEEPHOLE RULES-----------------------------------------------------
+// These must follow all instruction definitions as they use the names
+// defined in the instructions definitions.
+//
+// peepmatch ( root_instr_name [preceding_instruction]* );
+//
+// peepconstraint %{
+// (instruction_number.operand_name relational_op instruction_number.operand_name
+// [, ...] );
+// // instruction numbers are zero-based using left to right order in peepmatch
+//
+// peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
+// // provide an instruction_number.operand_name for each operand that appears
+// // in the replacement instruction's match rule
+//
+// ---------VM FLAGS---------------------------------------------------------
+//
+// All peephole optimizations can be turned off using -XX:-OptoPeephole
+//
+// Each peephole rule is given an identifying number starting with zero and
+// increasing by one in the order seen by the parser. An individual peephole
+// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
+// on the command-line.
+//
+// ---------CURRENT LIMITATIONS----------------------------------------------
+//
+// Only match adjacent instructions in same basic block
+// Only equality constraints
+// Only constraints between operands, not (0.dest_reg == RAX_enc)
+// Only one replacement instruction
+//
+// ---------EXAMPLE----------------------------------------------------------
+//
+// // pertinent parts of existing instructions in architecture description
+// instruct movI(rRegI dst, rRegI src)
+// %{
+// match(Set dst (CopyI src));
+// %}
+//
+// instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr)
+// %{
+// match(Set dst (AddI dst src));
+// effect(KILL cr);
+// %}
+//
+// // Change (inc mov) to lea
+// peephole %{
+// // increment preceded by register-register move
+// peepmatch ( incI_rReg movI );
+// // require that the destination register of the increment
+// // match the destination register of the move
+// peepconstraint ( 0.dst == 1.dst );
+// // construct a replacement instruction that sets
+// // the destination to ( move's source register + one )
+// peepreplace ( leaI_rReg_immI( 0.dst 1.src 0.src ) );
+// %}
+//
+
+// Implementation no longer uses movX instructions since
+// machine-independent system no longer uses CopyX nodes.
+//
+// peephole
+// %{
+// peepmatch (incI_rReg movI);
+// peepconstraint (0.dst == 1.dst);
+// peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
+// %}
+
+// peephole
+// %{
+// peepmatch (decI_rReg movI);
+// peepconstraint (0.dst == 1.dst);
+// peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
+// %}
+
+// peephole
+// %{
+// peepmatch (addI_rReg_imm movI);
+// peepconstraint (0.dst == 1.dst);
+// peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
+// %}
+
+// peephole
+// %{
+// peepmatch (incL_rReg movL);
+// peepconstraint (0.dst == 1.dst);
+// peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
+// %}
+
+// peephole
+// %{
+// peepmatch (decL_rReg movL);
+// peepconstraint (0.dst == 1.dst);
+// peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
+// %}
+
+// peephole
+// %{
+// peepmatch (addL_rReg_imm movL);
+// peepconstraint (0.dst == 1.dst);
+// peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
+// %}
+
+// peephole
+// %{
+// peepmatch (addP_rReg_imm movP);
+// peepconstraint (0.dst == 1.dst);
+// peepreplace (leaP_rReg_imm(0.dst 1.src 0.src));
+// %}
+
+// // Change load of spilled value to only a spill
+// instruct storeI(memory mem, rRegI src)
+// %{
+// match(Set mem (StoreI mem src));
+// %}
+//
+// instruct loadI(rRegI dst, memory mem)
+// %{
+// match(Set dst (LoadI mem));
+// %}
+//
+
+//----------SMARTSPILL RULES---------------------------------------------------
+// These must follow all instruction definitions as they use the names
+// defined in the instructions definitions.
diff --git a/src/cpu/aarch64/vm/aarch64Test.cpp b/src/cpu/aarch64/vm/aarch64Test.cpp
new file mode 100644
index 000000000..ebd7d44fe
--- /dev/null
+++ b/src/cpu/aarch64/vm/aarch64Test.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011,
+ */
+
+#include <stdlib.h>
+
+#include "precompiled.hpp"
+#include "code/codeBlob.hpp"
+#include "asm/assembler.hpp"
+#include "../../../../../../simulator/simulator.hpp"
+
+// hook routine called during JVM bootstrap to test AArch64 assembler
+
+AArch64Simulator sim;
+
+extern "C" void entry(CodeBuffer*);
+
+void aarch64TestHook()
+{
+ BufferBlob* b = BufferBlob::create("aarch64Test", 5000);
+ CodeBuffer code(b);
+ MacroAssembler _masm(&code);
+ entry(_masm.code());
+ // die now before we hit all the Unimplemented() calls
+ exit(0);
+
+#if 0
+ // old test code to compute sum of squares
+ enum { r0, r1, r2, r3, r4, LR = 30 };
+
+ address entry = __ pc();
+
+ __ _mov_imm(r0, 100);
+ address loop = __ pc();
+ __ _sub_imm(r0, r0, 1);
+ __ _cbnz(r0, loop);
+ // __ _br(LR);
+
+ char stack[4096];
+ unsigned long memory[100];
+
+ __ _mov_imm(r0, 1);
+ __ _mov_imm(r4, 100);
+ loop = __ pc();
+ __ _mov(r1, r0);
+ __ _mul(r2, r1, r1);
+ __ _str_post(r2, r3, 8);
+ __ _add_imm(r0, r0, 1);
+ __ _sub_imm(r4, r4, 1);
+ __ _cbnz(r4, loop);
+ __ _br(LR);
+
+ Disassembler::decode(entry, __ pc());
+
+ sim.init((u_int64_t)entry, (u_int64_t)stack + sizeof stack,
+ (u_int64_t)stack);
+ sim.getCPUState().xreg((GReg)r3, 0) = (u_int64_t)memory;
+ sim.run();
+ printf("Table of squares:\n");
+ for (int i = 0; i < 100; i++)
+ printf(" %d\n", memory[i]);
+#endif
+}
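The hook above is reached via the first arch-specific call made during JVM bootstrap, as the commit message notes. icache_aarch64.cpp itself is not part of this excerpt, so the following call-site sketch is an assumption about its shape rather than a quote:

// Hypothetical call site in icache_aarch64.cpp: ICache::initialize()
// is the first arch-specific code run during VM init, so it drives
// the assembler test, which exits before any Unimplemented() stubs
// can be hit.
extern void aarch64TestHook();

void ICache::initialize() {
  aarch64TestHook();
}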
diff --git a/src/cpu/aarch64/vm/assembler_aarch64.cpp b/src/cpu/aarch64/vm/assembler_aarch64.cpp
index 0e5b72f8b..76dd1a777 100644
--- a/src/cpu/aarch64/vm/assembler_aarch64.cpp
+++ b/src/cpu/aarch64/vm/assembler_aarch64.cpp
@@ -58,7 +58,7 @@ REGISTER_DEFINITION(Register, r4);
REGISTER_DEFINITION(Register, r5);
REGISTER_DEFINITION(Register, r6);
REGISTER_DEFINITION(Register, r7);
-#if 0 // x86 defines these. What a kludge!
+// #if 0 // x86 defines these. What a kludge!
REGISTER_DEFINITION(Register, r8);
REGISTER_DEFINITION(Register, r9);
REGISTER_DEFINITION(Register, r10);
@@ -67,7 +67,7 @@ REGISTER_DEFINITION(Register, r12);
REGISTER_DEFINITION(Register, r13);
REGISTER_DEFINITION(Register, r14);
REGISTER_DEFINITION(Register, r15);
-#endif
+// #endif
REGISTER_DEFINITION(Register, r16);
REGISTER_DEFINITION(Register, r17);
REGISTER_DEFINITION(Register, r18);
@@ -91,7 +91,7 @@ extern "C" void entry(CodeBuffer *cb);
#define __ _masm.
void entry(CodeBuffer *cb) {
- Assembler_aarch64 _masm(cb);
+ Assembler _masm(cb);
address entry = __ pc();
__ addwi(r0, r1, 99);
@@ -179,8 +179,6 @@ void entry(CodeBuffer *cb) {
__ ldrw(r8, entry);
__ ldr(r8, entry);
- __ ldrs(F12, entry);
- __ ldrd(F12, entry);
__ ldrsw(r8, entry);
__ prfm(0b10000, __ pc() + 8);
@@ -196,268 +194,57 @@ void entry(CodeBuffer *cb) {
#undef INSN
- __ str(r3, Address_aarch64(r4));
- __ str(r3, Address_aarch64(r4, 8));
- __ str(r3, Address_aarch64(r4, r6));
- __ str(r3, Address_aarch64(__ pre(r4, 8)));
- __ str(r3, Address_aarch64(__ post(r4, 8)));
+ __ str(r3, Address(r4));
+ __ str(r3, Address(r4, 8));
+ __ str(r3, Address(r4, r6));
+ __ str(r3, Address(__ pre(r4, 8)));
+ __ str(r3, Address(__ post(r4, 8)));
- __ strw(r3, Address_aarch64(r4));
- __ strw(r3, Address_aarch64(r4, 8));
- __ strw(r3, Address_aarch64(r4, r6));
- __ strw(r3, Address_aarch64(__ pre(r4, 8)));
- __ strw(r3, Address_aarch64(__ post(r4, 8)));
+ __ strw(r3, Address(r4));
+ __ strw(r3, Address(r4, 8));
+ __ strw(r3, Address(r4, r6));
+ __ strw(r3, Address(__ pre(r4, 8)));
+ __ strw(r3, Address(__ post(r4, 8)));
- __ strh(r3, Address_aarch64(r4));
- __ strh(r3, Address_aarch64(r4, 8));
- __ strh(r3, Address_aarch64(r4, r6));
- __ strh(r3, Address_aarch64(__ pre(r4, 8)));
- __ strh(r3, Address_aarch64(__ post(r4, 8)));
-
- __ strb(r3, Address_aarch64(r4));
- __ strb(r3, Address_aarch64(r4, 8));
- __ strb(r3, Address_aarch64(r4, r6));
- __ strb(r3, Address_aarch64(__ pre(r4, 8)));
- __ strb(r3, Address_aarch64(__ post(r4, 8)));
-
- __ ldr(r3, Address_aarch64(r4));
- __ ldr(r3, Address_aarch64(r4, 8));
- __ ldr(r3, Address_aarch64(r4, r6));
- __ ldr(r3, Address_aarch64(__ pre(r4, 8)));
- __ ldr(r3, Address_aarch64(__ post(r4, 8)));
-
- __ ldrw(r3, Address_aarch64(r4));
- __ ldrw(r3, Address_aarch64(r4, 8));
- __ ldrw(r3, Address_aarch64(r4, r6));
- __ ldrw(r3, Address_aarch64(__ pre(r4, 8)));
- __ ldrw(r3, Address_aarch64(__ post(r4, 8)));
-
- __ ldrsw(r3, Address_aarch64(r4));
- __ ldrsw(r3, Address_aarch64(r4, 8));
- __ ldrsw(r3, Address_aarch64(r4, r6));
- __ ldrsw(r3, Address_aarch64(__ pre(r4, 8)));
- __ ldrsw(r3, Address_aarch64(__ post(r4, 8)));
-
- __ ldrh(r3, Address_aarch64(r4));
- __ ldrh(r3, Address_aarch64(r4, 8));
- __ ldrh(r3, Address_aarch64(r4, r6));
- __ ldrh(r3, Address_aarch64(__ pre(r4, 8)));
- __ ldrh(r3, Address_aarch64(__ post(r4, 8)));
-
- __ ldrh(r3, Address_aarch64(r4));
- __ ldrh(r3, Address_aarch64(r4, 8));
- __ ldrh(r3, Address_aarch64(r4, r6));
- __ ldrh(r3, Address_aarch64(__ pre(r4, 8)));
- __ ldrh(r3, Address_aarch64(__ post(r4, 8)));
-
- __ ldrb(r3, Address_aarch64(r4));
- __ ldrb(r3, Address_aarch64(r4, 8));
- __ ldrb(r3, Address_aarch64(r4, r6));
- __ ldrb(r3, Address_aarch64(__ pre(r4, 8)));
- __ ldrb(r3, Address_aarch64(__ post(r4, 8)));
-
- __ ldrsb(r3, Address_aarch64(r4));
- __ ldrsb(r3, Address_aarch64(r4, 8));
- __ ldrsb(r3, Address_aarch64(r4, r6));
- __ ldrsb(r3, Address_aarch64(__ pre(r4, 8)));
- __ ldrsb(r3, Address_aarch64(__ post(r4, 8)));
-
- __ prfm(r3, Address_aarch64(r4));
- __ prfm(r3, Address_aarch64(r4, 8));
- __ prfm(r3, Address_aarch64(r4, r6));
- __ prfm(r3, Address_aarch64(__ pre(r4, 8)));
- __ prfm(r3, Address_aarch64(__ post(r4, 8)));
-
-#define INSN(NAME, size, op) \
- __ NAME(F7, Address_aarch64(r4)); \
- __ NAME(F7, Address_aarch64(r4, 8)); \
- __ NAME(F7, Address_aarch64(r4, r6)); \
- __ NAME(F7, Address_aarch64(__ pre(r4, 8))); \
- __ NAME(F7, Address_aarch64(__ post(r4, 8)));
-
- INSN(strd, 0b11, 0b00);
- INSN(strs, 0b10, 0b00);
- INSN(ldrd, 0b11, 0b01);
- INSN(ldrs, 0b10, 0b01);
-
-#undef INSN
-
- __ eorw (r19, r7, r11, __ lsl, 3);
- __ bic(r27, r3, r1, __ ror, 22);
-
- __ addw(r27, r3, r1, __ lsl, 22);
-
- __ add(r16, r17, r18, ext::uxth, 4);
- __ adds(r16, r17, r18);
-
- __ adc(r0, r1, r2);
- __ sbcsw(r18, r19, r20);
-
- for (int i = 0; i < 16; i++) {
- __ ccmn(r1, r2, 15-i, (Assembler_aarch64::condition_code)i);
- __ ccmpw(r1, r2, 15-i, (Assembler_aarch64::condition_code)i);
- __ ccmpw(r1, i, 15-i, (Assembler_aarch64::condition_code)i);
- }
-
- __ csinv(r9, r10, r11, Assembler_aarch64::CS);
+ __ strh(r3, Address(r4));
+ __ strh(r3, Address(r4, 8));
+ __ strh(r3, Address(r4, r6));
+ __ strh(r3, Address(__ pre(r4, 8)));
+ __ strh(r3, Address(__ post(r4, 8)));
+
+ __ strb(r3, Address(r4));
+ __ strb(r3, Address(r4, 8));
+ __ strb(r3, Address(r4, r6));
+ __ strb(r3, Address(__ pre(r4, 8)));
+ __ strb(r3, Address(__ post(r4, 8)));
+
+ __ ldr(r3, Address(r4));
+ __ ldr(r3, Address(r4, 8));
+ __ ldr(r3, Address(r4, r6));
+ __ ldr(r3, Address(__ pre(r4, 8)));
+ __ ldr(r3, Address(__ post(r4, 8)));
+
+ __ ldrw(r3, Address(r4));
+ __ ldrw(r3, Address(r4, 8));
+ __ ldrw(r3, Address(r4, r6));
+ __ ldrw(r3, Address(__ pre(r4, 8)));
+ __ ldrw(r3, Address(__ post(r4, 8)));
-#define INSN(NAME, op29, opcode2, opcode) \
- __ NAME(r20, r21);
-
-
- INSN(rbitw, 0b010, 0b00000, 0b00000);
- INSN(rev16w, 0b010, 0b00000, 0b00001);
- INSN(revw, 0b010, 0b00000, 0b00010);
- INSN(clzw, 0b010, 0b00000, 0b00100);
- INSN(clsw, 0b010, 0b00000, 0b00101);
-
- INSN(rbit, 0b110, 0b00000, 0b00000);
- INSN(rev16, 0b110, 0b00000, 0b00001);
- INSN(rev32, 0b110, 0b00000, 0b00010);
- INSN(rev, 0b110, 0b00000, 0b00011);
- INSN(clz, 0b110, 0b00000, 0b00100);
- INSN(cls, 0b110, 0b00000, 0b00101);
-
-#undef INSN
-
-#define INSN(NAME, op29, opcode) \
- __ NAME(r17, r18, r0);
-
- INSN(udivw, 0b000, 0b000010);
- INSN(sdivw, 0b000, 0b000011);
- INSN(lslvw, 0b000, 0b001000);
- INSN(lsrvw, 0b000, 0b001001);
- INSN(asrvw, 0b000, 0b001010);
- INSN(rorvw, 0b000, 0b001011);
-
- INSN(udiv, 0b100, 0b000010);
- INSN(sdiv, 0b100, 0b000011);
- INSN(lslv, 0b100, 0b001000);
- INSN(lsrv, 0b100, 0b001001);
- INSN(asrv, 0b100, 0b001010);
- INSN(rorv, 0b100, 0b001011);
-
-#undef INSN
-
-#define INSN(NAME, op54, op31, o0) \
- __ NAME(r3, r2, r1, r0);
-
- INSN(maddw, 0b000, 0b000, 0);
- INSN(msubw, 0b000, 0b000, 1);
- INSN(madd, 0b100, 0b000, 0);
- INSN(msub, 0b100, 0b000, 1);
- INSN(smaddl, 0b100, 0b001, 0);
- INSN(smsubl, 0b100, 0b001, 1);
- INSN(umaddl, 0b100, 0b101, 0);
- INSN(umsubl, 0b100, 0b101, 1);
-
-#undef INSN
-
-#define INSN(NAME, op54, op31, o0) \
- __ NAME(r3, r2, r1);
-
- INSN(smulh, 0b100, 0b010, 0);
- INSN(umulh, 0b100, 0b110, 0);
-
-#undef INSN
-
- // Aligned and signed
- __ ldr(r3, Address_aarch64(r2, 11));
- __ ldr(r3, Address_aarch64(r2, -11));
- __ ldr(r3, Address_aarch64(r2, 12));
- __ ldr(r3, Address_aarch64(r2, -12));
-
-#define INSN(NAME, op31, type, opcode) \
- __ NAME(F0, F1);
-
- INSN(fmovs, 0b000, 0b00, 0b000000);
- INSN(fabss, 0b000, 0b00, 0b000001);
- INSN(fnegs, 0b000, 0b00, 0b000010);
- INSN(fsqrts, 0b000, 0b00, 0b000011);
- INSN(fcvts, 0b000, 0b00, 0b000101);
-
- INSN(fmovd, 0b000, 0b01, 0b000000);
- INSN(fabsd, 0b000, 0b01, 0b000001);
- INSN(fnegd, 0b000, 0b01, 0b000010);
- INSN(fsqrtd, 0b000, 0b01, 0b000011);
- INSN(fcvtd, 0b000, 0b01, 0b000100);
-
-#undef INSN
-
-#define INSN(NAME, op31, type, opcode) \
- __ NAME(F0, F1, F1);
- INSN(fmuls, 0b000, 0b00, 0b0000);
- INSN(fdivs, 0b000, 0b00, 0b0001);
- INSN(fadds, 0b000, 0b00, 0b0010);
- INSN(fsubs, 0b000, 0b00, 0b0011);
- INSN(fnmuls, 0b000, 0b00, 0b1000);
-
- INSN(fmuls, 0b000, 0b01, 0b0000);
- INSN(fdivs, 0b000, 0b01, 0b0001);
- INSN(fadds, 0b000, 0b01, 0b0010);
- INSN(fsubs, 0b000, 0b01, 0b0011);
- INSN(fnmuls, 0b000, 0b01, 0b1000);
-
-#undef INSN
-
-#define INSN(NAME, op31, type, o1, o0) \
- __ NAME(F0, F1, F2, F3);
-
- INSN(fmadds, 0b000, 0b00, 0, 0);
- INSN(fmsubs, 0b000, 0b00, 0, 1);
- INSN(fnmadds, 0b000, 0b00, 0, 0);
- INSN(fnmsubs, 0b000, 0b00, 0, 1);
-
- INSN(fmadd, 0b000, 0b01, 0, 0);
- INSN(fmsubd, 0b000, 0b01, 0, 1);
- INSN(fnmadd, 0b000, 0b01, 0, 0);
- INSN(fnmsub, 0b000, 0b01, 0, 1);
-
-#undef INSN
-
-#define INSN(NAME, op31, type, rmode, opcode) \
- __ NAME(r1, F0);
+ __ ldrh(r3, Address(r4));
+ __ ldrh(r3, Address(r4, 8));
+ __ ldrh(r3, Address(r4, r6));
+ __ ldrh(r3, Address(__ pre(r4, 8)));
+ __ ldrh(r3, Address(__ post(r4, 8)));
+
+ __ ldrb(r3, Address(r4));
+ __ ldrb(r3, Address(r4, 8));
+ __ ldrb(r3, Address(r4, r6));
+ __ ldrb(r3, Address(__ pre(r4, 8)));
+ __ ldrb(r3, Address(__ post(r4, 8)));
- INSN(fcvtszw, 0b000, 0b00, 0b11, 0b000);
- INSN(fcvtzs, 0b000, 0b01, 0b11, 0b000);
- INSN(fcvtzdw, 0b100, 0b00, 0b11, 0b000);
- INSN(fcvtszd, 0b100, 0b01, 0b11, 0b000);
-
- INSN(fmovs, 0b000, 0b00, 0b00, 0b110);
- INSN(fmovd, 0b100, 0b01, 0b00, 0b111);
-
- INSN(fmovhid, 0b100, 0b10, 0b01, 0b110);
-
-#undef INSN
-
-#define INSN(NAME, op31, type, rmode, opcode) \
- __ NAME(F2, r3);
-
- INSN(fmovs, 0b000, 0b00, 0b00, 0b111);
- INSN(fmovd, 0b100, 0b01, 0b00, 0b111);
-
- INSN(fmovhid, 0b100, 0b10, 0b01, 0b110);
-
-#undef INSN
-
-#define INSN(NAME, op31, type, op, opcode2) \
- __ NAME(F0, F1);
-#define INSN1(NAME, op31, type, op, opcode2) \
- __ NAME(F1);
-
- INSN(fcmps, 0b000, 0b00, 0b00, 0b00000);
- INSN1(fcmps, 0b000, 0b00, 0b00, 0b01000);
- INSN(fcmpes, 0b000, 0b00, 0b00, 0b10000);
- INSN1(fcmpes, 0b000, 0b00, 0b00, 0b11000);
-
- INSN(fcmpd, 0b000, 0b01, 0b00, 0b00000);
- INSN1(fcmpd, 0b000, 0b01, 0b00, 0b01000);
- INSN(fcmped, 0b000, 0b01, 0b00, 0b10000);
- INSN1(fcmped, 0b000, 0b01, 0b00, 0b11000);
-
-#undef INSN
+
Disassembler::decode(entry, __ pc());
@@ -623,7 +410,94 @@ asm_util::encode_immediate_v2(int is32, uint64_t imm)
// ------------- Stolen from binutils end -------------------------------------
-bool Assembler_aarch64::operand_valid_for_logical_immdiate(int is32, uint64_t imm) {
+bool Assembler::operand_valid_for_logical_immdiate(int is32, uint64_t imm) {
return encode_immediate_v2(is32, imm) != 0xffffffff;
}
+int AbstractAssembler::code_fill_byte() { Unimplemented(); }
+
+// added to make this compile
+
+REGISTER_DEFINITION(Register, noreg);
+
+void MacroAssembler::call_VM_base(Register oop_result,
+ Register java_thread,
+ Register last_java_sp,
+ address entry_point,
+ int number_of_arguments,
+ bool check_exceptions) { Unimplemented(); }
+
+void MacroAssembler::call_VM(Register oop_result,
+ address entry_point,
+ bool check_exceptions) { Unimplemented(); }
+
+void MacroAssembler::call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1,
+ bool check_exceptions) { Unimplemented(); }
+
+
+void MacroAssembler::call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1,
+ Register arg_2,
+ bool check_exceptions) { Unimplemented(); }
+
+void MacroAssembler::call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1,
+ Register arg_2,
+ Register arg_3,
+ bool check_exceptions) { Unimplemented(); }
+
+void MacroAssembler::call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ int number_of_arguments,
+ bool check_exceptions) { Unimplemented(); }
+
+void MacroAssembler::call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1,
+ bool check_exceptions) { Unimplemented(); }
+
+void MacroAssembler::call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1,
+ Register arg_2,
+ bool check_exceptions) { Unimplemented(); }
+
+void MacroAssembler::call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1,
+ Register arg_2,
+ Register arg_3,
+ bool check_exceptions) { Unimplemented(); }
+
+void MacroAssembler::check_and_handle_earlyret(Register java_thread) {Unimplemented(); }
+
+void MacroAssembler::align(int modulus) { Unimplemented();}
+
+void MacroAssembler::check_and_handle_popframe(Register java_thread) { Unimplemented(); }
+
+RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
+ Register tmp,
+ int offset) { Unimplemented(); return RegisterOrConstant(r0); }
+
+void MacroAssembler::verify_oop(Register reg, const char* s) { Unimplemented(); }
+
+void MacroAssembler::stop(const char* msg) { Unimplemented(); }
+
+void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) { Unimplemented(); }
+
+void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { Unimplemented(); }
+
+void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { Unimplemented(); }
+
+void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { Unimplemented(); }
+
+void MacroAssembler::null_check(Register reg, int offset) { Unimplemented(); }
+
diff --git a/src/cpu/aarch64/vm/assembler_aarch64.hpp b/src/cpu/aarch64/vm/assembler_aarch64.hpp
index 064b9b507..da44e0477 100644
--- a/src/cpu/aarch64/vm/assembler_aarch64.hpp
+++ b/src/cpu/aarch64/vm/assembler_aarch64.hpp
@@ -36,22 +36,22 @@ namespace asm_util {
using namespace asm_util;
-class Assembler_aarch64;
+class Assembler;
-class Instruction {
+class Instruction_aarch64 {
unsigned insn;
unsigned bits;
- Assembler_aarch64 *assem;
+ Assembler *assem;
public:
- Instruction(class Assembler_aarch64 *as) {
+ Instruction_aarch64(class Assembler *as) {
bits = 0;
insn = 0;
assem = as;
}
- ~Instruction();
+ ~Instruction_aarch64();
unsigned &get_insn() { return insn; }
unsigned &get_bits() { return bits; }
@@ -97,11 +97,7 @@ public:
f(r->encoding_nocheck(), lsb + 4, lsb);
}
- void rf(FloatRegister r, int lsb) {
- f(r->encoding_nocheck(), lsb + 4, lsb);
- }
-
- unsigned get(int msb = 31, int lsb = 0) {
+  unsigned getf(int msb = 31, int lsb = 0) {
int nbits = msb - lsb + 1;
unsigned mask = ((1U << nbits) - 1) << lsb;
    assert_cond((bits & mask) == mask);
@@ -115,7 +111,7 @@ public:
}
};
-#define starti Instruction do_not_use(this); set_current(&do_not_use)
+#define starti Instruction_aarch64 do_not_use(this); set_current(&do_not_use)
class Pre {
int _offset;
@@ -135,11 +131,12 @@ public:
Register reg() { return _r; }
};
-// Address_aarch64ing modes
-class Address_aarch64 VALUE_OBJ_CLASS_SPEC {
+// Addressing modes
+class Address VALUE_OBJ_CLASS_SPEC {
public:
enum mode { base_plus_offset, pre, post, pcrel,
- base_plus_offset_reg, base_plus_offset_reg_extended };
+ base_plus_offset_reg, base_plus_offset_reg_extended};
+ enum ScaleFactor { times_4, times_8 };
private:
Register _base;
Register _index;
@@ -149,59 +146,54 @@ class Address_aarch64 VALUE_OBJ_CLASS_SPEC {
int _scale;
public:
- Address_aarch64(Register r)
+ Address(Register r)
: _mode(base_plus_offset), _base(r), _offset(0) { }
- Address_aarch64(Register r, int o)
+ Address(Register r, int o)
: _mode(base_plus_offset), _base(r), _offset(o) { }
- Address_aarch64(Register r, Register r1, int scale = 0)
+ Address(Register r, Register r1, int scale = 0)
: _mode(base_plus_offset_reg), _base(r), _index(r1), _scale(scale) { }
- Address_aarch64(Pre p)
+ Address(Pre p)
: _mode(pre), _base(p.reg()), _offset(p.offset()) { }
- Address_aarch64(Post p)
+ Address(Post p)
: _mode(post), _base(p.reg()), _offset(p.offset()) { }
+ Address(address a) : _mode(pcrel), _adr(a) { }
- void encode(Instruction *i) {
- i->f(0b111, 29, 27);
- i->rf(_base, 5);
-
+ void encode(Instruction_aarch64 *i) {
switch(_mode) {
case base_plus_offset:
{
- unsigned size = i->get(31, 30);
- unsigned mask = (1 << size) - 1;
- if (_offset < 0 || _offset & mask)
- {
- i->f(0b00, 25, 24);
- i->f(0, 21), i->f(0b00, 11, 10);
- i->sf(_offset, 20, 12);
- } else {
- i->f(0b01, 25, 24);
- _offset >>= size;
- i->f(_offset, 21, 10);
- }
+ i->f(0b111, 29, 27), i->f(0b01, 25, 24);
+ unsigned shift = i->getf(31, 30);
+ assert_cond((_offset >> shift) << shift == _offset);
+ _offset >>= shift;
+ i->sf(_offset, 21, 10);
+ i->rf(_base, 5);
}
break;
case base_plus_offset_reg:
assert_cond(_scale == 0);
- i->f(0b00, 25, 24);
+ i->f(0b111, 29, 27), i->f(0b00, 25, 24);
i->f(1, 21);
i->rf(_index, 16);
- i->f(0b011, 15, 13); // Offset is always an X register
+ i->rf(_base, 5);
+      i->f(0b011, 15, 13); // Offset is always an X register
i->f(0, 12); // Shift is 0
i->f(0b10, 11, 10);
break;
case pre:
- i->f(0b00, 25, 24);
+ i->f(0b111, 29, 27), i->f(0b00, 25, 24);
i->f(0, 21), i->f(0b11, 11, 10);
i->f(_offset, 20, 12);
+ i->rf(_base, 5);
break;
case post:
- i->f(0b00, 25, 24);
+ i->f(0b111, 29, 27), i->f(0b00, 25, 24);
i->f(0, 21), i->f(0b01, 11, 10);
i->f(_offset, 20, 12);
+ i->rf(_base, 5);
break;
default:
@@ -210,24 +202,19 @@ class Address_aarch64 VALUE_OBJ_CLASS_SPEC {
}
};
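With the rename, the addressing modes read naturally at call sites. A sketch of the forms Address now supports, assuming `__` is the usual HotSpot assembler shorthand and r0/r1/r2 are defined elsewhere:

    __ ldr(r0, Address(r1));            // base only:              [x1]
    __ ldr(r0, Address(r1, 16));        // base + scaled offset:   [x1, #16]
    __ ldr(r0, Address(r1, r2));        // base + register offset: [x1, x2]
    __ str(r0, __ pre(r1, -16));        // pre-indexed:            [x1, #-16]!
    __ str(r0, __ post(r1, 16));        // post-indexed:           [x1], #16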
-namespace ext
-{
- enum operation { uxtb, uxth, uxtw, uxtx, sxtb, sxth, sxtw, sxtx };
-};
-
-class Assembler_aarch64 : public AbstractAssembler {
+class Assembler : public AbstractAssembler {
public:
- Address_aarch64 pre(Register base, int offset) {
- return Address_aarch64(Pre(base, offset));
+ Address pre(Register base, int offset) {
+ return Address(Pre(base, offset));
}
- Address_aarch64 post (Register base, int offset) {
- return Address_aarch64(Post(base, offset));
+  Address post(Register base, int offset) {
+ return Address(Post(base, offset));
}
- Instruction* current;
+ Instruction_aarch64* current;
public:
- void set_current(Instruction* i) { current = i; }
+ void set_current(Instruction_aarch64* i) { current = i; }
void f(unsigned val, int msb, int lsb) {
current->f(val, msb, lsb);
@@ -241,9 +228,6 @@ public:
void rf(Register reg, int lsb) {
current->rf(reg, lsb);
}
- void rf(FloatRegister reg, int lsb) {
- current->rf(reg, lsb);
- }
void fixed(unsigned value, unsigned mask) {
current->fixed(value, mask);
}
@@ -407,8 +391,7 @@ public:
f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0);
}
- enum condition_code
- {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV};
+ enum {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV};
#define INSN(NAME, cond) \
void NAME(address dest) { \
@@ -605,20 +588,6 @@ public:
#undef INSN
#define INSN(NAME, opc, V) \
- void NAME(FloatRegister Rt, address dest) { \
- long offset = (dest - pc()) >> 2; \
- starti; \
- f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24), \
- sf(offset, 23, 5); \
- rf((Register)Rt, 0); \
- }
-
- INSN(ldrs, 0b00, 1);
- INSN(ldrd, 0b01, 1);
-
-#undef INSN
-
-#define INSN(NAME, opc, V) \
void NAME(int prfop, address dest) { \
long offset = (dest - pc()) >> 2; \
starti; \
@@ -678,20 +647,19 @@ public:
#undef INSN
- // Load/store register (all modes)
- void ld_st2(Register Rt, Address_aarch64 adr, int size, int op, int V = 0) {
+ void ld_st2(Register Rt, Address adr, int size, int op) {
starti;
f(size, 31, 30);
f(op, 23, 22); // str
- f(V, 26); // general reg?
+ f(0, 26); // general reg
rf(Rt, 0);
adr.encode(current);
}
-#define INSN(NAME, size, op) \
- void NAME(Register Rt, Address_aarch64 adr) { \
- ld_st2(Rt, adr, size, op); \
- } \
+#define INSN(NAME, size, op) \
+ void NAME(Register Rt, Address adr) { \
+ ld_st2(Rt, adr, size, op); \
+ }
INSN(str, 0b11, 0b00);
INSN(strw, 0b10, 0b00);
@@ -703,481 +671,824 @@ public:
INSN(ldrb, 0b00, 0b01);
INSN(ldrh, 0b01, 0b01);
- INSN(ldrsb, 0b00, 0b11);
- INSN(ldrsh, 0b01, 0b11);
- INSN(ldrshw, 0b01, 0b10);
- INSN(ldrsw, 0b10, 0b10);
+#undef INSN
- INSN(prfm, 0b11, 0b10); // FIXME: PRFM should not be used with
- // writeback modes, but the assembler
- // doesn't enfore that.
+ Assembler(CodeBuffer* code) : AbstractAssembler(code) {
+ }
-#undef INSN
+ virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
+ Register tmp,
+                                                int offset) { Unimplemented(); return RegisterOrConstant(r0); }
-#define INSN(NAME, size, op) \
- void NAME(FloatRegister Rt, Address_aarch64 adr) { \
- ld_st2((Register)Rt, adr, size, op, 1); \
+ // Stack overflow checking
+ virtual void bang_stack_with_offset(int offset) {
}
- INSN(strd, 0b11, 0b00);
- INSN(strs, 0b10, 0b00);
- INSN(ldrd, 0b11, 0b01);
- INSN(ldrs, 0b10, 0b01);
+  bool operand_valid_for_logical_immediate(int is32, uint64_t imm);
-#undef INSN
+ enum Condition {
+ dummy
+ };
+};
- enum shift_kind { lsl, lsr, asr, ror };
-
- void op_shifted_reg(unsigned decode,
- Register Rd, Register Rn, Register Rm,
- enum shift_kind kind, unsigned shift,
- unsigned size, unsigned op) {
- f(size, 31);
- f(op, 30, 29);
- f(decode, 28, 24);
- rf(Rm, 16), rf(Rn, 5), rf(Rd, 0);
- f(shift, 15, 10);
- f(kind, 23, 22);
- }
+#undef starti
- // Logical (shifted regsiter)
-#define INSN(NAME, size, op, N) \
- void NAME(Register Rd, Register Rn, Register Rm, \
- enum shift_kind kind = lsl, unsigned shift = 0) { \
- starti; \
- f(N, 21); \
- op_shifted_reg(0b01010, Rd, Rn, Rm, kind, shift, size, op); \
- }
+inline Instruction_aarch64::~Instruction_aarch64() {
+ assem->emit();
+}
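This destructor is the other half of the starti idiom: each instruction method opens a scope with an Instruction_aarch64 on the stack, the f()/sf()/rf() calls accumulate bit fields into it, and the destructor emits the finished 32-bit word when the scope closes. For illustration, the str INSN defined above expands to roughly:

    void str(Register Rt, Address adr) {
      Instruction_aarch64 do_not_use(this);  // 'starti'
      set_current(&do_not_use);
      f(0b11, 31, 30);        // size = 64-bit
      f(0b00, 23, 22);        // opc  = store
      f(0, 26);               // general register, not FP/SIMD
      rf(Rt, 0);              // Rt in bits 4..0
      adr.encode(current);    // addressing-mode fields
    }                         // destructor fires here and emits the word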
- INSN(andr, 1, 0b00, 0);
- INSN(orr, 1, 0b01, 0);
- INSN(eor, 1, 0b10, 0);
- INSN(ands, 1, 0b10, 0);
- INSN(andw, 0, 0b00, 0);
- INSN(orrw, 0, 0b01, 0);
- INSN(eorw, 0, 0b10, 0);
- INSN(andsw, 0, 0b10, 0);
-
- INSN(bic, 1, 0b00, 1);
- INSN(orn, 1, 0b01, 1);
- INSN(eon, 1, 0b10, 1);
- INSN(bics, 1, 0b10, 1);
- INSN(bicw, 0, 0b00, 1);
- INSN(ornw, 0, 0b01, 1);
- INSN(eonw, 0, 0b10, 1);
- INSN(bicsw, 0, 0b10, 1);
+// Extra declarations needed to make this compile.
+// It is not yet clear which of these methods are really necessary.
+class AddressLiteral VALUE_OBJ_CLASS_SPEC {
+  friend class ArrayAddress;
+ protected:
+ // creation
+ AddressLiteral();
+ public:
+ AddressLiteral addr() { Unimplemented(); }
+};
-#undef INSN
+class ArrayAddress;
+class BiasedLockingCounters;
+
+class MacroAssembler: public Assembler {
+ friend class LIR_Assembler;
+
+ protected:
+
+ Address as_Address(AddressLiteral adr);
+ Address as_Address(ArrayAddress adr);
+
+ // Support for VM calls
+ //
+ // This is the base routine called by the different versions of call_VM_leaf. The interpreter
+ // may customize this version by overriding it for its purposes (e.g., to save/restore
+ // additional registers when doing a VM call).
+#ifdef CC_INTERP
+ // c++ interpreter never wants to use interp_masm version of call_VM
+ #define VIRTUAL
+#else
+ #define VIRTUAL virtual
+#endif
+
+ VIRTUAL void call_VM_leaf_base(
+ address entry_point, // the entry point
+ int number_of_arguments // the number of arguments to pop after the call
+ );
+
+ // This is the base routine called by the different versions of call_VM. The interpreter
+ // may customize this version by overriding it for its purposes (e.g., to save/restore
+ // additional registers when doing a VM call).
+ //
+  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
+  // returns the register which contains the thread upon return. If a thread register has been
+  // specified, the return value will correspond to that register. If no last_java_sp is specified
+  // (noreg) then rsp will be used instead.
+ VIRTUAL void call_VM_base( // returns the register containing the thread upon return
+ Register oop_result, // where an oop-result ends up if any; use noreg otherwise
+ Register java_thread, // the thread if computed before ; use noreg otherwise
+ Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
+ address entry_point, // the entry point
+ int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
+ bool check_exceptions // whether to check for pending exceptions after return
+ );
+
+ // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
+ // The implementation is only non-empty for the InterpreterMacroAssembler,
+ // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
+ virtual void check_and_handle_popframe(Register java_thread);
+ virtual void check_and_handle_earlyret(Register java_thread);
+
+ void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
+
+ // helpers for FPU flag access
+ // tmp is a temporary register, if none is available use noreg
+ void save_rax (Register tmp);
+ void restore_rax(Register tmp);
- // Add/subtract (shifted regsiter)
-#define INSN(NAME, size, op) \
- void NAME(Register Rd, Register Rn, Register Rm, \
- enum shift_kind kind = lsl, unsigned shift = 0) { \
- starti; \
- f(0, 21); \
- assert_cond(kind != ror); \
- op_shifted_reg(0b01011, Rd, Rn, Rm, kind, shift, size, op); \
- }
+ public:
+ MacroAssembler(CodeBuffer* code) : Assembler(code) {}
- INSN(add, 1, 0b000);
- INSN(adds, 1, 0b001);
- INSN(sub, 1, 0b10);
- INSN(subs, 1, 0b11);
- INSN(addw, 0, 0b000);
- INSN(addsw, 0, 0b001);
- INSN(subw, 0, 0b10);
- INSN(subsw, 0, 0b11);
+ // Support for NULL-checks
+ //
+ // Generates code that causes a NULL OS exception if the content of reg is NULL.
+ // If the accessed location is M[reg + offset] and the offset is known, provide the
+ // offset. No explicit code generation is needed if the offset is within a certain
+ // range (0 <= offset <= page_size).
+
+ void null_check(Register reg, int offset = -1);
+ static bool needs_explicit_null_check(intptr_t offset);
+
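The offset parameter lets most null checks ride on the hardware fault: a load from a small offset off a NULL base faults by itself, so no explicit code is needed. A sketch of the usual policy; the os::vm_page_size() bound is the convention in other ports and is assumed here:

    bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
      // Negative or beyond-a-page offsets may miss the protected page
      // at address zero, so they need an explicit check.
      return offset < 0 || offset >= os::vm_page_size();
    }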
+ // Required platform-specific helpers for Label::patch_instructions.
+ // They _shadow_ the declarations in AbstractAssembler, which are undefined.
+ void pd_patch_instruction(address branch, address target);
+#ifndef PRODUCT
+ static void pd_print_patched_instruction(address branch);
+#endif
+
+ // The following 4 methods return the offset of the appropriate move instruction
+
+ // Support for fast byte/short loading with zero extension (depending on particular CPU)
+ int load_unsigned_byte(Register dst, Address src);
+ int load_unsigned_short(Register dst, Address src);
+
+ // Support for fast byte/short loading with sign extension (depending on particular CPU)
+ int load_signed_byte(Register dst, Address src);
+ int load_signed_short(Register dst, Address src);
+
+ // Support for sign-extension (hi:lo = extend_sign(lo))
+ void extend_sign(Register hi, Register lo);
+
+ // Load and store values by size and signed-ness
+ void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
+ void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
+
+ // Support for inc/dec with optimal instruction selection depending on value
+
+ void increment(Register reg, int value = 1) { Unimplemented(); }
+ void decrement(Register reg, int value = 1) { Unimplemented(); }
+
+ void decrementl(Address dst, int value = 1);
+ void decrementl(Register reg, int value = 1);
+
+ void decrementq(Register reg, int value = 1);
+ void decrementq(Address dst, int value = 1);
+
+ void incrementl(Address dst, int value = 1);
+ void incrementl(Register reg, int value = 1);
+
+ void incrementq(Register reg, int value = 1);
+ void incrementq(Address dst, int value = 1);
+
+
+ // Support optimal SSE move instructions.
+
+ void incrementl(AddressLiteral dst);
+ void incrementl(ArrayAddress dst);
+
+ // Alignment
+ void align(int modulus);
+
+ // A 5 byte nop that is safe for patching (see patch_verified_entry)
+ void fat_nop();
+
+ // Stack frame creation/removal
+ void enter();
+ void leave();
+
+ // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information)
+ // The pointer will be loaded into the thread register.
+ void get_thread(Register thread);
+
+
+ // Support for VM calls
+ //
+ // It is imperative that all calls into the VM are handled via the call_VM macros.
+ // They make sure that the stack linkage is setup correctly. call_VM's correspond
+ // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
+
+
+ void call_VM(Register oop_result,
+ address entry_point,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1, Register arg_2,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1, Register arg_2, Register arg_3,
+ bool check_exceptions = true);
+
+ // Overloadings with last_Java_sp
+ void call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ int number_of_arguments = 0,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+               Register arg_1,
+               bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1, Register arg_2,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1, Register arg_2, Register arg_3,
+ bool check_exceptions = true);
+
+ // These always tightly bind to MacroAssembler::call_VM_base
+ // bypassing the virtual implementation
+ void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
+ void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
+ void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
+ void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
+ void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
+
+ void call_VM_leaf(address entry_point,
+ int number_of_arguments = 0);
+ void call_VM_leaf(address entry_point,
+ Register arg_1);
+ void call_VM_leaf(address entry_point,
+ Register arg_1, Register arg_2);
+ void call_VM_leaf(address entry_point,
+ Register arg_1, Register arg_2, Register arg_3);
+
+ // These always tightly bind to MacroAssembler::call_VM_leaf_base
+ // bypassing the virtual implementation
+ void super_call_VM_leaf(address entry_point);
+ void super_call_VM_leaf(address entry_point, Register arg_1);
+ void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
+ void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
+ void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
+
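A sketch of the call shape these declarations support once implemented; CAST_FROM_FN_PTR is the standard HotSpot macro, while the entry point and argument registers here are illustrative only:

    // e.g. from an exception-throwing path in the interpreter:
    __ call_VM(r0,   // oop result
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);   // exception name and message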
+ // last Java Frame (fills frame anchor)
+ void set_last_Java_frame(Register thread,
+ Register last_java_sp,
+ Register last_java_fp,
+ address last_java_pc);
+
+ // thread in the default location (r15_thread on 64bit)
+ void set_last_Java_frame(Register last_java_sp,
+ Register last_java_fp,
+ address last_java_pc);
+
+ void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc);
+
+ // thread in the default location (r15_thread on 64bit)
+ void reset_last_Java_frame(bool clear_fp, bool clear_pc);
+
+ // Stores
+ void store_check(Register obj); // store check for obj - register is destroyed afterwards
+ void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
+
+#ifndef SERIALGC
+
+ void g1_write_barrier_pre(Register obj,
+ Register pre_val,
+ Register thread,
+ Register tmp,
+ bool tosca_live,
+ bool expand_call);
+
+ void g1_write_barrier_post(Register store_addr,
+ Register new_val,
+ Register thread,
+ Register tmp,
+ Register tmp2);
+
+#endif // SERIALGC
+
+ // split store_check(Register obj) to enhance instruction interleaving
+ void store_check_part_1(Register obj);
+ void store_check_part_2(Register obj);
+
+ // C 'boolean' to Java boolean: x == 0 ? 0 : 1
+ void c2bool(Register x);
+
+ // C++ bool manipulation
+
+ void movbool(Register dst, Address src);
+ void movbool(Address dst, bool boolconst);
+ void movbool(Address dst, Register src);
+ void testbool(Register dst);
+
+ // oop manipulations
+ void load_klass(Register dst, Register src);
+ void store_klass(Register dst, Register src);
+
+ void load_heap_oop(Register dst, Address src);
+ void load_heap_oop_not_null(Register dst, Address src);
+ void store_heap_oop(Address dst, Register src);
+
+ // Used for storing NULL. All other oop constants should be
+ // stored using routines that take a jobject.
+ void store_heap_oop_null(Address dst);
+
+ void load_prototype_header(Register dst, Register src);
+
+ void store_klass_gap(Register dst, Register src);
+
+ // This dummy is to prevent a call to store_heap_oop from
+ // converting a zero (like NULL) into a Register by giving
+ // the compiler two choices it can't resolve
+
+ void store_heap_oop(Address dst, void* dummy);
+
+ void encode_heap_oop(Register r);
+ void decode_heap_oop(Register r);
+ void encode_heap_oop_not_null(Register r);
+ void decode_heap_oop_not_null(Register r);
+ void encode_heap_oop_not_null(Register dst, Register src);
+ void decode_heap_oop_not_null(Register dst, Register src);
+
+ void set_narrow_oop(Register dst, jobject obj);
+ void set_narrow_oop(Address dst, jobject obj);
+ void cmp_narrow_oop(Register dst, jobject obj);
+ void cmp_narrow_oop(Address dst, jobject obj);
+
+ // if heap base register is used - reinit it with the correct value
+ void reinit_heapbase();
+
+ DEBUG_ONLY(void verify_heapbase(const char* msg);)
+
+ // Int division/remainder for Java
+ // (as idivl, but checks for special case as described in JVM spec.)
+ // returns idivl instruction offset for implicit exception handling
+ int corrected_idivl(Register reg);
+
+ // Long division/remainder for Java
+ // (as idivq, but checks for special case as described in JVM spec.)
+ // returns idivq instruction offset for implicit exception handling
+ int corrected_idivq(Register reg);
+
+ void int3();
+
+ // Long operation macros for a 32bit cpu
+ // Long negation for Java
+ void lneg(Register hi, Register lo);
+
+ // Long multiplication for Java
+ // (destroys contents of eax, ebx, ecx and edx)
+ void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
+
+ // Long shifts for Java
+ // (semantics as described in JVM spec.)
+ void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f)
+ void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
+
+ // Long compare for Java
+ // (semantics as described in JVM spec.)
+ void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
+
+
+ // misc
+
+ // Sign extension
+ void sign_extend_short(Register reg);
+ void sign_extend_byte(Register reg);
+
+ // Division by power of 2, rounding towards 0
+ void division_with_shift(Register reg, int shift_value);
+
+ // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
+ //
+ // CF (corresponds to C0) if x < y
+ // PF (corresponds to C2) if unordered
+ // ZF (corresponds to C3) if x = y
+ //
+ // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
+ // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
+ void fcmp(Register tmp);
+ // Variant of the above which allows y to be further down the stack
+ // and which only pops x and y if specified. If pop_right is
+ // specified then pop_left must also be specified.
+ void fcmp(Register tmp, int index, bool pop_left, bool pop_right);
+
+ // Floating-point comparison for Java
+ // Compares the top-most stack entries on the FPU stack and stores the result in dst.
+ // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
+ // (semantics as described in JVM spec.)
+ void fcmp2int(Register dst, bool unordered_is_less);
+ // Variant of the above which allows y to be further down the stack
+ // and which only pops x and y if specified. If pop_right is
+ // specified then pop_left must also be specified.
+ void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);
+
+ // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
+ // tmp is a temporary register, if none is available use noreg
+ void fremr(Register tmp);
+
+
+ // Inlined sin/cos generator for Java; must not use CPU instruction
+ // directly on Intel as it does not have high enough precision
+  // outside of the range [-pi/4, pi/4]. Extra argument indicates the
+ // number of FPU stack slots in use; all but the topmost will
+ // require saving if a slow case is necessary. Assumes argument is
+ // on FP TOS; result is on FP TOS. No cpu registers are changed by
+ // this code.
+ void trigfunc(char trig, int num_fpu_regs_in_use = 1);
+
+ // branch to L if FPU flag C2 is set/not set
+ // tmp is a temporary register, if none is available use noreg
+ void jC2 (Register tmp, Label& L);
+ void jnC2(Register tmp, Label& L);
+
+ // don't think we need these
+#if 0
+ void push_IU_state();
+ void pop_IU_state();
+
+ void push_FPU_state();
+ void pop_FPU_state();
+
+ void push_CPU_state();
+ void pop_CPU_state();
+#endif
+
+  // Round reg up to a multiple of modulus (which must be a power of two)
+ void round_to(Register reg, int modulus);
+
+ // Callee saved registers handling
+ void push_callee_saved_registers();
+ void pop_callee_saved_registers();
+
+ // allocation
+ void eden_allocate(
+ Register obj, // result: pointer to object after successful allocation
+ Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+ int con_size_in_bytes, // object size in bytes if known at compile time
+ Register t1, // temp register
+ Label& slow_case // continuation point if fast allocation fails
+ );
+ void tlab_allocate(
+ Register obj, // result: pointer to object after successful allocation
+ Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+ int con_size_in_bytes, // object size in bytes if known at compile time
+ Register t1, // temp register
+ Register t2, // temp register
+ Label& slow_case // continuation point if fast allocation fails
+ );
+ Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
+ void incr_allocated_bytes(Register thread,
+ Register var_size_in_bytes, int con_size_in_bytes,
+ Register t1 = noreg);
+
+ // interface method calling
+ void lookup_interface_method(Register recv_klass,
+ Register intf_klass,
+ RegisterOrConstant itable_index,
+ Register method_result,
+ Register scan_temp,
+ Label& no_such_interface);
+
+ // Test sub_klass against super_klass, with fast and slow paths.
+
+ // The fast path produces a tri-state answer: yes / no / maybe-slow.
+ // One of the three labels can be NULL, meaning take the fall-through.
+ // If super_check_offset is -1, the value is loaded up from super_klass.
+ // No registers are killed, except temp_reg.
+ void check_klass_subtype_fast_path(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Label* L_success,
+ Label* L_failure,
+ Label* L_slow_path,
+ RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
+
+ // The rest of the type check; must be wired to a corresponding fast path.
+ // It does not repeat the fast path logic, so don't use it standalone.
+ // The temp_reg and temp2_reg can be noreg, if no temps are available.
+ // Updates the sub's secondary super cache as necessary.
+ // If set_cond_codes, condition codes will be Z on success, NZ on failure.
+ void check_klass_subtype_slow_path(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Register temp2_reg,
+ Label* L_success,
+ Label* L_failure,
+ bool set_cond_codes = false);
+
+ // Simplified, combined version, good for typical uses.
+ // Falls through on failure.
+ void check_klass_subtype(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Label& L_success);
+
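Typical use pairs the combined version with a fall-through failure path, e.g. (register names and the slow-path target are placeholders):

    Label ok;
    __ check_klass_subtype(robj_klass, rsuper_klass, rtmp, ok); // falls through on failure
    // ... failure: branch to the type-error slow path ...
    __ bind(ok);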
+ // method handles (JSR 292)
+ void check_method_handle_type(Register mtype_reg, Register mh_reg,
+ Register temp_reg,
+ Label& wrong_method_type);
+ void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
+ Register temp_reg);
+ void jump_to_method_handle_entry(Register mh_reg, Register temp_reg);
+ Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
+
+
+ //----
+ void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0
+
+ // Debugging
+
+ // only if +VerifyOops
+ void verify_oop(Register reg, const char* s = "broken oop");
+ void verify_oop_addr(Address addr, const char * s = "broken oop addr");
+
+ // only if +VerifyFPU
+ void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
+
+ // prints msg, dumps registers and stops execution
+ void stop(const char* msg);
+
+ // prints msg and continues
+ void warn(const char* msg);
+
+ static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
+ static void debug64(char* msg, int64_t pc, int64_t regs[]);
+
+ void os_breakpoint();
+
+ void untested() { stop("untested"); }
+
+ void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
+
+ void should_not_reach_here() { stop("should not reach here"); }
+
+ void print_CPU_state();
-#undef INSN
+ // Stack overflow checking
+ void bang_stack_with_offset(int offset) { Unimplemented(); }
- // Add/subtract (extended register)
-#define INSN(NAME, op) \
- void NAME(Register Rd, Register Rn, Register Rm, \
- ext::operation option, int amount) { \
- add_sub_extended_reg(op, 0b01011, Rd, Rn, Rm, 0b00, option, amount); \
- }
+  // Writes to successive stack pages until offset is reached, to check for
+  // stack overflow + shadow pages. Also clobbers tmp.
+ void bang_stack_size(Register size, Register tmp);
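bang_stack_with_offset is still a stub in this patch; conventionally it stores to the page 'offset' bytes below the stack pointer so an overflow surfaces as a fault at a predictable point. A hypothetical sketch of what the aarch64 version might eventually do — sub, sp, zr and rscratch1 are all assumptions at this stage:

    void MacroAssembler::bang_stack_with_offset(int offset) {
      sub(rscratch1, sp, offset);    // address of the page to touch
      str(zr, Address(rscratch1));   // write so the guard page faults
    }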
- void add_sub_extended_reg(unsigned op, unsigned decode,
- Register Rd, Register Rn, Register Rm,
- unsigned opt, ext::operation option, unsigned imm) {
- starti;
- f(op, 31, 29), f(decode, 28, 24), f(opt, 23, 22), f(1, 21);
- f(option, 15, 13), f(imm, 12, 10);
- rf(Rm, 16), rf(Rn, 5), rf(Rd, 0);
- }
+ virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
+ Register tmp,
+ int offset);
- INSN(addw, 0b000);
- INSN(addsw, 0b001);
- INSN(subw, 0b010);
- INSN(subsw, 0b011);
- INSN(add, 0b100);
- INSN(adds, 0b101);
- INSN(sub, 0b110);
- INSN(subs, 0b111);
+ // Support for serializing memory accesses between threads
+ void serialize_memory(Register thread, Register tmp);
-#undef INSN
+ void verify_tlab();
- // Add/subtract (with carry)
- void add_sub_carry(unsigned op, Register Rd, Register Rn, Register Rm) {
- starti;
- f(op, 31, 29);
- f(0b11010000, 28, 21);
- f(0b000000, 15, 10);
- rf(Rm, 16), rf(Rn, 5), rf(Rd, 0);
- }
+ // Biased locking support
+ // lock_reg and obj_reg must be loaded up with the appropriate values.
+ // swap_reg must be rax, and is killed.
+ // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
+ // be killed; if not supplied, push/pop will be used internally to
+ // allocate a temporary (inefficient, avoid if possible).
+ // Optional slow case is for implementations (interpreter and C1) which branch to
+ // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
+ // Returns offset of first potentially-faulting instruction for null
+ // check info (currently consumed only by C1). If
+ // swap_reg_contains_mark is true then returns -1 as it is assumed
+ // the calling code has already passed any potential faults.
+ int biased_locking_enter(Register lock_reg, Register obj_reg,
+ Register swap_reg, Register tmp_reg,
+ bool swap_reg_contains_mark,
+ Label& done, Label* slow_case = NULL,
+ BiasedLockingCounters* counters = NULL);
+ void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
- #define INSN(NAME, op) \
- void NAME(Register Rd, Register Rn, Register Rm) { \
- add_sub_carry(op, Rd, Rn, Rm); \
- }
- INSN(adcw, 0b000);
- INSN(adcsw, 0b001);
- INSN(sbcw, 0b010);
- INSN(sbcsw, 0b011);
- INSN(adc, 0b100);
- INSN(adcs, 0b101);
- INSN(sbc,0b110);
- INSN(sbcs, 0b111);
+ Condition negate_condition(Condition cond);
-#undef INSN
+  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
+  // operands. In general the names are modified to avoid hiding the instruction in Assembler
+  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
+  // here in MacroAssembler. The major exception to this rule is call.
- // Conditional compare (both kinds)
- void conditional_compare(unsigned op, int o2, int o3,
- Register Rn, unsigned imm5, unsigned nzcv,
- unsigned cond) {
- f(op, 31, 29);
- f(0b11010010, 28, 21);
- f(cond, 15, 12);
- f(o2, 10);
- f(o3, 4);
- f(nzcv, 3, 0);
- f(imm5, 20, 16), rf(Rn, 5);
- }
+ // Arithmetics
-#define INSN(NAME, op) \
- void NAME(Register Rn, Register Rm, int imm, condition_code cond) { \
- starti; \
- f(0, 11); \
- conditional_compare(op, 0, 0, Rn, (uintptr_t)Rm, imm, cond); \
- } \
- \
- void NAME(Register Rn, unsigned imm5, int imm, condition_code cond) { \
- starti; \
- f(1, 11); \
- conditional_compare(op, 0, 0, Rn, imm5, imm, cond); \
- }
- INSN(ccmnw, 0b001);
- INSN(ccmpw, 0b011);
- INSN(ccmn, 0b101);
- INSN(ccmp, 0b111);
+ void addptr(Address dst, int32_t src) { Unimplemented(); }
+ void addptr(Address dst, Register src);
-#undef INSN
+ void addptr(Register dst, Address src) { Unimplemented(); }
+ void addptr(Register dst, int32_t src);
+ void addptr(Register dst, Register src);
+ void addptr(Register dst, RegisterOrConstant src) { Unimplemented(); }
- // Conditional select
- void conditional_select(unsigned op, unsigned op2,
- Register Rd, Register Rn, Register Rm,
- unsigned cond) {
- starti;
- f(op, 31, 29);
- f(0b11010100, 28, 21);
- f(cond, 15, 12);
- f(0, 11, 10);
- rf(Rm, 16), rf(Rn, 5), rf(Rd, 0);
- }
+ void andptr(Register dst, int32_t src);
+ void andptr(Register src1, Register src2) { Unimplemented(); }
-#define INSN(NAME, op, op2) \
- void NAME(Register Rd, Register Rn, Register Rm, condition_code cond) { \
- conditional_select(op, op2, Rd, Rn, Rm, cond); \
- }
+ void cmp8(AddressLiteral src1, int imm);
- INSN(cselw, 0b000, 0b00);
- INSN(csincw, 0b000, 0b01);
- INSN(csinvw, 0b010, 0b00);
- INSN(csnegw, 0b010, 0b01);
- INSN(csel, 0b100, 0b00);
- INSN(csinc, 0b000, 0b01);
- INSN(csinv, 0b110, 0b00);
- INSN(csneg, 0b110, 0b01);
+ // renamed to drag out the casting of address to int32_t/intptr_t
+ void cmp32(Register src1, int32_t imm);
-#undef INSN
+ void cmp32(AddressLiteral src1, int32_t imm);
+ // compare reg - mem, or reg - &mem
+ void cmp32(Register src1, AddressLiteral src2);
- // Data processing
- void data_processing(unsigned op29, unsigned opcode,
- Register Rd, Register Rn) {
- f(op29, 31, 29), f(0b11010110, 28, 21);
- f(opcode, 15, 10);
- rf(Rn, 5), rf(Rd, 0);
- }
+ void cmp32(Register src1, Address src2);
- // (1 source)
-#define INSN(NAME, op29, opcode2, opcode) \
- void NAME(Register Rd, Register Rn) { \
- starti; \
- f(opcode2, 20, 16); \
- data_processing(op29, opcode, Rd, Rn); \
- }
+#ifndef _LP64
+ void cmpoop(Address dst, jobject obj);
+ void cmpoop(Register dst, jobject obj);
+#endif // _LP64
- INSN(rbitw, 0b010, 0b00000, 0b00000);
- INSN(rev16w, 0b010, 0b00000, 0b00001);
- INSN(revw, 0b010, 0b00000, 0b00010);
- INSN(clzw, 0b010, 0b00000, 0b00100);
- INSN(clsw, 0b010, 0b00000, 0b00101);
-
- INSN(rbit, 0b110, 0b00000, 0b00000);
- INSN(rev16, 0b110, 0b00000, 0b00001);
- INSN(rev32, 0b110, 0b00000, 0b00010);
- INSN(rev, 0b110, 0b00000, 0b00011);
- INSN(clz, 0b110, 0b00000, 0b00100);
- INSN(cls, 0b110, 0b00000, 0b00101);
+  // NOTE: src2 must be the lvalue. This is NOT a mem-mem compare
+ void cmpptr(Address src1, AddressLiteral src2);
-#undef INSN
+ void cmpptr(Register src1, AddressLiteral src2);
- // (2 sources)
-#define INSN(NAME, op29, opcode) \
- void NAME(Register Rd, Register Rn, Register Rm) { \
- starti; \
- rf(Rm, 16); \
- data_processing(op29, opcode, Rd, Rn); \
- }
+ void cmpptr(Register src1, Register src2) { Unimplemented(); }
+ void cmpptr(Register src1, Address src2) { Unimplemented(); }
+ // void cmpptr(Address src1, Register src2) { Unimplemented(); }
- INSN(udivw, 0b000, 0b000010);
- INSN(sdivw, 0b000, 0b000011);
- INSN(lslvw, 0b000, 0b001000);
- INSN(lsrvw, 0b000, 0b001001);
- INSN(asrvw, 0b000, 0b001010);
- INSN(rorvw, 0b000, 0b001011);
+ void cmpptr(Register src1, int32_t src2) { Unimplemented(); }
+ void cmpptr(Address src1, int32_t src2) { Unimplemented(); }
- INSN(udiv, 0b100, 0b000010);
- INSN(sdiv, 0b100, 0b000011);
- INSN(lslv, 0b100, 0b001000);
- INSN(lsrv, 0b100, 0b001001);
- INSN(asrv, 0b100, 0b001010);
- INSN(rorv, 0b100, 0b001011);
+  // cmp64 to avoid hiding cmpq
+ void cmp64(Register src1, AddressLiteral src);
-#undef INSN
-
- // (3 sources)
- void data_processing(unsigned op54, unsigned op31, unsigned o0,
- Register Rd, Register Rn, Register Rm,
- Register Ra) {
- starti;
- f(op54, 31, 29), f(0b11011, 28, 24);
- f(op31, 23, 21), f(o0, 15);
- rf(Rm, 16), rf(Ra, 10), rf(Rn, 5), rf(Rd, 0);
- }
+ void cmpxchgptr(Register reg, Address adr);
-#define INSN(NAME, op54, op31, o0) \
- void NAME(Register Rd, Register Rn, Register Rm, Register Ra) { \
- data_processing(op54, op31, o0, Rd, Rn, Rm, Ra); \
- }
+ void locked_cmpxchgptr(Register reg, AddressLiteral adr);
- INSN(maddw, 0b000, 0b000, 0);
- INSN(msubw, 0b000, 0b000, 1);
- INSN(madd, 0b100, 0b000, 0);
- INSN(msub, 0b100, 0b000, 1);
- INSN(smaddl, 0b100, 0b001, 0);
- INSN(smsubl, 0b100, 0b001, 1);
- INSN(umaddl, 0b100, 0b101, 0);
- INSN(umsubl, 0b100, 0b101, 1);
-#undef INSN
+ void imulptr(Register dst, Register src) { Unimplemented(); }
-#define INSN(NAME, op54, op31, o0) \
- void NAME(Register Rd, Register Rn, Register Rm) { \
- data_processing(op54, op31, o0, Rd, Rn, Rm, (Register)31); \
- }
- INSN(smulh, 0b100, 0b010, 0);
- INSN(umulh, 0b100, 0b110, 0);
+ void negptr(Register dst) { Unimplemented(); }
-#undef INSN
+ void notptr(Register dst) { Unimplemented(); }
- // Floating-point data-processing (1 source)
- void data_processing(unsigned op31, unsigned type, unsigned opcode,
- FloatRegister Vd, FloatRegister Vn) {
- starti;
- f(op31, 31, 29);
- f(0b11110, 28, 24);
- f(type, 23, 22), f(1, 21), f(opcode, 20, 15), f(0b10000, 14, 10);
- rf(Vn, 5), rf(Vd, 0);
- }
+ void shlptr(Register dst, int32_t shift);
+ void shlptr(Register dst) { Unimplemented(); }
-#define INSN(NAME, op31, type, opcode) \
- void NAME(FloatRegister Vd, FloatRegister Vn) { \
- data_processing(op31, type, opcode, Vd, Vn); \
- }
+ void shrptr(Register dst, int32_t shift);
+ void shrptr(Register dst) { Unimplemented(); }
- INSN(fmovs, 0b000, 0b00, 0b000000);
- INSN(fabss, 0b000, 0b00, 0b000001);
- INSN(fnegs, 0b000, 0b00, 0b000010);
- INSN(fsqrts, 0b000, 0b00, 0b000011);
- INSN(fcvts, 0b000, 0b00, 0b000101);
+ void sarptr(Register dst) { Unimplemented(); }
+ void sarptr(Register dst, int32_t src) { Unimplemented(); }
- INSN(fmovd, 0b000, 0b01, 0b000000);
- INSN(fabsd, 0b000, 0b01, 0b000001);
- INSN(fnegd, 0b000, 0b01, 0b000010);
- INSN(fsqrtd, 0b000, 0b01, 0b000011);
- INSN(fcvtd, 0b000, 0b01, 0b000100);
+ void subptr(Address dst, int32_t src) { Unimplemented(); }
-#undef INSN
+ void subptr(Register dst, Address src) { Unimplemented(); }
+ void subptr(Register dst, int32_t src);
+  // Force generation of a 4 byte immediate value even if it fits into 8 bits
+ void subptr_imm32(Register dst, int32_t src);
+ void subptr(Register dst, Register src);
+ void subptr(Register dst, RegisterOrConstant src) { Unimplemented(); }
- // Floating-point data-processing (2 source)
- void data_processing(unsigned op31, unsigned type, unsigned opcode,
- FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) {
- starti;
- f(op31, 31, 29);
- f(0b11110, 28, 24);
- f(type, 23, 22), f(1, 21), f(opcode, 15, 12), f(0b10, 11, 10);
- rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);
- }
+ void sbbptr(Address dst, int32_t src) { Unimplemented(); }
+ void sbbptr(Register dst, int32_t src) { Unimplemented(); }
-#define INSN(NAME, op31, type, opcode) \
- void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) { \
- data_processing(op31, type, opcode, Vd, Vn, Vm); \
- }
+ void xchgptr(Register src1, Register src2) { Unimplemented(); }
+ void xchgptr(Register src1, Address src2) { Unimplemented(); }
- INSN(fmuls, 0b000, 0b00, 0b0000);
- INSN(fdivs, 0b000, 0b00, 0b0001);
- INSN(fadds, 0b000, 0b00, 0b0010);
- INSN(fsubs, 0b000, 0b00, 0b0011);
- INSN(fnmuls, 0b000, 0b00, 0b1000);
+ void xaddptr(Address src1, Register src2) { Unimplemented(); }
- INSN(fmuld, 0b000, 0b01, 0b0000);
- INSN(fdivd, 0b000, 0b01, 0b0001);
- INSN(faddd, 0b000, 0b01, 0b0010);
- INSN(fsubd, 0b000, 0b01, 0b0011);
- INSN(fnmuld, 0b000, 0b01, 0b1000);
-#undef INSN
- // Floating-point data-processing (3 source)
- void data_processing(unsigned op31, unsigned type, unsigned o1, unsigned o0,
- FloatRegister Vd, FloatRegister Vn, FloatRegister Vm,
- FloatRegister Va) {
- starti;
- f(op31, 31, 29);
- f(0b11111, 28, 24);
- f(type, 23, 22), f(o1, 21), f(o1, 15);
- rf(Vm, 16), rf(Vn, 10), rf(Vn, 5), rf(Vd, 0);
- }
+ // Helper functions for statistics gathering.
+ // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
+ void cond_inc32(Condition cond, AddressLiteral counter_addr);
+ // Unconditional atomic increment.
+ void atomic_incl(AddressLiteral counter_addr);
-#define INSN(NAME, op31, type, o1, o0) \
- void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm, \
- FloatRegister Va) { \
- data_processing(op31, type, o1, o0, Vd, Vn, Vm, Va); \
- }
+ void lea(Register dst, AddressLiteral adr);
+ void lea(Address dst, AddressLiteral adr);
+ void lea(Register dst, Address adr) { Unimplemented(); }
- INSN(fmadds, 0b000, 0b00, 0, 0);
- INSN(fmsubs, 0b000, 0b00, 0, 1);
- INSN(fnmadds, 0b000, 0b00, 0, 0);
- INSN(fnmsubs, 0b000, 0b00, 0, 1);
+ void leal32(Register dst, Address src) { Unimplemented(); }
- INSN(fmadd, 0b000, 0b01, 0, 0);
- INSN(fmsubd, 0b000, 0b01, 0, 1);
- INSN(fnmadd, 0b000, 0b01, 0, 0);
- INSN(fnmsub, 0b000, 0b01, 0, 1);
+ // Import other testl() methods from the parent class or else
+ // they will be hidden by the following overriding declaration.
+ void testl(Register dst, AddressLiteral src);
-#undef INSN
+ void orptr(Register dst, Address src) { Unimplemented(); }
+ void orptr(Register dst, Register src) { Unimplemented(); }
+ void orptr(Register dst, int32_t src) { Unimplemented(); }
- // Floating-point<->integer conversions
- void float_int_convert(unsigned op31, unsigned type,
- unsigned rmode, unsigned opcode,
- Register Rd, Register Rn) {
- starti;
- f(op31, 31, 29);
- f(0b11110, 28, 24);
- f(type, 23, 22), f(1, 21), f(rmode, 20, 19);
- f(opcode, 18, 16), f(0b000000, 15, 10);
- rf(Rn, 5), rf(Rd, 0);
- }
+ void testptr(Register src, int32_t imm32) { Unimplemented(); }
+ void testptr(Register src1, Register src2);
-#define INSN(NAME, op31, type, rmode, opcode) \
- void NAME(Register Rd, FloatRegister Vn) { \
- float_int_convert(op31, type, rmode, opcode, Rd, (Register)Vn); \
- }
+ void xorptr(Register dst, Register src) { Unimplemented(); }
+ void xorptr(Register dst, Address src) { Unimplemented(); }
- INSN(fcvtszw, 0b000, 0b00, 0b11, 0b000);
- INSN(fcvtzs, 0b000, 0b01, 0b11, 0b000);
- INSN(fcvtzdw, 0b100, 0b00, 0b11, 0b000);
- INSN(fcvtszd, 0b100, 0b01, 0b11, 0b000);
+ // Calls
- INSN(fmovs, 0b000, 0b00, 0b00, 0b110);
- INSN(fmovd, 0b100, 0b01, 0b00, 0b110);
+ void call(Label& L, relocInfo::relocType rtype);
+ void call(Register entry);
- INSN(fmovhid, 0b100, 0b10, 0b01, 0b110);
+  // NOTE: this call transfers to the effective address of entry NOT
+  // the address contained by entry, because that is more natural
+  // for jumps/calls.
+ void call(AddressLiteral entry);
-#undef INSN
+ // Jumps
-#define INSN(NAME, op31, type, rmode, opcode) \
- void NAME(FloatRegister Vd, Register Rn) { \
- float_int_convert(op31, type, rmode, opcode, (Register)Vd, Rn); \
- }
+  // NOTE: these jumps transfer to the effective address of dst NOT
+  // the address contained by dst, because that is more natural
+  // for jumps/calls.
+ void jump(AddressLiteral dst);
+ void jump_cc(Condition cc, AddressLiteral dst);
- INSN(fmovs, 0b000, 0b00, 0b00, 0b111);
- INSN(fmovd, 0b100, 0b01, 0b00, 0b111);
+  // 32bit can do a case table jump in one instruction but we no longer allow the base
+  // to be installed in the Address class. This jump transfers to the address
+  // contained in the location described by entry (not the address of entry)
+ void jump(ArrayAddress entry);
- INSN(fmovhid, 0b100, 0b10, 0b01, 0b111);
+ // Floating
-#undef INSN
+ void fadd_s(Address src) { Unimplemented(); }
+ void fadd_s(AddressLiteral src) { Unimplemented(); }
- // Floating-point compare
- void float_compare(unsigned op31, unsigned type,
- unsigned op, unsigned op2,
- FloatRegister Vn, FloatRegister Vm = (FloatRegister)0) {
- starti;
- f(op31, 31, 29);
- f(0b11110, 28, 24);
- f(type, 23, 22), f(1, 21);
- f(op, 15, 14), f(0b1000, 13, 10), f(op2, 4, 0);
- rf(Vn, 5), rf(Vm, 16);
- }
+ void fldcw(Address src) { Unimplemented(); }
+ void fldcw(AddressLiteral src);
+ void fld_s(int index) { Unimplemented(); }
+ void fld_s(Address src) { Unimplemented(); }
+ void fld_s(AddressLiteral src);
-#define INSN(NAME, op31, type, op, op2) \
- void NAME(FloatRegister Vn, FloatRegister Vm) { \
- float_compare(op31, type, op, op2, Vn, Vm); \
- }
+ void fld_d(Address src) { Unimplemented(); }
+ void fld_d(AddressLiteral src);
-#define INSN1(NAME, op31, type, op, op2) \
- void NAME(FloatRegister Vn) { \
- float_compare(op31, type, op, op2, Vn); \
- }
+ void fld_x(Address src) { Unimplemented(); }
+ void fld_x(AddressLiteral src);
- INSN(fcmps, 0b000, 0b00, 0b00, 0b00000);
- INSN1(fcmps, 0b000, 0b00, 0b00, 0b01000);
- INSN(fcmpes, 0b000, 0b00, 0b00, 0b10000);
- INSN1(fcmpes, 0b000, 0b00, 0b00, 0b11000);
+ void fmul_s(Address src) { Unimplemented(); }
+ void fmul_s(AddressLiteral src) { Unimplemented(); }
- INSN(fcmpd, 0b000, 0b01, 0b00, 0b00000);
- INSN1(fcmpd, 0b000, 0b01, 0b00, 0b01000);
- INSN(fcmped, 0b000, 0b01, 0b00, 0b10000);
- INSN1(fcmped, 0b000, 0b01, 0b00, 0b11000);
+ void ldmxcsr(Address src) { Unimplemented(); }
+ void ldmxcsr(AddressLiteral src);
-#undef INSN
-#undef INSN1
+  // compute pow(x,y) and exp(x) with x86 instructions. These don't cover
+  // all corner cases and may result in NaN, requiring fallback to a
+  // runtime call.
+ void fast_pow();
+ void fast_exp();
- Assembler_aarch64(CodeBuffer* code) : AbstractAssembler(code) {
- }
+ // computes exp(x). Fallback to runtime call included.
+ void exp_with_fallback(int num_fpu_regs_in_use) { Unimplemented(); }
+ // computes pow(x,y). Fallback to runtime call included.
+ void pow_with_fallback(int num_fpu_regs_in_use) { Unimplemented(); }
- virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
- Register tmp,
- int offset) {
- }
+public:
- // Stack overflow checking
- virtual void bang_stack_with_offset(int offset) {
- }
+ // Data
- bool operand_valid_for_logical_immdiate(int is32, uint64_t imm);
-};
+ void cmov32( Condition cc, Register dst, Address src);
+ void cmov32( Condition cc, Register dst, Register src);
-#undef starti
+ void cmov( Condition cc, Register dst, Register src) { Unimplemented(); }
-Instruction::~Instruction() {
- assem->emit();
-}
+ void cmovptr(Condition cc, Register dst, Address src) { Unimplemented(); }
+ void cmovptr(Condition cc, Register dst, Register src) { Unimplemented(); }
+
+ void movoop(Register dst, jobject obj);
+ void movoop(Address dst, jobject obj);
+
+ void movptr(ArrayAddress dst, Register src);
+ // can this do an lea?
+ void movptr(Register dst, ArrayAddress src);
+
+ void movptr(Register dst, Address src);
+
+ void movptr(Register dst, AddressLiteral src);
+
+ void movptr(Register dst, intptr_t src);
+ void movptr(Register dst, Register src);
+ void movptr(Address dst, intptr_t src);
+
+ void movptr(Address dst, Register src);
+
+ void movptr(Register dst, RegisterOrConstant src) { Unimplemented(); }
+
+#ifdef _LP64
+ // Generally the next two are only used for moving NULL
+ // Although there are situations in initializing the mark word where
+ // they could be used. They are dangerous.
+
+ // They only exist on LP64 so that int32_t and intptr_t are not the same
+ // and we have ambiguous declarations.
+
+ void movptr(Address dst, int32_t imm32);
+ void movptr(Register dst, int32_t imm32);
+#endif // _LP64
+
+ // to avoid hiding movl
+ void mov32(AddressLiteral dst, Register src);
+ void mov32(Register dst, AddressLiteral src);
+
+ // to avoid hiding movb
+ void movbyte(ArrayAddress dst, int src);
+
+ // Can push value or effective address
+ void pushptr(AddressLiteral src);
+
+ void pushptr(Address src) { Unimplemented(); }
+ void popptr(Address src) { Unimplemented(); }
+
+ void pushoop(jobject obj);
+
+  // sign extend, as we need an l to ptr-sized element
+ void movl2ptr(Register dst, Address src) { Unimplemented(); }
+ void movl2ptr(Register dst, Register src) { Unimplemented(); }
+
+ // C2 compiled method's prolog code.
+ void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);
+
+#undef VIRTUAL
+};
+
+inline bool AbstractAssembler::pd_check_instruction_mark() { Unimplemented(); return false; }
+class BiasedLockingCounters;
#endif // CPU_AARCH64_VM_ASSEMBLER_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/assembler_aarch64.inline.hpp b/src/cpu/aarch64/vm/assembler_aarch64.inline.hpp
new file mode 100644
index 000000000..1016654a7
--- /dev/null
+++ b/src/cpu/aarch64/vm/assembler_aarch64.inline.hpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_ASSEMBLER_AARCH64_INLINE_HPP
+#define CPU_AARCH64_VM_ASSEMBLER_AARCH64_INLINE_HPP
+
+#include "asm/assembler.inline.hpp"
+#include "asm/codeBuffer.hpp"
+#include "code/codeCache.hpp"
+#include "runtime/handles.inline.hpp"
+
+inline void MacroAssembler::pd_patch_instruction(address branch, address target) { Unimplemented(); }
+
+#ifndef PRODUCT
+
+inline void MacroAssembler::pd_print_patched_instruction(address branch) { Unimplemented(); }
+
+#endif // ndef PRODUCT
+
+#endif // CPU_AARCH64_VM_ASSEMBLER_AARCH64_INLINE_HPP
diff --git a/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.cpp b/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.cpp
new file mode 100644
index 000000000..9798c1bd4
--- /dev/null
+++ b/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "interpreter/bytecodeInterpreter.hpp"
+#include "interpreter/bytecodeInterpreter.inline.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "oops/methodDataOop.hpp"
+#include "oops/methodOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#ifdef TARGET_ARCH_MODEL_x86_32
+# include "interp_masm_x86_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_x86_64
+# include "interp_masm_x86_64.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "interp_masm_aarch64.hpp"
+#endif
+
+#ifdef CC_INTERP
+
+#endif // CC_INTERP (all)
diff --git a/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.hpp b/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.hpp
new file mode 100644
index 000000000..e4eaa7512
--- /dev/null
+++ b/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.hpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_BYTECODEINTERPRETER_AARCH64_HPP
+#define CPU_AARCH64_VM_BYTECODEINTERPRETER_AARCH64_HPP
+
+// Platform specific for C++ based Interpreter
+
+private:
+
+ interpreterState _self_link; /* Previous interpreter state */ /* sometimes points to self??? */
+ address _result_handler; /* temp for saving native result handler */
+ intptr_t* _sender_sp; /* sender's sp before stack (locals) extension */
+
+ address _extra_junk1; /* temp to save on recompiles */
+ address _extra_junk2; /* temp to save on recompiles */
+ address _extra_junk3; /* temp to save on recompiles */
+ // address dummy_for_native2; /* a native frame result handler would be here... */
+ // address dummy_for_native1; /* native result type stored here in a interpreter native frame */
+ address _extra_junk4; /* temp to save on recompiles */
+ address _extra_junk5; /* temp to save on recompiles */
+ address _extra_junk6; /* temp to save on recompiles */
+public:
+ // we have an interpreter frame...
+inline intptr_t* sender_sp() {
+ return _sender_sp;
+}
+
+// The interpreter always has the frame anchor fully setup so we don't
+// have to do anything going to vm from the interpreter. On return
+// we do have to clear the flags in case they were modified to
+// maintain the stack walking invariants.
+//
+#define SET_LAST_JAVA_FRAME()
+
+#define RESET_LAST_JAVA_FRAME()
+
+/*
+ * Macros for accessing the stack.
+ */
+#undef STACK_INT
+#undef STACK_FLOAT
+#undef STACK_ADDR
+#undef STACK_OBJECT
+#undef STACK_DOUBLE
+#undef STACK_LONG
+
+// JavaStack Implementation
+
+#define GET_STACK_SLOT(offset) (*((intptr_t*) &topOfStack[-(offset)]))
+#define STACK_SLOT(offset) ((address) &topOfStack[-(offset)])
+#define STACK_ADDR(offset) (*((address *) &topOfStack[-(offset)]))
+#define STACK_INT(offset) (*((jint*) &topOfStack[-(offset)]))
+#define STACK_FLOAT(offset) (*((jfloat *) &topOfStack[-(offset)]))
+#define STACK_OBJECT(offset) (*((oop *) &topOfStack [-(offset)]))
+#define STACK_DOUBLE(offset) (((VMJavaVal64*) &topOfStack[-(offset)])->d)
+#define STACK_LONG(offset) (((VMJavaVal64 *) &topOfStack[-(offset)])->l)
+
+#define SET_STACK_SLOT(value, offset) (*(intptr_t*)&topOfStack[-(offset)] = *(intptr_t*)(value))
+#define SET_STACK_ADDR(value, offset) (*((address *)&topOfStack[-(offset)]) = (value))
+#define SET_STACK_INT(value, offset) (*((jint *)&topOfStack[-(offset)]) = (value))
+#define SET_STACK_FLOAT(value, offset) (*((jfloat *)&topOfStack[-(offset)]) = (value))
+#define SET_STACK_OBJECT(value, offset) (*((oop *)&topOfStack[-(offset)]) = (value))
+#define SET_STACK_DOUBLE(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = (value))
+#define SET_STACK_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = \
+ ((VMJavaVal64*)(addr))->d)
+#define SET_STACK_LONG(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->l = (value))
+#define SET_STACK_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->l = \
+ ((VMJavaVal64*)(addr))->l)
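These macros give the C++ interpreter raw, typed views of expression-stack slots through topOfStack; offsets count down from the top. For example, the shared bytecodeInterpreter.cpp implements iadd roughly as:

    CASE(_iadd):
        // pop two ints, push their sum
        SET_STACK_INT(VMintAdd(STACK_INT(-2), STACK_INT(-1)), -2);
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);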
+// JavaLocals implementation
+
+#define LOCALS_SLOT(offset) ((intptr_t*)&locals[-(offset)])
+#define LOCALS_ADDR(offset) ((address)locals[-(offset)])
+#define LOCALS_INT(offset) ((jint)(locals[-(offset)]))
+#define LOCALS_FLOAT(offset) (*((jfloat*)&locals[-(offset)]))
+#define LOCALS_OBJECT(offset) ((oop)locals[-(offset)])
+#define LOCALS_DOUBLE(offset) (((VMJavaVal64*)&locals[-((offset) + 1)])->d)
+#define LOCALS_LONG(offset) (((VMJavaVal64*)&locals[-((offset) + 1)])->l)
+#define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
+#define LOCALS_DOUBLE_AT(offset) (((address)&locals[-((offset) + 1)]))
+
+#define SET_LOCALS_SLOT(value, offset) (*(intptr_t*)&locals[-(offset)] = *(intptr_t *)(value))
+#define SET_LOCALS_ADDR(value, offset) (*((address *)&locals[-(offset)]) = (value))
+#define SET_LOCALS_INT(value, offset) (*((jint *)&locals[-(offset)]) = (value))
+#define SET_LOCALS_FLOAT(value, offset) (*((jfloat *)&locals[-(offset)]) = (value))
+#define SET_LOCALS_OBJECT(value, offset) (*((oop *)&locals[-(offset)]) = (value))
+#define SET_LOCALS_DOUBLE(value, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = (value))
+#define SET_LOCALS_LONG(value, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = (value))
+#define SET_LOCALS_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = \
+ ((VMJavaVal64*)(addr))->d)
+#define SET_LOCALS_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = \
+ ((VMJavaVal64*)(addr))->l)
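+
+// Note the (offset) + 1 skew in the two-word accessors above: a Java
+// long or double occupies local slots (offset) and (offset) + 1, and
+// its 64-bit payload is accessed through the higher-numbered slot.
+// An illustrative read of local 0 as a long (a sketch, not part of
+// this header's interface):
+//
+//   jlong l = LOCALS_LONG(0);   // pair rooted at &locals[-1]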
+
+#endif // CPU_AARCH64_VM_BYTECODEINTERPRETER_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.inline.hpp b/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.inline.hpp
new file mode 100644
index 000000000..4f60b4558
--- /dev/null
+++ b/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.inline.hpp
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_BYTECODEINTERPRETER_AARCH64_INLINE_HPP
+#define CPU_AARCH64_VM_BYTECODEINTERPRETER_AARCH64_INLINE_HPP
+
+// Inline interpreter functions for AArch64 (inherited from the x86 sources)
+
+inline jfloat BytecodeInterpreter::VMfloatAdd(jfloat op1, jfloat op2) { return op1 + op2; }
+inline jfloat BytecodeInterpreter::VMfloatSub(jfloat op1, jfloat op2) { return op1 - op2; }
+inline jfloat BytecodeInterpreter::VMfloatMul(jfloat op1, jfloat op2) { return op1 * op2; }
+inline jfloat BytecodeInterpreter::VMfloatDiv(jfloat op1, jfloat op2) { return op1 / op2; }
+inline jfloat BytecodeInterpreter::VMfloatRem(jfloat op1, jfloat op2) { return fmod(op1, op2); }
+
+inline jfloat BytecodeInterpreter::VMfloatNeg(jfloat op) { return -op; }
+
+inline int32_t BytecodeInterpreter::VMfloatCompare(jfloat op1, jfloat op2, int32_t direction) {
+ return ( op1 < op2 ? -1 :
+ op1 > op2 ? 1 :
+ op1 == op2 ? 0 :
+ (direction == -1 || direction == 1) ? direction : 0);
+}
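+
+// The 'direction' argument encodes the bytecode's NaN policy: fcmpl
+// supplies -1 and fcmpg supplies 1, so when either operand is NaN (all
+// three ordered tests above fail) the comparison yields 'direction':
+//
+//   VMfloatCompare(NAN, 0.0f, -1)   // == -1: fcmpl treats NaN as "less"
+//   VMfloatCompare(NAN, 0.0f,  1)   // ==  1: fcmpg treats NaN as "greater"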
+
+inline void BytecodeInterpreter::VMmemCopy64(uint32_t to[2], const uint32_t from[2]) {
+  // x86 can do unaligned copies but not 64 bits at a time
+ to[0] = from[0]; to[1] = from[1];
+}
+
+// The long operations depend on compiler support for "long long" on x86
+
+inline jlong BytecodeInterpreter::VMlongAdd(jlong op1, jlong op2) {
+ return op1 + op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongAnd(jlong op1, jlong op2) {
+ return op1 & op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongDiv(jlong op1, jlong op2) {
+ // QQQ what about check and throw...
+ return op1 / op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongMul(jlong op1, jlong op2) {
+ return op1 * op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongOr(jlong op1, jlong op2) {
+ return op1 | op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongSub(jlong op1, jlong op2) {
+ return op1 - op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongXor(jlong op1, jlong op2) {
+ return op1 ^ op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongRem(jlong op1, jlong op2) {
+ return op1 % op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongUshr(jlong op1, jint op2) {
+  // CVM did this 0x3f mask; is this really needed??? QQQ
+  return ((unsigned long long) op1) >> (op2 & 0x3F);
+}
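+
+// For example, VMlongUshr(-1, 1) yields 0x7fffffffffffffff (a logical,
+// not arithmetic, shift), and the 0x3F mask gives Java's shift-count
+// mod 64 semantics, so VMlongUshr(x, 64) == x.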
+
+inline jlong BytecodeInterpreter::VMlongShr(jlong op1, jint op2) {
+ return op1 >> (op2 & 0x3F);
+}
+
+inline jlong BytecodeInterpreter::VMlongShl(jlong op1, jint op2) {
+ return op1 << (op2 & 0x3F);
+}
+
+inline jlong BytecodeInterpreter::VMlongNeg(jlong op) {
+ return -op;
+}
+
+inline jlong BytecodeInterpreter::VMlongNot(jlong op) {
+ return ~op;
+}
+
+inline int32_t BytecodeInterpreter::VMlongLtz(jlong op) {
+  return (op < 0);  // strictly less than zero
+}
+
+inline int32_t BytecodeInterpreter::VMlongGez(jlong op) {
+ return (op >= 0);
+}
+
+inline int32_t BytecodeInterpreter::VMlongEqz(jlong op) {
+ return (op == 0);
+}
+
+inline int32_t BytecodeInterpreter::VMlongEq(jlong op1, jlong op2) {
+ return (op1 == op2);
+}
+
+inline int32_t BytecodeInterpreter::VMlongNe(jlong op1, jlong op2) {
+ return (op1 != op2);
+}
+
+inline int32_t BytecodeInterpreter::VMlongGe(jlong op1, jlong op2) {
+ return (op1 >= op2);
+}
+
+inline int32_t BytecodeInterpreter::VMlongLe(jlong op1, jlong op2) {
+ return (op1 <= op2);
+}
+
+inline int32_t BytecodeInterpreter::VMlongLt(jlong op1, jlong op2) {
+ return (op1 < op2);
+}
+
+inline int32_t BytecodeInterpreter::VMlongGt(jlong op1, jlong op2) {
+ return (op1 > op2);
+}
+
+inline int32_t BytecodeInterpreter::VMlongCompare(jlong op1, jlong op2) {
+ return (VMlongLt(op1, op2) ? -1 : VMlongGt(op1, op2) ? 1 : 0);
+}
+
+// Long conversions
+
+inline jdouble BytecodeInterpreter::VMlong2Double(jlong val) {
+ return (jdouble) val;
+}
+
+inline jfloat BytecodeInterpreter::VMlong2Float(jlong val) {
+ return (jfloat) val;
+}
+
+inline jint BytecodeInterpreter::VMlong2Int(jlong val) {
+ return (jint) val;
+}
+
+// Double Arithmetic
+
+inline jdouble BytecodeInterpreter::VMdoubleAdd(jdouble op1, jdouble op2) {
+ return op1 + op2;
+}
+
+inline jdouble BytecodeInterpreter::VMdoubleDiv(jdouble op1, jdouble op2) {
+ // Divide by zero... QQQ
+ return op1 / op2;
+}
+
+inline jdouble BytecodeInterpreter::VMdoubleMul(jdouble op1, jdouble op2) {
+ return op1 * op2;
+}
+
+inline jdouble BytecodeInterpreter::VMdoubleNeg(jdouble op) {
+ return -op;
+}
+
+inline jdouble BytecodeInterpreter::VMdoubleRem(jdouble op1, jdouble op2) {
+ return fmod(op1, op2);
+}
+
+inline jdouble BytecodeInterpreter::VMdoubleSub(jdouble op1, jdouble op2) {
+ return op1 - op2;
+}
+
+inline int32_t BytecodeInterpreter::VMdoubleCompare(jdouble op1, jdouble op2, int32_t direction) {
+ return ( op1 < op2 ? -1 :
+ op1 > op2 ? 1 :
+ op1 == op2 ? 0 :
+ (direction == -1 || direction == 1) ? direction : 0);
+}
+
+// Double Conversions
+
+inline jfloat BytecodeInterpreter::VMdouble2Float(jdouble val) {
+ return (jfloat) val;
+}
+
+// Float Conversions
+
+inline jdouble BytecodeInterpreter::VMfloat2Double(jfloat op) {
+ return (jdouble) op;
+}
+
+// Integer Arithmetic
+
+inline jint BytecodeInterpreter::VMintAdd(jint op1, jint op2) {
+ return op1 + op2;
+}
+
+inline jint BytecodeInterpreter::VMintAnd(jint op1, jint op2) {
+ return op1 & op2;
+}
+
+inline jint BytecodeInterpreter::VMintDiv(jint op1, jint op2) {
+ /* it's possible we could catch this special case implicitly */
+ if ((juint)op1 == 0x80000000 && op2 == -1) return op1;
+ else return op1 / op2;
+}
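+
+// Worked example of the guarded corner case above: in Java,
+// Integer.MIN_VALUE / -1 must wrap to Integer.MIN_VALUE rather than
+// trap on overflow, so VMintDiv((jint)0x80000000, -1) returns
+// (jint)0x80000000, i.e. -2147483648.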
+
+inline jint BytecodeInterpreter::VMintMul(jint op1, jint op2) {
+ return op1 * op2;
+}
+
+inline jint BytecodeInterpreter::VMintNeg(jint op) {
+ return -op;
+}
+
+inline jint BytecodeInterpreter::VMintOr(jint op1, jint op2) {
+ return op1 | op2;
+}
+
+inline jint BytecodeInterpreter::VMintRem(jint op1, jint op2) {
+ /* it's possible we could catch this special case implicitly */
+ if ((juint)op1 == 0x80000000 && op2 == -1) return 0;
+ else return op1 % op2;
+}
+
+inline jint BytecodeInterpreter::VMintShl(jint op1, jint op2) {
+  return op1 << (op2 & 0x1f);
+}
+
+inline jint BytecodeInterpreter::VMintShr(jint op1, jint op2) {
+ return op1 >> (op2 & 0x1f);
+}
+
+inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
+ return op1 - op2;
+}
+
+inline jint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
+ return ((juint) op1) >> (op2 & 0x1f);
+}
+
+inline jint BytecodeInterpreter::VMintXor(jint op1, jint op2) {
+ return op1 ^ op2;
+}
+
+inline jdouble BytecodeInterpreter::VMint2Double(jint val) {
+ return (jdouble) val;
+}
+
+inline jfloat BytecodeInterpreter::VMint2Float(jint val) {
+ return (jfloat) val;
+}
+
+inline jlong BytecodeInterpreter::VMint2Long(jint val) {
+ return (jlong) val;
+}
+
+inline jchar BytecodeInterpreter::VMint2Char(jint val) {
+ return (jchar) val;
+}
+
+inline jshort BytecodeInterpreter::VMint2Short(jint val) {
+ return (jshort) val;
+}
+
+inline jbyte BytecodeInterpreter::VMint2Byte(jint val) {
+ return (jbyte) val;
+}
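+
+// The int-narrowing conversions above keep only the low-order bits, as
+// the Java i2c/i2s/i2b bytecodes require, e.g.:
+//
+//   VMint2Byte(0x1FF)     // == (jbyte)-1:     low 8 bits, sign-extended
+//   VMint2Char(0x1FFFF)   // == (jchar)0xFFFF: low 16 bits, zero-extended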
+
+#endif // CPU_AARCH64_VM_BYTECODEINTERPRETER_AARCH64_INLINE_HPP
diff --git a/src/cpu/aarch64/vm/bytecodes_aarch64.cpp b/src/cpu/aarch64/vm/bytecodes_aarch64.cpp
new file mode 100644
index 000000000..4e6993548
--- /dev/null
+++ b/src/cpu/aarch64/vm/bytecodes_aarch64.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/bytecodes.hpp"
+
+
+void Bytecodes::pd_initialize() {
+  // No AArch64-specific initialization
+}
+
+
+Bytecodes::Code Bytecodes::pd_base_code_for(Code code) {
+  // No AArch64-specific bytecodes
+ return code;
+}
diff --git a/src/cpu/aarch64/vm/bytecodes_aarch64.hpp b/src/cpu/aarch64/vm/bytecodes_aarch64.hpp
new file mode 100644
index 000000000..e21c16a09
--- /dev/null
+++ b/src/cpu/aarch64/vm/bytecodes_aarch64.hpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_BYTECODES_AARCH64_HPP
+#define CPU_AARCH64_VM_BYTECODES_AARCH64_HPP
+
+// No AArch64-specific bytecodes
+
+#endif // CPU_AARCH64_VM_BYTECODES_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/bytes_aarch64.hpp b/src/cpu/aarch64/vm/bytes_aarch64.hpp
new file mode 100644
index 000000000..8eb0ea39b
--- /dev/null
+++ b/src/cpu/aarch64/vm/bytes_aarch64.hpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_BYTES_AARCH64_HPP
+#define CPU_AARCH64_VM_BYTES_AARCH64_HPP
+
+#include "memory/allocation.hpp"
+
+class Bytes: AllStatic {
+ private:
+#ifndef AMD64
+ // Helper function for swap_u8
+ static inline u8 swap_u8_base(u4 x, u4 y); // compiler-dependent implementation
+#endif // AMD64
+
+ public:
+  // Returns true if the byte ordering used by Java is different from the native byte ordering
+  // of the underlying machine. For example, this is true for little-endian machines such as
+  // AArch64 and x86, but false for big-endian machines such as SPARC.
+  static inline bool is_Java_byte_ordering_different() { return true; }
+
+
+  // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
+  // (no special code is needed since AArch64, like x86, can access unaligned data)
+ static inline u2 get_native_u2(address p) { return *(u2*)p; }
+ static inline u4 get_native_u4(address p) { return *(u4*)p; }
+ static inline u8 get_native_u8(address p) { return *(u8*)p; }
+
+ static inline void put_native_u2(address p, u2 x) { *(u2*)p = x; }
+ static inline void put_native_u4(address p, u4 x) { *(u4*)p = x; }
+ static inline void put_native_u8(address p, u8 x) { *(u8*)p = x; }
+
+
+  // Efficient reading and writing of unaligned unsigned data in Java
+  // byte ordering (i.e. big-endian ordering). Byte-order reversal is
+  // needed since AArch64, like x86, is configured little-endian here.
+ static inline u2 get_Java_u2(address p) { return swap_u2(get_native_u2(p)); }
+ static inline u4 get_Java_u4(address p) { return swap_u4(get_native_u4(p)); }
+ static inline u8 get_Java_u8(address p) { return swap_u8(get_native_u8(p)); }
+
+ static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, swap_u2(x)); }
+ static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, swap_u4(x)); }
+ static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, swap_u8(x)); }
+
+
+ // Efficient swapping of byte ordering
+ static inline u2 swap_u2(u2 x); // compiler-dependent implementation
+ static inline u4 swap_u4(u4 x); // compiler-dependent implementation
+ static inline u8 swap_u8(u8 x);
+};
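+
+// A hedged usage sketch, assuming a little-endian host as configured
+// here: for a classfile buffer u1 buf[2] = { 0x12, 0x34 },
+//
+//   Bytes::get_native_u2((address)buf)   // == 0x3412, host byte order
+//   Bytes::get_Java_u2((address)buf)     // == 0x1234, big-endian order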
+
+
+// One of the following headers, selected by target, contains the implementations of swap_u2, swap_u4, and swap_u8[_base]
+#ifdef TARGET_OS_ARCH_linux_x86
+# include "bytes_linux_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "bytes_linux_aarch64.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_solaris_x86
+# include "bytes_solaris_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_windows_x86
+# include "bytes_windows_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_bsd_x86
+# include "bytes_bsd_x86.inline.hpp"
+#endif
+
+
+#endif // CPU_AARCH64_VM_BYTES_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp b/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp
new file mode 100644
index 000000000..1fb432e89
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_CodeStubs.hpp"
+#include "c1/c1_FrameMap.hpp"
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "c1/c1_Runtime1.hpp"
+#include "nativeInst_aarch64.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "vmreg_aarch64.inline.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif
+
+
+#define __ ce->masm()->
+
+float ConversionStub::float_zero = 0.0;
+double ConversionStub::double_zero = 0.0;
+
+void ConversionStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+void CounterOverflowStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
+ bool throw_index_out_of_bounds_exception)
+ : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
+ , _index(index) { Unimplemented(); }
+
+
+void RangeCheckStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+void DivByZeroStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+
+// Implementation of NewInstanceStub
+
+NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) { Unimplemented(); }
+
+
+void NewInstanceStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+
+// Implementation of NewTypeArrayStub
+
+NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }
+
+
+void NewTypeArrayStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+
+// Implementation of NewObjectArrayStub
+
+NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }
+
+
+void NewObjectArrayStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+
+// Implementation of MonitorAccessStubs
+
+MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
+: MonitorAccessStub(obj_reg, lock_reg) { Unimplemented(); }
+
+
+void MonitorEnterStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+
+void MonitorExitStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+
+// Implementation of patching:
+// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
+// - Replace the original code with a call to the stub
+// At Runtime:
+// - call to stub, jump to runtime
+// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
+// - in runtime: after initializing class, restore original code, reexecute instruction
+
+int PatchingStub::_patch_info_offset = 0;
+
+void PatchingStub::align_patch_site(MacroAssembler* masm) { Unimplemented(); }
+
+void PatchingStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+
+void DeoptimizeStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+
+void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+
+void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+
+void ArrayCopyStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+/////////////////////////////////////////////////////////////////////////////
+#ifndef SERIALGC
+
+void G1PreBarrierStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+jbyte* G1PostBarrierStub::_byte_map_base = NULL;
+
+jbyte* G1PostBarrierStub::byte_map_base_slow() { Unimplemented(); return 0; }
+
+
+void G1PostBarrierStub::emit_code(LIR_Assembler* ce) { Unimplemented(); }
+
+#endif // SERIALGC
+/////////////////////////////////////////////////////////////////////////////
+
+#undef __
diff --git a/src/cpu/aarch64/vm/c1_Defs_aarch64.hpp b/src/cpu/aarch64/vm/c1_Defs_aarch64.hpp
new file mode 100644
index 000000000..b7f2d1976
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_Defs_aarch64.hpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_C1_DEFS_AARCH64_HPP
+#define CPU_AARCH64_VM_C1_DEFS_AARCH64_HPP
+
+// native word offsets from memory address (little endian)
+enum {
+ pd_lo_word_offset_in_bytes = 0,
+ pd_hi_word_offset_in_bytes = BytesPerWord
+};
+
+// explicit rounding operations are required to implement the strictFP mode
+enum {
+ pd_strict_fp_requires_explicit_rounding = true
+};
+
+
+// registers
+enum {
+ pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers, // number of registers used during code emission
+ pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers, // number of registers used during code emission
+
+#ifdef _LP64
+ #define UNALLOCATED 4 // rsp, rbp, r15, r10
+#else
+ #define UNALLOCATED 2 // rsp, rbp
+#endif // LP64
+
+ pd_nof_caller_save_cpu_regs_frame_map = pd_nof_cpu_regs_frame_map - UNALLOCATED, // number of registers killed by calls
+ pd_nof_caller_save_fpu_regs_frame_map = pd_nof_fpu_regs_frame_map, // number of registers killed by calls
+
+ pd_nof_cpu_regs_reg_alloc = pd_nof_caller_save_cpu_regs_frame_map, // number of registers that are visible to register allocator
+ pd_nof_fpu_regs_reg_alloc = 6, // number of registers that are visible to register allocator
+
+ pd_nof_cpu_regs_linearscan = pd_nof_cpu_regs_frame_map, // number of registers visible to linear scan
+ pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map, // number of registers visible to linear scan
+ pd_first_cpu_reg = 0,
+ pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11),
+ pd_first_byte_reg = NOT_LP64(2) LP64_ONLY(0),
+ pd_last_byte_reg = NOT_LP64(5) LP64_ONLY(11),
+ pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
+ pd_last_fpu_reg = pd_first_fpu_reg + 7,
+};
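+
+// Worked example of the register arithmetic above, assuming the LP64
+// x86 values this skeleton was copied from, where
+// RegisterImpl::number_of_registers == 16: the allocator then sees
+// 16 - 4 = 12 caller-save registers, consistent with
+// pd_last_cpu_reg == 11 (registers 0..11 allocatable).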
+
+
+// encoding of float value in debug info:
+enum {
+ pd_float_saved_as_double = true
+};
+
+#endif // CPU_AARCH64_VM_C1_DEFS_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/c1_FpuStackSim_aarch64.cpp b/src/cpu/aarch64/vm/c1_FpuStackSim_aarch64.cpp
new file mode 100644
index 000000000..3fbd57c5c
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_FpuStackSim_aarch64.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_FpuStackSim.hpp"
+#include "c1/c1_FrameMap.hpp"
+#include "utilities/array.hpp"
+#include "utilities/ostream.hpp"
+
+//--------------------------------------------------------
+// FpuStackSim
+//--------------------------------------------------------
+
+// This class maps the FPU registers to their stack locations; it computes
+// the offsets between individual registers and simulates the FPU stack.
+
+const int EMPTY = -1;
+
+int FpuStackSim::regs_at(int i) const {
+ assert(i >= 0 && i < FrameMap::nof_fpu_regs, "out of bounds");
+ return _regs[i];
+}
+
+void FpuStackSim::set_regs_at(int i, int val) {
+ assert(i >= 0 && i < FrameMap::nof_fpu_regs, "out of bounds");
+ _regs[i] = val;
+}
+
+void FpuStackSim::dec_stack_size() {
+ _stack_size--;
+ assert(_stack_size >= 0, "FPU stack underflow");
+}
+
+void FpuStackSim::inc_stack_size() {
+ _stack_size++;
+ assert(_stack_size <= FrameMap::nof_fpu_regs, "FPU stack overflow");
+}
+
+FpuStackSim::FpuStackSim(Compilation* compilation)
+ : _compilation(compilation)
+{
+ _stack_size = 0;
+ for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
+ set_regs_at(i, EMPTY);
+ }
+}
+
+
+void FpuStackSim::pop() {
+ if (TraceFPUStack) { tty->print("FPU-pop "); print(); tty->cr(); }
+ set_regs_at(tos_index(), EMPTY);
+ dec_stack_size();
+}
+
+void FpuStackSim::pop(int rnr) {
+ if (TraceFPUStack) { tty->print("FPU-pop %d", rnr); print(); tty->cr(); }
+ assert(regs_at(tos_index()) == rnr, "rnr is not on TOS");
+ set_regs_at(tos_index(), EMPTY);
+ dec_stack_size();
+}
+
+
+void FpuStackSim::push(int rnr) {
+ if (TraceFPUStack) { tty->print("FPU-push %d", rnr); print(); tty->cr(); }
+ assert(regs_at(stack_size()) == EMPTY, "should be empty");
+ set_regs_at(stack_size(), rnr);
+ inc_stack_size();
+}
+
+
+void FpuStackSim::swap(int offset) {
+ if (TraceFPUStack) { tty->print("FPU-swap %d", offset); print(); tty->cr(); }
+ int t = regs_at(tos_index() - offset);
+ set_regs_at(tos_index() - offset, regs_at(tos_index()));
+ set_regs_at(tos_index(), t);
+}
+
+
+int FpuStackSim::offset_from_tos(int rnr) const {
+ for (int i = tos_index(); i >= 0; i--) {
+ if (regs_at(i) == rnr) {
+ return tos_index() - i;
+ }
+ }
+ assert(false, "FpuStackSim: register not found");
+ BAILOUT_("FpuStackSim: register not found", 0);
+}
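+
+// A minimal usage sketch of the simulated stack (illustrative only;
+// 'compilation' stands for an in-scope Compilation*):
+//
+//   FpuStackSim sim(compilation);
+//   sim.push(3);                    // FPU register 3 is now at TOS
+//   sim.push(5);                    // FPU register 5 is now at TOS
+//   assert(sim.offset_from_tos(5) == 0, "at TOS");
+//   assert(sim.offset_from_tos(3) == 1, "one below TOS");
+//   sim.pop(5);                     // rnr must name the TOS entry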
+
+
+int FpuStackSim::get_slot(int tos_offset) const {
+ return regs_at(tos_index() - tos_offset);
+}
+
+void FpuStackSim::set_slot(int tos_offset, int rnr) {
+ set_regs_at(tos_index() - tos_offset, rnr);
+}
+
+void FpuStackSim::rename(int old_rnr, int new_rnr) {
+ if (TraceFPUStack) { tty->print("FPU-rename %d %d", old_rnr, new_rnr); print(); tty->cr(); }
+ if (old_rnr == new_rnr)
+ return;
+ bool found = false;
+ for (int i = 0; i < stack_size(); i++) {
+ assert(regs_at(i) != new_rnr, "should not see old occurrences of new_rnr on the stack");
+ if (regs_at(i) == old_rnr) {
+ set_regs_at(i, new_rnr);
+ found = true;
+ }
+ }
+ assert(found, "should have found at least one instance of old_rnr");
+}
+
+
+bool FpuStackSim::contains(int rnr) {
+ for (int i = 0; i < stack_size(); i++) {
+ if (regs_at(i) == rnr) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool FpuStackSim::is_empty() {
+#ifdef ASSERT
+ if (stack_size() == 0) {
+ for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
+ assert(regs_at(i) == EMPTY, "must be empty");
+ }
+ }
+#endif
+ return stack_size() == 0;
+}
+
+
+bool FpuStackSim::slot_is_empty(int tos_offset) {
+ return (regs_at(tos_index() - tos_offset) == EMPTY);
+}
+
+
+void FpuStackSim::clear() {
+ if (TraceFPUStack) { tty->print("FPU-clear"); print(); tty->cr(); }
+ for (int i = tos_index(); i >= 0; i--) {
+ set_regs_at(i, EMPTY);
+ }
+ _stack_size = 0;
+}
+
+
+intArray* FpuStackSim::write_state() {
+ intArray* res = new intArray(1 + FrameMap::nof_fpu_regs);
+ (*res)[0] = stack_size();
+ for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
+ (*res)[1 + i] = regs_at(i);
+ }
+ return res;
+}
+
+
+void FpuStackSim::read_state(intArray* fpu_stack_state) {
+ _stack_size = (*fpu_stack_state)[0];
+ for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
+ set_regs_at(i, (*fpu_stack_state)[1 + i]);
+ }
+}
+
+
+#ifndef PRODUCT
+void FpuStackSim::print() {
+ tty->print(" N=%d[", stack_size());\
+ for (int i = 0; i < stack_size(); i++) {
+ int reg = regs_at(i);
+ if (reg != EMPTY) {
+ tty->print("%d", reg);
+ } else {
+ tty->print("_");
+ }
+  }
+ tty->print(" ]");
+}
+#endif
diff --git a/src/cpu/aarch64/vm/c1_FpuStackSim_aarch64.hpp b/src/cpu/aarch64/vm/c1_FpuStackSim_aarch64.hpp
new file mode 100644
index 000000000..ae1e0d590
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_FpuStackSim_aarch64.hpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_C1_FPUSTACKSIM_AARCH64_HPP
+#define CPU_AARCH64_VM_C1_FPUSTACKSIM_AARCH64_HPP
+
+// Simulates the FPU stack and maintains mapping [fpu-register -> stack offset]
+// FPU registers are described as numbers from 0..nof_fpu_regs-1
+
+class Compilation;
+
+class FpuStackSim VALUE_OBJ_CLASS_SPEC {
+ private:
+ Compilation* _compilation;
+ int _stack_size;
+ int _regs[FrameMap::nof_fpu_regs];
+
+ int tos_index() const { return _stack_size - 1; }
+
+ int regs_at(int i) const;
+ void set_regs_at(int i, int val);
+ void dec_stack_size();
+ void inc_stack_size();
+
+ // unified bailout support
+ Compilation* compilation() const { return _compilation; }
+ void bailout(const char* msg) const { compilation()->bailout(msg); }
+ bool bailed_out() const { return compilation()->bailed_out(); }
+
+ public:
+ FpuStackSim(Compilation* compilation);
+ void pop ();
+ void pop (int rnr); // rnr must be on tos
+ void push(int rnr);
+ void swap(int offset); // exchange tos with tos + offset
+ int offset_from_tos(int rnr) const; // return the offset of the topmost instance of rnr from TOS
+ int get_slot(int tos_offset) const; // return the entry at the given offset from TOS
+ void set_slot(int tos_offset, int rnr); // set the entry at the given offset from TOS
+ void rename(int old_rnr, int new_rnr); // rename all instances of old_rnr to new_rnr
+ bool contains(int rnr); // debugging support only
+ bool is_empty();
+ bool slot_is_empty(int tos_offset);
+ int stack_size() const { return _stack_size; }
+ void clear();
+ intArray* write_state();
+ void read_state(intArray* fpu_stack_state);
+
+ void print() PRODUCT_RETURN;
+};
+
+#endif // CPU_AARCH64_VM_C1_FPUSTACKSIM_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/c1_FrameMap_aarch64.cpp b/src/cpu/aarch64/vm/c1_FrameMap_aarch64.cpp
new file mode 100644
index 000000000..12ec8964c
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_FrameMap_aarch64.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_FrameMap.hpp"
+#include "c1/c1_LIR.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "vmreg_aarch64.inline.hpp"
+
+const int FrameMap::pd_c_runtime_reserved_arg_size = 0;
+
+LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) { Unimplemented(); return r0_opr; }
+
+
+LIR_Opr FrameMap::r0_opr;
+
+LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, };
+LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, };
+
+//--------------------------------------------------------
+// FrameMap
+//--------------------------------------------------------
+
+void FrameMap::initialize() { Unimplemented(); }
+
+
+Address FrameMap::make_new_address(ByteSize sp_offset) const { Unimplemented(); return (address)0; }
+
+
+// ----------------mapping-----------------------
+// all mapping is based on rbp addressing, except for simple leaf methods where we
+// access the locals rsp-based (and no frame is built)
+
+
+// Frame for simple leaf methods (quick entries)
+//
+// +----------+
+// | ret addr | <- TOS
+// +----------+
+// | args |
+// | ...... |
+
+// Frame for standard methods
+//
+// | .........| <- TOS
+// | locals |
+// +----------+
+// | old rbp, | <- EBP
+// +----------+
+// | ret addr |
+// +----------+
+// | args |
+// | .........|
+
+
+// For OopMaps, map a local variable or spill index to an VMRegImpl name.
+// This is the offset from sp() in the frame of the slot for the index,
+// skewed by VMRegImpl::stack0 to indicate a stack location (vs.a register.)
+//
+// framesize +
+// stack0 stack0 0 <- VMReg
+// | | <registers> |
+// ...........|..............|.............|
+// 0 1 2 3 x x 4 5 6 ... | <- local indices
+// ^ ^ sp() ( x x indicate link
+// | | and return addr)
+// arguments non-argument locals
+
+
+VMReg FrameMap::fpu_regname (int n) { Unimplemented(); return 0; }
+
+LIR_Opr FrameMap::stack_pointer() { Unimplemented(); return 0; }
+
+
+// JSR 292
+LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() { Unimplemented(); return 0; }
+
+
+bool FrameMap::validate_frame() { Unimplemented(); return false; }
diff --git a/src/cpu/aarch64/vm/c1_FrameMap_aarch64.hpp b/src/cpu/aarch64/vm/c1_FrameMap_aarch64.hpp
new file mode 100644
index 000000000..54051c519
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_FrameMap_aarch64.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_C1_FRAMEMAP_AARCH64_HPP
+#define CPU_AARCH64_VM_C1_FRAMEMAP_AARCH64_HPP
+
+// On i486 the frame looks as follows:
+//
+// +-----------------------------+---------+----------------------------------------+----------------+-----------
+// | size_arguments-nof_reg_args | 2 words | size_locals-size_arguments+numreg_args | _size_monitors | spilling .
+// +-----------------------------+---------+----------------------------------------+----------------+-----------
+//
+// The FPU registers are mapped with their offset from TOS; therefore the
+// status of FPU stack must be updated during code emission.
+
+ public:
+ static const int pd_c_runtime_reserved_arg_size;
+
+ enum {
+ first_available_sp_in_frame = 0,
+ frame_pad_in_bytes = 16,
+ nof_reg_args = 6
+ };
+
+ public:
+ static LIR_Opr r0_opr;
+
+ static LIR_Opr as_long_opr(Register r) {
+ Unimplemented();
+ return r0_opr;
+ }
+ static LIR_Opr as_pointer_opr(Register r) {
+ Unimplemented();
+ return r0_opr;
+ }
+
+ // VMReg name for spilled physical FPU stack slot n
+ static VMReg fpu_regname (int n);
+
+ static bool is_caller_save_register (LIR_Opr opr) { return true; }
+ static bool is_caller_save_register (Register r) { return true; }
+
+  static int nof_caller_save_cpu_regs() { Unimplemented(); return 0; }
+  static int last_cpu_reg()             { Unimplemented(); return 0; }
+  static int last_byte_reg()            { Unimplemented(); return 0; }
+
+#endif // CPU_AARCH64_VM_C1_FRAMEMAP_AARCH64_HPP
+
diff --git a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
new file mode 100644
index 000000000..7def00bea
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "c1/c1_Compilation.hpp"
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "c1/c1_Runtime1.hpp"
+#include "c1/c1_ValueStack.hpp"
+#include "ci/ciArrayKlass.hpp"
+#include "ci/ciInstance.hpp"
+#include "gc_interface/collectedHeap.hpp"
+#include "memory/barrierSet.hpp"
+#include "memory/cardTableModRefBS.hpp"
+#include "nativeInst_aarch64.hpp"
+#include "oops/objArrayKlass.hpp"
+#include "runtime/sharedRuntime.hpp"
+
+
+
+NEEDS_CLEANUP // remove these definitions?
+const Register IC_Klass = r0; // where the IC klass is cached
+const Register SYNC_header = r0; // synchronization header
+const Register SHIFT_count = r0; // where count for shift operations must be
+
+#define __ _masm->
+
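+// The '__' shorthand is the usual HotSpot assembler idiom: once these
+// stubs are filled in, a statement such as
+//
+//   __ bind(*op->label());
+//
+// expands to _masm->bind(*op->label()), emitting through the
+// LIR_Assembler's MacroAssembler.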
+
+static void select_different_registers(Register preserve,
+ Register extra,
+ Register &tmp1,
+ Register &tmp2) { Unimplemented(); }
+
+
+
+static void select_different_registers(Register preserve,
+ Register extra,
+ Register &tmp1,
+ Register &tmp2,
+ Register &tmp3) { Unimplemented(); }
+
+
+
+bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }
+
+
+LIR_Opr LIR_Assembler::receiverOpr() { Unimplemented(); return 0; }
+
+LIR_Opr LIR_Assembler::osrBufferPointer() { Unimplemented(); return 0; }
+
+//--------------fpu register translations-----------------------
+
+
+address LIR_Assembler::float_constant(float f) { Unimplemented(); return 0; }
+
+
+address LIR_Assembler::double_constant(double d) { Unimplemented(); return 0; }
+
+
+void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }
+
+void LIR_Assembler::reset_FPU() { Unimplemented(); }
+
+void LIR_Assembler::fpop() { Unimplemented(); }
+
+void LIR_Assembler::fxch(int i) { Unimplemented(); }
+
+void LIR_Assembler::fld(int i) { Unimplemented(); }
+
+void LIR_Assembler::ffree(int i) { Unimplemented(); }
+
+void LIR_Assembler::breakpoint() { Unimplemented(); }
+
+void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }
+
+void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }
+
+bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
+//-------------------------------------------
+
+Address LIR_Assembler::as_Address(LIR_Address* addr) { Unimplemented(); return (address)0; }
+
+Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) { Unimplemented(); return (address)0; }
+
+
+Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { Unimplemented(); return (address)0; }
+
+
+Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { Unimplemented(); return (address)0; }
+
+
+void LIR_Assembler::osr_entry() { Unimplemented(); }
+
+
+// inline cache check; done before the frame is built.
+int LIR_Assembler::check_icache() { Unimplemented(); return 0; }
+
+
+void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) { Unimplemented(); }
+
+
+// This specifies the rsp decrement needed to build the frame
+int LIR_Assembler::initial_frame_size_in_bytes() { Unimplemented(); return 0; }
+
+
+int LIR_Assembler::emit_exception_handler() { Unimplemented(); return 0; }
+
+
+// Emit the code to remove the frame from the stack in the exception
+// unwind path.
+int LIR_Assembler::emit_unwind_handler() { Unimplemented(); return 0; }
+
+
+int LIR_Assembler::emit_deopt_handler() { Unimplemented(); return 0; }
+
+
+// This is the fast version of java.lang.String.compare; it has no
+// OSR entry and therefore we generate a slow version for OSRs
+void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) { Unimplemented(); }
+
+
+
+void LIR_Assembler::return_op(LIR_Opr result) { Unimplemented(); }
+
+
+int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { Unimplemented(); return 0; }
+
+
+void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { Unimplemented(); }
+
+void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
+
+
+void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { Unimplemented(); }
+
+void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { Unimplemented(); }
+
+void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { Unimplemented(); }
+
+
+void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) { Unimplemented(); }
+
+void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { Unimplemented(); }
+
+
+void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) { Unimplemented(); }
+
+
+void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { Unimplemented(); }
+
+
+void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { Unimplemented(); }
+
+
+void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) { Unimplemented(); }
+
+
+void LIR_Assembler::prefetchr(LIR_Opr src) { Unimplemented(); }
+
+
+void LIR_Assembler::prefetchw(LIR_Opr src) { Unimplemented(); }
+
+
+NEEDS_CLEANUP; // This could be static?
+Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const { Unimplemented(); return Address::times_4; }
+
+
+void LIR_Assembler::emit_op3(LIR_Op3* op) { Unimplemented(); }
+
+void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { Unimplemented(); }
+
+void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { Unimplemented(); }
+
+void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { Unimplemented(); }
+
+void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { Unimplemented(); }
+
+void LIR_Assembler::type_profile_helper(Register mdo,
+ ciMethodData *md, ciProfileData *data,
+ Register recv, Label* update_done) { Unimplemented(); }
+
+void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { Unimplemented(); }
+
+
+void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { Unimplemented(); }
+
+
+void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { Unimplemented(); }
+
+void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) { Unimplemented(); }
+
+
+void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { Unimplemented(); }
+
+void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }
+
+
+void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) { Unimplemented(); }
+
+void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { Unimplemented(); }
+
+
+// we assume that rax and rdx can be overwritten
+void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }
+
+
+void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { Unimplemented(); }
+
+void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { Unimplemented(); }
+
+
+void LIR_Assembler::align_call(LIR_Code code) { Unimplemented(); }
+
+
+void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { Unimplemented(); }
+
+
+
+void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { Unimplemented(); }
+
+
+/* Currently, vtable-dispatch is only enabled for sparc platforms */
+void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
+ ShouldNotReachHere();
+}
+
+
+void LIR_Assembler::emit_static_call_stub() { Unimplemented(); }
+
+
+void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { Unimplemented(); }
+
+
+void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { Unimplemented(); }
+
+
+void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { Unimplemented(); }
+
+
+void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { Unimplemented(); }
+
+
+void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) { Unimplemented(); }
+
+
+void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) { Unimplemented(); }
+
+
+void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) { Unimplemented(); }
+
+
+// This code replaces a call to arraycopy; no exception may
+// be thrown in this code, they must be thrown in the System.arraycopy
+// activation frame; we could save some checks if this would not be the case
+void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Unimplemented(); }
+
+
+void LIR_Assembler::emit_lock(LIR_OpLock* op) { Unimplemented(); }
+
+
+void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { Unimplemented(); }
+
+void LIR_Assembler::emit_delay(LIR_OpDelay*) {
+ Unimplemented();
+}
+
+
+void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { Unimplemented(); }
+
+
+void LIR_Assembler::align_backward_branch_target() { Unimplemented(); }
+
+
+void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) { Unimplemented(); }
+
+
+void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) { Unimplemented(); }
+
+
+void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { Unimplemented(); }
+
+
+void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { Unimplemented(); }
+
+
+void LIR_Assembler::membar() { Unimplemented(); }
+
+void LIR_Assembler::membar_acquire() { Unimplemented(); }
+
+void LIR_Assembler::membar_release() { Unimplemented(); }
+
+void LIR_Assembler::membar_loadload() { Unimplemented(); }
+
+void LIR_Assembler::membar_storestore() { Unimplemented(); }
+
+void LIR_Assembler::membar_loadstore() { Unimplemented(); }
+
+void LIR_Assembler::membar_storeload() { Unimplemented(); }
+
+void LIR_Assembler::get_thread(LIR_Opr result_reg) { Unimplemented(); }
+
+
+void LIR_Assembler::peephole(LIR_List*) { Unimplemented(); }
+
+
+#undef __
diff --git a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp
new file mode 100644
index 000000000..6ed351033
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_C1_LIRASSEMBLER_AARCH64_HPP
+#define CPU_AARCH64_VM_C1_LIRASSEMBLER_AARCH64_HPP
+
+ private:
+
+ Address::ScaleFactor array_element_size(BasicType type) const;
+
+ void arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack);
+
+  // helper functions which check for overflow and set bailout if it
+  // occurs. Each always returns a valid embeddable pointer, but in the
+  // bailout case the pointer won't be to unique storage.
+ address float_constant(float f);
+ address double_constant(double d);
+
+ bool is_literal_address(LIR_Address* addr);
+
+  // When we need to use something other than rscratch1, use this
+  // method.
+ Address as_Address(LIR_Address* addr, Register tmp);
+
+ // Record the type of the receiver in ReceiverTypeData
+ void type_profile_helper(Register mdo,
+ ciMethodData *md, ciProfileData *data,
+ Register recv, Label* update_done);
+public:
+
+ void store_parameter(Register r, int offset_from_esp_in_words);
+ void store_parameter(jint c, int offset_from_esp_in_words);
+ void store_parameter(jobject c, int offset_from_esp_in_words);
+
+ enum { call_stub_size = NOT_LP64(15) LP64_ONLY(28),
+ exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
+ deopt_handler_size = NOT_LP64(10) LP64_ONLY(17)
+ };
+
+#endif // CPU_AARCH64_VM_C1_LIRASSEMBLER_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp b/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp
new file mode 100644
index 000000000..897025f21
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_Compilation.hpp"
+#include "c1/c1_FrameMap.hpp"
+#include "c1/c1_Instruction.hpp"
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_LIRGenerator.hpp"
+#include "c1/c1_Runtime1.hpp"
+#include "c1/c1_ValueStack.hpp"
+#include "ci/ciArray.hpp"
+#include "ci/ciObjArrayKlass.hpp"
+#include "ci/ciTypeArrayKlass.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "vmreg_aarch64.inline.hpp"
+
+#ifdef ASSERT
+#define __ gen()->lir(__FILE__, __LINE__)->
+#else
+#define __ gen()->lir()->
+#endif
+
+// Item will be loaded into a byte register; Intel only
+void LIRItem::load_byte_item() {
+ load_item();
+ LIR_Opr res = result();
+
+ if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
+ // make sure that it is a byte register
+ assert(!value()->type()->is_float() && !value()->type()->is_double(),
+ "can't load floats in byte register");
+ LIR_Opr reg = _gen->rlock_byte(T_BYTE);
+ __ move(res, reg);
+
+ _result = reg;
+ }
+}
+
+
+void LIRItem::load_nonconstant() {
+ LIR_Opr r = value()->operand();
+ if (r->is_constant()) {
+ _result = r;
+ } else {
+ load_item();
+ }
+}
+
+//--------------------------------------------------------------
+// LIRGenerator
+//--------------------------------------------------------------
+
+
+LIR_Opr LIRGenerator::exceptionOopOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
+LIR_Opr LIRGenerator::exceptionPcOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
+LIR_Opr LIRGenerator::divInOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
+LIR_Opr LIRGenerator::divOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
+LIR_Opr LIRGenerator::remOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
+LIR_Opr LIRGenerator::shiftCountOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
+LIR_Opr LIRGenerator::syncTempOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; }
+LIR_Opr LIRGenerator::getThreadTemp() { Unimplemented(); return LIR_OprFact::illegalOpr; }
+
+
+LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) { Unimplemented(); return LIR_OprFact::illegalOpr; }
+
+
+LIR_Opr LIRGenerator::rlock_byte(BasicType type) { Unimplemented(); return LIR_OprFact::illegalOpr; }
+
+
+//--------- loading items into registers --------------------------------
+
+
+// i486 instructions can inline constants
+bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const { Unimplemented(); return false; }
+
+
+bool LIRGenerator::can_inline_as_constant(Value v) const { Unimplemented(); return false; }
+
+
+bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { Unimplemented(); return false; }
+
+
+LIR_Opr LIRGenerator::safepoint_poll_register() { Unimplemented(); return LIR_OprFact::illegalOpr; }
+
+
+LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
+ int shift, int disp, BasicType type) { Unimplemented(); return 0; }
+
+
+LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
+ BasicType type, bool needs_card_mark) { Unimplemented(); return 0; }
+
+
+LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) { Unimplemented(); return LIR_OprFact::illegalOpr; }
+
+void LIRGenerator::increment_counter(address counter, BasicType type, int step) { Unimplemented(); }
+
+
+void LIRGenerator::increment_counter(LIR_Address* addr, int step) { Unimplemented(); }
+
+void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) { Unimplemented(); }
+
+void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) { Unimplemented(); }
+
+
+void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info) { Unimplemented(); }
+
+
+bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) { Unimplemented(); return false; }
+
+
+void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) { Unimplemented(); }
+
+//----------------------------------------------------------------------
+// visitor functions
+//----------------------------------------------------------------------
+
+
+void LIRGenerator::do_StoreIndexed(StoreIndexed* x) { Unimplemented(); }
+
+void LIRGenerator::do_MonitorEnter(MonitorEnter* x) { Unimplemented(); }
+
+void LIRGenerator::do_MonitorExit(MonitorExit* x) { Unimplemented(); }
+
+// _ineg, _lneg, _fneg, _dneg
+void LIRGenerator::do_NegateOp(NegateOp* x) { Unimplemented(); }
+
+// for _fadd, _fmul, _fsub, _fdiv, _frem
+// _dadd, _dmul, _dsub, _ddiv, _drem
+void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) { Unimplemented(); }
+
+// for _ladd, _lmul, _lsub, _ldiv, _lrem
+void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { Unimplemented(); }
+
+// for: _iadd, _imul, _isub, _idiv, _irem
+void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) { Unimplemented(); }
+
+void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) { Unimplemented(); }
+
+// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
+void LIRGenerator::do_ShiftOp(ShiftOp* x) { Unimplemented(); }
+
+// _iand, _land, _ior, _lor, _ixor, _lxor
+void LIRGenerator::do_LogicOp(LogicOp* x) { Unimplemented(); }
+
+// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
+void LIRGenerator::do_CompareOp(CompareOp* x) { Unimplemented(); }
+
+void LIRGenerator::do_AttemptUpdate(Intrinsic* x) { Unimplemented(); }
+
+void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) { Unimplemented(); }
+
+void LIRGenerator::do_MathIntrinsic(Intrinsic* x) { Unimplemented(); }
+
+void LIRGenerator::do_ArrayCopy(Intrinsic* x) { Unimplemented(); }
+
+// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
+// _i2b, _i2c, _i2s
+LIR_Opr fixed_register_for(BasicType type) { Unimplemented(); return LIR_OprFact::illegalOpr; }
+
+void LIRGenerator::do_Convert(Convert* x) { Unimplemented(); }
+
+void LIRGenerator::do_NewInstance(NewInstance* x) { Unimplemented(); }
+
+void LIRGenerator::do_NewTypeArray(NewTypeArray* x) { Unimplemented(); }
+
+void LIRGenerator::do_NewObjectArray(NewObjectArray* x) { Unimplemented(); }
+
+void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { Unimplemented(); }
+
+void LIRGenerator::do_BlockBegin(BlockBegin* x) { Unimplemented(); }
+
+void LIRGenerator::do_CheckCast(CheckCast* x) { Unimplemented(); }
+
+void LIRGenerator::do_InstanceOf(InstanceOf* x) { Unimplemented(); }
+
+void LIRGenerator::do_If(If* x) { Unimplemented(); }
+
+LIR_Opr LIRGenerator::getThreadPointer() { Unimplemented(); return LIR_OprFact::illegalOpr; }
+
+void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
+
+void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
+ CodeEmitInfo* info) { Unimplemented(); }
+
+void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
+ CodeEmitInfo* info) { Unimplemented(); }
+void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
+ BasicType type, bool is_volatile) { Unimplemented(); }
+
+void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
+ BasicType type, bool is_volatile) { Unimplemented(); }
diff --git a/src/cpu/aarch64/vm/c1_LinearScan_aarch64.cpp b/src/cpu/aarch64/vm/c1_LinearScan_aarch64.cpp
new file mode 100644
index 000000000..77859b9d6
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_LinearScan_aarch64.cpp
@@ -0,0 +1,1241 @@
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_Instruction.hpp"
+#include "c1/c1_LinearScan.hpp"
+#include "utilities/bitMap.inline.hpp"
+
+
+//----------------------------------------------------------------------
+// Allocation of FPU stack slots (Intel x86 only)
+//----------------------------------------------------------------------
+
+void LinearScan::allocate_fpu_stack() {
+ // First compute which FPU registers are live at the start of each basic block
+ // (To minimize the amount of work we have to do if we have to merge FPU stacks)
+ if (ComputeExactFPURegisterUsage) {
+ Interval* intervals_in_register, *intervals_in_memory;
+ create_unhandled_lists(&intervals_in_register, &intervals_in_memory, is_in_fpu_register, NULL);
+
+ // ignore memory intervals by overwriting intervals_in_memory
+    // the dummy interval is needed to force the walker to walk until the given id:
+ // without it, the walker stops when the unhandled-list is empty -> live information
+ // beyond this point would be incorrect.
+ Interval* dummy_interval = new Interval(any_reg);
+ dummy_interval->add_range(max_jint - 2, max_jint - 1);
+ dummy_interval->set_next(Interval::end());
+ intervals_in_memory = dummy_interval;
+
+ IntervalWalker iw(this, intervals_in_register, intervals_in_memory);
+
+ const int num_blocks = block_count();
+ for (int i = 0; i < num_blocks; i++) {
+ BlockBegin* b = block_at(i);
+
+ // register usage is only needed for merging stacks -> compute only
+ // when more than one predecessor.
+ // the block must not have any spill moves at the beginning (checked by assertions)
+ // spill moves would use intervals that are marked as handled and so the usage bit
+      // would have been set incorrectly
+
+ // NOTE: the check for number_of_preds > 1 is necessary. A block with only one
+      // predecessor may have spill moves at the beginning of the block.
+ // If an interval ends at the current instruction id, it is not possible
+ // to decide if the register is live or not at the block begin -> the
+ // register information would be incorrect.
+ if (b->number_of_preds() > 1) {
+ int id = b->first_lir_instruction_id();
+ BitMap regs(FrameMap::nof_fpu_regs);
+ regs.clear();
+
+ iw.walk_to(id); // walk after the first instruction (always a label) of the block
+ assert(iw.current_position() == id, "did not walk completely to id");
+
+ // Only consider FPU values in registers
+ Interval* interval = iw.active_first(fixedKind);
+ while (interval != Interval::end()) {
+ int reg = interval->assigned_reg();
+ assert(reg >= pd_first_fpu_reg && reg <= pd_last_fpu_reg, "no fpu register");
+ assert(interval->assigned_regHi() == -1, "must not have hi register (doubles stored in one register)");
+ assert(interval->from() <= id && id < interval->to(), "interval out of range");
+
+#ifndef PRODUCT
+ if (TraceFPURegisterUsage) {
+ tty->print("fpu reg %d is live because of ", reg - pd_first_fpu_reg); interval->print();
+ }
+#endif
+
+ regs.set_bit(reg - pd_first_fpu_reg);
+ interval = interval->next();
+ }
+
+ b->set_fpu_register_usage(regs);
+
+#ifndef PRODUCT
+ if (TraceFPURegisterUsage) {
+ tty->print("FPU regs for block %d, LIR instr %d): ", b->block_id(), id); regs.print_on(tty); tty->print_cr("");
+ }
+#endif
+ }
+ }
+ }
+
+ FpuStackAllocator alloc(ir()->compilation(), this);
+ _fpu_stack_allocator = &alloc;
+ alloc.allocate();
+ _fpu_stack_allocator = NULL;
+}
+
+
+FpuStackAllocator::FpuStackAllocator(Compilation* compilation, LinearScan* allocator)
+ : _compilation(compilation)
+ , _lir(NULL)
+ , _pos(-1)
+ , _allocator(allocator)
+ , _sim(compilation)
+ , _temp_sim(compilation)
+{}
+
+void FpuStackAllocator::allocate() {
+ int num_blocks = allocator()->block_count();
+ for (int i = 0; i < num_blocks; i++) {
+ // Set up to process block
+ BlockBegin* block = allocator()->block_at(i);
+ intArray* fpu_stack_state = block->fpu_stack_state();
+
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->cr();
+ tty->print_cr("------- Begin of new Block %d -------", block->block_id());
+ }
+#endif
+
+ assert(fpu_stack_state != NULL ||
+ block->end()->as_Base() != NULL ||
+ block->is_set(BlockBegin::exception_entry_flag),
+ "FPU stack state must be present due to linear-scan order for FPU stack allocation");
+ // note: exception handler entries always start with an empty fpu stack
+ // because stack merging would be too complicated
+
+ if (fpu_stack_state != NULL) {
+ sim()->read_state(fpu_stack_state);
+ } else {
+ sim()->clear();
+ }
+
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->print("Reading FPU state for block %d:", block->block_id());
+ sim()->print();
+ tty->cr();
+ }
+#endif
+
+ allocate_block(block);
+ CHECK_BAILOUT();
+ }
+}
+
+void FpuStackAllocator::allocate_block(BlockBegin* block) {
+ bool processed_merge = false;
+ LIR_OpList* insts = block->lir()->instructions_list();
+ set_lir(block->lir());
+ set_pos(0);
+
+
+ // Note: insts->length() may change during loop
+ while (pos() < insts->length()) {
+ LIR_Op* op = insts->at(pos());
+ _debug_information_computed = false;
+
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ op->print();
+ }
+ check_invalid_lir_op(op);
+#endif
+
+ LIR_OpBranch* branch = op->as_OpBranch();
+ LIR_Op1* op1 = op->as_Op1();
+ LIR_Op2* op2 = op->as_Op2();
+ LIR_OpCall* opCall = op->as_OpCall();
+
+ if (branch != NULL && branch->block() != NULL) {
+ if (!processed_merge) {
+ // propagate stack at first branch to a successor
+ processed_merge = true;
+ bool required_merge = merge_fpu_stack_with_successors(block);
+
+ assert(!required_merge || branch->cond() == lir_cond_always, "splitting of critical edges should prevent FPU stack mismatches at cond branches");
+ }
+
+ } else if (op1 != NULL) {
+ handle_op1(op1);
+ } else if (op2 != NULL) {
+ handle_op2(op2);
+ } else if (opCall != NULL) {
+ handle_opCall(opCall);
+ }
+
+ compute_debug_information(op);
+
+ set_pos(1 + pos());
+ }
+
+ // Propagate stack when block does not end with branch
+ if (!processed_merge) {
+ merge_fpu_stack_with_successors(block);
+ }
+}
+
+
+void FpuStackAllocator::compute_debug_information(LIR_Op* op) {
+ if (!_debug_information_computed && op->id() != -1 && allocator()->has_info(op->id())) {
+ visitor.visit(op);
+
+ // exception handling
+ if (allocator()->compilation()->has_exception_handlers()) {
+ XHandlers* xhandlers = visitor.all_xhandler();
+ int n = xhandlers->length();
+ for (int k = 0; k < n; k++) {
+ allocate_exception_handler(xhandlers->handler_at(k));
+ }
+ } else {
+ assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
+ }
+
+ // compute debug information
+ int n = visitor.info_count();
+ assert(n > 0, "should not visit operation otherwise");
+
+ for (int j = 0; j < n; j++) {
+ CodeEmitInfo* info = visitor.info_at(j);
+ // Compute debug information
+ allocator()->compute_debug_info(info, op->id());
+ }
+ }
+ _debug_information_computed = true;
+}
+
+void FpuStackAllocator::allocate_exception_handler(XHandler* xhandler) {
+ if (!sim()->is_empty()) {
+ LIR_List* old_lir = lir();
+ int old_pos = pos();
+ intArray* old_state = sim()->write_state();
+
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->cr();
+ tty->print_cr("------- begin of exception handler -------");
+ }
+#endif
+
+ if (xhandler->entry_code() == NULL) {
+ // need entry code to clear FPU stack
+ LIR_List* entry_code = new LIR_List(_compilation);
+ entry_code->jump(xhandler->entry_block());
+ xhandler->set_entry_code(entry_code);
+ }
+
+ LIR_OpList* insts = xhandler->entry_code()->instructions_list();
+ set_lir(xhandler->entry_code());
+ set_pos(0);
+
+ // Note: insts->length() may change during loop
+ while (pos() < insts->length()) {
+ LIR_Op* op = insts->at(pos());
+
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ op->print();
+ }
+ check_invalid_lir_op(op);
+#endif
+
+ switch (op->code()) {
+ case lir_move:
+ assert(op->as_Op1() != NULL, "must be LIR_Op1");
+ assert(pos() != insts->length() - 1, "must not be last operation");
+
+ handle_op1((LIR_Op1*)op);
+ break;
+
+ case lir_branch:
+ assert(op->as_OpBranch()->cond() == lir_cond_always, "must be unconditional branch");
+ assert(pos() == insts->length() - 1, "must be last operation");
+
+ // remove all remaining dead registers from FPU stack
+ clear_fpu_stack(LIR_OprFact::illegalOpr);
+ break;
+
+ default:
+ // other operations not allowed in exception entry code
+ ShouldNotReachHere();
+ }
+
+ set_pos(pos() + 1);
+ }
+
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->cr();
+ tty->print_cr("------- end of exception handler -------");
+ }
+#endif
+
+ set_lir(old_lir);
+ set_pos(old_pos);
+ sim()->read_state(old_state);
+ }
+}
+
+
+int FpuStackAllocator::fpu_num(LIR_Opr opr) {
+ assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
+ return opr->is_single_fpu() ? opr->fpu_regnr() : opr->fpu_regnrLo();
+}
+
+int FpuStackAllocator::tos_offset(LIR_Opr opr) {
+ return sim()->offset_from_tos(fpu_num(opr));
+}
+
+
+LIR_Opr FpuStackAllocator::to_fpu_stack(LIR_Opr opr) {
+ assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
+
+ int stack_offset = tos_offset(opr);
+ if (opr->is_single_fpu()) {
+ return LIR_OprFact::single_fpu(stack_offset)->make_fpu_stack_offset();
+ } else {
+ assert(opr->is_double_fpu(), "shouldn't call this otherwise");
+ return LIR_OprFact::double_fpu(stack_offset)->make_fpu_stack_offset();
+ }
+}
+
+LIR_Opr FpuStackAllocator::to_fpu_stack_top(LIR_Opr opr, bool dont_check_offset) {
+ assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
+ assert(dont_check_offset || tos_offset(opr) == 0, "operand is not on stack top");
+
+ int stack_offset = 0;
+ if (opr->is_single_fpu()) {
+ return LIR_OprFact::single_fpu(stack_offset)->make_fpu_stack_offset();
+ } else {
+ assert(opr->is_double_fpu(), "shouldn't call this otherwise");
+ return LIR_OprFact::double_fpu(stack_offset)->make_fpu_stack_offset();
+ }
+}
+
+
+
+void FpuStackAllocator::insert_op(LIR_Op* op) {
+ lir()->insert_before(pos(), op);
+ set_pos(1 + pos());
+}
+
+
+void FpuStackAllocator::insert_exchange(int offset) {
+ if (offset > 0) {
+ LIR_Op1* fxch_op = new LIR_Op1(lir_fxch, LIR_OprFact::intConst(offset), LIR_OprFact::illegalOpr);
+ insert_op(fxch_op);
+ sim()->swap(offset);
+
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->print("Exchanged register: %d New state: ", sim()->get_slot(0)); sim()->print(); tty->cr();
+ }
+#endif
+
+ }
+}
+
+void FpuStackAllocator::insert_exchange(LIR_Opr opr) {
+ insert_exchange(tos_offset(opr));
+}
+
+
+void FpuStackAllocator::insert_free(int offset) {
+ // move stack slot to the top of stack and then pop it
+ insert_exchange(offset);
+
+ LIR_Op* fpop = new LIR_Op0(lir_fpop_raw);
+ insert_op(fpop);
+ sim()->pop();
+
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->print("Inserted pop New state: "); sim()->print(); tty->cr();
+ }
+#endif
+}
+
+
+void FpuStackAllocator::insert_free_if_dead(LIR_Opr opr) {
+ if (sim()->contains(fpu_num(opr))) {
+ int res_slot = tos_offset(opr);
+ insert_free(res_slot);
+ }
+}
+
+void FpuStackAllocator::insert_free_if_dead(LIR_Opr opr, LIR_Opr ignore) {
+ if (fpu_num(opr) != fpu_num(ignore) && sim()->contains(fpu_num(opr))) {
+ int res_slot = tos_offset(opr);
+ insert_free(res_slot);
+ }
+}
+
+void FpuStackAllocator::insert_copy(LIR_Opr from, LIR_Opr to) {
+ int offset = tos_offset(from);
+ LIR_Op1* fld = new LIR_Op1(lir_fld, LIR_OprFact::intConst(offset), LIR_OprFact::illegalOpr);
+ insert_op(fld);
+
+ sim()->push(fpu_num(to));
+
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->print("Inserted copy (%d -> %d) New state: ", fpu_num(from), fpu_num(to)); sim()->print(); tty->cr();
+ }
+#endif
+}
+
+void FpuStackAllocator::do_rename(LIR_Opr from, LIR_Opr to) {
+ sim()->rename(fpu_num(from), fpu_num(to));
+}
+
+void FpuStackAllocator::do_push(LIR_Opr opr) {
+ sim()->push(fpu_num(opr));
+}
+
+void FpuStackAllocator::pop_if_last_use(LIR_Op* op, LIR_Opr opr) {
+  assert(op->fpu_pop_count() == 0, "fpu_pop_count already set");
+ assert(tos_offset(opr) == 0, "can only pop stack top");
+
+ if (opr->is_last_use()) {
+ op->set_fpu_pop_count(1);
+ sim()->pop();
+ }
+}
+
+void FpuStackAllocator::pop_always(LIR_Op* op, LIR_Opr opr) {
+  assert(op->fpu_pop_count() == 0, "fpu_pop_count already set");
+ assert(tos_offset(opr) == 0, "can only pop stack top");
+
+ op->set_fpu_pop_count(1);
+ sim()->pop();
+}
+
+void FpuStackAllocator::clear_fpu_stack(LIR_Opr preserve) {
+ int result_stack_size = (preserve->is_fpu_register() && !preserve->is_xmm_register() ? 1 : 0);
+ while (sim()->stack_size() > result_stack_size) {
+ assert(!sim()->slot_is_empty(0), "not allowed");
+
+ if (result_stack_size == 0 || sim()->get_slot(0) != fpu_num(preserve)) {
+ insert_free(0);
+ } else {
+ // move "preserve" to bottom of stack so that all other stack slots can be popped
+ insert_exchange(sim()->stack_size() - 1);
+ }
+ }
+}
+
+
+void FpuStackAllocator::handle_op1(LIR_Op1* op1) {
+ LIR_Opr in = op1->in_opr();
+ LIR_Opr res = op1->result_opr();
+
+ LIR_Opr new_in = in; // new operands relative to the actual fpu stack top
+ LIR_Opr new_res = res;
+
+ // Note: this switch is processed for all LIR_Op1, regardless if they have FPU-arguments,
+ // so checks for is_float_kind() are necessary inside the cases
+ switch (op1->code()) {
+
+ case lir_return: {
+ // FPU-Stack must only contain the (optional) fpu return value.
+ // All remaining dead values are popped from the stack
+ // If the input operand is a fpu-register, it is exchanged to the bottom of the stack
+
+ clear_fpu_stack(in);
+ if (in->is_fpu_register() && !in->is_xmm_register()) {
+ new_in = to_fpu_stack_top(in);
+ }
+
+ break;
+ }
+
+ case lir_move: {
+ if (in->is_fpu_register() && !in->is_xmm_register()) {
+ if (res->is_xmm_register()) {
+ // move from fpu register to xmm register (necessary for operations that
+ // are not available in the SSE instruction set)
+ insert_exchange(in);
+ new_in = to_fpu_stack_top(in);
+ pop_always(op1, in);
+
+ } else if (res->is_fpu_register() && !res->is_xmm_register()) {
+ // move from fpu-register to fpu-register:
+ // * input and result register equal:
+ // nothing to do
+ // * input register is last use:
+ // rename the input register to result register -> input register
+ // not present on fpu-stack afterwards
+ // * input register not last use:
+ // duplicate input register to result register to preserve input
+ //
+ // Note: The LIR-Assembler does not produce any code for fpu register moves,
+ // so input and result stack index must be equal
+
+ if (fpu_num(in) == fpu_num(res)) {
+ // nothing to do
+ } else if (in->is_last_use()) {
+ insert_free_if_dead(res);//, in);
+ do_rename(in, res);
+ } else {
+ insert_free_if_dead(res);
+ insert_copy(in, res);
+ }
+ new_in = to_fpu_stack(res);
+ new_res = new_in;
+
+ } else {
+ // move from fpu-register to memory
+ // input operand must be on top of stack
+
+ insert_exchange(in);
+
+ // create debug information here because afterwards the register may have been popped
+ compute_debug_information(op1);
+
+ new_in = to_fpu_stack_top(in);
+ pop_if_last_use(op1, in);
+ }
+
+ } else if (res->is_fpu_register() && !res->is_xmm_register()) {
+ // move from memory/constant to fpu register
+ // result is pushed on the stack
+
+ insert_free_if_dead(res);
+
+ // create debug information before register is pushed
+ compute_debug_information(op1);
+
+ do_push(res);
+ new_res = to_fpu_stack_top(res);
+ }
+ break;
+ }
+
+ case lir_neg: {
+ if (in->is_fpu_register() && !in->is_xmm_register()) {
+ assert(res->is_fpu_register() && !res->is_xmm_register(), "must be");
+ assert(in->is_last_use(), "old value gets destroyed");
+
+ insert_free_if_dead(res, in);
+ insert_exchange(in);
+ new_in = to_fpu_stack_top(in);
+
+ do_rename(in, res);
+ new_res = to_fpu_stack_top(res);
+ }
+ break;
+ }
+
+ case lir_convert: {
+ Bytecodes::Code bc = op1->as_OpConvert()->bytecode();
+ switch (bc) {
+ case Bytecodes::_d2f:
+ case Bytecodes::_f2d:
+ assert(res->is_fpu_register(), "must be");
+ assert(in->is_fpu_register(), "must be");
+
+ if (!in->is_xmm_register() && !res->is_xmm_register()) {
+ // this is quite the same as a move from fpu-register to fpu-register
+ // Note: input and result operands must have different types
+ if (fpu_num(in) == fpu_num(res)) {
+ // nothing to do
+ new_in = to_fpu_stack(in);
+ } else if (in->is_last_use()) {
+ insert_free_if_dead(res);//, in);
+ new_in = to_fpu_stack(in);
+ do_rename(in, res);
+ } else {
+ insert_free_if_dead(res);
+ insert_copy(in, res);
+ new_in = to_fpu_stack_top(in, true);
+ }
+ new_res = to_fpu_stack(res);
+ }
+
+ break;
+
+ case Bytecodes::_i2f:
+ case Bytecodes::_l2f:
+ case Bytecodes::_i2d:
+ case Bytecodes::_l2d:
+ assert(res->is_fpu_register(), "must be");
+ if (!res->is_xmm_register()) {
+ insert_free_if_dead(res);
+ do_push(res);
+ new_res = to_fpu_stack_top(res);
+ }
+ break;
+
+ case Bytecodes::_f2i:
+ case Bytecodes::_d2i:
+ assert(in->is_fpu_register(), "must be");
+ if (!in->is_xmm_register()) {
+ insert_exchange(in);
+ new_in = to_fpu_stack_top(in);
+
+            // TODO: update registers of stub
+ }
+ break;
+
+ case Bytecodes::_f2l:
+ case Bytecodes::_d2l:
+ assert(in->is_fpu_register(), "must be");
+ if (!in->is_xmm_register()) {
+ insert_exchange(in);
+ new_in = to_fpu_stack_top(in);
+ pop_always(op1, in);
+ }
+ break;
+
+ case Bytecodes::_i2l:
+ case Bytecodes::_l2i:
+ case Bytecodes::_i2b:
+ case Bytecodes::_i2c:
+ case Bytecodes::_i2s:
+ // no fpu operands
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+ break;
+ }
+
+ case lir_roundfp: {
+ assert(in->is_fpu_register() && !in->is_xmm_register(), "input must be in register");
+ assert(res->is_stack(), "result must be on stack");
+
+ insert_exchange(in);
+ new_in = to_fpu_stack_top(in);
+ pop_if_last_use(op1, in);
+ break;
+ }
+
+ default: {
+ assert(!in->is_float_kind() && !res->is_float_kind(), "missed a fpu-operation");
+ }
+ }
+
+ op1->set_in_opr(new_in);
+ op1->set_result_opr(new_res);
+}
+
+void FpuStackAllocator::handle_op2(LIR_Op2* op2) {
+ LIR_Opr left = op2->in_opr1();
+ if (!left->is_float_kind()) {
+ return;
+ }
+ if (left->is_xmm_register()) {
+ return;
+ }
+
+ LIR_Opr right = op2->in_opr2();
+ LIR_Opr res = op2->result_opr();
+ LIR_Opr new_left = left; // new operands relative to the actual fpu stack top
+ LIR_Opr new_right = right;
+ LIR_Opr new_res = res;
+
+ assert(!left->is_xmm_register() && !right->is_xmm_register() && !res->is_xmm_register(), "not for xmm registers");
+
+ switch (op2->code()) {
+ case lir_cmp:
+ case lir_cmp_fd2i:
+ case lir_ucmp_fd2i: {
+ assert(left->is_fpu_register(), "invalid LIR");
+ assert(right->is_fpu_register(), "invalid LIR");
+
+ // the left-hand side must be on top of stack.
+ // the right-hand side is never popped, even if is_last_use is set
+ insert_exchange(left);
+ new_left = to_fpu_stack_top(left);
+ new_right = to_fpu_stack(right);
+ pop_if_last_use(op2, left);
+ break;
+ }
+
+ case lir_mul_strictfp:
+ case lir_div_strictfp: {
+ assert(op2->tmp1_opr()->is_fpu_register(), "strict operations need temporary fpu stack slot");
+ insert_free_if_dead(op2->tmp1_opr());
+ assert(sim()->stack_size() <= 7, "at least one stack slot must be free");
+ // fall-through: continue with the normal handling of lir_mul and lir_div
+ }
+ case lir_add:
+ case lir_sub:
+ case lir_mul:
+ case lir_div: {
+ assert(left->is_fpu_register(), "must be");
+ assert(res->is_fpu_register(), "must be");
+ assert(left->is_equal(res), "must be");
+
+ // either the left-hand or the right-hand side must be on top of stack
+ // (if right is not a register, left must be on top)
+ if (!right->is_fpu_register()) {
+ insert_exchange(left);
+ new_left = to_fpu_stack_top(left);
+ } else {
+        // no exchange necessary if right is already on top of stack
+ if (tos_offset(right) == 0) {
+ new_left = to_fpu_stack(left);
+ new_right = to_fpu_stack_top(right);
+ } else {
+ insert_exchange(left);
+ new_left = to_fpu_stack_top(left);
+ new_right = to_fpu_stack(right);
+ }
+
+ if (right->is_last_use()) {
+ op2->set_fpu_pop_count(1);
+
+ if (tos_offset(right) == 0) {
+ sim()->pop();
+ } else {
+ // if left is on top of stack, the result is placed in the stack
+ // slot of right, so a renaming from right to res is necessary
+ assert(tos_offset(left) == 0, "must be");
+ sim()->pop();
+ do_rename(right, res);
+ }
+ }
+ }
+ new_res = to_fpu_stack(res);
+
+ break;
+ }
+
+ case lir_rem: {
+ assert(left->is_fpu_register(), "must be");
+ assert(right->is_fpu_register(), "must be");
+ assert(res->is_fpu_register(), "must be");
+ assert(left->is_equal(res), "must be");
+
+ // Must bring both operands to top of stack with following operand ordering:
+ // * fpu stack before rem: ... right left
+ // * fpu stack after rem: ... left
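+      // e.g. (illustrative, TOS first): [a b right left] -> fxch(2) -> [right b a left]
+      // -> fxch(1) -> [b right a left] -> fxch(3) -> [left right a b]; left is now at
+      // offset 0 and right at offset 1, as the checks below expect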
+ if (tos_offset(right) != 1) {
+ insert_exchange(right);
+ insert_exchange(1);
+ }
+ insert_exchange(left);
+ assert(tos_offset(right) == 1, "check");
+ assert(tos_offset(left) == 0, "check");
+
+ new_left = to_fpu_stack_top(left);
+ new_right = to_fpu_stack(right);
+
+ op2->set_fpu_pop_count(1);
+ sim()->pop();
+ do_rename(right, res);
+
+ new_res = to_fpu_stack_top(res);
+ break;
+ }
+
+ case lir_abs:
+ case lir_sqrt: {
+ // Right argument appears to be unused
+ assert(right->is_illegal(), "must be");
+ assert(left->is_fpu_register(), "must be");
+ assert(res->is_fpu_register(), "must be");
+ assert(left->is_last_use(), "old value gets destroyed");
+
+ insert_free_if_dead(res, left);
+ insert_exchange(left);
+ do_rename(left, res);
+
+ new_left = to_fpu_stack_top(res);
+ new_res = new_left;
+
+ op2->set_fpu_stack_size(sim()->stack_size());
+ break;
+ }
+
+ case lir_log:
+ case lir_log10: {
+ // log and log10 need one temporary fpu stack slot, so
+      // there is one temporary register stored in temp of the
+ // operation. the stack allocator must guarantee that the stack
+ // slots are really free, otherwise there might be a stack
+ // overflow.
+ assert(right->is_illegal(), "must be");
+ assert(left->is_fpu_register(), "must be");
+ assert(res->is_fpu_register(), "must be");
+ assert(op2->tmp1_opr()->is_fpu_register(), "must be");
+
+ insert_free_if_dead(op2->tmp1_opr());
+ insert_free_if_dead(res, left);
+ insert_exchange(left);
+ do_rename(left, res);
+
+ new_left = to_fpu_stack_top(res);
+ new_res = new_left;
+
+ op2->set_fpu_stack_size(sim()->stack_size());
+ assert(sim()->stack_size() <= 7, "at least one stack slot must be free");
+ break;
+ }
+
+
+ case lir_tan:
+ case lir_sin:
+ case lir_cos:
+ case lir_exp: {
+      // tan, sin, cos and exp need two temporary fpu stack slots, so there are two temporary
+ // registers (stored in right and temp of the operation).
+ // the stack allocator must guarantee that the stack slots are really free,
+ // otherwise there might be a stack overflow.
+ assert(left->is_fpu_register(), "must be");
+ assert(res->is_fpu_register(), "must be");
+ // assert(left->is_last_use(), "old value gets destroyed");
+ assert(right->is_fpu_register(), "right is used as the first temporary register");
+ assert(op2->tmp1_opr()->is_fpu_register(), "temp is used as the second temporary register");
+ assert(fpu_num(left) != fpu_num(right) && fpu_num(right) != fpu_num(op2->tmp1_opr()) && fpu_num(op2->tmp1_opr()) != fpu_num(res), "need distinct temp registers");
+
+ insert_free_if_dead(right);
+ insert_free_if_dead(op2->tmp1_opr());
+
+ insert_free_if_dead(res, left);
+ insert_exchange(left);
+ do_rename(left, res);
+
+ new_left = to_fpu_stack_top(res);
+ new_res = new_left;
+
+ op2->set_fpu_stack_size(sim()->stack_size());
+ assert(sim()->stack_size() <= 6, "at least two stack slots must be free");
+ break;
+ }
+
+ case lir_pow: {
+ // pow needs two temporary fpu stack slots, so there are two temporary
+ // registers (stored in tmp1 and tmp2 of the operation).
+ // the stack allocator must guarantee that the stack slots are really free,
+ // otherwise there might be a stack overflow.
+ assert(left->is_fpu_register(), "must be");
+ assert(right->is_fpu_register(), "must be");
+ assert(res->is_fpu_register(), "must be");
+
+ assert(op2->tmp1_opr()->is_fpu_register(), "tmp1 is the first temporary register");
+ assert(op2->tmp2_opr()->is_fpu_register(), "tmp2 is the second temporary register");
+ assert(fpu_num(left) != fpu_num(right) && fpu_num(left) != fpu_num(op2->tmp1_opr()) && fpu_num(left) != fpu_num(op2->tmp2_opr()) && fpu_num(left) != fpu_num(res), "need distinct temp registers");
+ assert(fpu_num(right) != fpu_num(op2->tmp1_opr()) && fpu_num(right) != fpu_num(op2->tmp2_opr()) && fpu_num(right) != fpu_num(res), "need distinct temp registers");
+ assert(fpu_num(op2->tmp1_opr()) != fpu_num(op2->tmp2_opr()) && fpu_num(op2->tmp1_opr()) != fpu_num(res), "need distinct temp registers");
+ assert(fpu_num(op2->tmp2_opr()) != fpu_num(res), "need distinct temp registers");
+
+ insert_free_if_dead(op2->tmp1_opr());
+ insert_free_if_dead(op2->tmp2_opr());
+
+ // Must bring both operands to top of stack with following operand ordering:
+ // * fpu stack before pow: ... right left
+ // * fpu stack after pow: ... left
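+      // the exchange sequence below mirrors the one used for lir_rem above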
+
+ insert_free_if_dead(res, right);
+
+ if (tos_offset(right) != 1) {
+ insert_exchange(right);
+ insert_exchange(1);
+ }
+ insert_exchange(left);
+ assert(tos_offset(right) == 1, "check");
+ assert(tos_offset(left) == 0, "check");
+
+ new_left = to_fpu_stack_top(left);
+ new_right = to_fpu_stack(right);
+
+ op2->set_fpu_stack_size(sim()->stack_size());
+ assert(sim()->stack_size() <= 6, "at least two stack slots must be free");
+
+ sim()->pop();
+
+ do_rename(right, res);
+
+ new_res = to_fpu_stack_top(res);
+ break;
+ }
+
+ default: {
+ assert(false, "missed a fpu-operation");
+ }
+ }
+
+ op2->set_in_opr1(new_left);
+ op2->set_in_opr2(new_right);
+ op2->set_result_opr(new_res);
+}
+
+void FpuStackAllocator::handle_opCall(LIR_OpCall* opCall) {
+ LIR_Opr res = opCall->result_opr();
+
+ // clear fpu-stack before call
+  // it may contain dead values that could not have been removed by previous operations
+ clear_fpu_stack(LIR_OprFact::illegalOpr);
+ assert(sim()->is_empty(), "fpu stack must be empty now");
+
+ // compute debug information before (possible) fpu result is pushed
+ compute_debug_information(opCall);
+
+ if (res->is_fpu_register() && !res->is_xmm_register()) {
+ do_push(res);
+ opCall->set_result_opr(to_fpu_stack_top(res));
+ }
+}
+
+#ifndef PRODUCT
+void FpuStackAllocator::check_invalid_lir_op(LIR_Op* op) {
+ switch (op->code()) {
+ case lir_24bit_FPU:
+ case lir_reset_FPU:
+ case lir_ffree:
+ assert(false, "operations not allowed in lir. If one of these operations is needed, check if they have fpu operands");
+ break;
+
+ case lir_fpop_raw:
+ case lir_fxch:
+ case lir_fld:
+ assert(false, "operations only inserted by FpuStackAllocator");
+ break;
+ }
+}
+#endif
+
+
+void FpuStackAllocator::merge_insert_add(LIR_List* instrs, FpuStackSim* cur_sim, int reg) {
+ LIR_Op1* move = new LIR_Op1(lir_move, LIR_OprFact::doubleConst(0), LIR_OprFact::double_fpu(reg)->make_fpu_stack_offset());
+
+ instrs->instructions_list()->push(move);
+
+ cur_sim->push(reg);
+ move->set_result_opr(to_fpu_stack(move->result_opr()));
+
+ #ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->print("Added new register: %d New state: ", reg); cur_sim->print(); tty->cr();
+ }
+ #endif
+}
+
+void FpuStackAllocator::merge_insert_xchg(LIR_List* instrs, FpuStackSim* cur_sim, int slot) {
+ assert(slot > 0, "no exchange necessary");
+
+ LIR_Op1* fxch = new LIR_Op1(lir_fxch, LIR_OprFact::intConst(slot));
+ instrs->instructions_list()->push(fxch);
+ cur_sim->swap(slot);
+
+ #ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->print("Exchanged register: %d New state: ", cur_sim->get_slot(slot)); cur_sim->print(); tty->cr();
+ }
+ #endif
+}
+
+void FpuStackAllocator::merge_insert_pop(LIR_List* instrs, FpuStackSim* cur_sim) {
+ int reg = cur_sim->get_slot(0);
+
+ LIR_Op* fpop = new LIR_Op0(lir_fpop_raw);
+ instrs->instructions_list()->push(fpop);
+ cur_sim->pop(reg);
+
+ #ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->print("Removed register: %d New state: ", reg); cur_sim->print(); tty->cr();
+ }
+ #endif
+}
+
+bool FpuStackAllocator::merge_rename(FpuStackSim* cur_sim, FpuStackSim* sux_sim, int start_slot, int change_slot) {
+ int reg = cur_sim->get_slot(change_slot);
+
+ for (int slot = start_slot; slot >= 0; slot--) {
+ int new_reg = sux_sim->get_slot(slot);
+
+ if (!cur_sim->contains(new_reg)) {
+ cur_sim->set_slot(change_slot, new_reg);
+
+ #ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->print("Renamed register %d to %d New state: ", reg, new_reg); cur_sim->print(); tty->cr();
+ }
+ #endif
+
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void FpuStackAllocator::merge_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, FpuStackSim* sux_sim) {
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->cr();
+ tty->print("before merging: pred: "); cur_sim->print(); tty->cr();
+ tty->print(" sux: "); sux_sim->print(); tty->cr();
+ }
+
+ int slot;
+ for (slot = 0; slot < cur_sim->stack_size(); slot++) {
+ assert(!cur_sim->slot_is_empty(slot), "not handled by algorithm");
+ }
+ for (slot = 0; slot < sux_sim->stack_size(); slot++) {
+ assert(!sux_sim->slot_is_empty(slot), "not handled by algorithm");
+ }
+#endif
+
+  // size difference between cur and sux that must be resolved by adding or removing values from the stack
+ int size_diff = cur_sim->stack_size() - sux_sim->stack_size();
+
+ if (!ComputeExactFPURegisterUsage) {
+ // add slots that are currently free, but used in successor
+ // When the exact FPU register usage is computed, the stack does
+ // not contain dead values at merging -> no values must be added
+
+ int sux_slot = sux_sim->stack_size() - 1;
+ while (size_diff < 0) {
+ assert(sux_slot >= 0, "slot out of bounds -> error in algorithm");
+
+ int reg = sux_sim->get_slot(sux_slot);
+ if (!cur_sim->contains(reg)) {
+ merge_insert_add(instrs, cur_sim, reg);
+ size_diff++;
+
+ if (sux_slot + size_diff != 0) {
+ merge_insert_xchg(instrs, cur_sim, sux_slot + size_diff);
+ }
+ }
+ sux_slot--;
+ }
+ }
+
+ assert(cur_sim->stack_size() >= sux_sim->stack_size(), "stack size must be equal or greater now");
+ assert(size_diff == cur_sim->stack_size() - sux_sim->stack_size(), "must be");
+
+ // stack merge algorithm:
+  // 1) as long as the current stack top is not in the right location (that means
+ // it should not be on the stack top), exchange it into the right location
+ // 2) if the stack top is right, but the remaining stack is not ordered correctly,
+ // the stack top is exchanged away to get another value on top ->
+ // now step 1) can be continued
+ // the stack can also contain unused items -> these items are removed from stack
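+  // e.g. (illustrative, TOS first): pred [r2 r5 r1] and sux [r5 r1] with r2 dead:
+  // r2 is not in sux and cannot be renamed to a sux register, so it is popped
+  // (size_diff 1 -> 0), leaving [r5 r1], which already matches sux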
+
+ int finished_slot = sux_sim->stack_size() - 1;
+ while (finished_slot >= 0 || size_diff > 0) {
+ while (size_diff > 0 || (cur_sim->stack_size() > 0 && cur_sim->get_slot(0) != sux_sim->get_slot(0))) {
+ int reg = cur_sim->get_slot(0);
+ if (sux_sim->contains(reg)) {
+ int sux_slot = sux_sim->offset_from_tos(reg);
+ merge_insert_xchg(instrs, cur_sim, sux_slot + size_diff);
+
+ } else if (!merge_rename(cur_sim, sux_sim, finished_slot, 0)) {
+ assert(size_diff > 0, "must be");
+
+ merge_insert_pop(instrs, cur_sim);
+ size_diff--;
+ }
+ assert(cur_sim->stack_size() == 0 || cur_sim->get_slot(0) != reg, "register must have been changed");
+ }
+
+ while (finished_slot >= 0 && cur_sim->get_slot(finished_slot) == sux_sim->get_slot(finished_slot)) {
+ finished_slot--;
+ }
+
+ if (finished_slot >= 0) {
+ int reg = cur_sim->get_slot(finished_slot);
+
+ if (sux_sim->contains(reg) || !merge_rename(cur_sim, sux_sim, finished_slot, finished_slot)) {
+ assert(sux_sim->contains(reg) || size_diff > 0, "must be");
+ merge_insert_xchg(instrs, cur_sim, finished_slot);
+ }
+ assert(cur_sim->get_slot(finished_slot) != reg, "register must have been changed");
+ }
+ }
+
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->print("after merging: pred: "); cur_sim->print(); tty->cr();
+ tty->print(" sux: "); sux_sim->print(); tty->cr();
+ tty->cr();
+ }
+#endif
+ assert(cur_sim->stack_size() == sux_sim->stack_size(), "stack size must be equal now");
+}
+
+
+void FpuStackAllocator::merge_cleanup_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, BitMap& live_fpu_regs) {
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->cr();
+ tty->print("before cleanup: state: "); cur_sim->print(); tty->cr();
+ tty->print(" live: "); live_fpu_regs.print_on(tty); tty->cr();
+ }
+#endif
+
+ int slot = 0;
+ while (slot < cur_sim->stack_size()) {
+ int reg = cur_sim->get_slot(slot);
+ if (!live_fpu_regs.at(reg)) {
+ if (slot != 0) {
+ merge_insert_xchg(instrs, cur_sim, slot);
+ }
+ merge_insert_pop(instrs, cur_sim);
+ } else {
+ slot++;
+ }
+ }
+
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->print("after cleanup: state: "); cur_sim->print(); tty->cr();
+ tty->print(" live: "); live_fpu_regs.print_on(tty); tty->cr();
+ tty->cr();
+ }
+
+ // check if fpu stack only contains live registers
+ for (unsigned int i = 0; i < live_fpu_regs.size(); i++) {
+ if (live_fpu_regs.at(i) != cur_sim->contains(i)) {
+ tty->print_cr("mismatch between required and actual stack content");
+ break;
+ }
+ }
+#endif
+}
+
+
+bool FpuStackAllocator::merge_fpu_stack_with_successors(BlockBegin* block) {
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->print_cr("Propagating FPU stack state for B%d at LIR_Op position %d to successors:",
+ block->block_id(), pos());
+ sim()->print();
+ tty->cr();
+ }
+#endif
+
+ bool changed = false;
+ int number_of_sux = block->number_of_sux();
+
+ if (number_of_sux == 1 && block->sux_at(0)->number_of_preds() > 1) {
+ // The successor has at least two incoming edges, so a stack merge will be necessary
+ // If this block is the first predecessor, cleanup the current stack and propagate it
+ // If this block is not the first predecessor, a stack merge will be necessary
+
+ BlockBegin* sux = block->sux_at(0);
+ intArray* state = sux->fpu_stack_state();
+ LIR_List* instrs = new LIR_List(_compilation);
+
+ if (state != NULL) {
+      // Merge with a successor that already has an FPU stack state
+      // the block must only have one successor because critical edges must have been split
+ FpuStackSim* cur_sim = sim();
+ FpuStackSim* sux_sim = temp_sim();
+ sux_sim->read_state(state);
+
+ merge_fpu_stack(instrs, cur_sim, sux_sim);
+
+ } else {
+ // propagate current FPU stack state to successor without state
+ // clean up stack first so that there are no dead values on the stack
+ if (ComputeExactFPURegisterUsage) {
+ FpuStackSim* cur_sim = sim();
+ BitMap live_fpu_regs = block->sux_at(0)->fpu_register_usage();
+ assert(live_fpu_regs.size() == FrameMap::nof_fpu_regs, "missing register usage");
+
+ merge_cleanup_fpu_stack(instrs, cur_sim, live_fpu_regs);
+ }
+
+ intArray* state = sim()->write_state();
+ if (TraceFPUStack) {
+ tty->print_cr("Setting FPU stack state of B%d (merge path)", sux->block_id());
+ sim()->print(); tty->cr();
+ }
+ sux->set_fpu_stack_state(state);
+ }
+
+ if (instrs->instructions_list()->length() > 0) {
+ lir()->insert_before(pos(), instrs);
+ set_pos(instrs->instructions_list()->length() + pos());
+ changed = true;
+ }
+
+ } else {
+    // Propagate unmodified stack to successors where a stack merge is not necessary
+ intArray* state = sim()->write_state();
+ for (int i = 0; i < number_of_sux; i++) {
+ BlockBegin* sux = block->sux_at(i);
+
+#ifdef ASSERT
+ for (int j = 0; j < sux->number_of_preds(); j++) {
+ assert(block == sux->pred_at(j), "all critical edges must be broken");
+ }
+
+ // check if new state is same
+ if (sux->fpu_stack_state() != NULL) {
+ intArray* sux_state = sux->fpu_stack_state();
+ assert(state->length() == sux_state->length(), "overwriting existing stack state");
+ for (int j = 0; j < state->length(); j++) {
+ assert(state->at(j) == sux_state->at(j), "overwriting existing stack state");
+ }
+ }
+#endif
+#ifndef PRODUCT
+ if (TraceFPUStack) {
+ tty->print_cr("Setting FPU stack state of B%d", sux->block_id());
+ sim()->print(); tty->cr();
+ }
+#endif
+
+ sux->set_fpu_stack_state(state);
+ }
+ }
+
+#ifndef PRODUCT
+ // assertions that FPU stack state conforms to all successors' states
+ intArray* cur_state = sim()->write_state();
+ for (int i = 0; i < number_of_sux; i++) {
+ BlockBegin* sux = block->sux_at(i);
+ intArray* sux_state = sux->fpu_stack_state();
+
+ assert(sux_state != NULL, "no fpu state");
+ assert(cur_state->length() == sux_state->length(), "incorrect length");
+    for (int j = 0; j < cur_state->length(); j++) {
+      assert(cur_state->at(j) == sux_state->at(j), "element not equal");
+ }
+ }
+#endif
+
+ return changed;
+}
diff --git a/src/cpu/aarch64/vm/c1_LinearScan_aarch64.hpp b/src/cpu/aarch64/vm/c1_LinearScan_aarch64.hpp
new file mode 100644
index 000000000..69f53583f
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_LinearScan_aarch64.hpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_C1_LINEARSCAN_AARCH64_HPP
+#define CPU_AARCH64_VM_C1_LINEARSCAN_AARCH64_HPP
+
+inline bool LinearScan::is_processed_reg_num(int reg_num) {
+ Unimplemented();
+ return false;
+}
+
+inline int LinearScan::num_physical_regs(BasicType type) {
+ Unimplemented();
+ // Intel requires two cpu registers for long,
+ // but requires only one fpu register for double
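+  // (on 64-bit, LP64_ONLY(false &&) short-circuits the test below, so a long
+  // also takes a single register)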
+ if (LP64_ONLY(false &&) type == T_LONG) {
+ return 2;
+ }
+ return 1;
+}
+
+
+inline bool LinearScan::requires_adjacent_regs(BasicType type) {
+ return false;
+}
+
+inline bool LinearScan::is_caller_save(int assigned_reg) {
+ Unimplemented();
+ return true; // no callee-saved registers on Intel
+}
+
+
+inline void LinearScan::pd_add_temps(LIR_Op* op) {
+ Unimplemented();
+}
+
+
+// Implementation of LinearScanWalker
+
+inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
+ Unimplemented();
+ return false;
+}
+
+
+class FpuStackAllocator VALUE_OBJ_CLASS_SPEC {
+ private:
+ Compilation* _compilation;
+ LinearScan* _allocator;
+
+ LIR_OpVisitState visitor;
+
+ LIR_List* _lir;
+ int _pos;
+ FpuStackSim _sim;
+ FpuStackSim _temp_sim;
+
+ bool _debug_information_computed;
+
+ LinearScan* allocator() { return _allocator; }
+ Compilation* compilation() const { return _compilation; }
+
+ // unified bailout support
+ void bailout(const char* msg) const { compilation()->bailout(msg); }
+ bool bailed_out() const { return compilation()->bailed_out(); }
+
+ int pos() { return _pos; }
+ void set_pos(int pos) { _pos = pos; }
+ LIR_Op* cur_op() { Unimplemented(); return lir()->instructions_list()->at(pos()); }
+ LIR_List* lir() { return _lir; }
+ void set_lir(LIR_List* lir) { _lir = lir; }
+ FpuStackSim* sim() { return &_sim; }
+ FpuStackSim* temp_sim() { return &_temp_sim; }
+
+ int fpu_num(LIR_Opr opr);
+ int tos_offset(LIR_Opr opr);
+ LIR_Opr to_fpu_stack_top(LIR_Opr opr, bool dont_check_offset = false);
+
+ // Helper functions for handling operations
+ void insert_op(LIR_Op* op);
+ void insert_exchange(int offset);
+ void insert_exchange(LIR_Opr opr);
+ void insert_free(int offset);
+ void insert_free_if_dead(LIR_Opr opr);
+ void insert_free_if_dead(LIR_Opr opr, LIR_Opr ignore);
+ void insert_copy(LIR_Opr from, LIR_Opr to);
+ void do_rename(LIR_Opr from, LIR_Opr to);
+ void do_push(LIR_Opr opr);
+ void pop_if_last_use(LIR_Op* op, LIR_Opr opr);
+ void pop_always(LIR_Op* op, LIR_Opr opr);
+ void clear_fpu_stack(LIR_Opr preserve);
+ void handle_op1(LIR_Op1* op1);
+ void handle_op2(LIR_Op2* op2);
+ void handle_opCall(LIR_OpCall* opCall);
+ void compute_debug_information(LIR_Op* op);
+ void allocate_exception_handler(XHandler* xhandler);
+ void allocate_block(BlockBegin* block);
+
+#ifndef PRODUCT
+ void check_invalid_lir_op(LIR_Op* op);
+#endif
+
+ // Helper functions for merging of fpu stacks
+ void merge_insert_add(LIR_List* instrs, FpuStackSim* cur_sim, int reg);
+ void merge_insert_xchg(LIR_List* instrs, FpuStackSim* cur_sim, int slot);
+ void merge_insert_pop(LIR_List* instrs, FpuStackSim* cur_sim);
+ bool merge_rename(FpuStackSim* cur_sim, FpuStackSim* sux_sim, int start_slot, int change_slot);
+ void merge_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, FpuStackSim* sux_sim);
+ void merge_cleanup_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, BitMap& live_fpu_regs);
+ bool merge_fpu_stack_with_successors(BlockBegin* block);
+
+ public:
+ LIR_Opr to_fpu_stack(LIR_Opr opr); // used by LinearScan for creation of debug information
+
+ FpuStackAllocator(Compilation* compilation, LinearScan* allocator);
+ void allocate();
+};
+
+#endif // CPU_AARCH64_VM_C1_LINEARSCAN_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp
new file mode 100644
index 000000000..b78784b25
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "c1/c1_Runtime1.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "gc_interface/collectedHeap.hpp"
+#include "interpreter/interpreter.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/markOop.hpp"
+#include "runtime/basicLock.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/os.hpp"
+#include "runtime/stubRoutines.hpp"
+
+int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) { Unimplemented(); return 0; }
+
+
+void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) { Unimplemented(); }
+
+
+// Defines obj, preserves var_size_in_bytes
+void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) { Unimplemented(); }
+
+
+void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) { Unimplemented(); }
+
+
+// preserves obj, destroys len_in_bytes
+void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) { Unimplemented(); }
+
+
+void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) { Unimplemented(); }
+
+void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) { Unimplemented(); }
+
+void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case) { Unimplemented(); }
+
+
+
+void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) { Unimplemented(); }
+
+
+void C1_MacroAssembler::build_frame(int frame_size_in_bytes) { Unimplemented(); }
+
+
+void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) { Unimplemented(); }
+
+
+void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) { Unimplemented(); }
+
+
+void C1_MacroAssembler::verified_entry() { Unimplemented(); }
+
+
+#ifndef PRODUCT
+
+void C1_MacroAssembler::verify_stack_oop(int stack_offset) { Unimplemented(); }
+
+void C1_MacroAssembler::verify_not_null_oop(Register r) { Unimplemented(); }
+
+void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) { Unimplemented(); }
+
+#endif // ifndef PRODUCT
diff --git a/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp
new file mode 100644
index 000000000..ba7802031
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP
+#define CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP
+
+// C1_MacroAssembler contains high-level macros for C1
+
+ private:
+ int _rsp_offset; // track rsp changes
+ // initialization
+ void pd_init() { Unimplemented(); _rsp_offset = 0; }
+
+ public:
+ void try_allocate(
+ Register obj, // result: pointer to object after successful allocation
+ Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+ int con_size_in_bytes, // object size in bytes if known at compile time
+ Register t1, // temp register
+ Register t2, // temp register
+ Label& slow_case // continuation point if fast allocation fails
+ );
+
+ void initialize_header(Register obj, Register klass, Register len, Register t1, Register t2);
+ void initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1);
+
+ // locking
+ // hdr : must be rax, contents destroyed
+ // obj : must point to the object to lock, contents preserved
+ // disp_hdr: must point to the displaced header location, contents preserved
+ // scratch : scratch register, contents destroyed
+ // returns code offset at which to add null check debug information
+ int lock_object (Register swap, Register obj, Register disp_hdr, Register scratch, Label& slow_case);
+
+ // unlocking
+ // hdr : contents destroyed
+ // obj : must point to the object to lock, contents preserved
+ // disp_hdr: must be eax & must point to the displaced header location, contents destroyed
+ void unlock_object(Register swap, Register obj, Register lock, Label& slow_case);
+
+ void initialize_object(
+ Register obj, // result: pointer to object after successful allocation
+ Register klass, // object klass
+ Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+ int con_size_in_bytes, // object size in bytes if known at compile time
+ Register t1, // temp register
+ Register t2 // temp register
+ );
+
+ // allocation of fixed-size objects
+ // (can also be used to allocate fixed-size arrays, by setting
+ // hdr_size correctly and storing the array length afterwards)
+ // obj : must be rax, will contain pointer to allocated object
+ // t1, t2 : scratch registers - contents destroyed
+ // header_size: size of object header in words
+ // object_size: total size of object in words
+ // slow_case : exit to slow case implementation if fast allocation fails
+ void allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case);
+
+ enum {
+ max_array_allocation_length = 0x00FFFFFF
+ };
+
+ // allocation of arrays
+ // obj : must be rax, will contain pointer to allocated object
+ // len : array length in number of elements
+ // t : scratch register - contents destroyed
+ // header_size: size of object header in words
+ // f : element scale factor
+ // slow_case : exit to slow case implementation if fast allocation fails
+ void allocate_array(Register obj, Register len, Register t, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case);
+
+ int rsp_offset() const { return _rsp_offset; }
+ void set_rsp_offset(int n) { _rsp_offset = n; }
+
+ // Note: NEVER push values directly, but only through following push_xxx functions;
+ // This helps us to track the rsp changes compared to the entry rsp (->_rsp_offset)
+
+  void push_jint (jint i) { Unimplemented(); }
+ void push_oop (jobject o) { Unimplemented(); }
+ // Seems to always be in wordSize
+ void push_addr (Address a) { Unimplemented(); }
+ void push_reg (Register r) { Unimplemented(); }
+ void pop_reg (Register r) { Unimplemented(); }
+
+ void dec_stack (int nof_words) { Unimplemented(); }
+
+ void dec_stack_after_call (int nof_words) { Unimplemented(); }
+
+ void invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) PRODUCT_RETURN;
+
+#endif // CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp b/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
new file mode 100644
index 000000000..ad0af0887
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "c1/c1_Defs.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "c1/c1_Runtime1.hpp"
+#include "interpreter/interpreter.hpp"
+#include "nativeInst_aarch64.hpp"
+#include "oops/compiledICHolderOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "register_aarch64.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/signature.hpp"
+#include "runtime/vframeArray.hpp"
+#include "vmreg_aarch64.inline.hpp"
+
+
+// Implementation of StubAssembler
+
+int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) { Unimplemented(); return 0; }
+
+
+int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) { Unimplemented(); return 0; }
+
+
+int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) { Unimplemented(); return 0; }
+
+
+int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) { Unimplemented(); return 0; }
+
+
+// Implementation of StubFrame
+
+class StubFrame: public StackObj {
+ private:
+ StubAssembler* _sasm;
+
+ public:
+ StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
+ void load_argument(int offset_in_words, Register reg);
+
+ ~StubFrame();
+};
+
+
+#define __ _sasm->
+
+StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) { Unimplemented(); }
+
+// load parameters that were stored with LIR_Assembler::store_parameter
+// Note: offsets for store_parameter and load_argument must match
+void StubFrame::load_argument(int offset_in_words, Register reg) { Unimplemented(); }
+
+StubFrame::~StubFrame() { Unimplemented(); }
+
+#undef __
+
+
+// Implementation of Runtime1
+
+#define __ sasm->
+
+const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
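+// (Each FPU register is saved in double format, and a double spans two 32-bit VMReg
+// slots, hence the factor of two.)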
+
+// Stack layout for saving/restoring all the registers needed during a runtime
+// call (this includes deoptimization)
+// Note that users of this frame may well have arguments to some runtime call
+// while these values are on the stack. These positions neglect those arguments,
+// but the code in save_live_registers takes the argument count into account.
+//
+#ifdef _LP64
+ #define SLOT2(x) x,
+ #define SLOT_PER_WORD 2
+#else
+ #define SLOT2(x)
+ #define SLOT_PER_WORD 1
+#endif // _LP64
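+// (On _LP64 a machine word covers two 32-bit slots, so a register-save layout enum
+// names a second slot via SLOT2(x); on 32-bit SLOT2 expands to nothing.)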
+
+// enum reg_save_layout {
+// };
+
+
+// Save off registers which might be killed by calls into the runtime.
+// Tries to be smart about FP registers.  In particular we separate
+// saving and describing the FPU registers for deoptimization since we
+// have to save the FPU registers twice if we describe them and on P4
+// saving FPU registers which don't contain anything appears
+// expensive. The deopt blob is the only thing which needs to
+// describe FPU registers. In all other cases it should be sufficient
+// to simply save their current value.
+
+static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
+ bool save_fpu_registers = true) { Unimplemented(); return 0; }
+
+static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
+ bool save_fpu_registers = true) { Unimplemented(); return 0; }
+
+
+static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) { Unimplemented(); }
+
+
+static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) { Unimplemented(); }
+
+
+static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) { Unimplemented(); }
+
+
+void Runtime1::initialize_pd() { Unimplemented(); }
+
+
+// target: the entry point of the method that creates and posts the exception oop
+// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)
+
+OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) { Unimplemented(); return 0; }
+
+
+OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { Unimplemented(); return 0; }
+
+
+void Runtime1::generate_unwind_exception(StubAssembler *sasm) { Unimplemented(); }
+
+
+OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { Unimplemented(); return 0; }
+
+
+OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { Unimplemented(); return 0; }
+
+#undef __
+
+const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }
diff --git a/src/cpu/aarch64/vm/c1_globals_aarch64.hpp b/src/cpu/aarch64/vm/c1_globals_aarch64.hpp
new file mode 100644
index 000000000..d6a5cc45c
--- /dev/null
+++ b/src/cpu/aarch64/vm/c1_globals_aarch64.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_C1_GLOBALS_AARCH64_HPP
+#define CPU_AARCH64_VM_C1_GLOBALS_AARCH64_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+// Sets the default values for platform dependent flags used by the client compiler.
+// (see c1_globals.hpp)
+
+#ifndef TIERED
+define_pd_global(bool, BackgroundCompilation, true );
+define_pd_global(bool, UseTLAB, true );
+define_pd_global(bool, ResizeTLAB, true );
+define_pd_global(bool, InlineIntrinsics, true );
+define_pd_global(bool, PreferInterpreterNativeStubs, false);
+define_pd_global(bool, ProfileTraps, false);
+define_pd_global(bool, UseOnStackReplacement, true );
+define_pd_global(bool, TieredCompilation, false);
+define_pd_global(intx, CompileThreshold, 1500 );
+define_pd_global(intx, BackEdgeThreshold, 100000);
+
+define_pd_global(intx, OnStackReplacePercentage, 933 );
+define_pd_global(intx, FreqInlineSize, 325 );
+define_pd_global(intx, NewSizeThreadIncrease, 4*K );
+define_pd_global(intx, InitialCodeCacheSize, 160*K);
+define_pd_global(intx, ReservedCodeCacheSize, 32*M );
+define_pd_global(bool, ProfileInterpreter, false);
+define_pd_global(intx, CodeCacheExpansionSize, 32*K );
+define_pd_global(uintx,CodeCacheMinBlockLength, 1);
+define_pd_global(uintx,PermSize, 12*M );
+define_pd_global(uintx,MaxPermSize, 64*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
+define_pd_global(uint64_t,MaxRAM, 1ULL*G);
+define_pd_global(bool, CICompileOSR, true );
+#endif // !TIERED
+define_pd_global(bool, UseTypeProfile, false);
+define_pd_global(bool, RoundFPResults, true );
+
+define_pd_global(bool, LIRFillDelaySlots, false);
+define_pd_global(bool, OptimizeSinglePrecision, true );
+define_pd_global(bool, CSEArrayLength, false);
+define_pd_global(bool, TwoOperandLIRForm, true );
+
+define_pd_global(intx, SafepointPollOffset, 256 );
+
+#endif // CPU_AARCH64_VM_C1_GLOBALS_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/c2_globals_aarch64.hpp b/src/cpu/aarch64/vm/c2_globals_aarch64.hpp
new file mode 100644
index 000000000..749c48f5e
--- /dev/null
+++ b/src/cpu/aarch64/vm/c2_globals_aarch64.hpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_C2_GLOBALS_AARCH64_HPP
+#define CPU_AARCH64_VM_C2_GLOBALS_AARCH64_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+// Sets the default values for platform dependent flags used by the server compiler.
+// (see c2_globals.hpp). Alpha-sorted.
+
+define_pd_global(bool, BackgroundCompilation, true);
+define_pd_global(bool, UseTLAB, true);
+define_pd_global(bool, ResizeTLAB, true);
+define_pd_global(bool, CICompileOSR, true);
+define_pd_global(bool, InlineIntrinsics, true);
+define_pd_global(bool, PreferInterpreterNativeStubs, false);
+define_pd_global(bool, ProfileTraps, true);
+define_pd_global(bool, UseOnStackReplacement, true);
+#ifdef CC_INTERP
+define_pd_global(bool, ProfileInterpreter, false);
+#else
+define_pd_global(bool, ProfileInterpreter, true);
+#endif // CC_INTERP
+define_pd_global(bool, TieredCompilation, trueInTiered);
+define_pd_global(intx, CompileThreshold, 10000);
+define_pd_global(intx, BackEdgeThreshold, 100000);
+
+define_pd_global(intx, OnStackReplacePercentage, 140);
+define_pd_global(intx, ConditionalMoveLimit, 3);
+define_pd_global(intx, FLOATPRESSURE, 6);
+define_pd_global(intx, FreqInlineSize, 325);
+#ifdef AMD64
+define_pd_global(intx, INTPRESSURE, 13);
+define_pd_global(intx, InteriorEntryAlignment, 16);
+define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
+define_pd_global(intx, LoopUnrollLimit, 60);
+// InitialCodeCacheSize derived from specjbb2000 run.
+define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(intx, CodeCacheExpansionSize, 64*K);
+
+// Ergonomics related flags
+define_pd_global(uint64_t,MaxRAM, 128ULL*G);
+#else
+define_pd_global(intx, INTPRESSURE, 6);
+define_pd_global(intx, InteriorEntryAlignment, 4);
+define_pd_global(intx, NewSizeThreadIncrease, 4*K);
+define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
+// InitialCodeCacheSize derived from specjbb2000 run.
+define_pd_global(intx, InitialCodeCacheSize, 2304*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(intx, CodeCacheExpansionSize, 32*K);
+
+// Ergonomics related flags
+define_pd_global(uint64_t,MaxRAM, 4ULL*G);
+#endif // AMD64
+define_pd_global(intx, RegisterCostAreaRatio, 16000);
+
+// Peephole and CISC spilling both break the graph, and so make the
+// scheduler sick.
+define_pd_global(bool, OptoPeephole, true);
+define_pd_global(bool, UseCISCSpill, true);
+define_pd_global(bool, OptoScheduling, false);
+define_pd_global(bool, OptoBundling, false);
+
+define_pd_global(intx, ReservedCodeCacheSize, 48*M);
+define_pd_global(uintx,CodeCacheMinBlockLength, 4);
+
+// Heap related flags
+define_pd_global(uintx,PermSize, ScaleForWordSize(16*M));
+define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
+
+// Ergonomics related flags
+define_pd_global(bool, NeverActAsServerClassMachine, false);
+
+#endif // CPU_AARCH64_VM_C2_GLOBALS_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/c2_init_aarch64.cpp b/src/cpu/aarch64/vm/c2_init_aarch64.cpp
new file mode 100644
index 000000000..286fec213
--- /dev/null
+++ b/src/cpu/aarch64/vm/c2_init_aarch64.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "opto/compile.hpp"
+#include "opto/node.hpp"
+
+// processor dependent initialization
+
+void Compile::pd_compiler2_init() {
+ guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" );
+ // QQQ presumably all 64bit cpu's support this. Seems like the ifdef could
+ // simply be left out.
+#ifndef AMD64
+ if (!VM_Version::supports_cmov()) {
+ ConditionalMoveLimit = 0;
+ }
+#endif // AMD64
+}
diff --git a/src/cpu/aarch64/vm/codeBuffer_aarch64.hpp b/src/cpu/aarch64/vm/codeBuffer_aarch64.hpp
new file mode 100644
index 000000000..7f20314ff
--- /dev/null
+++ b/src/cpu/aarch64/vm/codeBuffer_aarch64.hpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_CODEBUFFER_AARCH64_HPP
+#define CPU_AARCH64_VM_CODEBUFFER_AARCH64_HPP
+
+private:
+ void pd_initialize() {}
+
+public:
+ void flush_bundle(bool start_new_bundle) {}
+
+#endif // CPU_AARCH64_VM_CODEBUFFER_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/copy_aarch64.hpp b/src/cpu/aarch64/vm/copy_aarch64.hpp
new file mode 100644
index 000000000..a8d6a4a22
--- /dev/null
+++ b/src/cpu/aarch64/vm/copy_aarch64.hpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_COPY_AARCH64_HPP
+#define CPU_AARCH64_VM_COPY_AARCH64_HPP
+
+// Inline functions for memory copy and fill.
+
+// Contains inline asm implementations
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "copy_linux_aarch64.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_x86
+# include "copy_linux_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_solaris_x86
+# include "copy_solaris_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_windows_x86
+# include "copy_windows_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_bsd_x86
+# include "copy_bsd_x86.inline.hpp"
+#endif
+
+
+static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
+#ifdef AMD64
+ julong* to = (julong*) tohw;
+ julong v = ((julong) value << 32) | value;
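+  // e.g. value == 0x12345678 gives v == 0x1234567812345678, so each 64-bit store
+  // below fills two adjacent 32-bit words with the pattern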
+ while (count-- > 0) {
+ *to++ = v;
+ }
+#else
+ juint* to = (juint*)tohw;
+ count *= HeapWordSize / BytesPerInt;
+ while (count-- > 0) {
+ *to++ = value;
+ }
+#endif // AMD64
+}
+
+static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
+ pd_fill_to_words(tohw, count, value);
+}
+
+static void pd_fill_to_bytes(void* to, size_t count, jubyte value) {
+ (void)memset(to, value, count);
+}
+
+static void pd_zero_to_words(HeapWord* tohw, size_t count) {
+ pd_fill_to_words(tohw, count, 0);
+}
+
+static void pd_zero_to_bytes(void* to, size_t count) {
+ (void)memset(to, 0, count);
+}
+
+#endif // CPU_AARCH64_VM_COPY_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/cppInterpreterGenerator_aarch64.hpp b/src/cpu/aarch64/vm/cppInterpreterGenerator_aarch64.hpp
new file mode 100644
index 000000000..2f9f1d47d
--- /dev/null
+++ b/src/cpu/aarch64/vm/cppInterpreterGenerator_aarch64.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_CPPINTERPRETERGENERATOR_AARCH64_HPP
+#define CPU_AARCH64_VM_CPPINTERPRETERGENERATOR_AARCH64_HPP
+
+ protected:
+
+#if 0
+ address generate_asm_interpreter_entry(bool synchronized);
+ address generate_native_entry(bool synchronized);
+ address generate_abstract_entry(void);
+ address generate_math_entry(AbstractInterpreter::MethodKind kind);
+ address generate_empty_entry(void);
+ address generate_accessor_entry(void);
+ address generate_Reference_get_entry(void);
+ void lock_method(void);
+ void generate_stack_overflow_check(void);
+
+ void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
+ void generate_counter_overflow(Label* do_continue);
+#endif
+
+ void generate_more_monitors();
+ void generate_deopt_handling();
+ address generate_interpreter_frame_manager(bool synchronized); // C++ interpreter only
+ void generate_compute_interpreter_state(const Register state,
+ const Register prev_state,
+ const Register sender_sp,
+ bool native); // C++ interpreter only
+
+#endif // CPU_AARCH64_VM_CPPINTERPRETERGENERATOR_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/cppInterpreter_aarch64.cpp b/src/cpu/aarch64/vm/cppInterpreter_aarch64.cpp
new file mode 100644
index 000000000..b9a5c2293
--- /dev/null
+++ b/src/cpu/aarch64/vm/cppInterpreter_aarch64.cpp
@@ -0,0 +1,2456 @@
+/*
+ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/cppInterpreter.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodDataOop.hpp"
+#include "oops/methodOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#ifdef SHARK
+#include "shark/shark_globals.hpp"
+#endif
+
+#ifdef CC_INTERP
+
+// Routine exists to make tracebacks look decent in debugger
+// while we are recursed in the frame manager/c++ interpreter.
+// We could use an address in the frame manager but having
+// frames look natural in the debugger is a plus.
+extern "C" void RecursiveInterpreterActivation(interpreterState istate)
+{
+ //
+ ShouldNotReachHere();
+}
+
+
+#define __ _masm->
+#define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))
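+// e.g. STATE(_stack) expands to Address(state, byte_offset_of(BytecodeInterpreter, _stack)),
+// i.e. a field of the current interpreterState addressed off the "state" register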
+
+Label fast_accessor_slow_entry_path;  // fast accessor methods need to be able to jmp to the unsynchronized
+                                      // c++ interpreter entry point; this label holds that entry point.
+
+// default registers for state and sender_sp
+// state and sender_sp are the same on 32bit because we have no choice.
+// state could be rsi on 64bit but it is an arg reg and not callee save
+// so r13 is a better choice.
+
+const Register state = NOT_LP64(rsi) LP64_ONLY(r13);
+const Register sender_sp_on_entry = NOT_LP64(rsi) LP64_ONLY(r13);
+
+// NEEDED for JVMTI?
+// address AbstractInterpreter::_remove_activation_preserving_args_entry;
+
+static address unctrap_frame_manager_entry = NULL;
+
+static address deopt_frame_manager_return_atos = NULL;
+static address deopt_frame_manager_return_btos = NULL;
+static address deopt_frame_manager_return_itos = NULL;
+static address deopt_frame_manager_return_ltos = NULL;
+static address deopt_frame_manager_return_ftos = NULL;
+static address deopt_frame_manager_return_dtos = NULL;
+static address deopt_frame_manager_return_vtos = NULL;
+
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+ int i = 0;
+ switch (type) {
+ case T_BOOLEAN: i = 0; break;
+ case T_CHAR : i = 1; break;
+ case T_BYTE : i = 2; break;
+ case T_SHORT : i = 3; break;
+ case T_INT : i = 4; break;
+ case T_VOID : i = 5; break;
+ case T_FLOAT : i = 8; break;
+ case T_LONG : i = 9; break;
+ case T_DOUBLE : i = 6; break;
+ case T_OBJECT : // fall through
+ case T_ARRAY : i = 7; break;
+ default : ShouldNotReachHere();
+ }
+ assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
+ return i;
+}
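+
+// (This index selects among the per-type result handlers generated below; note that
+// T_OBJECT and T_ARRAY deliberately share a handler slot.)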
+
+// Is this pc anywhere within code owned by the interpreter?
+// This only works for pc that might possibly be exposed to frame
+// walkers. It clearly misses all of the actual c++ interpreter
+// implementation
+bool CppInterpreter::contains(address pc) {
+ return (_code->contains(pc) ||
+ pc == CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
+}
+
+
+address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
+ address entry = __ pc();
+ switch (type) {
+ case T_BOOLEAN: __ c2bool(rax); break;
+ case T_CHAR : __ andl(rax, 0xFFFF); break;
+ case T_BYTE : __ sign_extend_byte (rax); break;
+ case T_SHORT : __ sign_extend_short(rax); break;
+ case T_VOID : // fall thru
+ case T_LONG : // fall thru
+ case T_INT : /* nothing to do */ break;
+
+ case T_DOUBLE :
+ case T_FLOAT :
+ {
+ const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
+ __ pop(t); // remove return address first
+ // Must return a result for interpreter or compiler. In SSE
+ // mode, results are returned in xmm0 and the FPU stack must
+ // be empty.
+ if (type == T_FLOAT && UseSSE >= 1) {
+#ifndef _LP64
+ // Load ST0
+ __ fld_d(Address(rsp, 0));
+ // Store as float and empty fpu stack
+ __ fstp_s(Address(rsp, 0));
+#endif // !_LP64
+ // and reload
+ __ movflt(xmm0, Address(rsp, 0));
+ } else if (type == T_DOUBLE && UseSSE >= 2 ) {
+ __ movdbl(xmm0, Address(rsp, 0));
+ } else {
+ // restore ST0
+ __ fld_d(Address(rsp, 0));
+ }
+ // and pop the temp
+ __ addptr(rsp, 2 * wordSize);
+ __ push(t); // restore return address
+ }
+ break;
+ case T_OBJECT :
+ // retrieve result from frame
+ __ movptr(rax, STATE(_oop_temp));
+ // and verify it
+ __ verify_oop(rax);
+ break;
+ default : ShouldNotReachHere();
+ }
+ __ ret(0); // return from result handler
+ return entry;
+}
+
+// tosca based result to c++ interpreter stack based result.
+// Result goes to top of native stack.
+
+#undef EXTEND // SHOULD NOT BE NEEDED
+address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
+ // A result is in the tosca (abi result) from either a native method call or compiled
+ // code. Place this result on the java expression stack so C++ interpreter can use it.
+ address entry = __ pc();
+
+ const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
+ __ pop(t); // remove return address first
+ switch (type) {
+ case T_VOID:
+ break;
+ case T_BOOLEAN:
+#ifdef EXTEND
+ __ c2bool(rax);
+#endif
+ __ push(rax);
+ break;
+ case T_CHAR :
+#ifdef EXTEND
+ __ andl(rax, 0xFFFF);
+#endif
+ __ push(rax);
+ break;
+ case T_BYTE :
+#ifdef EXTEND
+ __ sign_extend_byte (rax);
+#endif
+ __ push(rax);
+ break;
+ case T_SHORT :
+#ifdef EXTEND
+ __ sign_extend_short(rax);
+#endif
+ __ push(rax);
+ break;
+ case T_LONG :
+ __ push(rdx); // pushes useless junk on 64bit
+ __ push(rax);
+ break;
+ case T_INT :
+ __ push(rax);
+ break;
+ case T_FLOAT :
+ // Result is in ST(0)/xmm0
+ __ subptr(rsp, wordSize);
+ if ( UseSSE < 1) {
+ __ fstp_s(Address(rsp, 0));
+ } else {
+ __ movflt(Address(rsp, 0), xmm0);
+ }
+ break;
+ case T_DOUBLE :
+ __ subptr(rsp, 2*wordSize);
+ if ( UseSSE < 2 ) {
+ __ fstp_d(Address(rsp, 0));
+ } else {
+ __ movdbl(Address(rsp, 0), xmm0);
+ }
+ break;
+ case T_OBJECT :
+ __ verify_oop(rax); // verify it
+ __ push(rax);
+ break;
+ default : ShouldNotReachHere();
+ }
+ __ jmp(t); // return from result handler
+ return entry;
+}
+
+address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
+ // A result is in the java expression stack of the interpreted method that has just
+ // returned. Place this result on the java expression stack of the caller.
+ //
+ // The current interpreter activation in rsi/r13 is for the method just returning its
+ // result. So we know that the result of this method is on the top of the current
+  // execution stack (which is pre-pushed) and will be returned to the top of the caller's
+  // stack. The top of the caller's stack is the bottom of the locals of the current
+  // activation.
+  // Because of the way activations are managed by the frame manager, the value of rsp is
+  // below both the stack top of the current activation and naturally the stack top
+  // of the calling activation. This enables this routine to leave the return address
+ // to the frame manager on the stack and do a vanilla return.
+ //
+ // On entry: rsi/r13 - interpreter state of activation returning a (potential) result
+ // On Return: rsi/r13 - unchanged
+ // rax - new stack top for caller activation (i.e. activation in _prev_link)
+ //
+ // Can destroy rdx, rcx.
+ //
+
+ address entry = __ pc();
+ const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
+ switch (type) {
+ case T_VOID:
+ __ movptr(rax, STATE(_locals)); // pop parameters get new stack value
+ __ addptr(rax, wordSize); // account for prepush before we return
+ break;
+ case T_FLOAT :
+ case T_BOOLEAN:
+ case T_CHAR :
+ case T_BYTE :
+ case T_SHORT :
+ case T_INT :
+ // 1 word result
+ __ movptr(rdx, STATE(_stack));
+ __ movptr(rax, STATE(_locals)); // address for result
+ __ movl(rdx, Address(rdx, wordSize)); // get result
+ __ movptr(Address(rax, 0), rdx); // and store it
+ break;
+ case T_LONG :
+ case T_DOUBLE :
+ // return top two words on current expression stack to caller's expression stack
+    // The caller's expression stack is adjacent to the current frame manager's interpreterState
+    // except we allocated one extra word for this interpreterState so we won't overwrite it
+ // when we return a two word result.
+
+ __ movptr(rax, STATE(_locals)); // address for result
+ __ movptr(rcx, STATE(_stack));
+ __ subptr(rax, wordSize); // need addition word besides locals[0]
+ __ movptr(rdx, Address(rcx, 2*wordSize)); // get result word (junk in 64bit)
+ __ movptr(Address(rax, wordSize), rdx); // and store it
+ __ movptr(rdx, Address(rcx, wordSize)); // get result word
+ __ movptr(Address(rax, 0), rdx); // and store it
+ break;
+ case T_OBJECT :
+ __ movptr(rdx, STATE(_stack));
+ __ movptr(rax, STATE(_locals)); // address for result
+ __ movptr(rdx, Address(rdx, wordSize)); // get result
+ __ verify_oop(rdx); // verify it
+ __ movptr(Address(rax, 0), rdx); // and store it
+ break;
+ default : ShouldNotReachHere();
+ }
+ __ ret(0);
+ return entry;
+}
+
+address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
+ // A result is in the java expression stack of the interpreted method that has just
+ // returned. Place this result in the native abi that the caller expects.
+ //
+ // Similar to generate_stack_to_stack_converter above. Called at a similar time from the
+  // frame manager except in this situation the caller is native code (c1/c2/call_stub)
+ // and so rather than return result onto caller's java expression stack we return the
+ // result in the expected location based on the native abi.
+ // On entry: rsi/r13 - interpreter state of activation returning a (potential) result
+ // On Return: rsi/r13 - unchanged
+ // Other registers changed [rax/rdx/ST(0) as needed for the result returned]
+
+ address entry = __ pc();
+ switch (type) {
+ case T_VOID:
+ break;
+ case T_BOOLEAN:
+ case T_CHAR :
+ case T_BYTE :
+ case T_SHORT :
+ case T_INT :
+ __ movptr(rdx, STATE(_stack)); // get top of stack
+ __ movl(rax, Address(rdx, wordSize)); // get result word 1
+ break;
+ case T_LONG :
+ __ movptr(rdx, STATE(_stack)); // get top of stack
+ __ movptr(rax, Address(rdx, wordSize)); // get result low word
+ NOT_LP64(__ movl(rdx, Address(rdx, 2*wordSize));) // get result high word
+ break;
+ case T_FLOAT :
+ __ movptr(rdx, STATE(_stack)); // get top of stack
+ if ( UseSSE >= 1) {
+ __ movflt(xmm0, Address(rdx, wordSize));
+ } else {
+      __ fld_s(Address(rdx, wordSize)); // push float result
+ }
+ break;
+ case T_DOUBLE :
+ __ movptr(rdx, STATE(_stack)); // get top of stack
+ if ( UseSSE > 1) {
+ __ movdbl(xmm0, Address(rdx, wordSize));
+ } else {
+ __ fld_d(Address(rdx, wordSize)); // push double result
+ }
+ break;
+ case T_OBJECT :
+ __ movptr(rdx, STATE(_stack)); // get top of stack
+ __ movptr(rax, Address(rdx, wordSize)); // get result word 1
+ __ verify_oop(rax); // verify it
+ break;
+ default : ShouldNotReachHere();
+ }
+ __ ret(0);
+ return entry;
+}
+
+address CppInterpreter::return_entry(TosState state, int length) {
+ // make it look good in the debugger
+ return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation);
+}
+
+address CppInterpreter::deopt_entry(TosState state, int length) {
+ address ret = NULL;
+ if (length != 0) {
+ switch (state) {
+ case atos: ret = deopt_frame_manager_return_atos; break;
+ case btos: ret = deopt_frame_manager_return_btos; break;
+ case ctos:
+ case stos:
+ case itos: ret = deopt_frame_manager_return_itos; break;
+ case ltos: ret = deopt_frame_manager_return_ltos; break;
+ case ftos: ret = deopt_frame_manager_return_ftos; break;
+ case dtos: ret = deopt_frame_manager_return_dtos; break;
+ case vtos: ret = deopt_frame_manager_return_vtos; break;
+ }
+ } else {
+ ret = unctrap_frame_manager_entry; // re-execute the bytecode ( e.g. uncommon trap)
+ }
+ assert(ret != NULL, "Not initialized");
+ return ret;
+}
+
+// C++ Interpreter
+void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state,
+ const Register locals,
+ const Register sender_sp,
+ bool native) {
+
+  // On entry the "locals" argument points to locals[0] (or where it would be if there
+  // were no locals, as in a static method with no arguments). "state" contains any
+  // previous frame manager state, to which we must save a link in the newly generated
+  // state object. On return "state" is a pointer to the newly allocated state object.
+  // We must allocate and initialize a new interpreterState object and the method
+ // expression stack. Because the returned result (if any) of the method will be placed on the caller's
+ // expression stack and this will overlap with locals[0] (and locals[1] if double/long) we must
+ // be sure to leave space on the caller's stack so that this result will not overwrite values when
+ // locals[0] and locals[1] do not exist (and in fact are return address and saved rbp). So when
+ // we are non-native we in essence ensure that locals[0-1] exist. We play an extra trick in
+ // non-product builds and initialize this last local with the previous interpreterState as
+ // this makes things look real nice in the debugger.
+
+ // State on entry
+ // Assumes locals == &locals[0]
+ // Assumes state == any previous frame manager state (assuming call path from c++ interpreter)
+ // Assumes rax = return address
+ // rcx == senders_sp
+ // rbx == method
+ // Modifies rcx, rdx, rax
+ // Returns:
+ // state == address of new interpreterState
+ // rsp == bottom of method's expression stack.
+
+ const Address const_offset (rbx, methodOopDesc::const_offset());
+
+
+ // On entry sp is the sender's sp. This includes the space for the arguments
+ // that the sender pushed. If the sender pushed no args (a static) and the
+ // caller returns a long then we need two words on the sender's stack which
+ // are not present (although when we return a restore full size stack the
+ // space will be present). If we didn't allocate two words here then when
+  // we "push" the result onto the caller's stack we would overwrite the return
+ // address and the saved rbp. Not good. So simply allocate 2 words now
+ // just to be safe. This is the "static long no_params() method" issue.
+ // See Lo.java for a testcase.
+ // We don't need this for native calls because they return result in
+ // register and the stack is expanded in the caller before we store
+ // the results on the stack.
+
+ if (!native) {
+#ifdef PRODUCT
+ __ subptr(rsp, 2*wordSize);
+#else /* PRODUCT */
+ __ push((int32_t)NULL_WORD);
+ __ push(state); // make it look like a real argument
+#endif /* PRODUCT */
+ }
+
+  // Now that we are assured of space for the stack result, set up typical linkage
+
+ __ push(rax);
+ __ enter();
+
+ __ mov(rax, state); // save current state
+
+ __ lea(rsp, Address(rsp, -(int)sizeof(BytecodeInterpreter)));
+ __ mov(state, rsp);
+
+ // rsi/r13 == state/locals rax == prevstate
+
+  // initialize the "shadow" frame we use instead of recursing directly, since the C++
+  // interpreter is not directly recursive. It would be simpler to recurse, but then we
+  // couldn't trim the expression stack as we call new methods.
+ __ movptr(STATE(_locals), locals); // state->_locals = locals()
+ __ movptr(STATE(_self_link), state); // point to self
+ __ movptr(STATE(_prev_link), rax); // state->_link = state on entry (NULL or previous state)
+ __ movptr(STATE(_sender_sp), sender_sp); // state->_sender_sp = sender_sp
+#ifdef _LP64
+  __ movptr(STATE(_thread), r15_thread);                          // state->_thread = thread
+#else
+ __ get_thread(rax); // get vm's javathread*
+  __ movptr(STATE(_thread), rax);                                 // state->_thread = thread
+#endif // _LP64
+ __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); // get constantMethodOop
+ __ lea(rdx, Address(rdx, constMethodOopDesc::codes_offset())); // get code base
+ if (native) {
+ __ movptr(STATE(_bcp), (int32_t)NULL_WORD); // state->_bcp = NULL
+ } else {
+ __ movptr(STATE(_bcp), rdx); // state->_bcp = codes()
+ }
+ __ xorptr(rdx, rdx);
+ __ movptr(STATE(_oop_temp), rdx); // state->_oop_temp = NULL (only really needed for native)
+ __ movptr(STATE(_mdx), rdx); // state->_mdx = NULL
+ __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
+ __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
+ __ movptr(STATE(_constants), rdx); // state->_constants = constants()
+
+ __ movptr(STATE(_method), rbx); // state->_method = method()
+ __ movl(STATE(_msg), (int32_t) BytecodeInterpreter::method_entry); // state->_msg = initial method entry
+  __ movptr(STATE(_result._to_call._callee), (int32_t) NULL_WORD); // state->_result._to_call._callee = NULL
+
+
+ __ movptr(STATE(_monitor_base), rsp); // set monitor block bottom (grows down) this would point to entry [0]
+ // entries run from -1..x where &monitor[x] ==
+
+ {
+ // Must not attempt to lock method until we enter interpreter as gc won't be able to find the
+ // initial frame. However we allocate a free monitor so we don't have to shuffle the expression stack
+ // immediately.
+
+ // synchronize method
+ const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+ const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ Label not_synced;
+
+ __ movl(rax, access_flags);
+ __ testl(rax, JVM_ACC_SYNCHRONIZED);
+ __ jcc(Assembler::zero, not_synced);
+
+ // Allocate initial monitor and pre initialize it
+ // get synchronization object
+
+ Label done;
+ const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+ __ movl(rax, access_flags);
+ __ testl(rax, JVM_ACC_STATIC);
+ __ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case)
+ __ jcc(Assembler::zero, done);
+ __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+ __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(rax, Address(rax, mirror_offset));
+ __ bind(done);
+ // add space for monitor & lock
+ __ subptr(rsp, entry_size); // add space for a monitor entry
+ __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
+ __ bind(not_synced);
+ }
+
+ __ movptr(STATE(_stack_base), rsp); // set expression stack base ( == &monitors[-count])
+ if (native) {
+ __ movptr(STATE(_stack), rsp); // set current expression stack tos
+ __ movptr(STATE(_stack_limit), rsp);
+ } else {
+ __ subptr(rsp, wordSize); // pre-push stack
+ __ movptr(STATE(_stack), rsp); // set current expression stack tos
+
+ // compute full expression stack limit
+
+ const Address size_of_stack (rbx, methodOopDesc::max_stack_offset());
+ const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_words();
+ __ load_unsigned_short(rdx, size_of_stack); // get size of expression stack in words
+ __ negptr(rdx); // so we can subtract in next step
+ // Allocate expression stack
+ __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -extra_stack));
+ __ movptr(STATE(_stack_limit), rsp);
+ }
+
+#ifdef _LP64
+ // Make sure stack is properly aligned and sized for the abi
+ __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
+#endif // _LP64
+
+}
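+
+// A rough sketch of the frame laid out above (stack grows downward):
+//   return address, saved rbp                 (push(rax); enter())
+//   BytecodeInterpreter "state" object        <- state register
+//   monitors, if the method is synchronized   <- below STATE(_monitor_base)
+//   expression stack                          <- STATE(_stack_base) .. STATE(_stack_limit)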
+
+// Helpers for commoning out cases in the various type of method entries.
+//
+
+// increment invocation count & check for overflow
+//
+// Note: checking for negative value instead of overflow
+// so we have a 'sticky' overflow test
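+// (the limit check below is an unsigned aboveEqual compare, so once the summed count
+// wraps into the sign bit it still reads as a huge value and the overflow branch fires)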
+//
+// rbx,: method
+// rcx: invocation counter
+//
+void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
+
+ const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
+ const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());
+
+ if (ProfileInterpreter) { // %%% Merge this into methodDataOop
+ __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
+ }
+ // Update standard invocation counters
+ __ movl(rax, backedge_counter); // load backedge counter
+
+ __ increment(rcx, InvocationCounter::count_increment);
+ __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
+
+ __ movl(invocation_counter, rcx); // save invocation count
+ __ addl(rcx, rax); // add both counters
+
+  // profile_method is non-NULL only for interpreted methods, so
+  // profile_method != NULL == !native_call.
+  // The BytecodeInterpreter only calls this for native methods, so that code is elided.
+
+ __ cmp32(rcx,
+ ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
+ __ jcc(Assembler::aboveEqual, *overflow);
+
+}
+
+void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
+
+ // C++ interpreter on entry
+ // rsi/r13 - new interpreter state pointer
+ // rbp - interpreter frame pointer
+ // rbx - method
+
+ // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
+ // rbx, - method
+ // rcx - rcvr (assuming there is one)
+ // top of stack return address of interpreter caller
+ // rsp - sender_sp
+
+ // C++ interpreter only
+ // rsi/r13 - previous interpreter state pointer
+
+ const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
+
+ // InterpreterRuntime::frequency_counter_overflow takes one argument
+ // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
+ // The call returns the address of the verified entry point for the method or NULL
+ // if the compilation did not complete (either went background or bailed out).
+ __ movptr(rax, (int32_t)false);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
+
+ // for c++ interpreter can rsi really be munged?
+ __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter))); // restore state
+ __ movptr(rbx, Address(state, byte_offset_of(BytecodeInterpreter, _method))); // restore method
+ __ movptr(rdi, Address(state, byte_offset_of(BytecodeInterpreter, _locals))); // get locals pointer
+
+ __ jmp(*do_continue, relocInfo::none);
+
+}
+
+void InterpreterGenerator::generate_stack_overflow_check(void) {
+ // see if we've got enough room on the stack for locals plus overhead.
+ // the expression stack grows down incrementally, so the normal guard
+ // page mechanism will work for that.
+ //
+ // Registers live on entry:
+ //
+ // Asm interpreter
+ // rdx: number of additional locals this frame needs (what we must check)
+ // rbx,: methodOop
+
+ // C++ Interpreter
+ // rsi/r13: previous interpreter frame state object
+ // rdi: &locals[0]
+ // rcx: # of locals
+ // rdx: number of additional locals this frame needs (what we must check)
+ // rbx: methodOop
+
+ // destroyed on exit
+ // rax,
+
+  // NOTE: the additional locals are also always pushed (this wasn't obvious in
+  // generate_method_entry), so the guard should work for them too.
+ //
+
+ // monitor entry size: see picture of stack set (generate_method_entry) and frame_i486.hpp
+ const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+
+ // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
+ // be sure to change this if you add/subtract anything to/from the overhead area
+ const int overhead_size = (int)sizeof(BytecodeInterpreter);
+
+ const int page_size = os::vm_page_size();
+
+ Label after_frame_check;
+
+ // compute rsp as if this were going to be the last frame on
+ // the stack before the red zone
+
+ Label after_frame_check_pop;
+
+ // save rsi == caller's bytecode ptr (c++ previous interp. state)
+ // QQQ problem here?? rsi overload????
+ __ push(state);
+
+ const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rsi);
+
+ NOT_LP64(__ get_thread(thread));
+
+ const Address stack_base(thread, Thread::stack_base_offset());
+ const Address stack_size(thread, Thread::stack_size_offset());
+
+ // locals + overhead, in bytes
+ const Address size_of_stack (rbx, methodOopDesc::max_stack_offset());
+ // Always give one monitor to allow us to start interp if sync method.
+ // Any additional monitors need a check when moving the expression stack
+ const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
+ const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries();
+ __ load_unsigned_short(rax, size_of_stack); // get size of expression stack in words
+ __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), extra_stack + one_monitor));
+ __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
+
+#ifdef ASSERT
+ Label stack_base_okay, stack_size_okay;
+ // verify that thread stack base is non-zero
+ __ cmpptr(stack_base, (int32_t)0);
+ __ jcc(Assembler::notEqual, stack_base_okay);
+ __ stop("stack base is zero");
+ __ bind(stack_base_okay);
+ // verify that thread stack size is non-zero
+ __ cmpptr(stack_size, (int32_t)0);
+ __ jcc(Assembler::notEqual, stack_size_okay);
+ __ stop("stack size is zero");
+ __ bind(stack_size_okay);
+#endif
+
+ // Add stack base to locals and subtract stack size
+ __ addptr(rax, stack_base);
+ __ subptr(rax, stack_size);
+
+ // We should have a magic number here for the size of the c++ interpreter frame.
+  // We can't actually tell this ahead of time. The debug version's frame is around 3k,
+  // product is about 1k, and fastdebug is about 4k.
+ const int slop = 6 * K;
+
+ // Use the maximum number of pages we might bang.
+ const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
+ (StackRedPages+StackYellowPages);
+ // Only need this if we are stack banging which is temporary while
+ // we're debugging.
+ __ addptr(rax, slop + 2*max_pages * page_size);
+
+ // check against the current stack bottom
+ __ cmpptr(rsp, rax);
+ __ jcc(Assembler::above, after_frame_check_pop);
+
+ __ pop(state); // get c++ prev state.
+
+ // throw exception return address becomes throwing pc
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
+
+ // all done with frame size check
+ __ bind(after_frame_check_pop);
+ __ pop(state);
+
+ __ bind(after_frame_check);
+}
+
+// Find preallocated monitor and lock method (C++ interpreter)
+// rbx - methodOop
+//
+void InterpreterGenerator::lock_method(void) {
+ // assumes state == rsi/r13 == pointer to current interpreterState
+ // minimally destroys rax, rdx|c_rarg1, rdi
+ //
+ // synchronize method
+ const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+
+ const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
+
+ // find initial monitor i.e. monitors[-1]
+ __ movptr(monitor, STATE(_monitor_base)); // get monitor bottom limit
+ __ subptr(monitor, entry_size); // point to initial monitor
+
+#ifdef ASSERT
+ { Label L;
+ __ movl(rax, access_flags);
+ __ testl(rax, JVM_ACC_SYNCHRONIZED);
+ __ jcc(Assembler::notZero, L);
+ __ stop("method doesn't need synchronization");
+ __ bind(L);
+ }
+#endif // ASSERT
+ // get synchronization object
+ { Label done;
+ const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+ __ movl(rax, access_flags);
+ __ movptr(rdi, STATE(_locals)); // prepare to get receiver (assume common case)
+ __ testl(rax, JVM_ACC_STATIC);
+ __ movptr(rax, Address(rdi, 0)); // get receiver (assume this is frequent case)
+ __ jcc(Assembler::zero, done);
+ __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+ __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(rax, Address(rax, mirror_offset));
+ __ bind(done);
+ }
+#ifdef ASSERT
+ { Label L;
+ __ cmpptr(rax, Address(monitor, BasicObjectLock::obj_offset_in_bytes())); // correct object?
+ __ jcc(Assembler::equal, L);
+    __ stop("wrong synchronization object");
+ __ bind(L);
+ }
+#endif // ASSERT
+ // can destroy rax, rdx|c_rarg1, rcx, and (via call_VM) rdi!
+ __ lock_object(monitor);
+}
+
+// Call an accessor method (assuming it is resolved; otherwise drop into the vanilla (slow path) entry)
+
+address InterpreterGenerator::generate_accessor_entry(void) {
+
+ // rbx: methodOop
+
+  // rsi/r13: senderSP must be preserved for slow path; set SP to it on fast path
+
+ Label xreturn_path;
+
+ // do fastpath for resolved accessor methods
+ if (UseFastAccessorMethods) {
+
+ address entry_point = __ pc();
+
+ Label slow_path;
+ // If we need a safepoint check, generate full interpreter entry.
+ ExternalAddress state(SafepointSynchronize::address_of_state());
+ __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+ SafepointSynchronize::_not_synchronized);
+
+ __ jcc(Assembler::notEqual, slow_path);
+ // ASM/C++ Interpreter
+ // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
+ // Note: We can only use this code if the getfield has been resolved
+ // and if we don't have a null-pointer exception => check for
+ // these conditions first and use slow path if necessary.
+ // rbx,: method
+ // rcx: receiver
+ __ movptr(rax, Address(rsp, wordSize));
+
+ // check if local 0 != NULL and read field
+ __ testptr(rax, rax);
+ __ jcc(Assembler::zero, slow_path);
+
+ __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
+ // read first instruction word and extract bytecode @ 1 and index @ 2
+ __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+ __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
+ // Shift codes right to get the index on the right.
+ // The bytecode fetched looks like <index><0xb4><0x2a>
+ __ shrl(rdx, 2*BitsPerByte);
+ __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
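+    // (after the two-byte shift rdx holds the native-order cache index; the left shift
+    // then scales it by the size of a ConstantPoolCacheEntry in words)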
+ __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
+
+ // rax,: local 0
+ // rbx,: method
+ // rcx: receiver - do not destroy since it is needed for slow path!
+ // rcx: scratch
+ // rdx: constant pool cache index
+ // rdi: constant pool cache
+ // rsi/r13: sender sp
+
+ // check if getfield has been resolved and read constant pool cache entry
+ // check the validity of the cache entry by testing whether _indices field
+ // contains Bytecode::_getfield in b1 byte.
+ assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
+ __ movl(rcx,
+ Address(rdi,
+ rdx,
+ Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+ __ shrl(rcx, 2*BitsPerByte);
+ __ andl(rcx, 0xFF);
+ __ cmpl(rcx, Bytecodes::_getfield);
+ __ jcc(Assembler::notEqual, slow_path);
+
+ // Note: constant pool entry is not valid before bytecode is resolved
+ __ movptr(rcx,
+ Address(rdi,
+ rdx,
+ Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
+ __ movl(rdx,
+ Address(rdi,
+ rdx,
+ Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+
+ Label notByte, notShort, notChar;
+ const Address field_address (rax, rcx, Address::times_1);
+
+ // Need to differentiate between igetfield, agetfield, bgetfield etc.
+ // because they are different sizes.
+ // Use the type from the constant pool cache
+ __ shrl(rdx, ConstantPoolCacheEntry::tosBits);
+ // Make sure we don't need to mask rdx for tosBits after the above shift
+ ConstantPoolCacheEntry::verify_tosBits();
+#ifdef _LP64
+ Label notObj;
+ __ cmpl(rdx, atos);
+ __ jcc(Assembler::notEqual, notObj);
+ // atos
+ __ movptr(rax, field_address);
+ __ jmp(xreturn_path);
+
+ __ bind(notObj);
+#endif // _LP64
+ __ cmpl(rdx, btos);
+ __ jcc(Assembler::notEqual, notByte);
+ __ load_signed_byte(rax, field_address);
+ __ jmp(xreturn_path);
+
+ __ bind(notByte);
+ __ cmpl(rdx, stos);
+ __ jcc(Assembler::notEqual, notShort);
+ __ load_signed_short(rax, field_address);
+ __ jmp(xreturn_path);
+
+ __ bind(notShort);
+ __ cmpl(rdx, ctos);
+ __ jcc(Assembler::notEqual, notChar);
+ __ load_unsigned_short(rax, field_address);
+ __ jmp(xreturn_path);
+
+ __ bind(notChar);
+#ifdef ASSERT
+ Label okay;
+#ifndef _LP64
+ __ cmpl(rdx, atos);
+ __ jcc(Assembler::equal, okay);
+#endif // _LP64
+ __ cmpl(rdx, itos);
+ __ jcc(Assembler::equal, okay);
+ __ stop("what type is this?");
+ __ bind(okay);
+#endif // ASSERT
+    // All the rest are 32 bit word-size values
+ __ movl(rax, field_address);
+
+ __ bind(xreturn_path);
+
+ // _ireturn/_areturn
+ __ pop(rdi); // get return address
+ __ mov(rsp, sender_sp_on_entry); // set sp to sender sp
+ __ jmp(rdi);
+
+ // generate a vanilla interpreter entry as the slow path
+ __ bind(slow_path);
+    // We will enter the c++ interpreter looking like it was
+    // called by the call_stub; this will cause it to return
+    // a tosca result to the invoker, which might have been
+    // the c++ interpreter itself.
+
+ __ jmp(fast_accessor_slow_entry_path);
+ return entry_point;
+
+ } else {
+ return NULL;
+ }
+
+}
+
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#ifndef SERIALGC
+ if (UseG1GC) {
+    // We need to have a routine that generates code to:
+ // * load the value in the referent field
+ // * passes that value to the pre-barrier.
+ //
+ // In the case of G1 this will record the value of the
+ // referent in an SATB buffer if marking is active.
+ // This will cause concurrent marking to mark the referent
+ // field as live.
+ Unimplemented();
+ }
+#endif // SERIALGC
+
+ // If G1 is not enabled then attempt to go through the accessor entry point;
+ // Reference.get is an accessor.
+ return generate_accessor_entry();
+}
+
+//
+// C++ Interpreter stub for calling a native method.
+ // This sets up a somewhat different-looking stack for calling the native method
+ // than the typical interpreter frame setup, but still has the pointer to
+ // an interpreter state.
+//
+
+address InterpreterGenerator::generate_native_entry(bool synchronized) {
+ // determine code generation flags
+ bool inc_counter = UseCompiler || CountCompiledCalls;
+
+ // rbx: methodOop
+ // rcx: receiver (unused)
+ // rsi/r13: previous interpreter state (if called from C++ interpreter); must be preserved
+ // in any case. If called via c1/c2/call_stub, rsi/r13 is junk but harmless
+ // to save/restore.
+ address entry_point = __ pc();
+
+ const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
+ const Address size_of_locals (rbx, methodOopDesc::size_of_locals_offset());
+ const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
+ const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+
+ // rsi/r13 == state/locals rdi == prevstate
+ const Register locals = rdi;
+
+ // get parameter size (always needed)
+ __ load_unsigned_short(rcx, size_of_parameters);
+
+ // rbx: methodOop
+ // rcx: size of parameters
+ __ pop(rax); // get return address
+ // for natives the size of locals is zero
+
+ // compute beginning of parameters / locals
+ __ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize));
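+ // locals now points at the first parameter (the highest-addressed slot);
+ // for a native method this is the entire locals area, since natives have
+ // no additional locals (see above).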
+
+ // initialize fixed part of activation frame
+
+ // Assumes rax = return address
+
+ // allocate and initialize new interpreterState and method expression stack
+ // IN(locals) -> locals
+ // IN(state) -> previous frame manager state (NULL from stub/c1/c2)
+ // destroys rax, rcx, rdx
+ // OUT (state) -> new interpreterState
+ // OUT(rsp) -> bottom of method's expression stack
+
+ // save sender_sp
+ __ mov(rcx, sender_sp_on_entry);
+ // start with NULL previous state
+ __ movptr(state, (int32_t)NULL_WORD);
+ generate_compute_interpreter_state(state, locals, rcx, true);
+
+#ifdef ASSERT
+ { Label L;
+ __ movptr(rax, STATE(_stack_base));
+#ifdef _LP64
+ // duplicate the alignment rsp got after setting stack_base
+ __ subptr(rax, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
+#endif // _LP64
+ __ cmpptr(rax, rsp);
+ __ jcc(Assembler::equal, L);
+ __ stop("broken stack frame setup in interpreter");
+ __ bind(L);
+ }
+#endif
+
+ if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
+
+ const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax);
+ NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread
+ // Since at this point in the method invocation the exception handler
+ // would try to exit the monitor of a synchronized method which has not
+ // been entered yet, we set the thread local variable
+ // _do_not_unlock_if_synchronized to true. The remove_activation will
+ // check this flag.
+
+ const Address do_not_unlock_if_synchronized(unlock_thread,
+ in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
+ __ movbool(do_not_unlock_if_synchronized, true);
+
+ // make sure method is native & not abstract
+#ifdef ASSERT
+ __ movl(rax, access_flags);
+ {
+ Label L;
+ __ testl(rax, JVM_ACC_NATIVE);
+ __ jcc(Assembler::notZero, L);
+ __ stop("tried to execute non-native method as native");
+ __ bind(L);
+ }
+ { Label L;
+ __ testl(rax, JVM_ACC_ABSTRACT);
+ __ jcc(Assembler::zero, L);
+ __ stop("tried to execute abstract method in interpreter");
+ __ bind(L);
+ }
+#endif
+
+
+ // increment invocation count & check for overflow
+ Label invocation_counter_overflow;
+ if (inc_counter) {
+ generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
+ }
+
+ Label continue_after_compile;
+
+ __ bind(continue_after_compile);
+
+ bang_stack_shadow_pages(true);
+
+ // reset the _do_not_unlock_if_synchronized flag
+ NOT_LP64(__ movl(rax, STATE(_thread));) // get thread
+ __ movbool(do_not_unlock_if_synchronized, false);
+
+
+ // check for synchronized native methods
+ //
+ // Note: This must happen *after* invocation counter check, since
+ // when overflow happens, the method should not be locked.
+ if (synchronized) {
+ // potentially kills rax, rcx, rdx, rdi
+ lock_method();
+ } else {
+ // no synchronization necessary
+#ifdef ASSERT
+ { Label L;
+ __ movl(rax, access_flags);
+ __ testl(rax, JVM_ACC_SYNCHRONIZED);
+ __ jcc(Assembler::zero, L);
+ __ stop("method needs synchronization");
+ __ bind(L);
+ }
+#endif
+ }
+
+ // start execution
+
+ // jvmti support
+ __ notify_method_entry();
+
+ // work registers
+ const Register method = rbx;
+ const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
+ const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); // rcx|rscratch1
+
+ // allocate space for parameters
+ __ movptr(method, STATE(_method));
+ __ verify_oop(method);
+ __ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset()));
+ __ shll(t, 2);
+#ifdef _LP64
+ __ subptr(rsp, t);
+ __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
+#else
+ __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
+ __ subptr(rsp, t);
+ __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
+#endif // _LP64
+
+ // get signature handler
+ Label pending_exception_present;
+
+ { Label L;
+ __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ testptr(t, t);
+ __ jcc(Assembler::notZero, L);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method, false);
+ __ movptr(method, STATE(_method));
+ __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+ __ jcc(Assembler::notEqual, pending_exception_present);
+ __ verify_oop(method);
+ __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ bind(L);
+ }
+#ifdef ASSERT
+ {
+ Label L;
+ __ push(t);
+ __ get_thread(t); // get vm's javathread*
+ __ cmpptr(t, STATE(_thread));
+ __ jcc(Assembler::equal, L);
+ __ int3();
+ __ bind(L);
+ __ pop(t);
+ }
+#endif // ASSERT
+
+ const Register from_ptr = InterpreterRuntime::SignatureHandlerGenerator::from();
+ // call signature handler
+ assert(InterpreterRuntime::SignatureHandlerGenerator::to () == rsp, "adjust this code");
+
+ // The generated handlers do not touch RBX (the method oop).
+ // However, large signatures cannot be cached and are generated
+ // each time here. The slow-path generator will blow RBX
+ // sometime, so we must reload it after the call.
+ __ movptr(from_ptr, STATE(_locals)); // get the from pointer
+ __ call(t);
+ __ movptr(method, STATE(_method));
+ __ verify_oop(method);
+
+ // result handler is in rax
+ // set result handler
+ __ movptr(STATE(_result_handler), rax);
+
+
+ // get native function entry point
+ { Label L;
+ __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ testptr(rax, rax);
+ __ jcc(Assembler::notZero, L);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
+ __ movptr(method, STATE(_method));
+ __ verify_oop(method);
+ __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ bind(L);
+ }
+
+ // pass mirror handle if static call
+ { Label L;
+ const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+ __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
+ __ testl(t, JVM_ACC_STATIC);
+ __ jcc(Assembler::zero, L);
+ // get mirror
+ __ movptr(t, Address(method, methodOopDesc::constants_offset()));
+ __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(t, Address(t, mirror_offset));
+ // copy mirror into activation object
+ __ movptr(STATE(_oop_temp), t);
+ // pass handle to mirror
+#ifdef _LP64
+ __ lea(c_rarg1, STATE(_oop_temp));
+#else
+ __ lea(t, STATE(_oop_temp));
+ __ movptr(Address(rsp, wordSize), t);
+#endif // _LP64
+ __ bind(L);
+ }
+#ifdef ASSERT
+ {
+ Label L;
+ __ push(t);
+ __ get_thread(t); // get vm's javathread*
+ __ cmpptr(t, STATE(_thread));
+ __ jcc(Assembler::equal, L);
+ __ int3();
+ __ bind(L);
+ __ pop(t);
+ }
+#endif // ASSERT
+
+ // pass JNIEnv
+#ifdef _LP64
+ __ lea(c_rarg0, Address(thread, JavaThread::jni_environment_offset()));
+#else
+ __ movptr(thread, STATE(_thread)); // get thread
+ __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
+
+ __ movptr(Address(rsp, 0), t);
+#endif // _LP64
+
+#ifdef ASSERT
+ {
+ Label L;
+ __ push(t);
+ __ get_thread(t); // get vm's javathread*
+ __ cmpptr(t, STATE(_thread));
+ __ jcc(Assembler::equal, L);
+ __ int3();
+ __ bind(L);
+ __ pop(t);
+ }
+#endif // ASSERT
+
+#ifdef ASSERT
+ { Label L;
+ __ movl(t, Address(thread, JavaThread::thread_state_offset()));
+ __ cmpl(t, _thread_in_Java);
+ __ jcc(Assembler::equal, L);
+ __ stop("Wrong thread state in native stub");
+ __ bind(L);
+ }
+#endif
+
+ // Change state to native (we save the return address in the thread, since it might not
+ // be pushed on the stack when we do a stack traversal). It is enough that the pc()
+ // points into the right code segment. It does not have to be the correct return pc.
+
+ __ set_last_Java_frame(thread, noreg, rbp, __ pc());
+
+ __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
+
+ __ call(rax);
+
+ // result potentially in rdx:rax or ST0
+ __ movptr(method, STATE(_method));
+ NOT_LP64(__ movptr(thread, STATE(_thread));) // get thread
+
+ // The potential result is in ST(0) & rdx:rax
+ // With the C++ interpreter we leave any possible result in ST(0) until we are in the result
+ // handler and then do the appropriate stuff for returning the result. rdx:rax must always be
+ // saved because just about anything we do here will destroy it; st(0) is only saved if we
+ // re-enter the vm where it would be destroyed.
+ // It is safe to do these pushes because state is _thread_in_native and the return address
+ // will be found via _last_native_pc and not via _last_java_sp
+
+ // Must save the value of ST(0)/xmm0 since it could be destroyed before we get to result handler
+ { Label Lpush, Lskip;
+ ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
+ ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
+ __ cmpptr(STATE(_result_handler), float_handler.addr());
+ __ jcc(Assembler::equal, Lpush);
+ __ cmpptr(STATE(_result_handler), double_handler.addr());
+ __ jcc(Assembler::notEqual, Lskip);
+ __ bind(Lpush);
+ __ subptr(rsp, 2*wordSize);
+ if ( UseSSE < 2 ) {
+ __ fstp_d(Address(rsp, 0));
+ } else {
+ __ movdbl(Address(rsp, 0), xmm0);
+ }
+ __ bind(Lskip);
+ }
+
+ // save rax:rdx for potential use by result handler.
+ __ push(rax);
+#ifndef _LP64
+ __ push(rdx);
+#endif // _LP64
+
+ // Either restore the MXCSR register after returning from the JNI Call
+ // or verify that it wasn't changed.
+ if (VM_Version::supports_sse()) {
+ if (RestoreMXCSROnJNICalls) {
+ __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
+ }
+ else if (CheckJNICalls) {
+ __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
+ }
+ }
+
+#ifndef _LP64
+ // Either restore the x87 floating point control word after returning
+ // from the JNI call or verify that it wasn't changed.
+ if (CheckJNICalls) {
+ __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
+ }
+#endif // _LP64
+
+
+ // change thread state
+ __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
+ if (os::is_MP()) {
+ // Write serialization page so VM thread can do a pseudo remote membar.
+ // We use the current thread pointer to calculate a thread specific
+ // offset to write to within the page. This minimizes bus traffic
+ // due to cache line collision.
+ __ serialize_memory(thread, rcx);
+ }
+
+ // check for safepoint operation in progress and/or pending suspend requests
+ { Label Continue;
+
+ __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+ SafepointSynchronize::_not_synchronized);
+
+ // Threads running native code are expected to self-suspend
+ // when leaving the _thread_in_native state, so we need to check for
+ // pending suspend requests here.
+ Label L;
+ __ jcc(Assembler::notEqual, L);
+ __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
+ __ jcc(Assembler::equal, Continue);
+ __ bind(L);
+
+ // Don't use call_VM as it will see a possible pending exception and forward it
+ // and never return here, preventing us from clearing _last_native_pc down below.
+ // Nor can we use call_VM_leaf, as it will check to see if rsi & rdi are
+ // preserved and correspond to the bcp/locals pointers.
+ //
+
+ ((MacroAssembler*)_masm)->call_VM_leaf(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+ thread);
+ __ increment(rsp, wordSize);
+
+ __ movptr(method, STATE(_method));
+ __ verify_oop(method);
+ __ movptr(thread, STATE(_thread)); // get thread
+
+ __ bind(Continue);
+ }
+
+ // change thread state
+ __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
+
+ __ reset_last_Java_frame(thread, true, true);
+
+ // reset handle block
+ __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
+ __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+
+ // If result was an oop then unbox and save it in the frame
+ { Label L;
+ Label no_oop, store_result;
+ ExternalAddress oop_handler(AbstractInterpreter::result_handler(T_OBJECT));
+ __ cmpptr(STATE(_result_handler), oop_handler.addr());
+ __ jcc(Assembler::notEqual, no_oop);
+#ifndef _LP64
+ __ pop(rdx);
+#endif // _LP64
+ __ pop(rax);
+ __ testptr(rax, rax);
+ __ jcc(Assembler::zero, store_result);
+ // unbox
+ __ movptr(rax, Address(rax, 0));
+ __ bind(store_result);
+ __ movptr(STATE(_oop_temp), rax);
+ // keep stack depth as expected by pushing oop which will eventually be discarded
+ __ push(rax);
+#ifndef _LP64
+ __ push(rdx);
+#endif // _LP64
+ __ bind(no_oop);
+ }
+
+ {
+ Label no_reguard;
+ __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
+ __ jcc(Assembler::notEqual, no_reguard);
+
+ __ pusha();
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
+ __ popa();
+
+ __ bind(no_reguard);
+ }
+
+
+ // QQQ Seems like for native methods we simply return and the caller will see the pending
+ // exception and do the right thing. Certainly the interpreter will; don't know about
+ // compiled methods.
+ // Seems that the answer to the above is no, this is wrong. The old code would see the exception
+ // and forward it before doing the unlocking and notifying jvmdi that the method has exited.
+ // This seems wrong; need to investigate the spec.
+
+ // handle exceptions (exception handling will handle unlocking!)
+ { Label L;
+ __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+ __ jcc(Assembler::zero, L);
+ __ bind(pending_exception_present);
+
+ // There are potential results on the stack (rax/rdx, ST(0)); we ignore these and simply
+ // return and let the caller deal with the exception. This skips the unlocking here, which
+ // seems wrong but seems to be what the asm interpreter did. Can't find this in the spec.
+ // Note: must preserve method in rbx
+ //
+
+ // remove activation
+
+ __ movptr(t, STATE(_sender_sp));
+ __ leave(); // remove frame anchor
+ __ pop(rdi); // get return address
+ __ movptr(state, STATE(_prev_link)); // get previous state for return
+ __ mov(rsp, t); // set sp to sender sp
+ __ push(rdi); // push throwing pc
+ // This skips unlocking!! It seems to be what the asm interpreter does but seems
+ // very wrong. Not clear if this violates the spec.
+ __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+ __ bind(L);
+ }
+
+ // do unlocking if necessary
+ { Label L;
+ __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
+ __ testl(t, JVM_ACC_SYNCHRONIZED);
+ __ jcc(Assembler::zero, L);
+ // the code below should be shared with interpreter macro assembler implementation
+ { Label unlock;
+ const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
+ // BasicObjectLock will be first in list, since this is a synchronized method. However, need
+ // to check that the object has not been unlocked by an explicit monitorexit bytecode.
+ __ movptr(monitor, STATE(_monitor_base));
+ __ subptr(monitor, frame::interpreter_frame_monitor_size() * wordSize); // address of initial monitor
+
+ __ movptr(t, Address(monitor, BasicObjectLock::obj_offset_in_bytes()));
+ __ testptr(t, t);
+ __ jcc(Assembler::notZero, unlock);
+
+ // Entry already unlocked, need to throw exception
+ __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
+ __ should_not_reach_here();
+
+ __ bind(unlock);
+ __ unlock_object(monitor);
+ // unlock can blow rbx so restore it for path that needs it below
+ __ movptr(method, STATE(_method));
+ }
+ __ bind(L);
+ }
+
+ // jvmti support
+ // Note: This must happen _after_ handling/throwing any exceptions since
+ // the exception handler code notifies the runtime of method exits
+ // too. If this happens before, method entry/exit notifications are
+ // not properly paired (was bug - gri 11/22/99).
+ __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
+
+ // restore potential result in rdx:rax; call result handler to restore any potential result in ST0 and handle the result
+#ifndef _LP64
+ __ pop(rdx);
+#endif // _LP64
+ __ pop(rax);
+ __ movptr(t, STATE(_result_handler)); // get result handler
+ __ call(t); // call result handler to convert to tosca form
+
+ // remove activation
+
+ __ movptr(t, STATE(_sender_sp));
+
+ __ leave(); // remove frame anchor
+ __ pop(rdi); // get return address
+ __ movptr(state, STATE(_prev_link)); // get previous state for return (if c++ interpreter was caller)
+ __ mov(rsp, t); // set sp to sender sp
+ __ jmp(rdi);
+
+ // invocation counter overflow
+ if (inc_counter) {
+ // Handle overflow of counter and compile method
+ __ bind(invocation_counter_overflow);
+ generate_counter_overflow(&continue_after_compile);
+ }
+
+ return entry_point;
+}
+
+// Generate entries that will put a result type index into rcx
+void CppInterpreterGenerator::generate_deopt_handling() {
+
+ Label return_from_deopt_common;
+
+ // Generate entries that will put a result type index into rcx
+ // deopt needs to jump to here to enter the interpreter (return a result)
+ deopt_frame_manager_return_atos = __ pc();
+
+ // rax is live here
+ __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_OBJECT)); // Result stub address array index
+ __ jmp(return_from_deopt_common);
+
+
+ // deopt needs to jump to here to enter the interpreter (return a result)
+ deopt_frame_manager_return_btos = __ pc();
+
+ // rax is live here
+ __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_BOOLEAN)); // Result stub address array index
+ __ jmp(return_from_deopt_common);
+
+ // deopt needs to jump to here to enter the interpreter (return a result)
+ deopt_frame_manager_return_itos = __ pc();
+
+ // rax is live here
+ __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_INT)); // Result stub address array index
+ __ jmp(return_from_deopt_common);
+
+ // deopt needs to jump to here to enter the interpreter (return a result)
+
+ deopt_frame_manager_return_ltos = __ pc();
+ // rax,rdx are live here
+ __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_LONG)); // Result stub address array index
+ __ jmp(return_from_deopt_common);
+
+ // deopt needs to jump to here to enter the interpreter (return a result)
+
+ deopt_frame_manager_return_ftos = __ pc();
+ // st(0) is live here
+ __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index
+ __ jmp(return_from_deopt_common);
+
+ // deopt needs to jump to here to enter the interpreter (return a result)
+ deopt_frame_manager_return_dtos = __ pc();
+
+ // st(0) is live here
+ __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index
+ __ jmp(return_from_deopt_common);
+
+ // deopt needs to jump to here to enter the interpreter (return a result)
+ deopt_frame_manager_return_vtos = __ pc();
+
+ __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_VOID));
+
+ // Deopt return common
+ // an index is present in rcx that lets us move any possible result being
+ // returned to the interpreter's stack
+ //
+ // Because we have a full sized interpreter frame on the youngest
+ // activation the stack is pushed too deep to share the tosca to
+ // stack converters directly. We shrink the stack to the desired
+ // amount and then push result and then re-extend the stack.
+ // We could have the code in size_activation layout a short
+ // frame for the top activation but that would look different
+ // than say sparc (which needs a full size activation because
+ // the windows are in the way). Really it could be short? QQQ
+ //
+ __ bind(return_from_deopt_common);
+
+ __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
+
+ // setup rsp so we can push the "result" as needed.
+ __ movptr(rsp, STATE(_stack)); // trim stack (is prepushed)
+ __ addptr(rsp, wordSize); // undo prepush
+
+ ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
+ // Address index(noreg, rcx, Address::times_ptr);
+ __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
+ // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
+ __ call(rcx); // call result converter
+
+ __ movl(STATE(_msg), (int)BytecodeInterpreter::deopt_resume);
+ __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present)
+ __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed,
+ // result if any on stack already)
+ __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
+}
+
+// Generate the code to handle a more_monitors message from the c++ interpreter
+void CppInterpreterGenerator::generate_more_monitors() {
+
+
+ Label entry, loop;
+ const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
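+ // The expression stack sits just below the monitor area and grows downward.
+ // To make room for one more monitor we slide the live expression stack down
+ // by entry_size bytes; the freed slot just below the old stack base becomes
+ // the new BasicObjectLock.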
+ // 1. compute new pointers // rsp: old expression stack top
+ __ movptr(rdx, STATE(_stack_base)); // rdx: old expression stack bottom
+ __ subptr(rsp, entry_size); // move expression stack top limit
+ __ subptr(STATE(_stack), entry_size); // update interpreter stack top
+ __ subptr(STATE(_stack_limit), entry_size); // inform interpreter
+ __ subptr(rdx, entry_size); // move expression stack bottom
+ __ movptr(STATE(_stack_base), rdx); // inform interpreter
+ __ movptr(rcx, STATE(_stack)); // set start value for copy loop
+ __ jmp(entry);
+ // 2. move expression stack contents
+ __ bind(loop);
+ __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
+ __ movptr(Address(rcx, 0), rbx); // and store it at new location
+ __ addptr(rcx, wordSize); // advance to next word
+ __ bind(entry);
+ __ cmpptr(rcx, rdx); // check if bottom reached
+ __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
+ // now zero the slot so we can find it.
+ __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
+ __ movl(STATE(_msg), (int)BytecodeInterpreter::got_monitors);
+}
+
+
+// Initial entry to C++ interpreter from the call_stub.
+// This entry point is called the frame manager since it handles the generation
+// of interpreter activation frames via requests directly from the vm (via call_stub)
+// and via requests from the interpreter. The requests from the call_stub happen
+// directly thru the entry point. Requests from the interpreter happen via returning
+// from the interpreter and examining the message the interpreter has returned to
+// the frame manager. The frame manager can take the following requests:
+
+// NO_REQUEST - error, should never happen.
+// MORE_MONITORS - need a new monitor. Shuffle the expression stack on down and
+// allocate a new monitor.
+// CALL_METHOD - setup a new activation to call a new method. Very similar to what
+// happens during entry via the call stub.
+// RETURN_FROM_METHOD - remove an activation. Return to interpreter or call stub.
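+// THROWING_EXCEPTION - an exception is pending; unwind the activation and
+// pass the exception on to the caller.
+// DO_OSR - transfer control to an on-stack-replacement nmethod for the
+// current method.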
+//
+// Arguments:
+//
+// rbx: methodOop
+// rcx: receiver - unused (retrieved from stack as needed)
+// rsi/r13: previous frame manager state (NULL from the call_stub/c1/c2)
+//
+//
+// Stack layout at entry
+//
+// [ return address ] <--- rsp
+// [ parameter n ]
+// ...
+// [ parameter 1 ]
+// [ expression stack ]
+//
+//
+// We are free to blow any registers we like because the call_stub which brought us here
+// initially has preserved the callee save registers already.
+//
+//
+
+static address interpreter_frame_manager = NULL;
+
+address InterpreterGenerator::generate_normal_entry(bool synchronized) {
+
+ // rbx: methodOop
+ // rsi/r13: sender sp
+
+ // Because we redispatch "recursive" interpreter entries thru this same entry point,
+ // the "input" register usage is a little strange and not what you expect coming
+ // from the call_stub. From the call stub rsi/rdi (current/previous) interpreter
+ // state are NULL but on "recursive" dispatches they are what you'd expect.
+ // rsi: current interpreter state (C++ interpreter); must be preserved (null from call_stub/c1/c2)
+
+
+ // A single frame manager is plenty as we don't specialize for synchronized. We could, and
+ // the code is pretty much ready. Would need to change the test below and for good measure
+ // modify generate_compute_interpreter_state to only do the (pre) sync stuff for synchronized
+ // routines. Not clear this is worth it yet.
+
+ if (interpreter_frame_manager) return interpreter_frame_manager;
+
+ address entry_point = __ pc();
+
+ // Fast accessor methods share this entry point.
+ // This works because the frame manager is in the same codelet
+ if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);
+
+ Label dispatch_entry_2;
+ __ movptr(rcx, sender_sp_on_entry);
+ __ movptr(state, (int32_t)NULL_WORD); // no current activation
+
+ __ jmp(dispatch_entry_2);
+
+ const Register locals = rdi;
+
+ Label re_dispatch;
+
+ __ bind(re_dispatch);
+
+ // save sender sp (doesn't include return address)
+ __ lea(rcx, Address(rsp, wordSize));
+
+ __ bind(dispatch_entry_2);
+
+ // save sender sp
+ __ push(rcx);
+
+ const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
+ const Address size_of_locals (rbx, methodOopDesc::size_of_locals_offset());
+ const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+
+ // const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
+ // const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
+ // const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
+
+ // get parameter size (always needed)
+ __ load_unsigned_short(rcx, size_of_parameters);
+
+ // rbx: methodOop
+ // rcx: size of parameters
+ __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
+
+ __ subptr(rdx, rcx); // rdx = no. of additional locals
+
+ // see if we've got enough room on the stack for locals plus overhead.
+ generate_stack_overflow_check(); // C++
+
+ // the c++ interpreter does not use stack banging or any implicit exceptions;
+ // leave this in for now to verify that the check is proper.
+ bang_stack_shadow_pages(false);
+
+
+
+ // compute beginning of parameters (rdi)
+ __ lea(locals, Address(rsp, rcx, Address::times_ptr, wordSize));
+
+ // save sender's sp
+ // __ movl(rcx, rsp);
+
+ // get sender's sp
+ __ pop(rcx);
+
+ // get return address
+ __ pop(rax);
+
+ // rdx - # of additional locals
+ // allocate space for locals
+ // explicitly initialize locals
+ {
+ Label exit, loop;
+ __ testl(rdx, rdx); // (32bit ok)
+ __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
+ __ bind(loop);
+ __ push((int32_t)NULL_WORD); // initialize local variables
+ __ decrement(rdx); // until everything initialized
+ __ jcc(Assembler::greater, loop);
+ __ bind(exit);
+ }
+
+
+ // Assumes rax = return address
+
+ // allocate and initialize new interpreterState and method expression stack
+ // IN(locals) -> locals
+ // IN(state) -> any current interpreter activation
+ // destroys rax, rcx, rdx, rdi
+ // OUT (state) -> new interpreterState
+ // OUT(rsp) -> bottom of method's expression stack
+
+ generate_compute_interpreter_state(state, locals, rcx, false);
+
+ // Call interpreter
+
+ Label call_interpreter;
+ __ bind(call_interpreter);
+
+ // the c++ interpreter does not use stack banging or any implicit exceptions;
+ // leave this in for now to verify that the check is proper.
+ bang_stack_shadow_pages(false);
+
+
+ // Call interpreter. Enter here if the message is
+ // set and we know the stack size is valid.
+
+ Label call_interpreter_2;
+
+ __ bind(call_interpreter_2);
+
+ {
+ const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+
+#ifdef _LP64
+ __ mov(c_rarg0, state);
+#else
+ __ push(state); // push arg to interpreter
+ __ movptr(thread, STATE(_thread));
+#endif // _LP64
+
+ // We can set up the frame anchor with everything we want at this point
+ // as we are thread_in_Java and no safepoints can occur until we go to
+ // vm mode. We do have to clear flags on return from vm but that is it.
+ //
+ __ movptr(Address(thread, JavaThread::last_Java_fp_offset()), rbp);
+ __ movptr(Address(thread, JavaThread::last_Java_sp_offset()), rsp);
+
+ // Call the interpreter
+
+ RuntimeAddress normal(CAST_FROM_FN_PTR(address, BytecodeInterpreter::run));
+ RuntimeAddress checking(CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks));
+
+ __ call(JvmtiExport::can_post_interpreter_events() ? checking : normal);
+ NOT_LP64(__ pop(rax);) // discard parameter to run
+ //
+ // state is preserved since it is callee saved
+ //
+
+ // reset_last_Java_frame
+
+ NOT_LP64(__ movl(thread, STATE(_thread));)
+ __ reset_last_Java_frame(thread, true, true);
+ }
+
+ // examine msg from interpreter to determine next action
+
+ __ movl(rdx, STATE(_msg)); // Get new message
+
+ Label call_method;
+ Label return_from_interpreted_method;
+ Label throw_exception;
+ Label bad_msg;
+ Label do_OSR;
+
+ __ cmpl(rdx, (int32_t)BytecodeInterpreter::call_method);
+ __ jcc(Assembler::equal, call_method);
+ __ cmpl(rdx, (int32_t)BytecodeInterpreter::return_from_method);
+ __ jcc(Assembler::equal, return_from_interpreted_method);
+ __ cmpl(rdx, (int32_t)BytecodeInterpreter::do_osr);
+ __ jcc(Assembler::equal, do_OSR);
+ __ cmpl(rdx, (int32_t)BytecodeInterpreter::throwing_exception);
+ __ jcc(Assembler::equal, throw_exception);
+ __ cmpl(rdx, (int32_t)BytecodeInterpreter::more_monitors);
+ __ jcc(Assembler::notEqual, bad_msg);
+
+ // Allocate more monitor space, shuffle expression stack....
+
+ generate_more_monitors();
+
+ __ jmp(call_interpreter);
+
+ // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode)
+ unctrap_frame_manager_entry = __ pc();
+ //
+ // Load the registers we need.
+ __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
+ __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
+ __ jmp(call_interpreter_2);
+
+
+
+ //=============================================================================
+ // Returning from a compiled method into a deopted method. The bytecode at the
+ // bcp has completed. The result of the bytecode is in the native abi (the tosca
+ // for the template based interpreter). Any stack space that was used by the
+ // bytecode that has completed has been removed (e.g. parameters for an invoke)
+ // so all that we have to do is place any pending result on the expression stack
+ // and resume execution on the next bytecode.
+
+
+ generate_deopt_handling();
+ __ jmp(call_interpreter);
+
+
+ // Current frame has caught an exception we need to dispatch to the
+ // handler. We can get here because a native interpreter frame caught
+ // an exception in which case there is no handler and we must rethrow.
+ // If it is a vanilla interpreted frame then we simply drop into the
+ // interpreter and let it do the lookup.
+
+ Interpreter::_rethrow_exception_entry = __ pc();
+ // rax: exception
+ // rdx: return address/pc that threw exception
+
+ Label return_with_exception;
+ Label unwind_and_forward;
+
+ // restore state pointer.
+ __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
+
+ __ movptr(rbx, STATE(_method)); // get method
+#ifdef _LP64
+ __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
+#else
+ __ movl(rcx, STATE(_thread)); // get thread
+
+ // Store exception where the interpreter will expect it
+ __ movptr(Address(rcx, Thread::pending_exception_offset()), rax);
+#endif // _LP64
+
+ // is current frame vanilla or native?
+
+ __ movl(rdx, access_flags);
+ __ testl(rdx, JVM_ACC_NATIVE);
+ __ jcc(Assembler::zero, return_with_exception); // vanilla interpreted frame, handle directly
+
+ // We drop thru to unwind a native interpreted frame with a pending exception.
+ // We jump here for the initial interpreter frame with an exception pending.
+ // We unwind the current activation and forward it to our caller.
+
+ __ bind(unwind_and_forward);
+
+ // unwind rbp; restore stack to unextended value and re-push return address
+
+ __ movptr(rcx, STATE(_sender_sp));
+ __ leave();
+ __ pop(rdx);
+ __ mov(rsp, rcx);
+ __ push(rdx);
+ __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+
+ // Return point from a call which returns a result in the native abi
+ // (c1/c2/jni-native). This result must be processed onto the java
+ // expression stack.
+ //
+ // A pending exception may be present, in which case there is no result.
+
+ Label resume_interpreter;
+ Label do_float;
+ Label do_double;
+ Label done_conv;
+
+ // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
+ if (UseSSE < 2) {
+ __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
+ __ movptr(rbx, STATE(_result._to_call._callee)); // get method just executed
+ __ movl(rcx, Address(rbx, methodOopDesc::result_index_offset()));
+ __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index
+ __ jcc(Assembler::equal, do_float);
+ __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index
+ __ jcc(Assembler::equal, do_double);
+#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
+ __ empty_FPU_stack();
+#endif // !_LP64 || COMPILER1 || !COMPILER2
+ __ jmp(done_conv);
+
+ __ bind(do_float);
+#ifdef COMPILER2
+ for (int i = 1; i < 8; i++) {
+ __ ffree(i);
+ }
+#endif // COMPILER2
+ __ jmp(done_conv);
+ __ bind(do_double);
+#ifdef COMPILER2
+ for (int i = 1; i < 8; i++) {
+ __ ffree(i);
+ }
+#endif // COMPILER2
+ __ jmp(done_conv);
+ } else {
+ __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
+ __ jmp(done_conv);
+ }
+
+ // Return point to interpreter from compiled/native method
+ InternalAddress return_from_native_method(__ pc());
+
+ __ bind(done_conv);
+
+
+ // The result, if any, is in the tosca. The java expression stack is in the state the
+ // calling convention left it in (i.e. params may or may not be present).
+ // Copy the result from the tosca and place it on the java expression stack.
+
+ // Restore rsi/r13 as compiled code may not preserve it
+
+ __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
+
+ // restore stack to what we had when we left (in case i2c extended it)
+
+ __ movptr(rsp, STATE(_stack));
+ __ lea(rsp, Address(rsp, wordSize));
+
+ // If there is a pending exception then we don't really have a result to process
+
+#ifdef _LP64
+ __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+#else
+ __ movptr(rcx, STATE(_thread)); // get thread
+ __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+#endif // _LP64
+ __ jcc(Assembler::notZero, return_with_exception);
+
+ // get method just executed
+ __ movptr(rbx, STATE(_result._to_call._callee));
+
+ // callee left args on top of expression stack, remove them
+ __ load_unsigned_short(rcx, Address(rbx, methodOopDesc::size_of_parameters_offset()));
+ __ lea(rsp, Address(rsp, rcx, Address::times_ptr));
+
+ __ movl(rcx, Address(rbx, methodOopDesc::result_index_offset()));
+ ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
+ // Address index(noreg, rax, Address::times_ptr);
+ __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
+ // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
+ __ call(rcx); // call result converter
+ __ jmp(resume_interpreter);
+
+ // An exception is being caught on return to a vanilla interpreter frame.
+ // Empty the stack and resume interpreter
+
+ __ bind(return_with_exception);
+
+ // Exception present, empty stack
+ __ movptr(rsp, STATE(_stack_base));
+ __ jmp(resume_interpreter);
+
+ // Return from an interpreted method: we return a result appropriate to the caller (i.e. "recursive"
+ // interpreter call, or native) and unwind this interpreter activation.
+ // All monitors should be unlocked.
+
+ __ bind(return_from_interpreted_method);
+
+ Label return_to_initial_caller;
+
+ __ movptr(rbx, STATE(_method)); // get method just executed
+ __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call?
+ __ movl(rax, Address(rbx, methodOopDesc::result_index_offset())); // get result type index
+ __ jcc(Assembler::equal, return_to_initial_caller); // back to native code (call_stub/c1/c2)
+
+ // Copy result to caller's java stack
+ ExternalAddress stack_to_stack((address)CppInterpreter::_stack_to_stack);
+ // Address index(noreg, rax, Address::times_ptr);
+
+ __ movptr(rax, ArrayAddress(stack_to_stack, Address(noreg, rax, Address::times_ptr)));
+ // __ movl(rax, Address(noreg, rax, Address::times_ptr, int(AbstractInterpreter::_stack_to_stack)));
+ __ call(rax); // call result converter
+
+ Label unwind_recursive_activation;
+ __ bind(unwind_recursive_activation);
+
+ // Returning to an interpreter method from a "recursive" interpreter call;
+ // the result converter left rax pointing to the top of the java stack for the method
+ // we are returning to. Now all we must do is unwind the state from the completed call.
+
+ __ movptr(state, STATE(_prev_link)); // unwind state
+ __ leave(); // pop the frame
+ __ mov(rsp, rax); // unwind stack to remove args
+
+ // Resume the interpreter. The current frame contains the current interpreter
+ // state object.
+ //
+
+ __ bind(resume_interpreter);
+
+ // state == interpreterState object for method we are resuming
+
+ __ movl(STATE(_msg), (int)BytecodeInterpreter::method_resume);
+ __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present)
+ __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed,
+ // result if any on stack already)
+ __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
+ __ jmp(call_interpreter_2); // No need to bang
+
+ // interpreter returning to native code (call_stub/c1/c2)
+ // convert result and unwind initial activation
+ // rax - result index
+
+ __ bind(return_to_initial_caller);
+ ExternalAddress stack_to_native((address)CppInterpreter::_stack_to_native_abi);
+ // Address index(noreg, rax, Address::times_ptr);
+
+ __ movptr(rax, ArrayAddress(stack_to_native, Address(noreg, rax, Address::times_ptr)));
+ __ call(rax); // call result converter
+
+ Label unwind_initial_activation;
+ __ bind(unwind_initial_activation);
+
+ // RETURN TO CALL_STUB/C1/C2 code (result if any in rax/rdx ST(0))
+
+ /* Current stack picture
+
+ [ incoming parameters ]
+ [ extra locals ]
+ [ return address to CALL_STUB/C1/C2]
+ fp -> [ CALL_STUB/C1/C2 fp ]
+ BytecodeInterpreter object
+ expression stack
+ sp ->
+
+ */
+
+ // return restoring the stack to the original sender_sp value
+
+ __ movptr(rcx, STATE(_sender_sp));
+ __ leave();
+ __ pop(rdi); // get return address
+ // set stack to sender's sp
+ __ mov(rsp, rcx);
+ __ jmp(rdi); // return to call_stub
+
+ // OSR request, adjust return address to make current frame into adapter frame
+ // and enter OSR nmethod
+
+ __ bind(do_OSR);
+
+ Label remove_initial_frame;
+
+ // We are going to pop this frame. Is there another interpreter frame underneath
+ // it or is it callstub/compiled?
+
+ // Move buffer to the expected parameter location
+ __ movptr(rcx, STATE(_result._osr._osr_buf));
+
+ __ movptr(rax, STATE(_result._osr._osr_entry));
+
+ __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call?
+ __ jcc(Assembler::equal, remove_initial_frame); // back to native code (call_stub/c1/c2)
+
+ __ movptr(sender_sp_on_entry, STATE(_sender_sp)); // get sender's sp in expected register
+ __ leave(); // pop the frame
+ __ mov(rsp, sender_sp_on_entry); // trim any stack expansion
+
+
+ // We know we are calling compiled code, so push a specialized return address.
+ // The method uses a specialized entry; push a return so we look like a call stub setup.
+ // This path will handle the fact that the result is returned in registers and not
+ // on the java stack.
+
+ __ pushptr(return_from_native_method.addr());
+
+ __ jmp(rax);
+
+ __ bind(remove_initial_frame);
+
+ __ movptr(rdx, STATE(_sender_sp));
+ __ leave();
+ // get real return
+ __ pop(rsi);
+ // set stack to sender's sp
+ __ mov(rsp, rdx);
+ // repush real return
+ __ push(rsi);
+ // Enter OSR nmethod
+ __ jmp(rax);
+
+
+
+
+ // Call a new method. All we do is (temporarily) trim the expression stack,
+ // push a return address to bring us back here, and leap to the new entry.
+
+ __ bind(call_method);
+
+ // The stack points to the next free location, not the top element of the expression stack;
+ // the method expects sp to be pointing to the topmost element.
+
+ __ movptr(rsp, STATE(_stack)); // pop args to c++ interpreter, set sp to java stack top
+ __ lea(rsp, Address(rsp, wordSize));
+
+ __ movptr(rbx, STATE(_result._to_call._callee)); // get method to execute
+
+ // don't need a return address if reinvoking interpreter
+
+ // Make it look like call_stub calling conventions
+
+ // Get (potential) receiver
+ __ load_unsigned_short(rcx, size_of_parameters); // get size of parameters in words
+
+ ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
+ __ pushptr(recursive.addr()); // make it look good in the debugger
+
+ InternalAddress entry(entry_point);
+ __ cmpptr(STATE(_result._to_call._callee_entry_point), entry.addr()); // returning to interpreter?
+ __ jcc(Assembler::equal, re_dispatch); // yes
+
+ __ pop(rax); // pop dummy address
+
+
+ // get specialized entry
+ __ movptr(rax, STATE(_result._to_call._callee_entry_point));
+ // set sender SP
+ __ mov(sender_sp_on_entry, rsp);
+
+ // The method uses a specialized entry; push a return so we look like a call stub setup.
+ // This path will handle the fact that the result is returned in registers and not
+ // on the java stack.
+
+ __ pushptr(return_from_native_method.addr());
+
+ __ jmp(rax);
+
+ __ bind(bad_msg);
+ __ stop("Bad message from interpreter");
+
+ // Interpreted method "returned" with an exception; pass it on.
+ // Pass the result, unwind the activation and continue/return to interpreter/call_stub.
+ // We handle the result (if any) differently based on return to interpreter or call_stub.
+
+ Label unwind_initial_with_pending_exception;
+
+ __ bind(throw_exception);
+ __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from recursive interpreter call?
+ __ jcc(Assembler::equal, unwind_initial_with_pending_exception); // no, back to native code (call_stub/c1/c2)
+ __ movptr(rax, STATE(_locals)); // pop parameters; get new stack value
+ __ addptr(rax, wordSize); // account for prepush before we return
+ __ jmp(unwind_recursive_activation);
+
+ __ bind(unwind_initial_with_pending_exception);
+
+ // We will unwind the current (initial) interpreter frame and forward
+ // the exception to the caller. We must put the exception in the
+ // expected register and clear pending exception and then forward.
+
+ __ jmp(unwind_and_forward);
+
+ interpreter_frame_manager = entry_point;
+ return entry_point;
+}
+
+address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
+ // determine code generation flags
+ bool synchronized = false;
+ address entry_point = NULL;
+
+ switch (kind) {
+ case Interpreter::zerolocals : break;
+ case Interpreter::zerolocals_synchronized: synchronized = true; break;
+ case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
+ case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
+ case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
+ case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
+ case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
+ case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;
+
+ case Interpreter::java_lang_math_sin : // fall thru
+ case Interpreter::java_lang_math_cos : // fall thru
+ case Interpreter::java_lang_math_tan : // fall thru
+ case Interpreter::java_lang_math_abs : // fall thru
+ case Interpreter::java_lang_math_log : // fall thru
+ case Interpreter::java_lang_math_log10 : // fall thru
+ case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
+ case Interpreter::java_lang_ref_reference_get
+ : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
+ default : ShouldNotReachHere(); break;
+ }
+
+ if (entry_point) return entry_point;
+
+ return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
+
+}
+
+InterpreterGenerator::InterpreterGenerator(StubQueue* code)
+ : CppInterpreterGenerator(code) {
+ generate_all(); // down here so it can be "virtual"
+}
+
+// Deoptimization helpers for C++ interpreter
+
+// How much stack a method activation needs in words.
+int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
+
+ const int stub_code = 4; // see generate_call_stub
+ // Save space for one monitor to get into the interpreted method in case
+ // the method is synchronized
+ int monitor_size = method->is_synchronized() ?
+ 1*frame::interpreter_frame_monitor_size() : 0;
+
+ // total static overhead size. Account for interpreter state object, return
+ // address, saved rbp and 2 words for a "static long no_params() method" issue.
+
+ const int overhead_size = sizeof(BytecodeInterpreter)/wordSize +
+ ( frame::sender_sp_offset - frame::link_offset) + 2;
+
+ const int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries();
+ const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
+ Interpreter::stackElementWords();
+ return overhead_size + method_stack + stub_code;
+}
+
+// returns the activation size.
+static int size_activation_helper(int extra_locals_size, int monitor_size) {
+ return (extra_locals_size + // the additional space for locals
+ 2*BytesPerWord + // return address and saved rbp
+ 2*BytesPerWord + // "static long no_params() method" issue
+ sizeof(BytecodeInterpreter) + // interpreterState
+ monitor_size); // monitors
+}
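+// Note: unlike size_top_interpreter_activation above, which returns words,
+// this helper works in bytes; layout_activation converts its result back to
+// words when it returns.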
+
+void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
+ frame* caller,
+ frame* current,
+ methodOop method,
+ intptr_t* locals,
+ intptr_t* stack,
+ intptr_t* stack_base,
+ intptr_t* monitor_base,
+ intptr_t* frame_bottom,
+ bool is_top_frame
+ )
+{
+ // What about any vtable?
+ //
+ to_fill->_thread = JavaThread::current();
+ // This gets filled in later but make it something recognizable for now
+ to_fill->_bcp = method->code_base();
+ to_fill->_locals = locals;
+ to_fill->_constants = method->constants()->cache();
+ to_fill->_method = method;
+ to_fill->_mdx = NULL;
+ to_fill->_stack = stack;
+ if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution() ) {
+ to_fill->_msg = deopt_resume2;
+ } else {
+ to_fill->_msg = method_resume;
+ }
+ to_fill->_result._to_call._bcp_advance = 0;
+ to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
+ to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone
+ to_fill->_prev_link = NULL;
+
+ to_fill->_sender_sp = caller->unextended_sp();
+
+ if (caller->is_interpreted_frame()) {
+ interpreterState prev = caller->get_interpreterState();
+ to_fill->_prev_link = prev;
+ // *current->register_addr(GR_Iprev_state) = (intptr_t) prev;
+ // Make the prev callee look proper
+ prev->_result._to_call._callee = method;
+ if (*prev->_bcp == Bytecodes::_invokeinterface) {
+ prev->_result._to_call._bcp_advance = 5;
+ } else {
+ prev->_result._to_call._bcp_advance = 3;
+ }
+ }
+ to_fill->_oop_temp = NULL;
+ to_fill->_stack_base = stack_base;
+ // Need +1 here because stack_base points to the word just above the first expr stack entry
+ // and stack_limit is supposed to point to the word just below the last expr stack entry.
+ // See generate_compute_interpreter_state.
+ int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries();
+ to_fill->_stack_limit = stack_base - (method->max_stack() + extra_stack + 1);
+ to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
+
+ to_fill->_self_link = to_fill;
+ assert(stack >= to_fill->_stack_limit && stack < to_fill->_stack_base,
+ "Stack top out of range");
+}
+
+int AbstractInterpreter::layout_activation(methodOop method,
+ int tempcount, //
+ int popframe_extra_args,
+ int moncount,
+ int caller_actual_parameters,
+ int callee_param_count,
+ int callee_locals,
+ frame* caller,
+ frame* interpreter_frame,
+ bool is_top_frame) {
+
+ assert(popframe_extra_args == 0, "FIX ME");
+ // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
+ // does as far as allocating an interpreter frame.
+ // If interpreter_frame!=NULL, set up the method, locals, and monitors.
+ // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
+ // as determined by a previous call to this method.
+ // It is also guaranteed to be walkable even though it is in a skeletal state
+ // NOTE: return size is in words not bytes
+ // NOTE: tempcount is the current size of the java expression stack. For topmost
+ // frames we will allocate a full sized expression stack and not the cut-back
+ // version that non-top frames have.
+
+ // Calculate the amount our frame will be adjusted by the callee. For the top frame
+ // this is zero.
+
+ // NOTE: ia64 seems to do this wrong (or at least backwards) in that it
+ // calculates the extra locals based on itself. Not what the callee does
+ // to it. So it ignores last_frame_adjust value. Seems suspicious as far
+ // as getting sender_sp correct.
+
+ int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
+ int monitor_size = sizeof(BasicObjectLock) * moncount;
+
+ // First calculate the frame size without any java expression stack
+ int short_frame_size = size_activation_helper(extra_locals_size,
+ monitor_size);
+
+ // Now with full size expression stack
+ int extra_stack = 0; //6815692//methodOopDesc::extra_stack_entries();
+ int full_frame_size = short_frame_size + (method->max_stack() + extra_stack) * BytesPerWord;
+
+ // and now with only live portion of the expression stack
+ short_frame_size = short_frame_size + tempcount * BytesPerWord;
+
+ // the size the activation is right now. Only top frame is full size
+ int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
+
+ if (interpreter_frame != NULL) {
+#ifdef ASSERT
+ assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
+#endif
+
+ // MUCHO HACK
+
+ intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size));
+
+ /* Now fill in the interpreterState object */
+
+ // The state object is the first thing on the frame and easily located
+
+ interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
+
+
+ // Find the locals pointer. This is rather simple on x86 because there is no
+ // confusing rounding at the callee to account for. We can trivially locate
+ // our locals based on the current fp().
+ // Note: the + 2 is for handling the "static long no_params() method" issue.
+ // (too bad I don't really remember that issue well...)
+
+ intptr_t* locals;
+ // If the caller is interpreted we need to make sure that locals points to the first
+ // argument that the caller passed and not into an area where the stack might have been
+ // extended, because the stack-to-stack converter needs a proper locals value in order to
+ // remove the arguments from the caller and place the result in the proper location. Hmm
+ // maybe it'd be simpler if we simply stored the result in the BytecodeInterpreter object
+ // and let the c++ code adjust the stack?? HMMM QQQ
+ //
+ if (caller->is_interpreted_frame()) {
+ // locals must agree with the caller because it will be used to set the
+ // caller's tos when we return.
+ interpreterState prev = caller->get_interpreterState();
+ // stack() is prepushed.
+ locals = prev->stack() + method->size_of_parameters();
+ // locals = caller->unextended_sp() + (method->size_of_parameters() - 1);
+ if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) {
+ // os::breakpoint();
+ }
+ } else {
+ // this is where a c2i would have placed locals (except for the +2)
+ locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2;
+ }
+
+ intptr_t* monitor_base = (intptr_t*) cur_state;
+ intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
+ /* +1 because stack is always prepushed */
+ intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord);
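+ // A sketch of the layout computed above (addresses decreasing downward):
+ //   cur_state (BytecodeInterpreter)    <-- monitor_base
+ //   [ monitors: monitor_size bytes ]   <-- stack_base
+ //   [ expression stack + prepush ]     <-- stack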
+
+
+ BytecodeInterpreter::layout_interpreterState(cur_state,
+ caller,
+ interpreter_frame,
+ method,
+ locals,
+ stack,
+ stack_base,
+ monitor_base,
+ frame_bottom,
+ is_top_frame);
+
+ // BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
+ }
+ return frame_size/BytesPerWord;
+}
+
+#endif // CC_INTERP (all)
diff --git a/src/cpu/aarch64/vm/cppInterpreter_aarch64.hpp b/src/cpu/aarch64/vm/cppInterpreter_aarch64.hpp
new file mode 100644
index 000000000..797fcebe3
--- /dev/null
+++ b/src/cpu/aarch64/vm/cppInterpreter_aarch64.hpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_CPPINTERPRETER_AARCH64_HPP
+#define CPU_AARCH64_VM_CPPINTERPRETER_AARCH64_HPP
+
+
+ protected:
+
+ // Size of interpreter code. Increase if too small. Interpreter will
+ // fail with a guarantee ("not enough space for interpreter generation")
+ // if too small.
+ // Run with +PrintInterpreter to get the VM to print out the size.
+ // Max size with JVMTI
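+ // (For example, with a debug build: java -XX:+PrintInterpreter -version
+ // prints the generated interpreter code and its size.)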
+ const static int InterpreterCodeSize = 168 * 1024;
+
+#endif // CPU_AARCH64_VM_CPPINTERPRETER_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/debug_aarch64.cpp b/src/cpu/aarch64/vm/debug_aarch64.cpp
new file mode 100644
index 000000000..b128439ef
--- /dev/null
+++ b/src/cpu/aarch64/vm/debug_aarch64.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/codeCache.hpp"
+#include "code/nmethod.hpp"
+#include "runtime/frame.hpp"
+#include "runtime/init.hpp"
+#include "runtime/os.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/top.hpp"
+
+void pd_ps(frame f) {}
diff --git a/src/cpu/aarch64/vm/depChecker_aarch64.cpp b/src/cpu/aarch64/vm/depChecker_aarch64.cpp
new file mode 100644
index 000000000..be90089b0
--- /dev/null
+++ b/src/cpu/aarch64/vm/depChecker_aarch64.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "compiler/disassembler.hpp"
+#include "depChecker_aarch64.hpp"
+
+// Nothing to do on aarch64
diff --git a/src/cpu/aarch64/vm/depChecker_aarch64.hpp b/src/cpu/aarch64/vm/depChecker_aarch64.hpp
new file mode 100644
index 000000000..5654bbd12
--- /dev/null
+++ b/src/cpu/aarch64/vm/depChecker_aarch64.hpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_DEPCHECKER_AARCH64_HPP
+#define CPU_AARCH64_VM_DEPCHECKER_AARCH64_HPP
+
+// Nothing to do on aarch64
+
+#endif // CPU_AARCH64_VM_DEPCHECKER_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/disassembler_aarch64.hpp b/src/cpu/aarch64/vm/disassembler_aarch64.hpp
new file mode 100644
index 000000000..a3b74bac4
--- /dev/null
+++ b/src/cpu/aarch64/vm/disassembler_aarch64.hpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_DISASSEMBLER_AARCH64_HPP
+#define CPU_AARCH64_VM_DISASSEMBLER_AARCH64_HPP
+
+ static int pd_instruction_alignment() {
+ return 1;
+ }
+
+ static const char* pd_cpu_opts() {
+ return "";
+ }
+
+#endif // CPU_AARCH64_VM_DISASSEMBLER_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/dump_aarch64.cpp b/src/cpu/aarch64/vm/dump_aarch64.cpp
new file mode 100644
index 000000000..ef6d09679
--- /dev/null
+++ b/src/cpu/aarch64/vm/dump_aarch64.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "assembler_aarch64.inline.hpp"
+#include "memory/compactingPermGenGen.hpp"
+#include "memory/generation.inline.hpp"
+#include "memory/space.inline.hpp"
+
+
+
+// Generate the self-patching vtable method:
+//
+// This method will be called (as any other Klass virtual method) with
+// the Klass itself as the first argument. Example:
+//
+// oop obj;
+// int size = obj->klass()->klass_part()->oop_size(this);
+//
+// for which the virtual method call is Klass::oop_size();
+//
+// The dummy method is called with the Klass object as the first
+// operand, and an object as the second argument.
+//
+
+//=====================================================================
+
+// All of the dummy methods in the vtable are essentially identical,
+// differing only by an ordinal constant, and they bear no relationship
+// to the original method which the caller intended. Also, there need
+// to be 'vtbl_list_size' instances of the vtable in order to
+// differentiate between the 'vtbl_list_size' original Klass objects.
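+//
+// A conceptual sketch of one generated stub (an assumed shape for illustration
+// only -- the aarch64 version below is still Unimplemented()): slot j of dummy
+// vtable i loads its ordinal and tails into common code that recovers the
+// original vtable and re-dispatches:
+//
+//   stub_i_j:
+//     mov  rscratch1, #(i * num_virtuals + j)   // identify this slot
+//     b    common_dispatch                      // look up vtbl_list[i] and
+//                                               // enter the real slot j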
+
+#define __ masm->
+
+void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
+ void** vtable,
+ char** md_top,
+ char* md_end,
+ char** mc_top,
+ char* mc_end) { Unimplemented(); }
diff --git a/src/cpu/aarch64/vm/frame_aarch64.cpp b/src/cpu/aarch64/vm/frame_aarch64.cpp
new file mode 100644
index 000000000..3401a5d90
--- /dev/null
+++ b/src/cpu/aarch64/vm/frame_aarch64.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/interpreter.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/markOop.hpp"
+#include "oops/methodOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/javaCalls.hpp"
+#include "runtime/monitorChunk.hpp"
+#include "runtime/signature.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "vmreg_aarch64.inline.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#include "runtime/vframeArray.hpp"
+#endif
+
+#ifdef ASSERT
+void RegisterMap::check_location_valid() {
+}
+#endif
+
+
+// Profiling/safepoint support
+
+bool frame::safe_for_sender(JavaThread *thread) { Unimplemented(); return false; }
+
+
+void frame::patch_pc(Thread* thread, address pc) { Unimplemented(); }
+
+bool frame::is_interpreted_frame() const { Unimplemented(); return false; }
+
+int frame::frame_size(RegisterMap* map) const { Unimplemented(); return 0; }
+
+intptr_t* frame::entry_frame_argument_at(int offset) const { Unimplemented(); return 0; }
+
+// sender_sp
+#ifdef CC_INTERP
+intptr_t* frame::interpreter_frame_sender_sp() const { Unimplemented(); return 0; }
+
+// monitor elements
+
+BasicObjectLock* frame::interpreter_frame_monitor_begin() const { Unimplemented(); return 0; }
+
+BasicObjectLock* frame::interpreter_frame_monitor_end() const { Unimplemented(); return 0; }
+
+#else // CC_INTERP
+
+intptr_t* frame::interpreter_frame_sender_sp() const { Unimplemented(); return 0; }
+
+void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) { Unimplemented(); }
+
+
+// monitor elements
+
+BasicObjectLock* frame::interpreter_frame_monitor_begin() const { Unimplemented(); return 0; }
+
+BasicObjectLock* frame::interpreter_frame_monitor_end() const { Unimplemented(); return 0; }
+
+void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) { Unimplemented(); }
+
+// Used by template based interpreter deoptimization
+void frame::interpreter_frame_set_last_sp(intptr_t* sp) { Unimplemented(); }
+#endif // CC_INTERP
+
+frame frame::sender_for_entry_frame(RegisterMap* map) const { Unimplemented(); frame fr; return fr; }
+
+//------------------------------------------------------------------------------
+// frame::verify_deopt_original_pc
+//
+// Verifies the calculated original PC of a deoptimization PC for the
+// given unextended SP. The unextended SP might also be the saved SP
+// for MethodHandle call sites.
+#ifdef ASSERT
+void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) { Unimplemented(); }
+#endif
+
+//------------------------------------------------------------------------------
+// frame::adjust_unextended_sp
+void frame::adjust_unextended_sp() { Unimplemented(); }
+
+//------------------------------------------------------------------------------
+// frame::update_map_with_saved_link
+void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) { Unimplemented(); }
+
+
+//------------------------------------------------------------------------------
+// frame::sender_for_interpreter_frame
+frame frame::sender_for_interpreter_frame(RegisterMap* map) const { Unimplemented(); frame fr; return fr; }
+
+
+//------------------------------------------------------------------------------
+// frame::sender_for_compiled_frame
+frame frame::sender_for_compiled_frame(RegisterMap* map) const { Unimplemented(); frame fr; return fr; }
+
+
+//------------------------------------------------------------------------------
+// frame::sender
+frame frame::sender(RegisterMap* map) const { Unimplemented(); frame fr; return fr; }
+
+
+bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) { Unimplemented(); return false; }
+
+void frame::pd_gc_epilog() { Unimplemented(); }
+
+bool frame::is_interpreted_frame_valid(JavaThread* thread) const { Unimplemented(); return false; }
+
+BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) { Unimplemented(); return T_VOID; }
+
+
+intptr_t* frame::interpreter_frame_tos_at(jint offset) const { Unimplemented(); return 0; }
+
+#ifndef PRODUCT
+
+#define DESCRIBE_FP_OFFSET(name) \
+ values.describe(frame_no, fp() + frame::name##_offset, #name)
+
+void frame::describe_pd(FrameValues& values, int frame_no) { Unimplemented(); }
+#endif
+
+intptr_t *frame::initial_deoptimization_info() { Unimplemented(); return 0; }
+
+intptr_t* frame::real_fp() const { Unimplemented(); return 0; }
diff --git a/src/cpu/aarch64/vm/frame_aarch64.hpp b/src/cpu/aarch64/vm/frame_aarch64.hpp
new file mode 100644
index 000000000..01f6e6cc8
--- /dev/null
+++ b/src/cpu/aarch64/vm/frame_aarch64.hpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_FRAME_AARCH64_HPP
+#define CPU_AARCH64_VM_FRAME_AARCH64_HPP
+
+#include "runtime/synchronizer.hpp"
+#include "utilities/top.hpp"
+
+// A frame represents a physical stack frame (an activation). Frames can be
+// C or Java frames, and the Java frames can be interpreted or compiled.
+// In contrast, vframes represent source-level activations, so that one physical frame
+// can correspond to multiple source level frames because of inlining.
+// A frame is comprised of {pc, fp, sp}
+// ------------------------------ Asm interpreter ----------------------------------------
+// Layout of asm interpreter frame:
+// [expression stack ] * <- sp
+// [monitors ] \
+// ... | monitor block size
+// [monitors ] /
+// [monitor block size ]
+// [bytecode index/pointer] = bcx() bcx_offset
+// [pointer to locals ] = locals() locals_offset
+// [constant pool cache ] = cache() cache_offset
+// [methodData ] = mdp() mdx_offset
+// [methodOop ] = method() method_offset
+// [last sp ] = last_sp() last_sp_offset
+// [old stack pointer ] (sender_sp) sender_sp_offset
+// [old frame pointer ] <- fp = link()
+// [return pc ]
+// [oop temp ] (only for native calls)
+// [locals and parameters ]
+// <- sender sp
+// ------------------------------ Asm interpreter ----------------------------------------
+
+// ------------------------------ C++ interpreter ----------------------------------------
+//
+// Layout of C++ interpreter frame: (While executing in BytecodeInterpreter::run)
+//
+// <- SP (current esp/rsp)
+// [local variables ] BytecodeInterpreter::run local variables
+// ... BytecodeInterpreter::run local variables
+// [local variables ] BytecodeInterpreter::run local variables
+// [old frame pointer ] fp [ BytecodeInterpreter::run's ebp/rbp ]
+// [return pc ] (return to frame manager)
+// [interpreter_state* ] (arg to BytecodeInterpreter::run) --------------
+// [expression stack ] <- last_Java_sp |
+// [... ] * <- interpreter_state.stack |
+// [expression stack ] * <- interpreter_state.stack_base |
+// [monitors ] \ |
+// ... | monitor block size |
+// [monitors ] / <- interpreter_state.monitor_base |
+// [struct interpretState ] <-----------------------------------------|
+// [return pc ] (return to callee of frame manager [1])
+// [locals and parameters ]
+// <- sender sp
+
+// [1] When the c++ interpreter calls a new method it returns to the frame
+// manager which allocates a new frame on the stack. In that case there
+// is no real callee of this newly allocated frame. The frame manager is
+// aware of the additional frame(s) and will pop them as nested calls
+// complete. However, to make it look good in the debugger the frame
+// manager actually installs a dummy pc pointing to RecursiveInterpreterActivation
+// with a fake interpreter_state* parameter to make it easy to debug
+// nested calls.
+
+// Note that contrary to the layout for the assembly interpreter the
+// expression stack allocated for the C++ interpreter is full sized.
+// However this is not as bad as it seems as the interpreter frame_manager
+// will truncate the unused space on successive method calls.
+//
+// ------------------------------ C++ interpreter ----------------------------------------
+
+ public:
+ enum {
+ pc_return_offset = 0,
+ // All frames
+ link_offset = 0,
+ return_addr_offset = 1,
+ // non-interpreter frames
+ sender_sp_offset = 2,
+
+#ifndef CC_INTERP
+
+ // Interpreter frames
+ interpreter_frame_result_handler_offset = 3, // for native calls only
+ interpreter_frame_oop_temp_offset = 2, // for native calls only
+
+ interpreter_frame_sender_sp_offset = -1,
+ // outgoing sp before a call to an invoked method
+ interpreter_frame_last_sp_offset = interpreter_frame_sender_sp_offset - 1,
+ interpreter_frame_method_offset = interpreter_frame_last_sp_offset - 1,
+ interpreter_frame_mdx_offset = interpreter_frame_method_offset - 1,
+ interpreter_frame_cache_offset = interpreter_frame_mdx_offset - 1,
+ interpreter_frame_locals_offset = interpreter_frame_cache_offset - 1,
+ interpreter_frame_bcx_offset = interpreter_frame_locals_offset - 1,
+ interpreter_frame_initial_sp_offset = interpreter_frame_bcx_offset - 1,
+
+ interpreter_frame_monitor_block_top_offset = interpreter_frame_initial_sp_offset,
+ interpreter_frame_monitor_block_bottom_offset = interpreter_frame_initial_sp_offset,
+
+#endif // CC_INTERP
+
+ // Entry frames
+#ifdef AMD64
+#ifdef _WIN64
+ entry_frame_after_call_words = 28,
+ entry_frame_call_wrapper_offset = 2,
+
+ arg_reg_save_area_bytes = 32, // Register argument save area
+#else
+ entry_frame_after_call_words = 13,
+ entry_frame_call_wrapper_offset = -6,
+
+ arg_reg_save_area_bytes = 0,
+#endif // _WIN64
+#else
+ entry_frame_call_wrapper_offset = 2,
+#endif // AMD64
+
+ // Native frames
+
+ native_frame_initial_param_offset = 2
+
+ };
+
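+ // Illustrative sketch (not part of this skeleton, whose accessors are still
+ // stubbed out): given the offsets above, a field accessor is just an
+ // fp-relative read, e.g. as on x86:
+ //
+ //   inline intptr_t** frame::interpreter_frame_locals_addr() const {
+ //     return (intptr_t**) addr_at(interpreter_frame_locals_offset);
+ //   }
+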
+ intptr_t ptr_at(int offset) const {
+ return *ptr_at_addr(offset);
+ }
+
+ void ptr_at_put(int offset, intptr_t value) {
+ *ptr_at_addr(offset) = value;
+ }
+
+ private:
+ // an additional field beyond _sp and _pc:
+ intptr_t* _fp; // frame pointer
+ // The interpreter and adapters will extend the frame of the caller.
+ // Since oopMaps are based on the sp of the caller before extension
+ // we need to know that value. However in order to compute the address
+ // of the return address we need the real "raw" sp. Since sparc already
+ // uses sp() to mean "raw" sp and unextended_sp() to mean the caller's
+ // original sp we use that convention.
+
+ intptr_t* _unextended_sp;
+ void adjust_unextended_sp();
+
+ intptr_t* ptr_at_addr(int offset) const {
+ return (intptr_t*) addr_at(offset);
+ }
+
+#ifdef ASSERT
+ // Used in frame::sender_for_{interpreter,compiled}_frame
+ static void verify_deopt_original_pc( nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
+ static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
+ verify_deopt_original_pc(nm, unextended_sp, true);
+ }
+#endif
+
+ public:
+ // Constructors
+
+ frame(intptr_t* sp, intptr_t* fp, address pc);
+
+ frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc);
+
+ frame(intptr_t* sp, intptr_t* fp);
+
+ // accessors for the instance variables
+ // Note: not necessarily the real 'frame pointer' (see real_fp)
+ intptr_t* fp() const { return _fp; }
+
+ inline address* sender_pc_addr() const;
+
+ // return address of param, zero origin index.
+ inline address* native_param_addr(int idx) const;
+
+ // expression stack tos if we are nested in a java call
+ intptr_t* interpreter_frame_last_sp() const;
+
+ // helper to update a map with the callee-saved frame pointer (RBP on x86)
+ static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr);
+
+#ifndef CC_INTERP
+ // deoptimization support
+ void interpreter_frame_set_last_sp(intptr_t* sp);
+#endif // CC_INTERP
+
+#ifdef CC_INTERP
+ inline interpreterState get_interpreterState() const;
+#endif // CC_INTERP
+
+#endif // CPU_AARCH64_VM_FRAME_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/frame_aarch64.inline.hpp b/src/cpu/aarch64/vm/frame_aarch64.inline.hpp
new file mode 100644
index 000000000..b72afce37
--- /dev/null
+++ b/src/cpu/aarch64/vm/frame_aarch64.inline.hpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_FRAME_AARCH64_INLINE_HPP
+#define CPU_AARCH64_VM_FRAME_AARCH64_INLINE_HPP
+
+// Inline functions for AArch64 frames:
+
+// Constructors:
+
+inline frame::frame() {
+ _pc = NULL;
+ _sp = NULL;
+ _unextended_sp = NULL;
+ _fp = NULL;
+ _cb = NULL;
+ _deopt_state = unknown;
+}
+
+inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) { Unimplemented(); }
+
+inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) { Unimplemented(); }
+
+inline frame::frame(intptr_t* sp, intptr_t* fp) { Unimplemented(); }
+
+// Accessors
+
+inline bool frame::equal(frame other) const { Unimplemented(); return false; }
+
+// Return unique id for this frame. The id must have a value where we can distinguish
+// identity and younger/older relationship. NULL represents an invalid (incomparable)
+// frame.
+inline intptr_t* frame::id(void) const { Unimplemented(); return 0; }
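+// (For reference, an assumption based on the x86 version rather than anything
+// implemented here: the unextended sp satisfies this contract, so the body
+// could simply be { return unextended_sp(); }.)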
+
+// Relationals on frames, based on the frame id:
+// Return true if the frame is younger (more recent activation) than the frame represented by id
+inline bool frame::is_younger(intptr_t* id) const { Unimplemented(); return false; }
+
+// Return true if the frame is older (less recent activation) than the frame represented by id
+inline bool frame::is_older(intptr_t* id) const { Unimplemented(); return false; }
+
+
+
+inline intptr_t* frame::link() const { Unimplemented(); return 0; }
+inline void frame::set_link(intptr_t* addr) { Unimplemented(); }
+
+
+inline intptr_t* frame::unextended_sp() const { Unimplemented(); return 0; }
+
+// Return address:
+
+inline address* frame::sender_pc_addr() const { Unimplemented(); return 0; }
+inline address frame::sender_pc() const { Unimplemented(); return 0; }
+
+// return address of param, zero origin index.
+inline address* frame::native_param_addr(int idx) const { Unimplemented(); return 0; }
+
+#ifdef CC_INTERP
+
+inline interpreterState frame::get_interpreterState() const { Unimplemented(); return 0; }
+
+inline intptr_t* frame::sender_sp() const { Unimplemented(); return 0; }
+
+inline intptr_t** frame::interpreter_frame_locals_addr() const { Unimplemented(); return 0; }
+
+inline intptr_t* frame::interpreter_frame_bcx_addr() const { Unimplemented(); return 0; }
+
+
+// Constant pool cache
+
+inline constantPoolCacheOop* frame::interpreter_frame_cache_addr() const { Unimplemented(); return 0; }
+
+// Method
+
+inline methodOop* frame::interpreter_frame_method_addr() const { Unimplemented(); return 0; }
+
+inline intptr_t* frame::interpreter_frame_mdx_addr() const { Unimplemented(); return 0; }
+
+// top of expression stack
+inline intptr_t* frame::interpreter_frame_tos_address() const { Unimplemented(); return 0; }
+
+#else /* asm interpreter */
+inline intptr_t* frame::sender_sp() const { Unimplemented(); return 0; }
+
+inline intptr_t** frame::interpreter_frame_locals_addr() const { Unimplemented(); return 0; }
+
+inline intptr_t* frame::interpreter_frame_last_sp() const { Unimplemented(); return 0; }
+
+inline intptr_t* frame::interpreter_frame_bcx_addr() const { Unimplemented(); return 0; }
+
+
+inline intptr_t* frame::interpreter_frame_mdx_addr() const { Unimplemented(); return 0; }
+
+
+
+// Constant pool cache
+
+inline constantPoolCacheOop* frame::interpreter_frame_cache_addr() const { Unimplemented(); return 0; }
+
+// Method
+
+inline methodOop* frame::interpreter_frame_method_addr() const { Unimplemented(); return 0; }
+
+// top of expression stack
+inline intptr_t* frame::interpreter_frame_tos_address() const { Unimplemented(); return 0; }
+
+#endif /* CC_INTERP */
+
+inline int frame::pd_oop_map_offset_adjustment() const { Unimplemented(); return 0; }
+
+inline int frame::interpreter_frame_monitor_size() { Unimplemented(); return 0; }
+
+
+// expression stack
+// (the max_stack arguments are used by the GC; see class FrameClosure)
+
+inline intptr_t* frame::interpreter_frame_expression_stack() const { Unimplemented(); return 0; }
+
+
+inline jint frame::interpreter_frame_expression_stack_direction() { Unimplemented(); return -1; }
+
+
+// Entry frames
+
+inline JavaCallWrapper* frame::entry_frame_call_wrapper() const { Unimplemented(); return 0; }
+
+
+// Compiled frames
+
+inline int frame::local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) { Unimplemented(); return 0; }
+
+inline int frame::monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) { Unimplemented(); return 0; }
+
+inline int frame::min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors) { Unimplemented(); return 0; }
+
+
+inline bool frame::volatile_across_calls(Register reg) { Unimplemented(); return false; }
+
+
+
+inline oop frame::saved_oop_result(RegisterMap* map) const { Unimplemented(); oop o(0); return o; }
+
+inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) { Unimplemented(); }
+
+#endif // CPU_AARCH64_VM_FRAME_AARCH64_INLINE_HPP
diff --git a/src/cpu/aarch64/vm/globalDefinitions_aarch64.hpp b/src/cpu/aarch64/vm/globalDefinitions_aarch64.hpp
new file mode 100644
index 000000000..1c032e632
--- /dev/null
+++ b/src/cpu/aarch64/vm/globalDefinitions_aarch64.hpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_GLOBALDEFINITIONS_AARCH64_HPP
+#define CPU_AARCH64_VM_GLOBALDEFINITIONS_AARCH64_HPP
+
+const int StackAlignmentInBytes = 16;
+
+#endif // CPU_AARCH64_VM_GLOBALDEFINITIONS_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/globals_aarch64.hpp b/src/cpu/aarch64/vm/globals_aarch64.hpp
new file mode 100644
index 000000000..1d0ef81d5
--- /dev/null
+++ b/src/cpu/aarch64/vm/globals_aarch64.hpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_GLOBALS_AARCH64_HPP
+#define CPU_AARCH64_VM_GLOBALS_AARCH64_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+// Sets the default values for platform dependent flags used by the runtime system.
+// (see globals.hpp)
+
+define_pd_global(bool, ConvertSleepToYield, true);
+define_pd_global(bool, ShareVtableStubs, true);
+define_pd_global(bool, CountInterpCalls, true);
+define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
+
+define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
+define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast
+
+// See 4827828 for this change. There is no globals_core_i486.hpp. I can't
+// assign a different value for C2 without touching a number of files. Use
+// #ifdef to minimize the change as it's late in Mantis. -- FIXME.
+// c1 doesn't have this problem because the fix to 4858033 assures us
+// that the vep is aligned at CodeEntryAlignment, whereas c2 only aligns
+// the uep; the vep doesn't get real alignment but just slops on by,
+// assured only that the entry instruction meets the 5-byte size requirement.
+#ifdef COMPILER2
+define_pd_global(intx, CodeEntryAlignment, 32);
+#else
+define_pd_global(intx, CodeEntryAlignment, 16);
+#endif // COMPILER2
+define_pd_global(intx, OptoLoopAlignment, 16);
+define_pd_global(intx, InlineFrequencyCount, 100);
+define_pd_global(intx, InlineSmallCode, 1000);
+
+define_pd_global(intx, StackYellowPages, 2);
+define_pd_global(intx, StackRedPages, 1);
+#ifdef AMD64
+// Very large C++ stack frames using solaris-amd64 optimized builds
+// due to lack of optimization caused by C++ compiler bugs
+define_pd_global(intx, StackShadowPages, NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2));
+#else
+define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
+#endif // AMD64
+
+define_pd_global(intx, PreInflateSpin, 10);
+
+define_pd_global(bool, RewriteBytecodes, true);
+define_pd_global(bool, RewriteFrequentPairs, true);
+
+#ifdef _ALLBSD_SOURCE
+define_pd_global(bool, UseMembar, true);
+#else
+define_pd_global(bool, UseMembar, false);
+#endif
+
+// GC Ergo Flags
+define_pd_global(intx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
+#endif // CPU_AARCH64_VM_GLOBALS_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/icBuffer_aarch64.cpp b/src/cpu/aarch64/vm/icBuffer_aarch64.cpp
new file mode 100644
index 000000000..d1114ecc6
--- /dev/null
+++ b/src/cpu/aarch64/vm/icBuffer_aarch64.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "assembler_aarch64.inline.hpp"
+#include "code/icBuffer.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
+#include "interpreter/bytecodes.hpp"
+#include "memory/resourceArea.hpp"
+#include "nativeInst_aarch64.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/oop.inline2.hpp"
+
+int InlineCacheBuffer::ic_stub_code_size() {
+ // TODO -- this is just dummy code to get us bootstrapped far enough
+ // to be able to test the Assembler. This is the first
+ // processor-specific routine to get called during bootstrap.
+
+ return NativeInstruction::instruction_size * 2;
+}
+
+
+void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, oop cached_oop, address entry_point) { Unimplemented(); }
+
+
+address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) { Unimplemented(); return 0; }
+
+
+oop InlineCacheBuffer::ic_buffer_cached_oop(address code_begin) { Unimplemented(); return 0; }
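+
+// Conceptual shape of the buffer code (a sketch for illustration only --
+// assemble_ic_buffer_code above is still Unimplemented()): the stub is just an
+// oop load plus a branch, which is why ic_stub_code_size() reserves two
+// instruction slots:
+//
+//   <ic stub>:
+//     mov  <cache register>, #cached_oop   // install the cached oop
+//     b    entry_point                     // jump to the resolved entry point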
diff --git a/src/cpu/aarch64/vm/icache_aarch64.cpp b/src/cpu/aarch64/vm/icache_aarch64.cpp
new file mode 100644
index 000000000..5514040ae
--- /dev/null
+++ b/src/cpu/aarch64/vm/icache_aarch64.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "assembler_aarch64.inline.hpp"
+#include "runtime/icache.hpp"
+
+extern void aarch64TestHook();
+
+#define __ _masm->
+
+int _flush_icache_stub_dummy(address addr, int lines, int magic)
+{
+ return magic;
+}
+
+void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {
+
+ aarch64TestHook();
+
+#if 0
+ // TODO -- this is just dummy code to get us bootstrapped far enough
+ // to be able to test the Assembler.
+
+ // We have to put some code into the buffer because the flush routine checks
+ // that the first flush is for the current start address.
+ // The mark here ensures that the flush routine gets called on the way out.
+
+ StubCodeMark mark(this, "ICache", "flush_icache_stub");
+
+ address start = __ pc();
+ // generate more than 8 nops and then copy 8 words of the dummy x86 code
+ __ nop();
+ __ nop();
+ __ nop();
+ __ nop();
+ __ nop();
+ __ nop();
+ __ nop();
+ __ nop();
+ __ nop();
+ __ nop();
+ __ nop();
+ __ nop();
+
+ address dummy = (address)_flush_icache_stub_dummy;
+ memcpy(start, dummy, 8 * BytesPerWord);
+
+ *flush_icache_stub = (ICache::flush_icache_stub_t)start;
+
+#endif
+ Unimplemented();
+}
+
+#undef __
diff --git a/src/cpu/aarch64/vm/icache_aarch64.hpp b/src/cpu/aarch64/vm/icache_aarch64.hpp
new file mode 100644
index 000000000..914158a65
--- /dev/null
+++ b/src/cpu/aarch64/vm/icache_aarch64.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_ICACHE_AARCH64_HPP
+#define CPU_AARCH64_VM_ICACHE_AARCH64_HPP
+
+// Interface for updating the instruction cache. Whenever the VM modifies
+// code, part of the processor instruction cache potentially has to be flushed.
+
+// The text below is inherited from the x86 skeleton. On x86 the flush is a
+// no-op -- the I-cache is guaranteed to be consistent after the next jump,
+// and the VM never modifies instructions directly ahead of the instruction
+// fetch path. None of that holds on AArch64, whose I- and D-caches are not
+// coherent: a real flush (DC CVAU / IC IVAU / DSB / ISB over the modified
+// range) will be needed once this skeleton is fleshed out.
+
+// [phh] It's not clear that the x86 comment is correct even there, because on
+// an MP system where the dcaches are not snooped, only the thread doing the
+// invalidate will see the update. Even in the snooped case, a memory fence
+// would be necessary if stores weren't ordered. Fortunately, they are on all
+// known x86 implementations.
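+
+// A minimal sketch of such a flush, assuming Linux and a GCC-style compiler
+// (the helper name is hypothetical; nothing here implements it yet):
+//
+//   static void aarch64_flush_range(address start, address end) {
+//     __builtin___clear_cache((char*) start, (char*) end);
+//   }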
+
+class ICache : public AbstractICache {
+ public:
+#ifdef AMD64
+ enum {
+ stub_size = 64, // Size of the icache flush stub in bytes
+ line_size = 64, // Icache line size in bytes
+ log2_line_size = 6 // log2(line_size)
+ };
+
+ // Use default implementation
+#else
+ enum {
+ stub_size = 16, // Size of the icache flush stub in bytes
+ line_size = BytesPerWord, // conservative
+ log2_line_size = LogBytesPerWord // log2(line_size)
+ };
+#endif // AMD64
+};
+
+#endif // CPU_AARCH64_VM_ICACHE_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/interp_masm_aarch64.cpp b/src/cpu/aarch64/vm/interp_masm_aarch64.cpp
new file mode 100644
index 000000000..bca3bafd1
--- /dev/null
+++ b/src/cpu/aarch64/vm/interp_masm_aarch64.cpp
@@ -0,0 +1,385 @@
+/*
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interp_masm_aarch64.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/markOop.hpp"
+#include "oops/methodDataOop.hpp"
+#include "oops/methodOop.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiRedefineClassesTrace.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/basicLock.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/sharedRuntime.hpp"
+#ifdef TARGET_OS_FAMILY_linux
+# include "thread_linux.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_solaris
+# include "thread_solaris.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_windows
+# include "thread_windows.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_bsd
+# include "thread_bsd.inline.hpp"
+#endif
+
+
+// Implementation of InterpreterMacroAssembler
+
+#ifdef CC_INTERP
+void InterpreterMacroAssembler::get_method(Register reg) { Unimplemented(); }
+#endif // CC_INTERP
+
+#ifndef CC_INTERP
+
+void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
+ int number_of_arguments) { Unimplemented(); }
+
+void InterpreterMacroAssembler::call_VM_base(Register oop_result,
+ Register java_thread,
+ Register last_java_sp,
+ address entry_point,
+ int number_of_arguments,
+ bool check_exceptions) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::load_earlyret_value(TosState state) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
+ Register reg,
+ int bcp_offset) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
+ int bcp_offset,
+ size_t index_size) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
+ Register index,
+ int bcp_offset,
+ size_t index_size) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
+ Register index,
+ Register bytecode,
+ int byte_no,
+ int bcp_offset,
+ size_t index_size) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
+ Register tmp,
+ int bcp_offset,
+ size_t index_size) { Unimplemented(); }
+
+
+// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
+// subtype of super_klass.
+//
+// Args:
+// rax: superklass
+// Rsub_klass: subklass
+//
+// Kills:
+// rcx, rdi
+void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
+ Label& ok_is_subtype) { Unimplemented(); }
+
+
+
+// Java Expression Stack
+
+void InterpreterMacroAssembler::pop_ptr(Register r) { Unimplemented(); }
+
+void InterpreterMacroAssembler::pop_i(Register r) { Unimplemented(); }
+
+void InterpreterMacroAssembler::pop_l(Register r) { Unimplemented(); }
+
+void InterpreterMacroAssembler::push_ptr(Register r) { Unimplemented(); }
+
+void InterpreterMacroAssembler::push_i(Register r) { Unimplemented(); }
+
+void InterpreterMacroAssembler::push_l(Register r) { Unimplemented(); }
+
+void InterpreterMacroAssembler::pop(TosState state) { Unimplemented(); }
+
+void InterpreterMacroAssembler::push(TosState state) { Unimplemented(); }
+
+
+// Helpers for swap and dup
+void InterpreterMacroAssembler::load_ptr(int n, Register val) { Unimplemented(); }
+
+void InterpreterMacroAssembler::store_ptr(int n, Register val) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() { Unimplemented(); }
+
+
+// Jump to the from_interpreted entry of a call, unless single stepping is
+// possible in this thread, in which case we must call the i2i entry
+void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { Unimplemented(); }
+
+
+// The following two routines provide a hook so that an implementation
+// can schedule the dispatch in two parts. amd64 does not do this.
+void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) { Unimplemented(); }
+
+void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) { Unimplemented(); }
+
+void InterpreterMacroAssembler::dispatch_base(TosState state,
+ address* table,
+ bool verifyoop) { Unimplemented(); }
+
+void InterpreterMacroAssembler::dispatch_only(TosState state) { Unimplemented(); }
+
+void InterpreterMacroAssembler::dispatch_only_normal(TosState state) { Unimplemented(); }
+
+void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::dispatch_next(TosState state, int step) { Unimplemented(); }
+
+void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) { Unimplemented(); }
+
+// remove activation
+//
+// Unlock the receiver if this is a synchronized method.
+// Unlock any Java monitors from synchronized blocks.
+// Remove the activation from the stack.
+//
+// If there are locked Java monitors
+// If throw_monitor_exception
+// throws IllegalMonitorStateException
+// Else if install_monitor_exception
+// installs IllegalMonitorStateException
+// Else
+// no error processing
+void InterpreterMacroAssembler::remove_activation(
+ TosState state,
+ Register ret_addr,
+ bool throw_monitor_exception,
+ bool install_monitor_exception,
+ bool notify_jvmdi) { Unimplemented(); }
+
+#endif // CC_INTERP
+
+// Lock object
+//
+// Args:
+// c_rarg1: BasicObjectLock to be used for locking
+//
+// Kills:
+// rax
+// c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
+// rscratch1, rscratch2 (scratch regs)
+void InterpreterMacroAssembler::lock_object(Register lock_reg) { Unimplemented(); }
+
+
+// Unlocks an object. Used in monitorexit bytecode and
+// remove_activation. Throws an IllegalMonitorException if object is
+// not locked by current thread.
+//
+// Args:
+// c_rarg1: BasicObjectLock for lock
+//
+// Kills:
+// rax
+// c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
+// rscratch1, rscratch2 (scratch regs)
+void InterpreterMacroAssembler::unlock_object(Register lock_reg) { Unimplemented(); }
+
+#ifndef CC_INTERP
+
+void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
+ Label& zero_continue) { Unimplemented(); }
+
+
+// Set the method data pointer for the current bcp.
+void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { Unimplemented(); }
+
+void InterpreterMacroAssembler::verify_method_data_pointer() { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
+ int constant,
+ Register value) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
+ int constant,
+ bool decrement) { Unimplemented(); }
+
+void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
+ bool decrement) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
+ Register reg,
+ int constant,
+ bool decrement) { Unimplemented(); }
+
+void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
+ int flag_byte_constant) { Unimplemented(); }
+
+
+
+void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
+ int offset,
+ Register value,
+ Register test_value_out,
+ Label& not_equal_continue) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
+ int offset_of_disp) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
+ Register reg,
+ int offset_of_disp) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
+ int constant) { Unimplemented(); }
+
+
+
+void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
+ Register bumped_count) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::profile_call(Register mdp) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::profile_final_call(Register mdp) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
+ Register mdp,
+ Register reg2,
+ bool receiver_can_be_null) { Unimplemented(); }
+
+// This routine creates a state machine for updating the multi-row
+// type profile at a virtual call site (or other type-sensitive bytecode).
+// The machine visits each row (of receiver/count) until the receiver type
+// is found, or until it runs out of rows. At the same time, it remembers
+// the location of the first empty row. (An empty row records null for its
+// receiver, and can be allocated for a newly-observed receiver type.)
+// Because there are two degrees of freedom in the state, a simple linear
+// search will not work; it must be a decision tree. Hence this helper
+// function is recursive, to generate the required tree structured code.
+// It's the interpreter, so we are trading off code space for speed.
+// See below for example code.
+void InterpreterMacroAssembler::record_klass_in_profile_helper(
+ Register receiver, Register mdp,
+ Register reg2, int start_row,
+ Label& done, bool is_virtual_call) { Unimplemented(); }
+
+// Example state machine code for three profile rows:
+// // main copy of decision tree, rooted at row[1]
+// if (row[0].rec == rec) { row[0].incr(); goto done; }
+// if (row[0].rec != NULL) {
+// // inner copy of decision tree, rooted at row[1]
+// if (row[1].rec == rec) { row[1].incr(); goto done; }
+// if (row[1].rec != NULL) {
+// // degenerate decision tree, rooted at row[2]
+// if (row[2].rec == rec) { row[2].incr(); goto done; }
+// if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
+// row[2].init(rec); goto done;
+// } else {
+// // remember row[1] is empty
+// if (row[2].rec == rec) { row[2].incr(); goto done; }
+// row[1].init(rec); goto done;
+// }
+// } else {
+// // remember row[0] is empty
+// if (row[1].rec == rec) { row[1].incr(); goto done; }
+// if (row[2].rec == rec) { row[2].incr(); goto done; }
+// row[0].init(rec); goto done;
+// }
+// done:
+
+void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
+ Register mdp, Register reg2,
+ bool is_virtual_call) { Unimplemented(); }
+
+void InterpreterMacroAssembler::profile_ret(Register return_bci,
+ Register mdp) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::profile_null_seen(Register mdp) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::profile_switch_default(Register mdp) { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::profile_switch_case(Register index,
+ Register mdp,
+ Register reg2) { Unimplemented(); }
+
+
+
+void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) { Unimplemented(); }
+
+void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { Unimplemented(); }
+#endif // !CC_INTERP
+
+
+void InterpreterMacroAssembler::notify_method_entry() { Unimplemented(); }
+
+
+void InterpreterMacroAssembler::notify_method_exit(
+ TosState state, NotifyMethodExitMode mode) { Unimplemented(); }
+
+// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
+void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
+ int increment, int mask,
+ Register scratch, bool preloaded,
+ Condition cond, Label* where) { Unimplemented(); }
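+
+// In pseudocode, the intended behaviour is roughly (a sketch only; the
+// method emits assembly for this, with 'cond' fixing the sense of the test):
+//
+//   int c = preloaded ? scratch : *counter_addr;
+//   c += increment;
+//   *counter_addr = c;
+//   if (((c & mask) <cond> 0) && where != NULL) goto *where;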
diff --git a/src/cpu/aarch64/vm/interp_masm_aarch64.hpp b/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
new file mode 100644
index 000000000..2949d766f
--- /dev/null
+++ b/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_INTERP_MASM_AARCH64_HPP
+#define CPU_AARCH64_VM_INTERP_MASM_AARCH64_HPP
+
+#include "assembler_aarch64.inline.hpp"
+#include "interpreter/invocationCounter.hpp"
+
+// This file specializes the assembler with interpreter-specific macros.
+
+
+class InterpreterMacroAssembler: public MacroAssembler {
+#ifndef CC_INTERP
+ protected:
+ // Interpreter specific version of call_VM_base
+ virtual void call_VM_leaf_base(address entry_point,
+ int number_of_arguments);
+
+ virtual void call_VM_base(Register oop_result,
+ Register java_thread,
+ Register last_java_sp,
+ address entry_point,
+ int number_of_arguments,
+ bool check_exceptions);
+
+ virtual void check_and_handle_popframe(Register java_thread);
+ virtual void check_and_handle_earlyret(Register java_thread);
+
+ // base routine for all dispatches
+ void dispatch_base(TosState state, address* table, bool verifyoop = true);
+#endif // CC_INTERP
+
+ public:
+ InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
+
+ void load_earlyret_value(TosState state);
+
+#ifdef CC_INTERP
+ void save_bcp() { /* not needed in c++ interpreter and harmless */ }
+ void restore_bcp() { /* not needed in c++ interpreter and harmless */ }
+
+ // Helpers for runtime call arguments/results
+ void get_method(Register reg);
+
+#else
+
+ // Interpreter-specific registers
+ void save_bcp();
+ void restore_bcp();
+ void restore_locals();
+
+ // Helpers for runtime call arguments/results
+ void get_method(Register reg);
+ void get_constant_pool(Register reg);
+ void get_constant_pool_cache(Register reg);
+ void get_cpool_and_tags(Register cpool, Register tags);
+ void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
+ void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
+ void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
+ void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
+ void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
+
+ void pop_ptr(Register r = r0);
+ void pop_i(Register r = r0);
+ void pop_l(Register r = r0);
+ void pop_f(FloatRegister r);
+ void pop_d(FloatRegister r);
+ void push_ptr(Register r = r0);
+ void push_i(Register r = r0);
+ void push_l(Register r = r0);
+ void push_f(FloatRegister r);
+ void push_d(FloatRegister r);
+
+ void pop(Register r );
+
+ void push(Register r );
+ void push(int32_t imm );
+
+ void pop(TosState state); // transition vtos -> state
+ void push(TosState state); // transition state -> vtos
+
+ void empty_expression_stack();
+
+ // Helpers for swap and dup
+ void load_ptr(int n, Register val);
+ void store_ptr(int n, Register val);
+
+ // Generate a subtype check: branch to ok_is_subtype if sub_klass is
+ // a subtype of super_klass.
+ void gen_subtype_check( Register sub_klass, Label &ok_is_subtype );
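+  //
+  // Typical use (a sketch; the register name is illustrative, and the
+  // superclass is expected in a fixed register, as in the x86 version):
+  //   Label ok_is_subtype;
+  //   gen_subtype_check(r_sub_klass, ok_is_subtype);
+  //   ... // fall through: not a subtype (e.g. throw ClassCastException)
+  //   bind(ok_is_subtype);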
+
+ // Dispatching
+ void dispatch_prolog(TosState state, int step = 0);
+ void dispatch_epilog(TosState state, int step = 0);
+  // dispatch via the bytecode register (assumed to be loaded already)
+  void dispatch_only(TosState state);
+  // dispatch via the normal table and the bytecode register (assumed loaded)
+  void dispatch_only_normal(TosState state);
+  void dispatch_only_noverify(TosState state);
+  // load the next bytecode from [bcp + step] and dispatch on it
+  void dispatch_next(TosState state, int step = 0);
+  // load the next bytecode from [bcp] and dispatch via the given table
+ void dispatch_via (TosState state, address* table);
+
+ // jump to an invoked target
+ void prepare_to_jump_from_interpreted();
+ void jump_from_interpreted(Register method, Register temp);
+
+
+ // Returning from interpreted functions
+ //
+ // Removes the current activation (incl. unlocking of monitors)
+ // and sets up the return address. This code is also used for
+ // exception unwindwing. In that case, we do not want to throw
+  // exception unwinding. In that case, we do not want to throw
+ // infinite rethrow exception loop.
+ // Additionally this code is used for popFrame and earlyReturn.
+ // In popFrame case we want to skip throwing an exception,
+ // installing an exception, and notifying jvmdi.
+ // In earlyReturn case we only want to skip throwing an exception
+ // and installing an exception.
+ void remove_activation(TosState state, Register ret_addr,
+ bool throw_monitor_exception = true,
+ bool install_monitor_exception = true,
+ bool notify_jvmdi = true);
+#endif // CC_INTERP
+
+ // Object locking
+ void lock_object (Register lock_reg);
+ void unlock_object(Register lock_reg);
+
+#ifndef CC_INTERP
+
+ // Interpreter profiling operations
+ void set_method_data_pointer_for_bcp();
+ void test_method_data_pointer(Register mdp, Label& zero_continue);
+ void verify_method_data_pointer();
+
+ void set_mdp_data_at(Register mdp_in, int constant, Register value);
+ void increment_mdp_data_at(Address data, bool decrement = false);
+ void increment_mdp_data_at(Register mdp_in, int constant,
+ bool decrement = false);
+ void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
+ bool decrement = false);
+ void increment_mask_and_jump(Address counter_addr,
+ int increment, int mask,
+ Register scratch, bool preloaded,
+ Condition cond, Label* where);
+ void set_mdp_flag_at(Register mdp_in, int flag_constant);
+ void test_mdp_data_at(Register mdp_in, int offset, Register value,
+ Register test_value_out,
+ Label& not_equal_continue);
+
+ void record_klass_in_profile(Register receiver, Register mdp,
+ Register reg2, bool is_virtual_call);
+ void record_klass_in_profile_helper(Register receiver, Register mdp,
+ Register reg2, int start_row,
+ Label& done, bool is_virtual_call);
+
+ void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
+ void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
+ void update_mdp_by_constant(Register mdp_in, int constant);
+ void update_mdp_for_ret(Register return_bci);
+
+ void profile_taken_branch(Register mdp, Register bumped_count);
+ void profile_not_taken_branch(Register mdp);
+ void profile_call(Register mdp);
+ void profile_final_call(Register mdp);
+ void profile_virtual_call(Register receiver, Register mdp,
+ Register scratch2,
+ bool receiver_can_be_null = false);
+ void profile_ret(Register return_bci, Register mdp);
+ void profile_null_seen(Register mdp);
+ void profile_typecheck(Register mdp, Register klass, Register scratch);
+ void profile_typecheck_failed(Register mdp);
+ void profile_switch_default(Register mdp);
+ void profile_switch_case(Register index_in_scratch, Register mdp,
+ Register scratch2);
+
+ // Debugging
+ // only if +VerifyOops && state == atos
+ void verify_oop(Register reg, TosState state = atos);
+ // only if +VerifyFPU && (state == ftos || state == dtos)
+ void verify_FPU(int stack_depth, TosState state = ftos);
+
+#endif // !CC_INTERP
+
+ typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
+
+ // support for jvmti/dtrace
+ void notify_method_entry();
+ void notify_method_exit(TosState state, NotifyMethodExitMode mode);
+};
+
+#endif // CPU_AARCH64_VM_INTERP_MASM_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp b/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp
new file mode 100644
index 000000000..300486520
--- /dev/null
+++ b/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_INTERPRETERGENERATOR_AARCH64_HPP
+#define CPU_AARCH64_VM_INTERPRETERGENERATOR_AARCH64_HPP
+
+
+// Generation of Interpreter
+//
+ friend class AbstractInterpreterGenerator;
+
+ private:
+
+ address generate_normal_entry(bool synchronized);
+ address generate_native_entry(bool synchronized);
+ address generate_abstract_entry(void);
+ address generate_method_handle_entry(void);
+ address generate_math_entry(AbstractInterpreter::MethodKind kind);
+ address generate_empty_entry(void);
+ address generate_accessor_entry(void);
+ address generate_Reference_get_entry();
+ void lock_method(void);
+ void generate_stack_overflow_check(void);
+
+ void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
+ void generate_counter_overflow(Label* do_continue);
+
+#endif // CPU_AARCH64_VM_INTERPRETERGENERATOR_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/interpreterRT_aarch64.cpp b/src/cpu/aarch64/vm/interpreterRT_aarch64.cpp
new file mode 100644
index 000000000..bd03edf7b
--- /dev/null
+++ b/src/cpu/aarch64/vm/interpreterRT_aarch64.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/universe.inline.hpp"
+#include "oops/methodOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/icache.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/signature.hpp"
+
+#define __ _masm->
+
+// Implementation of SignatureHandlerGenerator
+
+Register InterpreterRuntime::SignatureHandlerGenerator::from() { Unimplemented(); return r0; }
+Register InterpreterRuntime::SignatureHandlerGenerator::to() { Unimplemented(); return r0; }
+Register InterpreterRuntime::SignatureHandlerGenerator::temp() { Unimplemented(); return r0; }
+
+void InterpreterRuntime::SignatureHandlerGenerator::pass_int() { Unimplemented(); }
+
+void InterpreterRuntime::SignatureHandlerGenerator::pass_long() { Unimplemented(); }
+
+void InterpreterRuntime::SignatureHandlerGenerator::pass_float() { Unimplemented(); }
+
+void InterpreterRuntime::SignatureHandlerGenerator::pass_double() { Unimplemented(); }
+
+void InterpreterRuntime::SignatureHandlerGenerator::pass_object() { Unimplemented(); }
+
+void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) { Unimplemented(); }
+
+
+// Implementation of SignatureHandlerLibrary
+
+void SignatureHandlerLibrary::pd_set_handler(address handler) { Unimplemented(); }
+
+
+#ifdef _WIN64
+class SlowSignatureHandler
+ : public NativeSignatureIterator {
+ private:
+ address _from;
+ intptr_t* _to;
+ intptr_t* _reg_args;
+ intptr_t* _fp_identifiers;
+ unsigned int _num_args;
+
+ virtual void pass_int() { Unimplemented(); }
+
+ virtual void pass_long() { Unimplemented(); }
+
+ virtual void pass_object() { Unimplemented(); }
+
+ virtual void pass_float() { Unimplemented(); }
+
+ virtual void pass_double() { Unimplemented(); }
+
+ public:
+ SlowSignatureHandler(methodHandle method, address from, intptr_t* to)
+ : NativeSignatureIterator(method) { Unimplemented(); }
+};
+#else
+class SlowSignatureHandler
+ : public NativeSignatureIterator {
+ private:
+ address _from;
+ intptr_t* _to;
+ intptr_t* _int_args;
+ intptr_t* _fp_args;
+ intptr_t* _fp_identifiers;
+ unsigned int _num_int_args;
+ unsigned int _num_fp_args;
+
+ virtual void pass_int() { Unimplemented(); }
+
+ virtual void pass_long() { Unimplemented(); }
+
+ virtual void pass_object() { Unimplemented(); }
+
+ virtual void pass_float() { Unimplemented(); }
+
+ virtual void pass_double() { Unimplemented(); }
+
+ public:
+ SlowSignatureHandler(methodHandle method, address from, intptr_t* to)
+ : NativeSignatureIterator(method) { Unimplemented(); }
+};
+#endif
+
+
+IRT_ENTRY(address,
+ InterpreterRuntime::slow_signature_handler(JavaThread* thread,
+ methodOopDesc* method,
+ intptr_t* from,
+ intptr_t* to))
+ Unimplemented();
+ return 0;
+IRT_END
diff --git a/src/cpu/aarch64/vm/interpreterRT_aarch64.hpp b/src/cpu/aarch64/vm/interpreterRT_aarch64.hpp
new file mode 100644
index 000000000..f35f0122d
--- /dev/null
+++ b/src/cpu/aarch64/vm/interpreterRT_aarch64.hpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_INTERPRETERRT_AARCH64_HPP
+#define CPU_AARCH64_VM_INTERPRETERRT_AARCH64_HPP
+
+#include "memory/allocation.hpp"
+
+// native method calls
+
+class SignatureHandlerGenerator: public NativeSignatureIterator {
+ private:
+ MacroAssembler* _masm;
+#ifdef AMD64
+#ifdef _WIN64
+ unsigned int _num_args;
+#else
+ unsigned int _num_fp_args;
+ unsigned int _num_int_args;
+#endif // _WIN64
+ int _stack_offset;
+#else
+ void move(int from_offset, int to_offset);
+ void box(int from_offset, int to_offset);
+#endif // AMD64
+
+ void pass_int();
+ void pass_long();
+ void pass_float();
+#ifdef AMD64
+ void pass_double();
+#endif // AMD64
+ void pass_object();
+
+ public:
+ // Creation
+ SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+ _masm = new MacroAssembler(buffer);
+#ifdef AMD64
+#ifdef _WIN64
+ _num_args = (method->is_static() ? 1 : 0);
+ _stack_offset = (Argument::n_int_register_parameters_c+1)* wordSize; // don't overwrite return address
+#else
+ _num_int_args = (method->is_static() ? 1 : 0);
+ _num_fp_args = 0;
+ _stack_offset = wordSize; // don't overwrite return address
+#endif // _WIN64
+#endif // AMD64
+ }
+
+ // Code generation
+ void generate(uint64_t fingerprint);
+
+ // Code generation support
+ static Register from();
+ static Register to();
+ static Register temp();
+};
+
+#endif // CPU_AARCH64_VM_INTERPRETERRT_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/interpreter_aarch64.cpp b/src/cpu/aarch64/vm/interpreter_aarch64.cpp
new file mode 100644
index 000000000..c48ae58cb
--- /dev/null
+++ b/src/cpu/aarch64/vm/interpreter_aarch64.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/templateTable.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodDataOop.hpp"
+#include "oops/methodOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#endif
+
+#define __ _masm->
+
+
+address AbstractInterpreterGenerator::generate_slow_signature_handler() { Unimplemented(); return 0; }
+
+
+//
+// Various method entries
+//
+
+address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) { Unimplemented(); return 0; }
+
+
+// Abstract method entry
+// Attempt to execute abstract method. Throw exception
+address InterpreterGenerator::generate_abstract_entry(void) { Unimplemented(); return 0; }
+
+
+// Method handle invoker
+// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...)
+address InterpreterGenerator::generate_method_handle_entry(void) { Unimplemented(); return 0; }
+
+
+// Empty method, generate a very fast return.
+
+address InterpreterGenerator::generate_empty_entry(void) { Unimplemented(); return 0; }
+
+void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { Unimplemented(); }
diff --git a/src/cpu/aarch64/vm/interpreter_aarch64.hpp b/src/cpu/aarch64/vm/interpreter_aarch64.hpp
new file mode 100644
index 000000000..8a6169c0c
--- /dev/null
+++ b/src/cpu/aarch64/vm/interpreter_aarch64.hpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_INTERPRETER_AARCH64_HPP
+#define CPU_AARCH64_VM_INTERPRETER_AARCH64_HPP
+
+ public:
+ static Address::ScaleFactor stackElementScale() {
+ return NOT_LP64(Address::times_4) LP64_ONLY(Address::times_8);
+ }
+
+ // Offset from rsp (which points to the last stack element)
+ static int expr_offset_in_bytes(int i) { return stackElementSize * i; }
+
+ // Stack index relative to tos (which points at value)
+ static int expr_index_at(int i) { return stackElementWords * i; }
+
+ // Already negated by c++ interpreter
+ static int local_index_at(int i) {
+ assert(i <= 0, "local direction already negated");
+ return stackElementWords * i;
+ }
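+
+  // For example, on LP64 (stackElementWords == 1, stackElementSize == 8):
+  //   expr_offset_in_bytes(2) == 16  // element two slots above the last one
+  //   local_index_at(-1)      == -1  // one stack word from the locals base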
+
+#endif // CPU_AARCH64_VM_INTERPRETER_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/javaFrameAnchor_aarch64.hpp b/src/cpu/aarch64/vm/javaFrameAnchor_aarch64.hpp
new file mode 100644
index 000000000..77298e537
--- /dev/null
+++ b/src/cpu/aarch64/vm/javaFrameAnchor_aarch64.hpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_JAVAFRAMEANCHOR_AARCH64_HPP
+#define CPU_AARCH64_VM_JAVAFRAMEANCHOR_AARCH64_HPP
+
+private:
+
+ // FP value associated with _last_Java_sp:
+  intptr_t* volatile _last_Java_fp; // the pointer is volatile, not what it points to
+
+public:
+ // Each arch must define reset, save, restore
+ // These are used by objects that only care about:
+ // 1 - initializing a new state (thread creation, javaCalls)
+ // 2 - saving a current state (javaCalls)
+ // 3 - restoring an old state (javaCalls)
+
+ void clear(void) {
+ // clearing _last_Java_sp must be first
+ _last_Java_sp = NULL;
+ // fence?
+ _last_Java_fp = NULL;
+ _last_Java_pc = NULL;
+ }
+
+ void copy(JavaFrameAnchor* src) {
+ // In order to make sure the transition state is valid for "this"
+ // We must clear _last_Java_sp before copying the rest of the new data
+ //
+ // Hack Alert: Temporary bugfix for 4717480/4721647
+ // To act like previous version (pd_cache_state) don't NULL _last_Java_sp
+ // unless the value is changing
+ //
+ if (_last_Java_sp != src->_last_Java_sp)
+ _last_Java_sp = NULL;
+
+ _last_Java_fp = src->_last_Java_fp;
+ _last_Java_pc = src->_last_Java_pc;
+ // Must be last so profiler will always see valid frame if has_last_frame() is true
+ _last_Java_sp = src->_last_Java_sp;
+ }
+
+ // Always walkable
+ bool walkable(void) { return true; }
+  // Never anything to do since we are always walkable and can find the return addresses
+ void make_walkable(JavaThread* thread) { }
+
+ intptr_t* last_Java_sp(void) const { return _last_Java_sp; }
+
+ address last_Java_pc(void) { return _last_Java_pc; }
+
+private:
+
+ static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); }
+
+public:
+
+ void set_last_Java_sp(intptr_t* sp) { _last_Java_sp = sp; }
+
+ intptr_t* last_Java_fp(void) { return _last_Java_fp; }
+ // Assert (last_Java_sp == NULL || fp == NULL)
+ void set_last_Java_fp(intptr_t* fp) { _last_Java_fp = fp; }
+
+#endif // CPU_AARCH64_VM_JAVAFRAMEANCHOR_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp b/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp
new file mode 100644
index 000000000..84736825e
--- /dev/null
+++ b/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "assembler_aarch64.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "prims/jniFastGetField.hpp"
+#include "prims/jvm_misc.hpp"
+#include "runtime/safepoint.hpp"
+
+#define __ masm->
+
+#define BUFFER_SIZE (30*wordSize)
+
+// Instead of issuing a fence instruction for the LoadLoad barrier, we create
+// a data dependency between the loads, which is cheaper than a fence.
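+//
+// One common way to forge such a dependency (a sketch mirroring the x86 fast
+// accessors; the mnemonics below are illustrative, not this port's code) is
+// to xor the object register with the counter twice -- a value-preserving
+// no-op that orders the later field load after the counter load:
+//
+//   ldr  rcounter, [rcounter_addr]    // load safepoint counter
+//   eor  robj, robj, rcounter
+//   eor  robj, robj, rcounter         // robj ^ c ^ c == robj
+//   ldr  result, [robj, roffset]      // now depends on the counter load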
+
+// Common register usage:
+// rax/xmm0: result
+// c_rarg0: jni env
+// c_rarg1: obj
+// c_rarg2: jfield id
+
+// static const Register robj = r9;
+// static const Register rcounter = r10;
+// static const Register roffset = r11;
+// static const Register rcounter_addr = r11;
+
+// Warning: do not use rip relative addressing after the first counter load
+// since that may scratch r10!
+
+address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { Unimplemented(); return 0; }
+
+address JNI_FastGetField::generate_fast_get_boolean_field() { Unimplemented(); return 0; }
+
+address JNI_FastGetField::generate_fast_get_byte_field() { Unimplemented(); return 0; }
+
+address JNI_FastGetField::generate_fast_get_char_field() { Unimplemented(); return 0; }
+
+address JNI_FastGetField::generate_fast_get_short_field() { Unimplemented(); return 0; }
+
+address JNI_FastGetField::generate_fast_get_int_field() { Unimplemented(); return 0; }
+
+address JNI_FastGetField::generate_fast_get_long_field() { Unimplemented(); return 0; }
+
+address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) { Unimplemented(); return 0; }
+
+address JNI_FastGetField::generate_fast_get_float_field() { Unimplemented(); return 0; }
+
+address JNI_FastGetField::generate_fast_get_double_field() { Unimplemented(); return 0; }
diff --git a/src/cpu/aarch64/vm/jniTypes_aarch64.hpp b/src/cpu/aarch64/vm/jniTypes_aarch64.hpp
new file mode 100644
index 000000000..170cd6e3a
--- /dev/null
+++ b/src/cpu/aarch64/vm/jniTypes_aarch64.hpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_JNITYPES_AARCH64_HPP
+#define CPU_AARCH64_VM_JNITYPES_AARCH64_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oop.hpp"
+#include "prims/jni.h"
+
+// This file holds platform-dependent routines used to write primitive jni
+// types to the array of arguments passed into JavaCalls::call
+
+class JNITypes : AllStatic {
+ // These functions write a java primitive type (in native format)
+ // to a java stack slot array to be passed as an argument to JavaCalls:calls.
+ // I.e., they are functionally 'push' operations if they have a 'pos'
+ // formal parameter. Note that jlong's and jdouble's are written
+ // _in reverse_ of the order in which they appear in the interpreter
+ // stack. This is because call stubs (see stubGenerator_sparc.cpp)
+ // reverse the argument list constructed by JavaCallArguments (see
+ // javaCalls.hpp).
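+  //
+  // For example (a sketch): packing an (int, long) argument pair on LP64
+  // fills three slots, the long's payload landing one slot above its base:
+  //
+  //   intptr_t args[3]; int pos = 0;
+  //   JNITypes::put_int (42, args, pos);   // args[0] = 42;          pos -> 1
+  //   JNITypes::put_long(1L, args, pos);   // *(jlong*)&args[2] = 1; pos -> 3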
+
+private:
+
+#ifndef AMD64
+ // 32bit Helper routines.
+ static inline void put_int2r(jint *from, intptr_t *to) { *(jint *)(to++) = from[1];
+ *(jint *)(to ) = from[0]; }
+ static inline void put_int2r(jint *from, intptr_t *to, int& pos) { put_int2r(from, to + pos); pos += 2; }
+#endif // AMD64
+
+public:
+ // Ints are stored in native format in one JavaCallArgument slot at *to.
+ static inline void put_int(jint from, intptr_t *to) { *(jint *)(to + 0 ) = from; }
+ static inline void put_int(jint from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = from; }
+ static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }
+
+#ifdef AMD64
+ // Longs are stored in native format in one JavaCallArgument slot at
+ // *(to+1).
+ static inline void put_long(jlong from, intptr_t *to) {
+ *(jlong*) (to + 1) = from;
+ }
+
+ static inline void put_long(jlong from, intptr_t *to, int& pos) {
+ *(jlong*) (to + 1 + pos) = from;
+ pos += 2;
+ }
+
+ static inline void put_long(jlong *from, intptr_t *to, int& pos) {
+ *(jlong*) (to + 1 + pos) = *from;
+ pos += 2;
+ }
+#else
+ // Longs are stored in big-endian word format in two JavaCallArgument slots at *to.
+ // The high half is in *to and the low half in *(to+1).
+ static inline void put_long(jlong from, intptr_t *to) { put_int2r((jint *)&from, to); }
+ static inline void put_long(jlong from, intptr_t *to, int& pos) { put_int2r((jint *)&from, to, pos); }
+ static inline void put_long(jlong *from, intptr_t *to, int& pos) { put_int2r((jint *) from, to, pos); }
+#endif // AMD64
+
+ // Oops are stored in native format in one JavaCallArgument slot at *to.
+ static inline void put_obj(oop from, intptr_t *to) { *(oop *)(to + 0 ) = from; }
+ static inline void put_obj(oop from, intptr_t *to, int& pos) { *(oop *)(to + pos++) = from; }
+ static inline void put_obj(oop *from, intptr_t *to, int& pos) { *(oop *)(to + pos++) = *from; }
+
+ // Floats are stored in native format in one JavaCallArgument slot at *to.
+ static inline void put_float(jfloat from, intptr_t *to) { *(jfloat *)(to + 0 ) = from; }
+ static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; }
+ static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }
+
+#undef _JNI_SLOT_OFFSET
+#ifdef AMD64
+#define _JNI_SLOT_OFFSET 1
+ // Doubles are stored in native word format in one JavaCallArgument
+ // slot at *(to+1).
+ static inline void put_double(jdouble from, intptr_t *to) {
+ *(jdouble*) (to + 1) = from;
+ }
+
+ static inline void put_double(jdouble from, intptr_t *to, int& pos) {
+ *(jdouble*) (to + 1 + pos) = from;
+ pos += 2;
+ }
+
+ static inline void put_double(jdouble *from, intptr_t *to, int& pos) {
+ *(jdouble*) (to + 1 + pos) = *from;
+ pos += 2;
+ }
+#else
+#define _JNI_SLOT_OFFSET 0
+ // Doubles are stored in big-endian word format in two JavaCallArgument slots at *to.
+ // The high half is in *to and the low half in *(to+1).
+ static inline void put_double(jdouble from, intptr_t *to) { put_int2r((jint *)&from, to); }
+ static inline void put_double(jdouble from, intptr_t *to, int& pos) { put_int2r((jint *)&from, to, pos); }
+ static inline void put_double(jdouble *from, intptr_t *to, int& pos) { put_int2r((jint *) from, to, pos); }
+#endif // AMD64
+
+
+ // The get_xxx routines, on the other hand, actually _do_ fetch
+ // java primitive types from the interpreter stack.
+  // No need to worry about alignment on this platform.
+ static inline jint get_int (intptr_t *from) { return *(jint *) from; }
+ static inline jlong get_long (intptr_t *from) { return *(jlong *) (from + _JNI_SLOT_OFFSET); }
+ static inline oop get_obj (intptr_t *from) { return *(oop *) from; }
+ static inline jfloat get_float (intptr_t *from) { return *(jfloat *) from; }
+ static inline jdouble get_double(intptr_t *from) { return *(jdouble *)(from + _JNI_SLOT_OFFSET); }
+#undef _JNI_SLOT_OFFSET
+};
+
+#endif // CPU_AARCH64_VM_JNITYPES_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/jni_aarch64.h b/src/cpu/aarch64/vm/jni_aarch64.h
new file mode 100644
index 000000000..d724c8600
--- /dev/null
+++ b/src/cpu/aarch64/vm/jni_aarch64.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef _JAVASOFT_JNI_MD_H_
+#define _JAVASOFT_JNI_MD_H_
+
+#if defined(SOLARIS) || defined(LINUX) || defined(_ALLBSD_SOURCE)
+
+#if defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2)
+ #define JNIEXPORT __attribute__((visibility("default")))
+ #define JNIIMPORT __attribute__((visibility("default")))
+#else
+ #define JNIEXPORT
+ #define JNIIMPORT
+#endif
+
+ #define JNICALL
+ typedef int jint;
+#if defined(_LP64) && !defined(__APPLE__)
+ typedef long jlong;
+#else
+ /*
+ * On _LP64 __APPLE__ "long" and "long long" are both 64 bits,
+ * but we use the "long long" typedef to avoid complaints from
+ * the __APPLE__ compiler about fprintf formats.
+ */
+ typedef long long jlong;
+#endif
+
+#else
+ #define JNIEXPORT __declspec(dllexport)
+ #define JNIIMPORT __declspec(dllimport)
+ #define JNICALL __stdcall
+
+ typedef int jint;
+ typedef __int64 jlong;
+#endif
+
+typedef signed char jbyte;
+
+#endif /* !_JAVASOFT_JNI_MD_H_ */
diff --git a/src/cpu/aarch64/vm/methodHandles_aarch64.cpp b/src/cpu/aarch64/vm/methodHandles_aarch64.cpp
new file mode 100644
index 000000000..d65cf4222
--- /dev/null
+++ b/src/cpu/aarch64/vm/methodHandles_aarch64.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "memory/allocation.inline.hpp"
+#include "prims/methodHandles.hpp"
+
+#define __ _masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
+static RegisterOrConstant constant(int value) {
+ return RegisterOrConstant(value);
+}
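+// (A literal 0 converts equally well to Register -- a pointer typedef in
+// HotSpot -- and to an integral constant, so e.g. insert_arg_slots(_masm, 0,
+// ...) would be ambiguous; writing constant(0) pins the intended meaning.)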
+
+address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
+ address interpreted_entry) { Unimplemented(); return 0; }
+
+MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
+ address start_addr) { Unimplemented(); return 0; }
+
+// stack walking support
+
+frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) { Unimplemented(); return fr; }
+
+void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) { Unimplemented(); }
+
+oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) { Unimplemented(); return 0; }
+
+void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
+ // output params:
+ int* bounce_offset,
+ int* exception_offset,
+ int* frame_size_in_words) { Unimplemented(); }
+
+void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm,
+ Register rcx_recv,
+ Register rax_argv,
+ address return_handler,
+ Register rbx_temp) { Unimplemented(); }
+
+void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
+ Register rcx_recv,
+ Register new_sp_reg,
+ Register sender_pc_reg){ Unimplemented(); }
+
+// Emit code to verify that RBP is pointing at a valid ricochet frame.
+#ifndef PRODUCT
+enum {
+ ARG_LIMIT = 255, SLOP = 4,
+ // use this parameter for checking for garbage stack movements:
+ UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
+ // the slop defends against false alarms due to fencepost errors
+};
+#endif
+
+#ifdef ASSERT
+void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) { Unimplemented(); }
+#endif //ASSERT
+
+void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) { Unimplemented(); }
+
+void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr) { Unimplemented(); }
+
+void MethodHandles::load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr) { Unimplemented(); }
+
+void MethodHandles::load_stack_move(MacroAssembler* _masm,
+ Register rdi_stack_move,
+ Register rcx_amh,
+ bool might_be_negative) { Unimplemented(); }
+
+#ifdef ASSERT
+void MethodHandles::RicochetFrame::verify_offsets() { Unimplemented(); }
+
+void MethodHandles::RicochetFrame::verify() const { Unimplemented(); }
+#endif //ASSERT
+
+#ifdef ASSERT
+void MethodHandles::verify_argslot(MacroAssembler* _masm,
+ Register argslot_reg,
+ const char* error_message) { Unimplemented(); }
+
+void MethodHandles::verify_argslots(MacroAssembler* _masm,
+ RegisterOrConstant arg_slots,
+ Register arg_slot_base_reg,
+ bool negate_argslots,
+ const char* error_message) { Unimplemented(); }
+
+// Make sure that arg_slots has the same sign as the given direction.
+// If (and only if) arg_slots is an assembly-time constant, also allow it to be zero.
+void MethodHandles::verify_stack_move(MacroAssembler* _masm,
+ RegisterOrConstant arg_slots, int direction) { Unimplemented(); }
+
+void MethodHandles::verify_klass(MacroAssembler* _masm,
+ Register obj, KlassHandle klass,
+ const char* error_message) { Unimplemented(); }
+#endif //ASSERT
+
+void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp) { Unimplemented(); }
+
+// Code generation
+address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { Unimplemented(); return 0; }
+
+// Helper to insert argument slots into the stack.
+// arg_slots must be a multiple of stack_move_unit() and < 0
+// rax_argslot is decremented to point to the new (shifted) location of the argslot
+// But, rdx_temp ends up holding the original value of rax_argslot.
+void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
+ RegisterOrConstant arg_slots,
+ Register rax_argslot,
+ Register rbx_temp, Register rdx_temp) { Unimplemented(); }
+
+// Helper to remove argument slots from the stack.
+// arg_slots must be a multiple of stack_move_unit() and > 0
+void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
+ RegisterOrConstant arg_slots,
+ Register rax_argslot,
+ Register rbx_temp, Register rdx_temp) { Unimplemented(); }
+
+// Helper to copy argument slots to the top of the stack.
+// The sequence starts with rax_argslot and is counted by slot_count
+// slot_count must be a multiple of stack_move_unit() and >= 0
+// This function blows the temps but does not change rax_argslot.
+void MethodHandles::push_arg_slots(MacroAssembler* _masm,
+ Register rax_argslot,
+ RegisterOrConstant slot_count,
+ int skip_words_count,
+ Register rbx_temp, Register rdx_temp) { Unimplemented(); }
+
+// in-place movement; no change to rsp
+// blows rax_temp, rdx_temp
+void MethodHandles::move_arg_slots_up(MacroAssembler* _masm,
+ Register rbx_bottom, // invariant
+ Address top_addr, // can use rax_temp
+ RegisterOrConstant positive_distance_in_slots,
+ Register rax_temp, Register rdx_temp) { Unimplemented(); }
+
+// in-place movement; no change to rsp
+// blows rax_temp, rdx_temp
+void MethodHandles::move_arg_slots_down(MacroAssembler* _masm,
+ Address bottom_addr, // can use rax_temp
+ Register rbx_top, // invariant
+ RegisterOrConstant negative_distance_in_slots,
+ Register rax_temp, Register rdx_temp) { Unimplemented(); }
+
+// Copy from a field or array element to a stacked argument slot.
+// is_element (ignored) says whether caller is loading an array element instead of an instance field.
+void MethodHandles::move_typed_arg(MacroAssembler* _masm,
+ BasicType type, bool is_element,
+ Address slot_dest, Address value_src,
+ Register rbx_temp, Register rdx_temp) { Unimplemented(); }
+
+void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
+ Address return_slot) { Unimplemented(); }
+
+#ifndef PRODUCT
+#define DESCRIBE_RICOCHET_OFFSET(rf, name) \
+ values.describe(frame_no, (intptr_t *) (((uintptr_t)rf) + MethodHandles::RicochetFrame::name##_offset_in_bytes()), #name)
+
+void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) { Unimplemented(); }
+#endif // PRODUCT
+
+#ifndef PRODUCT
+extern "C" void print_method_handle(oop mh);
+void trace_method_handle_stub(const char* adaptername,
+ oop mh,
+ intptr_t* saved_regs,
+ intptr_t* entry_sp) { Unimplemented(); }
+
+// The stub wraps the arguments in a struct on the stack to avoid
+// dealing with the different calling conventions for passing 6
+// arguments.
+struct MethodHandleStubArguments {
+ const char* adaptername;
+ oopDesc* mh;
+ intptr_t* saved_regs;
+ intptr_t* entry_sp;
+};
+void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) { Unimplemented(); }
+
+void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) { Unimplemented(); }
+#endif //PRODUCT
+
+// which conversion op types are implemented here?
+int MethodHandles::adapter_conversion_ops_supported_mask() { Unimplemented(); return 0; }
+
+//------------------------------------------------------------------------------
+// MethodHandles::generate_method_handle_stub
+//
+// Generate an "entry" field for a method handle.
+// This determines how the method handle will respond to calls.
+void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) { Unimplemented(); }
diff --git a/src/cpu/aarch64/vm/methodHandles_aarch64.hpp b/src/cpu/aarch64/vm/methodHandles_aarch64.hpp
new file mode 100644
index 000000000..28578d482
--- /dev/null
+++ b/src/cpu/aarch64/vm/methodHandles_aarch64.hpp
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Platform-specific definitions for method handles.
+// These definitions are inlined into class MethodHandles.
+
+// Adapters
+enum /* platform_dependent_constants */ {
+ adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 15000)) LP64_ONLY(32000 DEBUG_ONLY(+ 120000))
+};
+
+public:
+
+// The stack just after the recursive call from a ricochet frame
+// looks something like this. Offsets are marked in words, not bytes.
+// rsi (r13 on LP64) is part of the interpreter calling sequence
+// which tells the callee where my real rsp is (for frame walking).
+// (...lower memory addresses)
+// rsp: [ return pc ] always the global RicochetBlob::bounce_addr
+// rsp+1: [ recursive arg N ]
+// rsp+2: [ recursive arg N-1 ]
+// ...
+// rsp+N: [ recursive arg 1 ]
+// rsp+N+1: [ recursive method handle ]
+// ...
+// rbp-6: [ cleanup continuation pc ] <-- (struct RicochetFrame)
+// rbp-5: [ saved target MH ] the MH we will call on the saved args
+// rbp-4: [ saved args layout oop ] an int[] array which describes argument layout
+// rbp-3: [ saved args pointer ] address of transformed adapter arg M (slot 0)
+// rbp-2: [ conversion ] information about how the return value is used
+// rbp-1: [ exact sender sp ] exact TOS (rsi/r13) of original sender frame
+// rbp+0: [ saved sender fp ] (for original sender of AMH)
+// rbp+1: [ saved sender pc ] (back to original sender of AMH)
+// rbp+2: [ transformed adapter arg M ] <-- (extended TOS of original sender)
+// rbp+3: [ transformed adapter arg M-1]
+// ...
+// rbp+M+1: [ transformed adapter arg 1 ]
+// rbp+M+2: [ padding ] <-- (rbp + saved args base offset)
+// ... [ optional padding]
+// (higher memory addresses...)
+//
+// The arguments originally passed by the original sender
+// are lost, and arbitrary amounts of stack motion might have
+// happened due to argument transformation.
+// (This is done by C2I/I2C adapters and non-direct method handles.)
+// This is why there is an unpredictable amount of memory between
+// the extended and exact TOS of the sender.
+// The ricochet adapter itself will also (in general) perform
+// transformations before the recursive call.
+//
+// The transformed and saved arguments, immediately above the saved
+// return PC, are a well-formed method handle invocation ready to execute.
+// When the GC needs to walk the stack, these arguments are described
+// via the saved arg types oop, an int[] array with a private format.
+// This array is derived from the type of the transformed adapter
+// method handle, which also sits at the base of the saved argument
+// bundle.  Since the GC may not be able to fish out the int[]
+// array, it is pushed explicitly on the stack.  This may be
+// an unnecessary expense.
+//
+// The following register conventions are significant at this point:
+// rsp the thread stack, as always; preserved by caller
+// rsi/r13 exact TOS of recursive frame (contents of [rbp-2])
+// rcx recursive method handle (contents of [rsp+N+1])
+// rbp preserved by caller (not used by caller)
+// Unless otherwise specified, all registers can be blown by the call.
+//
+// If this frame must be walked, the transformed adapter arguments
+// will be found with the help of the saved arguments descriptor.
+//
+// Therefore, the descriptor must match the referenced arguments.
+// The arguments must be followed by at least one word of padding,
+// which will be necessary to complete the final method handle call.
+// That word is not treated as holding an oop.
+//
+// Neither is the word pointed to by the return argument pointer
+// treated as an oop, even if it points to a saved argument.
+// This allows the saved argument list to have a "hole" in it
+// to receive an oop from the recursive call.
+// (The hole might temporarily contain RETURN_VALUE_PLACEHOLDER.)
+//
+// When the recursive callee returns, RicochetBlob::bounce_addr will
+// immediately jump to the continuation stored in the RF.
+// This continuation will merge the recursive return value
+// into the saved argument list. At that point, the original
+// rsi, rbp, and rsp will be reloaded, the ricochet frame will
+// disappear, and the final target of the adapter method handle
+// will be invoked on the transformed argument list.
+
+class RicochetFrame {
+ friend class MethodHandles;
+ friend class VMStructs;
+
+ private:
+ intptr_t* _continuation; // what to do when control gets back here
+ oopDesc* _saved_target; // target method handle to invoke on saved_args
+ oopDesc* _saved_args_layout; // caching point for MethodTypeForm.vmlayout cookie
+ intptr_t* _saved_args_base; // base of pushed arguments (slot 0, arg N) (-3)
+ intptr_t _conversion; // misc. information from original AdapterMethodHandle (-2)
+ intptr_t* _exact_sender_sp; // parallel to interpreter_frame_sender_sp (-1)
+ intptr_t* _sender_link; // *must* coincide with frame::link_offset (0)
+ address _sender_pc; // *must* coincide with frame::return_addr_offset (1)
+
+ public:
+ intptr_t* continuation() const { return _continuation; }
+ oop saved_target() const { return _saved_target; }
+ oop saved_args_layout() const { return _saved_args_layout; }
+ intptr_t* saved_args_base() const { return _saved_args_base; }
+ intptr_t conversion() const { return _conversion; }
+ intptr_t* exact_sender_sp() const { return _exact_sender_sp; }
+ intptr_t* sender_link() const { return _sender_link; }
+ address sender_pc() const { return _sender_pc; }
+
+ intptr_t* extended_sender_sp() const {
+ // The extended sender SP is above the current RicochetFrame.
+ return (intptr_t*) (((address) this) + sizeof(RicochetFrame));
+ }
+
+ intptr_t return_value_slot_number() const {
+ return adapter_conversion_vminfo(conversion());
+ }
+ BasicType return_value_type() const {
+ return adapter_conversion_dest_type(conversion());
+ }
+ bool has_return_value_slot() const {
+ return return_value_type() != T_VOID;
+ }
+ intptr_t* return_value_slot_addr() const {
+ assert(has_return_value_slot(), "");
+ return saved_arg_slot_addr(return_value_slot_number());
+ }
+ intptr_t* saved_target_slot_addr() const {
+ return saved_arg_slot_addr(saved_args_length());
+ }
+ intptr_t* saved_arg_slot_addr(int slot) const {
+ assert(slot >= 0, "");
+ return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) );
+ }
+
+ jint saved_args_length() const;
+ jint saved_arg_offset(int arg) const;
+
+ // GC interface
+ oop* saved_target_addr() { return (oop*)&_saved_target; }
+ oop* saved_args_layout_addr() { return (oop*)&_saved_args_layout; }
+
+ oop compute_saved_args_layout(bool read_cache, bool write_cache);
+
+ // Compiler/assembler interface.
+ static int continuation_offset_in_bytes() { return offset_of(RicochetFrame, _continuation); }
+ static int saved_target_offset_in_bytes() { return offset_of(RicochetFrame, _saved_target); }
+ static int saved_args_layout_offset_in_bytes(){ return offset_of(RicochetFrame, _saved_args_layout); }
+ static int saved_args_base_offset_in_bytes() { return offset_of(RicochetFrame, _saved_args_base); }
+ static int conversion_offset_in_bytes() { return offset_of(RicochetFrame, _conversion); }
+ static int exact_sender_sp_offset_in_bytes() { return offset_of(RicochetFrame, _exact_sender_sp); }
+ static int sender_link_offset_in_bytes() { return offset_of(RicochetFrame, _sender_link); }
+ static int sender_pc_offset_in_bytes() { return offset_of(RicochetFrame, _sender_pc); }
+
+ // This value is not used for much, but it apparently must be nonzero.
+ static int frame_size_in_bytes() { return sender_link_offset_in_bytes(); }
+
+#ifdef ASSERT
+ // The magic number is supposed to help find ricochet frames within the bytes of stack dumps.
+ enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E };
+ static int magic_number_1_offset_in_bytes() { return -wordSize; }
+ static int magic_number_2_offset_in_bytes() { return sizeof(RicochetFrame); }
+ intptr_t magic_number_1() const { return *(intptr_t*)((address)this + magic_number_1_offset_in_bytes()); };
+ intptr_t magic_number_2() const { return *(intptr_t*)((address)this + magic_number_2_offset_in_bytes()); };
+#endif //ASSERT
+
+ enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) };
+
+ static void verify_offsets() NOT_DEBUG_RETURN;
+ void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc.
+ void zap_arguments() NOT_DEBUG_RETURN;
+
+ static void generate_ricochet_blob(MacroAssembler* _masm,
+ // output params:
+ int* bounce_offset,
+ int* exception_offset,
+ int* frame_size_in_words);
+
+ static void enter_ricochet_frame(MacroAssembler* _masm,
+ Register rcx_recv,
+ Register rax_argv,
+ address return_handler,
+ Register rbx_temp);
+ static void leave_ricochet_frame(MacroAssembler* _masm,
+ Register rcx_recv,
+ Register new_sp_reg,
+ Register sender_pc_reg);
+
+ static Address frame_address(int offset = 0);
+
+ static RicochetFrame* from_frame(const frame& fr);
+
+ static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
+
+ static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
+};
+
+// Additional helper methods for MethodHandles code generation:
+public:
+ static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg);
+ static void load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr);
+ static void load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr);
+
+ static void load_stack_move(MacroAssembler* _masm,
+ Register rdi_stack_move,
+ Register rcx_amh,
+ bool might_be_negative);
+
+ static void insert_arg_slots(MacroAssembler* _masm,
+ RegisterOrConstant arg_slots,
+ Register rax_argslot,
+ Register rbx_temp, Register rdx_temp);
+
+ static void remove_arg_slots(MacroAssembler* _masm,
+ RegisterOrConstant arg_slots,
+ Register rax_argslot,
+ Register rbx_temp, Register rdx_temp);
+
+ static void push_arg_slots(MacroAssembler* _masm,
+ Register rax_argslot,
+ RegisterOrConstant slot_count,
+ int skip_words_count,
+ Register rbx_temp, Register rdx_temp);
+
+ static void move_arg_slots_up(MacroAssembler* _masm,
+ Register rbx_bottom, // invariant
+ Address top_addr, // can use rax_temp
+ RegisterOrConstant positive_distance_in_slots,
+ Register rax_temp, Register rdx_temp);
+
+ static void move_arg_slots_down(MacroAssembler* _masm,
+ Address bottom_addr, // can use rax_temp
+ Register rbx_top, // invariant
+ RegisterOrConstant negative_distance_in_slots,
+ Register rax_temp, Register rdx_temp);
+
+ static void move_typed_arg(MacroAssembler* _masm,
+ BasicType type, bool is_element,
+ Address slot_dest, Address value_src,
+ Register rbx_temp, Register rdx_temp);
+
+ static void move_return_value(MacroAssembler* _masm, BasicType type,
+ Address return_slot);
+
+ static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
+ const char* error_message) NOT_DEBUG_RETURN;
+
+ static void verify_argslots(MacroAssembler* _masm,
+ RegisterOrConstant argslot_count,
+ Register argslot_reg,
+ bool negate_argslot,
+ const char* error_message) NOT_DEBUG_RETURN;
+
+ static void verify_stack_move(MacroAssembler* _masm,
+ RegisterOrConstant arg_slots,
+ int direction) NOT_DEBUG_RETURN;
+
+ static void verify_klass(MacroAssembler* _masm,
+ Register obj, KlassHandle klass,
+ const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
+
+ static void verify_method_handle(MacroAssembler* _masm, Register mh_reg) {
+ verify_klass(_masm, mh_reg, SystemDictionaryHandles::MethodHandle_klass(),
+ "reference is a MH");
+ }
+
+ // Similar to InterpreterMacroAssembler::jump_from_interpreted.
+ // Takes care of special dispatch from single stepping too.
+ static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp);
+
+ static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
+
diff --git a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp
new file mode 100644
index 000000000..4cd610547
--- /dev/null
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "assembler_aarch64.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "nativeInst_aarch64.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/handles.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "utilities/ostream.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#endif
+
+void NativeInstruction::wrote(int offset) { Unimplemented(); }
+
+
+void NativeCall::verify() { Unimplemented(); }
+
+address NativeCall::destination() const { Unimplemented(); return 0; }
+
+void NativeCall::print() { Unimplemented(); }
+
+// Inserts a native call instruction at a given pc
+void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }
+
+// MT-safe patching of a call instruction.
+// First patches the first word of the instruction with two jmps that jump
+// to themselves (a spinlock). Then patches the last byte, and then
+// atomically replaces the jmps with the first 4 bytes of the new instruction.
+void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) { Unimplemented(); }
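+// Illustrative sketch (not part of this skeleton) of the spinlock protocol
+// described above, assuming `instr_addr` is 4-byte aligned, `n_bytes` is the
+// patch size, and a helper `spin_word()` encodes the two self-branching jmps
+// (all three names are hypothetical):
+//
+//   *(volatile jint*) instr_addr = spin_word();            // park executors
+//   memcpy(instr_addr + 4, code_buffer + 4, n_bytes - 4);  // patch the tail
+//   OrderAccess::fence();
+//   *(volatile jint*) instr_addr = *(jint*) code_buffer;   // publish
+//
+// On AArch64 every instruction is a single aligned 32-bit word, so the port
+// can likely replace this protocol with one atomic store plus an I-cache flush.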
+
+
+// Similar to replace_mt_safe, but just changes the destination. The
+// important thing is that free-running threads are able to execute this
+// call instruction at all times. If the displacement field is aligned
+// we can simply rely on atomicity of 32-bit writes to make sure other threads
+// will see no intermediate states. Otherwise, the first two bytes of the
+// call are guaranteed to be aligned, and can be atomically patched to a
+// self-loop to guard the instruction while we change the other bytes.
+
+// We cannot rely on locks here, since the free-running threads must run at
+// full speed.
+//
+// Used in the runtime linkage of calls; see class CompiledIC.
+// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
+void NativeCall::set_destination_mt_safe(address dest) { Unimplemented(); }
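+// Illustrative sketch (not part of this skeleton): with an aligned
+// displacement the update above reduces to a single atomic word store,
+//
+//   assert(((intptr_t) displacement_address() & 3) == 0, "aligned");
+//   *(volatile jint*) displacement_address() =
+//       (jint) (dest - return_address());
+//
+// while the unaligned case needs the self-loop guard described above.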
+
+
+void NativeMovConstReg::verify() { Unimplemented(); }
+
+
+void NativeMovConstReg::print() { Unimplemented(); }
+
+//-------------------------------------------------------------------
+
+int NativeMovRegMem::instruction_start() const { Unimplemented(); return 0; }
+
+address NativeMovRegMem::instruction_address() const { Unimplemented(); return 0; }
+
+address NativeMovRegMem::next_instruction_address() const { Unimplemented(); return 0; }
+
+int NativeMovRegMem::offset() const { Unimplemented(); return 0; }
+
+void NativeMovRegMem::set_offset(int x) { Unimplemented(); }
+
+void NativeMovRegMem::verify() { Unimplemented(); }
+
+
+void NativeMovRegMem::print() { Unimplemented(); }
+
+//-------------------------------------------------------------------
+
+void NativeLoadAddress::verify() { Unimplemented(); }
+
+
+void NativeLoadAddress::print() { Unimplemented(); }
+
+//--------------------------------------------------------------------------------
+
+void NativeJump::verify() { Unimplemented(); }
+
+
+void NativeJump::insert(address code_pos, address entry) { Unimplemented(); }
+
+void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) { Unimplemented(); }
+
+
+// MT-safe insertion of a jump over an unknown instruction sequence (used by nmethod::makeZombie).
+// The problem: jmp <dest> is a 5-byte instruction, but writes are atomic only up to 4 bytes.
+// First patches the first word atomically to be a jump to itself.
+// Then patches the last byte and then atomically patches the first word (4 bytes),
+// thus inserting the desired jump.
+// This code is mt-safe under the following conditions: the entry point is 4-byte aligned,
+// the entry point is in the same cache line as the unverified entry point, and the
+// instruction being patched is >= 5 bytes (the size of the patch).
+//
+// In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
+// In C1 the restriction is enforced by CodeEmitter::method_entry
+//
+void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) { Unimplemented(); }
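+// Note for the AArch64 port: every instruction is an aligned 32-bit word, so
+// the 5-byte x86 protocol above should collapse to a single atomic store of
+// a branch instruction followed by an I-cache flush. A sketch, assuming a
+// hypothetical helper `encode_branch(from, to)` that emits a B-immediate:
+//
+//   *(volatile jint*) verified_entry = encode_branch(verified_entry, dest);
+//   ICache::invalidate_range(verified_entry, BytesPerInt);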
+
+void NativePopReg::insert(address code_pos, Register reg) { Unimplemented(); }
+
+
+void NativeIllegalInstruction::insert(address code_pos) { Unimplemented(); }
+
+void NativeGeneralJump::verify() { Unimplemented(); }
+
+
+void NativeGeneralJump::insert_unconditional(address code_pos, address entry) { Unimplemented(); }
+
+
+// MT-safe patching of a long jump instruction.
+// First patches the first word of the instruction with two jmps that jump
+// to themselves (a spinlock). Then patches the last byte, and then
+// atomically replaces the jmps with the first 4 bytes of the new instruction.
+void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) { Unimplemented(); }
+
+
+
+address NativeGeneralJump::jump_destination() const { Unimplemented(); return 0; }
+
+bool NativeInstruction::is_dtrace_trap() { Unimplemented(); return false; }
diff --git a/src/cpu/aarch64/vm/nativeInst_aarch64.hpp b/src/cpu/aarch64/vm/nativeInst_aarch64.hpp
new file mode 100644
index 000000000..b1af11d0d
--- /dev/null
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.hpp
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
+#define CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
+
+#include "asm/assembler.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/icache.hpp"
+#include "runtime/os.hpp"
+#include "utilities/top.hpp"
+
+// We have interfaces for the following instructions:
+// - NativeInstruction
+// - - NativeCall
+// - - NativeMovConstReg
+// - - NativeMovConstRegPatching
+// - - NativeMovRegMem
+// - - NativeMovRegMemPatching
+// - - NativeJump
+// - - NativeIllegalOpCode
+// - - NativeGeneralJump
+// - - NativeReturn
+// - - NativeReturnX (return with argument)
+// - - NativePushConst
+// - - NativeTstRegMem
+
+// The base class for different kinds of native instruction abstractions.
+// Provides the primitive operations to manipulate code relative to this.
+
+class NativeInstruction VALUE_OBJ_CLASS_SPEC {
+ friend class Relocation;
+ public:
+ enum { instruction_size = BytesPerWord };
+ bool is_nop();
+ bool is_dtrace_trap();
+ inline bool is_call();
+ inline bool is_illegal();
+ inline bool is_return();
+ inline bool is_jump();
+ inline bool is_cond_jump();
+ inline bool is_safepoint_poll();
+ inline bool is_mov_literal64();
+
+ protected:
+ address addr_at(int offset) const { return address(this) + offset; }
+
+ s_char sbyte_at(int offset) const { return *(s_char*) addr_at(offset); }
+ u_char ubyte_at(int offset) const { return *(u_char*) addr_at(offset); }
+
+ jint int_at(int offset) const { return *(jint*) addr_at(offset); }
+
+ intptr_t ptr_at(int offset) const { return *(intptr_t*) addr_at(offset); }
+
+ oop oop_at (int offset) const { return *(oop*) addr_at(offset); }
+
+
+ void set_char_at(int offset, char c) { *addr_at(offset) = (u_char)c; wrote(offset); }
+ void set_int_at(int offset, jint i) { *(jint*)addr_at(offset) = i; wrote(offset); }
+ void set_ptr_at (int offset, intptr_t ptr) { *(intptr_t*) addr_at(offset) = ptr; wrote(offset); }
+ void set_oop_at (int offset, oop o) { *(oop*) addr_at(offset) = o; wrote(offset); }
+
+ // This doesn't really do anything on Intel, but it is the place where
+ // cache invalidation belongs, generically:
+ void wrote(int offset);
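+  // On AArch64 the instruction cache is not coherent with the data cache,
+  // so an eventual implementation will need something on the order of
+  // (sketch, with `len` standing in for the number of bytes written):
+  //   ICache::invalidate_range(addr_at(offset), len);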
+
+ public:
+
+ // unit test stuff
+ static void test() {} // override for testing
+
+ inline friend NativeInstruction* nativeInstruction_at(address address);
+};
+
+inline NativeInstruction* nativeInstruction_at(address address) {
+ NativeInstruction* inst = (NativeInstruction*)address;
+#ifdef ASSERT
+ //inst->verify();
+#endif
+ return inst;
+}
+
+inline NativeCall* nativeCall_at(address address);
+// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
+// instructions (used to manipulate inline caches, primitive & dll calls, etc.).
+
+class NativeCall: public NativeInstruction {
+ public:
+ enum { cache_line_size = BytesPerWord }; // conservative estimate!
+ address instruction_address() const { Unimplemented(); return 0; }
+ address next_instruction_address() const { Unimplemented(); return 0; }
+ int displacement() const { Unimplemented(); return 0; }
+ address displacement_address() const { Unimplemented(); return 0; }
+ address return_address() const { Unimplemented(); return 0; }
+ address destination() const;
+ void set_destination(address dest) { Unimplemented(); }
+ void set_destination_mt_safe(address dest);
+
+ void verify_alignment() { Unimplemented(); }
+ void verify();
+ void print();
+
+ // Creation
+ inline friend NativeCall* nativeCall_at(address address);
+ inline friend NativeCall* nativeCall_before(address return_address);
+
+ static bool is_call_at(address instr) { Unimplemented(); return false; }
+
+ static bool is_call_before(address return_address) { Unimplemented(); return false; }
+
+ static bool is_call_to(address instr, address target) { Unimplemented(); return false; }
+
+ // MT-safe patching of a call instruction.
+ static void insert(address code_pos, address entry);
+
+ static void replace_mt_safe(address instr_addr, address code_buffer);
+};
+
+inline NativeCall* nativeCall_at(address address) { Unimplemented(); return 0; }
+
+inline NativeCall* nativeCall_before(address return_address) { Unimplemented(); return 0; }
+
+// An interface for accessing/manipulating native mov reg, imm32 instructions.
+// (used to manipulate inlined 32-bit data, DLL calls, etc.)
+class NativeMovConstReg: public NativeInstruction {
+ public:
+ address instruction_address() const { Unimplemented(); return 0; }
+ address next_instruction_address() const { Unimplemented(); return 0; }
+ intptr_t data() const { Unimplemented(); return 0; }
+ void set_data(intptr_t x) { Unimplemented(); };
+
+ void verify();
+ void print();
+
+ // unit test stuff
+ static void test() {}
+
+ // Creation
+ inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
+ inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
+};
+inline NativeMovConstReg* nativeMovConstReg_at(address address) { Unimplemented(); return 0; }
+
+class NativeMovConstRegPatching: public NativeMovConstReg {
+ private:
+ friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) { Unimplemented(); return 0; }
+};
+
+// An interface for accessing/manipulating native moves of the form:
+// mov[b/w/l/q] [reg + offset], reg (instruction_code_reg2mem)
+// mov[b/w/l/q] reg, [reg+offset] (instruction_code_mem2reg)
+// mov[s/z]x[w/b/q] [reg + offset], reg
+// fld_s [reg+offset]
+// fld_d [reg+offset]
+// fstp_s [reg + offset]
+// fstp_d [reg + offset]
+// mov_literal64 scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
+//
+// Warning: These routines must be able to handle any instruction sequences
+// that are generated as a result of the load/store byte,word,long
+// macros. For example: The load_unsigned_byte instruction generates
+// an xor reg,reg inst prior to generating the movb instruction. This
+// class must skip the xor instruction.
+
+class NativeMovRegMem: public NativeInstruction {
+ public:
+ // helper
+ int instruction_start() const;
+
+ address instruction_address() const;
+
+ address next_instruction_address() const;
+
+ int offset() const;
+
+ void set_offset(int x);
+
+ void add_offset_in_bytes(int add_offset) { Unimplemented(); }
+
+ void verify();
+ void print ();
+
+ // unit test stuff
+ static void test() {}
+
+ private:
+ inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
+};
+
+inline NativeMovRegMem* nativeMovRegMem_at (address address) { Unimplemented(); return 0; }
+
+class NativeMovRegMemPatching: public NativeMovRegMem {
+ private:
+ friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {Unimplemented(); return 0; }
+};
+
+// An interface for accessing/manipulating native leal instruction of form:
+// leal reg, [reg + offset]
+
+class NativeLoadAddress: public NativeMovRegMem {
+ static const bool has_rex = true;
+ static const int rex_size = 1;
+ public:
+
+ void verify();
+ void print ();
+
+ // unit test stuff
+ static void test() {}
+
+ private:
+ friend NativeLoadAddress* nativeLoadAddress_at (address address) { Unimplemented(); return 0; }
+};
+
+// jump rel32off
+
+class NativeJump: public NativeInstruction {
+ public:
+
+ address instruction_address() const { Unimplemented(); return 0; }
+ address next_instruction_address() const { Unimplemented(); return 0; }
+ address jump_destination() const { Unimplemented(); return 0; }
+
+ void set_jump_destination(address dest) { Unimplemented(); }
+
+ // Creation
+ inline friend NativeJump* nativeJump_at(address address);
+
+ void verify();
+
+ // Unit testing stuff
+ static void test() {}
+
+ // Insertion of native jump instruction
+ static void insert(address code_pos, address entry);
+ // MT-safe insertion of native jump at verified method entry
+ static void check_verified_entry_alignment(address entry, address verified_entry);
+ static void patch_verified_entry(address entry, address verified_entry, address dest);
+};
+
+inline NativeJump* nativeJump_at(address address) { Unimplemented(); return 0; };
+
+// Handles all kinds of jumps on Intel: long/far, conditional/unconditional.
+class NativeGeneralJump: public NativeInstruction {
+ public:
+ address instruction_address() const { Unimplemented(); return 0; }
+ address jump_destination() const;
+
+ // Creation
+ inline friend NativeGeneralJump* nativeGeneralJump_at(address address);
+
+ // Insertion of native general jump instruction
+ static void insert_unconditional(address code_pos, address entry);
+ static void replace_mt_safe(address instr_addr, address code_buffer);
+
+ void verify();
+};
+
+inline NativeGeneralJump* nativeGeneralJump_at(address address) { Unimplemented(); return 0; }
+
+class NativePopReg : public NativeInstruction {
+ public:
+ // Insert a pop instruction
+ static void insert(address code_pos, Register reg);
+};
+
+
+class NativeIllegalInstruction: public NativeInstruction {
+ public:
+ // Insert illegal opcode as specific address
+ static void insert(address code_pos);
+};
+
+// return instruction that does not pop values of the stack
+class NativeReturn: public NativeInstruction {
+ public:
+};
+
+// return instruction that does pop values of the stack
+class NativeReturnX: public NativeInstruction {
+ public:
+};
+
+// Simple test vs memory
+class NativeTstRegMem: public NativeInstruction {
+ public:
+};
+
+inline bool NativeInstruction::is_illegal() { Unimplemented(); return false; }
+inline bool NativeInstruction::is_call() { Unimplemented(); return false; }
+inline bool NativeInstruction::is_return() { Unimplemented(); return false; }
+inline bool NativeInstruction::is_jump() { Unimplemented(); return false; }
+inline bool NativeInstruction::is_cond_jump() { Unimplemented(); return false; }
+inline bool NativeInstruction::is_safepoint_poll() { Unimplemented(); return false; }
+
+inline bool NativeInstruction::is_mov_literal64() { Unimplemented(); return false; }
+
+#endif // CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/registerMap_aarch64.hpp b/src/cpu/aarch64/vm/registerMap_aarch64.hpp
new file mode 100644
index 000000000..5d91b1ba7
--- /dev/null
+++ b/src/cpu/aarch64/vm/registerMap_aarch64.hpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_REGISTERMAP_AARCH64_HPP
+#define CPU_AARCH64_VM_REGISTERMAP_AARCH64_HPP
+
+// machine-dependent implementation for register maps
+ friend class frame;
+
+ private:
+  // This is the hook for finding a register in a "well-known" location,
+ // such as a register block of a predetermined format.
+ // Since there is none, we just return NULL.
+ // See registerMap_sparc.hpp for an example of grabbing registers
+ // from register save areas of a standard layout.
+ address pd_location(VMReg reg) const {return NULL;}
+
+ // no PD state to clear or copy:
+ void pd_clear() {}
+ void pd_initialize() {}
+ void pd_initialize_from(const RegisterMap* map) {}
+
+#endif // CPU_AARCH64_VM_REGISTERMAP_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/register_aarch64.cpp b/src/cpu/aarch64/vm/register_aarch64.cpp
new file mode 100644
index 000000000..579ec35f2
--- /dev/null
+++ b/src/cpu/aarch64/vm/register_aarch64.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "register_aarch64.hpp"
+
+const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers << 1;
+
+
+const int ConcreteRegisterImpl::max_fpr = ConcreteRegisterImpl::max_gpr +
+ 2 * FloatRegisterImpl::number_of_registers;
+
+const char* RegisterImpl::name() const {
+  // placeholder table: only r0 is named so far; the remaining entries are
+  // value-initialized to NULL
+  const char* names[number_of_registers] = {
+    "r0"
+  };
+  return is_valid() ? names[encoding()] : "noreg";
+}
+
+const char* FloatRegisterImpl::name() const {
+ const char* names[number_of_registers] = {
+ "v0"
+ };
+ return is_valid() ? names[encoding()] : "noreg";
+}
diff --git a/src/cpu/aarch64/vm/register_aarch64.hpp b/src/cpu/aarch64/vm/register_aarch64.hpp
index bf9dd61c2..c44d747c1 100644
--- a/src/cpu/aarch64/vm/register_aarch64.hpp
+++ b/src/cpu/aarch64/vm/register_aarch64.hpp
@@ -222,6 +222,9 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
1 // flags
};
+ // added to make it compile
+ static const int max_gpr;
+ static const int max_fpr;
};
#endif // CPU_AARCH64_VM_REGISTER_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/register_definitions_aarch64.cpp b/src/cpu/aarch64/vm/register_definitions_aarch64.cpp
new file mode 100644
index 000000000..3043551ab
--- /dev/null
+++ b/src/cpu/aarch64/vm/register_definitions_aarch64.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "asm/register.hpp"
+#include "register_aarch64.hpp"
+#ifdef TARGET_ARCH_MODEL_x86_32
+# include "interp_masm_x86_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_x86_64
+# include "interp_masm_x86_64.hpp"
+#endif
+
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "interp_masm_aarch64.hpp"
+#endif
+
diff --git a/src/cpu/aarch64/vm/relocInfo_aarch64.cpp b/src/cpu/aarch64/vm/relocInfo_aarch64.cpp
new file mode 100644
index 000000000..357603b83
--- /dev/null
+++ b/src/cpu/aarch64/vm/relocInfo_aarch64.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.inline.hpp"
+#include "assembler_aarch64.inline.hpp"
+#include "code/relocInfo.hpp"
+#include "nativeInst_aarch64.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/safepoint.hpp"
+
+
+void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { Unimplemented(); }
+
+
+address Relocation::pd_call_destination(address orig_addr) { Unimplemented(); return 0; }
+
+
+void Relocation::pd_set_call_destination(address x) { Unimplemented(); }
+
+
+address* Relocation::pd_address_in_code() { Unimplemented(); return 0; }
+
+
+address Relocation::pd_get_address_from_code() { Unimplemented(); return 0; }
+
+int Relocation::pd_breakpoint_size() { Unimplemented(); return 0; }
+
+void Relocation::pd_swap_in_breakpoint(address x, short* instrs, int instrlen) { Unimplemented(); }
+
+
+void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen) { Unimplemented(); }
+
+void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { Unimplemented(); }
+
+void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { Unimplemented(); }
diff --git a/src/cpu/aarch64/vm/relocInfo_aarch64.hpp b/src/cpu/aarch64/vm/relocInfo_aarch64.hpp
new file mode 100644
index 000000000..32855b343
--- /dev/null
+++ b/src/cpu/aarch64/vm/relocInfo_aarch64.hpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_RELOCINFO_AARCH64_HPP
+#define CPU_AARCH64_VM_RELOCINFO_AARCH64_HPP
+
+ // machine-dependent parts of class relocInfo
+ private:
+ enum {
+ // Intel instructions are byte-aligned.
+ offset_unit = 1,
+
+ // Encodes Assembler::disp32_operand vs. Assembler::imm32_operand.
+#ifndef AMD64
+ format_width = 1
+#else
+ // vs Assembler::narrow_oop_operand.
+ format_width = 2
+#endif
+ };
+
+#endif // CPU_AARCH64_VM_RELOCINFO_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/runtime_aarch64.cpp b/src/cpu/aarch64/vm/runtime_aarch64.cpp
new file mode 100644
index 000000000..0f0b97273
--- /dev/null
+++ b/src/cpu/aarch64/vm/runtime_aarch64.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#ifdef COMPILER2
+#include "asm/assembler.hpp"
+#include "assembler_aarch64.inline.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/vmreg.hpp"
+#include "interpreter/interpreter.hpp"
+#include "nativeInst_aarch64.hpp"
+#include "opto/runtime.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "vmreg_aarch64.inline.hpp"
+#endif
+
+
+// This file should really contain the code for generating the OptoRuntime
+// exception_blob. However that code uses SimpleRuntimeFrame which only
+// exists in sharedRuntime_x86_64.cpp. When there is a sharedRuntime_<arch>.hpp
+// file and SimpleRuntimeFrame is able to move there then the exception_blob
+// code will move here where it belongs.
diff --git a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
new file mode 100644
index 000000000..ccc642c6d
--- /dev/null
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "assembler_aarch64.inline.hpp"
+#include "code/debugInfoRec.hpp"
+#include "code/icBuffer.hpp"
+#include "code/vtableStubs.hpp"
+#include "interpreter/interpreter.hpp"
+#include "oops/compiledICHolderOop.hpp"
+#include "prims/jvmtiRedefineClassesTrace.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/vframeArray.hpp"
+#include "vmreg_aarch64.inline.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#endif
+#ifdef COMPILER2
+#include "opto/runtime.hpp"
+#endif
+
+#define __ masm->
+
+const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
+
+class SimpleRuntimeFrame {
+
+ public:
+
+ // Most of the runtime stubs have this simple frame layout.
+ // This class exists to make the layout shared in one place.
+ // Offsets are for compiler stack slots, which are jints.
+ enum layout {
+ // The frame sender code expects that rbp will be in the "natural" place and
+ // will override any oopMap setting for it. We must therefore force the layout
+ // so that it agrees with the frame sender code.
+ rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
+ rbp_off2,
+ return_off, return_off2,
+ framesize
+ };
+};
+
+class RegisterSaver {
+ public:
+ static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
+ static void restore_live_registers(MacroAssembler* masm);
+
+ // Offsets into the register save area
+ // Used by deoptimization when it is managing result register
+ // values on its own
+
+ static int rax_offset_in_bytes(void) { Unimplemented(); return 0; }
+ static int rdx_offset_in_bytes(void) { Unimplemented(); return 0; }
+ static int rbx_offset_in_bytes(void) { Unimplemented(); return 0; }
+ static int xmm0_offset_in_bytes(void) { Unimplemented(); return 0; }
+ static int return_offset_in_bytes(void) { Unimplemented(); return 0; }
+
+ // During deoptimization only the result registers need to be restored,
+ // all the other values have already been extracted.
+ static void restore_result_registers(MacroAssembler* masm);
+};
+
+OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) { Unimplemented(); return 0; }
+
+void RegisterSaver::restore_live_registers(MacroAssembler* masm) { Unimplemented(); }
+
+void RegisterSaver::restore_result_registers(MacroAssembler* masm) { Unimplemented(); }
+
+// The java_calling_convention describes stack locations as ideal slots on
+// a frame with no abi restrictions. Since we must observe abi restrictions
+// (like the placement of the register window) the slots must be biased by
+// the following value.
+static int reg2offset_in(VMReg r) { Unimplemented(); return 0; }
+
+static int reg2offset_out(VMReg r) { Unimplemented(); return 0; }
+
+// ---------------------------------------------------------------------------
+// Read the array of BasicTypes from a signature, and compute where the
+// arguments should go. Values in the VMRegPair regs array refer to 4-byte
+// quantities. Values less than VMRegImpl::stack0 are registers, those above
+// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
+// as framesizes are fixed.
+// VMRegImpl::stack0 refers to the first slot 0(sp),
+// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Registers
+// up to RegisterImpl::number_of_registers are the 64-bit
+// integer registers.
+
+// Note: the INPUTS in sig_bt are in units of Java argument words, which are
+// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
+// units regardless of build. Of course for i486 there is no 64 bit build
+
+// The Java calling convention is a "shifted" version of the C ABI.
+// By skipping the first C ABI register we can call non-static jni methods
+// with small numbers of arguments without having to shuffle the arguments
+// at all. Since we control the java ABI we ought to at least get some
+// advantage out of it.
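+//
+// An illustrative (hypothetical) rendering of that shift, in the style the
+// x86_64 port uses: the Java argument registers are simply the C ones slid
+// down by one, so j_rarg<i> == c_rarg<i+1> and c_rarg0 stays free for the
+// JNIEnv* a native call will want:
+//
+//   const Register j_rarg0 = c_rarg1;
+//   const Register j_rarg1 = c_rarg2;
+//   // ... and so on, with the last j_rarg wrapping back to c_rarg0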
+
+int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
+ VMRegPair *regs,
+ int total_args_passed,
+ int is_outgoing) { Unimplemented(); return 0; }
+
+// Patch the callers callsite with entry to compiled code if it exists.
+static void patch_callers_callsite(MacroAssembler *masm) { Unimplemented(); }
+
+
+static void gen_c2i_adapter(MacroAssembler *masm,
+ int total_args_passed,
+ int comp_args_on_stack,
+ const BasicType *sig_bt,
+ const VMRegPair *regs,
+ Label& skip_fixup) { Unimplemented(); }
+
+static void gen_i2c_adapter(MacroAssembler *masm,
+ int total_args_passed,
+ int comp_args_on_stack,
+ const BasicType *sig_bt,
+ const VMRegPair *regs) { Unimplemented(); }
+
+// ---------------------------------------------------------------
+AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
+ int total_args_passed,
+ int comp_args_on_stack,
+ const BasicType *sig_bt,
+ const VMRegPair *regs,
+ AdapterFingerPrint* fingerprint) { Unimplemented(); return 0; }
+
+int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
+ VMRegPair *regs,
+ int total_args_passed) { Unimplemented(); return 0; }
+
+// On 64-bit we will store integer-like items to the stack as
+// 64-bit items (sparc abi), even though java would only store
+// 32 bits for a parameter. On 32-bit it will simply be 32 bits.
+// So this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
+static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { Unimplemented(); }
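+// Sketch (not part of this skeleton): on AArch64 the 32->64 widening of a
+// register argument is a single sign-extend, e.g.
+//
+//   sxtw  x0, w0   // sign-extend the 32-bit int into the 64-bit slot
+//
+// with memory-resident cases loading via ldrsw instead.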
+
+
+static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { Unimplemented(); }
+
+// An oop arg. Must pass a handle not the oop itself
+static void object_move(MacroAssembler* masm,
+ OopMap* map,
+ int oop_handle_offset,
+ int framesize_in_slots,
+ VMRegPair src,
+ VMRegPair dst,
+ bool is_receiver,
+ int* receiver_offset) { Unimplemented(); }
+
+// A float arg may have to do float reg int reg conversion
+static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { Unimplemented(); }
+
+// A long move
+static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { Unimplemented(); }
+
+// A double move
+static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { Unimplemented(); }
+
+
+void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { Unimplemented(); }
+
+void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { Unimplemented(); }
+
+static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) { Unimplemented(); }
+
+static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) { Unimplemented(); }
+
+
+static void save_or_restore_arguments(MacroAssembler* masm,
+ const int stack_slots,
+ const int total_in_args,
+ const int arg_save_area,
+ OopMap* map,
+ VMRegPair* in_regs,
+ BasicType* in_sig_bt) { Unimplemented(); }
+
+
+// Check GC_locker::needs_gc and enter the runtime if it's true. This
+// keeps a new JNI critical region from starting until a GC has been
+// forced. Save down any oops in registers and describe them in an
+// OopMap.
+static void check_needs_gc_for_critical_native(MacroAssembler* masm,
+ int stack_slots,
+ int total_c_args,
+ int total_in_args,
+ int arg_save_area,
+ OopMapSet* oop_maps,
+ VMRegPair* in_regs,
+ BasicType* in_sig_bt) { Unimplemented(); }
+
+// Unpack an array argument into a pointer to the body and the length
+// if the array is non-null, otherwise pass 0 for both.
+static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); }
+
+
+class ComputeMoveOrder: public StackObj {
+ class MoveOperation: public ResourceObj {
+ friend class ComputeMoveOrder;
+ private:
+ VMRegPair _src;
+ VMRegPair _dst;
+ int _src_index;
+ int _dst_index;
+ bool _processed;
+ MoveOperation* _next;
+ MoveOperation* _prev;
+
+ static int get_id(VMRegPair r) { Unimplemented(); return 0; }
+
+ public:
+ MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
+ _src(src)
+ , _src_index(src_index)
+ , _dst(dst)
+ , _dst_index(dst_index)
+ , _next(NULL)
+ , _prev(NULL)
+ , _processed(false) { Unimplemented(); }
+
+ VMRegPair src() const { Unimplemented(); return _src; }
+ int src_id() const { Unimplemented(); return 0; }
+ int src_index() const { Unimplemented(); return 0; }
+ VMRegPair dst() const { Unimplemented(); return _src; }
+ void set_dst(int i, VMRegPair dst) { Unimplemented(); }
+ int dst_index() const { Unimplemented(); return 0; }
+ int dst_id() const { Unimplemented(); return 0; }
+ MoveOperation* next() const { Unimplemented(); return 0; }
+ MoveOperation* prev() const { Unimplemented(); return 0; }
+ void set_processed() { Unimplemented(); }
+ bool is_processed() const { Unimplemented(); return 0; }
+
+ // insert
+ void break_cycle(VMRegPair temp_register) { Unimplemented(); }
+
+ void link(GrowableArray<MoveOperation*>& killer) { Unimplemented(); }
+ };
+
+ private:
+ GrowableArray<MoveOperation*> edges;
+
+ public:
+ ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
+ BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { Unimplemented(); }
+
+ // Collected all the move operations
+ void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { Unimplemented(); }
+
+ // Walk the edges breaking cycles between moves. The result list
+ // can be walked in order to produce the proper set of loads
+ GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { Unimplemented(); return 0; }
+};
+
+
+// ---------------------------------------------------------------------------
+// Generate a native wrapper for a given method. The method takes arguments
+// in the Java compiled code convention, marshals them to the native
+// convention (handlizes oops, etc), transitions to native, makes the call,
+// returns to java state (possibly blocking), unhandlizes any result and
+// returns.
+nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
+ methodHandle method,
+ int compile_id,
+ int total_in_args,
+ int comp_args_on_stack,
+ BasicType *in_sig_bt,
+ VMRegPair *in_regs,
+ BasicType ret_type) { Unimplemented(); return 0; }
+
+#ifdef HAVE_DTRACE_H
+// ---------------------------------------------------------------------------
+// Generate a dtrace nmethod for a given signature. The method takes arguments
+// in the Java compiled code convention, marshals them to the native
+// abi and then leaves nops at the position you would expect to call a native
+// function. When the probe is enabled the nops are replaced with a trap
+// instruction that dtrace inserts and the trace will cause a notification
+// to dtrace.
+//
+// The probes are only able to take primitive types and java/lang/String as
+// arguments. No other java types are allowed. Strings are converted to utf8
+// strings so that from dtrace point of view java strings are converted to C
+// strings. There is an arbitrary fixed limit on the total space that a method
+// can use for converting the strings (256 chars per string in the signature),
+// so any java string larger than this is truncated.
+
+static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
+static bool offsets_initialized = false;
+
+
+nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
+ methodHandle method) { Unimplemented(); return 0; }
+
+#endif // HAVE_DTRACE_H
+
+// this function returns the adjust size (in number of words) to a c2i adapter
+// activation for use during deoptimization
+int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) { Unimplemented(); return 0; }
+
+
+uint SharedRuntime::out_preserve_stack_slots() { Unimplemented(); return 0; }
+
+
+//------------------------------generate_deopt_blob----------------------------
+void SharedRuntime::generate_deopt_blob() { Unimplemented(); }
+
+#ifdef COMPILER2
+//------------------------------generate_uncommon_trap_blob--------------------
+void SharedRuntime::generate_uncommon_trap_blob() { Unimplemented(); }
+#endif // COMPILER2
+
+
+//------------------------------generate_handler_blob------
+//
+// Generate a special Compile2Runtime blob that saves all registers,
+// and setup oopmap.
+//
+SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) { Unimplemented(); return 0; }
+
+//
+// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
+//
+// Generate a stub that calls into vm to find out the proper destination
+// of a java call. All the argument registers are live at this point
+// but since this is generic code we don't know what they are and the caller
+// must do any gc of the args.
+//
+RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { Unimplemented(); return 0; }
+
+
+#ifdef COMPILER2
+// This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
+//
+//------------------------------generate_exception_blob---------------------------
+// Creates the exception blob at the end.
+// Compiled methods jump here through the exception blob
+// (see emit_exception_handler in the x86_64.ad file).
+//
+// Given an exception pc at a call we call into the runtime for the
+// handler in this method. This handler might merely restore state
+// (i.e. callee save registers) unwind the frame and jump to the
+// exception handler for the nmethod if there is no Java level handler
+// for the nmethod.
+//
+// This code is entered with a jmp.
+//
+// Arguments:
+// rax: exception oop
+// rdx: exception pc
+//
+// Results:
+// rax: exception oop
+// rdx: exception pc in caller or ???
+// destination: exception handler of caller
+//
+// Note: the exception pc MUST be at a call (precise debug information)
+// Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
+//
+
+void OptoRuntime::generate_exception_blob() { Unimplemented(); }
+#endif // COMPILER2
diff --git a/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp b/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
new file mode 100644
index 000000000..d2364e3be
--- /dev/null
+++ b/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
@@ -0,0 +1,626 @@
+/*
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "assembler_aarch64.inline.hpp"
+#include "interpreter/interpreter.hpp"
+#include "nativeInst_aarch64.hpp"
+#include "oops/instanceOop.hpp"
+#include "oops/methodOop.hpp"
+#include "oops/objArrayKlass.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "utilities/top.hpp"
+#ifdef TARGET_OS_FAMILY_linux
+# include "thread_linux.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_solaris
+# include "thread_solaris.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_windows
+# include "thread_windows.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_bsd
+# include "thread_bsd.inline.hpp"
+#endif
+#ifdef COMPILER2
+#include "opto/runtime.hpp"
+#endif
+
+// Declaration and definition of StubGenerator (no .hpp file).
+// For a more detailed description of the stub routine structure
+// see the comment in stubRoutines.hpp
+
+#define __ _masm->
+#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
+#define a__ ((Assembler*)_masm)->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
+
+// Stub Code definitions
+
+static address handle_unsafe_access() { Unimplemented(); return 0; }
+
+class StubGenerator: public StubCodeGenerator {
+ private:
+
+#ifdef PRODUCT
+#define inc_counter_np(counter) (0)
+#else
+ void inc_counter_np_(int& counter) { Unimplemented(); }
+#define inc_counter_np(counter) \
+ BLOCK_COMMENT("inc_counter " #counter); \
+ inc_counter_np_(counter);
+#endif
+
+
+ address generate_call_stub(address& return_address) { Unimplemented(); return 0; }
+
+ // Return point for a Java call if there's an exception thrown in
+ // Java code. The exception is caught and transformed into a
+ // pending exception stored in JavaThread that can be tested from
+ // within the VM.
+ //
+ // Note: Usually the parameters are removed by the callee. In case
+ // of an exception crossing an activation frame boundary, that is
+ // not the case if the callee is compiled code => need to setup the
+ // rsp.
+ //
+ // rax: exception oop
+
+ address generate_catch_exception() { Unimplemented(); return 0; }
+
+ // Continuation point for runtime calls returning with a pending
+ // exception. The pending exception check happened in the runtime
+ // or native call stub. The pending exception in Thread is
+ // converted into a Java-level exception.
+ //
+ // Contract with Java-level exception handlers:
+ // rax: exception
+ // rdx: throwing pc
+ //
+ // NOTE: At entry of this stub, exception-pc must be on stack !!
+
+ address generate_forward_exception() { Unimplemented(); return 0; }
+
+ // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
+ //
+ // Arguments :
+ // c_rarg0: exchange_value
+  //    c_rarg1: dest
+ //
+ // Result:
+ // *dest <- ex, return (orig *dest)
+ address generate_atomic_xchg() { Unimplemented(); return 0; }
+
+ // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
+ //
+ // Arguments :
+ // c_rarg0: exchange_value
+ // c_rarg1: dest
+ //
+ // Result:
+ // *dest <- ex, return (orig *dest)
+ address generate_atomic_xchg_ptr() { Unimplemented(); return 0; }
+
+ // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
+ // jint compare_value)
+ //
+ // Arguments :
+ // c_rarg0: exchange_value
+ // c_rarg1: dest
+ // c_rarg2: compare_value
+ //
+ // Result:
+  //    if ( compare_value == *dest ) {
+  //       *dest = exchange_value;
+  //       return compare_value;
+  //    } else {
+  //       return *dest;
+  //    }
+ address generate_atomic_cmpxchg() { Unimplemented(); return 0; }
+
+ // Support for jint atomic::atomic_cmpxchg_long(jlong exchange_value,
+ // volatile jlong* dest,
+ // jlong compare_value)
+ // Arguments :
+ // c_rarg0: exchange_value
+ // c_rarg1: dest
+ // c_rarg2: compare_value
+ //
+ // Result:
+  //    if ( compare_value == *dest ) {
+  //       *dest = exchange_value;
+  //       return compare_value;
+  //    } else {
+  //       return *dest;
+  //    }
+ address generate_atomic_cmpxchg_long() { Unimplemented(); return 0; }
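+  // Illustrative sketch (not part of this skeleton): on AArch64 the stub
+  // body would be a load-linked/store-conditional loop rather than x86's
+  // lock cmpxchg, along the lines of
+  //
+  //   0: ldaxr   w8, [x1]        // load-acquire exclusive *dest
+  //      cmp     w8, w2          // compare with compare_value
+  //      b.ne    1f              // mismatch: return the witnessed value
+  //      stlxr   w9, w0, [x1]    // store-release exchange_value
+  //      cbnz    w9, 0b          // lost the reservation: retry
+  //   1: mov     w0, w8          // result is the value seen in *dest
+  //
+  // with registers chosen per the AArch64 procedure call standard
+  // (c_rarg0 = x0/w0, c_rarg1 = x1, c_rarg2 = x2/w2).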
+
+ // Support for jint atomic::add(jint add_value, volatile jint* dest)
+ //
+ // Arguments :
+ // c_rarg0: add_value
+ // c_rarg1: dest
+ //
+ // Result:
+ // *dest += add_value
+ // return *dest;
+ address generate_atomic_add() { Unimplemented(); return 0; }
+
+ // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
+ //
+ // Arguments :
+ // c_rarg0: add_value
+ // c_rarg1: dest
+ //
+ // Result:
+ // *dest += add_value
+ // return *dest;
+ address generate_atomic_add_ptr() { Unimplemented(); return 0; }
+
+ // Support for intptr_t OrderAccess::fence()
+ //
+ // Arguments :
+ //
+ // Result:
+ address generate_orderaccess_fence() { Unimplemented(); return 0; }
+
+ // Support for intptr_t get_previous_fp()
+ //
+ // This routine is used to find the previous frame pointer for the
+ // caller (current_frame_guess). This is used as part of debugging
+  // when ps() is seemingly lost trying to find frames.
+  // This code assumes that the caller (current_frame_guess) has a frame.
+ address generate_get_previous_fp() { Unimplemented(); return 0; }
+
+ // Support for intptr_t get_previous_sp()
+ //
+ // This routine is used to find the previous stack pointer for the
+ // caller.
+ address generate_get_previous_sp() { Unimplemented(); return 0; }
+
+ //----------------------------------------------------------------------------------------------------
+ // Support for void verify_mxcsr()
+ //
+ // This routine is used with -Xcheck:jni to verify that native
+ // JNI code does not return to Java code without restoring the
+ // MXCSR register to our expected state.
+
+ address generate_verify_mxcsr() { Unimplemented(); return 0; }
+
+ address generate_f2i_fixup() { Unimplemented(); return 0; }
+
+ address generate_f2l_fixup() { Unimplemented(); return 0; }
+
+ address generate_d2i_fixup() { Unimplemented(); return 0; }
+
+ address generate_d2l_fixup() { Unimplemented(); return 0; }
+
+ address generate_fp_mask(const char *stub_name, int64_t mask) { Unimplemented(); return 0; }
+
+ // The following routine generates a subroutine to throw an
+ // asynchronous UnknownError when an unsafe access gets a fault that
+ // could not be reasonably prevented by the programmer. (Example:
+ // SIGBUS/OBJERR.)
+ address generate_handler_for_unsafe_access() { Unimplemented(); return 0; }
+
+ // Non-destructive plausibility checks for oops
+ //
+ // Arguments:
+ // all args on stack!
+ //
+ // Stack after saving c_rarg3:
+ // [tos + 0]: saved c_rarg3
+ // [tos + 1]: saved c_rarg2
+ // [tos + 2]: saved r12 (several TemplateTable methods use it)
+ // [tos + 3]: saved flags
+ // [tos + 4]: return address
+ // * [tos + 5]: error message (char*)
+ // * [tos + 6]: object to verify (oop)
+ // * [tos + 7]: saved rax - saved by caller and bashed
+ // * [tos + 8]: saved r10 (rscratch1) - saved by caller
+ // * = popped on exit
+ address generate_verify_oop() { Unimplemented(); return 0; }
+
+ //
+ // Verify that a register contains clean 32-bits positive value
+ // (high 32-bits are 0) so it could be used in 64-bits shifts.
+ //
+ // Input:
+ // Rint - 32-bits value
+ // Rtmp - scratch
+ //
+ void assert_clean_int(Register Rint, Register Rtmp) { Unimplemented(); }
+
+ // Generate overlap test for array copy stubs
+ //
+ // Input:
+ // c_rarg0 - from
+ // c_rarg1 - to
+ // c_rarg2 - element count
+ //
+ // Output:
+ // rax - &from[element count - 1]
+ //
+ void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) { Unimplemented(); }
+ void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) { Unimplemented(); }
+ void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) { Unimplemented(); }
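+
+  // A hedged sketch only: the predicate the overlap tests evaluate. A plain
+  // forward copy is unsafe exactly when the destination starts strictly
+  // inside the source range, in which case the conjoint (backward) copy
+  // entry must be used instead.
+  static bool arrays_overlap_sketch(const char* from, const char* to,
+                                    size_t byte_count) {
+    return to > from && to < from + byte_count;
+  }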
+
+ // Shuffle first three arg regs on Windows into Linux/Solaris locations.
+ //
+ // Outputs:
+ // rdi - rcx
+ // rsi - rdx
+ // rdx - r8
+ // rcx - r9
+ //
+  // On Windows, registers r9 and r10 are used to save rdi and rsi, which
+  // are non-volatile there. r9 and r10 should not be used by the caller.
+ //
+ void setup_arg_regs(int nargs = 3) { Unimplemented(); }
+
+ void restore_arg_regs() { Unimplemented(); }
+
+ // Generate code for an array write pre barrier
+ //
+ // addr - starting address
+ // count - element count
+ // tmp - scratch register
+ //
+ // Destroy no registers!
+ //
+ void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { Unimplemented(); }
+
+ //
+ // Generate code for an array write post barrier
+ //
+ // Input:
+ // start - register containing starting address of destination array
+ // end - register containing ending address of destination array
+ // scratch - scratch register
+ //
+ // The input registers are overwritten.
+ // The ending address is inclusive.
+ void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) { Unimplemented(); }
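+
+  // A hedged sketch only, with hypothetical parameters: for a card-table
+  // barrier set, the post barrier dirties every card spanned by
+  // [start, end] (end inclusive, as noted above). card_table_base and
+  // card_shift stand in for the real barrier-set values.
+  static void post_barrier_sketch(jbyte* card_table_base, intptr_t start,
+                                  intptr_t end, int card_shift) {
+    for (intptr_t c = start >> card_shift; c <= (end >> card_shift); c++) {
+      card_table_base[c] = 0; // 0 is dirty_card_val() in the real card table
+    }
+  }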
+
+
+ // Copy big chunks forward
+ //
+ // Inputs:
+  //   end_from     - source array's end address
+  //   end_to       - destination array's end address
+  //   qword_count  - 64-bit element count, negative
+ // to - scratch
+ // L_copy_32_bytes - entry label
+ // L_copy_8_bytes - exit label
+ //
+ void copy_32_bytes_forward(Register end_from, Register end_to,
+ Register qword_count, Register to,
+ Label& L_copy_32_bytes, Label& L_copy_8_bytes) { Unimplemented(); }
+
+
+ // Copy big chunks backward
+ //
+ // Inputs:
+  //   from         - source array's address
+  //   dest         - destination array's address
+  //   qword_count  - 64-bit element count
+ // to - scratch
+ // L_copy_32_bytes - entry label
+ // L_copy_8_bytes - exit label
+ //
+ void copy_32_bytes_backward(Register from, Register dest,
+ Register qword_count, Register to,
+ Label& L_copy_32_bytes, Label& L_copy_8_bytes) { Unimplemented(); }
+
+
+ // Arguments:
+ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
+ // ignored
+ // name - stub name string
+ //
+ // Inputs:
+ // c_rarg0 - source array address
+ // c_rarg1 - destination array address
+ // c_rarg2 - element count, treated as ssize_t, can be zero
+ //
+ // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
+ // we let the hardware handle it. The one to eight bytes within words,
+ // dwords or qwords that span cache line boundaries will still be loaded
+ // and stored atomically.
+ //
+ // Side Effects:
+ // disjoint_byte_copy_entry is set to the no-overlap entry point
+ // used by generate_conjoint_byte_copy().
+ //
+ address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) { Unimplemented(); return 0; }
+
+ // Arguments:
+ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
+ // ignored
+ // name - stub name string
+ //
+ // Inputs:
+ // c_rarg0 - source array address
+ // c_rarg1 - destination array address
+ // c_rarg2 - element count, treated as ssize_t, can be zero
+ //
+ // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
+ // we let the hardware handle it. The one to eight bytes within words,
+ // dwords or qwords that span cache line boundaries will still be loaded
+ // and stored atomically.
+ //
+ address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
+ address* entry, const char *name) { Unimplemented(); return 0; }
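+
+  // A hedged sketch only: modulo the alignment and atomicity requirements
+  // above, the disjoint stub may always copy forward, while the conjoint
+  // stub must pick a safe direction when the ranges overlap.
+  static void conjoint_byte_copy_sketch(const char* from, char* to,
+                                        size_t count) {
+    if (to <= from || to >= from + count) {
+      for (size_t i = 0; i < count; i++) to[i] = from[i];         // forward
+    } else {
+      for (size_t i = count; i > 0; i--) to[i - 1] = from[i - 1]; // backward
+    }
+  }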
+
+ // Arguments:
+ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
+ // ignored
+ // name - stub name string
+ //
+ // Inputs:
+ // c_rarg0 - source array address
+ // c_rarg1 - destination array address
+ // c_rarg2 - element count, treated as ssize_t, can be zero
+ //
+ // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
+ // let the hardware handle it. The two or four words within dwords
+ // or qwords that span cache line boundaries will still be loaded
+ // and stored atomically.
+ //
+ // Side Effects:
+ // disjoint_short_copy_entry is set to the no-overlap entry point
+ // used by generate_conjoint_short_copy().
+ //
+ address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { Unimplemented(); return 0; }
+
+ address generate_fill(BasicType t, bool aligned, const char *name) { Unimplemented(); return 0; }
+
+ // Arguments:
+ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
+ // ignored
+ // name - stub name string
+ //
+ // Inputs:
+ // c_rarg0 - source array address
+ // c_rarg1 - destination array address
+ // c_rarg2 - element count, treated as ssize_t, can be zero
+ //
+ // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
+ // let the hardware handle it. The two or four words within dwords
+ // or qwords that span cache line boundaries will still be loaded
+ // and stored atomically.
+ //
+ address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
+ address *entry, const char *name) { Unimplemented(); return 0; }
+
+ // Arguments:
+ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
+ // ignored
+ // is_oop - true => oop array, so generate store check code
+ // name - stub name string
+ //
+ // Inputs:
+ // c_rarg0 - source array address
+ // c_rarg1 - destination array address
+ // c_rarg2 - element count, treated as ssize_t, can be zero
+ //
+ // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
+ // the hardware handle it. The two dwords within qwords that span
+  // cache line boundaries will still be loaded and stored atomically.
+ //
+ // Side Effects:
+ // disjoint_int_copy_entry is set to the no-overlap entry point
+ // used by generate_conjoint_int_oop_copy().
+ //
+ address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
+ const char *name, bool dest_uninitialized = false) { Unimplemented(); return 0; }
+
+ // Arguments:
+ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
+ // ignored
+ // is_oop - true => oop array, so generate store check code
+ // name - stub name string
+ //
+ // Inputs:
+ // c_rarg0 - source array address
+ // c_rarg1 - destination array address
+ // c_rarg2 - element count, treated as ssize_t, can be zero
+ //
+ // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
+ // the hardware handle it. The two dwords within qwords that span
+  // cache line boundaries will still be loaded and stored atomically.
+ //
+ address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
+ address *entry, const char *name,
+ bool dest_uninitialized = false) { Unimplemented(); return 0; }
+
+ // Arguments:
+ // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
+ // ignored
+ // is_oop - true => oop array, so generate store check code
+ // name - stub name string
+ //
+ // Inputs:
+ // c_rarg0 - source array address
+ // c_rarg1 - destination array address
+ // c_rarg2 - element count, treated as ssize_t, can be zero
+ //
+ // Side Effects:
+ // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
+ // no-overlap entry point used by generate_conjoint_long_oop_copy().
+ //
+ address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
+ const char *name, bool dest_uninitialized = false) { Unimplemented(); return 0; }
+
+ // Arguments:
+ // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
+ // ignored
+ // is_oop - true => oop array, so generate store check code
+ // name - stub name string
+ //
+ // Inputs:
+ // c_rarg0 - source array address
+ // c_rarg1 - destination array address
+ // c_rarg2 - element count, treated as ssize_t, can be zero
+ //
+ address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
+ address nooverlap_target, address *entry,
+ const char *name, bool dest_uninitialized = false) { Unimplemented(); return 0; }
+
+
+ // Helper for generating a dynamic type check.
+ // Smashes no registers.
+ void generate_type_check(Register sub_klass,
+ Register super_check_offset,
+ Register super_klass,
+ Label& L_success) { Unimplemented(); }
+
+ //
+ // Generate checkcasting array copy stub
+ //
+ // Input:
+ // c_rarg0 - source array address
+ // c_rarg1 - destination array address
+ // c_rarg2 - element count, treated as ssize_t, can be zero
+ // c_rarg3 - size_t ckoff (super_check_offset)
+ // not Win64
+ // c_rarg4 - oop ckval (super_klass)
+ // Win64
+ // rsp+40 - oop ckval (super_klass)
+ //
+ // Output:
+ // rax == 0 - success
+ // rax == -1^K - failure, where K is partial transfer count
+ //
+ address generate_checkcast_copy(const char *name, address *entry,
+ bool dest_uninitialized = false) { Unimplemented(); return 0; }
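+
+  // A hedged sketch only: the failure encoding named above. On a partial
+  // transfer of K elements the stub returns -1^K, i.e. ~K, which is never
+  // 0, so callers can distinguish success from any partial failure.
+  static intptr_t checkcast_result_sketch(bool ok, intptr_t K) {
+    return ok ? 0 : ~K;
+  }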
+
+ //
+ // Generate 'unsafe' array copy stub
+ // Though just as safe as the other stubs, it takes an unscaled
+ // size_t argument instead of an element count.
+ //
+ // Input:
+ // c_rarg0 - source array address
+ // c_rarg1 - destination array address
+ // c_rarg2 - byte count, treated as ssize_t, can be zero
+ //
+ // Examines the alignment of the operands and dispatches
+ // to a long, int, short, or byte copy loop.
+ //
+ address generate_unsafe_copy(const char *name,
+ address byte_copy_entry, address short_copy_entry,
+ address int_copy_entry, address long_copy_entry) { Unimplemented(); return 0; }
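+
+  // A hedged sketch only: the dispatch the unsafe-copy stub performs,
+  // choosing the widest element width that the two addresses and the byte
+  // count are all aligned to.
+  static int unsafe_copy_width_sketch(intptr_t from, intptr_t to,
+                                      size_t byte_count) {
+    intptr_t bits = from | to | (intptr_t) byte_count;
+    if ((bits & 7) == 0) return 8; // long copy
+    if ((bits & 3) == 0) return 4; // int copy
+    if ((bits & 1) == 0) return 2; // short copy
+    return 1;                      // byte copy
+  }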
+
+ // Perform range checks on the proposed arraycopy.
+ // Kills temp, but nothing else.
+ // Also, clean the sign bits of src_pos and dst_pos.
+ void arraycopy_range_checks(Register src, // source array oop (c_rarg0)
+ Register src_pos, // source position (c_rarg1)
+                              Register dst,     // destination array oop (c_rarg2)
+ Register dst_pos, // destination position (c_rarg3)
+ Register length,
+ Register temp,
+ Label& L_failed) { Unimplemented(); }
+
+ //
+ // Generate generic array copy stubs
+ //
+ // Input:
+ // c_rarg0 - src oop
+ // c_rarg1 - src_pos (32-bits)
+ // c_rarg2 - dst oop
+ // c_rarg3 - dst_pos (32-bits)
+ // not Win64
+ // c_rarg4 - element count (32-bits)
+ // Win64
+ // rsp+40 - element count (32-bits)
+ //
+ // Output:
+ // rax == 0 - success
+ // rax == -1^K - failure, where K is partial transfer count
+ //
+ address generate_generic_copy(const char *name,
+ address byte_copy_entry, address short_copy_entry,
+ address int_copy_entry, address oop_copy_entry,
+ address long_copy_entry, address checkcast_copy_entry) { Unimplemented(); return 0; }
+
+ void generate_arraycopy_stubs() { Unimplemented(); }
+
+ void generate_math_stubs() { Unimplemented(); }
+
+#undef __
+#define __ masm->
+
+ // Continuation point for throwing of implicit exceptions that are
+ // not handled in the current activation. Fabricates an exception
+ // oop and initiates normal exception dispatching in this
+ // frame. Since we need to preserve callee-saved values (currently
+ // only for C2, but done for C1 as well) we need a callee-saved oop
+ // map and therefore have to make these stubs into RuntimeStubs
+ // rather than BufferBlobs. If the compiler needs all registers to
+ // be preserved between the fault point and the exception handler
+ // then it must assume responsibility for that in
+ // AbstractCompiler::continuation_for_implicit_null_exception or
+ // continuation_for_implicit_division_by_zero_exception. All other
+ // implicit exceptions (e.g., NullPointerException or
+ // AbstractMethodError on entry) are either at call sites or
+ // otherwise assume that stack unwinding will be initiated, so
+ // caller saved registers were assumed volatile in the compiler.
+ address generate_throw_exception(const char* name,
+ address runtime_entry,
+ Register arg1 = noreg,
+ Register arg2 = noreg) { Unimplemented(); return 0; }
+
+ // Initialization
+  void generate_initial() { Unimplemented(); }
+
+ void generate_all() { Unimplemented(); }
+
+ public:
+ StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
+ if (all) {
+ generate_all();
+ } else {
+ generate_initial();
+ }
+ }
+}; // end class declaration
+
+void StubGenerator_generate(CodeBuffer* code, bool all) {
+ StubGenerator g(code, all);
+}
diff --git a/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp b/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp
new file mode 100644
index 000000000..782dc9063
--- /dev/null
+++ b/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/stubRoutines.hpp"
+#ifdef TARGET_OS_FAMILY_linux
+# include "thread_linux.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_solaris
+# include "thread_solaris.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_windows
+# include "thread_windows.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_bsd
+# include "thread_bsd.inline.hpp"
+#endif
+
+// Implementation of the platform-specific part of StubRoutines - for
+// a description of how to extend it, see the stubRoutines.hpp file.
+
+address StubRoutines::x86::_get_previous_fp_entry = NULL;
+address StubRoutines::x86::_get_previous_sp_entry = NULL;
+
+address StubRoutines::x86::_verify_mxcsr_entry = NULL;
+
+address StubRoutines::x86::_f2i_fixup = NULL;
+address StubRoutines::x86::_f2l_fixup = NULL;
+address StubRoutines::x86::_d2i_fixup = NULL;
+address StubRoutines::x86::_d2l_fixup = NULL;
+address StubRoutines::x86::_float_sign_mask = NULL;
+address StubRoutines::x86::_float_sign_flip = NULL;
+address StubRoutines::x86::_double_sign_mask = NULL;
+address StubRoutines::x86::_double_sign_flip = NULL;
+address StubRoutines::x86::_mxcsr_std = NULL;
diff --git a/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp b/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp
new file mode 100644
index 000000000..7737c4eee
--- /dev/null
+++ b/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_STUBROUTINES_AARCH64_HPP
+#define CPU_AARCH64_VM_STUBROUTINES_AARCH64_HPP
+
+// This file holds the platform specific parts of the StubRoutines
+// definition. See stubRoutines.hpp for a description on how to
+// extend it.
+
+static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
+
+enum platform_dependent_constants {
+ code_size1 = 19000, // simply increase if too small (assembler will crash if too small)
+ code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
+};
+
+class x86 {
+ friend class StubGenerator;
+
+ private:
+ static address _get_previous_fp_entry;
+ static address _get_previous_sp_entry;
+ static address _verify_mxcsr_entry;
+
+ static address _f2i_fixup;
+ static address _f2l_fixup;
+ static address _d2i_fixup;
+ static address _d2l_fixup;
+
+ static address _float_sign_mask;
+ static address _float_sign_flip;
+ static address _double_sign_mask;
+ static address _double_sign_flip;
+ static address _mxcsr_std;
+
+ public:
+
+ static address get_previous_fp_entry()
+ {
+ return _get_previous_fp_entry;
+ }
+
+ static address get_previous_sp_entry()
+ {
+ return _get_previous_sp_entry;
+ }
+
+ static address verify_mxcsr_entry()
+ {
+ return _verify_mxcsr_entry;
+ }
+
+ static address f2i_fixup()
+ {
+ return _f2i_fixup;
+ }
+
+ static address f2l_fixup()
+ {
+ return _f2l_fixup;
+ }
+
+ static address d2i_fixup()
+ {
+ return _d2i_fixup;
+ }
+
+ static address d2l_fixup()
+ {
+ return _d2l_fixup;
+ }
+
+ static address float_sign_mask()
+ {
+ return _float_sign_mask;
+ }
+
+ static address float_sign_flip()
+ {
+ return _float_sign_flip;
+ }
+
+ static address double_sign_mask()
+ {
+ return _double_sign_mask;
+ }
+
+ static address double_sign_flip()
+ {
+ return _double_sign_flip;
+ }
+
+ static address mxcsr_std()
+ {
+ return _mxcsr_std;
+ }
+};
+
+#endif // CPU_AARCH64_VM_STUBROUTINES_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.hpp b/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.hpp
new file mode 100644
index 000000000..60e95057c
--- /dev/null
+++ b/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.hpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_TEMPLATEINTERPRETERGENERATOR_AARCH64_HPP
+#define CPU_AARCH64_VM_TEMPLATEINTERPRETERGENERATOR_AARCH64_HPP
+
+ protected:
+
+ void generate_fixed_frame(bool native_call);
+
+ // address generate_asm_interpreter_entry(bool synchronized);
+
+#endif // CPU_AARCH64_VM_TEMPLATEINTERPRETERGENERATOR_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp b/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
new file mode 100644
index 000000000..71b48b5ac
--- /dev/null
+++ b/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/templateTable.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodDataOop.hpp"
+#include "oops/methodOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#include <sys/types.h>
+
+#include "../../../../../../simulator/simulator.hpp"
+
+#define __ _masm->
+
+#ifndef CC_INTERP
+
+//-----------------------------------------------------------------------------
+
+extern "C" void entry(CodeBuffer*);
+
+//-----------------------------------------------------------------------------
+
+address TemplateInterpreterGenerator::generate_StackOverflowError_handler() { Unimplemented(); return 0; }
+
+address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
+ const char* name) { Unimplemented(); return 0; }
+
+address TemplateInterpreterGenerator::generate_ClassCastException_handler() { Unimplemented(); return 0; }
+
+address TemplateInterpreterGenerator::generate_exception_handler_common(
+ const char* name, const char* message, bool pass_oop) { Unimplemented(); return 0; }
+
+
+address TemplateInterpreterGenerator::generate_continuation_for(TosState state) { Unimplemented(); return 0; }
+
+
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) { Unimplemented(); return 0; }
+
+
+address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
+ int step) { Unimplemented(); return 0; }
+
+int AbstractInterpreter::BasicType_as_index(BasicType type) { Unimplemented(); return 0; }
+
+
+address TemplateInterpreterGenerator::generate_result_handler_for(
+ BasicType type) { Unimplemented(); return 0; }
+
+address TemplateInterpreterGenerator::generate_safept_entry_for(
+ TosState state,
+ address runtime_entry) { Unimplemented(); return 0; }
+
+
+
+// Helpers for commoning out cases in the various types of method entries.
+//
+
+
+// increment invocation count & check for overflow
+//
+// Note: checking for negative value instead of overflow
+// so we have a 'sticky' overflow test
+//
+// rbx: method
+// ecx: invocation counter
+//
+void InterpreterGenerator::generate_counter_incr(
+ Label* overflow,
+ Label* profile_method,
+ Label* profile_method_continue) { Unimplemented(); }
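+
+// A hedged sketch only: why the sign-bit test above is 'sticky' -- once the
+// counter has been driven negative it stays negative, so every later check
+// reports overflow as well.
+static inline bool sticky_overflow_sketch(jint counter) {
+  return counter < 0;
+}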
+
+void InterpreterGenerator::generate_counter_overflow(Label* do_continue) { Unimplemented(); }
+
+// See if we've got enough room on the stack for locals plus overhead.
+// The expression stack grows down incrementally, so the normal guard
+// page mechanism will work for that.
+//
+// NOTE: The additional locals are always pushed (this wasn't obvious in
+// generate_method_entry), so the guard should work for them too.
+//
+// Args:
+// rdx: number of additional locals this frame needs (what we must check)
+// rbx: methodOop
+//
+// Kills:
+// rax
+void InterpreterGenerator::generate_stack_overflow_check(void) { Unimplemented(); }
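+
+// A hedged sketch only, with hypothetical quantities: the shape of the
+// check -- fail if pushing the additional locals plus the fixed frame
+// overhead would take the stack pointer below the guard-adjusted limit.
+static inline bool stack_fits_sketch(intptr_t sp, intptr_t stack_limit,
+                                     size_t locals_bytes,
+                                     size_t overhead_bytes) {
+  return sp - (intptr_t)(locals_bytes + overhead_bytes) > stack_limit;
+}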
+
+// Allocate monitor and lock method (asm interpreter)
+//
+// Args:
+// rbx: methodOop
+// r14: locals
+//
+// Kills:
+// rax
+// c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
+// rscratch1, rscratch2 (scratch regs)
+void InterpreterGenerator::lock_method(void) { Unimplemented(); }
+
+// Generate a fixed interpreter frame. This is identical setup for
+// interpreted methods and for native methods hence the shared code.
+//
+// Args:
+// rax: return address
+// rbx: methodOop
+// r14: pointer to locals
+// r13: sender sp
+// rdx: cp cache
+void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { Unimplemented(); }
+
+// End of helpers
+
+// Various method entries
+//------------------------------------------------------------------------------------------------------------------------
+//
+//
+
+// Call an accessor method (assuming it is resolved; otherwise drop into
+// the vanilla (slow path) entry).
+address InterpreterGenerator::generate_accessor_entry(void) { Unimplemented(); return 0; }
+
+// Method entry for java.lang.ref.Reference.get.
+address InterpreterGenerator::generate_Reference_get_entry(void) { Unimplemented(); return 0; }
+
+
+// Interpreter stub for calling a native method. (asm interpreter)
+// This sets up a somewhat different looking stack for calling the
+// native method than the typical interpreter frame setup.
+address InterpreterGenerator::generate_native_entry(bool synchronized) { Unimplemented(); return 0; }
+
+//
+// Generic interpreted method entry to (asm) interpreter
+//
+address InterpreterGenerator::generate_normal_entry(bool synchronized) { Unimplemented(); return 0; }
+
+// Entry points
+//
+// Here we generate the various kinds of entries into the interpreter.
+// The two main entry types are generic bytecode methods and native
+// call methods. Both come in synchronized and non-synchronized
+// versions, but the frame layout they create is very similar. The
+// other method entry types are special purpose entries that combine
+// entry and interpretation all in one. These are for trivial methods
+// like accessor, empty, or special math methods.
+//
+// When control flow reaches any of the entry types for the interpreter
+// the following holds ->
+//
+// Arguments:
+//
+// rbx: methodOop
+//
+// Stack layout immediately at entry
+//
+// [ return address ] <--- rsp
+// [ parameter n ]
+// ...
+// [ parameter 1 ]
+// [ expression stack ] (caller's java expression stack)
+
+// Assuming that we don't go to one of the trivial specialized entries,
+// the stack will look like below when we are ready to execute the
+// first bytecode (or call the native routine). The register usage
+// will be as the template based interpreter expects (see
+// interpreter_amd64.hpp).
+//
+// Local variables follow the incoming parameters immediately; i.e.,
+// the return address is moved to the end of the locals.
+//
+// [ monitor entry ] <--- rsp
+// ...
+// [ monitor entry ]
+// [ expr. stack bottom ]
+// [ saved r13 ]
+// [ current r14 ]
+// [ methodOop ]
+// [ saved ebp ] <--- rbp
+// [ return address ]
+// [ local variable m ]
+// ...
+// [ local variable 1 ]
+// [ parameter n ]
+// ...
+// [ parameter 1 ] <--- r14
+
+address AbstractInterpreterGenerator::generate_method_entry(
+ AbstractInterpreter::MethodKind kind) { Unimplemented(); return 0; }
+
+// These should never be compiled since the interpreter will prefer
+// the compiled version to the intrinsic version.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) { Unimplemented(); return false; }
+
+// How much stack a method activation needs in words.
+int AbstractInterpreter::size_top_interpreter_activation(methodOop method) { Unimplemented(); return 0; }
+
+int AbstractInterpreter::layout_activation(methodOop method,
+ int tempcount,
+ int popframe_extra_args,
+ int moncount,
+ int caller_actual_parameters,
+ int callee_param_count,
+ int callee_locals,
+ frame* caller,
+ frame* interpreter_frame,
+ bool is_top_frame) { Unimplemented(); return 0; }
+
+//-----------------------------------------------------------------------------
+// Exceptions
+
+void TemplateInterpreterGenerator::generate_throw_exception() { Unimplemented(); }
+
+
+//
+// JVMTI ForceEarlyReturn support
+//
+address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) { Unimplemented(); return 0; }
+// end of ForceEarlyReturn support
+
+
+//-----------------------------------------------------------------------------
+// Helper for vtos entry point generation
+
+void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
+ address& bep,
+ address& cep,
+ address& sep,
+ address& aep,
+ address& iep,
+ address& lep,
+ address& fep,
+ address& dep,
+ address& vep) { Unimplemented(); }
+
+
+//-----------------------------------------------------------------------------
+// Generation of individual instructions
+
+// helpers for generate_and_dispatch
+
+
+InterpreterGenerator::InterpreterGenerator(StubQueue* code)
+ : TemplateInterpreterGenerator(code) {
+ generate_all(); // down here so it can be "virtual"
+}
+
+//-----------------------------------------------------------------------------
+
+// Non-product code
+#ifndef PRODUCT
+address TemplateInterpreterGenerator::generate_trace_code(TosState state) { Unimplemented(); return 0; }
+
+void TemplateInterpreterGenerator::count_bytecode() { Unimplemented(); }
+
+void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { Unimplemented(); }
+
+void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { Unimplemented(); }
+
+
+void TemplateInterpreterGenerator::trace_bytecode(Template* t) { Unimplemented(); }
+
+
+void TemplateInterpreterGenerator::stop_interpreter_at() { Unimplemented(); }
+#endif // !PRODUCT
+#endif // ! CC_INTERP
diff --git a/src/cpu/aarch64/vm/templateInterpreter_aarch64.hpp b/src/cpu/aarch64/vm/templateInterpreter_aarch64.hpp
new file mode 100644
index 000000000..c828c90fb
--- /dev/null
+++ b/src/cpu/aarch64/vm/templateInterpreter_aarch64.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_TEMPLATEINTERPRETER_AARCH64_HPP
+#define CPU_AARCH64_VM_TEMPLATEINTERPRETER_AARCH64_HPP
+
+
+ protected:
+
+  // Size of interpreter code. Increase if too small. Interpreter will
+  // fail with a guarantee ("not enough space for interpreter generation")
+  // if too small.
+  // Run with +PrintInterpreter to get the VM to print out the size.
+ // Max size with JVMTI
+#ifdef AMD64
+ const static int InterpreterCodeSize = 200 * 1024;
+#else
+ const static int InterpreterCodeSize = 168 * 1024;
+#endif // AMD64
+
+#endif // CPU_AARCH64_VM_TEMPLATEINTERPRETER_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/templateTable_aarch64.cpp b/src/cpu/aarch64/vm/templateTable_aarch64.cpp
new file mode 100644
index 000000000..cdc04abdc
--- /dev/null
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp
@@ -0,0 +1,505 @@
+/*
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/templateTable.hpp"
+#include "memory/universe.inline.hpp"
+#include "oops/methodDataOop.hpp"
+#include "oops/objArrayKlass.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+
+#ifndef CC_INTERP
+
+#define __ _masm->
+
+// Platform-dependent initialization
+
+void TemplateTable::pd_initialize() {
+  // No aarch64 specific initialization
+}
+
+// Address computation: local variables
+
+static inline Address iaddress(int n) { Unimplemented(); return (address)0; }
+
+static inline Address laddress(int n) { Unimplemented(); return (address)0; }
+
+static inline Address faddress(int n) { Unimplemented(); return (address)0; }
+
+static inline Address daddress(int n) { Unimplemented(); return (address)0; }
+
+static inline Address aaddress(int n) { Unimplemented(); return (address)0; }
+
+static inline Address iaddress(Register r) { Unimplemented(); return (address)0; }
+
+static inline Address laddress(Register r) { Unimplemented(); return (address)0; }
+
+static inline Address faddress(Register r) { Unimplemented(); return (address)0; }
+
+static inline Address daddress(Register r) { Unimplemented(); return (address)0; }
+
+static inline Address aaddress(Register r) { Unimplemented(); return (address)0; }
+
+static inline Address at_rsp() { Unimplemented(); return (address)0; }
+
+// Address of the top of the Java expression stack, which may differ from
+// esp(); it does not differ for category 1 values.
+static inline Address at_tos () { Unimplemented(); return (address)0; }
+
+static inline Address at_tos_p1() { Unimplemented(); return (address)0; }
+
+static inline Address at_tos_p2() { Unimplemented(); return (address)0; }
+
+static inline Address at_tos_p3() { Unimplemented(); return (address)0; }
+
+// Condition conversion
+static Assembler::Condition j_not(TemplateTable::Condition cc) { Unimplemented(); return Assembler::dummy; }
+
+
+// Miscellaneous helper routines.
+// Store an oop (or NULL) at the address described by obj.
+// If val == noreg this means store a NULL.
+
+static void do_oop_store(InterpreterMacroAssembler* _masm,
+ Address obj,
+ Register val,
+ BarrierSet::Name barrier,
+ bool precise) { Unimplemented(); }
+
+Address TemplateTable::at_bcp(int offset) { Unimplemented(); return (address)0; }
+
+void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+ Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
+ int byte_no) { Unimplemented(); }
+
+
+// Individual instructions
+
+void TemplateTable::nop() {
+ transition(vtos, vtos);
+ // nothing to do
+}
+
+void TemplateTable::shouldnotreachhere() {
+ transition(vtos, vtos);
+ __ stop("shouldnotreachhere bytecode");
+}
+
+void TemplateTable::aconst_null() { Unimplemented(); }
+
+void TemplateTable::iconst(int value) { Unimplemented(); }
+
+void TemplateTable::lconst(int value) { Unimplemented(); }
+
+void TemplateTable::fconst(int value) { Unimplemented(); }
+
+void TemplateTable::dconst(int value) { Unimplemented(); }
+
+void TemplateTable::bipush() { Unimplemented(); }
+
+void TemplateTable::sipush() { Unimplemented(); }
+
+void TemplateTable::ldc(bool wide) { Unimplemented(); }
+
+// Fast path for caching oop constants.
+// %%% We should use this to handle Class and String constants also.
+// %%% It will simplify the ldc/primitive path considerably.
+void TemplateTable::fast_aldc(bool wide) { Unimplemented(); }
+
+void TemplateTable::ldc2_w() { Unimplemented(); }
+
+void TemplateTable::locals_index(Register reg, int offset) { Unimplemented(); }
+
+void TemplateTable::iload() { Unimplemented(); }
+
+void TemplateTable::fast_iload2() { Unimplemented(); }
+
+void TemplateTable::fast_iload() { Unimplemented(); }
+
+void TemplateTable::lload() { Unimplemented(); }
+
+void TemplateTable::fload() { Unimplemented(); }
+
+void TemplateTable::dload() { Unimplemented(); }
+
+void TemplateTable::aload() { Unimplemented(); }
+
+void TemplateTable::locals_index_wide(Register reg) { Unimplemented(); }
+
+void TemplateTable::wide_iload() { Unimplemented(); }
+
+void TemplateTable::wide_lload() { Unimplemented(); }
+
+void TemplateTable::wide_fload() { Unimplemented(); }
+
+void TemplateTable::wide_dload() { Unimplemented(); }
+
+void TemplateTable::wide_aload() { Unimplemented(); }
+
+void TemplateTable::index_check(Register array, Register index) { Unimplemented(); }
+
+void TemplateTable::iaload() { Unimplemented(); }
+
+void TemplateTable::laload() { Unimplemented(); }
+
+void TemplateTable::faload() { Unimplemented(); }
+
+void TemplateTable::daload() { Unimplemented(); }
+
+void TemplateTable::aaload() { Unimplemented(); }
+
+void TemplateTable::baload() { Unimplemented(); }
+
+void TemplateTable::caload() { Unimplemented(); }
+
+// iload followed by caload -- a frequent bytecode pair
+void TemplateTable::fast_icaload() { Unimplemented(); }
+
+void TemplateTable::saload() { Unimplemented(); }
+
+void TemplateTable::iload(int n) { Unimplemented(); }
+
+void TemplateTable::lload(int n) { Unimplemented(); }
+
+void TemplateTable::fload(int n) { Unimplemented(); }
+
+void TemplateTable::dload(int n) { Unimplemented(); }
+
+void TemplateTable::aload(int n) { Unimplemented(); }
+
+void TemplateTable::aload_0() { Unimplemented(); }
+
+void TemplateTable::istore() { Unimplemented(); }
+
+void TemplateTable::lstore() { Unimplemented(); }
+
+void TemplateTable::fstore() { Unimplemented(); }
+
+void TemplateTable::dstore() { Unimplemented(); }
+
+void TemplateTable::astore() { Unimplemented(); }
+
+void TemplateTable::wide_istore() { Unimplemented(); }
+
+void TemplateTable::wide_lstore() { Unimplemented(); }
+
+void TemplateTable::wide_fstore() { Unimplemented(); }
+
+void TemplateTable::wide_dstore() { Unimplemented(); }
+
+void TemplateTable::wide_astore() { Unimplemented(); }
+
+void TemplateTable::iastore() { Unimplemented(); }
+
+void TemplateTable::lastore() { Unimplemented(); }
+
+void TemplateTable::fastore() { Unimplemented(); }
+
+void TemplateTable::dastore() { Unimplemented(); }
+
+void TemplateTable::aastore() { Unimplemented(); }
+
+void TemplateTable::bastore() { Unimplemented(); }
+
+void TemplateTable::castore() { Unimplemented(); }
+
+void TemplateTable::sastore() { Unimplemented(); }
+
+void TemplateTable::istore(int n) { Unimplemented(); }
+
+void TemplateTable::lstore(int n) { Unimplemented(); }
+
+void TemplateTable::fstore(int n) { Unimplemented(); }
+
+void TemplateTable::dstore(int n) { Unimplemented(); }
+
+void TemplateTable::astore(int n) { Unimplemented(); }
+
+void TemplateTable::pop() { Unimplemented(); }
+
+void TemplateTable::pop2() { Unimplemented(); }
+
+void TemplateTable::dup() { Unimplemented(); }
+
+void TemplateTable::dup_x1() { Unimplemented(); }
+
+void TemplateTable::dup_x2() { Unimplemented(); }
+
+void TemplateTable::dup2() { Unimplemented(); }
+
+void TemplateTable::dup2_x1() { Unimplemented(); }
+
+void TemplateTable::dup2_x2() { Unimplemented(); }
+
+void TemplateTable::swap() { Unimplemented(); }
+
+void TemplateTable::iop2(Operation op) { Unimplemented(); }
+
+void TemplateTable::lop2(Operation op) { Unimplemented(); }
+
+void TemplateTable::idiv() { Unimplemented(); }
+
+void TemplateTable::irem() { Unimplemented(); }
+
+void TemplateTable::lmul() { Unimplemented(); }
+
+void TemplateTable::ldiv() { Unimplemented(); }
+
+void TemplateTable::lrem() { Unimplemented(); }
+
+void TemplateTable::lshl() { Unimplemented(); }
+
+void TemplateTable::lshr() { Unimplemented(); }
+
+void TemplateTable::lushr() { Unimplemented(); }
+
+void TemplateTable::fop2(Operation op) { Unimplemented(); }
+
+void TemplateTable::dop2(Operation op) { Unimplemented(); }
+
+void TemplateTable::ineg() { Unimplemented(); }
+
+void TemplateTable::lneg() { Unimplemented(); }
+
+// Note: 'double' and 'long long' have 32-bit alignment on x86.
+static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) { Unimplemented(); return 0; }
+
+void TemplateTable::fneg() { Unimplemented(); }
+
+void TemplateTable::dneg() { Unimplemented(); }
+
+void TemplateTable::iinc() { Unimplemented(); }
+
+void TemplateTable::wide_iinc() { Unimplemented(); }
+
+void TemplateTable::convert() { Unimplemented(); }
+
+void TemplateTable::lcmp() { Unimplemented(); }
+
+void TemplateTable::float_cmp(bool is_float, int unordered_result) { Unimplemented(); }
+
+void TemplateTable::branch(bool is_jsr, bool is_wide) { Unimplemented(); }
+
+
+void TemplateTable::if_0cmp(Condition cc) { Unimplemented(); }
+
+void TemplateTable::if_icmp(Condition cc) { Unimplemented(); }
+
+void TemplateTable::if_nullcmp(Condition cc) { Unimplemented(); }
+
+void TemplateTable::if_acmp(Condition cc) { Unimplemented(); }
+
+void TemplateTable::ret() { Unimplemented(); }
+
+void TemplateTable::wide_ret() { Unimplemented(); }
+
+void TemplateTable::tableswitch() { Unimplemented(); }
+
+void TemplateTable::lookupswitch() { Unimplemented(); }
+
+void TemplateTable::fast_linearswitch() { Unimplemented(); }
+
+void TemplateTable::fast_binaryswitch() { Unimplemented(); }
+
+
+void TemplateTable::_return(TosState state) { Unimplemented(); }
+
+// ----------------------------------------------------------------------------
+// Volatile variables demand their effects be made known to all CPUs
+// in order. Store buffers on most chips allow reads & writes to
+// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
+// without some kind of memory barrier (i.e., it's not sufficient that
+// the interpreter does not reorder volatile references, the hardware
+// also must not reorder them).
+//
+// According to the new Java Memory Model (JMM):
+// (1) All volatiles are serialized with respect to each other. ALSO reads &
+// writes act as acquire & release, so:
+// (2) A read cannot let unrelated NON-volatile memory refs that
+// happen after the read float up to before the read. It's OK for
+// non-volatile memory refs that happen before the volatile read to
+// float down below it.
+// (3) Similarly, a volatile write cannot let unrelated NON-volatile
+// memory refs that happen BEFORE the write float down to after the
+// write. It's OK for non-volatile memory refs that happen after the
+// volatile write to float up before it.
+//
+// We only put in barriers around volatile refs (they are expensive),
+// not _between_ memory refs (that would require us to track the
+// flavor of the previous memory refs). Requirements (2) and (3)
+// require some barriers before volatile stores and after volatile
+// loads. These nearly cover requirement (1) but miss the
+// volatile-store-volatile-load case. This final case is placed after
+// volatile-stores although it could just as well go before
+// volatile-loads.
+
+// void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
+// order_constraint) { Unimplemented(); }
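+
+// A hedged sketch only (assumes GCC 4.7+ __atomic builtins): the JMM rules
+// above phrased as acquire loads and release stores, with a trailing full
+// fence to cover the volatile-store/volatile-load case. On AArch64 these
+// would likely become ldar/stlr or dmb instructions.
+static inline jint volatile_read_sketch(volatile jint* addr) {
+  return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
+}
+static inline void volatile_write_sketch(volatile jint* addr, jint value) {
+  __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+  __atomic_thread_fence(__ATOMIC_SEQ_CST); // store->load ordering
+}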
+
+void TemplateTable::resolve_cache_and_index(int byte_no,
+ Register result,
+ Register Rcache,
+ Register index,
+ size_t index_size) { Unimplemented(); }
+
+// The Rcache and index registers must be set before the call.
+void TemplateTable::load_field_cp_cache_entry(Register obj,
+ Register cache,
+ Register index,
+ Register off,
+ Register flags,
+ bool is_static = false) { Unimplemented(); }
+
+void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
+ Register method,
+ Register itable_index,
+ Register flags,
+ bool is_invokevirtual,
+ bool is_invokevfinal, /*unused*/
+ bool is_invokedynamic) { Unimplemented(); }
+
+
+// The cache and index registers are expected to be set before the call.
+// Correct values of the cache and index registers are preserved.
+void TemplateTable::jvmti_post_field_access(Register cache, Register index,
+ bool is_static, bool has_tos) { Unimplemented(); }
+
+void TemplateTable::pop_and_check_object(Register r) { Unimplemented(); }
+
+void TemplateTable::getfield_or_static(int byte_no, bool is_static) { Unimplemented(); }
+
+
+void TemplateTable::getfield(int byte_no) { Unimplemented(); }
+
+void TemplateTable::getstatic(int byte_no) { Unimplemented(); }
+
+// The cache and index registers are expected to be set before the call.
+// The function may destroy various registers, just not the cache and index registers.
+void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { Unimplemented(); }
+
+void TemplateTable::putfield_or_static(int byte_no, bool is_static) { Unimplemented(); }
+
+void TemplateTable::putfield(int byte_no) { Unimplemented(); }
+
+void TemplateTable::putstatic(int byte_no) { Unimplemented(); }
+
+void TemplateTable::jvmti_post_fast_field_mod() { Unimplemented(); }
+
+void TemplateTable::fast_storefield(TosState state) { Unimplemented(); }
+
+
+void TemplateTable::fast_accessfield(TosState state) { Unimplemented(); }
+
+void TemplateTable::fast_xaccess(TosState state) { Unimplemented(); }
+
+
+
+//-----------------------------------------------------------------------------
+// Calls
+
+void TemplateTable::count_calls(Register method, Register temp) { Unimplemented(); }
+
+void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) { Unimplemented(); }
+
+
+void TemplateTable::invokevirtual_helper(Register index,
+ Register recv,
+ Register flags) { Unimplemented(); }
+
+
+void TemplateTable::invokevirtual(int byte_no) { Unimplemented(); }
+
+
+void TemplateTable::invokespecial(int byte_no) { Unimplemented(); }
+
+
+void TemplateTable::invokestatic(int byte_no) { Unimplemented(); }
+
+void TemplateTable::fast_invokevfinal(int byte_no) { Unimplemented(); }
+
+void TemplateTable::invokeinterface(int byte_no) { Unimplemented(); }
+
+void TemplateTable::invokedynamic(int byte_no) { Unimplemented(); }
+
+
+//-----------------------------------------------------------------------------
+// Allocation
+
+void TemplateTable::_new() { Unimplemented(); }
+
+void TemplateTable::newarray() { Unimplemented(); }
+
+void TemplateTable::anewarray() { Unimplemented(); }
+
+void TemplateTable::arraylength() { Unimplemented(); }
+
+void TemplateTable::checkcast() { Unimplemented(); }
+
+void TemplateTable::instanceof() { Unimplemented(); }
+
+//-----------------------------------------------------------------------------
+// Breakpoints
+void TemplateTable::_breakpoint() { Unimplemented(); }
+
+//-----------------------------------------------------------------------------
+// Exceptions
+
+void TemplateTable::athrow() { Unimplemented(); }
+
+//-----------------------------------------------------------------------------
+// Synchronization
+//
+// Note: monitorenter & exit are symmetric routines, which is reflected
+// in the assembly code structure as well
+//
+// Stack layout:
+//
+// [expressions ] <--- rsp = expression stack top
+// ..
+// [expressions ]
+// [monitor entry] <--- monitor block top = expression stack bot
+// ..
+// [monitor entry]
+// [frame data ] <--- monitor block bot
+// ...
+// [saved rbp ] <--- rbp
+void TemplateTable::monitorenter() { Unimplemented(); }
+
+
+void TemplateTable::monitorexit() { Unimplemented(); }
+
+
+// Wide instructions
+void TemplateTable::wide() { Unimplemented(); }
+
+
+// Multi arrays
+void TemplateTable::multianewarray() { Unimplemented(); }
+#endif // !CC_INTERP
diff --git a/src/cpu/aarch64/vm/templateTable_aarch64.hpp b/src/cpu/aarch64/vm/templateTable_aarch64.hpp
new file mode 100644
index 000000000..39a85363f
--- /dev/null
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.hpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_TEMPLATETABLE_AARCH64_HPP
+#define CPU_AARCH64_VM_TEMPLATETABLE_AARCH64_HPP
+
+ static void prepare_invoke(Register method, Register index, int byte_no);
+ static void invokevirtual_helper(Register index, Register recv,
+ Register flags);
+
+ // Helpers
+ static void index_check(Register array, Register index);
+ static void index_check_without_pop(Register array, Register index);
+
+#endif // CPU_AARCH64_VM_TEMPLATETABLE_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/vmStructs_aarch64.hpp b/src/cpu/aarch64/vm/vmStructs_aarch64.hpp
new file mode 100644
index 000000000..8dddc9c3e
--- /dev/null
+++ b/src/cpu/aarch64/vm/vmStructs_aarch64.hpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_VMSTRUCTS_AARCH64_HPP
+#define CPU_AARCH64_VM_VMSTRUCTS_AARCH64_HPP
+
+// These are the CPU-specific fields, types and integer
+// constants required by the Serviceability Agent. This file is
+// referenced by vmStructs.cpp.
+
+#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
+ \
+ /******************************/ \
+ /* JavaCallWrapper */ \
+ /******************************/ \
+ /******************************/ \
+ /* JavaFrameAnchor */ \
+ /******************************/ \
+ volatile_nonstatic_field(JavaFrameAnchor, _last_Java_fp, intptr_t*) \
+ \
+
+ /* NOTE that we do not use the last_entry() macro here; it is used */
+ /* in vmStructs_<os>_<cpu>.hpp's VM_STRUCTS_OS_CPU macro (and must */
+ /* be present there) */
+
+
+#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
+
+ /* NOTE that we do not use the last_entry() macro here; it is used */
+ /* in vmStructs_<os>_<cpu>.hpp's VM_TYPES_OS_CPU macro (and must */
+ /* be present there) */
+
+
+#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
+
+ /* NOTE that we do not use the last_entry() macro here; it is used */
+ /* in vmStructs_<os>_<cpu>.hpp's VM_INT_CONSTANTS_OS_CPU macro (and must */
+ /* be present there) */
+
+#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
+
+ /* NOTE that we do not use the last_entry() macro here; it is used */
+ /* in vmStructs_<os>_<cpu>.hpp's VM_LONG_CONSTANTS_OS_CPU macro (and must */
+ /* be present there) */
+
+#endif // CPU_AARCH64_VM_VMSTRUCTS_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/vm_version_aarch64.cpp b/src/cpu/aarch64/vm/vm_version_aarch64.cpp
new file mode 100644
index 000000000..57b18eeab
--- /dev/null
+++ b/src/cpu/aarch64/vm/vm_version_aarch64.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "assembler_aarch64.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/java.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "vm_version_aarch64.hpp"
+#ifdef TARGET_OS_FAMILY_linux
+# include "os_linux.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_solaris
+# include "os_solaris.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_windows
+# include "os_windows.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_bsd
+# include "os_bsd.inline.hpp"
+#endif
+
+
+int VM_Version::_cpu;
+int VM_Version::_model;
+int VM_Version::_stepping;
+int VM_Version::_cpuFeatures;
+const char* VM_Version::_features_str = "";
+VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
+
+static BufferBlob* stub_blob;
+static const int stub_size = 550;
+
+extern "C" {
+ typedef void (*getPsrInfo_stub_t)(void*);
+}
+static getPsrInfo_stub_t getPsrInfo_stub = NULL;
+
+
+class VM_Version_StubGenerator: public StubCodeGenerator {
+ public:
+
+ VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
+
+ address generate_getPsrInfo() { Unimplemented(); return 0; }
+};
+
+
+void VM_Version::get_processor_features() { Unimplemented(); }
+
+void VM_Version::initialize() { Unimplemented(); }
diff --git a/src/cpu/aarch64/vm/vm_version_aarch64.hpp b/src/cpu/aarch64/vm/vm_version_aarch64.hpp
index 32d33cc49..7e309772a 100644
--- a/src/cpu/aarch64/vm/vm_version_aarch64.hpp
+++ b/src/cpu/aarch64/vm/vm_version_aarch64.hpp
@@ -22,14 +22,643 @@
*
*/
 #ifndef CPU_AARCH64_VM_VM_VERSION_AARCH64_HPP
 #define CPU_AARCH64_VM_VM_VERSION_AARCH64_HPP
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
-class VM_Version_64 : public Abstract_VM_Version {
+class VM_Version : public Abstract_VM_Version {
public:
+ // cpuid result register layouts. These are all unions of a uint32_t
+ // (in case anyone wants access to the register as a whole) and a bitfield.
+
+ union StdCpuid1Eax {
+ uint32_t value;
+ struct {
+ uint32_t stepping : 4,
+ model : 4,
+ family : 4,
+ proc_type : 2,
+ : 2,
+ ext_model : 4,
+ ext_family : 8,
+ : 4;
+ } bits;
+ };
+
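+  // Illustrative decoding sketch (values hypothetical, not part of this
+  // commit): for a raw cpuid leaf-1 EAX of 0x000206a7,
+  //   StdCpuid1Eax eax; eax.value = 0x000206a7;
+  //   eax.bits.family    == 6,  eax.bits.model == 0xa,
+  //   eax.bits.ext_model == 2   => extended_cpu_model() == 0x2a.
+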
+ union StdCpuid1Ebx { // example, unused
+ uint32_t value;
+ struct {
+ uint32_t brand_id : 8,
+ clflush_size : 8,
+ threads_per_cpu : 8,
+ apic_id : 8;
+ } bits;
+ };
+
+ union StdCpuid1Ecx {
+ uint32_t value;
+ struct {
+ uint32_t sse3 : 1,
+ : 2,
+ monitor : 1,
+ : 1,
+ vmx : 1,
+ : 1,
+ est : 1,
+ : 1,
+ ssse3 : 1,
+ cid : 1,
+ : 2,
+ cmpxchg16: 1,
+ : 4,
+ dca : 1,
+ sse4_1 : 1,
+ sse4_2 : 1,
+ : 2,
+ popcnt : 1,
+ : 3,
+ osxsave : 1,
+ avx : 1,
+ : 3;
+ } bits;
+ };
+
+ union StdCpuid1Edx {
+ uint32_t value;
+ struct {
+ uint32_t : 4,
+ tsc : 1,
+ : 3,
+ cmpxchg8 : 1,
+ : 6,
+ cmov : 1,
+ : 3,
+ clflush : 1,
+ : 3,
+ mmx : 1,
+ fxsr : 1,
+ sse : 1,
+ sse2 : 1,
+ : 1,
+ ht : 1,
+ : 3;
+ } bits;
+ };
+
+ union DcpCpuid4Eax {
+ uint32_t value;
+ struct {
+ uint32_t cache_type : 5,
+ : 21,
+ cores_per_cpu : 6;
+ } bits;
+ };
+
+ union DcpCpuid4Ebx {
+ uint32_t value;
+ struct {
+ uint32_t L1_line_size : 12,
+ partitions : 10,
+ associativity : 10;
+ } bits;
+ };
+
+ union TplCpuidBEbx {
+ uint32_t value;
+ struct {
+ uint32_t logical_cpus : 16,
+ : 16;
+ } bits;
+ };
+
+ union ExtCpuid1Ecx {
+ uint32_t value;
+ struct {
+ uint32_t LahfSahf : 1,
+ CmpLegacy : 1,
+ : 4,
+ lzcnt : 1,
+ sse4a : 1,
+ misalignsse : 1,
+ prefetchw : 1,
+ : 22;
+ } bits;
+ };
+
+ union ExtCpuid1Edx {
+ uint32_t value;
+ struct {
+ uint32_t : 22,
+ mmx_amd : 1,
+ mmx : 1,
+ fxsr : 1,
+ : 4,
+ long_mode : 1,
+ tdnow2 : 1,
+ tdnow : 1;
+ } bits;
+ };
+
+ union ExtCpuid5Ex {
+ uint32_t value;
+ struct {
+ uint32_t L1_line_size : 8,
+ L1_tag_lines : 8,
+ L1_assoc : 8,
+ L1_size : 8;
+ } bits;
+ };
+
+ union ExtCpuid7Edx {
+ uint32_t value;
+ struct {
+ uint32_t : 8,
+ tsc_invariance : 1,
+ : 23;
+ } bits;
+ };
+
+ union ExtCpuid8Ecx {
+ uint32_t value;
+ struct {
+ uint32_t cores_per_cpu : 8,
+ : 24;
+ } bits;
+ };
+
+ union SefCpuid7Eax {
+ uint32_t value;
+ };
+
+ union SefCpuid7Ebx {
+ uint32_t value;
+ struct {
+ uint32_t fsgsbase : 1,
+ : 2,
+ bmi1 : 1,
+ : 1,
+ avx2 : 1,
+ : 2,
+ bmi2 : 1,
+ : 23;
+ } bits;
+ };
+
+ union XemXcr0Eax {
+ uint32_t value;
+ struct {
+ uint32_t x87 : 1,
+ sse : 1,
+ ymm : 1,
+ : 29;
+ } bits;
+ };
+
+protected:
+ static int _cpu;
+ static int _model;
+ static int _stepping;
+ static int _cpuFeatures; // features returned by the "cpuid" instruction
+ // 0 if this instruction is not available
+ static const char* _features_str;
+
+ enum {
+ CPU_CX8 = (1 << 0), // next bits are from cpuid 1 (EDX)
+ CPU_CMOV = (1 << 1),
+ CPU_FXSR = (1 << 2),
+ CPU_HT = (1 << 3),
+ CPU_MMX = (1 << 4),
+ CPU_3DNOW_PREFETCH = (1 << 5), // Processor supports 3dnow prefetch and prefetchw instructions
+ // may not necessarily support other 3dnow instructions
+ CPU_SSE = (1 << 6),
+ CPU_SSE2 = (1 << 7),
+ CPU_SSE3 = (1 << 8), // SSE3 comes from cpuid 1 (ECX)
+ CPU_SSSE3 = (1 << 9),
+ CPU_SSE4A = (1 << 10),
+ CPU_SSE4_1 = (1 << 11),
+ CPU_SSE4_2 = (1 << 12),
+ CPU_POPCNT = (1 << 13),
+ CPU_LZCNT = (1 << 14),
+ CPU_TSC = (1 << 15),
+ CPU_TSCINV = (1 << 16),
+ CPU_AVX = (1 << 17),
+ CPU_AVX2 = (1 << 18)
+ } cpuFeatureFlags;
+
+ enum {
+ // AMD
+ CPU_FAMILY_AMD_11H = 0x11,
+ // Intel
+ CPU_FAMILY_INTEL_CORE = 6,
+ CPU_MODEL_NEHALEM = 0x1e,
+ CPU_MODEL_NEHALEM_EP = 0x1a,
+ CPU_MODEL_NEHALEM_EX = 0x2e,
+ CPU_MODEL_WESTMERE = 0x25,
+ CPU_MODEL_WESTMERE_EP = 0x2c,
+ CPU_MODEL_WESTMERE_EX = 0x2f,
+ CPU_MODEL_SANDYBRIDGE = 0x2a,
+ CPU_MODEL_SANDYBRIDGE_EP = 0x2d,
+ CPU_MODEL_IVYBRIDGE_EP = 0x3a
+ } cpuExtendedFamily;
+
+ // cpuid information block. All info derived from executing cpuid with
+ // various function numbers is stored here. Intel and AMD info is
+ // merged in this block: accessor methods disentangle it.
+ //
+ // The info block is laid out in subblocks of 4 dwords corresponding to
+ // eax, ebx, ecx and edx, whether or not they contain anything useful.
+ struct CpuidInfo {
+ // cpuid function 0
+ uint32_t std_max_function;
+ uint32_t std_vendor_name_0;
+ uint32_t std_vendor_name_1;
+ uint32_t std_vendor_name_2;
+
+ // cpuid function 1
+ StdCpuid1Eax std_cpuid1_eax;
+ StdCpuid1Ebx std_cpuid1_ebx;
+ StdCpuid1Ecx std_cpuid1_ecx;
+ StdCpuid1Edx std_cpuid1_edx;
+
+ // cpuid function 4 (deterministic cache parameters)
+ DcpCpuid4Eax dcp_cpuid4_eax;
+ DcpCpuid4Ebx dcp_cpuid4_ebx;
+ uint32_t dcp_cpuid4_ecx; // unused currently
+ uint32_t dcp_cpuid4_edx; // unused currently
+
+ // cpuid function 7 (structured extended features)
+ SefCpuid7Eax sef_cpuid7_eax;
+ SefCpuid7Ebx sef_cpuid7_ebx;
+ uint32_t sef_cpuid7_ecx; // unused currently
+ uint32_t sef_cpuid7_edx; // unused currently
+
+ // cpuid function 0xB (processor topology)
+ // ecx = 0
+ uint32_t tpl_cpuidB0_eax;
+ TplCpuidBEbx tpl_cpuidB0_ebx;
+ uint32_t tpl_cpuidB0_ecx; // unused currently
+ uint32_t tpl_cpuidB0_edx; // unused currently
+
+ // ecx = 1
+ uint32_t tpl_cpuidB1_eax;
+ TplCpuidBEbx tpl_cpuidB1_ebx;
+ uint32_t tpl_cpuidB1_ecx; // unused currently
+ uint32_t tpl_cpuidB1_edx; // unused currently
+
+ // ecx = 2
+ uint32_t tpl_cpuidB2_eax;
+ TplCpuidBEbx tpl_cpuidB2_ebx;
+ uint32_t tpl_cpuidB2_ecx; // unused currently
+ uint32_t tpl_cpuidB2_edx; // unused currently
+
+ // cpuid function 0x80000000 // example, unused
+ uint32_t ext_max_function;
+ uint32_t ext_vendor_name_0;
+ uint32_t ext_vendor_name_1;
+ uint32_t ext_vendor_name_2;
+
+ // cpuid function 0x80000001
+ uint32_t ext_cpuid1_eax; // reserved
+ uint32_t ext_cpuid1_ebx; // reserved
+ ExtCpuid1Ecx ext_cpuid1_ecx;
+ ExtCpuid1Edx ext_cpuid1_edx;
+
+ // cpuid functions 0x80000002 thru 0x80000004: example, unused
+ uint32_t proc_name_0, proc_name_1, proc_name_2, proc_name_3;
+ uint32_t proc_name_4, proc_name_5, proc_name_6, proc_name_7;
+ uint32_t proc_name_8, proc_name_9, proc_name_10,proc_name_11;
+
+ // cpuid function 0x80000005 // AMD L1, Intel reserved
+ uint32_t ext_cpuid5_eax; // unused currently
+ uint32_t ext_cpuid5_ebx; // reserved
+ ExtCpuid5Ex ext_cpuid5_ecx; // L1 data cache info (AMD)
+ ExtCpuid5Ex ext_cpuid5_edx; // L1 instruction cache info (AMD)
+
+ // cpuid function 0x80000007
+ uint32_t ext_cpuid7_eax; // reserved
+ uint32_t ext_cpuid7_ebx; // reserved
+ uint32_t ext_cpuid7_ecx; // reserved
+ ExtCpuid7Edx ext_cpuid7_edx; // tscinv
+
+ // cpuid function 0x80000008
+ uint32_t ext_cpuid8_eax; // unused currently
+ uint32_t ext_cpuid8_ebx; // reserved
+ ExtCpuid8Ecx ext_cpuid8_ecx;
+ uint32_t ext_cpuid8_edx; // reserved
+
+ // extended control register XCR0 (the XFEATURE_ENABLED_MASK register)
+ XemXcr0Eax xem_xcr0_eax;
+ uint32_t xem_xcr0_edx; // reserved
+ };
+
+ // The actual cpuid info block
+ static CpuidInfo _cpuid_info;
+
+ // Extractors and predicates
+ static uint32_t extended_cpu_family() {
+ uint32_t result = _cpuid_info.std_cpuid1_eax.bits.family;
+ result += _cpuid_info.std_cpuid1_eax.bits.ext_family;
+ return result;
+ }
+
+ static uint32_t extended_cpu_model() {
+ uint32_t result = _cpuid_info.std_cpuid1_eax.bits.model;
+ result |= _cpuid_info.std_cpuid1_eax.bits.ext_model << 4;
+ return result;
+ }
+
+ static uint32_t cpu_stepping() {
+ uint32_t result = _cpuid_info.std_cpuid1_eax.bits.stepping;
+ return result;
+ }
+
+ static uint logical_processor_count() {
+ uint result = threads_per_core();
+ return result;
+ }
+
+ static uint32_t feature_flags() {
+ uint32_t result = 0;
+ if (_cpuid_info.std_cpuid1_edx.bits.cmpxchg8 != 0)
+ result |= CPU_CX8;
+ if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0)
+ result |= CPU_CMOV;
+ if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd() &&
+ _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0))
+ result |= CPU_FXSR;
+ // HT flag is set for multi-core processors also.
+ if (threads_per_core() > 1)
+ result |= CPU_HT;
+ if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd() &&
+ _cpuid_info.ext_cpuid1_edx.bits.mmx != 0))
+ result |= CPU_MMX;
+ if (_cpuid_info.std_cpuid1_edx.bits.sse != 0)
+ result |= CPU_SSE;
+ if (_cpuid_info.std_cpuid1_edx.bits.sse2 != 0)
+ result |= CPU_SSE2;
+ if (_cpuid_info.std_cpuid1_ecx.bits.sse3 != 0)
+ result |= CPU_SSE3;
+ if (_cpuid_info.std_cpuid1_ecx.bits.ssse3 != 0)
+ result |= CPU_SSSE3;
+ if (_cpuid_info.std_cpuid1_ecx.bits.sse4_1 != 0)
+ result |= CPU_SSE4_1;
+ if (_cpuid_info.std_cpuid1_ecx.bits.sse4_2 != 0)
+ result |= CPU_SSE4_2;
+ if (_cpuid_info.std_cpuid1_ecx.bits.popcnt != 0)
+ result |= CPU_POPCNT;
+ if (_cpuid_info.std_cpuid1_ecx.bits.avx != 0 &&
+ _cpuid_info.std_cpuid1_ecx.bits.osxsave != 0 &&
+ _cpuid_info.xem_xcr0_eax.bits.sse != 0 &&
+ _cpuid_info.xem_xcr0_eax.bits.ymm != 0) {
+ result |= CPU_AVX;
+ if (_cpuid_info.sef_cpuid7_ebx.bits.avx2 != 0)
+ result |= CPU_AVX2;
+ }
+ if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
+ result |= CPU_TSC;
+ if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
+ result |= CPU_TSCINV;
+
+ // AMD features.
+ if (is_amd()) {
+ if ((_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) ||
+ (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0))
+ result |= CPU_3DNOW_PREFETCH;
+ if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0)
+ result |= CPU_LZCNT;
+ if (_cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0)
+ result |= CPU_SSE4A;
+ }
+
+ return result;
+ }
+
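+  // Note: feature_flags() only derives the bit mask from _cpuid_info;
+  // get_processor_features() (stubbed to Unimplemented() in this port,
+  // see vm_version_aarch64.cpp) is what records the result in
+  // _cpuFeatures for the supports_*() queries below.
+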
+ static void get_processor_features();
+
+public:
+ // Offsets for cpuid asm stub
+ static ByteSize std_cpuid0_offset() { return byte_offset_of(CpuidInfo, std_max_function); }
+ static ByteSize std_cpuid1_offset() { return byte_offset_of(CpuidInfo, std_cpuid1_eax); }
+ static ByteSize dcp_cpuid4_offset() { return byte_offset_of(CpuidInfo, dcp_cpuid4_eax); }
+ static ByteSize sef_cpuid7_offset() { return byte_offset_of(CpuidInfo, sef_cpuid7_eax); }
+ static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); }
+ static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); }
+ static ByteSize ext_cpuid7_offset() { return byte_offset_of(CpuidInfo, ext_cpuid7_eax); }
+ static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); }
+ static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); }
+ static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
+ static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
+ static ByteSize xem_xcr0_offset() { return byte_offset_of(CpuidInfo, xem_xcr0_eax); }
+
+ // Initialization
+ static void initialize();
+
+ // Asserts
+ static void assert_is_initialized() {
+ assert(_cpuid_info.std_cpuid1_eax.bits.family != 0, "VM_Version not initialized");
+ }
+
+ //
+ // Processor family:
+ // 3 - 386
+ // 4 - 486
+ // 5 - Pentium
+ // 6 - PentiumPro, Pentium II, Celeron, Xeon, Pentium III, Athlon,
+ // Pentium M, Core Solo, Core Duo, Core2 Duo
+ // family 6 model: 9, 13, 14, 15
+ // 0x0f - Pentium 4, Opteron
+ //
+ // Note: The cpu family should be used to select between
+ // instruction sequences which are valid on all Intel
+ // processors. Use the feature test functions below to
+ // determine whether a particular instruction is supported.
+ //
+ static int cpu_family() { return _cpu;}
+ static bool is_P6() { return cpu_family() >= 6; }
+ static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
+ static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
+
+ static bool supports_processor_topology() {
+ return (_cpuid_info.std_max_function >= 0xB) &&
+ // eax[4:0] | ebx[0:15] == 0 indicates invalid topology level.
+ // Some cpus have max cpuid >= 0xB but do not support processor topology.
+ ((_cpuid_info.tpl_cpuidB0_eax & 0x1f | _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus) != 0);
+ }
+
+ static uint cores_per_cpu() {
+ uint result = 1;
+ if (is_intel()) {
+ if (supports_processor_topology()) {
+ result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
+ _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
+ } else {
+ result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
+ }
+ } else if (is_amd()) {
+ result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
+ }
+ return result;
+ }
+
+ static uint threads_per_core() {
+ uint result = 1;
+ if (is_intel() && supports_processor_topology()) {
+ result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
+ } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
+ result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
+ cores_per_cpu();
+ }
+ return result;
+ }
+
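+  // Illustrative example (hypothetical values, not from this commit): a
+  // 4-core hyper-threaded Intel package reports logical_cpus == 2 at
+  // cpuid 0xB level 0 (threads per core) and logical_cpus == 8 at level 1
+  // (logical processors per package), so threads_per_core() == 2 and
+  // cores_per_cpu() == 8 / 2 == 4.
+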
+ static intx prefetch_data_size() {
+ intx result = 0;
+ if (is_intel()) {
+ result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
+ } else if (is_amd()) {
+ result = _cpuid_info.ext_cpuid5_ecx.bits.L1_line_size;
+ }
+ if (result < 32) // not defined ?
+ result = 32; // 32 bytes by default on x86 and other x64
+ return result;
+ }
+
+ //
+ // Feature identification
+ //
+ static bool supports_cpuid() { return _cpuFeatures != 0; }
+ static bool supports_cmpxchg8() { return (_cpuFeatures & CPU_CX8) != 0; }
+ static bool supports_cmov() { return (_cpuFeatures & CPU_CMOV) != 0; }
+ static bool supports_fxsr() { return (_cpuFeatures & CPU_FXSR) != 0; }
+ static bool supports_ht() { return (_cpuFeatures & CPU_HT) != 0; }
+ static bool supports_mmx() { return (_cpuFeatures & CPU_MMX) != 0; }
+ static bool supports_sse() { return (_cpuFeatures & CPU_SSE) != 0; }
+ static bool supports_sse2() { return (_cpuFeatures & CPU_SSE2) != 0; }
+ static bool supports_sse3() { return (_cpuFeatures & CPU_SSE3) != 0; }
+ static bool supports_ssse3() { return (_cpuFeatures & CPU_SSSE3)!= 0; }
+ static bool supports_sse4_1() { return (_cpuFeatures & CPU_SSE4_1) != 0; }
+ static bool supports_sse4_2() { return (_cpuFeatures & CPU_SSE4_2) != 0; }
+ static bool supports_popcnt() { return (_cpuFeatures & CPU_POPCNT) != 0; }
+ static bool supports_avx() { return (_cpuFeatures & CPU_AVX) != 0; }
+ static bool supports_avx2() { return (_cpuFeatures & CPU_AVX2) != 0; }
+ static bool supports_tsc() { return (_cpuFeatures & CPU_TSC) != 0; }
+
+ // Intel features
+ static bool is_intel_family_core() { return is_intel() &&
+ extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }
+
+ static bool is_intel_tsc_synched_at_init() {
+ if (is_intel_family_core()) {
+ uint32_t ext_model = extended_cpu_model();
+ if (ext_model == CPU_MODEL_NEHALEM_EP ||
+ ext_model == CPU_MODEL_WESTMERE_EP ||
+ ext_model == CPU_MODEL_SANDYBRIDGE_EP ||
+ ext_model == CPU_MODEL_IVYBRIDGE_EP) {
+ // <= 2-socket invariant tsc support. EX versions are usually used
+ // in > 2-socket systems and likely don't synchronize tscs at
+ // initialization.
+ // Code that uses tsc values must be prepared for them to arbitrarily
+ // jump forward or backward.
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // AMD features
+ static bool supports_3dnow_prefetch() { return (_cpuFeatures & CPU_3DNOW_PREFETCH) != 0; }
+ static bool supports_mmx_ext() { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.mmx_amd != 0; }
+ static bool supports_lzcnt() { return (_cpuFeatures & CPU_LZCNT) != 0; }
+ static bool supports_sse4a() { return (_cpuFeatures & CPU_SSE4A) != 0; }
+
+ static bool is_amd_Barcelona() { return is_amd() &&
+ extended_cpu_family() == CPU_FAMILY_AMD_11H; }
+
+ // Intel and AMD newer cores support fast timestamps well
+ static bool supports_tscinv_bit() {
+ return (_cpuFeatures & CPU_TSCINV) != 0;
+ }
+ static bool supports_tscinv() {
+ return supports_tscinv_bit() &&
+ ( (is_amd() && !is_amd_Barcelona()) ||
+ is_intel_tsc_synched_at_init() );
+ }
+
+ // Intel Core and newer cpus have fast IDIV instruction (excluding Atom).
+ static bool has_fast_idiv() { return is_intel() && cpu_family() == 6 &&
+ supports_sse3() && _model != 0x1C; }
+
+ static bool supports_compare_and_exchange() { return true; }
+
+ static const char* cpu_features() { return _features_str; }
+
+ static intx allocate_prefetch_distance() {
+ // This method should be called before allocate_prefetch_style().
+ //
+ // Hardware prefetching (distance/size in bytes):
+ // Pentium 3 - 64 / 32
+ // Pentium 4 - 256 / 128
+ // Athlon - 64 / 32 ????
+ // Opteron - 128 / 64 only when 2 sequential cache lines accessed
+ // Core - 128 / 64
+ //
+ // Software prefetching (distance in bytes / instruction with best score):
+ // Pentium 3 - 128 / prefetchnta
+ // Pentium 4 - 512 / prefetchnta
+ // Athlon - 128 / prefetchnta
+ // Opteron - 256 / prefetchnta
+ // Core - 256 / prefetchnta
+ // It will be used only when AllocatePrefetchStyle > 0
+
+ intx count = AllocatePrefetchDistance;
+ if (count < 0) { // default ?
+ if (is_amd()) { // AMD
+ if (supports_sse2())
+ count = 256; // Opteron
+ else
+ count = 128; // Athlon
+ } else { // Intel
+ if (supports_sse2())
+ if (cpu_family() == 6) {
+ count = 256; // Pentium M, Core, Core2
+ } else {
+ count = 512; // Pentium 4
+ }
+ else
+ count = 128; // Pentium 3 (and all other old CPUs)
+ }
+ }
+ return count;
+ }
+ static intx allocate_prefetch_style() {
+ assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should not be negative");
+ // Return 0 if AllocatePrefetchDistance was not defined.
+ return AllocatePrefetchDistance > 0 ? AllocatePrefetchStyle : 0;
+ }
+
+ // Prefetch interval for gc copy/scan == 9 dcache lines. Derived from
+ // 50-warehouse specjbb runs on a 2-way 1.8ghz opteron using a 4gb heap.
+ // Tested intervals from 128 to 2048 in increments of 64 == one cache line.
+ // 256 bytes (4 dcache lines) was the nearest runner-up to 576.
+
+ // gc copy/scan is disabled if prefetchw isn't supported, because
+ // Prefetch::write emits an inlined prefetchw on Linux.
+ // Do not use the 3dnow prefetchw instruction. It isn't supported on em64t.
+ // The prefetcht0 instruction used instead works for both amd64 and em64t.
+ static intx prefetch_copy_interval_in_bytes() {
+ intx interval = PrefetchCopyIntervalInBytes;
+ return interval >= 0 ? interval : 576;
+ }
+ static intx prefetch_scan_interval_in_bytes() {
+ intx interval = PrefetchScanIntervalInBytes;
+ return interval >= 0 ? interval : 576;
+ }
+ static intx prefetch_fields_ahead() {
+ intx count = PrefetchFieldsAhead;
+ return count >= 0 ? count : 1;
+ }
};
 #endif // CPU_AARCH64_VM_VM_VERSION_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/vmreg_aarch64.cpp b/src/cpu/aarch64/vm/vmreg_aarch64.cpp
new file mode 100644
index 000000000..b5e8942d0
--- /dev/null
+++ b/src/cpu/aarch64/vm/vmreg_aarch64.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "code/vmreg.hpp"
+
+
+
+void VMRegImpl::set_regName() { Unimplemented(); }
diff --git a/src/cpu/aarch64/vm/vmreg_aarch64.hpp b/src/cpu/aarch64/vm/vmreg_aarch64.hpp
new file mode 100644
index 000000000..e2f7475d5
--- /dev/null
+++ b/src/cpu/aarch64/vm/vmreg_aarch64.hpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_VMREG_AARCH64_HPP
+#define CPU_AARCH64_VM_VMREG_AARCH64_HPP
+
+ bool is_Register();
+ Register as_Register();
+
+ bool is_FloatRegister();
+ FloatRegister as_FloatRegister();
+#endif // CPU_AARCH64_VM_VMREG_AARCH64_HPP
diff --git a/src/cpu/aarch64/vm/vmreg_aarch64.inline.hpp b/src/cpu/aarch64/vm/vmreg_aarch64.inline.hpp
new file mode 100644
index 000000000..eb0cde527
--- /dev/null
+++ b/src/cpu/aarch64/vm/vmreg_aarch64.inline.hpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_VM_VMREG_AARCH64_INLINE_HPP
+#define CPU_AARCH64_VM_VMREG_AARCH64_INLINE_HPP
+
+inline VMReg RegisterImpl::as_VMReg() {
+ if( this==noreg ) return VMRegImpl::Bad();
+#ifdef AMD64
+ return VMRegImpl::as_VMReg(encoding() << 1 );
+#else
+ return VMRegImpl::as_VMReg(encoding() );
+#endif // AMD64
+}
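+
+// On 64-bit targets each machine register occupies two adjacent VMReg
+// slots (one per 32-bit half): hence the encoding() << 1 above and the
+// matching value() >> 1 in as_Register() below.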
+
+inline VMReg FloatRegisterImpl::as_VMReg() {
+ return VMRegImpl::as_VMReg((encoding() << 1) + ConcreteRegisterImpl::max_gpr);
+}
+
+inline bool VMRegImpl::is_Register() {
+ return (unsigned int) value() < (unsigned int) ConcreteRegisterImpl::max_gpr;
+}
+
+inline bool VMRegImpl::is_FloatRegister() {
+ return value() >= ConcreteRegisterImpl::max_gpr && value() < ConcreteRegisterImpl::max_fpr;
+}
+
+inline Register VMRegImpl::as_Register() {
+
+ assert( is_Register(), "must be");
+ // Yuk
+#ifdef AMD64
+ return ::as_Register(value() >> 1);
+#else
+ return ::as_Register(value());
+#endif // AMD64
+}
+
+inline FloatRegister VMRegImpl::as_FloatRegister() {
+ assert( is_FloatRegister() && is_even(value()), "must be" );
+ // Yuk
+ return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) >> 1);
+}
+
+inline bool VMRegImpl::is_concrete() {
+ assert(is_reg(), "must be");
+#ifndef AMD64
+ if (is_Register()) return true;
+#endif // AMD64
+ return is_even(value());
+}
+
+#endif // CPU_AARCH64_VM_VMREG_AARCH64_INLINE_HPP
diff --git a/src/cpu/aarch64/vm/vtableStubs_aarch64.cpp b/src/cpu/aarch64/vm/vtableStubs_aarch64.cpp
new file mode 100644
index 000000000..27a54b1aa
--- /dev/null
+++ b/src/cpu/aarch64/vm/vtableStubs_aarch64.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "assembler_aarch64.inline.hpp"
+#include "code/vtableStubs.hpp"
+#include "interp_masm_aarch64.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/klassVtable.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "vmreg_aarch64.inline.hpp"
+#ifdef COMPILER2
+#include "opto/runtime.hpp"
+#endif
+
+// machine-dependent part of VtableStubs: create VtableStub of correct size and
+// initialize its code
+
+#define __ masm->
+
+#ifndef PRODUCT
+extern "C" void bad_compiled_vtable_index(JavaThread* thread,
+ oop receiver,
+ int index);
+#endif
+
+VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { Unimplemented(); return 0; }
+
+
+VtableStub* VtableStubs::create_itable_stub(int itable_index) { Unimplemented(); return 0; }
+
+int VtableStub::pd_code_size_limit(bool is_vtable_stub) { Unimplemented(); return 0; }
+
+int VtableStub::pd_code_alignment() { Unimplemented(); return 0; }
diff --git a/src/cpu/x86/vm/bytes_x86.hpp b/src/cpu/x86/vm/bytes_x86.hpp
index 9f939a389..8eb0ea39b 100644
--- a/src/cpu/x86/vm/bytes_x86.hpp
+++ b/src/cpu/x86/vm/bytes_x86.hpp
@@ -75,6 +75,9 @@ class Bytes: AllStatic {
#ifdef TARGET_OS_ARCH_linux_x86
# include "bytes_linux_x86.inline.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "bytes_linux_aarch64.inline.hpp"
+#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "bytes_solaris_x86.inline.hpp"
#endif
diff --git a/src/cpu/x86/vm/copy_x86.hpp b/src/cpu/x86/vm/copy_x86.hpp
index d5c6d5efa..8f1d74e3c 100644
--- a/src/cpu/x86/vm/copy_x86.hpp
+++ b/src/cpu/x86/vm/copy_x86.hpp
@@ -31,6 +31,9 @@
#ifdef TARGET_OS_ARCH_linux_x86
# include "copy_linux_x86.inline.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "copy_linux_aarch64.inline.hpp"
+#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "copy_solaris_x86.inline.hpp"
#endif
diff --git a/src/os/linux/vm/osThread_linux.cpp b/src/os/linux/vm/osThread_linux.cpp
index 8ff88a9e3..0db52ac0f 100644
--- a/src/os/linux/vm/osThread_linux.cpp
+++ b/src/os/linux/vm/osThread_linux.cpp
@@ -33,6 +33,9 @@
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "assembler_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
diff --git a/src/os/linux/vm/os_linux.cpp b/src/os/linux/vm/os_linux.cpp
index 53457ec8d..5a3790957 100644
--- a/src/os/linux/vm/os_linux.cpp
+++ b/src/os/linux/vm/os_linux.cpp
@@ -66,6 +66,10 @@
# include "assembler_x86.inline.hpp"
# include "nativeInst_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "assembler_aarch64.inline.hpp"
+# include "nativeInst_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
# include "nativeInst_sparc.hpp"
diff --git a/src/os/linux/vm/os_linux.inline.hpp b/src/os/linux/vm/os_linux.inline.hpp
index 566c0ad31..8c86294e7 100644
--- a/src/os/linux/vm/os_linux.inline.hpp
+++ b/src/os/linux/vm/os_linux.inline.hpp
@@ -31,6 +31,10 @@
# include "atomic_linux_x86.inline.hpp"
# include "orderAccess_linux_x86.inline.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "atomic_linux_aarch64.inline.hpp"
+# include "orderAccess_linux_aarch64.inline.hpp"
+#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "atomic_linux_sparc.inline.hpp"
# include "orderAccess_linux_sparc.inline.hpp"
diff --git a/src/os/linux/vm/thread_linux.inline.hpp b/src/os/linux/vm/thread_linux.inline.hpp
index 2a5dcddb7..9e6e5209c 100644
--- a/src/os/linux/vm/thread_linux.inline.hpp
+++ b/src/os/linux/vm/thread_linux.inline.hpp
@@ -34,6 +34,11 @@
# include "orderAccess_linux_x86.inline.hpp"
# include "prefetch_linux_x86.inline.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "atomic_linux_aarch64.inline.hpp"
+# include "orderAccess_linux_aarch64.inline.hpp"
+# include "prefetch_linux_aarch64.inline.hpp"
+#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "atomic_linux_sparc.inline.hpp"
# include "orderAccess_linux_sparc.inline.hpp"
diff --git a/src/os_cpu/linux_aarch64/vm/assembler_linux_aarch64.cpp b/src/os_cpu/linux_aarch64/vm/assembler_linux_aarch64.cpp
new file mode 100644
index 000000000..d92ec3f9b
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/assembler_linux_aarch64.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "assembler_aarch64.inline.hpp"
+#include "runtime/os.hpp"
+#include "runtime/threadLocalStorage.hpp"
+
+#if 0
+void MacroAssembler::int3() {
+ fixme()
+}
+
+void MacroAssembler::get_thread(Register thread) {
+ fixme();
+}
+#endif
diff --git a/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp b/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp
new file mode 100644
index 000000000..0ef7869a6
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_INLINE_HPP
+#define OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_INLINE_HPP
+
+#include "orderAccess_linux_aarch64.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/os.hpp"
+#include "vm_version_aarch64.hpp"
+
+// Implementation of class atomic
+
+inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
+inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
+inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
+inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
+inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
+
+
+// Adding a lock prefix to an instruction on MP machine
+#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
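+// For example, LOCK_IF_MP(%3) expands to "cmp $0, %3; je 1f; lock; 1: ",
+// so the lock prefix is only executed when os::is_MP() returned nonzero.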
+
+inline jint Atomic::add (jint add_value, volatile jint* dest) {
+ jint addend = add_value;
+ int mp = os::is_MP();
+ __asm__ volatile ( LOCK_IF_MP(%3) "xaddl %0,(%2)"
+ : "=r" (addend)
+ : "0" (addend), "r" (dest), "r" (mp)
+ : "cc", "memory");
+ return addend + add_value;
+}
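+// (In Atomic::add above, xaddl leaves the previous value of *dest in the
+// addend register, so the updated value is recovered by adding add_value
+// back before returning.)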
+
+inline void Atomic::inc (volatile jint* dest) {
+ int mp = os::is_MP();
+ __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
+ : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::inc_ptr(volatile void* dest) {
+ inc_ptr((volatile intptr_t*)dest);
+}
+
+inline void Atomic::dec (volatile jint* dest) {
+ int mp = os::is_MP();
+ __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
+ : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile void* dest) {
+ dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
+ __asm__ volatile ( "xchgl (%2),%0"
+ : "=r" (exchange_value)
+ : "0" (exchange_value), "r" (dest)
+ : "memory");
+ return exchange_value;
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+ return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+
+inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
+ int mp = os::is_MP();
+ __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
+ : "=a" (exchange_value)
+ : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+ : "cc", "memory");
+ return exchange_value;
+}
+
+#ifdef AMD64
+inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
+inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+ intptr_t addend = add_value;
+ bool mp = os::is_MP();
+ __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
+ : "=r" (addend)
+ : "0" (addend), "r" (dest), "r" (mp)
+ : "cc", "memory");
+ return addend + add_value;
+}
+
+inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
+ return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+ bool mp = os::is_MP();
+ __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
+ :
+ : "r" (dest), "r" (mp)
+ : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+ bool mp = os::is_MP();
+ __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
+ :
+ : "r" (dest), "r" (mp)
+ : "cc", "memory");
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+ __asm__ __volatile__ ("xchgq (%2),%0"
+ : "=r" (exchange_value)
+ : "0" (exchange_value), "r" (dest)
+ : "memory");
+ return exchange_value;
+}
+
+inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+ bool mp = os::is_MP();
+ __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
+ : "=a" (exchange_value)
+ : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+ : "cc", "memory");
+ return exchange_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
+ return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
+ return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+ return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
+ return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+ inc((volatile jint*)dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+ dec((volatile jint*)dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+ return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+extern "C" {
+ // defined in linux_x86.s
+ jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
+ void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+}
+
+inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+ return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
+ return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
+ return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+ volatile jlong dest;
+ _Atomic_move_long(src, &dest);
+ return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+ _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+ _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
+#endif // AMD64
+
+#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_INLINE_HPP
diff --git a/src/os_cpu/linux_aarch64/vm/bytes_linux_aarch64.inline.hpp b/src/os_cpu/linux_aarch64/vm/bytes_linux_aarch64.inline.hpp
new file mode 100644
index 000000000..93cb98822
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/bytes_linux_aarch64.inline.hpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_BYTES_LINUX_AARCH64_INLINE_HPP
+#define OS_CPU_LINUX_AARCH64_VM_BYTES_LINUX_AARCH64_INLINE_HPP
+
+#include <byteswap.h>
+
+// Efficient swapping of data bytes from Java byte
+// ordering to native byte ordering and vice versa.
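+// For example, swap_u2(0x1122) returns 0x2211 and swap_u4(0x11223344)
+// returns 0x44332211.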
+inline u2 Bytes::swap_u2(u2 x) {
+#ifdef AMD64
+ return bswap_16(x);
+#else
+ u2 ret;
+ __asm__ __volatile__ (
+ "movw %0, %%ax;"
+ "xchg %%al, %%ah;"
+ "movw %%ax, %0"
+ :"=r" (ret) // output : register 0 => ret
+ :"0" (x) // input : x => register 0
+ :"ax", "0" // clobbered registers
+ );
+ return ret;
+#endif // AMD64
+}
+
+inline u4 Bytes::swap_u4(u4 x) {
+#ifdef AMD64
+ return bswap_32(x);
+#else
+ u4 ret;
+ __asm__ __volatile__ (
+ "bswap %0"
+ :"=r" (ret) // output : register 0 => ret
+ :"0" (x) // input : x => register 0
+ :"0" // clobbered register
+ );
+ return ret;
+#endif // AMD64
+}
+
+#ifdef AMD64
+inline u8 Bytes::swap_u8(u8 x) {
+#ifdef SPARC_WORKS
+ // workaround for SunStudio12 CR6615391
+ __asm__ __volatile__ (
+ "bswapq %0"
+ :"=r" (x) // output : register 0 => x
+ :"0" (x) // input : x => register 0
+ :"0" // clobbered register
+ );
+ return x;
+#else
+ return bswap_64(x);
+#endif
+}
+#else
+// Helper function for swap_u8
+inline u8 Bytes::swap_u8_base(u4 x, u4 y) {
+ return (((u8)swap_u4(x))<<32) | swap_u4(y);
+}
+
+inline u8 Bytes::swap_u8(u8 x) {
+ return swap_u8_base(*(u4*)&x, *(((u4*)&x)+1));
+}
+#endif // !AMD64
+
+#endif // OS_CPU_LINUX_AARCH64_VM_BYTES_LINUX_AARCH64_INLINE_HPP
diff --git a/src/os_cpu/linux_aarch64/vm/copy_linux_aarch64.inline.hpp b/src/os_cpu/linux_aarch64/vm/copy_linux_aarch64.inline.hpp
new file mode 100644
index 000000000..175019b86
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/copy_linux_aarch64.inline.hpp
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_COPY_LINUX_AARCH64_INLINE_HPP
+#define OS_CPU_LINUX_AARCH64_VM_COPY_LINUX_AARCH64_INLINE_HPP
+
+static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
+#ifdef AMD64
+ (void)memmove(to, from, count * HeapWordSize);
+#else
+ // Includes a zero-count check.
+ intx temp;
+ __asm__ volatile(" testl %6,%6 ;"
+ " jz 7f ;"
+ " cmpl %4,%5 ;"
+ " leal -4(%4,%6,4),%3;"
+ " jbe 1f ;"
+ " cmpl %7,%5 ;"
+ " jbe 4f ;"
+ "1: cmpl $32,%6 ;"
+ " ja 3f ;"
+ " subl %4,%1 ;"
+ "2: movl (%4),%3 ;"
+ " movl %7,(%5,%4,1) ;"
+ " addl $4,%0 ;"
+ " subl $1,%2 ;"
+ " jnz 2b ;"
+ " jmp 7f ;"
+ "3: rep; smovl ;"
+ " jmp 7f ;"
+ "4: cmpl $32,%2 ;"
+ " movl %7,%0 ;"
+ " leal -4(%5,%6,4),%1;"
+ " ja 6f ;"
+ " subl %4,%1 ;"
+ "5: movl (%4),%3 ;"
+ " movl %7,(%5,%4,1) ;"
+ " subl $4,%0 ;"
+ " subl $1,%2 ;"
+ " jnz 5b ;"
+ " jmp 7f ;"
+ "6: std ;"
+ " rep; smovl ;"
+ " cld ;"
+ "7: nop "
+ : "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
+ : "0" (from), "1" (to), "2" (count), "3" (temp)
+ : "memory", "flags");
+#endif // AMD64
+}
+
+static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
+#ifdef AMD64
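+  // The fall-through switch below copies up to eight heap words without
+  // a loop; larger counts defer to memcpy.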
+ switch (count) {
+ case 8: to[7] = from[7];
+ case 7: to[6] = from[6];
+ case 6: to[5] = from[5];
+ case 5: to[4] = from[4];
+ case 4: to[3] = from[3];
+ case 3: to[2] = from[2];
+ case 2: to[1] = from[1];
+ case 1: to[0] = from[0];
+ case 0: break;
+ default:
+ (void)memcpy(to, from, count * HeapWordSize);
+ break;
+ }
+#else
+ // Includes a zero-count check.
+ intx temp;
+ __asm__ volatile(" testl %6,%6 ;"
+ " jz 3f ;"
+ " cmpl $32,%6 ;"
+ " ja 2f ;"
+ " subl %4,%1 ;"
+ "1: movl (%4),%3 ;"
+ " movl %7,(%5,%4,1);"
+ " addl $4,%0 ;"
+ " subl $1,%2 ;"
+ " jnz 1b ;"
+ " jmp 3f ;"
+ "2: rep; smovl ;"
+ "3: nop "
+ : "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
+ : "0" (from), "1" (to), "2" (count), "3" (temp)
+ : "memory", "cc");
+#endif // AMD64
+}
+
+static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
+#ifdef AMD64
+ switch (count) {
+ case 8: to[7] = from[7];
+ case 7: to[6] = from[6];
+ case 6: to[5] = from[5];
+ case 5: to[4] = from[4];
+ case 4: to[3] = from[3];
+ case 3: to[2] = from[2];
+ case 2: to[1] = from[1];
+ case 1: to[0] = from[0];
+ case 0: break;
+ default:
+ while (count-- > 0) {
+ *to++ = *from++;
+ }
+ break;
+ }
+#else
+ // pd_disjoint_words is word-atomic in this implementation.
+ pd_disjoint_words(from, to, count);
+#endif // AMD64
+}
+
+static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
+ pd_conjoint_words(from, to, count);
+}
+
+static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
+ pd_disjoint_words(from, to, count);
+}
+
+static void pd_conjoint_bytes(void* from, void* to, size_t count) {
+#ifdef AMD64
+ (void)memmove(to, from, count);
+#else
+ // Includes a zero-count check.
+ intx temp;
+ __asm__ volatile(" testl %6,%6 ;"
+ " jz 13f ;"
+ " cmpl %4,%5 ;"
+ " leal -1(%4,%6),%3 ;"
+ " jbe 1f ;"
+ " cmpl %7,%5 ;"
+ " jbe 8f ;"
+ "1: cmpl $3,%6 ;"
+ " jbe 6f ;"
+ " movl %6,%3 ;"
+ " movl $4,%2 ;"
+ " subl %4,%2 ;"
+ " andl $3,%2 ;"
+ " jz 2f ;"
+ " subl %6,%3 ;"
+ " rep; smovb ;"
+ "2: movl %7,%2 ;"
+ " shrl $2,%2 ;"
+ " jz 5f ;"
+ " cmpl $32,%2 ;"
+ " ja 4f ;"
+ " subl %4,%1 ;"
+ "3: movl (%4),%%edx ;"
+ " movl %%edx,(%5,%4,1);"
+ " addl $4,%0 ;"
+ " subl $1,%2 ;"
+ " jnz 3b ;"
+ " addl %4,%1 ;"
+ " jmp 5f ;"
+ "4: rep; smovl ;"
+ "5: movl %7,%2 ;"
+ " andl $3,%2 ;"
+ " jz 13f ;"
+ "6: xorl %7,%3 ;"
+ "7: movb (%4,%7,1),%%dl ;"
+ " movb %%dl,(%5,%7,1) ;"
+ " addl $1,%3 ;"
+ " subl $1,%2 ;"
+ " jnz 7b ;"
+ " jmp 13f ;"
+ "8: std ;"
+ " cmpl $12,%2 ;"
+ " ja 9f ;"
+ " movl %7,%0 ;"
+ " leal -1(%6,%5),%1 ;"
+ " jmp 11f ;"
+ "9: xchgl %3,%2 ;"
+ " movl %6,%0 ;"
+ " addl $1,%2 ;"
+ " leal -1(%7,%5),%1 ;"
+ " andl $3,%2 ;"
+ " jz 10f ;"
+ " subl %6,%3 ;"
+ " rep; smovb ;"
+ "10: movl %7,%2 ;"
+ " subl $3,%0 ;"
+ " shrl $2,%2 ;"
+ " subl $3,%1 ;"
+ " rep; smovl ;"
+ " andl $3,%3 ;"
+ " jz 12f ;"
+ " movl %7,%2 ;"
+ " addl $3,%0 ;"
+ " addl $3,%1 ;"
+ "11: rep; smovb ;"
+ "12: cld ;"
+ "13: nop ;"
+ : "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
+ : "0" (from), "1" (to), "2" (count), "3" (temp)
+ : "memory", "flags", "%edx");
+#endif // AMD64
+}
+
+static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
+ pd_conjoint_bytes(from, to, count);
+}
+
+static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
+ _Copy_conjoint_jshorts_atomic(from, to, count);
+}
+
+static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
+#ifdef AMD64
+ _Copy_conjoint_jints_atomic(from, to, count);
+#else
+ assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size");
+ // pd_conjoint_words is word-atomic in this implementation.
+ pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count);
+#endif // AMD64
+}
+
+static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
+#ifdef AMD64
+ _Copy_conjoint_jlongs_atomic(from, to, count);
+#else
+ // Guarantee use of fild/fistp or xmm regs via some asm code, because compilers won't.
+ if (from > to) {
+ while (count-- > 0) {
+ __asm__ volatile("fildll (%0); fistpll (%1)"
+ :
+ : "r" (from), "r" (to)
+ : "memory" );
+ ++from;
+ ++to;
+ }
+ } else {
+ while (count-- > 0) {
+ __asm__ volatile("fildll (%0,%2,8); fistpll (%1,%2,8)"
+ :
+ : "r" (from), "r" (to), "r" (count)
+ : "memory" );
+ }
+ }
+#endif // AMD64
+}
+
+static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
+#ifdef AMD64
+ assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
+ _Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
+#else
+ assert(HeapWordSize == BytesPerOop, "heapwords and oops must be the same size");
+ // pd_conjoint_words is word-atomic in this implementation.
+ pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count);
+#endif // AMD64
+}
+
+static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
+ _Copy_arrayof_conjoint_bytes(from, to, count);
+}
+
+static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
+ _Copy_arrayof_conjoint_jshorts(from, to, count);
+}
+
+static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
+#ifdef AMD64
+ _Copy_arrayof_conjoint_jints(from, to, count);
+#else
+ pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
+#endif // AMD64
+}
+
+static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
+#ifdef AMD64
+ _Copy_arrayof_conjoint_jlongs(from, to, count);
+#else
+ pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
+#endif // AMD64
+}
+
+static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
+#ifdef AMD64
+ assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
+ _Copy_arrayof_conjoint_jlongs(from, to, count);
+#else
+ pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
+#endif // AMD64
+}
+
+#endif // OS_CPU_LINUX_AARCH64_VM_COPY_LINUX_AARCH64_INLINE_HPP
diff --git a/src/os_cpu/linux_aarch64/vm/globals_linux_aarch64.hpp b/src/os_cpu/linux_aarch64/vm/globals_linux_aarch64.hpp
new file mode 100644
index 000000000..7ca61f27f
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/globals_linux_aarch64.hpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_GLOBALS_LINUX_AARCH64_HPP
+#define OS_CPU_LINUX_AARCH64_VM_GLOBALS_LINUX_AARCH64_HPP
+
+// Sets the default values for platform dependent flags used by the runtime system.
+// (see globals.hpp)
+
+define_pd_global(bool, DontYieldALot, false);
+#ifdef AMD64
+define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
+define_pd_global(intx, VMThreadStackSize, 1024);
+#else
+// ThreadStackSize 320 allows a couple of test cases to run while
+// keeping the number of threads that can be created high. System
+// default ThreadStackSize appears to be 512 which is too big.
+define_pd_global(intx, ThreadStackSize, 320);
+define_pd_global(intx, VMThreadStackSize, 512);
+#endif // AMD64
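+
+// define_pd_global supplies the platform-dependent default for a flag
+// declared in runtime/globals.hpp; it can still be overridden on the
+// command line, e.g. -XX:ThreadStackSize=512.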
+
+define_pd_global(intx, CompilerThreadStackSize, 0);
+
+define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
+
+// Only used on 64 bit platforms
+define_pd_global(uintx, HeapBaseMinAddress, 2*G);
+// Only used on 64 bit Windows platforms
+define_pd_global(bool, UseVectoredExceptions, false);
+
+#endif // OS_CPU_LINUX_AARCH64_VM_GLOBALS_LINUX_AARCH64_HPP
diff --git a/src/os_cpu/linux_aarch64/vm/linux_aarch64.ad b/src/os_cpu/linux_aarch64/vm/linux_aarch64.ad
new file mode 100644
index 000000000..a71f7d16c
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/linux_aarch64.ad
@@ -0,0 +1,68 @@
+//
+// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+//
+
+// AArch64 Linux Architecture Description File (content inherited from the
+// AMD64 skeleton)
+
+//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
+// This block specifies the encoding classes used by the compiler to
+// output byte streams. Encoding classes generate functions which are
+// called by Machine Instruction Nodes in order to generate the bit
+// encoding of the instruction. Operands specify their base encoding
+// interface with the interface keyword. Four interfaces are currently
+// supported: REG_INTER, CONST_INTER, MEMORY_INTER, and COND_INTER.
+// REG_INTER causes an operand to generate a function
+// which returns its register number when queried. CONST_INTER causes
+// an operand to generate a function which returns the value of the
+// constant when queried. MEMORY_INTER causes an operand to generate
+// four functions which return the Base Register, the Index Register,
+// the Scale Value, and the Offset Value of the operand when queried.
+// COND_INTER causes an operand to generate six functions which return
+// the encoding code (ie - encoding bits for the instruction)
+// associated with each basic boolean condition for a conditional
+// instruction. Instructions specify two basic values for encoding.
+// They use the ins_encode keyword to specify their encoding class
+// (which must be one of the class names specified in the encoding
+// block), and they use the opcode keyword to specify, in order, their
+// primary, secondary, and tertiary opcode. Only the opcode sections
+// which a particular instruction needs for encoding need to be
+// specified.
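+//
+// As a sketch only (nothing in this skeleton build emits through it), a
+// filled-in encoding class would instantiate an assembler over the code
+// buffer and emit instruction bytes, e.g.:
+//
+//   enc_class enc_sketch(method meth) %{
+//     MacroAssembler _masm(&cbuf);
+//     // ... emit the call sequence for 'meth' here ...
+//   %}
+//
+// (enc_sketch is a hypothetical name; cbuf is the CodeBuffer every
+// enc_class body receives.)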
+encode %{
+ // Build emit functions for each basic byte or larger field in the intel
+ // encoding scheme (opcode, rm, sib, immediate), and call them from C++
+ // code in the enc_class source block. Emit functions will live in the
+ // main source block for now. In future, we can generalize this by
+ // adding a syntax that specifies the sizes of fields in an order,
+ // so that the adlc can build the emit functions automagically
+
+ enc_class Java_To_Runtime(method meth) %{
+ %}
+
+%}
+
+
+// Platform dependent source
+
+source %{
+
+%}
diff --git a/src/os_cpu/linux_aarch64/vm/linux_aarch64.s b/src/os_cpu/linux_aarch64/vm/linux_aarch64.s
new file mode 100644
index 000000000..8be68610e
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/linux_aarch64.s
@@ -0,0 +1,402 @@
+#
+# Copyright (c) 2004, 2007, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+
+ # NOTE WELL! The _Copy functions are called directly
+ # from server-compiler-generated code via CallLeafNoFP,
+ # which means that they *must* either not use floating
+ # point or use it in the same manner as does the server
+ # compiler.
+
+ .globl _Copy_arrayof_conjoint_bytes
+ .globl _Copy_arrayof_conjoint_jshorts
+ .globl _Copy_conjoint_jshorts_atomic
+ .globl _Copy_arrayof_conjoint_jints
+ .globl _Copy_conjoint_jints_atomic
+ .globl _Copy_arrayof_conjoint_jlongs
+ .globl _Copy_conjoint_jlongs_atomic
+
+ .text
+
+ .globl SafeFetch32, Fetch32PFI, Fetch32Resume
+ .align 16
+ .type SafeFetch32,@function
+ // Prototype: int SafeFetch32 (int * Adr, int ErrValue)
+SafeFetch32:
+ movl %esi, %eax
+Fetch32PFI:
+ movl (%rdi), %eax
+Fetch32Resume:
+ ret
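+
+        # If the load at Fetch32PFI faults, the SEGV handler in
+        # os_linux_aarch64.cpp rewrites the saved PC to Fetch32Resume, so
+        # the caller gets back ErrValue, still sitting in %eax.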
+
+ .globl SafeFetchN, FetchNPFI, FetchNResume
+ .align 16
+ .type SafeFetchN,@function
+ // Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue)
+SafeFetchN:
+ movq %rsi, %rax
+FetchNPFI:
+ movq (%rdi), %rax
+FetchNResume:
+ ret
+
+ .globl SpinPause
+ .align 16
+ .type SpinPause,@function
+SpinPause:
+ rep
+ nop
+ movq $1, %rax
+ ret
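+
+        # (rep; nop above assembles to the x86 `pause` hint, which
+        # throttles speculation inside spin loops and saves power.)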
+
+ # Support for void Copy::arrayof_conjoint_bytes(void* from,
+ # void* to,
+ # size_t count)
+ # rdi - from
+ # rsi - to
+ # rdx - count, treated as ssize_t
+ #
+ .p2align 4,,15
+ .type _Copy_arrayof_conjoint_bytes,@function
+_Copy_arrayof_conjoint_bytes:
+ movq %rdx,%r8 # byte count
+ shrq $3,%rdx # qword count
+ cmpq %rdi,%rsi
+ leaq -1(%rdi,%r8,1),%rax # from + bcount*1 - 1
+ jbe acb_CopyRight
+ cmpq %rax,%rsi
+ jbe acb_CopyLeft
+acb_CopyRight:
+ leaq -8(%rdi,%rdx,8),%rax # from + qcount*8 - 8
+ leaq -8(%rsi,%rdx,8),%rcx # to + qcount*8 - 8
+ negq %rdx
+ jmp 7f
+ .p2align 4,,15
+1: movq 8(%rax,%rdx,8),%rsi
+ movq %rsi,8(%rcx,%rdx,8)
+ addq $1,%rdx
+ jnz 1b
+2: testq $4,%r8 # check for trailing dword
+ jz 3f
+ movl 8(%rax),%esi # copy trailing dword
+ movl %esi,8(%rcx)
+ addq $4,%rax
+ addq $4,%rcx # original %rsi is trashed, so we
+ # can't use it as a base register
+3: testq $2,%r8 # check for trailing word
+ jz 4f
+ movw 8(%rax),%si # copy trailing word
+ movw %si,8(%rcx)
+ addq $2,%rcx
+4: testq $1,%r8 # check for trailing byte
+ jz 5f
+ movb -1(%rdi,%r8,1),%al # copy trailing byte
+ movb %al,8(%rcx)
+5: ret
+ .p2align 4,,15
+6: movq -24(%rax,%rdx,8),%rsi
+ movq %rsi,-24(%rcx,%rdx,8)
+ movq -16(%rax,%rdx,8),%rsi
+ movq %rsi,-16(%rcx,%rdx,8)
+ movq -8(%rax,%rdx,8),%rsi
+ movq %rsi,-8(%rcx,%rdx,8)
+ movq (%rax,%rdx,8),%rsi
+ movq %rsi,(%rcx,%rdx,8)
+7: addq $4,%rdx
+ jle 6b
+ subq $4,%rdx
+ jl 1b
+ jmp 2b
+acb_CopyLeft:
+ testq $1,%r8 # check for trailing byte
+ jz 1f
+ movb -1(%rdi,%r8,1),%cl # copy trailing byte
+ movb %cl,-1(%rsi,%r8,1)
+ subq $1,%r8 # adjust for possible trailing word
+1: testq $2,%r8 # check for trailing word
+ jz 2f
+ movw -2(%rdi,%r8,1),%cx # copy trailing word
+ movw %cx,-2(%rsi,%r8,1)
+2: testq $4,%r8 # check for trailing dword
+ jz 5f
+ movl (%rdi,%rdx,8),%ecx # copy trailing dword
+ movl %ecx,(%rsi,%rdx,8)
+ jmp 5f
+ .p2align 4,,15
+3: movq -8(%rdi,%rdx,8),%rcx
+ movq %rcx,-8(%rsi,%rdx,8)
+ subq $1,%rdx
+ jnz 3b
+ ret
+ .p2align 4,,15
+4: movq 24(%rdi,%rdx,8),%rcx
+ movq %rcx,24(%rsi,%rdx,8)
+ movq 16(%rdi,%rdx,8),%rcx
+ movq %rcx,16(%rsi,%rdx,8)
+ movq 8(%rdi,%rdx,8),%rcx
+ movq %rcx,8(%rsi,%rdx,8)
+ movq (%rdi,%rdx,8),%rcx
+ movq %rcx,(%rsi,%rdx,8)
+5: subq $4,%rdx
+ jge 4b
+ addq $4,%rdx
+ jg 3b
+ ret
+
+ # Support for void Copy::arrayof_conjoint_jshorts(void* from,
+ # void* to,
+ # size_t count)
+ # Equivalent to
+ # conjoint_jshorts_atomic
+ #
+ # If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
+        # let the hardware handle it. The two or four words within dwords
+ # or qwords that span cache line boundaries will still be loaded
+ # and stored atomically.
+ #
+ # rdi - from
+ # rsi - to
+ # rdx - count, treated as ssize_t
+ #
+ .p2align 4,,15
+ .type _Copy_arrayof_conjoint_jshorts,@function
+ .type _Copy_conjoint_jshorts_atomic,@function
+_Copy_arrayof_conjoint_jshorts:
+_Copy_conjoint_jshorts_atomic:
+ movq %rdx,%r8 # word count
+ shrq $2,%rdx # qword count
+ cmpq %rdi,%rsi
+ leaq -2(%rdi,%r8,2),%rax # from + wcount*2 - 2
+ jbe acs_CopyRight
+ cmpq %rax,%rsi
+ jbe acs_CopyLeft
+acs_CopyRight:
+ leaq -8(%rdi,%rdx,8),%rax # from + qcount*8 - 8
+ leaq -8(%rsi,%rdx,8),%rcx # to + qcount*8 - 8
+ negq %rdx
+ jmp 6f
+1: movq 8(%rax,%rdx,8),%rsi
+ movq %rsi,8(%rcx,%rdx,8)
+ addq $1,%rdx
+ jnz 1b
+2: testq $2,%r8 # check for trailing dword
+ jz 3f
+ movl 8(%rax),%esi # copy trailing dword
+ movl %esi,8(%rcx)
+ addq $4,%rcx # original %rsi is trashed, so we
+ # can't use it as a base register
+3: testq $1,%r8 # check for trailing word
+ jz 4f
+ movw -2(%rdi,%r8,2),%si # copy trailing word
+ movw %si,8(%rcx)
+4: ret
+ .p2align 4,,15
+5: movq -24(%rax,%rdx,8),%rsi
+ movq %rsi,-24(%rcx,%rdx,8)
+ movq -16(%rax,%rdx,8),%rsi
+ movq %rsi,-16(%rcx,%rdx,8)
+ movq -8(%rax,%rdx,8),%rsi
+ movq %rsi,-8(%rcx,%rdx,8)
+ movq (%rax,%rdx,8),%rsi
+ movq %rsi,(%rcx,%rdx,8)
+6: addq $4,%rdx
+ jle 5b
+ subq $4,%rdx
+ jl 1b
+ jmp 2b
+acs_CopyLeft:
+ testq $1,%r8 # check for trailing word
+ jz 1f
+ movw -2(%rdi,%r8,2),%cx # copy trailing word
+ movw %cx,-2(%rsi,%r8,2)
+1: testq $2,%r8 # check for trailing dword
+ jz 4f
+ movl (%rdi,%rdx,8),%ecx # copy trailing dword
+ movl %ecx,(%rsi,%rdx,8)
+ jmp 4f
+2: movq -8(%rdi,%rdx,8),%rcx
+ movq %rcx,-8(%rsi,%rdx,8)
+ subq $1,%rdx
+ jnz 2b
+ ret
+ .p2align 4,,15
+3: movq 24(%rdi,%rdx,8),%rcx
+ movq %rcx,24(%rsi,%rdx,8)
+ movq 16(%rdi,%rdx,8),%rcx
+ movq %rcx,16(%rsi,%rdx,8)
+ movq 8(%rdi,%rdx,8),%rcx
+ movq %rcx,8(%rsi,%rdx,8)
+ movq (%rdi,%rdx,8),%rcx
+ movq %rcx,(%rsi,%rdx,8)
+4: subq $4,%rdx
+ jge 3b
+ addq $4,%rdx
+ jg 2b
+ ret
+
+ # Support for void Copy::arrayof_conjoint_jints(jint* from,
+ # jint* to,
+ # size_t count)
+ # Equivalent to
+ # conjoint_jints_atomic
+ #
+ # If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
+ # the hardware handle it. The two dwords within qwords that span
+ # cache line boundaries will still be loaded and stored atomically.
+ #
+ # rdi - from
+ # rsi - to
+ # rdx - count, treated as ssize_t
+ #
+ .p2align 4,,15
+ .type _Copy_arrayof_conjoint_jints,@function
+ .type _Copy_conjoint_jints_atomic,@function
+_Copy_arrayof_conjoint_jints:
+_Copy_conjoint_jints_atomic:
+ movq %rdx,%r8 # dword count
+ shrq %rdx # qword count
+ cmpq %rdi,%rsi
+ leaq -4(%rdi,%r8,4),%rax # from + dcount*4 - 4
+ jbe aci_CopyRight
+ cmpq %rax,%rsi
+ jbe aci_CopyLeft
+aci_CopyRight:
+ leaq -8(%rdi,%rdx,8),%rax # from + qcount*8 - 8
+ leaq -8(%rsi,%rdx,8),%rcx # to + qcount*8 - 8
+ negq %rdx
+ jmp 5f
+ .p2align 4,,15
+1: movq 8(%rax,%rdx,8),%rsi
+ movq %rsi,8(%rcx,%rdx,8)
+ addq $1,%rdx
+ jnz 1b
+2: testq $1,%r8 # check for trailing dword
+ jz 3f
+ movl 8(%rax),%esi # copy trailing dword
+ movl %esi,8(%rcx)
+3: ret
+ .p2align 4,,15
+4: movq -24(%rax,%rdx,8),%rsi
+ movq %rsi,-24(%rcx,%rdx,8)
+ movq -16(%rax,%rdx,8),%rsi
+ movq %rsi,-16(%rcx,%rdx,8)
+ movq -8(%rax,%rdx,8),%rsi
+ movq %rsi,-8(%rcx,%rdx,8)
+ movq (%rax,%rdx,8),%rsi
+ movq %rsi,(%rcx,%rdx,8)
+5: addq $4,%rdx
+ jle 4b
+ subq $4,%rdx
+ jl 1b
+ jmp 2b
+aci_CopyLeft:
+ testq $1,%r8 # check for trailing dword
+ jz 3f
+ movl -4(%rdi,%r8,4),%ecx # copy trailing dword
+ movl %ecx,-4(%rsi,%r8,4)
+ jmp 3f
+1: movq -8(%rdi,%rdx,8),%rcx
+ movq %rcx,-8(%rsi,%rdx,8)
+ subq $1,%rdx
+ jnz 1b
+ ret
+ .p2align 4,,15
+2: movq 24(%rdi,%rdx,8),%rcx
+ movq %rcx,24(%rsi,%rdx,8)
+ movq 16(%rdi,%rdx,8),%rcx
+ movq %rcx,16(%rsi,%rdx,8)
+ movq 8(%rdi,%rdx,8),%rcx
+ movq %rcx,8(%rsi,%rdx,8)
+ movq (%rdi,%rdx,8),%rcx
+ movq %rcx,(%rsi,%rdx,8)
+3: subq $4,%rdx
+ jge 2b
+ addq $4,%rdx
+ jg 1b
+ ret
+
+ # Support for void Copy::arrayof_conjoint_jlongs(jlong* from,
+ # jlong* to,
+ # size_t count)
+ # Equivalent to
+ # conjoint_jlongs_atomic
+ # arrayof_conjoint_oops
+ # conjoint_oops_atomic
+ #
+ # rdi - from
+ # rsi - to
+ # rdx - count, treated as ssize_t
+ #
+ .p2align 4,,15
+ .type _Copy_arrayof_conjoint_jlongs,@function
+ .type _Copy_conjoint_jlongs_atomic,@function
+_Copy_arrayof_conjoint_jlongs:
+_Copy_conjoint_jlongs_atomic:
+ cmpq %rdi,%rsi
+ leaq -8(%rdi,%rdx,8),%rax # from + count*8 - 8
+ jbe acl_CopyRight
+ cmpq %rax,%rsi
+ jbe acl_CopyLeft
+acl_CopyRight:
+ leaq -8(%rsi,%rdx,8),%rcx # to + count*8 - 8
+ negq %rdx
+ jmp 3f
+1: movq 8(%rax,%rdx,8),%rsi
+ movq %rsi,8(%rcx,%rdx,8)
+ addq $1,%rdx
+ jnz 1b
+ ret
+ .p2align 4,,15
+2: movq -24(%rax,%rdx,8),%rsi
+ movq %rsi,-24(%rcx,%rdx,8)
+ movq -16(%rax,%rdx,8),%rsi
+ movq %rsi,-16(%rcx,%rdx,8)
+ movq -8(%rax,%rdx,8),%rsi
+ movq %rsi,-8(%rcx,%rdx,8)
+ movq (%rax,%rdx,8),%rsi
+ movq %rsi,(%rcx,%rdx,8)
+3: addq $4,%rdx
+ jle 2b
+ subq $4,%rdx
+ jl 1b
+ ret
+4: movq -8(%rdi,%rdx,8),%rcx
+ movq %rcx,-8(%rsi,%rdx,8)
+ subq $1,%rdx
+ jnz 4b
+ ret
+ .p2align 4,,15
+5: movq 24(%rdi,%rdx,8),%rcx
+ movq %rcx,24(%rsi,%rdx,8)
+ movq 16(%rdi,%rdx,8),%rcx
+ movq %rcx,16(%rsi,%rdx,8)
+ movq 8(%rdi,%rdx,8),%rcx
+ movq %rcx,8(%rsi,%rdx,8)
+ movq (%rdi,%rdx,8),%rcx
+ movq %rcx,(%rsi,%rdx,8)
+acl_CopyLeft:
+ subq $4,%rdx
+ jge 5b
+ addq $4,%rdx
+ jg 4b
+ ret
diff --git a/src/os_cpu/linux_aarch64/vm/orderAccess_linux_aarch64.inline.hpp b/src/os_cpu/linux_aarch64/vm/orderAccess_linux_aarch64.inline.hpp
new file mode 100644
index 000000000..b3de8a232
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/orderAccess_linux_aarch64.inline.hpp
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
+#define OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
+
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "vm_version_aarch64.hpp"
+
+// Implementation of class OrderAccess.
+
+inline void OrderAccess::loadload() { acquire(); }
+inline void OrderAccess::storestore() { release(); }
+inline void OrderAccess::loadstore() { acquire(); }
+inline void OrderAccess::storeload() { fence(); }
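+
+// On x86 the hardware only lets a load pass an earlier store, so of the
+// four barriers above only storeload() needs a fence instruction; the
+// other three only have to keep the compiler from reordering.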
+
+inline void OrderAccess::acquire() {
+ volatile intptr_t local_dummy;
+#ifdef AMD64
+ __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
+#else
+ __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
+#endif // AMD64
+}
+
+inline void OrderAccess::release() {
+ // Avoid hitting the same cache-line from
+ // different threads.
+ volatile jint local_dummy = 0;
+}
+
+inline void OrderAccess::fence() {
+ if (os::is_MP()) {
+ // always use locked addl since mfence is sometimes expensive
+#ifdef AMD64
+ __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
+#else
+ __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
+#endif
+ }
+}
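+
+// For reference, the equivalent serializing alternative would be a plain
+// mfence:
+//   __asm__ volatile ("mfence" : : : "memory");
+// The locked addl of zero to the top-of-stack word gives the same
+// ordering guarantee and is usually cheaper.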
+
+inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; }
+inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; }
+inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; }
+inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); }
+inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; }
+inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; }
+inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; }
+inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); }
+inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; }
+inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
+
+inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; }
+inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; }
+inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
+
+inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; }
+inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; }
+inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; }
+inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); }
+inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; }
+inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
+inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; }
+inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); }
+inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; }
+inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
+
+inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
+inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { *(void* volatile *)p = v; }
+
+inline void OrderAccess::store_fence(jbyte* p, jbyte v) {
+ __asm__ volatile ( "xchgb (%2),%0"
+ : "=q" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+}
+inline void OrderAccess::store_fence(jshort* p, jshort v) {
+ __asm__ volatile ( "xchgw (%2),%0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+}
+inline void OrderAccess::store_fence(jint* p, jint v) {
+ __asm__ volatile ( "xchgl (%2),%0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+}
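+
+// Note: an xchg with a memory operand carries an implicit lock prefix on
+// x86, so each store_fence above is a store plus a full fence in a
+// single instruction.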
+
+inline void OrderAccess::store_fence(jlong* p, jlong v) {
+#ifdef AMD64
+ __asm__ __volatile__ ("xchgq (%2), %0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+#else
+ *p = v; fence();
+#endif // AMD64
+}
+
+// AMD64 reuses the bodies of the signed versions, as 32-bit already did. As long
+// as the compiler inlines these calls, this is the simpler approach.
+inline void OrderAccess::store_fence(jubyte* p, jubyte v) { store_fence((jbyte*)p, (jbyte)v); }
+inline void OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
+inline void OrderAccess::store_fence(juint* p, juint v) { store_fence((jint*)p, (jint)v); }
+inline void OrderAccess::store_fence(julong* p, julong v) { store_fence((jlong*)p, (jlong)v); }
+inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); }
+inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }
+
+inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
+#ifdef AMD64
+ __asm__ __volatile__ ("xchgq (%2), %0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+#else
+ store_fence((jint*)p, (jint)v);
+#endif // AMD64
+}
+
+inline void OrderAccess::store_ptr_fence(void** p, void* v) {
+#ifdef AMD64
+ __asm__ __volatile__ ("xchgq (%2), %0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+#else
+ store_fence((jint*)p, (jint)v);
+#endif // AMD64
+}
+
+// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
+inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) {
+ __asm__ volatile ( "xchgb (%2),%0"
+ : "=q" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+}
+inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
+ __asm__ volatile ( "xchgw (%2),%0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+}
+inline void OrderAccess::release_store_fence(volatile jint* p, jint v) {
+ __asm__ volatile ( "xchgl (%2),%0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+}
+
+inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) {
+#ifdef AMD64
+ __asm__ __volatile__ ( "xchgq (%2), %0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+#else
+ release_store(p, v); fence();
+#endif // AMD64
+}
+
+inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { release_store_fence((volatile jbyte*)p, (jbyte)v); }
+inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
+inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { release_store_fence((volatile jint*)p, (jint)v); }
+inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store_fence((volatile jlong*)p, (jlong)v); }
+
+inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); }
+inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }
+
+inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
+#ifdef AMD64
+ __asm__ __volatile__ ( "xchgq (%2), %0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+#else
+ release_store_fence((volatile jint*)p, (jint)v);
+#endif // AMD64
+}
+inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) {
+#ifdef AMD64
+ __asm__ __volatile__ ( "xchgq (%2), %0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+#else
+ release_store_fence((volatile jint*)p, (jint)v);
+#endif // AMD64
+}
+
+#endif // OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
diff --git a/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp
new file mode 100644
index 000000000..a12a3e318
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp
@@ -0,0 +1,872 @@
+/*
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// no precompiled headers
+#include "assembler_aarch64.inline.hpp"
+#include "classfile/classLoader.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "code/icBuffer.hpp"
+#include "code/vtableStubs.hpp"
+#include "interpreter/interpreter.hpp"
+#include "jvm_linux.h"
+#include "memory/allocation.inline.hpp"
+#include "mutex_linux.inline.hpp"
+#include "nativeInst_aarch64.hpp"
+#include "os_share_linux.hpp"
+#include "prims/jniFastGetField.hpp"
+#include "prims/jvm.h"
+#include "prims/jvm_misc.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/extendedPC.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/java.hpp"
+#include "runtime/javaCalls.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/osThread.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/timer.hpp"
+#include "thread_linux.inline.hpp"
+#include "utilities/events.hpp"
+#include "utilities/vmError.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#endif
+#ifdef COMPILER2
+#include "opto/runtime.hpp"
+#endif
+
+// put OS-includes here
+# include <sys/types.h>
+# include <sys/mman.h>
+# include <pthread.h>
+# include <signal.h>
+# include <errno.h>
+# include <dlfcn.h>
+# include <stdlib.h>
+# include <stdio.h>
+# include <unistd.h>
+# include <sys/resource.h>
+# include <pthread.h>
+# include <sys/stat.h>
+# include <sys/time.h>
+# include <sys/utsname.h>
+# include <sys/socket.h>
+# include <sys/wait.h>
+# include <pwd.h>
+# include <poll.h>
+# include <ucontext.h>
+# include <fpu_control.h>
+
+#ifdef AMD64
+#define REG_SP REG_RSP
+#define REG_PC REG_RIP
+#define REG_FP REG_RBP
+#define SPELL_REG_SP "rsp"
+#define SPELL_REG_FP "rbp"
+#else
+#define REG_SP REG_UESP
+#define REG_PC REG_EIP
+#define REG_FP REG_EBP
+#define SPELL_REG_SP "esp"
+#define SPELL_REG_FP "ebp"
+#endif // AMD64
+
+address os::current_stack_pointer() {
+#ifdef SPARC_WORKS
+ register void *esp;
+ __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
+ return (address) ((char*)esp + sizeof(long)*2);
+#else
+ register void *esp __asm__ (SPELL_REG_SP);
+ return (address) esp;
+#endif
+}
+
+char* os::non_memory_address_word() {
+ // Must never look like an address returned by reserve_memory,
+ // even in its subfields (as defined by the CPU immediate fields,
+ // if the CPU splits constants across multiple instructions).
+
+ return (char*) -1;
+}
+
+void os::initialize_thread() {
+  // Nothing to do.
+}
+
+address os::Linux::ucontext_get_pc(ucontext_t * uc) {
+ return (address)uc->uc_mcontext.gregs[REG_PC];
+}
+
+intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
+ return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
+}
+
+intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) {
+ return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
+}
+
+// For Forte Analyzer AsyncGetCallTrace profiling support - thread
+// is currently interrupted by SIGPROF.
+// os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
+// frames. Currently we don't do that on Linux, so it's the same as
+// os::fetch_frame_from_context().
+ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
+ ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
+
+ assert(thread != NULL, "just checking");
+ assert(ret_sp != NULL, "just checking");
+ assert(ret_fp != NULL, "just checking");
+
+ return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
+}
+
+ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+ intptr_t** ret_sp, intptr_t** ret_fp) {
+
+ ExtendedPC epc;
+ ucontext_t* uc = (ucontext_t*)ucVoid;
+
+ if (uc != NULL) {
+ epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
+ if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
+ if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
+ } else {
+ // construct empty ExtendedPC for return value checking
+ epc = ExtendedPC(NULL);
+ if (ret_sp) *ret_sp = (intptr_t *)NULL;
+ if (ret_fp) *ret_fp = (intptr_t *)NULL;
+ }
+
+ return epc;
+}
+
+frame os::fetch_frame_from_context(void* ucVoid) {
+ intptr_t* sp;
+ intptr_t* fp;
+ ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
+ return frame(sp, fp, epc.pc());
+}
+
+// By default, gcc always saves the frame pointer (%ebp/%rbp) on the stack.
+// This may be turned off with -fomit-frame-pointer.
+frame os::get_sender_for_C_frame(frame* fr) {
+ return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
+}
+
+intptr_t* _get_previous_fp() {
+#ifdef SPARC_WORKS
+ register intptr_t **ebp;
+ __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
+#else
+ register intptr_t **ebp __asm__ (SPELL_REG_FP);
+#endif
+ return (intptr_t*) *ebp; // we want what it points to.
+}
+
+
+frame os::current_frame() {
+ intptr_t* fp = _get_previous_fp();
+ frame myframe((intptr_t*)os::current_stack_pointer(),
+ (intptr_t*)fp,
+ CAST_FROM_FN_PTR(address, os::current_frame));
+ if (os::is_first_C_frame(&myframe)) {
+ // stack is not walkable
+ return frame(NULL, NULL, NULL);
+ } else {
+ return os::get_sender_for_C_frame(&myframe);
+ }
+}
+
+// Utility functions
+
+// From IA32 System Programming Guide
+enum {
+ trap_page_fault = 0xE
+};
+
+extern "C" void Fetch32PFI();
+extern "C" void Fetch32Resume();
+#ifdef AMD64
+extern "C" void FetchNPFI();
+extern "C" void FetchNResume();
+#endif // AMD64
+
+extern "C" JNIEXPORT int
+JVM_handle_linux_signal(int sig,
+ siginfo_t* info,
+ void* ucVoid,
+ int abort_if_unrecognized) {
+ ucontext_t* uc = (ucontext_t*) ucVoid;
+
+ Thread* t = ThreadLocalStorage::get_thread_slow();
+
+ SignalHandlerMark shm(t);
+
+  // Note: it's not uncommon for JNI code to use signal/sigset to install and
+  // then restore certain signal handlers (e.g. to temporarily block SIGPIPE,
+  // or to have a SIGILL handler when detecting the CPU type). When that
+  // happens, JVM_handle_linux_signal() might be invoked with junk
+  // info/ucVoid. To avoid unnecessary crashes when libjsig is not preloaded,
+  // try to handle signals that do not require siginfo/ucontext first.
+
+ if (sig == SIGPIPE || sig == SIGXFSZ) {
+ // allow chained handler to go first
+ if (os::Linux::chained_handler(sig, info, ucVoid)) {
+ return true;
+ } else {
+ if (PrintMiscellaneous && (WizardMode || Verbose)) {
+ char buf[64];
+ warning("Ignoring %s - see bugs 4229104 or 646499219",
+ os::exception_name(sig, buf, sizeof(buf)));
+ }
+ return true;
+ }
+ }
+
+ JavaThread* thread = NULL;
+ VMThread* vmthread = NULL;
+ if (os::Linux::signal_handlers_are_installed) {
+    if (t != NULL) {
+      if (t->is_Java_thread()) {
+        thread = (JavaThread*)t;
+      } else if (t->is_VM_thread()) {
+        vmthread = (VMThread*)t;
+      }
+    }
+ }
+/*
+ NOTE: does not seem to work on linux.
+ if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
+ // can't decode this kind of signal
+ info = NULL;
+ } else {
+ assert(sig == info->si_signo, "bad siginfo");
+ }
+*/
+ // decide if this trap can be handled by a stub
+ address stub = NULL;
+
+ address pc = NULL;
+
+ //%note os_trap_1
+ if (info != NULL && uc != NULL && thread != NULL) {
+ pc = (address) os::Linux::ucontext_get_pc(uc);
+
+    if (pc == (address) Fetch32PFI) {
+      uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume);
+      return 1;
+    }
+#ifdef AMD64
+    if (pc == (address) FetchNPFI) {
+      uc->uc_mcontext.gregs[REG_PC] = intptr_t(FetchNResume);
+      return 1;
+    }
+#endif // AMD64
+
+ // Handle ALL stack overflow variations here
+ if (sig == SIGSEGV) {
+ address addr = (address) info->si_addr;
+
+ // check if fault address is within thread stack
+ if (addr < thread->stack_base() &&
+ addr >= thread->stack_base() - thread->stack_size()) {
+ // stack overflow
+ if (thread->in_stack_yellow_zone(addr)) {
+ thread->disable_stack_yellow_zone();
+ if (thread->thread_state() == _thread_in_Java) {
+ // Throw a stack overflow exception. Guard pages will be reenabled
+ // while unwinding the stack.
+ stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
+ } else {
+ // Thread was in the vm or native code. Return and try to finish.
+ return 1;
+ }
+ } else if (thread->in_stack_red_zone(addr)) {
+ // Fatal red zone violation. Disable the guard pages and fall through
+ // to handle_unexpected_exception way down below.
+ thread->disable_stack_red_zone();
+ tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
+ } else {
+ // Accessing stack address below sp may cause SEGV if current
+ // thread has MAP_GROWSDOWN stack. This should only happen when
+ // current thread was created by user code with MAP_GROWSDOWN flag
+ // and then attached to VM. See notes in os_linux.cpp.
+ if (thread->osthread()->expanding_stack() == 0) {
+ thread->osthread()->set_expanding_stack();
+ if (os::Linux::manually_expand_stack(thread, addr)) {
+ thread->osthread()->clear_expanding_stack();
+ return 1;
+ }
+ thread->osthread()->clear_expanding_stack();
+ } else {
+ fatal("recursive segv. expanding stack.");
+ }
+ }
+ }
+ }
+
+ if (thread->thread_state() == _thread_in_Java) {
+ // Java thread running in Java code => find exception handler if any
+ // a fault inside compiled code, the interpreter, or a stub
+
+ if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
+ stub = SharedRuntime::get_poll_stub(pc);
+ } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
+ // BugId 4454115: A read from a MappedByteBuffer can fault
+ // here if the underlying file has been truncated.
+ // Do not crash the VM in such a case.
+ CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+ nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
+ if (nm != NULL && nm->has_unsafe_access()) {
+ stub = StubRoutines::handler_for_unsafe_access();
+ }
+ }
+ else
+
+#ifdef AMD64
+ if (sig == SIGFPE &&
+ (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
+        stub = SharedRuntime::continuation_for_implicit_exception(
+                 thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
+#else
+ if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
+ // HACK: si_code does not work on linux 2.2.12-20!!!
+ int op = pc[0];
+ if (op == 0xDB) {
+ // FIST
+ // TODO: The encoding of D2I in i486.ad can cause an exception
+ // prior to the fist instruction if there was an invalid operation
+ // pending. We want to dismiss that exception. From the win_32
+ // side it also seems that if it really was the fist causing
+ // the exception that we do the d2i by hand with different
+ // rounding. Seems kind of weird.
+ // NOTE: that we take the exception at the NEXT floating point instruction.
+ assert(pc[0] == 0xDB, "not a FIST opcode");
+ assert(pc[1] == 0x14, "not a FIST opcode");
+ assert(pc[2] == 0x24, "not a FIST opcode");
+ return true;
+ } else if (op == 0xF7) {
+ // IDIV
+ stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
+ } else {
+ // TODO: handle more cases if we are using other x86 instructions
+ // that can generate SIGFPE signal on linux.
+ tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
+ fatal("please update this code.");
+ }
+#endif // AMD64
+ } else if (sig == SIGSEGV &&
+ !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
+ // Determination of interpreter/vtable stub/compiled code null exception
+ stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
+ }
+ } else if (thread->thread_state() == _thread_in_vm &&
+ sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
+ thread->doing_unsafe_access()) {
+ stub = StubRoutines::handler_for_unsafe_access();
+ }
+
+ // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
+ // and the heap gets shrunk before the field access.
+ if ((sig == SIGSEGV) || (sig == SIGBUS)) {
+ address addr = JNI_FastGetField::find_slowcase_pc(pc);
+ if (addr != (address)-1) {
+ stub = addr;
+ }
+ }
+
+ // Check to see if we caught the safepoint code in the
+ // process of write protecting the memory serialization page.
+ // It write enables the page immediately after protecting it
+ // so we can just return to retry the write.
+ if ((sig == SIGSEGV) &&
+ os::is_memory_serialize_page(thread, (address) info->si_addr)) {
+      // Block the current thread until the memory serialize page
+      // permission is restored.
+ os::block_on_serialize_page_trap();
+ return true;
+ }
+ }
+
+#ifndef AMD64
+ // Execution protection violation
+ //
+ // This should be kept as the last step in the triage. We don't
+ // have a dedicated trap number for a no-execute fault, so be
+ // conservative and allow other handlers the first shot.
+ //
+ // Note: We don't test that info->si_code == SEGV_ACCERR here.
+ // this si_code is so generic that it is almost meaningless; and
+ // the si_code for this condition may change in the future.
+ // Furthermore, a false-positive should be harmless.
+ if (UnguardOnExecutionViolation > 0 &&
+ (sig == SIGSEGV || sig == SIGBUS) &&
+ uc->uc_mcontext.gregs[REG_TRAPNO] == trap_page_fault) {
+ int page_size = os::vm_page_size();
+ address addr = (address) info->si_addr;
+ address pc = os::Linux::ucontext_get_pc(uc);
+ // Make sure the pc and the faulting address are sane.
+ //
+ // If an instruction spans a page boundary, and the page containing
+ // the beginning of the instruction is executable but the following
+ // page is not, the pc and the faulting address might be slightly
+ // different - we still want to unguard the 2nd page in this case.
+ //
+ // 15 bytes seems to be a (very) safe value for max instruction size.
+ bool pc_is_near_addr =
+ (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
+ bool instr_spans_page_boundary =
+ (align_size_down((intptr_t) pc ^ (intptr_t) addr,
+ (intptr_t) page_size) > 0);
+
+ if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
+ static volatile address last_addr =
+ (address) os::non_memory_address_word();
+
+ // In conservative mode, don't unguard unless the address is in the VM
+ if (addr != last_addr &&
+ (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
+
+ // Set memory to RWX and retry
+ address page_start =
+ (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
+ bool res = os::protect_memory((char*) page_start, page_size,
+ os::MEM_PROT_RWX);
+
+ if (PrintMiscellaneous && Verbose) {
+ char buf[256];
+ jio_snprintf(buf, sizeof(buf), "Execution protection violation "
+ "at " INTPTR_FORMAT
+ ", unguarding " INTPTR_FORMAT ": %s, errno=%d", addr,
+ page_start, (res ? "success" : "failed"), errno);
+ tty->print_raw_cr(buf);
+ }
+ stub = pc;
+
+ // Set last_addr so if we fault again at the same address, we don't end
+ // up in an endless loop.
+ //
+ // There are two potential complications here. Two threads trapping at
+ // the same address at the same time could cause one of the threads to
+ // think it already unguarded, and abort the VM. Likely very rare.
+ //
+ // The other race involves two threads alternately trapping at
+ // different addresses and failing to unguard the page, resulting in
+ // an endless loop. This condition is probably even more unlikely than
+ // the first.
+ //
+ // Although both cases could be avoided by using locks or thread local
+ // last_addr, these solutions are unnecessary complication: this
+ // handler is a best-effort safety net, not a complete solution. It is
+ // disabled by default and should only be used as a workaround in case
+ // we missed any no-execute-unsafe VM code.
+
+ last_addr = addr;
+ }
+ }
+ }
+#endif // !AMD64
+
+ if (stub != NULL) {
+ // save all thread context in case we need to restore it
+ if (thread != NULL) thread->set_saved_exception_pc(pc);
+
+ uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub;
+ return true;
+ }
+
+ // signal-chaining
+ if (os::Linux::chained_handler(sig, info, ucVoid)) {
+ return true;
+ }
+
+ if (!abort_if_unrecognized) {
+ // caller wants another chance, so give it to him
+ return false;
+ }
+
+ if (pc == NULL && uc != NULL) {
+ pc = os::Linux::ucontext_get_pc(uc);
+ }
+
+ // unmask current signal
+ sigset_t newset;
+ sigemptyset(&newset);
+ sigaddset(&newset, sig);
+ sigprocmask(SIG_UNBLOCK, &newset, NULL);
+
+ VMError err(t, sig, pc, info, ucVoid);
+ err.report_and_die();
+
+ ShouldNotReachHere();
+}
+
+void os::Linux::init_thread_fpu_state(void) {
+#ifndef AMD64
+ // set fpu to 53 bit precision
+ set_fpu_control_word(0x27f);
+#endif // !AMD64
+}
+
+int os::Linux::get_fpu_control_word(void) {
+#ifdef AMD64
+ return 0;
+#else
+ int fpu_control;
+ _FPU_GETCW(fpu_control);
+ return fpu_control & 0xffff;
+#endif // AMD64
+}
+
+void os::Linux::set_fpu_control_word(int fpu_control) {
+#ifndef AMD64
+ _FPU_SETCW(fpu_control);
+#endif // !AMD64
+}
+
+// Check that the linux kernel version is 2.4 or higher since earlier
+// versions do not support SSE without patches.
+bool os::supports_sse() {
+#ifdef AMD64
+ return true;
+#else
+  struct utsname uts;
+  if (uname(&uts) != 0) return false; // uname fails?
+  char *minor_string;
+  int major = strtol(uts.release, &minor_string, 10);
+  int minor = strtol(minor_string + 1, NULL, 10);
+  bool result = (major > 2 || (major == 2 && minor >= 4));
+#ifndef PRODUCT
+  if (PrintMiscellaneous && Verbose) {
+    tty->print("OS version is %d.%d, which %s support SSE/SSE2\n",
+               major, minor, result ? "DOES" : "does NOT");
+ }
+#endif
+ return result;
+#endif // AMD64
+}
+
+bool os::is_allocatable(size_t bytes) {
+#ifdef AMD64
+ // unused on amd64?
+ return true;
+#else
+
+ if (bytes < 2 * G) {
+ return true;
+ }
+
+ char* addr = reserve_memory(bytes, NULL);
+
+ if (addr != NULL) {
+ release_memory(addr, bytes);
+ }
+
+ return addr != NULL;
+#endif // AMD64
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// thread stack
+
+#ifdef AMD64
+size_t os::Linux::min_stack_allowed = 64 * K;
+
+// amd64: pthread on amd64 is always in floating stack mode
+bool os::Linux::supports_variable_stack_size() { return true; }
+#else
+size_t os::Linux::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;
+
+#ifdef __GNUC__
+#define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
+#endif
+
+// Test whether the pthread library supports variable thread stack sizes.
+// LinuxThreads in fixed stack mode allocates a fixed 2M slot for each thread;
+// LinuxThreads in floating stack mode and NPTL support variable stack sizes.
+bool os::Linux::supports_variable_stack_size() {
+ if (os::Linux::is_NPTL()) {
+ // NPTL, yes
+ return true;
+
+ } else {
+ // Note: We can't control default stack size when creating a thread.
+ // If we use non-default stack size (pthread_attr_setstacksize), both
+ // floating stack and non-floating stack LinuxThreads will return the
+ // same value. This makes it impossible to implement this function by
+ // detecting thread stack size directly.
+ //
+ // An alternative approach is to check %gs. Fixed-stack LinuxThreads
+ // do not use %gs, so its value is 0. Floating-stack LinuxThreads use
+ // %gs (either as LDT selector or GDT selector, depending on kernel)
+ // to access thread specific data.
+ //
+ // Note that %gs is a reserved glibc register since early 2001, so
+ // applications are not allowed to change its value (Ulrich Drepper from
+ // Redhat confirmed that all known offenders have been modified to use
+ // either %fs or TSD). In the worst case scenario, when VM is embedded in
+ // a native application that plays with %gs, we might see non-zero %gs
+ // even LinuxThreads is running in fixed stack mode. As the result, we'll
+ // return true and skip _thread_safety_check(), so we may not be able to
+ // detect stack-heap collisions. But otherwise it's harmless.
+ //
+#ifdef __GNUC__
+ return (GET_GS() != 0);
+#else
+ return false;
+#endif
+ }
+}
+#endif // AMD64
+
+// return default stack size for thr_type
+size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
+ // default stack size (compiler thread needs larger stack)
+#ifdef AMD64
+ size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
+#else
+ size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
+#endif // AMD64
+ return s;
+}
+
+size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
+  // Creating a guard page is very expensive. Java threads have the HotSpot
+  // guard pages, so only enable the glibc guard page for non-Java threads.
+ return (thr_type == java_thread ? 0 : page_size());
+}
+
+// Java thread:
+//
+// Low memory addresses
+// +------------------------+
+// | |\ JavaThread created by VM does not have glibc
+// | glibc guard page | - guard, attached Java thread usually has
+// | |/ 1 page glibc guard.
+// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
+// | |\
+// | HotSpot Guard Pages | - red and yellow pages
+// | |/
+// +------------------------+ JavaThread::stack_yellow_zone_base()
+// | |\
+// | Normal Stack | -
+// | |/
+// P2 +------------------------+ Thread::stack_base()
+//
+// Non-Java thread:
+//
+// Low memory addresses
+// +------------------------+
+// | |\
+// | glibc guard page | - usually 1 page
+// | |/
+// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
+// | |\
+// | Normal Stack | -
+// | |/
+// P2 +------------------------+ Thread::stack_base()
+//
+// ** P1 (aka bottom) and size ( P2 = P1 - size) are the address and stack size returned from
+// pthread_attr_getstack()
+
+static void current_stack_region(address * bottom, size_t * size) {
+ if (os::Linux::is_initial_thread()) {
+ // initial thread needs special handling because pthread_getattr_np()
+ // may return bogus value.
+ *bottom = os::Linux::initial_thread_stack_bottom();
+ *size = os::Linux::initial_thread_stack_size();
+ } else {
+ pthread_attr_t attr;
+
+ int rslt = pthread_getattr_np(pthread_self(), &attr);
+
+ // JVM needs to know exact stack location, abort if it fails
+ if (rslt != 0) {
+ if (rslt == ENOMEM) {
+ vm_exit_out_of_memory(0, "pthread_getattr_np");
+ } else {
+ fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
+ }
+ }
+
+ if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
+ fatal("Can not locate current stack attributes!");
+ }
+
+ pthread_attr_destroy(&attr);
+
+ }
+ assert(os::current_stack_pointer() >= *bottom &&
+ os::current_stack_pointer() < *bottom + *size, "just checking");
+}
+
+address os::current_stack_base() {
+ address bottom;
+ size_t size;
+ current_stack_region(&bottom, &size);
+ return (bottom + size);
+}
+
+size_t os::current_stack_size() {
+ // stack size includes normal stack and HotSpot guard pages
+ address bottom;
+ size_t size;
+ current_stack_region(&bottom, &size);
+ return size;
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// helper functions for fatal error handler
+
+void os::print_context(outputStream *st, void *context) {
+ if (context == NULL) return;
+
+ ucontext_t *uc = (ucontext_t*)context;
+ st->print_cr("Registers:");
+#ifdef AMD64
+ st->print( "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
+ st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
+ st->print(", RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
+ st->print(", RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
+ st->cr();
+ st->print( "RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
+ st->print(", RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
+ st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
+ st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
+ st->cr();
+ st->print( "R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
+ st->print(", R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
+ st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
+ st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
+ st->cr();
+ st->print( "R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
+ st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
+ st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
+ st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
+ st->cr();
+ st->print( "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
+ st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
+ st->print(", CSGSFS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_CSGSFS]);
+ st->print(", ERR=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ERR]);
+ st->cr();
+ st->print(" TRAPNO=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_TRAPNO]);
+#else
+ st->print( "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EAX]);
+ st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBX]);
+ st->print(", ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ECX]);
+ st->print(", EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDX]);
+ st->cr();
+ st->print( "ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_UESP]);
+ st->print(", EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBP]);
+ st->print(", ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ESI]);
+ st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDI]);
+ st->cr();
+ st->print( "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EIP]);
+ st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
+ st->print(", CR2=" INTPTR_FORMAT, uc->uc_mcontext.cr2);
+#endif // AMD64
+ st->cr();
+ st->cr();
+
+ intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
+ st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
+ print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
+ st->cr();
+
+ // Note: it may be unsafe to inspect memory near pc. For example, pc may
+ // point to garbage if entry point in an nmethod is corrupted. Leave
+ // this at the end, and hope for the best.
+ address pc = os::Linux::ucontext_get_pc(uc);
+ st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
+ print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
+}
+
+void os::print_register_info(outputStream *st, void *context) {
+ if (context == NULL) return;
+
+ ucontext_t *uc = (ucontext_t*)context;
+
+ st->print_cr("Register to memory mapping:");
+ st->cr();
+
+ // this is horrendously verbose but the layout of the registers in the
+ // context does not match how we defined our abstract Register set, so
+ // we can't just iterate through the gregs area
+
+ // this is only for the "general purpose" registers
+
+#ifdef AMD64
+ st->print("RAX="); print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
+ st->print("RBX="); print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
+ st->print("RCX="); print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
+ st->print("RDX="); print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
+ st->print("RSP="); print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
+ st->print("RBP="); print_location(st, uc->uc_mcontext.gregs[REG_RBP]);
+ st->print("RSI="); print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
+ st->print("RDI="); print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
+ st->print("R8 ="); print_location(st, uc->uc_mcontext.gregs[REG_R8]);
+ st->print("R9 ="); print_location(st, uc->uc_mcontext.gregs[REG_R9]);
+ st->print("R10="); print_location(st, uc->uc_mcontext.gregs[REG_R10]);
+ st->print("R11="); print_location(st, uc->uc_mcontext.gregs[REG_R11]);
+ st->print("R12="); print_location(st, uc->uc_mcontext.gregs[REG_R12]);
+ st->print("R13="); print_location(st, uc->uc_mcontext.gregs[REG_R13]);
+ st->print("R14="); print_location(st, uc->uc_mcontext.gregs[REG_R14]);
+ st->print("R15="); print_location(st, uc->uc_mcontext.gregs[REG_R15]);
+#else
+ st->print("EAX="); print_location(st, uc->uc_mcontext.gregs[REG_EAX]);
+ st->print("EBX="); print_location(st, uc->uc_mcontext.gregs[REG_EBX]);
+ st->print("ECX="); print_location(st, uc->uc_mcontext.gregs[REG_ECX]);
+ st->print("EDX="); print_location(st, uc->uc_mcontext.gregs[REG_EDX]);
+ st->print("ESP="); print_location(st, uc->uc_mcontext.gregs[REG_ESP]);
+ st->print("EBP="); print_location(st, uc->uc_mcontext.gregs[REG_EBP]);
+ st->print("ESI="); print_location(st, uc->uc_mcontext.gregs[REG_ESI]);
+ st->print("EDI="); print_location(st, uc->uc_mcontext.gregs[REG_EDI]);
+#endif // AMD64
+
+ st->cr();
+}
+
+void os::setup_fpu() {
+#ifndef AMD64
+ address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
+ __asm__ volatile ( "fldcw (%0)" :
+ : "r" (fpu_cntrl) : "memory");
+#endif // !AMD64
+}
+
+#ifndef PRODUCT
+void os::verify_stack_alignment() {
+#ifdef AMD64
+ assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
+#endif
+}
+#endif
diff --git a/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.hpp b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.hpp
new file mode 100644
index 000000000..9bb22f8e6
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.hpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_OS_LINUX_AARCH64_HPP
+#define OS_CPU_LINUX_AARCH64_VM_OS_LINUX_AARCH64_HPP
+
+ static void setup_fpu();
+ static bool supports_sse();
+
+ static jlong rdtsc();
+
+ static bool is_allocatable(size_t bytes);
+
+ // Used to register dynamic code cache area with the OS
+ // Note: Currently only used in 64 bit Windows implementations
+ static bool register_code_area(char *low, char *high) { return true; }
+
+#endif // OS_CPU_LINUX_AARCH64_VM_OS_LINUX_AARCH64_HPP
diff --git a/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.inline.hpp b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.inline.hpp
new file mode 100644
index 000000000..fee719b01
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.inline.hpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_OS_LINUX_AARCH64_INLINE_HPP
+#define OS_CPU_LINUX_AARCH64_VM_OS_LINUX_AARCH64_INLINE_HPP
+
+#include "runtime/os.hpp"
+
+// See http://www.technovelty.org/code/c/reading-rdtsc.htl for details
+inline jlong os::rdtsc() {
+#ifndef AMD64
+ // 64 bit result in edx:eax
+ uint64_t res;
+ __asm__ __volatile__ ("rdtsc" : "=A" (res));
+ return (jlong)res;
+#else
+ uint64_t res;
+ uint32_t ts1, ts2;
+ __asm__ __volatile__ ("rdtsc" : "=a" (ts1), "=d" (ts2));
+ res = ((uint64_t)ts1 | (uint64_t)ts2 << 32);
+ return (jlong)res;
+#endif // AMD64
+}
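+
+// Usage sketch (do_work is a placeholder): deltas of rdtsc() give raw
+// cycle counts, e.g.
+//   jlong start = os::rdtsc();
+//   do_work();
+//   jlong cycles = os::rdtsc() - start;
+// The counts are only comparable on one core with an invariant TSC.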
+
+#endif // OS_CPU_LINUX_AARCH64_VM_OS_LINUX_AARCH64_INLINE_HPP
diff --git a/src/os_cpu/linux_aarch64/vm/prefetch_linux_aarch64.inline.hpp b/src/os_cpu/linux_aarch64/vm/prefetch_linux_aarch64.inline.hpp
new file mode 100644
index 000000000..ba1cf32aa
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/prefetch_linux_aarch64.inline.hpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_PREFETCH_LINUX_AARCH64_INLINE_HPP
+#define OS_CPU_LINUX_AARCH64_VM_PREFETCH_LINUX_AARCH64_INLINE_HPP
+
+#include "runtime/prefetch.hpp"
+
+
+inline void Prefetch::read (void *loc, intx interval) {
+#ifdef AMD64
+ __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));
+#endif // AMD64
+}
+
+inline void Prefetch::write(void *loc, intx interval) {
+#ifdef AMD64
+
+ // Do not use the 3dnow prefetchw instruction. It isn't supported on em64t.
+ // __asm__ ("prefetchw (%0,%1,1)" : : "r" (loc), "r" (interval));
+ __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));
+
+#endif // AMD64
+}
+
+#endif // OS_CPU_LINUX_AARCH64_VM_PREFETCH_LINUX_AARCH64_INLINE_HPP
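As with rdtsc, the prefetch bodies above still issue x86 prefetcht0 hints. The
AArch64 equivalent is the PRFM family; GCC's __builtin_prefetch lowers to PRFM
hints on AArch64, so a hedged sketch of the eventual aarch64 bodies (not part
of this patch) is:

// Sketch of AArch64 prefetch bodies (assumption: GCC builtins;
// __builtin_prefetch(addr, rw, locality) lowers to PRFM, with
// rw = 0 emitting a load (PLD) hint and rw = 1 a store (PST) hint).
inline void Prefetch::read (void *loc, intx interval) {
  __builtin_prefetch((char*)loc + interval, 0, 3);
}

inline void Prefetch::write(void *loc, intx interval) {
  __builtin_prefetch((char*)loc + interval, 1, 3);
}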
diff --git a/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.cpp b/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.cpp
new file mode 100644
index 000000000..45fcea9e0
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/threadLocalStorage.hpp"
+#include "thread_linux.inline.hpp"
+
+// Map stack pointer (%esp) to thread pointer for faster TLS access
+//
+// Here we use a flat table for better performance. Getting current thread
+// is down to one memory access (read _sp_map[%esp>>12]) in generated code
+// and two in runtime code (-fPIC code needs an extra load for _sp_map).
+//
+// This code assumes stack page is not shared by different threads. It works
+// in 32-bit VM when page size is 4K (or a multiple of 4K, if that matters).
+//
+// Notice that _sp_map is allocated in the bss segment, which is ZFOD
+// (zero-fill-on-demand). While it reserves 4M address space upfront,
+// actual memory pages are committed on demand.
+//
+// If an application creates and destroys a lot of threads, usually the
+// stack space freed by a thread will soon get reused by new thread
+// (this is especially true in NPTL or LinuxThreads in fixed-stack mode).
+// No memory page in _sp_map is wasted.
+//
+// However, it's still possible that we might end up populating &
+// committing a large fraction of the 4M table over time, but the actual
+// amount of live data in the table could be quite small. The max wastage
+// is less than 4M bytes. If it becomes an issue, we could use madvise()
+// with MADV_DONTNEED to reclaim unused (i.e. all-zero) pages in _sp_map.
+// MADV_DONTNEED on Linux keeps the virtual memory mapping, but zaps the
+// physical memory page (i.e. similar to MADV_FREE on Solaris).
+
+#if !defined(AMD64) && !defined(MINIMIZE_RAM_USAGE)
+Thread* ThreadLocalStorage::_sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
+
+void ThreadLocalStorage::generate_code_for_get_thread() {
+ // nothing we can do here for user-level thread
+}
+
+void ThreadLocalStorage::pd_init() {
+ assert(align_size_down(os::vm_page_size(), PAGE_SIZE) == os::vm_page_size(),
+ "page size must be multiple of PAGE_SIZE");
+}
+
+void ThreadLocalStorage::pd_set_thread(Thread* thread) {
+ os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+ address stack_top = os::current_stack_base();
+ size_t stack_size = os::current_stack_size();
+
+ for (address p = stack_top - stack_size; p < stack_top; p += PAGE_SIZE) {
+ // pd_set_thread() is called with non-NULL value when a new thread is
+ // created/attached, or with NULL value when a thread is about to exit.
+ // If both "thread" and the corresponding _sp_map[] entry are non-NULL,
+ // they should have the same value. Otherwise it might indicate that the
+ // stack page is shared by multiple threads. However, a more likely cause
+ // for this assertion to fail is that an attached thread exited without
+ // detaching itself from VM, which is a program error and could cause VM
+ // to crash.
+ assert(thread == NULL || _sp_map[(uintptr_t)p >> PAGE_SHIFT] == NULL ||
+ thread == _sp_map[(uintptr_t)p >> PAGE_SHIFT],
+ "thread exited without detaching from VM??");
+ _sp_map[(uintptr_t)p >> PAGE_SHIFT] = thread;
+ }
+}
+#else
+
+void ThreadLocalStorage::generate_code_for_get_thread() {
+ // nothing we can do here for user-level thread
+}
+
+void ThreadLocalStorage::pd_init() {
+}
+
+void ThreadLocalStorage::pd_set_thread(Thread* thread) {
+ os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+}
+#endif // !AMD64 && !MINIMIZE_RAM_USAGE
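The flat-table scheme documented above turns Thread::current() into a single
indexed load: every page of a thread's stack maps to the same Thread* slot, so
any in-stack sp value finds the owner. (The movl %%esp fast path in the
matching header is likewise x86 residue; an aarch64 build would read sp
instead.) A standalone illustration of the indexing arithmetic, assuming the
32-bit/4K-page configuration the code uses:

// Standalone sketch of the _sp_map lookup described above
// (hypothetical; assumes SP_BITLENGTH == 32 and 4K pages).
#include <stdint.h>

#define PAGE_SHIFT   12                 /* 4K pages             */
#define SP_BITLENGTH 32                 /* 32-bit address space */

static void* sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];   // 2^20 slots

static inline void* thread_for_sp(uintptr_t sp) {
  // Every stack page of a thread holds the same Thread*, so any
  // sp inside the stack indexes the owning thread's slot.
  return sp_map[sp >> PAGE_SHIFT];
}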
diff --git a/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.hpp b/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.hpp
new file mode 100644
index 000000000..f3f2f26f8
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.hpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_THREADLS_LINUX_AARCH64_HPP
+#define OS_CPU_LINUX_AARCH64_VM_THREADLS_LINUX_AARCH64_HPP
+
+ // Processor dependent parts of ThreadLocalStorage
+
+#if !defined(AMD64) && !defined(MINIMIZE_RAM_USAGE)
+
+ // map stack pointer to thread pointer - see notes in threadLS_linux_aarch64.cpp
+ #define SP_BITLENGTH 32
+ #define PAGE_SHIFT 12
+ #define PAGE_SIZE (1UL << PAGE_SHIFT)
+ static Thread* _sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
+
+public:
+
+ static Thread** sp_map_addr() { return _sp_map; }
+
+ static Thread* thread() {
+ uintptr_t sp;
+ __asm__ volatile ("movl %%esp, %0" : "=r" (sp));
+ return _sp_map[sp >> PAGE_SHIFT];
+ }
+
+#else
+
+public:
+
+ static Thread* thread() {
+ return (Thread*) os::thread_local_storage_at(thread_index());
+ }
+
+#endif // AMD64 || MINIMIZE_RAM_USAGE
+
+#endif // OS_CPU_LINUX_AARCH64_VM_THREADLS_LINUX_AARCH64_HPP
diff --git a/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp b/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp
new file mode 100644
index 000000000..3f04c7730
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/frame.inline.hpp"
+#include "thread_linux.inline.hpp"
+
+// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
+// currently interrupted by SIGPROF
+bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
+ void* ucontext, bool isInJava) {
+
+ assert(Thread::current() == this, "caller must be current thread");
+ assert(this->is_Java_thread(), "must be JavaThread");
+
+ JavaThread* jt = (JavaThread *)this;
+
+ // If we have a last_Java_frame, then we should use it even if
+ // isInJava == true. It should be more reliable than ucontext info.
+ if (jt->has_last_Java_frame()) {
+ *fr_addr = jt->pd_last_frame();
+ return true;
+ }
+
+ // At this point, we don't have a last_Java_frame, so
+ // we try to glean some information out of the ucontext
+ // if we were running Java code when SIGPROF came in.
+ if (isInJava) {
+ ucontext_t* uc = (ucontext_t*) ucontext;
+
+ intptr_t* ret_fp;
+ intptr_t* ret_sp;
+ ExtendedPC addr = os::Linux::fetch_frame_from_ucontext(this, uc,
+ &ret_sp, &ret_fp);
+ if (addr.pc() == NULL || ret_sp == NULL) {
+ // ucontext wasn't useful
+ return false;
+ }
+
+ frame ret_frame(ret_sp, ret_fp, addr.pc());
+ if (!ret_frame.safe_for_sender(jt)) {
+#ifdef COMPILER2
+ // C2 uses ebp as a general register; see if a NULL fp helps
+ frame ret_frame2(ret_sp, NULL, addr.pc());
+ if (!ret_frame2.safe_for_sender(jt)) {
+ // nothing else to try if the frame isn't good
+ return false;
+ }
+ ret_frame = ret_frame2;
+#else
+ // nothing else to try if the frame isn't good
+ return false;
+#endif /* COMPILER2 */
+ }
+ *fr_addr = ret_frame;
+ return true;
+ }
+
+ // nothing else to try
+ return false;
+}
+
+void JavaThread::cache_global_variables() { }
+
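The function above is the platform hook behind the AsyncGetCallTrace profiling
path: from a SIGPROF context it must produce one trustworthy seed frame. The
same fallback ladder, condensed into one screenful using only the calls the
file already makes (the helper name seed_frame is hypothetical, and the
COMPILER2-only retry is shown unconditionally):

// Condensed sketch of the ladder above -- illustrative, not a new API.
static bool seed_frame(JavaThread* jt, ucontext_t* uc, bool in_java, frame* fr) {
  if (jt->has_last_Java_frame()) {                // 1. trust the frame anchor
    *fr = jt->pd_last_frame();
    return true;
  }
  if (!in_java) return false;                     // no Java context to mine
  intptr_t *sp, *fp;
  ExtendedPC pc = os::Linux::fetch_frame_from_ucontext(jt, uc, &sp, &fp);
  if (pc.pc() == NULL || sp == NULL) return false;
  frame f(sp, fp, pc.pc());                       // 2. frame from ucontext
  if (!f.safe_for_sender(jt)) {
    frame f2(sp, NULL, pc.pc());                  // 3. C2 may use fp as scratch
    if (!f2.safe_for_sender(jt)) return false;
    f = f2;
  }
  *fr = f;
  return true;
}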
diff --git a/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.hpp b/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.hpp
new file mode 100644
index 000000000..7a7d22252
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_THREAD_LINUX_AARCH64_HPP
+#define OS_CPU_LINUX_AARCH64_VM_THREAD_LINUX_AARCH64_HPP
+
+ private:
+ void pd_initialize() {
+ _anchor.clear();
+ }
+
+ frame pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+ if (_anchor.last_Java_pc() != NULL) {
+ return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
+ } else {
+ // This will pick up pc from sp
+ return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp());
+ }
+ }
+
+ public:
+ // Mutators are highly dangerous....
+ intptr_t* last_Java_fp() { return _anchor.last_Java_fp(); }
+ void set_last_Java_fp(intptr_t* fp) { _anchor.set_last_Java_fp(fp); }
+
+ void set_base_of_stack_pointer(intptr_t* base_sp) {
+ }
+
+ static ByteSize last_Java_fp_offset() {
+ return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
+ }
+
+ intptr_t* base_of_stack_pointer() {
+ return NULL;
+ }
+ void record_base_of_stack_pointer() {
+ }
+
+ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
+ bool isInJava);
+
+ // These routines are only used on cpu architectures that
+ // have separate register stacks (Itanium).
+ static bool register_stack_overflow() { return false; }
+ static void enable_register_stack_guard() {}
+ static void disable_register_stack_guard() {}
+
+#endif // OS_CPU_LINUX_AARCH64_VM_THREAD_LINUX_AARCH64_HPP
diff --git a/src/os_cpu/linux_aarch64/vm/vmStructs_linux_aarch64.hpp b/src/os_cpu/linux_aarch64/vm/vmStructs_linux_aarch64.hpp
new file mode 100644
index 000000000..c01e6c91c
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/vmStructs_linux_aarch64.hpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_VMSTRUCTS_LINUX_AARCH64_HPP
+#define OS_CPU_LINUX_AARCH64_VM_VMSTRUCTS_LINUX_AARCH64_HPP
+
+// These are the OS and CPU-specific fields, types and integer
+// constants required by the Serviceability Agent. This file is
+// referenced by vmStructs.cpp.
+
+#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
+ \
+ /******************************/ \
+ /* Threads (NOTE: incomplete) */ \
+ /******************************/ \
+ nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t) \
+ nonstatic_field(OSThread, _pthread_id, pthread_t) \
+ /* This must be the last entry, and must be present */ \
+ last_entry()
+
+
+#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type, last_entry) \
+ \
+ /**********************/ \
+ /* Posix Thread IDs */ \
+ /**********************/ \
+ \
+ declare_integer_type(OSThread::thread_id_t) \
+ declare_unsigned_integer_type(pthread_t) \
+ \
+ /* This must be the last entry, and must be present */ \
+ last_entry()
+
+#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
+ \
+ /* This must be the last entry, and must be present */ \
+ last_entry()
+
+#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant, last_entry) \
+ \
+ /* This must be the last entry, and must be present */ \
+ last_entry()
+
+#endif // OS_CPU_LINUX_AARCH64_VM_VMSTRUCTS_LINUX_AARCH64_HPP
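These macros are consumed by vmStructs.cpp, which supplies its own definition
for each parameter so that every nonstatic_field entry expands into one row of
the Serviceability Agent's field table. A simplified sketch of that expansion
pattern (the macro names below are illustrative, not the exact HotSpot
definitions):

// Illustrative consumer of VM_STRUCTS_OS_CPU (names hypothetical):
// vmStructs.cpp passes a row-generating macro for each parameter slot.
#define GENERATE_NONSTATIC_ROW(klass, field, type) \
  { #klass, #field, #type, offset_of(klass, field) },

// Expanding VM_STRUCTS_OS_CPU(GENERATE_NONSTATIC_ROW, ...) would then
// emit one table row per entry above, e.g.
//   { "OSThread", "_thread_id",  "OSThread::thread_id_t", ... },
//   { "OSThread", "_pthread_id", "pthread_t",             ... },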
diff --git a/src/os_cpu/linux_aarch64/vm/vm_version_linux_aarch64.cpp b/src/os_cpu/linux_aarch64/vm/vm_version_linux_aarch64.cpp
new file mode 100644
index 000000000..e9a0c606f
--- /dev/null
+++ b/src/os_cpu/linux_aarch64/vm/vm_version_linux_aarch64.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/os.hpp"
+#include "vm_version_aarch64.hpp"
+
diff --git a/src/share/vm/adlc/main.cpp b/src/share/vm/adlc/main.cpp
index 47e207a4b..5cf345c35 100644
--- a/src/share/vm/adlc/main.cpp
+++ b/src/share/vm/adlc/main.cpp
@@ -235,6 +235,11 @@ int main(int argc, char *argv[])
AD.addInclude(AD._CPP_file, "nativeInst_x86.hpp");
AD.addInclude(AD._CPP_file, "vmreg_x86.inline.hpp");
#endif
+#ifdef TARGET_ARCH_aarch64
+ AD.addInclude(AD._CPP_file, "assembler_aarch64.inline.hpp");
+ AD.addInclude(AD._CPP_file, "nativeInst_aarch64.hpp");
+ AD.addInclude(AD._CPP_file, "vmreg_aarch64.inline.hpp");
+#endif
#ifdef TARGET_ARCH_sparc
AD.addInclude(AD._CPP_file, "assembler_sparc.inline.hpp");
AD.addInclude(AD._CPP_file, "nativeInst_sparc.hpp");
diff --git a/src/share/vm/asm/assembler.cpp b/src/share/vm/asm/assembler.cpp
index 2bcdcbc88..8e51e20d9 100644
--- a/src/share/vm/asm/assembler.cpp
+++ b/src/share/vm/asm/assembler.cpp
@@ -31,6 +31,9 @@
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "assembler_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
diff --git a/src/share/vm/asm/assembler.hpp b/src/share/vm/asm/assembler.hpp
index 67e328087..a680ac895 100644
--- a/src/share/vm/asm/assembler.hpp
+++ b/src/share/vm/asm/assembler.hpp
@@ -31,7 +31,7 @@
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/top.hpp"
-#ifdef TARGET_ARCH_AARCH64
+#ifdef TARGET_ARCH_aarch64
# include "register_aarch64.hpp"
# include "vm_version_aarch64.hpp"
#else
diff --git a/src/share/vm/asm/codeBuffer.hpp b/src/share/vm/asm/codeBuffer.hpp
index 53c90c2e6..90ec8de2b 100644
--- a/src/share/vm/asm/codeBuffer.hpp
+++ b/src/share/vm/asm/codeBuffer.hpp
@@ -561,6 +561,9 @@ class CodeBuffer: public StackObj {
#ifdef TARGET_ARCH_x86
# include "codeBuffer_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "codeBuffer_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "codeBuffer_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_Defs.hpp b/src/share/vm/c1/c1_Defs.hpp
index bebb3b0be..b0cd76373 100644
--- a/src/share/vm/c1/c1_Defs.hpp
+++ b/src/share/vm/c1/c1_Defs.hpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "register_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "register_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "register_sparc.hpp"
#endif
@@ -53,6 +56,9 @@ enum {
#ifdef TARGET_ARCH_x86
# include "c1_Defs_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_Defs_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_Defs_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_FpuStackSim.hpp b/src/share/vm/c1/c1_FpuStackSim.hpp
index a1e4c3832..f07e97a4d 100644
--- a/src/share/vm/c1/c1_FpuStackSim.hpp
+++ b/src/share/vm/c1/c1_FpuStackSim.hpp
@@ -35,6 +35,9 @@ class FpuStackSim;
#ifdef TARGET_ARCH_x86
# include "c1_FpuStackSim_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_FpuStackSim_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_FpuStackSim_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_FrameMap.cpp b/src/share/vm/c1/c1_FrameMap.cpp
index ea50b276f..660534665 100644
--- a/src/share/vm/c1/c1_FrameMap.cpp
+++ b/src/share/vm/c1/c1_FrameMap.cpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vmreg_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
diff --git a/src/share/vm/c1/c1_FrameMap.hpp b/src/share/vm/c1/c1_FrameMap.hpp
index 288fc5c1b..1b3c68b95 100644
--- a/src/share/vm/c1/c1_FrameMap.hpp
+++ b/src/share/vm/c1/c1_FrameMap.hpp
@@ -85,6 +85,9 @@ class FrameMap : public CompilationResourceObj {
#ifdef TARGET_ARCH_x86
# include "c1_FrameMap_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_FrameMap_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_FrameMap_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_LIR.cpp b/src/share/vm/c1/c1_LIR.cpp
index c85e71f99..a00012b4f 100644
--- a/src/share/vm/c1/c1_LIR.cpp
+++ b/src/share/vm/c1/c1_LIR.cpp
@@ -43,6 +43,7 @@ Register LIR_OprDesc::as_register_hi() const {
}
#if defined(X86)
+#ifndef TARGET_ARCH_aarch64
XMMRegister LIR_OprDesc::as_xmm_float_reg() const {
return FrameMap::nr2xmmreg(xmm_regnr());
@@ -53,6 +54,7 @@ XMMRegister LIR_OprDesc::as_xmm_double_reg() const {
return FrameMap::nr2xmmreg(xmm_regnrLo());
}
+#endif
#endif // X86
#if defined(SPARC) || defined(PPC)
@@ -1499,10 +1501,12 @@ void LIR_OprDesc::print(outputStream* out) const {
out->print(as_register_hi()->name());
out->print(as_register_lo()->name());
#if defined(X86)
+#ifndef TARGET_ARCH_aarch64
} else if (is_single_xmm()) {
out->print(as_xmm_float_reg()->name());
} else if (is_double_xmm()) {
out->print(as_xmm_double_reg()->name());
+#endif
} else if (is_single_fpu()) {
out->print("fpu%d", fpu_regnr());
} else if (is_double_fpu()) {
diff --git a/src/share/vm/c1/c1_LIR.hpp b/src/share/vm/c1/c1_LIR.hpp
index eb9ff9a5f..9ab60eef0 100644
--- a/src/share/vm/c1/c1_LIR.hpp
+++ b/src/share/vm/c1/c1_LIR.hpp
@@ -433,10 +433,12 @@ class LIR_OprDesc: public CompilationResourceObj {
}
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
XMMRegister as_xmm_float_reg() const;
XMMRegister as_xmm_double_reg() const;
// for compatibility with RInfo
int fpu () const { return lo_reg_half(); }
+#endif
#endif // X86
#if defined(SPARC) || defined(ARM) || defined(PPC)
FloatRegister as_float_reg () const;
diff --git a/src/share/vm/c1/c1_LIRAssembler.cpp b/src/share/vm/c1/c1_LIRAssembler.cpp
index b3082cb0c..180b0409c 100644
--- a/src/share/vm/c1/c1_LIRAssembler.cpp
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp
@@ -34,6 +34,10 @@
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "nativeInst_aarch64.hpp"
+# include "vmreg_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
diff --git a/src/share/vm/c1/c1_LIRAssembler.hpp b/src/share/vm/c1/c1_LIRAssembler.hpp
index 58adf5919..24348c1ad 100644
--- a/src/share/vm/c1/c1_LIRAssembler.hpp
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp
@@ -252,6 +252,9 @@ class LIR_Assembler: public CompilationResourceObj {
#ifdef TARGET_ARCH_x86
# include "c1_LIRAssembler_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_LIRAssembler_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_LIRAssembler_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_LinearScan.cpp b/src/share/vm/c1/c1_LinearScan.cpp
index 56f946365..d6cd728e8 100644
--- a/src/share/vm/c1/c1_LinearScan.cpp
+++ b/src/share/vm/c1/c1_LinearScan.cpp
@@ -35,6 +35,9 @@
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vmreg_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
@@ -137,11 +140,13 @@ int LinearScan::reg_num(LIR_Opr opr) {
} else if (opr->is_double_cpu()) {
return opr->cpu_regnrLo();
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
} else if (opr->is_single_xmm()) {
return opr->fpu_regnr() + pd_first_xmm_reg;
} else if (opr->is_double_xmm()) {
return opr->fpu_regnrLo() + pd_first_xmm_reg;
#endif
+#endif
} else if (opr->is_single_fpu()) {
return opr->fpu_regnr() + pd_first_fpu_reg;
} else if (opr->is_double_fpu()) {
@@ -1292,8 +1297,10 @@ void LinearScan::build_intervals() {
// performed and so the temp ranges would be useless
if (has_fpu_registers()) {
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
if (UseSSE < 2) {
#endif
+#endif
for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
@@ -1301,6 +1308,7 @@ void LinearScan::build_intervals() {
caller_save_registers[num_caller_save_registers++] = reg_num(opr);
}
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
}
if (UseSSE > 0) {
for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) {
@@ -1311,6 +1319,7 @@ void LinearScan::build_intervals() {
}
}
#endif
+#endif
}
assert(num_caller_save_registers <= LinearScan::nof_regs, "out of bounds");
@@ -2107,12 +2116,14 @@ LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
#ifndef __SOFTFP__
case T_FLOAT: {
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
if (UseSSE >= 1) {
assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
assert(interval->assigned_regHi() == any_reg, "must not have hi register");
return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);
}
#endif
+#endif
assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
assert(interval->assigned_regHi() == any_reg, "must not have hi register");
@@ -2121,12 +2132,14 @@ LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
case T_DOUBLE: {
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
if (UseSSE >= 2) {
assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
}
#endif
+#endif
#ifdef SPARC
assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
@@ -2602,6 +2615,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
return 1;
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
} else if (opr->is_single_xmm()) {
VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));
@@ -2609,6 +2623,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
scope_values->append(sv);
return 1;
#endif
+#endif
} else if (opr->is_single_fpu()) {
#ifdef X86
@@ -2692,6 +2707,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
} else if (opr->is_double_xmm()) {
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg();
@@ -2707,6 +2723,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
}
# endif
#endif
+#endif
} else if (opr->is_double_fpu()) {
// On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
@@ -3617,10 +3634,12 @@ void RegisterVerifier::process_operations(LIR_List* ops, IntervalList* input_sta
}
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
}
#endif
+#endif
}
// process xhandler before output and temp operands
@@ -4536,9 +4555,11 @@ void Interval::print(outputStream* out) const {
} else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
#ifdef X86
+#ifndef TARGET_ARCH_aarch64
} else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
#endif
+#endif
} else {
ShouldNotReachHere();
}
diff --git a/src/share/vm/c1/c1_LinearScan.hpp b/src/share/vm/c1/c1_LinearScan.hpp
index 0c06f1b01..75042cd47 100644
--- a/src/share/vm/c1/c1_LinearScan.hpp
+++ b/src/share/vm/c1/c1_LinearScan.hpp
@@ -128,8 +128,7 @@ class LinearScan : public CompilationResourceObj {
any_reg = -1,
nof_cpu_regs = pd_nof_cpu_regs_linearscan,
nof_fpu_regs = pd_nof_fpu_regs_linearscan,
- nof_xmm_regs = pd_nof_xmm_regs_linearscan,
- nof_regs = nof_cpu_regs + nof_fpu_regs + nof_xmm_regs
+ nof_regs = nof_cpu_regs + nof_fpu_regs
};
private:
@@ -976,6 +975,9 @@ class LinearScanTimers : public StackObj {
#ifdef TARGET_ARCH_x86
# include "c1_LinearScan_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_LinearScan_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_LinearScan_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_MacroAssembler.hpp b/src/share/vm/c1/c1_MacroAssembler.hpp
index 55d980350..28fa682fd 100644
--- a/src/share/vm/c1/c1_MacroAssembler.hpp
+++ b/src/share/vm/c1/c1_MacroAssembler.hpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "assembler_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
@@ -64,6 +67,9 @@ class C1_MacroAssembler: public MacroAssembler {
#ifdef TARGET_ARCH_x86
# include "c1_MacroAssembler_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_MacroAssembler_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_MacroAssembler_sparc.hpp"
#endif
diff --git a/src/share/vm/c1/c1_globals.hpp b/src/share/vm/c1/c1_globals.hpp
index 15f3cc10f..662890a99 100644
--- a/src/share/vm/c1/c1_globals.hpp
+++ b/src/share/vm/c1/c1_globals.hpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "c1_globals_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_globals_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_globals_sparc.hpp"
#endif
diff --git a/src/share/vm/classfile/classFileStream.hpp b/src/share/vm/classfile/classFileStream.hpp
index cf6f0e5f3..6299fca4f 100644
--- a/src/share/vm/classfile/classFileStream.hpp
+++ b/src/share/vm/classfile/classFileStream.hpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
diff --git a/src/share/vm/classfile/stackMapTable.hpp b/src/share/vm/classfile/stackMapTable.hpp
index 08e9bd254..9ce2cb8f3 100644
--- a/src/share/vm/classfile/stackMapTable.hpp
+++ b/src/share/vm/classfile/stackMapTable.hpp
@@ -33,6 +33,9 @@
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
diff --git a/src/share/vm/classfile/verifier.cpp b/src/share/vm/classfile/verifier.cpp
index 9ab47f7b6..949e9987c 100644
--- a/src/share/vm/classfile/verifier.cpp
+++ b/src/share/vm/classfile/verifier.cpp
@@ -45,6 +45,9 @@
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
diff --git a/src/share/vm/code/codeBlob.cpp b/src/share/vm/code/codeBlob.cpp
index 244c32043..49384b5e1 100644
--- a/src/share/vm/code/codeBlob.cpp
+++ b/src/share/vm/code/codeBlob.cpp
@@ -42,6 +42,9 @@
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "nativeInst_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
diff --git a/src/share/vm/code/compiledIC.hpp b/src/share/vm/code/compiledIC.hpp
index fe1cfb30b..ee5c6f31d 100644
--- a/src/share/vm/code/compiledIC.hpp
+++ b/src/share/vm/code/compiledIC.hpp
@@ -32,6 +32,9 @@
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "nativeInst_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
diff --git a/src/share/vm/code/icBuffer.cpp b/src/share/vm/code/icBuffer.cpp
index ed704575e..736fa7f8d 100644
--- a/src/share/vm/code/icBuffer.cpp
+++ b/src/share/vm/code/icBuffer.cpp
@@ -40,6 +40,9 @@
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "assembler_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
diff --git a/src/share/vm/code/relocInfo.cpp b/src/share/vm/code/relocInfo.cpp
index 4fd82dfaa..55beb1fff 100644
--- a/src/share/vm/code/relocInfo.cpp
+++ b/src/share/vm/code/relocInfo.cpp
@@ -33,6 +33,10 @@
# include "assembler_x86.inline.hpp"
# include "nativeInst_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "assembler_aarch64.inline.hpp"
+# include "nativeInst_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
# include "nativeInst_sparc.hpp"
diff --git a/src/share/vm/code/relocInfo.hpp b/src/share/vm/code/relocInfo.hpp
index 97fcbb1a3..d81911f36 100644
--- a/src/share/vm/code/relocInfo.hpp
+++ b/src/share/vm/code/relocInfo.hpp
@@ -424,8 +424,8 @@ class relocInfo VALUE_OBJ_CLASS_SPEC {
#ifdef TARGET_ARCH_x86
# include "relocInfo_x86.hpp"
#endif
-#ifdef TARGET_ARCH_AARCH64
-# include "relocInfo_x86.hpp"
+#ifdef TARGET_ARCH_aarch64
+# include "relocInfo_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "relocInfo_sparc.hpp"
diff --git a/src/share/vm/code/vmreg.hpp b/src/share/vm/code/vmreg.hpp
index d57e6f89b..a2b137a2a 100644
--- a/src/share/vm/code/vmreg.hpp
+++ b/src/share/vm/code/vmreg.hpp
@@ -30,6 +30,9 @@
#ifdef TARGET_ARCH_x86
# include "register_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "register_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "register_sparc.hpp"
#endif
@@ -51,6 +54,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/adGlobals_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/adGlobals_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/adGlobals_sparc.hpp"
#endif
@@ -170,6 +176,9 @@ public:
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vmreg_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.hpp"
#endif
diff --git a/src/share/vm/compiler/disassembler.cpp b/src/share/vm/compiler/disassembler.cpp
index 9603e863e..f15c91581 100644
--- a/src/share/vm/compiler/disassembler.cpp
+++ b/src/share/vm/compiler/disassembler.cpp
@@ -35,6 +35,9 @@
#ifdef TARGET_ARCH_x86
# include "depChecker_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "depChecker_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "depChecker_sparc.hpp"
#endif
diff --git a/src/share/vm/compiler/disassembler.hpp b/src/share/vm/compiler/disassembler.hpp
index a70b8ccd3..9f63ebc3f 100644
--- a/src/share/vm/compiler/disassembler.hpp
+++ b/src/share/vm/compiler/disassembler.hpp
@@ -67,6 +67,9 @@ class Disassembler {
#ifdef TARGET_ARCH_x86
# include "disassembler_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "disassembler_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "disassembler_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/abstractInterpreter.hpp b/src/share/vm/interpreter/abstractInterpreter.hpp
index 756ad88e2..a0e5e9bf2 100644
--- a/src/share/vm/interpreter/abstractInterpreter.hpp
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp
@@ -35,6 +35,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "interp_masm_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "interp_masm_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "interp_masm_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/bytecode.hpp b/src/share/vm/interpreter/bytecode.hpp
index 107161a97..7008f346e 100644
--- a/src/share/vm/interpreter/bytecode.hpp
+++ b/src/share/vm/interpreter/bytecode.hpp
@@ -31,6 +31,9 @@
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/bytecodeInterpreter.cpp b/src/share/vm/interpreter/bytecodeInterpreter.cpp
index 68bd1c73e..6c9098375 100644
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp
@@ -44,6 +44,9 @@
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "orderAccess_linux_aarch64.inline.hpp"
+#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
diff --git a/src/share/vm/interpreter/bytecodeInterpreter.hpp b/src/share/vm/interpreter/bytecodeInterpreter.hpp
index 8757760c3..6d507595e 100644
--- a/src/share/vm/interpreter/bytecodeInterpreter.hpp
+++ b/src/share/vm/interpreter/bytecodeInterpreter.hpp
@@ -35,6 +35,9 @@
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
@@ -588,6 +591,9 @@ void print();
#ifdef TARGET_ARCH_x86
# include "bytecodeInterpreter_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytecodeInterpreter_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytecodeInterpreter_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp b/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp
index 3715a5212..f5db0b4d9 100644
--- a/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp
+++ b/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp
@@ -46,6 +46,9 @@
#ifdef TARGET_ARCH_x86
# include "bytecodeInterpreter_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytecodeInterpreter_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytecodeInterpreter_sparc.inline.hpp"
#endif
diff --git a/src/share/vm/interpreter/bytecodeStream.hpp b/src/share/vm/interpreter/bytecodeStream.hpp
index 532a05632..e0c583492 100644
--- a/src/share/vm/interpreter/bytecodeStream.hpp
+++ b/src/share/vm/interpreter/bytecodeStream.hpp
@@ -31,6 +31,9 @@
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/bytecodes.cpp b/src/share/vm/interpreter/bytecodes.cpp
index 5e4b9dcee..c26167334 100644
--- a/src/share/vm/interpreter/bytecodes.cpp
+++ b/src/share/vm/interpreter/bytecodes.cpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/bytecodes.hpp b/src/share/vm/interpreter/bytecodes.hpp
index 2ea733660..dcb5242a6 100644
--- a/src/share/vm/interpreter/bytecodes.hpp
+++ b/src/share/vm/interpreter/bytecodes.hpp
@@ -288,6 +288,9 @@ class Bytecodes: AllStatic {
#ifdef TARGET_ARCH_x86
# include "bytecodes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytecodes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytecodes_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/cppInterpreter.hpp b/src/share/vm/interpreter/cppInterpreter.hpp
index 4997a4432..18c50f7c4 100644
--- a/src/share/vm/interpreter/cppInterpreter.hpp
+++ b/src/share/vm/interpreter/cppInterpreter.hpp
@@ -84,6 +84,9 @@ class CppInterpreter: public AbstractInterpreter {
#ifdef TARGET_ARCH_x86
# include "cppInterpreter_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "cppInterpreter_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "cppInterpreter_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/cppInterpreterGenerator.hpp b/src/share/vm/interpreter/cppInterpreterGenerator.hpp
index c27805e3e..5d6b3c59f 100644
--- a/src/share/vm/interpreter/cppInterpreterGenerator.hpp
+++ b/src/share/vm/interpreter/cppInterpreterGenerator.hpp
@@ -47,6 +47,9 @@ class CppInterpreterGenerator: public AbstractInterpreterGenerator {
#ifdef TARGET_ARCH_x86
# include "cppInterpreterGenerator_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "cppInterpreterGenerator_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "cppInterpreterGenerator_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/interpreter.hpp b/src/share/vm/interpreter/interpreter.hpp
index 0ab0be747..71681ef96 100644
--- a/src/share/vm/interpreter/interpreter.hpp
+++ b/src/share/vm/interpreter/interpreter.hpp
@@ -146,6 +146,9 @@ class Interpreter: public CC_INTERP_ONLY(CppInterpreter) NOT_CC_INTERP(TemplateI
#ifdef TARGET_ARCH_x86
# include "interpreter_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "interpreter_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "interpreter_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/interpreterGenerator.hpp b/src/share/vm/interpreter/interpreterGenerator.hpp
index 7bc43eccb..1dc7cb298 100644
--- a/src/share/vm/interpreter/interpreterGenerator.hpp
+++ b/src/share/vm/interpreter/interpreterGenerator.hpp
@@ -44,6 +44,9 @@ InterpreterGenerator(StubQueue* _code);
#ifdef TARGET_ARCH_x86
# include "interpreterGenerator_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "interpreterGenerator_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "interpreterGenerator_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/interpreterRuntime.cpp b/src/share/vm/interpreter/interpreterRuntime.cpp
index e1c166587..8660ff37a 100644
--- a/src/share/vm/interpreter/interpreterRuntime.cpp
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp
@@ -59,6 +59,9 @@
#ifdef TARGET_ARCH_x86
# include "vm_version_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vm_version_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vm_version_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/interpreterRuntime.hpp b/src/share/vm/interpreter/interpreterRuntime.hpp
index 93c1a9eca..13db73886 100644
--- a/src/share/vm/interpreter/interpreterRuntime.hpp
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp
@@ -153,6 +153,9 @@ class InterpreterRuntime: AllStatic {
#ifdef TARGET_ARCH_x86
# include "interpreterRT_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "interpreterRT_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "interpreterRT_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/templateInterpreter.cpp b/src/share/vm/interpreter/templateInterpreter.cpp
index 3c8ef1252..459d02f98 100644
--- a/src/share/vm/interpreter/templateInterpreter.cpp
+++ b/src/share/vm/interpreter/templateInterpreter.cpp
@@ -341,7 +341,6 @@ void TemplateInterpreterGenerator::generate_all() {
}
{ CodeletMark cm(_masm, "throw exception entrypoints");
- generate_AARM64_loop();
Interpreter::_throw_ArrayIndexOutOfBoundsException_entry = generate_ArrayIndexOutOfBounds_handler("java/lang/ArrayIndexOutOfBoundsException");
Interpreter::_throw_ArrayStoreException_entry = generate_klass_exception_handler("java/lang/ArrayStoreException" );
Interpreter::_throw_ArithmeticException_entry = generate_exception_handler("java/lang/ArithmeticException" , "/ by zero");
diff --git a/src/share/vm/interpreter/templateInterpreter.hpp b/src/share/vm/interpreter/templateInterpreter.hpp
index 4db0a419c..160f7e5c3 100644
--- a/src/share/vm/interpreter/templateInterpreter.hpp
+++ b/src/share/vm/interpreter/templateInterpreter.hpp
@@ -186,6 +186,9 @@ class TemplateInterpreter: public AbstractInterpreter {
#ifdef TARGET_ARCH_x86
# include "templateInterpreter_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "templateInterpreter_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "templateInterpreter_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/templateInterpreterGenerator.hpp b/src/share/vm/interpreter/templateInterpreterGenerator.hpp
index 2b8c38c27..943dea552 100644
--- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp
+++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp
@@ -31,13 +31,13 @@
#ifndef CC_INTERP
class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
+
protected:
// entry points for shared code sequence
address _unimplemented_bytecode;
address _illegal_bytecode_sequence;
- address generate_AARM64_loop();
// shared code sequences
// Converter for native abi result to tosca result
@@ -91,6 +91,9 @@ class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
#ifdef TARGET_ARCH_x86
# include "templateInterpreterGenerator_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "templateInterpreterGenerator_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "templateInterpreterGenerator_sparc.hpp"
#endif
diff --git a/src/share/vm/interpreter/templateTable.hpp b/src/share/vm/interpreter/templateTable.hpp
index 3b006ad03..88ee0b59c 100644
--- a/src/share/vm/interpreter/templateTable.hpp
+++ b/src/share/vm/interpreter/templateTable.hpp
@@ -34,6 +34,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "interp_masm_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "interp_masm_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "interp_masm_sparc.hpp"
#endif
@@ -364,6 +367,9 @@ class TemplateTable: AllStatic {
#ifdef TARGET_ARCH_MODEL_x86_64
# include "templateTable_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "templateTable_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "templateTable_sparc.hpp"
#endif
diff --git a/src/share/vm/oops/constantPoolOop.hpp b/src/share/vm/oops/constantPoolOop.hpp
index c2f985d77..b45a57f98 100644
--- a/src/share/vm/oops/constantPoolOop.hpp
+++ b/src/share/vm/oops/constantPoolOop.hpp
@@ -33,6 +33,9 @@
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp
index a050f960f..2e50dcbeb 100644
--- a/src/share/vm/oops/oop.inline.hpp
+++ b/src/share/vm/oops/oop.inline.hpp
@@ -46,6 +46,9 @@
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
diff --git a/src/share/vm/oops/typeArrayOop.hpp b/src/share/vm/oops/typeArrayOop.hpp
index 292609e43..1e1b37d31 100644
--- a/src/share/vm/oops/typeArrayOop.hpp
+++ b/src/share/vm/oops/typeArrayOop.hpp
@@ -30,6 +30,9 @@
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "orderAccess_linux_aarch64.inline.hpp"
+#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
diff --git a/src/share/vm/opto/buildOopMap.cpp b/src/share/vm/opto/buildOopMap.cpp
index fc731604e..6d6b42163 100644
--- a/src/share/vm/opto/buildOopMap.cpp
+++ b/src/share/vm/opto/buildOopMap.cpp
@@ -35,6 +35,9 @@
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vmreg_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
diff --git a/src/share/vm/opto/c2_globals.hpp b/src/share/vm/opto/c2_globals.hpp
index 4d5424da5..85120f007 100644
--- a/src/share/vm/opto/c2_globals.hpp
+++ b/src/share/vm/opto/c2_globals.hpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "c2_globals_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c2_globals_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c2_globals_sparc.hpp"
#endif
diff --git a/src/share/vm/opto/c2compiler.cpp b/src/share/vm/opto/c2compiler.cpp
index 713e3f1d1..7083c6c33 100644
--- a/src/share/vm/opto/c2compiler.cpp
+++ b/src/share/vm/opto/c2compiler.cpp
@@ -31,6 +31,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/ad_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
diff --git a/src/share/vm/opto/compile.cpp b/src/share/vm/opto/compile.cpp
index 0a374d792..af0217604 100644
--- a/src/share/vm/opto/compile.cpp
+++ b/src/share/vm/opto/compile.cpp
@@ -68,6 +68,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/ad_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
diff --git a/src/share/vm/opto/gcm.cpp b/src/share/vm/opto/gcm.cpp
index 8b8f31157..2eb58650b 100644
--- a/src/share/vm/opto/gcm.cpp
+++ b/src/share/vm/opto/gcm.cpp
@@ -41,6 +41,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/ad_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
diff --git a/src/share/vm/opto/lcm.cpp b/src/share/vm/opto/lcm.cpp
index 1ad9f0b1f..a5c220b7f 100644
--- a/src/share/vm/opto/lcm.cpp
+++ b/src/share/vm/opto/lcm.cpp
@@ -36,6 +36,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/ad_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
diff --git a/src/share/vm/opto/locknode.hpp b/src/share/vm/opto/locknode.hpp
index 91b99bc00..74c0a38e7 100644
--- a/src/share/vm/opto/locknode.hpp
+++ b/src/share/vm/opto/locknode.hpp
@@ -34,6 +34,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/ad_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
diff --git a/src/share/vm/opto/machnode.hpp b/src/share/vm/opto/machnode.hpp
index 566e031d1..b535cc922 100644
--- a/src/share/vm/opto/machnode.hpp
+++ b/src/share/vm/opto/machnode.hpp
@@ -95,12 +95,14 @@ public:
}
#if defined(IA32) || defined(AMD64)
+#ifndef TARGET_ARCH_aarch64
XMMRegister as_XMMRegister(PhaseRegAlloc *ra_, const Node *node) const {
return ::as_XMMRegister(reg(ra_, node));
}
XMMRegister as_XMMRegister(PhaseRegAlloc *ra_, const Node *node, int idx) const {
return ::as_XMMRegister(reg(ra_, node, idx));
}
+#endif // TARGET_ARCH_aarch64
#endif
virtual intptr_t constant() const;
diff --git a/src/share/vm/opto/matcher.cpp b/src/share/vm/opto/matcher.cpp
index 397385670..8f45c5677 100644
--- a/src/share/vm/opto/matcher.cpp
+++ b/src/share/vm/opto/matcher.cpp
@@ -43,6 +43,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/ad_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
diff --git a/src/share/vm/opto/output.hpp b/src/share/vm/opto/output.hpp
index 50b6e7603..402c7cf96 100644
--- a/src/share/vm/opto/output.hpp
+++ b/src/share/vm/opto/output.hpp
@@ -33,6 +33,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/ad_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
diff --git a/src/share/vm/opto/regmask.cpp b/src/share/vm/opto/regmask.cpp
index ce220f01b..5d4668a53 100644
--- a/src/share/vm/opto/regmask.cpp
+++ b/src/share/vm/opto/regmask.cpp
@@ -31,6 +31,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/ad_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
diff --git a/src/share/vm/opto/regmask.hpp b/src/share/vm/opto/regmask.hpp
index e50ff84ca..25c1d9b16 100644
--- a/src/share/vm/opto/regmask.hpp
+++ b/src/share/vm/opto/regmask.hpp
@@ -34,6 +34,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/adGlobals_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/adGlobals_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/adGlobals_sparc.hpp"
#endif
diff --git a/src/share/vm/opto/runtime.cpp b/src/share/vm/opto/runtime.cpp
index b03a3943a..dc62b0fca 100644
--- a/src/share/vm/opto/runtime.cpp
+++ b/src/share/vm/opto/runtime.cpp
@@ -74,6 +74,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/ad_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
diff --git a/src/share/vm/prims/jniCheck.cpp b/src/share/vm/prims/jniCheck.cpp
index 3bf4ecd1a..e830fb999 100644
--- a/src/share/vm/prims/jniCheck.cpp
+++ b/src/share/vm/prims/jniCheck.cpp
@@ -39,6 +39,9 @@
#ifdef TARGET_ARCH_x86
# include "jniTypes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "jniTypes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "jniTypes_sparc.hpp"
#endif
diff --git a/src/share/vm/prims/jni_md.h b/src/share/vm/prims/jni_md.h
index 13d70b974..6209a6644 100644
--- a/src/share/vm/prims/jni_md.h
+++ b/src/share/vm/prims/jni_md.h
@@ -27,8 +27,8 @@
#ifdef TARGET_ARCH_x86
# include "jni_x86.h"
#endif
-#ifdef TARGET_ARCH_AARCH64
-# include "jni_x86.h"
+#ifdef TARGET_ARCH_aarch64
+# include "jni_aarch64.h"
#endif
#ifdef TARGET_ARCH_sparc
# include "jni_sparc.h"
diff --git a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp
index d7dfea5a9..4d349e3d6 100644
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp
+++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp
@@ -31,6 +31,9 @@
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
diff --git a/src/share/vm/prims/methodHandles.hpp b/src/share/vm/prims/methodHandles.hpp
index 514ba6aa9..0ce89d91e 100644
--- a/src/share/vm/prims/methodHandles.hpp
+++ b/src/share/vm/prims/methodHandles.hpp
@@ -726,6 +726,9 @@ public:
#ifdef TARGET_ARCH_x86
# include "methodHandles_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "methodHandles_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "methodHandles_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/atomic.cpp b/src/share/vm/runtime/atomic.cpp
index 80780d780..5e74aa465 100644
--- a/src/share/vm/runtime/atomic.cpp
+++ b/src/share/vm/runtime/atomic.cpp
@@ -39,6 +39,9 @@
#ifdef TARGET_OS_ARCH_linux_x86
# include "atomic_linux_x86.inline.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "atomic_linux_aarch64.inline.hpp"
+#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "atomic_linux_sparc.inline.hpp"
#endif
diff --git a/src/share/vm/runtime/deoptimization.cpp b/src/share/vm/runtime/deoptimization.cpp
index 0e2a9839b..6c36ac745 100644
--- a/src/share/vm/runtime/deoptimization.cpp
+++ b/src/share/vm/runtime/deoptimization.cpp
@@ -53,6 +53,9 @@
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vmreg_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
@@ -72,6 +75,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/ad_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/dtraceJSDT.hpp b/src/share/vm/runtime/dtraceJSDT.hpp
index bff431084..38de0a7da 100644
--- a/src/share/vm/runtime/dtraceJSDT.hpp
+++ b/src/share/vm/runtime/dtraceJSDT.hpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "nativeInst_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/frame.cpp b/src/share/vm/runtime/frame.cpp
index 7ae9aa8d5..5bb1b8562 100644
--- a/src/share/vm/runtime/frame.cpp
+++ b/src/share/vm/runtime/frame.cpp
@@ -47,6 +47,9 @@
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "nativeInst_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/frame.hpp b/src/share/vm/runtime/frame.hpp
index c55380e57..939ee0658 100644
--- a/src/share/vm/runtime/frame.hpp
+++ b/src/share/vm/runtime/frame.hpp
@@ -38,6 +38,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/adGlobals_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/adGlobals_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/adGlobals_sparc.hpp"
#endif
@@ -479,6 +482,9 @@ class frame VALUE_OBJ_CLASS_SPEC {
#ifdef TARGET_ARCH_x86
# include "frame_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "frame_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "frame_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/frame.inline.hpp b/src/share/vm/runtime/frame.inline.hpp
index b80b042dc..af1038dca 100644
--- a/src/share/vm/runtime/frame.inline.hpp
+++ b/src/share/vm/runtime/frame.inline.hpp
@@ -34,6 +34,9 @@
#ifdef TARGET_ARCH_x86
# include "jniTypes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "jniTypes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "jniTypes_sparc.hpp"
#endif
@@ -88,6 +91,9 @@ inline bool frame::is_first_frame() const {
#ifdef TARGET_ARCH_x86
# include "frame_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "frame_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "frame_sparc.inline.hpp"
#endif
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index 1838252c3..498f28a2c 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -40,6 +40,9 @@
#ifdef TARGET_ARCH_x86
# include "globals_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "globals_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "globals_sparc.hpp"
#endif
@@ -67,6 +70,9 @@
#ifdef TARGET_OS_ARCH_linux_x86
# include "globals_linux_x86.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "globals_linux_aarch64.hpp"
+#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "globals_linux_sparc.hpp"
#endif
@@ -98,6 +104,9 @@
#ifdef TARGET_ARCH_x86
# include "c1_globals_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c1_globals_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c1_globals_sparc.hpp"
#endif
@@ -124,6 +133,9 @@
#ifdef TARGET_ARCH_x86
# include "c2_globals_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "c2_globals_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "c2_globals_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/icache.hpp b/src/share/vm/runtime/icache.hpp
index d460a0ffb..ba81a06ff 100644
--- a/src/share/vm/runtime/icache.hpp
+++ b/src/share/vm/runtime/icache.hpp
@@ -71,6 +71,9 @@ class AbstractICache : AllStatic {
#ifdef TARGET_ARCH_x86
# include "icache_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "icache_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "icache_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/java.cpp b/src/share/vm/runtime/java.cpp
index f256e515e..69f59b736 100644
--- a/src/share/vm/runtime/java.cpp
+++ b/src/share/vm/runtime/java.cpp
@@ -66,6 +66,9 @@
#ifdef TARGET_ARCH_x86
# include "vm_version_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vm_version_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vm_version_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/javaCalls.hpp b/src/share/vm/runtime/javaCalls.hpp
index d4f85956f..f6847e97f 100644
--- a/src/share/vm/runtime/javaCalls.hpp
+++ b/src/share/vm/runtime/javaCalls.hpp
@@ -33,6 +33,9 @@
#ifdef TARGET_ARCH_x86
# include "jniTypes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "jniTypes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "jniTypes_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/javaFrameAnchor.hpp b/src/share/vm/runtime/javaFrameAnchor.hpp
index 8374aa2a9..3507ea158 100644
--- a/src/share/vm/runtime/javaFrameAnchor.hpp
+++ b/src/share/vm/runtime/javaFrameAnchor.hpp
@@ -29,6 +29,9 @@
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "orderAccess_linux_aarch64.inline.hpp"
+#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
@@ -109,6 +112,9 @@ friend class JavaCallWrapper;
#ifdef TARGET_ARCH_x86
# include "javaFrameAnchor_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "javaFrameAnchor_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "javaFrameAnchor_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/os.hpp b/src/share/vm/runtime/os.hpp
index 68f8a3ab6..51874126a 100644
--- a/src/share/vm/runtime/os.hpp
+++ b/src/share/vm/runtime/os.hpp
@@ -702,6 +702,9 @@ class os: AllStatic {
#ifdef TARGET_OS_ARCH_linux_x86
# include "os_linux_x86.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "os_linux_aarch64.hpp"
+#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "os_linux_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/registerMap.hpp b/src/share/vm/runtime/registerMap.hpp
index 5dd677ac4..67ef212d6 100644
--- a/src/share/vm/runtime/registerMap.hpp
+++ b/src/share/vm/runtime/registerMap.hpp
@@ -30,6 +30,9 @@
#ifdef TARGET_ARCH_x86
# include "register_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "register_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "register_sparc.hpp"
#endif
@@ -138,6 +141,9 @@ class RegisterMap : public StackObj {
#ifdef TARGET_ARCH_x86
# include "registerMap_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "registerMap_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "registerMap_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/relocator.hpp b/src/share/vm/runtime/relocator.hpp
index c34866f74..72045730f 100644
--- a/src/share/vm/runtime/relocator.hpp
+++ b/src/share/vm/runtime/relocator.hpp
@@ -30,6 +30,9 @@
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "bytes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/safepoint.cpp b/src/share/vm/runtime/safepoint.cpp
index 514e7c976..efe0bf4a0 100644
--- a/src/share/vm/runtime/safepoint.cpp
+++ b/src/share/vm/runtime/safepoint.cpp
@@ -53,6 +53,10 @@
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "nativeInst_aarch64.hpp"
+# include "vmreg_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
diff --git a/src/share/vm/runtime/sharedRuntime.cpp b/src/share/vm/runtime/sharedRuntime.cpp
index 503ad0719..587bb9310 100644
--- a/src/share/vm/runtime/sharedRuntime.cpp
+++ b/src/share/vm/runtime/sharedRuntime.cpp
@@ -60,6 +60,10 @@
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "nativeInst_aarch64.hpp"
+# include "vmreg_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
diff --git a/src/share/vm/runtime/stackValueCollection.cpp b/src/share/vm/runtime/stackValueCollection.cpp
index 110f7120d..8c382a40e 100644
--- a/src/share/vm/runtime/stackValueCollection.cpp
+++ b/src/share/vm/runtime/stackValueCollection.cpp
@@ -27,6 +27,9 @@
#ifdef TARGET_ARCH_x86
# include "jniTypes_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "jniTypes_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "jniTypes_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/statSampler.cpp b/src/share/vm/runtime/statSampler.cpp
index 0b24def24..41f469622 100644
--- a/src/share/vm/runtime/statSampler.cpp
+++ b/src/share/vm/runtime/statSampler.cpp
@@ -36,6 +36,9 @@
#ifdef TARGET_ARCH_x86
# include "vm_version_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vm_version_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vm_version_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/stubCodeGenerator.cpp b/src/share/vm/runtime/stubCodeGenerator.cpp
index b6068a5d9..5fc9797c9 100644
--- a/src/share/vm/runtime/stubCodeGenerator.cpp
+++ b/src/share/vm/runtime/stubCodeGenerator.cpp
@@ -30,6 +30,9 @@
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "assembler_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
diff --git a/src/share/vm/runtime/stubRoutines.hpp b/src/share/vm/runtime/stubRoutines.hpp
index 8481dce6d..bef1f6b00 100644
--- a/src/share/vm/runtime/stubRoutines.hpp
+++ b/src/share/vm/runtime/stubRoutines.hpp
@@ -34,6 +34,9 @@
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "nativeInst_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
@@ -105,6 +108,9 @@ class StubRoutines: AllStatic {
#ifdef TARGET_ARCH_MODEL_x86_64
# include "stubRoutines_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "stubRoutines_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "stubRoutines_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp
index 7846cc070..04b5bef65 100644
--- a/src/share/vm/runtime/thread.hpp
+++ b/src/share/vm/runtime/thread.hpp
@@ -1630,6 +1630,9 @@ public:
#ifdef TARGET_OS_ARCH_linux_x86
# include "thread_linux_x86.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "thread_linux_aarch64.hpp"
+#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "thread_linux_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/threadLocalStorage.hpp b/src/share/vm/runtime/threadLocalStorage.hpp
index c2f7a9e4c..a731f6dd3 100644
--- a/src/share/vm/runtime/threadLocalStorage.hpp
+++ b/src/share/vm/runtime/threadLocalStorage.hpp
@@ -47,6 +47,9 @@ class ThreadLocalStorage : AllStatic {
#ifdef TARGET_OS_ARCH_linux_x86
# include "threadLS_linux_x86.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "threadLS_linux_aarch64.hpp"
+#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "threadLS_linux_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
index 921945a95..d3920426e 100644
--- a/src/share/vm/runtime/vmStructs.cpp
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -112,6 +112,9 @@
#ifdef TARGET_ARCH_x86
# include "vmStructs_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vmStructs_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vmStructs_sparc.hpp"
#endif
@@ -139,6 +142,9 @@
#ifdef TARGET_OS_ARCH_linux_x86
# include "vmStructs_linux_x86.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "vmStructs_linux_aarch64.hpp"
+#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "vmStructs_linux_sparc.hpp"
#endif
@@ -208,6 +214,9 @@
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/adGlobals_x86_64.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_aarch64
+# include "adfiles/adGlobals_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/adGlobals_sparc.hpp"
#endif
diff --git a/src/share/vm/runtime/vm_version.cpp b/src/share/vm/runtime/vm_version.cpp
index 2d51b6781..728031fd3 100644
--- a/src/share/vm/runtime/vm_version.cpp
+++ b/src/share/vm/runtime/vm_version.cpp
@@ -29,6 +29,9 @@
#ifdef TARGET_ARCH_x86
# include "vm_version_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vm_version_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vm_version_sparc.hpp"
#endif
diff --git a/src/share/vm/utilities/copy.hpp b/src/share/vm/utilities/copy.hpp
index 3dcbfeee2..dabe28f06 100644
--- a/src/share/vm/utilities/copy.hpp
+++ b/src/share/vm/utilities/copy.hpp
@@ -325,6 +325,9 @@ class Copy : AllStatic {
#ifdef TARGET_ARCH_x86
# include "copy_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "copy_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "copy_sparc.hpp"
#endif
diff --git a/src/share/vm/utilities/globalDefinitions.hpp b/src/share/vm/utilities/globalDefinitions.hpp
index 19dcec160..9a8526d97 100644
--- a/src/share/vm/utilities/globalDefinitions.hpp
+++ b/src/share/vm/utilities/globalDefinitions.hpp
@@ -340,6 +340,9 @@ extern int LogMinObjAlignmentInBytes;
#ifdef TARGET_ARCH_x86
# include "globalDefinitions_x86.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "globalDefinitions_aarch64.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "globalDefinitions_sparc.hpp"
#endif
diff --git a/src/share/vm/utilities/taskqueue.hpp b/src/share/vm/utilities/taskqueue.hpp
index 545c6dbb5..09539f765 100644
--- a/src/share/vm/utilities/taskqueue.hpp
+++ b/src/share/vm/utilities/taskqueue.hpp
@@ -32,6 +32,9 @@
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
+#ifdef TARGET_OS_ARCH_linux_aarch64
+# include "orderAccess_linux_aarch64.inline.hpp"
+#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
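
For reference, every hunk above applies the same shared-code dispatch idiom: the build system defines exactly one per-architecture macro (TARGET_ARCH_<arch> for plain arch splits, TARGET_ARCH_MODEL_<arch> where 32/64-bit models diverge, TARGET_OS_ARCH_<os>_<arch> for OS+arch combinations), and each shared file selects the matching header with an #ifdef chain. The patch simply adds one aarch64 arm to each chain. A minimal sketch of the pattern follows; foo.hpp and the foo_* headers are hypothetical placeholders for illustration, not files from this patch:

// Hypothetical shared header foo.hpp -- shows the include-dispatch idiom
// this patch extends. The make system is assumed to pass exactly one
// -DTARGET_ARCH_<arch> flag (e.g. -DTARGET_ARCH_aarch64 when building
// with SRCARCH=aarch64), so exactly one arch-specific header is pulled
// into the shared code at compile time.
#ifdef TARGET_ARCH_x86
# include "foo_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64   // the arm added by this patch
# include "foo_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "foo_sparc.hpp"
#endif

Because selection happens entirely in the preprocessor, adding a new port touches every shared file with such a chain but never changes what existing targets compile: with TARGET_ARCH_aarch64 undefined, the new block is dead text to the other architectures.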